hash (strings of length 64) | content (strings of length 0 to 1.51M)
---|---|
3d94dd2bc0060ee0dd5ff5e48335ba407a277e0523e46d4380e6a92ce85223dd | from sympy import Derivative
from sympy.core.function import UndefinedFunction, AppliedUndef
from sympy.core.symbol import Symbol
from sympy.interactive.printing import init_printing
from sympy.printing.latex import LatexPrinter
from sympy.printing.pretty.pretty import PrettyPrinter
from sympy.printing.pretty.pretty_symbology import center_accent
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import PRECEDENCE
__all__ = ['vprint', 'vsstrrepr', 'vsprint', 'vpprint', 'vlatex',
'init_vprinting']
class VectorStrPrinter(StrPrinter):
"""String Printer for vector expressions. """
def _print_Derivative(self, e):
from sympy.physics.vector.functions import dynamicsymbols
t = dynamicsymbols._t
if (any(i == t for i in e.variables) and
isinstance(type(e.args[0]), UndefinedFunction)):
ol = str(e.args[0].func)
for i, v in enumerate(e.variables):
ol += dynamicsymbols._str
return ol
else:
return StrPrinter().doprint(e)
def _print_Function(self, e):
from sympy.physics.vector.functions import dynamicsymbols
t = dynamicsymbols._t
if isinstance(type(e), UndefinedFunction):
return StrPrinter().doprint(e).replace("(%s)" % t, '')
return e.func.__name__ + "(%s)" % self.stringify(e.args, ", ")
class VectorStrReprPrinter(VectorStrPrinter):
"""String repr printer for vector expressions."""
def _print_str(self, s):
return repr(s)
class VectorLatexPrinter(LatexPrinter):
"""Latex Printer for vector expressions. """
def _print_Function(self, expr, exp=None):
from sympy.physics.vector.functions import dynamicsymbols
func = expr.func.__name__
t = dynamicsymbols._t
if hasattr(self, '_print_' + func) and \
not isinstance(type(expr), UndefinedFunction):
return getattr(self, '_print_' + func)(expr, exp)
elif isinstance(type(expr), UndefinedFunction) and (expr.args == (t,)):
# treat this function like a symbol
expr = Symbol(func)
if exp is not None:
# copied from LatexPrinter._helper_print_standard_power, which
# we can't call because we only have exp as a string.
base = self.parenthesize(expr, PRECEDENCE['Pow'])
base = self.parenthesize_super(base)
return r"%s^{%s}" % (base, exp)
else:
return super()._print(expr)
else:
return super()._print_Function(expr, exp)
def _print_Derivative(self, der_expr):
from sympy.physics.vector.functions import dynamicsymbols
# make sure it is in the right form
der_expr = der_expr.doit()
if not isinstance(der_expr, Derivative):
return r"\left(%s\right)" % self.doprint(der_expr)
# check if expr is a dynamicsymbol
t = dynamicsymbols._t
expr = der_expr.expr
red = expr.atoms(AppliedUndef)
syms = der_expr.variables
test1 = not all(i.free_symbols == {t} for i in red)
test2 = not all([(t == i) for i in syms])
if test1 or test2:
return super()._print_Derivative(der_expr)
# done checking
dots = len(syms)
base = self._print_Function(expr)
base_split = base.split('_', 1)
base = base_split[0]
if dots == 1:
base = r"\dot{%s}" % base
elif dots == 2:
base = r"\ddot{%s}" % base
elif dots == 3:
base = r"\dddot{%s}" % base
elif dots == 4:
base = r"\ddddot{%s}" % base
else: # Fallback to standard printing
return super()._print_Derivative(der_expr)
if len(base_split) != 1:
base += '_' + base_split[1]
return base
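# A minimal usage sketch (illustrative, not part of the original module) of
# the dot-accent mapping implemented above; it assumes the public
# dynamicsymbols/vlatex API defined later in this module, and the expected
# output is shown approximately:
# >>> from sympy.physics.vector import dynamicsymbols, vlatex
# >>> q = dynamicsymbols('q')
# >>> t = dynamicsymbols._t
# >>> [vlatex(q.diff(t, n)) for n in range(1, 5)]
# ['\\dot{q}', '\\ddot{q}', '\\dddot{q}', '\\ddddot{q}']
# Fifth- and higher-order time derivatives fall back to the standard printer.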
class VectorPrettyPrinter(PrettyPrinter):
"""Pretty Printer for vectorialexpressions. """
def _print_Derivative(self, deriv):
from sympy.physics.vector.functions import dynamicsymbols
# XXX use U('PARTIAL DIFFERENTIAL') here ?
t = dynamicsymbols._t
dot_i = 0
syms = list(reversed(deriv.variables))
while len(syms) > 0:
if syms[-1] == t:
syms.pop()
dot_i += 1
else:
return super()._print_Derivative(deriv)
if not (isinstance(type(deriv.expr), UndefinedFunction)
and (deriv.expr.args == (t,))):
return super()._print_Derivative(deriv)
else:
pform = self._print_Function(deriv.expr)
# this condition only arises with some sort of non-standard dynamic
# symbol, so fall back to SymPy's standard printing
if len(pform.picture) > 1:
return super()._print_Derivative(deriv)
# There are only special symbols up to fourth-order derivatives
if dot_i >= 5:
return super()._print_Derivative(deriv)
# Deal with special symbols
dots = {0 : "",
1 : "\N{COMBINING DOT ABOVE}",
2 : "\N{COMBINING DIAERESIS}",
3 : "\N{COMBINING THREE DOTS ABOVE}",
4 : "\N{COMBINING FOUR DOTS ABOVE}"}
d = pform.__dict__
# if unicode is false, calculate the number of apostrophes needed and
# add them, along with the time argument, to the output
if not self._use_unicode:
apostrophes = "'" * dot_i
d['picture'][0] += apostrophes + "(t)"
else:
d['picture'] = [center_accent(d['picture'][0], dots[dot_i])]
return pform
def _print_Function(self, e):
from sympy.physics.vector.functions import dynamicsymbols
t = dynamicsymbols._t
# XXX works only for applied functions
func = e.func
args = e.args
func_name = func.__name__
pform = self._print_Symbol(Symbol(func_name))
# If this function is an Undefined function of t, it is probably a
# dynamic symbol, so we'll skip the (t). The rest of the code is
# identical to the normal PrettyPrinter code
if not (isinstance(func, UndefinedFunction) and (args == (t,))):
return super()._print_Function(e)
return pform
def vprint(expr, **settings):
r"""Function for printing of expressions generated in the
sympy.physics vector package.
Extends SymPy's StrPrinter, takes the same settings accepted by SymPy's
:func:`~.sstr`, and is equivalent to ``print(sstr(foo))``.
Parameters
==========
expr : valid SymPy object
SymPy expression to print.
settings : args
Same as the settings accepted by SymPy's sstr().
Examples
========
>>> from sympy.physics.vector import vprint, dynamicsymbols
>>> u1 = dynamicsymbols('u1')
>>> print(u1)
u1(t)
>>> vprint(u1)
u1
"""
outstr = vsprint(expr, **settings)
from sympy.core.compatibility import builtins
if (outstr != 'None'):
builtins._ = outstr
print(outstr)
def vsstrrepr(expr, **settings):
"""Function for displaying expression representation's with vector
printing enabled.
Parameters
==========
expr : valid SymPy object
SymPy expression to print.
settings : args
Same as the settings accepted by SymPy's sstrrepr().
"""
p = VectorStrReprPrinter(settings)
return p.doprint(expr)
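# A minimal usage sketch (illustrative, not part of the original module):
# like vsprint(), vsstrrepr() drops the explicit (t) argument of
# dynamicsymbols; the expected output is shown approximately:
# >>> from sympy.physics.vector import dynamicsymbols
# >>> from sympy.physics.vector.printing import vsstrrepr
# >>> u1 = dynamicsymbols('u1')
# >>> vsstrrepr(u1)
# 'u1'
# >>> vsstrrepr(u1.diff())
# "u1'"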
def vsprint(expr, **settings):
r"""Function for displaying expressions generated in the
sympy.physics vector package.
Returns the output of vprint() as a string.
Parameters
==========
expr : valid SymPy object
SymPy expression to print
settings : args
Same as the settings accepted by SymPy's sstr().
Examples
========
>>> from sympy.physics.vector import vsprint, dynamicsymbols
>>> u1, u2 = dynamicsymbols('u1 u2')
>>> u2d = dynamicsymbols('u2', level=1)
>>> print("%s = %s" % (u1, u2 + u2d))
u1(t) = u2(t) + Derivative(u2(t), t)
>>> print("%s = %s" % (vsprint(u1), vsprint(u2 + u2d)))
u1 = u2 + u2'
"""
string_printer = VectorStrPrinter(settings)
return string_printer.doprint(expr)
def vpprint(expr, **settings):
r"""Function for pretty printing of expressions generated in the
sympy.physics vector package.
Mainly used for expressions not inside a vector, such as the output of
running scripts and generating equations of motion. Takes the same options as
SymPy's :func:`~.pretty_print`; see that function for more information.
Parameters
==========
expr : valid SymPy object
SymPy expression to pretty print
settings : args
Same as those accepted by SymPy's pretty_print.
"""
pp = VectorPrettyPrinter(settings)
# Note that this is copied from sympy.printing.pretty.pretty_print:
# XXX: this is an ugly hack, but at least it works
use_unicode = pp._settings['use_unicode']
from sympy.printing.pretty.pretty_symbology import pretty_use_unicode
uflag = pretty_use_unicode(use_unicode)
try:
return pp.doprint(expr)
finally:
pretty_use_unicode(uflag)
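# A minimal usage sketch (illustrative, not part of the original module);
# the exact picture depends on the pretty-printer settings:
# >>> from sympy.physics.vector import dynamicsymbols
# >>> from sympy.physics.vector.printing import vpprint
# >>> u1 = dynamicsymbols('u1')
# >>> print(vpprint(u1.diff(), use_unicode=False))
# u1'(t)
# With use_unicode=True the time derivative is drawn with a combining dot
# accent over the symbol instead of the apostrophe.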
def vlatex(expr, **settings):
r"""Function for printing latex representation of sympy.physics.vector
objects.
For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the
same options as SymPy's :func:`~.latex`; see that function for more information.
Parameters
==========
expr : valid SymPy object
SymPy expression to represent in LaTeX form
settings : args
Same as latex()
Examples
========
>>> from sympy.physics.vector import vlatex, ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q1, q2 = dynamicsymbols('q1 q2')
>>> q1d, q2d = dynamicsymbols('q1 q2', 1)
>>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)
>>> vlatex(N.x + N.y)
'\\mathbf{\\hat{n}_x} + \\mathbf{\\hat{n}_y}'
>>> vlatex(q1 + q2)
'q_{1} + q_{2}'
>>> vlatex(q1d)
'\\dot{q}_{1}'
>>> vlatex(q1 * q2d)
'q_{1} \\dot{q}_{2}'
>>> vlatex(q1dd * q1 / q1d)
'\\frac{q_{1} \\ddot{q}_{1}}{\\dot{q}_{1}}'
"""
latex_printer = VectorLatexPrinter(settings)
return latex_printer.doprint(expr)
def init_vprinting(**kwargs):
"""Initializes time derivative printing for all SymPy objects, i.e. any
functions of time will be displayed in a more compact notation. The main
benefit of this is for printing of time derivatives; instead of
displaying as ``Derivative(f(t),t)``, it will display ``f'``. This is
only actually needed when derivatives are present and are not in a
physics.vector.Vector or physics.vector.Dyadic object. This function is a
light wrapper to :func:`~.init_printing`. Any keyword
arguments for it are valid here.
{0}
Examples
========
>>> from sympy import Function, symbols
>>> t, x = symbols('t, x')
>>> omega = Function('omega')
>>> omega(x).diff()
Derivative(omega(x), x)
>>> omega(t).diff()
Derivative(omega(t), t)
Now use the string printer:
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> omega(x).diff()
Derivative(omega(x), x)
>>> omega(t).diff()
omega'
"""
kwargs['str_printer'] = vsstrrepr
kwargs['pretty_printer'] = vpprint
kwargs['latex_printer'] = vlatex
init_printing(**kwargs)
params = init_printing.__doc__.split('Examples\n ========')[0] # type: ignore
init_vprinting.__doc__ = init_vprinting.__doc__.format(params) # type: ignore
|
b0be4e7bd99ecf303bd36a0c204d0fc7416800b84dfef4a3bddce3e450c73cc6 | from sympy.core.backend import (S, sympify, expand, sqrt, Add, zeros,
ImmutableMatrix as Matrix)
from sympy import trigsimp
from sympy.printing.defaults import Printable
from sympy.utilities.misc import filldedent
__all__ = ['Vector']
class Vector(Printable):
"""The class used to define vectors.
It, along with ReferenceFrame, is one of the building blocks for describing
a classical mechanics system in PyDy and sympy.physics.vector.
Attributes
==========
simp : Boolean
Let certain methods use trigsimp on their outputs
"""
simp = False
def __init__(self, inlist):
"""This is the constructor for the Vector class. You shouldn't be
calling this, it should only be used by other functions. You should be
treating Vectors like you would with if you were doing the math by
hand, and getting the first 3 from the standard basis vectors from a
ReferenceFrame.
The only exception is to create a zero vector:
zv = Vector(0)
"""
self.args = []
if inlist == 0:
inlist = []
if isinstance(inlist, dict):
d = inlist
else:
d = {}
for inp in inlist:
if inp[1] in d:
d[inp[1]] += inp[0]
else:
d[inp[1]] = inp[0]
for k, v in d.items():
if v != Matrix([0, 0, 0]):
self.args.append((v, k))
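# A minimal sketch (illustrative, not part of the original module) of how
# Vectors are normally built and what self.args ends up holding:
# >>> from sympy.physics.vector import ReferenceFrame, Vector
# >>> N = ReferenceFrame('N')
# >>> v = 3*N.x + 2*N.y
# >>> v.args    # a list with a single (Matrix([3, 2, 0]), N) pair
# >>> Vector(0).args    # the zero vector stores no components
# []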
def __hash__(self):
return hash(tuple(self.args))
def __add__(self, other):
"""The add operator for Vector. """
if other == 0:
return self
other = _check_vector(other)
return Vector(self.args + other.args)
def __and__(self, other):
"""Dot product of two vectors.
Returns a scalar, the dot product of the two Vectors
Parameters
==========
other : Vector
The Vector which we are dotting with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dot
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> dot(N.x, N.x)
1
>>> dot(N.x, N.y)
0
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> dot(N.y, A.y)
cos(q1)
"""
from sympy.physics.vector.dyadic import Dyadic
if isinstance(other, Dyadic):
return NotImplemented
other = _check_vector(other)
out = S.Zero
for i, v1 in enumerate(self.args):
for j, v2 in enumerate(other.args):
out += ((v2[0].T)
* (v2[1].dcm(v1[1]))
* (v1[0]))[0]
if Vector.simp:
return trigsimp(sympify(out), recursive=True)
else:
return sympify(out)
def __truediv__(self, other):
"""This uses mul and inputs self and 1 divided by other. """
return self.__mul__(sympify(1) / other)
def __eq__(self, other):
"""Tests for equality.
It is very important to note that this is only as good as the SymPy
equality test; False does not always mean they are not equivalent
Vectors.
If other is 0, and self is empty, returns True.
If other is 0 and self is not empty, returns False.
If none of the above, only accepts other as a Vector.
"""
if other == 0:
other = Vector(0)
try:
other = _check_vector(other)
except TypeError:
return False
if (self.args == []) and (other.args == []):
return True
elif (self.args == []) or (other.args == []):
return False
frame = self.args[0][1]
for v in frame:
if expand((self - other) & v) != 0:
return False
return True
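# A small illustration (added, not part of the original module) of the
# equality semantics described above:
# >>> from sympy.physics.vector import ReferenceFrame, Vector
# >>> N = ReferenceFrame('N')
# >>> N.x + N.y == N.y + N.x
# True
# >>> N.x == 0          # a non-empty Vector never equals the zero vector
# False
# >>> Vector(0) == 0
# True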
def __mul__(self, other):
"""Multiplies the Vector by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Vector with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> b = Symbol('b')
>>> V = 10 * b * N.x
>>> print(V)
10*b*N.x
"""
newlist = [v for v in self.args]
for i, v in enumerate(newlist):
newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1])
return Vector(newlist)
def __ne__(self, other):
return not self == other
def __neg__(self):
return self * -1
def __or__(self, other):
"""Outer product between two Vectors.
A rank increasing operation, which returns a Dyadic from two Vectors
Parameters
==========
other : Vector
The Vector to take the outer product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> outer(N.x, N.x)
(N.x|N.x)
"""
from sympy.physics.vector.dyadic import Dyadic
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
for i2, v2 in enumerate(other.args):
# the product is written out term-by-term because nesting enumerate
# calls over the same frame's components causes problems
ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)])
ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)])
ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)])
ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)])
ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)])
ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)])
ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)])
ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)])
ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)])
return ol
def _latex(self, printer):
"""Latex Printing method. """
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
ol.append(' + ' + ar[i][1].latex_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
ol.append(' - ' + ar[i][1].latex_vecs[j])
elif ar[i][0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1,
# print it, wrapping it in parentheses for readability if needed.
arg_str = printer._print(ar[i][0][j])
if isinstance(ar[i][0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + ar[i][1].latex_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def _pretty(self, printer):
"""Pretty Printing method. """
from sympy.printing.pretty.stringpict import prettyForm
e = self
class Fake:
def render(self, *args, **kwargs):
ar = e.args # just to shorten things
if len(ar) == 0:
return str(0)
pforms = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
pform = printer._print(ar[i][1].pretty_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
pform = printer._print(ar[i][1].pretty_vecs[j])
pform = prettyForm(*pform.left(" - "))
bin = prettyForm.NEG
pform = prettyForm(binding=bin, *pform)
elif ar[i][0][j] != 0:
# If the basis vector coeff is not 1 or -1,
# we might wrap it in parentheses, for readability.
pform = printer._print(ar[i][0][j])
if isinstance(ar[i][0][j], Add):
tmp = pform.parens()
pform = prettyForm(tmp[0], tmp[1])
pform = prettyForm(*pform.right(" ",
ar[i][1].pretty_vecs[j]))
else:
continue
pforms.append(pform)
pform = prettyForm.__add__(*pforms)
kwargs["wrap_line"] = kwargs.get("wrap_line")
kwargs["num_columns"] = kwargs.get("num_columns")
out_str = pform.render(*args, **kwargs)
mlines = [line.rstrip() for line in out_str.split("\n")]
return "\n".join(mlines)
return Fake()
def __ror__(self, other):
"""Outer product between two Vectors.
A rank increasing operation, which returns a Dyadic from two Vectors
Parameters
==========
other : Vector
The Vector to take the outer product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> outer(N.x, N.x)
(N.x|N.x)
"""
from sympy.physics.vector.dyadic import Dyadic
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(other.args):
for i2, v2 in enumerate(self.args):
# the product is written out term-by-term because nesting enumerate
# calls over the same frame's components causes problems
ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)])
ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)])
ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)])
ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)])
ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)])
ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)])
ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)])
ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)])
ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)])
return ol
def __rsub__(self, other):
return (-1 * self) + other
def _sympystr(self, printer, order=True):
"""Printing method. """
if not order or len(self.args) == 1:
ar = list(self.args)
elif len(self.args) == 0:
return printer._print(0)
else:
d = {v[1]: v[0] for v in self.args}
keys = sorted(d.keys(), key=lambda x: x.index)
ar = []
for key in keys:
ar.append((d[key], key))
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
ol.append(' + ' + ar[i][1].str_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
ol.append(' - ' + ar[i][1].str_vecs[j])
elif ar[i][0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1,
# print it, wrapping it in parentheses for readability if needed.
arg_str = printer._print(ar[i][0][j])
if isinstance(ar[i][0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + '*' + ar[i][1].str_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def __sub__(self, other):
"""The subtraction operator. """
return self.__add__(other * -1)
def __xor__(self, other):
"""The cross product operator for two Vectors.
Returns a Vector, expressed in the same ReferenceFrames as self.
Parameters
==========
other : Vector
The Vector which we are crossing with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> N.x ^ N.y
N.z
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> A.x ^ N.y
N.z
>>> N.y ^ A.x
- sin(q1)*A.y - cos(q1)*A.z
"""
from sympy.physics.vector.dyadic import Dyadic
if isinstance(other, Dyadic):
return NotImplemented
other = _check_vector(other)
if other.args == []:
return Vector(0)
def _det(mat):
"""This is needed as a little method for to find the determinant
of a list in python; needs to work for a 3x3 list.
SymPy's Matrix won't take in Vector, so need a custom function.
You shouldn't be calling this.
"""
return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])
+ mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *
mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -
mat[1][1] * mat[2][0]))
outlist = []
ar = other.args # For brevity
for i, v in enumerate(ar):
tempx = v[1].x
tempy = v[1].y
tempz = v[1].z
tempm = ([[tempx, tempy, tempz], [self & tempx, self & tempy,
self & tempz], [Vector([ar[i]]) & tempx,
Vector([ar[i]]) & tempy, Vector([ar[i]]) & tempz]])
outlist += _det(tempm).args
return Vector(outlist)
__radd__ = __add__
__rand__ = __and__
__rmul__ = __mul__
def separate(self):
"""
The constituents of this vector in different reference frames,
as per its definition.
Returns a dict mapping each ReferenceFrame to the corresponding
constituent Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> R1 = ReferenceFrame('R1')
>>> R2 = ReferenceFrame('R2')
>>> v = R1.x + R2.x
>>> v.separate() == {R1: R1.x, R2: R2.x}
True
"""
components = {}
for x in self.args:
components[x[1]] = Vector([x])
return components
def dot(self, other):
return self & other
dot.__doc__ = __and__.__doc__
def cross(self, other):
return self ^ other
cross.__doc__ = __xor__.__doc__
def outer(self, other):
return self | other
outer.__doc__ = __or__.__doc__
def diff(self, var, frame, var_in_dcm=True):
"""Returns the partial derivative of the vector with respect to a
variable in the provided reference frame.
Parameters
==========
var : Symbol
What the partial derivative is taken with respect to.
frame : ReferenceFrame
The reference frame that the partial derivative is taken in.
var_in_dcm : boolean
If True, the differentiation algorithm assumes that the variable
may be present in any of the direction cosine matrices that relate
the frame to the frames of any component of the vector. But if it
is known that the variable is not present in the direction cosine
matrices, False can be set to skip full re-expression in the
desired frame.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.vector import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.vector import Vector
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> Vector.simp = True
>>> t = Symbol('t')
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.diff(t, N)
- q1'*A.z
>>> B = ReferenceFrame('B')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> v = u1 * A.x + u2 * B.y
>>> v.diff(u2, N, var_in_dcm=False)
B.y
"""
from sympy.physics.vector.frame import _check_frame
var = sympify(var)
_check_frame(frame)
inlist = []
for vector_component in self.args:
measure_number = vector_component[0]
component_frame = vector_component[1]
if component_frame == frame:
inlist += [(measure_number.diff(var), frame)]
else:
# If the direction cosine matrix relating the component frame
# with the derivative frame does not contain the variable.
if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==
zeros(3, 3)):
inlist += [(measure_number.diff(var),
component_frame)]
else: # else express in the frame
reexp_vec_comp = Vector([vector_component]).express(frame)
deriv = reexp_vec_comp.args[0][0].diff(var)
inlist += Vector([(deriv, frame)]).express(component_frame).args
return Vector(inlist)
def express(self, otherframe, variables=False):
"""
Returns a Vector equivalent to this one, expressed in otherframe.
Uses the global express method.
Parameters
==========
otherframe : ReferenceFrame
The frame for this Vector to be described in
variables : boolean
If True, the coordinate symbols (if present) in this Vector
are re-expressed in terms of otherframe
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.express(N)
cos(q1)*N.x - sin(q1)*N.z
"""
from sympy.physics.vector import express
return express(self, otherframe, variables=variables)
def to_matrix(self, reference_frame):
"""Returns the matrix form of the vector with respect to the given
frame.
Parameters
==========
reference_frame : ReferenceFrame
The reference frame that the rows of the matrix correspond to.
Returns
=======
matrix : ImmutableMatrix, shape(3,1)
The matrix that gives the 1D vector.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> a, b, c = symbols('a, b, c')
>>> N = ReferenceFrame('N')
>>> vector = a * N.x + b * N.y + c * N.z
>>> vector.to_matrix(N)
Matrix([
[a],
[b],
[c]])
>>> beta = symbols('beta')
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
>>> vector.to_matrix(A)
Matrix([
[ a],
[ b*cos(beta) + c*sin(beta)],
[-b*sin(beta) + c*cos(beta)]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
reference_frame]).reshape(3, 1)
def doit(self, **hints):
"""Calls .doit() on each term in the Vector"""
d = {}
for v in self.args:
d[v[1]] = v[0].applyfunc(lambda x: x.doit(**hints))
return Vector(d)
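# A minimal sketch (illustrative, not part of the original module) of doit()
# acting on each measure number; expected output shown approximately:
# >>> from sympy import Integral, Symbol
# >>> from sympy.physics.vector import ReferenceFrame
# >>> N = ReferenceFrame('N')
# >>> x = Symbol('x')
# >>> (Integral(x, (x, 0, 2)) * N.x).doit()
# 2*N.x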
def dt(self, otherframe):
"""
Returns a Vector which is the time derivative of
the self Vector, taken in frame otherframe.
Calls the global time_derivative method
Parameters
==========
otherframe : ReferenceFrame
The frame to calculate the time derivative in
"""
from sympy.physics.vector import time_derivative
return time_derivative(self, otherframe)
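# A minimal usage sketch (illustrative, not part of the original module);
# expected output shown approximately, with default string printing:
# >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
# >>> q = dynamicsymbols('q')
# >>> N = ReferenceFrame('N')
# >>> (q * N.x).dt(N)
# Derivative(q(t), t)*N.x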
def simplify(self):
"""Returns a simplified Vector."""
d = {}
for v in self.args:
d[v[1]] = v[0].simplify()
return Vector(d)
def subs(self, *args, **kwargs):
"""Substitution on the Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> s = Symbol('s')
>>> a = N.x * s
>>> a.subs({s: 2})
2*N.x
"""
d = {}
for v in self.args:
d[v[1]] = v[0].subs(*args, **kwargs)
return Vector(d)
def magnitude(self):
"""Returns the magnitude (Euclidean norm) of self."""
return sqrt(self & self)
def normalize(self):
"""Returns a Vector of magnitude 1, codirectional with self."""
return Vector(self.args + []) / self.magnitude()
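# A minimal sketch (illustrative, not part of the original module) showing
# magnitude() and normalize() together:
# >>> from sympy.physics.vector import ReferenceFrame
# >>> N = ReferenceFrame('N')
# >>> v = 3*N.x + 4*N.y
# >>> v.magnitude()
# 5
# >>> v.normalize()
# 3/5*N.x + 4/5*N.y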
def applyfunc(self, f):
"""Apply a function to each component of a vector."""
if not callable(f):
raise TypeError("`f` must be callable.")
d = {}
for v in self.args:
d[v[1]] = v[0].applyfunc(f)
return Vector(d)
def free_symbols(self, reference_frame):
"""
Returns the free symbols in the measure numbers of the vector
expressed in the given reference frame.
Parameters
==========
reference_frame : ReferenceFrame
The frame with respect to which the free symbols of the
given vector are to be determined.
"""
return self.to_matrix(reference_frame).free_symbols
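# A minimal sketch (illustrative, not part of the original module); note
# that the result is a set, so its printed order may vary:
# >>> from sympy import symbols
# >>> from sympy.physics.vector import ReferenceFrame
# >>> a, b = symbols('a b')
# >>> N = ReferenceFrame('N')
# >>> (a*N.x + b*N.y).free_symbols(N) == {a, b}
# True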
class VectorTypeError(TypeError):
def __init__(self, other, want):
msg = filldedent("Expected an instance of %s, but received object "
"'%s' of %s." % (type(want), other, type(other)))
super().__init__(msg)
def _check_vector(other):
if not isinstance(other, Vector):
raise TypeError('A Vector must be supplied')
return other
|
0530af17c54e82631ef3eaea98538d39b786040d5c60db4d07a88a6ea47bdb17 | from .vector import Vector, _check_vector
from .frame import _check_frame
from warnings import warn
__all__ = ['Point']
class Point:
"""This object represents a point in a dynamic system.
It stores the position, velocity, and acceleration of a point.
The position is a vector defined as the vector distance from a parent
point to this point.
Parameters
==========
name : string
The display name of the Point
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> N = ReferenceFrame('N')
>>> O = Point('O')
>>> P = Point('P')
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> O.set_vel(N, u1 * N.x + u2 * N.y + u3 * N.z)
>>> O.acc(N)
u1'*N.x + u2'*N.y + u3'*N.z
symbols() can be used to create multiple Points in a single step, for example:
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> from sympy import symbols
>>> N = ReferenceFrame('N')
>>> u1, u2 = dynamicsymbols('u1 u2')
>>> A, B = symbols('A B', cls=Point)
>>> type(A)
<class 'sympy.physics.vector.point.Point'>
>>> A.set_vel(N, u1 * N.x + u2 * N.y)
>>> B.set_vel(N, u2 * N.x + u1 * N.y)
>>> A.acc(N) - B.acc(N)
(u1' - u2')*N.x + (-u1' + u2')*N.y
"""
def __init__(self, name):
"""Initialization of a Point object. """
self.name = name
self._pos_dict = {}
self._vel_dict = {}
self._acc_dict = {}
self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
def __str__(self):
return self.name
__repr__ = __str__
def _check_point(self, other):
if not isinstance(other, Point):
raise TypeError('A Point must be supplied')
def _pdict_list(self, other, num):
"""Returns a list of points that gives the shortest path with respect
to position, velocity, or acceleration from this point to the provided
point.
Parameters
==========
other : Point
A point that may be related to this point by position, velocity, or
acceleration.
num : integer
0 for searching the position tree, 1 for searching the velocity
tree, and 2 for searching the acceleration tree.
Returns
=======
list of Points
A sequence of points from self to other.
Notes
=====
It isn't clear if num = 1 or num = 2 actually works because the keys to
``_vel_dict`` and ``_acc_dict`` are :class:`ReferenceFrame` objects which
do not have the ``_pdlist`` attribute.
"""
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._pdlist[num].keys()
for i2, v2 in enumerate(templist):
if v2 not in v:
littletemplist = v + [v2]
if littletemplist not in outlist:
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + other.name +
' and ' + self.name)
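# A hedged illustration (added, not part of the original module) of the
# search performed by _pdict_list for num=0 (the position tree):
# >>> from sympy.physics.vector import Point, ReferenceFrame
# >>> N = ReferenceFrame('N')
# >>> O = Point('O')
# >>> P = O.locatenew('P', 3*N.x)
# >>> Q = P.locatenew('Q', 4*N.y)
# >>> Q._pdict_list(O, 0)   # shortest chain of points from Q back to O
# [Q, P, O]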
def a1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the acceleration of this point with the 1-point theory.
The 1-point theory for point acceleration looks like this:
^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
x r^OP) + 2 ^N omega^B x ^B v^P
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.a1pt_theory(O, N, B)
(-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = self.vel(interframe)
a1 = otherpoint.acc(outframe)
a2 = self.acc(interframe)
omega = interframe.ang_vel_in(outframe)
alpha = interframe.ang_acc_in(outframe)
self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
(omega ^ (omega ^ dist)))
return self.acc(outframe)
def a2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the acceleration of this point with the 2-point theory.
The 2-point theory for point acceleration looks like this:
^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.a2pt_theory(O, N, B)
- 10*q'**2*B.x + 10*q''*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
a = otherpoint.acc(outframe)
omega = fixedframe.ang_vel_in(outframe)
alpha = fixedframe.ang_acc_in(outframe)
self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
return self.acc(outframe)
def acc(self, frame):
"""The acceleration Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned acceleration vector will be defined
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._acc_dict):
if self._vel_dict[frame] != 0:
return (self._vel_dict[frame]).dt(frame)
else:
return Vector(0)
return self._acc_dict[frame]
def locatenew(self, name, value):
"""Creates a new point with a position defined from this point.
Parameters
==========
name : str
The name for the new point
value : Vector
The position of the new point relative to this point
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> N = ReferenceFrame('N')
>>> P1 = Point('P1')
>>> P2 = P1.locatenew('P2', 10 * N.x)
"""
if not isinstance(name, str):
raise TypeError('Must supply a valid name')
if value == 0:
value = Vector(0)
value = _check_vector(value)
p = Point(name)
p.set_pos(self, value)
self.set_pos(p, -value)
return p
def pos_from(self, otherpoint):
"""Returns a Vector distance between this Point and the other Point.
Parameters
==========
otherpoint : Point
The otherpoint we are locating this one relative to
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
outvec = Vector(0)
plist = self._pdict_list(otherpoint, 0)
for i in range(len(plist) - 1):
outvec += plist[i]._pos_dict[plist[i + 1]]
return outvec
def set_acc(self, frame, value):
"""Used to set the acceleration of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's acceleration is defined
value : Vector
The vector value of this point's acceleration in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._acc_dict.update({frame: value})
def set_pos(self, otherpoint, value):
"""Used to set the position of this point w.r.t. another point.
Parameters
==========
otherpoint : Point
The other point which this point's location is defined relative to
value : Vector
The vector which defines the location of this point
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
self._check_point(otherpoint)
self._pos_dict.update({otherpoint: value})
otherpoint._pos_dict.update({self: -value})
def set_vel(self, frame, value):
"""Sets the velocity Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's velocity is defined
value : Vector
The vector value of this point's velocity in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._vel_dict.update({frame: value})
def v1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the velocity of this point with the 1-point theory.
The 1-point theory for point velocity looks like this:
^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.v1pt_theory(O, N, B)
q'*B.x + q2'*B.y - 5*q*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v1 = self.vel(interframe)
v2 = otherpoint.vel(outframe)
omega = interframe.ang_vel_in(outframe)
self.set_vel(outframe, v1 + v2 + (omega ^ dist))
return self.vel(outframe)
def v2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the velocity of this point with the 2-point theory.
The 2-point theory for point velocity looks like this:
^N v^P = ^N v^O + ^N omega^B x r^OP
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.v2pt_theory(O, N, B)
5*N.x + 10*q'*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = otherpoint.vel(outframe)
omega = fixedframe.ang_vel_in(outframe)
self.set_vel(outframe, v + (omega ^ dist))
return self.vel(outframe)
def vel(self, frame):
"""The velocity Vector of this Point in the ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned velocity vector will be defined
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
Velocities will be automatically calculated if possible; otherwise a
``ValueError`` will be raised. If it is possible to calculate multiple
different velocities from the relative points, the points defined most
directly relative to this point will be used. In the case of inconsistent
relative positions of points, incorrect velocities may be returned. It is
up to the user to define prior relative positions and velocities of points
in a self-consistent way.
>>> p = Point('p')
>>> q = dynamicsymbols('q')
>>> p.set_vel(N, 10 * N.x)
>>> p2 = Point('p2')
>>> p2.set_pos(p, q*N.x)
>>> p2.vel(N)
(Derivative(q(t), t) + 10)*N.x
"""
_check_frame(frame)
if not (frame in self._vel_dict):
valid_neighbor_found = False
is_cyclic = False
visited = []
queue = [self]
candidate_neighbor = []
while queue:  # BFS to find the nearest point with a velocity defined in frame
node = queue.pop(0)
if node not in visited:
visited.append(node)
for neighbor, neighbor_pos in node._pos_dict.items():
if neighbor in visited:
continue
try:
neighbor_pos.express(frame)  # check that the position vector can be expressed in frame
except ValueError:
continue
if neighbor in queue:
is_cyclic = True
try:
neighbor_velocity = neighbor._vel_dict[frame]  # check if the point has its velocity defined in the required frame
except KeyError:
queue.append(neighbor)
continue
candidate_neighbor.append(neighbor)
if not valid_neighbor_found:
self.set_vel(frame, self.pos_from(neighbor).dt(frame) + neighbor_velocity)
valid_neighbor_found = True
if is_cyclic:
warn('Kinematic loops are defined among the positions of points. This is likely not desired and may cause errors in your calculations.')
if len(candidate_neighbor) > 1:
warn('Velocity automatically calculated based on point ' +
candidate_neighbor[0].name + ' but it is also possible from point(s): ' +
str(candidate_neighbor[1:]) + '. Velocities from these points are not necessarily the same. This may cause errors in your calculations.')
if valid_neighbor_found:
return self._vel_dict[frame]
else:
raise ValueError('Velocity of point ' + self.name + ' has not been'
' defined in ReferenceFrame ' + frame.name)
return self._vel_dict[frame]
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial velocities of the linear velocity vector of this
point in the given frame with respect to one or more provided
generalized speeds.
Parameters
==========
frame : ReferenceFrame
The frame in which the velocity is defined.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> from sympy.physics.vector import dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> p = Point('p')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> p.set_vel(N, u1 * N.x + u2 * A.y)
>>> p.partial_velocity(N, u1)
N.x
>>> p.partial_velocity(N, u1, u2)
(N.x, A.y)
"""
partials = [self.vel(frame).diff(speed, frame, var_in_dcm=False) for
speed in gen_speeds]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
|
f9a0de95cd51bf676de9a2e97d64902d160bc3b31c80cde3b49264b046c7dc1e | from sympy.core.backend import (sympify, diff, sin, cos, Matrix, symbols,
Function, S, Symbol)
from sympy import integrate, trigsimp
from sympy.core.compatibility import reduce
from .vector import Vector, _check_vector
from .frame import CoordinateSym, _check_frame
from .dyadic import Dyadic
from .printing import vprint, vsprint, vpprint, vlatex, init_vprinting
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import translate
__all__ = ['cross', 'dot', 'express', 'time_derivative', 'outer',
'kinematic_equations', 'get_motion_params', 'partial_velocity',
'dynamicsymbols', 'vprint', 'vsprint', 'vpprint', 'vlatex',
'init_vprinting']
def cross(vec1, vec2):
"""Cross product convenience wrapper for Vector.cross(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Cross product is between two vectors')
return vec1 ^ vec2
cross.__doc__ += Vector.cross.__doc__ # type: ignore
def dot(vec1, vec2):
"""Dot product convenience wrapper for Vector.dot(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Dot product is between two vectors')
return vec1 & vec2
dot.__doc__ += Vector.dot.__doc__ # type: ignore
def express(expr, frame, frame2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, scalar (sympyfiable) or Dyadic in the given frame.
Refer to the local methods of Vector and Dyadic for details.
If 'variables' is True, then the coordinate variables (CoordinateSym
instances) of other frames present in the vector/scalar field or
dyadic expression are also substituted in terms of the base scalars of
this frame.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in ReferenceFrame 'frame'
frame: ReferenceFrame
The reference frame to express expr in
frame2 : ReferenceFrame
The other frame required for re-expression (only for Dyadic expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of frame
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> from sympy.physics.vector import express
>>> express(d, B, N)
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
>>> express(B.x, N)
cos(q)*N.x + sin(q)*N.y
>>> express(N[0], B, variables=True)
B_x*cos(q) - B_y*sin(q)
"""
_check_frame(frame)
if expr == 0:
return expr
if isinstance(expr, Vector):
#Given expr is a Vector
if variables:
#If variables attribute is True, substitute
#the coordinate variables in the Vector
frame_list = [x[-1] for x in expr.args]
subs_dict = {}
for f in frame_list:
subs_dict.update(f.variable_map(frame))
expr = expr.subs(subs_dict)
#Re-express in this frame
outvec = Vector([])
for i, v in enumerate(expr.args):
if v[1] != frame:
temp = frame.dcm(v[1]) * v[0]
if Vector.simp:
temp = temp.applyfunc(lambda x:
trigsimp(x, method='fu'))
outvec += Vector([(temp, frame)])
else:
outvec += Vector([v])
return outvec
if isinstance(expr, Dyadic):
if frame2 is None:
frame2 = frame
_check_frame(frame2)
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += express(v[0], frame, variables=variables) * \
(express(v[1], frame, variables=variables) |
express(v[2], frame2, variables=variables))
return ol
else:
if variables:
#Given expr is a scalar field
frame_set = set()
expr = sympify(expr)
#Substitute all the coordinate variables
for x in expr.free_symbols:
if isinstance(x, CoordinateSym) and x.frame != frame:
frame_set.add(x.frame)
subs_dict = {}
for f in frame_set:
subs_dict.update(f.variable_map(frame))
return expr.subs(subs_dict)
return expr
def time_derivative(expr, frame, order=1):
"""
Calculate the time derivative of a vector/scalar field function
or dyadic expression in given frame.
References
==========
https://en.wikipedia.org/wiki/Rotating_reference_frame#Time_derivatives_in_the_two_frames
Parameters
==========
expr : Vector/Dyadic/sympifyable
The expression whose time derivative is to be calculated
frame : ReferenceFrame
The reference frame to calculate the time derivative in
order : integer
The order of the derivative to be calculated
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> from sympy import Symbol
>>> q1 = Symbol('q1')
>>> u1 = dynamicsymbols('u1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> v = u1 * N.x
>>> A.set_ang_vel(N, 10*A.x)
>>> from sympy.physics.vector import time_derivative
>>> time_derivative(v, N)
u1'*N.x
>>> time_derivative(u1*A[0], N)
N_x*u1'
>>> B = N.orientnew('B', 'Axis', [u1, N.z])
>>> from sympy.physics.vector import outer
>>> d = outer(N.x, N.x)
>>> time_derivative(d, B)
- u1'*(N.y|N.x) - u1'*(N.x|N.y)
"""
t = dynamicsymbols._t
_check_frame(frame)
if order == 0:
return expr
if order % 1 != 0 or order < 0:
raise ValueError("Unsupported value of order entered")
if isinstance(expr, Vector):
outlist = []
for i, v in enumerate(expr.args):
if v[1] == frame:
outlist += [(express(v[0], frame,
variables=True).diff(t), frame)]
else:
outlist += (time_derivative(Vector([v]), v[1]) + \
(v[1].ang_vel_in(frame) ^ Vector([v]))).args
outvec = Vector(outlist)
return time_derivative(outvec, frame, order - 1)
if isinstance(expr, Dyadic):
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += (v[0].diff(t) * (v[1] | v[2]))
ol += (v[0] * (time_derivative(v[1], frame) | v[2]))
ol += (v[0] * (v[1] | time_derivative(v[2], frame)))
return time_derivative(ol, frame, order - 1)
else:
return diff(express(expr, frame, variables=True), t, order)
def outer(vec1, vec2):
"""Outer product convenience wrapper for Vector.outer():\n"""
if not isinstance(vec1, Vector):
raise TypeError('Outer product is between two Vectors')
return vec1 | vec2
outer.__doc__ += Vector.outer.__doc__ # type: ignore
def kinematic_equations(speeds, coords, rot_type, rot_order=''):
"""Gives equations relating the qdot's to u's for a rotation type.
Supply rotation type and order as in orient. Speeds are assumed to be
body-fixed; if we are defining the orientation of B in A using rot_type,
the angular velocity of B in A is assumed to be in the form: speed[0]*B.x +
speed[1]*B.y + speed[2]*B.z
Parameters
==========
speeds : list of length 3
The body fixed angular velocity measure numbers.
coords : list of length 3 or 4
The coordinates used to define the orientation of the two frames.
rot_type : str
The type of rotation used to create the equations. Body, Space, or
Quaternion only
rot_order : str or int
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import kinematic_equations, vprint
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
>>> vprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'),
... order=None)
[-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3']
"""
# Code below is checking and sanitizing input
approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131',
'212', '232', '313', '323', '1', '2', '3', '')
# make sure XYZ => 123 and rot_type is in lower case
rot_order = translate(str(rot_order), 'XYZxyz', '123123')
rot_type = rot_type.lower()
if not isinstance(speeds, (list, tuple)):
raise TypeError('Need to supply speeds in a list')
if len(speeds) != 3:
raise TypeError('Need to supply 3 body-fixed speeds')
if not isinstance(coords, (list, tuple)):
raise TypeError('Need to supply coordinates in a list')
if rot_type in ['body', 'space']:
if rot_order not in approved_orders:
raise ValueError('Not an acceptable rotation order')
if len(coords) != 3:
raise ValueError('Need 3 coordinates for body or space')
# Actual hard-coded kinematic differential equations
w1, w2, w3 = speeds
if w1 == w2 == w3 == 0:
return [S.Zero]*3
q1, q2, q3 = coords
q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords]
s1, s2, s3 = [sin(q1), sin(q2), sin(q3)]
c1, c2, c3 = [cos(q1), cos(q2), cos(q3)]
if rot_type == 'body':
if rot_order == '123':
return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 *
c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3]
if rot_order == '231':
return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2]
if rot_order == '312':
return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 *
s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2]
if rot_order == '132':
return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 *
c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2]
if rot_order == '213':
return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 *
s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3]
if rot_order == '321':
return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 *
s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2]
if rot_order == '121':
return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 *
s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2]
if rot_order == '131':
return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2]
if rot_order == '212':
return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 *
s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2]
if rot_order == '232':
return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 *
c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2]
if rot_order == '313':
return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 *
s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3]
if rot_order == '323':
return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 *
c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3]
if rot_type == 'space':
if rot_order == '123':
return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2]
if rot_order == '231':
return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2]
if rot_order == '312':
return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2]
if rot_order == '132':
return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 *
s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2]
if rot_order == '213':
return [q1d - (w1 * s1 - w3 * c1) * s2 / c2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2]
if rot_order == '321':
return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2]
if rot_order == '121':
return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2]
if rot_order == '131':
return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 *
s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2]
if rot_order == '212':
return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2]
if rot_order == '232':
return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2]
if rot_order == '313':
return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2]
if rot_order == '323':
return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2]
elif rot_type == 'quaternion':
if rot_order != '':
raise ValueError('Cannot have rotation order for quaternion')
if len(coords) != 4:
raise ValueError('Need 4 coordinates for quaternion')
# Actual hard-coded kinematic differential equations
e0, e1, e2, e3 = coords
w = Matrix(speeds + [0])
E = Matrix([[e0, -e3, e2, e1], [e3, e0, -e1, e2], [-e2, e1, e0, e3],
[-e1, -e2, -e3, e0]])
edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]])
return list(edots.T - 0.5 * w.T * E.T)
else:
raise ValueError('Not an approved rotation type for this function')
def get_motion_params(frame, **kwargs):
"""
Returns the three motion parameters - (acceleration, velocity, and
position) as vectorial functions of time in the given frame.
If a higher order differential function is provided, the lower order
functions are used as boundary conditions. For example, given the
acceleration, the velocity and position parameters are taken as
boundary conditions.
The values of time at which the boundary conditions are specified
are taken from timevalue1 (for position boundary condition) and
timevalue2 (for velocity boundary condition).
If any of the boundary conditions are not provided, they are taken
to be zero by default (zero vectors, in case of vectorial inputs). If
the boundary conditions are also functions of time, they are converted
to constants by substituting the time values in the dynamicsymbols._t
time Symbol.
This function can also be used for calculating rotational motion
parameters. Have a look at the Parameters and Examples for more clarity.
Parameters
==========
frame : ReferenceFrame
The frame to express the motion parameters in
acceleration : Vector
Acceleration of the object/frame as a function of time
velocity : Vector
Velocity as function of time or as boundary condition
of velocity at time = timevalue2
position : Vector
Position as function of time or as boundary condition
of position at time = timevalue1
timevalue1 : sympyfiable
Value of time for position boundary condition
timevalue2 : sympyfiable
Value of time for velocity boundary condition
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> from sympy import symbols
>>> R = ReferenceFrame('R')
>>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
>>> v = v1*R.x + v2*R.y + v3*R.z
>>> get_motion_params(R, position = v)
(v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
>>> a, b, c = symbols('a b c')
>>> v = a*R.x + b*R.y + c*R.z
>>> get_motion_params(R, velocity = v)
(0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
>>> parameters = get_motion_params(R, acceleration = v)
>>> parameters[1]
a*t*R.x + b*t*R.y + c*t*R.z
>>> parameters[2]
a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
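    As an illustrative sketch, nonzero boundary conditions can also be
    supplied; ``v0`` below is an assumed constant introduced only for this
    example:
    >>> v0 = symbols('v0')
    >>> parameters = get_motion_params(R, acceleration = v, velocity = v0*R.x)
    >>> parameters[1]
    (a*t + v0)*R.x + b*t*R.y + c*t*R.z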
"""
##Helper functions
def _process_vector_differential(vectdiff, condition, \
variable, ordinate, frame):
"""
        Helper function for get_motion methods. Finds the derivative of
        vectdiff wrt variable, and its integral using the specified boundary
        condition at variable = ordinate.
        Returns the tuple (derivative of vectdiff, vectdiff, integral of
        vectdiff).
"""
#Make sure boundary condition is independent of 'variable'
if condition != 0:
condition = express(condition, frame, variables=True)
#Special case of vectdiff == 0
if vectdiff == Vector(0):
return (0, 0, condition)
#Express vectdiff completely in condition's frame to give vectdiff1
vectdiff1 = express(vectdiff, frame)
#Find derivative of vectdiff
vectdiff2 = time_derivative(vectdiff, frame)
#Integrate and use boundary condition
vectdiff0 = Vector(0)
lims = (variable, ordinate, variable)
for dim in frame:
function1 = vectdiff1.dot(dim)
abscissa = dim.dot(condition).subs({variable : ordinate})
# Indefinite integral of 'function1' wrt 'variable', using
# the given initial condition (ordinate, abscissa).
vectdiff0 += (integrate(function1, lims) + abscissa) * dim
#Return tuple
return (vectdiff2, vectdiff, vectdiff0)
##Function body
_check_frame(frame)
#Decide mode of operation based on user's input
if 'acceleration' in kwargs:
mode = 2
elif 'velocity' in kwargs:
mode = 1
else:
mode = 0
#All the possible parameters in kwargs
#Not all are required for every case
#If not specified, set to default values(may or may not be used in
#calculations)
conditions = ['acceleration', 'velocity', 'position',
'timevalue', 'timevalue1', 'timevalue2']
for i, x in enumerate(conditions):
if x not in kwargs:
if i < 3:
kwargs[x] = Vector(0)
else:
kwargs[x] = S.Zero
elif i < 3:
_check_vector(kwargs[x])
else:
kwargs[x] = sympify(kwargs[x])
if mode == 2:
vel = _process_vector_differential(kwargs['acceleration'],
kwargs['velocity'],
dynamicsymbols._t,
kwargs['timevalue2'], frame)[2]
pos = _process_vector_differential(vel, kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)[2]
return (kwargs['acceleration'], vel, pos)
elif mode == 1:
return _process_vector_differential(kwargs['velocity'],
kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)
else:
vel = time_derivative(kwargs['position'], frame)
acc = time_derivative(vel, frame)
return (acc, vel, kwargs['position'])
def partial_velocity(vel_vecs, gen_speeds, frame):
"""Returns a list of partial velocities with respect to the provided
generalized speeds in the given reference frame for each of the supplied
velocity vectors.
The output is a list of lists. The outer list has a number of elements
equal to the number of supplied velocity vectors. The inner lists are, for
each velocity vector, the partial derivatives of that velocity vector with
respect to the generalized speeds supplied.
Parameters
==========
vel_vecs : iterable
An iterable of velocity vectors (angular or linear).
gen_speeds : iterable
An iterable of generalized speeds.
frame : ReferenceFrame
The reference frame that the partial derivatives are going to be taken
in.
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import partial_velocity
>>> u = dynamicsymbols('u')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
>>> vel_vecs = [P.vel(N)]
>>> gen_speeds = [u]
>>> partial_velocity(vel_vecs, gen_speeds, N)
[[N.x]]
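    As a further illustrative sketch, each additional generalized speed adds
    one entry to the inner list; ``u2`` here is introduced only for this
    example:
    >>> u2 = dynamicsymbols('u2')
    >>> P.set_vel(N, u * N.x + u2 * N.y)
    >>> partial_velocity([P.vel(N)], [u, u2], N)
    [[N.x, N.y]]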
"""
if not iterable(vel_vecs):
raise TypeError('Velocity vectors must be contained in an iterable.')
if not iterable(gen_speeds):
raise TypeError('Generalized speeds must be contained in an iterable')
vec_partials = []
for vec in vel_vecs:
partials = []
for speed in gen_speeds:
partials.append(vec.diff(speed, frame, var_in_dcm=False))
vec_partials.append(partials)
return vec_partials
def dynamicsymbols(names, level=0,**assumptions):
"""Uses symbols and Function for functions of time.
Creates a SymPy UndefinedFunction, which is then initialized as a function
of a variable, the default being Symbol('t').
Parameters
==========
names : str
Names of the dynamic symbols you want to create; works the same way as
inputs to symbols
level : int
Level of differentiation of the returned function; d/dt once of t,
twice of t, etc.
    assumptions :
        - real(bool) : This is used to set the dynamicsymbol as real;
          it is False by default.
        - positive(bool) : This is used to set the dynamicsymbol as positive;
          it is False by default.
        - commutative(bool) : This is used to set the commutative property of
          a dynamicsymbol; it is True by default.
        - integer(bool) : This is used to set the dynamicsymbol as an integer;
          it is False by default.
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy import diff, Symbol
>>> q1 = dynamicsymbols('q1')
>>> q1
q1(t)
>>> q2 = dynamicsymbols('q2', real=True)
>>> q2.is_real
True
>>> q3 = dynamicsymbols('q3', positive=True)
>>> q3.is_positive
True
>>> q4, q5 = dynamicsymbols('q4,q5', commutative=False)
>>> bool(q4*q5 != q5*q4)
True
>>> q6 = dynamicsymbols('q6', integer=True)
>>> q6.is_integer
True
>>> diff(q1, Symbol('t'))
Derivative(q1(t), t)
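    The ``level`` argument returns the time derivative of the created
    function directly; a brief sketch:
    >>> dynamicsymbols('q1', level=1)
    Derivative(q1(t), t)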
"""
esses = symbols(names, cls=Function,**assumptions)
t = dynamicsymbols._t
if iterable(esses):
esses = [reduce(diff, [t] * level, e(t)) for e in esses]
return esses
else:
return reduce(diff, [t] * level, esses(t))
dynamicsymbols._t = Symbol('t') # type: ignore
dynamicsymbols._str = '\'' # type: ignore
|
bb31a5e662f6793f654a6e3b85e31f67f216c4e831841bf7ce7472c4d5e7f9e7 | from sympy.core.backend import (diff, expand, sin, cos, sympify,
eye, symbols, ImmutableMatrix as Matrix, MatrixBase)
from sympy import (trigsimp, solve, Symbol, Dummy)
from sympy.physics.vector.vector import Vector, _check_vector
from sympy.utilities.misc import translate
__all__ = ['CoordinateSym', 'ReferenceFrame']
class CoordinateSym(Symbol):
"""
    A coordinate symbol/base scalar associated with a Reference Frame.
Ideally, users should not instantiate this class. Instances of
this class must only be accessed through the corresponding frame
as 'frame[index]'.
CoordinateSyms having the same frame and index parameters are equal
(even though they may be instantiated separately).
Parameters
==========
name : string
The display name of the CoordinateSym
frame : ReferenceFrame
The reference frame this base scalar belongs to
index : 0, 1 or 2
The index of the dimension denoted by this coordinate variable
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
>>> A = ReferenceFrame('A')
>>> A[1]
A_y
>>> type(A[0])
<class 'sympy.physics.vector.frame.CoordinateSym'>
>>> a_y = CoordinateSym('a_y', A, 1)
>>> a_y == A[1]
True
"""
def __new__(cls, name, frame, index):
# We can't use the cached Symbol.__new__ because this class depends on
# frame and index, which are not passed to Symbol.__xnew__.
assumptions = {}
super()._sanitize(assumptions, cls)
obj = super().__xnew__(cls, name, **assumptions)
_check_frame(frame)
if index not in range(0, 3):
raise ValueError("Invalid index specified")
obj._id = (frame, index)
return obj
@property
def frame(self):
return self._id[0]
def __eq__(self, other):
#Check if the other object is a CoordinateSym of the same frame
#and same index
if isinstance(other, CoordinateSym):
if other._id == self._id:
return True
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return tuple((self._id[0].__hash__(), self._id[1])).__hash__()
class ReferenceFrame:
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
_count = 0
def __init__(self, name, indices=None, latexs=None, variables=None):
"""ReferenceFrame initialization method.
A ReferenceFrame has a set of orthonormal basis vectors, along with
orientations relative to other ReferenceFrames and angular velocities
relative to other ReferenceFrames.
Parameters
==========
indices : tuple of str
Enables the reference frame's basis unit vectors to be accessed by
Python's square bracket indexing notation using the provided three
            index strings and alters the printing of the unit vectors to
reflect this choice.
latexs : tuple of str
Alters the LaTeX printing of the reference frame's basis unit
vectors to the provided three valid LaTeX strings.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> N = ReferenceFrame('N')
>>> N.x
N.x
>>> O = ReferenceFrame('O', indices=('1', '2', '3'))
>>> O.x
O['1']
>>> O['1']
O['1']
>>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
>>> vlatex(P.x)
'A1'
symbols() can be used to create multiple Reference Frames in one step, for example:
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import symbols
>>> A, B, C = symbols('A B C', cls=ReferenceFrame)
>>> D, E = symbols('D E', cls=ReferenceFrame, indices=('1', '2', '3'))
>>> A[0]
A_x
>>> D.x
D['1']
>>> E.y
E['2']
>>> type(A) == type(D)
True
"""
if not isinstance(name, str):
raise TypeError('Need to supply a valid name')
# The if statements below are for custom printing of basis-vectors for
# each frame.
# First case, when custom indices are supplied
if indices is not None:
if not isinstance(indices, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(indices) != 3:
raise ValueError('Supply 3 indices')
for i in indices:
if not isinstance(i, str):
raise TypeError('Indices must be strings')
self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
(name + '[\'' + indices[1] + '\']'),
(name + '[\'' + indices[2] + '\']')]
self.pretty_vecs = [(name.lower() + "_" + indices[0]),
(name.lower() + "_" + indices[1]),
(name.lower() + "_" + indices[2])]
self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
(name.lower(), indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
self.indices = indices
# Second case, when no custom indices are supplied
else:
self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
self.pretty_vecs = [name.lower() + "_x",
name.lower() + "_y",
name.lower() + "_z"]
self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
(r"\mathbf{\hat{%s}_y}" % name.lower()),
(r"\mathbf{\hat{%s}_z}" % name.lower())]
self.indices = ['x', 'y', 'z']
# Different step, for custom latex basis vectors
if latexs is not None:
if not isinstance(latexs, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(latexs) != 3:
raise ValueError('Supply 3 indices')
for i in latexs:
if not isinstance(i, str):
raise TypeError('Latex entries must be strings')
self.latex_vecs = latexs
self.name = name
self._var_dict = {}
#The _dcm_dict dictionary will only store the dcms of parent-child
#relationships. The _dcm_cache dictionary will work as the dcm
#cache.
self._dcm_dict = {}
self._dcm_cache = {}
self._ang_vel_dict = {}
self._ang_acc_dict = {}
self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
self._cur = 0
self._x = Vector([(Matrix([1, 0, 0]), self)])
self._y = Vector([(Matrix([0, 1, 0]), self)])
self._z = Vector([(Matrix([0, 0, 1]), self)])
#Associate coordinate symbols wrt this frame
if variables is not None:
if not isinstance(variables, (tuple, list)):
raise TypeError('Supply the variable names as a list/tuple')
if len(variables) != 3:
raise ValueError('Supply 3 variable names')
for i in variables:
if not isinstance(i, str):
raise TypeError('Variable names must be strings')
else:
variables = [name + '_x', name + '_y', name + '_z']
self.varlist = (CoordinateSym(variables[0], self, 0), \
CoordinateSym(variables[1], self, 1), \
CoordinateSym(variables[2], self, 2))
ReferenceFrame._count += 1
self.index = ReferenceFrame._count
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
        If the index is a number, returns the coordinate variable
        corresponding to that index.
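        Examples
        ========
        A short sketch, assuming the default basis-vector indices:
        >>> from sympy.physics.vector import ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> N['x']
        N.x
        >>> N[0]
        N_x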
"""
if not isinstance(ind, str):
if ind < 3:
return self.varlist[ind]
else:
raise ValueError("Invalid index provided")
if self.indices[0] == ind:
return self.x
if self.indices[1] == ind:
return self.y
if self.indices[2] == ind:
return self.z
else:
raise ValueError('Not a defined index')
def __iter__(self):
return iter([self.x, self.y, self.z])
def __str__(self):
"""Returns the name of the frame. """
return self.name
__repr__ = __str__
def _dict_list(self, other, num):
"""Creates a list from self to other using _dcm_dict. """
outlist = [[self]]
oldlist = [[]]
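        # Breadth-first style search: repeatedly extend candidate paths with
        # neighbouring frames until no new paths appear, then keep only the
        # paths that end at other and return the shortest one.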
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._dlist[num].keys()
for i2, v2 in enumerate(templist):
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + self.name +
' and ' + other.name)
def _w_diff_dcm(self, otherframe):
"""Angular velocity from time differentiating the DCM. """
from sympy.physics.vector.functions import dynamicsymbols
dcm2diff = otherframe.dcm(self)
diffed = dcm2diff.diff(dynamicsymbols._t)
angvelmat = diffed * dcm2diff.T
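        # angvelmat = dR/dt * R^T is skew-symmetric; its three independent
        # entries give the angular velocity components.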
w1 = trigsimp(expand(angvelmat[7]), recursive=True)
w2 = trigsimp(expand(angvelmat[2]), recursive=True)
w3 = trigsimp(expand(angvelmat[3]), recursive=True)
return Vector([(Matrix([w1, w2, w3]), otherframe)])
def variable_map(self, otherframe):
"""
Returns a dictionary which expresses the coordinate variables
of this frame in terms of the variables of otherframe.
If Vector.simp is True, returns a simplified version of the mapped
values. Else, returns them without simplification.
Simplification of the expressions may take time.
Parameters
==========
otherframe : ReferenceFrame
The other frame to map the variables to
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> A = ReferenceFrame('A')
>>> q = dynamicsymbols('q')
>>> B = A.orientnew('B', 'Axis', [q, A.z])
>>> A.variable_map(B)
{A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z}
"""
_check_frame(otherframe)
if (otherframe, Vector.simp) in self._var_dict:
return self._var_dict[(otherframe, Vector.simp)]
else:
vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
mapping = {}
for i, x in enumerate(self):
if Vector.simp:
mapping[self.varlist[i]] = trigsimp(vars_matrix[i], method='fu')
else:
mapping[self.varlist[i]] = vars_matrix[i]
self._var_dict[(otherframe, Vector.simp)] = mapping
return mapping
def ang_acc_in(self, otherframe):
"""Returns the angular acceleration Vector of the ReferenceFrame.
Effectively returns the Vector:
^N alpha ^B
        which represents the angular acceleration of B in N, where B is self,
        and N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular acceleration is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
_check_frame(otherframe)
if otherframe in self._ang_acc_dict:
return self._ang_acc_dict[otherframe]
else:
return self.ang_vel_in(otherframe).dt(otherframe)
def ang_vel_in(self, otherframe):
"""Returns the angular velocity Vector of the ReferenceFrame.
Effectively returns the Vector:
^N omega ^B
        which represents the angular velocity of B in N, where B is self, and
        N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular velocity is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
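        Angular velocities compose along the path of frames connecting the
        two; a small sketch introducing a third frame ``B`` only for this
        example:
        >>> B = ReferenceFrame('B')
        >>> B.set_ang_vel(A, 5 * N.y)
        >>> B.ang_vel_in(N)
        10*N.x + 5*N.y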
"""
_check_frame(otherframe)
flist = self._dict_list(otherframe, 1)
outvec = Vector(0)
for i in range(len(flist) - 1):
outvec += flist[i]._ang_vel_dict[flist[i + 1]]
return outvec
def dcm(self, otherframe):
r"""Returns the direction cosine matrix relative to the provided
reference frame.
The returned matrix can be used to express the orthogonal unit vectors
of this frame in terms of the orthogonal unit vectors of
``otherframe``.
Parameters
==========
otherframe : ReferenceFrame
The reference frame which the direction cosine matrix of this frame
is formed relative to.
Examples
========
The following example rotates the reference frame A relative to N by a
simple rotation and then calculates the direction cosine matrix of N
relative to A.
>>> from sympy import symbols, sin, cos
>>> from sympy.physics.vector import ReferenceFrame
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', (q1, N.x))
>>> N.dcm(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
The second row of the above direction cosine matrix represents the
``N.y`` unit vector in N expressed in A. Like so:
>>> Ny = 0*A.x + cos(q1)*A.y - sin(q1)*A.z
Thus, expressing ``N.y`` in A should return the same result:
>>> N.y.express(A)
cos(q1)*A.y - sin(q1)*A.z
Notes
=====
        It is important to know what form of the direction cosine matrix is
returned. If ``B.dcm(A)`` is called, it means the "direction cosine
matrix of B relative to A". This is the matrix :math:`^{\mathbf{A}} \mathbf{R} ^{\mathbf{B}}`
shown in the following relationship:
.. math::
\begin{bmatrix}
\hat{\mathbf{b}}_1 \\
\hat{\mathbf{b}}_2 \\
\hat{\mathbf{b}}_3
\end{bmatrix}
=
{}^A\mathbf{R}^B
\begin{bmatrix}
\hat{\mathbf{a}}_1 \\
\hat{\mathbf{a}}_2 \\
\hat{\mathbf{a}}_3
\end{bmatrix}.
        :math:`{}^A\mathbf{R}^B` is the matrix that expresses the B unit
vectors in terms of the A unit vectors.
"""
_check_frame(otherframe)
# Check if the dcm wrt that frame has already been calculated
if otherframe in self._dcm_cache:
return self._dcm_cache[otherframe]
flist = self._dict_list(otherframe, 0)
outdcm = eye(3)
for i in range(len(flist) - 1):
outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
# After calculation, store the dcm in dcm cache for faster future
# retrieval
self._dcm_cache[otherframe] = outdcm
otherframe._dcm_cache[self] = outdcm.T
return outdcm
def orient(self, parent, rot_type, amounts, rot_order=''):
"""Sets the orientation of this reference frame relative to another
(parent) reference frame.
Parameters
==========
parent : ReferenceFrame
Reference frame that this reference frame will be rotated relative
to.
rot_type : str
The method used to generate the direction cosine matrix. Supported
methods are:
- ``'Axis'``: simple rotations about a single common axis
- ``'DCM'``: for setting the direction cosine matrix directly
- ``'Body'``: three successive rotations about new intermediate
axes, also called "Euler and Tait-Bryan angles"
- ``'Space'``: three successive rotations about the parent
frames' unit vectors
- ``'Quaternion'``: rotations defined by four parameters which
result in a singularity free direction cosine matrix
amounts :
Expressions defining the rotation angles or direction cosine
matrix. These must match the ``rot_type``. See examples below for
details. The input types are:
- ``'Axis'``: 2-tuple (expr/sym/func, Vector)
- ``'DCM'``: Matrix, shape(3,3)
- ``'Body'``: 3-tuple of expressions, symbols, or functions
- ``'Space'``: 3-tuple of expressions, symbols, or functions
- ``'Quaternion'``: 4-tuple of expressions, symbols, or
functions
rot_order : str or int, optional
            If applicable, the order of the successive rotations. The string
``'123'`` and integer ``123`` are equivalent, for example. Required
for ``'Body'`` and ``'Space'``.
Examples
========
Setup variables for the examples:
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B1 = ReferenceFrame('B')
>>> B2 = ReferenceFrame('B2')
Axis
----
``rot_type='Axis'`` creates a direction cosine matrix defined by a
simple rotation about a single axis fixed in both reference frames.
This is a rotation about an arbitrary, non-time-varying
axis by some angle. The axis is supplied as a Vector. This is how
simple rotations are defined.
>>> B.orient(N, 'Axis', (q1, N.x))
The ``orient()`` method generates a direction cosine matrix and its
transpose which defines the orientation of B relative to N and vice
versa. Once orient is called, ``dcm()`` outputs the appropriate
direction cosine matrix.
>>> B.dcm(N)
Matrix([
[1, 0, 0],
[0, cos(q1), sin(q1)],
[0, -sin(q1), cos(q1)]])
The following two lines show how the sense of the rotation can be
defined. Both lines produce the same result.
>>> B.orient(N, 'Axis', (q1, -N.x))
>>> B.orient(N, 'Axis', (-q1, N.x))
        The axis does not have to be defined by a unit vector; it can be any
        vector in the parent frame.
>>> B.orient(N, 'Axis', (q1, N.x + 2 * N.y))
DCM
---
The direction cosine matrix can be set directly. The orientation of a
frame A can be set to be the same as the frame B above like so:
>>> B.orient(N, 'Axis', (q1, N.x))
>>> A = ReferenceFrame('A')
>>> A.orient(N, 'DCM', N.dcm(B))
>>> A.dcm(N)
Matrix([
[1, 0, 0],
[0, cos(q1), sin(q1)],
[0, -sin(q1), cos(q1)]])
**Note carefully that** ``N.dcm(B)`` **was passed into** ``orient()``
**for** ``A.dcm(N)`` **to match** ``B.dcm(N)``.
Body
----
``rot_type='Body'`` rotates this reference frame relative to the
provided reference frame by rotating through three successive simple
rotations. Each subsequent axis of rotation is about the "body fixed"
unit vectors of the new intermediate reference frame. This type of
        rotation is also referred to as rotating through the `Euler and Tait-Bryan
Angles <https://en.wikipedia.org/wiki/Euler_angles>`_.
For example, the classic Euler Angle rotation can be done by:
>>> B.orient(N, 'Body', (q1, q2, q3), 'XYX')
>>> B.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
This rotates B relative to N through ``q1`` about ``N.x``, then rotates
B again through q2 about B.y, and finally through q3 about B.x. It is
equivalent to:
>>> B1.orient(N, 'Axis', (q1, N.x))
>>> B2.orient(B1, 'Axis', (q2, B1.y))
>>> B.orient(B2, 'Axis', (q3, B2.x))
>>> B.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
        Acceptable rotation orders are of length 3, expressed as a string
``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis
twice in a row are prohibited.
>>> B.orient(N, 'Body', (q1, q2, 0), 'ZXZ')
>>> B.orient(N, 'Body', (q1, q2, 0), '121')
>>> B.orient(N, 'Body', (q1, q2, q3), 123)
Space
-----
``rot_type='Space'`` also rotates the reference frame in three
successive simple rotations but the axes of rotation are the
"Space-fixed" axes. For example:
>>> B.orient(N, 'Space', (q1, q2, q3), '312')
>>> B.dcm(N)
Matrix([
[ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
[-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
[ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]])
is equivalent to:
>>> B1.orient(N, 'Axis', (q1, N.z))
>>> B2.orient(B1, 'Axis', (q2, N.x))
>>> B.orient(B2, 'Axis', (q3, N.y))
>>> B.dcm(N).simplify() # doctest: +SKIP
Matrix([
[ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
[-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
[ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]])
It is worth noting that space-fixed and body-fixed rotations are
related by the order of the rotations, i.e. the reverse order of body
fixed will give space fixed and vice versa.
>>> B.orient(N, 'Space', (q1, q2, q3), '231')
>>> B.dcm(N)
Matrix([
[cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
[ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)],
[sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])
>>> B.orient(N, 'Body', (q3, q2, q1), '132')
>>> B.dcm(N)
Matrix([
[cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
[ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)],
[sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])
Quaternion
----------
``rot_type='Quaternion'`` orients the reference frame using
quaternions. Quaternion rotation is defined as a finite rotation about
lambda, a unit vector, by an amount theta. This orientation is
described by four parameters:
- ``q0 = cos(theta/2)``
- ``q1 = lambda_x sin(theta/2)``
- ``q2 = lambda_y sin(theta/2)``
- ``q3 = lambda_z sin(theta/2)``
This type does not need a ``rot_order``.
>>> B.orient(N, 'Quaternion', (q0, q1, q2, q3))
>>> B.dcm(N)
Matrix([
[q0**2 + q1**2 - q2**2 - q3**2, 2*q0*q3 + 2*q1*q2, -2*q0*q2 + 2*q1*q3],
[ -2*q0*q3 + 2*q1*q2, q0**2 - q1**2 + q2**2 - q3**2, 2*q0*q1 + 2*q2*q3],
[ 2*q0*q2 + 2*q1*q3, -2*q0*q1 + 2*q2*q3, q0**2 - q1**2 - q2**2 + q3**2]])
"""
from sympy.physics.vector.functions import dynamicsymbols
_check_frame(parent)
# Allow passing a rotation matrix manually.
if rot_type == 'DCM':
# When rot_type == 'DCM', then amounts must be a Matrix type object
# (e.g. sympy.matrices.dense.MutableDenseMatrix).
if not isinstance(amounts, MatrixBase):
raise TypeError("Amounts must be a sympy Matrix type object.")
else:
amounts = list(amounts)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
def _rot(axis, angle):
"""DCM for simple axis 1,2,or 3 rotations. """
if axis == 1:
return Matrix([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == 2:
return Matrix([[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]])
elif axis == 3:
return Matrix([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123 and rot_type is in upper case
rot_order = translate(str(rot_order), 'XYZxyz', '123123')
rot_type = rot_type.upper()
if rot_order not in approved_orders:
raise TypeError('The supplied order is not an approved type')
parent_orient = []
if rot_type == 'AXIS':
if not rot_order == '':
raise TypeError('Axis orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):
raise TypeError('Amounts are a list or tuple of length 2')
theta = amounts[0]
axis = amounts[1]
axis = _check_vector(axis)
if not axis.dt(parent) == 0:
raise ValueError('Axis cannot be time-varying')
axis = axis.express(parent).normalize()
axis = axis.args[0][0]
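            # Rodrigues' rotation formula for the direction cosine matrix:
            # R = (I - a*a^T)*cos(theta) + skew(a)*sin(theta) + a*a^T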
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) *
sin(theta) + axis * axis.T)
elif rot_type == 'QUATERNION':
if not rot_order == '':
raise TypeError(
'Quaternion orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):
raise TypeError('Amounts are a list or tuple of length 4')
q0, q1, q2, q3 = amounts
parent_orient = (Matrix([[q0**2 + q1**2 - q2**2 - q3**2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0**2 - q1**2 + q2**2 - q3**2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0**2 - q1**2 - q2**2 + q3**2]]))
elif rot_type == 'BODY':
            if not (len(amounts) == 3 and len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1]) *
_rot(a3, amounts[2]))
elif rot_type == 'SPACE':
            if not (len(amounts) == 3 and len(rot_order) == 3):
raise TypeError('Space orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1]) *
_rot(a1, amounts[0]))
elif rot_type == 'DCM':
parent_orient = amounts
else:
raise NotImplementedError('That is not an implemented rotation')
# Reset the _dcm_cache of this frame, and remove it from the
# _dcm_caches of the frames it is linked to. Also remove it from the
# _dcm_dict of its parent
frames = self._dcm_cache.keys()
dcm_dict_del = []
dcm_cache_del = []
for frame in frames:
if frame in self._dcm_dict:
dcm_dict_del += [frame]
dcm_cache_del += [frame]
for frame in dcm_dict_del:
del frame._dcm_dict[self]
for frame in dcm_cache_del:
del frame._dcm_cache[self]
# Add the dcm relationship to _dcm_dict
self._dcm_dict = self._dlist[0] = {}
self._dcm_dict.update({parent: parent_orient.T})
parent._dcm_dict.update({self: parent_orient})
# Also update the dcm cache after resetting it
self._dcm_cache = {}
self._dcm_cache.update({parent: parent_orient.T})
parent._dcm_cache.update({self: parent_orient})
if rot_type == 'QUATERNION':
t = dynamicsymbols._t
q0, q1, q2, q3 = amounts
q0d = diff(q0, t)
q1d = diff(q1, t)
q2d = diff(q2, t)
q3d = diff(q3, t)
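            # Angular velocity components of self in parent, written in terms
            # of the quaternion measure numbers and their time derivatives.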
w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
wvec = Vector([(Matrix([w1, w2, w3]), self)])
elif rot_type == 'AXIS':
thetad = (amounts[0]).diff(dynamicsymbols._t)
wvec = thetad * amounts[1].express(parent).normalize()
elif rot_type == 'DCM':
wvec = self._w_diff_dcm(parent)
else:
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
rot_type, rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
def orientnew(self, newname, rot_type, amounts, rot_order='',
variables=None, indices=None, latexs=None):
r"""Returns a new reference frame oriented with respect to this
reference frame.
See ``ReferenceFrame.orient()`` for detailed examples of how to orient
reference frames.
Parameters
==========
newname : str
Name for the new reference frame.
rot_type : str
The method used to generate the direction cosine matrix. Supported
methods are:
- ``'Axis'``: simple rotations about a single common axis
- ``'DCM'``: for setting the direction cosine matrix directly
- ``'Body'``: three successive rotations about new intermediate
axes, also called "Euler and Tait-Bryan angles"
- ``'Space'``: three successive rotations about the parent
frames' unit vectors
- ``'Quaternion'``: rotations defined by four parameters which
result in a singularity free direction cosine matrix
amounts :
Expressions defining the rotation angles or direction cosine
matrix. These must match the ``rot_type``. See examples below for
details. The input types are:
- ``'Axis'``: 2-tuple (expr/sym/func, Vector)
- ``'DCM'``: Matrix, shape(3,3)
- ``'Body'``: 3-tuple of expressions, symbols, or functions
- ``'Space'``: 3-tuple of expressions, symbols, or functions
- ``'Quaternion'``: 4-tuple of expressions, symbols, or
functions
rot_order : str or int, optional
            If applicable, the order of the successive rotations. The string
``'123'`` and integer ``123`` are equivalent, for example. Required
for ``'Body'`` and ``'Space'``.
indices : tuple of str
Enables the reference frame's basis unit vectors to be accessed by
Python's square bracket indexing notation using the provided three
            index strings and alters the printing of the unit vectors to
reflect this choice.
latexs : tuple of str
Alters the LaTeX printing of the reference frame's basis unit
vectors to the provided three valid LaTeX strings.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
Create a new reference frame A rotated relative to N through a simple
rotation.
>>> A = N.orientnew('A', 'Axis', (q0, N.x))
Create a new reference frame B rotated relative to N through body-fixed
rotations.
>>> B = N.orientnew('B', 'Body', (q1, q2, q3), '123')
Create a new reference frame C rotated relative to N through a simple
rotation with unique indices and LaTeX printing.
>>> C = N.orientnew('C', 'Axis', (q0, N.x), indices=('1', '2', '3'),
... latexs=(r'\hat{\mathbf{c}}_1',r'\hat{\mathbf{c}}_2',
... r'\hat{\mathbf{c}}_3'))
>>> C['1']
C['1']
>>> print(vlatex(C['1']))
\hat{\mathbf{c}}_1
"""
newframe = self.__class__(newname, variables=variables,
indices=indices, latexs=latexs)
newframe.orient(self, rot_type, amounts, rot_order)
return newframe
def set_ang_acc(self, otherframe, value):
"""Define the angular acceleration Vector in a ReferenceFrame.
Defines the angular acceleration of this ReferenceFrame, in another.
Angular acceleration can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular acceleration in
value : Vector
The Vector representing angular acceleration
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_acc_dict.update({otherframe: value})
otherframe._ang_acc_dict.update({self: -value})
def set_ang_vel(self, otherframe, value):
"""Define the angular velocity vector in a ReferenceFrame.
Defines the angular velocity of this ReferenceFrame, in another.
Angular velocity can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular velocity in
value : Vector
The Vector representing angular velocity
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_vel_dict.update({otherframe: value})
otherframe._ang_vel_dict.update({self: -value})
@property
def x(self):
"""The basis Vector for the ReferenceFrame, in the x direction. """
return self._x
@property
def y(self):
"""The basis Vector for the ReferenceFrame, in the y direction. """
return self._y
@property
def z(self):
"""The basis Vector for the ReferenceFrame, in the z direction. """
return self._z
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial angular velocities of this frame in the given
frame with respect to one or more provided generalized speeds.
Parameters
==========
frame : ReferenceFrame
            The frame in which the angular velocity is defined.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial angular velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> A.set_ang_vel(N, u1 * A.x + u2 * N.y)
>>> A.partial_velocity(N, u1)
A.x
>>> A.partial_velocity(N, u1, u2)
(A.x, N.y)
"""
partials = [self.ang_vel_in(frame).diff(speed, frame, var_in_dcm=False)
for speed in gen_speeds]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
def _check_frame(other):
from .vector import VectorTypeError
if not isinstance(other, ReferenceFrame):
raise VectorTypeError(other, ReferenceFrame('A'))
|
67d885e2446d3ded95b835c507c4fe530a509f71d2e7e4a7f8df9d0a86424bcc | from sympy.core.backend import sympify, Add, ImmutableMatrix as Matrix
from sympy.printing.defaults import Printable
__all__ = ['Dyadic']
class Dyadic(Printable):
"""A Dyadic object.
See:
https://en.wikipedia.org/wiki/Dyadic_tensor
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
A more powerful way to represent a rigid body's inertia. While it is more
complex, by choosing Dyadic components to be in body fixed basis vectors,
the resulting matrix is equivalent to the inertia tensor.
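    Examples
    ========
    A small illustrative sketch; dyadics are usually built with ``outer``:
    >>> from sympy.physics.vector import ReferenceFrame, outer
    >>> N = ReferenceFrame('N')
    >>> outer(N.x, N.x) + 2*outer(N.y, N.y)
    (N.x|N.x) + 2*(N.y|N.y)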
"""
def __init__(self, inlist):
"""
Just like Vector's init, you shouldn't call this unless creating a
zero dyadic.
zd = Dyadic(0)
Stores a Dyadic as a list of lists; the inner list has the measure
number and the two unit vectors; the outerlist holds each unique
unit vector pair.
"""
self.args = []
if inlist == 0:
inlist = []
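        # Merge incoming terms that share the same pair of basis vectors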
while len(inlist) != 0:
added = 0
for i, v in enumerate(self.args):
if ((str(inlist[0][1]) == str(self.args[i][1])) and
(str(inlist[0][2]) == str(self.args[i][2]))):
self.args[i] = (self.args[i][0] + inlist[0][0],
inlist[0][1], inlist[0][2])
inlist.remove(inlist[0])
added = 1
break
if added != 1:
self.args.append(inlist[0])
inlist.remove(inlist[0])
i = 0
# This code is to remove empty parts from the list
while i < len(self.args):
if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |
(self.args[i][2] == 0)):
self.args.remove(self.args[i])
i -= 1
i += 1
def __add__(self, other):
"""The add operator for Dyadic. """
other = _check_dyadic(other)
return Dyadic(self.args + other.args)
def __and__(self, other):
"""The inner product operator for a Dyadic and a Dyadic or Vector.
Parameters
==========
other : Dyadic or Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> D1 = outer(N.x, N.y)
>>> D2 = outer(N.y, N.y)
>>> D1.dot(D2)
(N.x|N.y)
>>> D1.dot(N.y)
N.x
"""
from sympy.physics.vector.vector import Vector, _check_vector
if isinstance(other, Dyadic):
other = _check_dyadic(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
for i2, v2 in enumerate(other.args):
ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2])
else:
other = _check_vector(other)
ol = Vector(0)
for i, v in enumerate(self.args):
ol += v[0] * v[1] * (v[2] & other)
return ol
def __truediv__(self, other):
"""Divides the Dyadic by a sympifyable expression. """
return self.__mul__(1 / other)
def __eq__(self, other):
"""Tests for equality.
Is currently weak; needs stronger comparison testing
"""
if other == 0:
other = Dyadic(0)
other = _check_dyadic(other)
if (self.args == []) and (other.args == []):
return True
elif (self.args == []) or (other.args == []):
return False
return set(self.args) == set(other.args)
def __mul__(self, other):
"""Multiplies the Dyadic by a sympifyable expression.
Parameters
==========
        other : Sympifyable
The scalar to multiply this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> 5 * d
5*(N.x|N.x)
"""
newlist = [v for v in self.args]
for i, v in enumerate(newlist):
newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],
newlist[i][2])
return Dyadic(newlist)
def __ne__(self, other):
return not self == other
def __neg__(self):
return self * -1
def _latex(self, printer):
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.append(' + ' + printer._print(ar[i][1]) + r"\otimes " +
printer._print(ar[i][2]))
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.append(' - ' +
printer._print(ar[i][1]) +
r"\otimes " +
printer._print(ar[i][2]))
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
arg_str = printer._print(ar[i][0])
if isinstance(ar[i][0], Add):
arg_str = '(%s)' % arg_str
if arg_str.startswith('-'):
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + printer._print(ar[i][1]) +
r"\otimes " + printer._print(ar[i][2]))
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def _pretty(self, printer):
e = self
class Fake:
baseline = 0
def render(self, *args, **kwargs):
ar = e.args # just to shorten things
mpp = printer
if len(ar) == 0:
return str(0)
bar = "\N{CIRCLED TIMES}" if printer._use_unicode else "|"
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.extend([" + ",
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.extend([" - ",
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
if isinstance(ar[i][0], Add):
arg_str = mpp._print(
ar[i][0]).parens()[0]
else:
arg_str = mpp.doprint(ar[i][0])
if arg_str.startswith("-"):
arg_str = arg_str[1:]
str_start = " - "
else:
str_start = " + "
ol.extend([str_start, arg_str, " ",
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
outstr = "".join(ol)
if outstr.startswith(" + "):
outstr = outstr[3:]
elif outstr.startswith(" "):
outstr = outstr[1:]
return outstr
return Fake()
def __rand__(self, other):
"""The inner product operator for a Vector or Dyadic, and a Dyadic
This is for: Vector dot Dyadic
Parameters
==========
other : Vector
The vector we are dotting with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dot, outer
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> dot(N.x, d)
N.x
"""
from sympy.physics.vector.vector import Vector, _check_vector
other = _check_vector(other)
ol = Vector(0)
for i, v in enumerate(self.args):
ol += v[0] * v[2] * (v[1] & other)
return ol
def __rsub__(self, other):
return (-1 * self) + other
def __rxor__(self, other):
"""For a cross product in the form: Vector x Dyadic
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> cross(N.y, d)
- (N.z|N.x)
"""
from sympy.physics.vector.vector import _check_vector
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
ol += v[0] * ((other ^ v[1]) | v[2])
return ol
def _sympystr(self, printer):
"""Printing method. """
ar = self.args # just to shorten things
if len(ar) == 0:
return printer._print(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.append(' + (' + printer._print(ar[i][1]) + '|' + printer._print(ar[i][2]) + ')')
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.append(' - (' + printer._print(ar[i][1]) + '|' + printer._print(ar[i][2]) + ')')
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
arg_str = printer._print(ar[i][0])
if isinstance(ar[i][0], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + '*(' + printer._print(ar[i][1]) +
'|' + printer._print(ar[i][2]) + ')')
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def __sub__(self, other):
"""The subtraction operator. """
return self.__add__(other * -1)
def __xor__(self, other):
"""For a cross product in the form: Dyadic x Vector.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> cross(d, N.y)
(N.x|N.z)
"""
from sympy.physics.vector.vector import _check_vector
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
ol += v[0] * (v[1] | (v[2] ^ other))
return ol
__radd__ = __add__
__rmul__ = __mul__
def express(self, frame1, frame2=None):
"""Expresses this Dyadic in alternate frame(s)
        The first frame applies to the left side of the Dyadic and the second
        frame to the right side; if the Dyadic is in the form A.x|B.y, the two
        sides can be expressed in different frames. If no second frame is
        given, the Dyadic is expressed in only one frame.
Calls the global express function
Parameters
==========
frame1 : ReferenceFrame
The frame to express the left side of the Dyadic in
frame2 : ReferenceFrame
If provided, the frame to express the right side of the Dyadic in
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> d.express(B, N)
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
"""
from sympy.physics.vector.functions import express
return express(self, frame1, frame2)
def to_matrix(self, reference_frame, second_reference_frame=None):
"""Returns the matrix form of the dyadic with respect to one or two
reference frames.
Parameters
----------
reference_frame : ReferenceFrame
The reference frame that the rows and columns of the matrix
correspond to. If a second reference frame is provided, this
only corresponds to the rows of the matrix.
second_reference_frame : ReferenceFrame, optional, default=None
The reference frame that the columns of the matrix correspond
to.
Returns
-------
matrix : ImmutableMatrix, shape(3,3)
The matrix that gives the 2D tensor form.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> Vector.simp = True
>>> from sympy.physics.mechanics import inertia
>>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz')
>>> N = ReferenceFrame('N')
>>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
>>> inertia_dyadic.to_matrix(N)
Matrix([
[Ixx, Ixy, Ixz],
[Ixy, Iyy, Iyz],
[Ixz, Iyz, Izz]])
>>> beta = symbols('beta')
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
>>> inertia_dyadic.to_matrix(A)
Matrix([
[ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)],
[ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2],
[-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]])
"""
if second_reference_frame is None:
second_reference_frame = reference_frame
return Matrix([i.dot(self).dot(j) for i in reference_frame for j in
second_reference_frame]).reshape(3, 3)
def doit(self, **hints):
"""Calls .doit() on each term in the Dyadic"""
return sum([Dyadic([(v[0].doit(**hints), v[1], v[2])])
for v in self.args], Dyadic(0))
def dt(self, frame):
"""Take the time derivative of this Dyadic in a frame.
This function calls the global time_derivative method
Parameters
==========
frame : ReferenceFrame
The frame to take the time derivative in
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> d.dt(B)
- q'*(N.y|N.x) - q'*(N.x|N.y)
"""
from sympy.physics.vector.functions import time_derivative
return time_derivative(self, frame)
def simplify(self):
"""Returns a simplified Dyadic."""
out = Dyadic(0)
for v in self.args:
out += Dyadic([(v[0].simplify(), v[1], v[2])])
return out
def subs(self, *args, **kwargs):
"""Substitution on the Dyadic.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> s = Symbol('s')
>>> a = s*(N.x|N.x)
>>> a.subs({s: 2})
2*(N.x|N.x)
"""
return sum([Dyadic([(v[0].subs(*args, **kwargs), v[1], v[2])])
for v in self.args], Dyadic(0))
def applyfunc(self, f):
"""Apply a function to each component of a Dyadic."""
if not callable(f):
raise TypeError("`f` must be callable.")
out = Dyadic(0)
for a, b, c in self.args:
out += f(a) * (b|c)
return out
dot = __and__
cross = __xor__
def _check_dyadic(other):
if not isinstance(other, Dyadic):
raise TypeError('A Dyadic must be supplied')
return other
|
255c3a4f5741a96ed707b0b07a89ef17519f1cce05857f8398477461d68ab232 | """
This module can be used to solve 2D beam bending problems with
singularity functions in mechanics.
"""
from sympy.core import S, Symbol, diff, symbols
from sympy.solvers import linsolve
from sympy.printing import sstr
from sympy.functions import SingularityFunction, Piecewise, factorial
from sympy.core import sympify
from sympy.integrals import integrate
from sympy.series import limit
from sympy.plotting import plot, PlotGrid
from sympy.geometry.entity import GeometryEntity
from sympy.external import import_module
from sympy import lambdify, Add
from sympy.core.compatibility import iterable
from sympy.utilities.decorator import doctest_depends_on
numpy = import_module('numpy', import_kwargs={'fromlist':['arange']})
class Beam:
"""
A Beam is a structural element that is capable of withstanding load
primarily by resisting against bending. Beams are characterized by
their cross sectional profile(Second moment of area), their length
and their material.
.. note::
        While solving a beam bending problem, a user should choose their
        own sign convention and should stick to it. The results will
        automatically follow the chosen sign convention.
Examples
========
    There is a beam of length 4 meters. A constant distributed load of 6 N/m
    is applied from the midpoint of the beam to the end. There are two simple
    supports below the beam, one at the starting point and another at the
    ending point of the beam. The deflection of the beam at its ends is
    restricted. The sign convention used here takes downward forces as
    positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols, Piecewise
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(4, E, I)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(6, 2, 0)
>>> b.apply_load(R2, 4, -1)
>>> b.bc_deflection = [(0, 0), (4, 0)]
>>> b.boundary_conditions
{'deflection': [(0, 0), (4, 0)], 'slope': []}
>>> b.load
R1*SingularityFunction(x, 0, -1) + R2*SingularityFunction(x, 4, -1) + 6*SingularityFunction(x, 2, 0)
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.load
-3*SingularityFunction(x, 0, -1) + 6*SingularityFunction(x, 2, 0) - 9*SingularityFunction(x, 4, -1)
>>> b.shear_force()
-3*SingularityFunction(x, 0, 0) + 6*SingularityFunction(x, 2, 1) - 9*SingularityFunction(x, 4, 0)
>>> b.bending_moment()
-3*SingularityFunction(x, 0, 1) + 3*SingularityFunction(x, 2, 2) - 9*SingularityFunction(x, 4, 1)
>>> b.slope()
(-3*SingularityFunction(x, 0, 2)/2 + SingularityFunction(x, 2, 3) - 9*SingularityFunction(x, 4, 2)/2 + 7)/(E*I)
>>> b.deflection()
(7*x - SingularityFunction(x, 0, 3)/2 + SingularityFunction(x, 2, 4)/4 - 3*SingularityFunction(x, 4, 3)/2)/(E*I)
>>> b.deflection().rewrite(Piecewise)
(7*x - Piecewise((x**3, x > 0), (0, True))/2
- 3*Piecewise(((x - 4)**3, x - 4 > 0), (0, True))/2
+ Piecewise(((x - 2)**4, x - 2 > 0), (0, True))/4)/(E*I)
"""
def __init__(self, length, elastic_modulus, second_moment, area=Symbol('A'), variable=Symbol('x'), base_char='C'):
"""Initializes the class.
Parameters
==========
length : Sympifyable
A Symbol or value representing the Beam's length.
elastic_modulus : Sympifyable
A SymPy expression representing the Beam's Modulus of Elasticity.
It is a measure of the stiffness of the Beam material. It can
also be a continuous function of position along the beam.
second_moment : Sympifyable or Geometry object
Describes the cross-section of the beam via a SymPy expression
representing the Beam's second moment of area. It is a geometrical
property of an area which reflects how its points are distributed
with respect to its neutral axis. It can also be a continuous
function of position along the beam. Alternatively ``second_moment``
can be a shape object such as a ``Polygon`` from the geometry module
representing the shape of the cross-section of the beam. In such cases,
it is assumed that the x-axis of the shape object is aligned with the
bending axis of the beam. The second moment of area will be computed
from the shape object internally.
area : Symbol/float
            Represents the cross-sectional area of the beam
variable : Symbol, optional
A Symbol object that will be used as the variable along the beam
while representing the load, shear, moment, slope and deflection
curve. By default, it is set to ``Symbol('x')``.
base_char : String, optional
A String that will be used as base character to generate sequential
symbols for integration constants in cases where boundary conditions
are not sufficient to solve them.
"""
self.length = length
self.elastic_modulus = elastic_modulus
if isinstance(second_moment, GeometryEntity):
self.cross_section = second_moment
else:
self.cross_section = None
self.second_moment = second_moment
self.variable = variable
self._base_char = base_char
self._boundary_conditions = {'deflection': [], 'slope': []}
self._load = 0
self._area = area
self._applied_supports = []
self._support_as_loads = []
self._applied_loads = []
self._reaction_loads = {}
self._composite_type = None
self._hinge_position = None
def __str__(self):
shape_description = self._cross_section if self._cross_section else self._second_moment
str_sol = 'Beam({}, {}, {})'.format(sstr(self._length), sstr(self._elastic_modulus), sstr(shape_description))
return str_sol
@property
def reaction_loads(self):
""" Returns the reaction forces in a dictionary."""
return self._reaction_loads
@property
def length(self):
"""Length of the Beam."""
return self._length
@length.setter
def length(self, l):
self._length = sympify(l)
@property
def area(self):
"""Cross-sectional area of the Beam. """
return self._area
@area.setter
def area(self, a):
self._area = sympify(a)
@property
def variable(self):
"""
A symbol that can be used as a variable along the length of the beam
while representing load distribution, shear force curve, bending
moment, slope curve and the deflection curve. By default, it is set
to ``Symbol('x')``, but this property is mutable.
Examples
========
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I, A = symbols('E, I, A')
>>> x, y, z = symbols('x, y, z')
>>> b = Beam(4, E, I)
>>> b.variable
x
>>> b.variable = y
>>> b.variable
y
>>> b = Beam(4, E, I, A, z)
>>> b.variable
z
"""
return self._variable
@variable.setter
def variable(self, v):
if isinstance(v, Symbol):
self._variable = v
else:
raise TypeError("""The variable should be a Symbol object.""")
@property
def elastic_modulus(self):
"""Young's Modulus of the Beam. """
return self._elastic_modulus
@elastic_modulus.setter
def elastic_modulus(self, e):
self._elastic_modulus = sympify(e)
@property
def second_moment(self):
"""Second moment of area of the Beam. """
return self._second_moment
@second_moment.setter
def second_moment(self, i):
self._cross_section = None
if isinstance(i, GeometryEntity):
raise ValueError("To update cross-section geometry use `cross_section` attribute")
else:
self._second_moment = sympify(i)
@property
def cross_section(self):
"""Cross-section of the beam"""
return self._cross_section
@cross_section.setter
def cross_section(self, s):
if s:
self._second_moment = s.second_moment_of_area()[0]
self._cross_section = s
@property
def boundary_conditions(self):
"""
Returns a dictionary of boundary conditions applied on the beam.
The dictionary has two keywords namely slope and deflection.
The value of each keyword is a list of tuples, where each tuple
contains location and value of a boundary condition in the format
(location, value).
Examples
========
There is a beam of length 4 meters. The slope of the beam should be 1
at 0. The deflection should be 2 at 0.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(4, E, I)
>>> b.bc_deflection = [(0, 2)]
>>> b.bc_slope = [(0, 1)]
>>> b.boundary_conditions
{'deflection': [(0, 2)], 'slope': [(0, 1)]}
Here the deflection of the beam should be ``2`` at ``0``.
Similarly, the slope of the beam should be ``1`` at ``0``.
"""
return self._boundary_conditions
@property
def bc_slope(self):
return self._boundary_conditions['slope']
@bc_slope.setter
def bc_slope(self, s_bcs):
self._boundary_conditions['slope'] = s_bcs
@property
def bc_deflection(self):
return self._boundary_conditions['deflection']
@bc_deflection.setter
def bc_deflection(self, d_bcs):
self._boundary_conditions['deflection'] = d_bcs
def join(self, beam, via="fixed"):
"""
This method joins two beams to make a new composite beam system.
The Beam instance passed gets attached to the right end of the calling
object. This method can be used to form beams having discontinuous
values of elastic modulus or second moment of area.
Parameters
==========
beam : Beam class object
The Beam object which would be connected to the right of the
calling object.
via : String
States the way the two Beam objects would get connected
- For axially fixed Beams, via="fixed"
- For Beams connected via hinge, via="hinge"
Examples
========
There is a cantilever beam of length 4 meters. For the first 2 meters
its moment of inertia is `1.5*I` and `I` for the rest.
A point load of magnitude 20 N is applied from the top at its free end.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b1 = Beam(2, E, 1.5*I)
>>> b2 = Beam(2, E, I)
>>> b = b1.join(b2, "fixed")
>>> b.apply_load(20, 4, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 0, -2)
>>> b.bc_slope = [(0, 0)]
>>> b.bc_deflection = [(0, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.load
80*SingularityFunction(x, 0, -2) - 20*SingularityFunction(x, 0, -1) + 20*SingularityFunction(x, 4, -1)
>>> b.slope()
(((80*SingularityFunction(x, 0, 1) - 10*SingularityFunction(x, 0, 2) + 10*SingularityFunction(x, 4, 2))/I - 120/I)/E + 80.0/(E*I))*SingularityFunction(x, 2, 0)
+ 0.666666666666667*(80*SingularityFunction(x, 0, 1) - 10*SingularityFunction(x, 0, 2) + 10*SingularityFunction(x, 4, 2))*SingularityFunction(x, 0, 0)/(E*I)
- 0.666666666666667*(80*SingularityFunction(x, 0, 1) - 10*SingularityFunction(x, 0, 2) + 10*SingularityFunction(x, 4, 2))*SingularityFunction(x, 2, 0)/(E*I)
"""
x = self.variable
E = self.elastic_modulus
new_length = self.length + beam.length
if self.second_moment != beam.second_moment:
new_second_moment = Piecewise((self.second_moment, x<=self.length),
(beam.second_moment, x<=new_length))
else:
new_second_moment = self.second_moment
if via == "fixed":
new_beam = Beam(new_length, E, new_second_moment, x)
new_beam._composite_type = "fixed"
return new_beam
if via == "hinge":
new_beam = Beam(new_length, E, new_second_moment, x)
new_beam._composite_type = "hinge"
new_beam._hinge_position = self.length
return new_beam
def apply_support(self, loc, type="fixed"):
"""
This method applies support to a particular beam object.
Parameters
==========
loc : Sympifyable
Location of point at which support is applied.
type : String
Determines type of Beam support applied. To apply support structure
with
- zero degree of freedom, type = "fixed"
- one degree of freedom, type = "pin"
- two degrees of freedom, type = "roller"
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(30, E, I)
>>> b.apply_support(10, 'roller')
>>> b.apply_support(30, 'roller')
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(120, 30, -2)
>>> R_10, R_30 = symbols('R_10, R_30')
>>> b.solve_for_reaction_loads(R_10, R_30)
>>> b.load
-8*SingularityFunction(x, 0, -1) + 6*SingularityFunction(x, 10, -1)
+ 120*SingularityFunction(x, 30, -2) + 2*SingularityFunction(x, 30, -1)
>>> b.slope()
(-4*SingularityFunction(x, 0, 2) + 3*SingularityFunction(x, 10, 2)
+ 120*SingularityFunction(x, 30, 1) + SingularityFunction(x, 30, 2) + 4000/3)/(E*I)
"""
loc = sympify(loc)
self._applied_supports.append((loc, type))
if type == "pin" or type == "roller":
reaction_load = Symbol('R_'+str(loc))
self.apply_load(reaction_load, loc, -1)
self.bc_deflection.append((loc, 0))
else:
reaction_load = Symbol('R_'+str(loc))
reaction_moment = Symbol('M_'+str(loc))
self.apply_load(reaction_load, loc, -1)
self.apply_load(reaction_moment, loc, -2)
self.bc_deflection.append((loc, 0))
self.bc_slope.append((loc, 0))
self._support_as_loads.append((reaction_moment, loc, -2, None))
self._support_as_loads.append((reaction_load, loc, -1, None))
def apply_load(self, value, start, order, end=None):
"""
This method adds up the loads given to a particular beam object.
Parameters
==========
value : Sympifyable
The value inserted should have the units [Force/(Distance**(n+1))]
where n is the order of applied load.
Units for applied loads:
- For moments, unit = kN*m
- For point loads, unit = kN
- For constant distributed load, unit = kN/m
- For ramp loads, unit = kN/m/m
- For parabolic ramp loads, unit = kN/m/m/m
- ... so on.
start : Sympifyable
The starting point of the applied load. For point moments and
point forces this is the location of application.
order : Integer
The order of the applied load.
- For moments, order = -2
- For point loads, order =-1
- For constant distributed load, order = 0
- For ramp loads, order = 1
- For parabolic ramp loads, order = 2
- ... so on.
end : Sympifyable, optional
An optional argument that can be used if the load has an end point
within the length of the beam.
Examples
========
There is a beam of length 4 meters. A moment of magnitude 3 Nm is
applied in the clockwise direction at the starting point of the beam.
A point load of magnitude 4 N is applied from the top of the beam at
2 meters from the starting point and a parabolic ramp load of magnitude
2 N/m is applied below the beam starting from 2 meters to 3 meters
away from the starting point of the beam.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(4, E, I)
>>> b.apply_load(-3, 0, -2)
>>> b.apply_load(4, 2, -1)
>>> b.apply_load(-2, 2, 2, end=3)
>>> b.load
-3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1) - 2*SingularityFunction(x, 2, 2) + 2*SingularityFunction(x, 3, 0) + 4*SingularityFunction(x, 3, 1) + 2*SingularityFunction(x, 3, 2)
"""
x = self.variable
value = sympify(value)
start = sympify(start)
order = sympify(order)
self._applied_loads.append((value, start, order, end))
self._load += value*SingularityFunction(x, start, order)
if end:
if order.is_negative:
msg = ("If 'end' is provided the 'order' of the load cannot "
"be negative, i.e. 'end' is only valid for distributed "
"loads.")
raise ValueError(msg)
# NOTE : A Taylor series can be used to define the summation of
# singularity functions that subtract from the load past the end
# point such that it evaluates to zero past 'end'.
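# Concretely, the correction subtracted below is
#     sum_{i=0}^{order} f^(i)(end - start) * SingularityFunction(x, end, i) / i!
# with f = value*x**order; for x > end this sum reproduces the applied
# load term, so the net load vanishes beyond 'end'.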
f = value*x**order
for i in range(0, order + 1):
self._load -= (f.diff(x, i).subs(x, end - start) *
SingularityFunction(x, end, i)/factorial(i))
def remove_load(self, value, start, order, end=None):
"""
This method removes a particular load present on the beam object.
Returns a ValueError if the load passed as an argument is not
present on the beam.
Parameters
==========
value : Sympifyable
The magnitude of an applied load.
start : Sympifyable
The starting point of the applied load. For point moments and
point forces this is the location of application.
order : Integer
The order of the applied load.
- For moments, order= -2
- For point loads, order=-1
- For constant distributed load, order=0
- For ramp loads, order=1
- For parabolic ramp loads, order=2
- ... so on.
end : Sympifyable, optional
An optional argument that can be used if the load has an end point
within the length of the beam.
Examples
========
There is a beam of length 4 meters. A moment of magnitude 3 Nm is
applied in the clockwise direction at the starting point of the beam.
A pointload of magnitude 4 N is applied from the top of the beam at
2 meters from the starting point and a parabolic ramp load of magnitude
2 N/m is applied below the beam starting from 2 meters to 3 meters
away from the starting point of the beam.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(4, E, I)
>>> b.apply_load(-3, 0, -2)
>>> b.apply_load(4, 2, -1)
>>> b.apply_load(-2, 2, 2, end=3)
>>> b.load
-3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1) - 2*SingularityFunction(x, 2, 2) + 2*SingularityFunction(x, 3, 0) + 4*SingularityFunction(x, 3, 1) + 2*SingularityFunction(x, 3, 2)
>>> b.remove_load(-2, 2, 2, end = 3)
>>> b.load
-3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1)
"""
x = self.variable
value = sympify(value)
start = sympify(start)
order = sympify(order)
if (value, start, order, end) in self._applied_loads:
self._load -= value*SingularityFunction(x, start, order)
self._applied_loads.remove((value, start, order, end))
else:
msg = "No such load distribution exists on the beam object."
raise ValueError(msg)
if end:
# TODO : This is essentially duplicate code wrt to apply_load,
# would be better to move it to one location and both methods use
# it.
if order.is_negative:
msg = ("If 'end' is provided the 'order' of the load cannot "
"be negative, i.e. 'end' is only valid for distributed "
"loads.")
raise ValueError(msg)
# NOTE : A Taylor series can be used to define the summation of
# singularity functions that subtract from the load past the end
# point such that it evaluates to zero past 'end'.
f = value*x**order
for i in range(0, order + 1):
self._load += (f.diff(x, i).subs(x, end - start) *
SingularityFunction(x, end, i)/factorial(i))
@property
def load(self):
"""
Returns a Singularity Function expression which represents
the load distribution curve of the Beam object.
Examples
========
There is a beam of length 4 meters. A moment of magnitude 3 Nm is
applied in the clockwise direction at the starting point of the beam.
A point load of magnitude 4 N is applied from the top of the beam at
2 meters from the starting point and a parabolic ramp load of magnitude
2 N/m is applied below the beam starting from 3 meters away from the
starting point of the beam.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(4, E, I)
>>> b.apply_load(-3, 0, -2)
>>> b.apply_load(4, 2, -1)
>>> b.apply_load(-2, 3, 2)
>>> b.load
-3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1) - 2*SingularityFunction(x, 3, 2)
"""
return self._load
@property
def applied_loads(self):
"""
Returns a list of all loads applied on the beam object.
Each load in the list is a tuple of form (value, start, order, end).
Examples
========
There is a beam of length 4 meters. A moment of magnitude 3 Nm is
applied in the clockwise direction at the starting point of the beam.
A pointload of magnitude 4 N is applied from the top of the beam at
2 meters from the starting point. Another pointload of magnitude 5 N
is applied at same position.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(4, E, I)
>>> b.apply_load(-3, 0, -2)
>>> b.apply_load(4, 2, -1)
>>> b.apply_load(5, 2, -1)
>>> b.load
-3*SingularityFunction(x, 0, -2) + 9*SingularityFunction(x, 2, -1)
>>> b.applied_loads
[(-3, 0, -2, None), (4, 2, -1, None), (5, 2, -1, None)]
"""
return self._applied_loads
def _solve_hinge_beams(self, *reactions):
"""Method to find integration constants and reactional variables in a
composite beam connected via hinge.
This method resolves the composite Beam into its sub-beams and then
equations of shear force, bending moment, slope and deflection are
evaluated for both of them separately. These equations are then solved
for unknown reactions and integration constants using the boundary
conditions applied on the Beam. Equal deflection of both sub-beams
at the hinge joint gives us another equation to solve the system.
Examples
========
A combined beam, with constant flexural rigidity E*I, is formed by joining
a Beam of length 2*l to the right of another Beam of length l. The whole
beam is fixed at both of its ends. A point load of magnitude P is applied
from the top at a distance of 2*l from the starting point.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> l=symbols('l', positive=True)
>>> b1=Beam(l ,E,I)
>>> b2=Beam(2*l ,E,I)
>>> b=b1.join(b2,"hinge")
>>> M1, A1, M2, A2, P = symbols('M1 A1 M2 A2 P')
>>> b.apply_load(A1,0,-1)
>>> b.apply_load(M1,0,-2)
>>> b.apply_load(P,2*l,-1)
>>> b.apply_load(A2,3*l,-1)
>>> b.apply_load(M2,3*l,-2)
>>> b.bc_slope=[(0,0), (3*l, 0)]
>>> b.bc_deflection=[(0,0), (3*l, 0)]
>>> b.solve_for_reaction_loads(M1, A1, M2, A2)
>>> b.reaction_loads
{A1: -5*P/18, A2: -13*P/18, M1: 5*P*l/18, M2: -4*P*l/9}
>>> b.slope()
(5*P*l*SingularityFunction(x, 0, 1)/18 - 5*P*SingularityFunction(x, 0, 2)/36 + 5*P*SingularityFunction(x, l, 2)/36)*SingularityFunction(x, 0, 0)/(E*I)
- (5*P*l*SingularityFunction(x, 0, 1)/18 - 5*P*SingularityFunction(x, 0, 2)/36 + 5*P*SingularityFunction(x, l, 2)/36)*SingularityFunction(x, l, 0)/(E*I)
+ (P*l**2/18 - 4*P*l*SingularityFunction(-l + x, 2*l, 1)/9 - 5*P*SingularityFunction(-l + x, 0, 2)/36 + P*SingularityFunction(-l + x, l, 2)/2
- 13*P*SingularityFunction(-l + x, 2*l, 2)/36)*SingularityFunction(x, l, 0)/(E*I)
>>> b.deflection()
(5*P*l*SingularityFunction(x, 0, 2)/36 - 5*P*SingularityFunction(x, 0, 3)/108 + 5*P*SingularityFunction(x, l, 3)/108)*SingularityFunction(x, 0, 0)/(E*I)
- (5*P*l*SingularityFunction(x, 0, 2)/36 - 5*P*SingularityFunction(x, 0, 3)/108 + 5*P*SingularityFunction(x, l, 3)/108)*SingularityFunction(x, l, 0)/(E*I)
+ (5*P*l**3/54 + P*l**2*(-l + x)/18 - 2*P*l*SingularityFunction(-l + x, 2*l, 2)/9 - 5*P*SingularityFunction(-l + x, 0, 3)/108 + P*SingularityFunction(-l + x, l, 3)/6
- 13*P*SingularityFunction(-l + x, 2*l, 3)/108)*SingularityFunction(x, l, 0)/(E*I)
"""
x = self.variable
l = self._hinge_position
E = self._elastic_modulus
I = self._second_moment
if isinstance(I, Piecewise):
I1 = I.args[0][0]
I2 = I.args[1][0]
else:
I1 = I2 = I
load_1 = 0 # Load equation on first segment of composite beam
load_2 = 0 # Load equation on second segment of composite beam
# Distributing load on both segments
for load in self.applied_loads:
if load[1] < l:
load_1 += load[0]*SingularityFunction(x, load[1], load[2])
if load[2] == 0:
load_1 -= load[0]*SingularityFunction(x, load[3], load[2])
elif load[2] > 0:
load_1 -= load[0]*SingularityFunction(x, load[3], load[2]) + load[0]*SingularityFunction(x, load[3], 0)
elif load[1] == l:
load_1 += load[0]*SingularityFunction(x, load[1], load[2])
load_2 += load[0]*SingularityFunction(x, load[1] - l, load[2])
elif load[1] > l:
load_2 += load[0]*SingularityFunction(x, load[1] - l, load[2])
if load[2] == 0:
load_2 -= load[0]*SingularityFunction(x, load[3] - l, load[2])
elif load[2] > 0:
load_2 -= load[0]*SingularityFunction(x, load[3] - l, load[2]) + load[0]*SingularityFunction(x, load[3] - l, 0)
h = Symbol('h') # Force due to hinge
load_1 += h*SingularityFunction(x, l, -1)
load_2 -= h*SingularityFunction(x, 0, -1)
eq = []
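# The first four entries of ``eq`` encode static equilibrium of each
# segment: the shear force and bending moment curves must vanish at the
# far end of the respective segment (the hinge force ``h`` is included
# in both segment loads above).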
shear_1 = integrate(load_1, x)
shear_curve_1 = limit(shear_1, x, l)
eq.append(shear_curve_1)
bending_1 = integrate(shear_1, x)
moment_curve_1 = limit(bending_1, x, l)
eq.append(moment_curve_1)
shear_2 = integrate(load_2, x)
shear_curve_2 = limit(shear_2, x, self.length - l)
eq.append(shear_curve_2)
bending_2 = integrate(shear_2, x)
moment_curve_2 = limit(bending_2, x, self.length - l)
eq.append(moment_curve_2)
C1 = Symbol('C1')
C2 = Symbol('C2')
C3 = Symbol('C3')
C4 = Symbol('C4')
slope_1 = S.One/(E*I1)*(integrate(bending_1, x) + C1)
def_1 = S.One/(E*I1)*(integrate((E*I)*slope_1, x) + C1*x + C2)
slope_2 = S.One/(E*I2)*(integrate(integrate(integrate(load_2, x), x), x) + C3)
def_2 = S.One/(E*I2)*(integrate((E*I)*slope_2, x) + C4)
for position, value in self.bc_slope:
if position<l:
eq.append(slope_1.subs(x, position) - value)
else:
eq.append(slope_2.subs(x, position - l) - value)
for position, value in self.bc_deflection:
if position<l:
eq.append(def_1.subs(x, position) - value)
else:
eq.append(def_2.subs(x, position - l) - value)
eq.append(def_1.subs(x, l) - def_2.subs(x, 0)) # Deflection of both the segments at hinge would be equal
constants = list(linsolve(eq, C1, C2, C3, C4, h, *reactions))
reaction_values = list(constants[0])[5:]
self._reaction_loads = dict(zip(reactions, reaction_values))
self._load = self._load.subs(self._reaction_loads)
# Substituting constants and reactional load and moments with their corresponding values
slope_1 = slope_1.subs({C1: constants[0][0], h:constants[0][4]}).subs(self._reaction_loads)
def_1 = def_1.subs({C1: constants[0][0], C2: constants[0][1], h:constants[0][4]}).subs(self._reaction_loads)
slope_2 = slope_2.subs({x: x-l, C3: constants[0][2], h:constants[0][4]}).subs(self._reaction_loads)
def_2 = def_2.subs({x: x-l,C3: constants[0][2], C4: constants[0][3], h:constants[0][4]}).subs(self._reaction_loads)
self._hinge_beam_slope = slope_1*SingularityFunction(x, 0, 0) - slope_1*SingularityFunction(x, l, 0) + slope_2*SingularityFunction(x, l, 0)
self._hinge_beam_deflection = def_1*SingularityFunction(x, 0, 0) - def_1*SingularityFunction(x, l, 0) + def_2*SingularityFunction(x, l, 0)
def solve_for_reaction_loads(self, *reactions):
"""
Solves for the reaction forces.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1) # Reaction force at x = 10
>>> b.apply_load(R2, 30, -1) # Reaction force at x = 30
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.load
R1*SingularityFunction(x, 10, -1) + R2*SingularityFunction(x, 30, -1)
- 8*SingularityFunction(x, 0, -1) + 120*SingularityFunction(x, 30, -2)
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.reaction_loads
{R1: 6, R2: 2}
>>> b.load
-8*SingularityFunction(x, 0, -1) + 6*SingularityFunction(x, 10, -1)
+ 120*SingularityFunction(x, 30, -2) + 2*SingularityFunction(x, 30, -1)
"""
if self._composite_type == "hinge":
return self._solve_hinge_beams(*reactions)
x = self.variable
l = self.length
C3 = Symbol('C3')
C4 = Symbol('C4')
shear_curve = limit(self.shear_force(), x, l)
moment_curve = limit(self.bending_moment(), x, l)
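# Static equilibrium: the net shear force and the net bending moment at
# the far end of the beam must both vanish once reactions are included.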
slope_eqs = []
deflection_eqs = []
slope_curve = integrate(self.bending_moment(), x) + C3
for position, value in self._boundary_conditions['slope']:
eqs = slope_curve.subs(x, position) - value
slope_eqs.append(eqs)
deflection_curve = integrate(slope_curve, x) + C4
for position, value in self._boundary_conditions['deflection']:
eqs = deflection_curve.subs(x, position) - value
deflection_eqs.append(eqs)
solution = list((linsolve([shear_curve, moment_curve] + slope_eqs
+ deflection_eqs, (C3, C4) + reactions).args)[0])
solution = solution[2:]
self._reaction_loads = dict(zip(reactions, solution))
self._load = self._load.subs(self._reaction_loads)
def shear_force(self):
"""
Returns a Singularity Function expression which represents
the shear force curve of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.shear_force()
-8*SingularityFunction(x, 0, 0) + 6*SingularityFunction(x, 10, 0) + 120*SingularityFunction(x, 30, -1) + 2*SingularityFunction(x, 30, 0)
"""
x = self.variable
return integrate(self.load, x)
def max_shear_force(self):
"""Returns maximum Shear force and its coordinate
in the Beam object."""
from sympy import solve, Mul, Interval
shear_curve = self.shear_force()
x = self.variable
terms = shear_curve.args
singularity = [] # Points at which shear function changes
for term in terms:
if isinstance(term, Mul):
term = term.args[-1] # SingularityFunction in the term
singularity.append(term.args[1])
singularity = list(set(singularity))
singularity.sort()
intervals = [] # List of Intervals with discrete value of shear force
shear_values = [] # List of values of shear force in each interval
for i, s in enumerate(singularity):
if s == 0:
continue
try:
shear_slope = Piecewise((float("nan"), x<=singularity[i-1]),(self._load.rewrite(Piecewise), x<s), (float("nan"), True))
points = solve(shear_slope, x)
val = []
for point in points:
val.append(shear_curve.subs(x, point))
points.extend([singularity[i-1], s])
val.extend([limit(shear_curve, x, singularity[i-1], '+'), limit(shear_curve, x, s, '-')])
val = list(map(abs, val))
max_shear = max(val)
shear_values.append(max_shear)
intervals.append(points[val.index(max_shear)])
# If shear force in a particular Interval has zero or constant
# slope, then above block gives NotImplementedError as
# solve can't represent Interval solutions.
except NotImplementedError:
initial_shear = limit(shear_curve, x, singularity[i-1], '+')
final_shear = limit(shear_curve, x, s, '-')
# If shear_curve has a constant slope(it is a line).
if shear_curve.subs(x, (singularity[i-1] + s)/2) == (initial_shear + final_shear)/2 and initial_shear != final_shear:
shear_values.extend([initial_shear, final_shear])
intervals.extend([singularity[i-1], s])
else: # shear_curve has same value in whole Interval
shear_values.append(final_shear)
intervals.append(Interval(singularity[i-1], s))
shear_values = list(map(abs, shear_values))
maximum_shear = max(shear_values)
point = intervals[shear_values.index(maximum_shear)]
return (point, maximum_shear)
def bending_moment(self):
"""
Returns a Singularity Function expression which represents
the bending moment curve of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.bending_moment()
-8*SingularityFunction(x, 0, 1) + 6*SingularityFunction(x, 10, 1) + 120*SingularityFunction(x, 30, 0) + 2*SingularityFunction(x, 30, 1)
"""
x = self.variable
return integrate(self.shear_force(), x)
def max_bmoment(self):
"""Returns maximum Shear force and its coordinate
in the Beam object."""
from sympy import solve, Mul, Interval
bending_curve = self.bending_moment()
x = self.variable
terms = bending_curve.args
singularity = [] # Points at which bending moment changes
for term in terms:
if isinstance(term, Mul):
term = term.args[-1] # SingularityFunction in the term
singularity.append(term.args[1])
singularity = list(set(singularity))
singularity.sort()
intervals = [] # List of Intervals with discrete value of bending moment
moment_values = [] # List of values of bending moment in each interval
for i, s in enumerate(singularity):
if s == 0:
continue
try:
moment_slope = Piecewise((float("nan"), x<=singularity[i-1]),(self.shear_force().rewrite(Piecewise), x<s), (float("nan"), True))
points = solve(moment_slope, x)
val = []
for point in points:
val.append(bending_curve.subs(x, point))
points.extend([singularity[i-1], s])
val.extend([limit(bending_curve, x, singularity[i-1], '+'), limit(bending_curve, x, s, '-')])
val = list(map(abs, val))
max_moment = max(val)
moment_values.append(max_moment)
intervals.append(points[val.index(max_moment)])
# If bending moment in a particular Interval has zero or constant
# slope, then above block gives NotImplementedError as solve
# can't represent Interval solutions.
except NotImplementedError:
initial_moment = limit(bending_curve, x, singularity[i-1], '+')
final_moment = limit(bending_curve, x, s, '-')
# If bending_curve has a constant slope(it is a line).
if bending_curve.subs(x, (singularity[i-1] + s)/2) == (initial_moment + final_moment)/2 and initial_moment != final_moment:
moment_values.extend([initial_moment, final_moment])
intervals.extend([singularity[i-1], s])
else: # bending_curve has same value in whole Interval
moment_values.append(final_moment)
intervals.append(Interval(singularity[i-1], s))
moment_values = list(map(abs, moment_values))
maximum_moment = max(moment_values)
point = intervals[moment_values.index(maximum_moment)]
return (point, maximum_moment)
def point_cflexure(self):
"""
Returns a list of point(s) with zero bending moment and
where bending moment curve of the beam object changes
its sign from negative to positive or vice versa.
Examples
========
There is a 10 meter long overhanging beam. There are
two simple supports below the beam, one at the start
and another one at a distance of 6 meters from the start.
Point loads of magnitude 10 KN and 20 KN are applied at
2 meters and 4 meters from the start respectively. A uniformly
distributed load of magnitude 3 KN/m is also applied on top,
starting from 6 meters away from the starting point till the end.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> b = Beam(10, E, I)
>>> b.apply_load(-4, 0, -1)
>>> b.apply_load(-46, 6, -1)
>>> b.apply_load(10, 2, -1)
>>> b.apply_load(20, 4, -1)
>>> b.apply_load(3, 6, 0)
>>> b.point_cflexure()
[10/3]
"""
from sympy import solve, Piecewise
# To restrict the range within length of the Beam
moment_curve = Piecewise((float("nan"), self.variable<=0),
(self.bending_moment(), self.variable<self.length),
(float("nan"), True))
points = solve(moment_curve.rewrite(Piecewise), self.variable,
domain=S.Reals)
return points
def slope(self):
"""
Returns a Singularity Function expression which represents
the slope of the elastic curve of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.slope()
(-4*SingularityFunction(x, 0, 2) + 3*SingularityFunction(x, 10, 2)
+ 120*SingularityFunction(x, 30, 1) + SingularityFunction(x, 30, 2) + 4000/3)/(E*I)
"""
x = self.variable
E = self.elastic_modulus
I = self.second_moment
if self._composite_type == "hinge":
return self._hinge_beam_slope
if not self._boundary_conditions['slope']:
return diff(self.deflection(), x)
if isinstance(I, Piecewise) and self._composite_type == "fixed":
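# For a fixed composite beam the slope is built segment by segment:
# integrate M/(E*I_i) over each segment, add the slope carried over from
# the previous segment, and window the result with SingularityFunctions.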
args = I.args
slope = 0
prev_slope = 0
prev_end = 0
for i in range(len(args)):
if i != 0:
prev_end = args[i-1][1].args[1]
slope_value = S.One/E*integrate(self.bending_moment()/args[i][0], (x, prev_end, x))
if i != len(args) - 1:
slope += (prev_slope + slope_value)*SingularityFunction(x, prev_end, 0) - \
(prev_slope + slope_value)*SingularityFunction(x, args[i][1].args[1], 0)
else:
slope += (prev_slope + slope_value)*SingularityFunction(x, prev_end, 0)
prev_slope = slope_value.subs(x, args[i][1].args[1])
return slope
C3 = Symbol('C3')
slope_curve = integrate(S.One/(E*I)*self.bending_moment(), x) + C3
bc_eqs = []
for position, value in self._boundary_conditions['slope']:
eqs = slope_curve.subs(x, position) - value
bc_eqs.append(eqs)
constants = list(linsolve(bc_eqs, C3))
slope_curve = slope_curve.subs({C3: constants[0][0]})
return slope_curve
def deflection(self):
"""
Returns a Singularity Function expression which represents
the elastic curve or deflection of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A pointload
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.deflection()
(4000*x/3 - 4*SingularityFunction(x, 0, 3)/3 + SingularityFunction(x, 10, 3)
+ 60*SingularityFunction(x, 30, 2) + SingularityFunction(x, 30, 3)/3 - 12000)/(E*I)
"""
x = self.variable
E = self.elastic_modulus
I = self.second_moment
if self._composite_type == "hinge":
return self._hinge_beam_deflection
if not self._boundary_conditions['deflection'] and not self._boundary_conditions['slope']:
if isinstance(I, Piecewise) and self._composite_type == "fixed":
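# Built the same way as in ``slope``: segment-by-segment integration,
# carrying the deflection of the previous segment forward and windowing
# with SingularityFunctions.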
args = I.args
prev_slope = 0
prev_def = 0
prev_end = 0
deflection = 0
for i in range(len(args)):
if i != 0:
prev_end = args[i-1][1].args[1]
slope_value = S.One/E*integrate(self.bending_moment()/args[i][0], (x, prev_end, x))
recent_segment_slope = prev_slope + slope_value
deflection_value = integrate(recent_segment_slope, (x, prev_end, x))
if i != len(args) - 1:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0) \
- (prev_def + deflection_value)*SingularityFunction(x, args[i][1].args[1], 0)
else:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0)
prev_slope = slope_value.subs(x, args[i][1].args[1])
prev_def = deflection_value.subs(x, args[i][1].args[1])
return deflection
base_char = self._base_char
constants = symbols(base_char + '3:5')
return S.One/(E*I)*integrate(integrate(self.bending_moment(), x), x) + constants[0]*x + constants[1]
elif not self._boundary_conditions['deflection']:
base_char = self._base_char
constant = symbols(base_char + '4')
return integrate(self.slope(), x) + constant
elif not self._boundary_conditions['slope'] and self._boundary_conditions['deflection']:
if isinstance(I, Piecewise) and self._composite_type == "fixed":
args = I.args
prev_slope = 0
prev_def = 0
prev_end = 0
deflection = 0
for i in range(len(args)):
if i != 0:
prev_end = args[i-1][1].args[1]
slope_value = S.One/E*integrate(self.bending_moment()/args[i][0], (x, prev_end, x))
recent_segment_slope = prev_slope + slope_value
deflection_value = integrate(recent_segment_slope, (x, prev_end, x))
if i != len(args) - 1:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0) \
- (prev_def + deflection_value)*SingularityFunction(x, args[i][1].args[1], 0)
else:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0)
prev_slope = slope_value.subs(x, args[i][1].args[1])
prev_def = deflection_value.subs(x, args[i][1].args[1])
return deflection
base_char = self._base_char
C3, C4 = symbols(base_char + '3:5') # Integration constants
slope_curve = integrate(self.bending_moment(), x) + C3
deflection_curve = integrate(slope_curve, x) + C4
bc_eqs = []
for position, value in self._boundary_conditions['deflection']:
eqs = deflection_curve.subs(x, position) - value
bc_eqs.append(eqs)
constants = list(linsolve(bc_eqs, (C3, C4)))
deflection_curve = deflection_curve.subs({C3: constants[0][0], C4: constants[0][1]})
return S.One/(E*I)*deflection_curve
if isinstance(I, Piecewise) and self._composite_type == "fixed":
args = I.args
prev_slope = 0
prev_def = 0
prev_end = 0
deflection = 0
for i in range(len(args)):
if i != 0:
prev_end = args[i-1][1].args[1]
slope_value = S.One/E*integrate(self.bending_moment()/args[i][0], (x, prev_end, x))
recent_segment_slope = prev_slope + slope_value
deflection_value = integrate(recent_segment_slope, (x, prev_end, x))
if i != len(args) - 1:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0) \
- (prev_def + deflection_value)*SingularityFunction(x, args[i][1].args[1], 0)
else:
deflection += (prev_def + deflection_value)*SingularityFunction(x, prev_end, 0)
prev_slope = slope_value.subs(x, args[i][1].args[1])
prev_def = deflection_value.subs(x, args[i][1].args[1])
return deflection
C4 = Symbol('C4')
deflection_curve = integrate(self.slope(), x) + C4
bc_eqs = []
for position, value in self._boundary_conditions['deflection']:
eqs = deflection_curve.subs(x, position) - value
bc_eqs.append(eqs)
constants = list(linsolve(bc_eqs, C4))
deflection_curve = deflection_curve.subs({C4: constants[0][0]})
return deflection_curve
def max_deflection(self):
"""
Returns point of max deflection and its corresponding deflection value
in a Beam object.
"""
from sympy import solve, Piecewise
# To restrict the range within length of the Beam
slope_curve = Piecewise((float("nan"), self.variable<=0),
(self.slope(), self.variable<self.length),
(float("nan"), True))
points = solve(slope_curve.rewrite(Piecewise), self.variable,
domain=S.Reals)
deflection_curve = self.deflection()
deflections = [deflection_curve.subs(self.variable, x) for x in points]
deflections = list(map(abs, deflections))
if len(deflections) != 0:
max_def = max(deflections)
return (points[deflections.index(max_def)], max_def)
else:
return None
def shear_stress(self):
"""
Returns an expression representing the Shear Stress
curve of the Beam object.
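Examples
========
As a minimal sketch, reuse the 30 meter beam from ``shear_force`` above,
here constructed with a symbolic cross-sectional area ``A``:
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I, A = symbols('E, I, A')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I, A)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.shear_stress()
(-8*SingularityFunction(x, 0, 0) + 6*SingularityFunction(x, 10, 0) + 120*SingularityFunction(x, 30, -1) + 2*SingularityFunction(x, 30, 0))/A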
"""
return self.shear_force()/self._area
def plot_shear_force(self, subs=None):
"""
Returns a plot for Shear force present in the Beam object.
Parameters
==========
subs : dictionary
Python dictionary containing Symbols as key and their
corresponding values.
Examples
========
There is a beam of length 8 meters. A constant distributed load of 10 KN/m
is applied from half of the beam till the end. There are two simple supports
below the beam, one at the starting point and another at the ending point
of the beam. A pointload of magnitude 5 KN is also applied from top of the
beam, at a distance of 4 meters from the starting point.
Take E = 200 GPa and I = 400*(10**-6) meter**4.
Using the sign convention of downwards forces being positive.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(8, 200*(10**9), 400*(10**-6))
>>> b.apply_load(5000, 2, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 8, -1)
>>> b.apply_load(10000, 4, 0, end=8)
>>> b.bc_deflection = [(0, 0), (8, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.plot_shear_force()
Plot object containing:
[0]: cartesian line: -13750*SingularityFunction(x, 0, 0) + 5000*SingularityFunction(x, 2, 0)
+ 10000*SingularityFunction(x, 4, 1) - 31250*SingularityFunction(x, 8, 0)
- 10000*SingularityFunction(x, 8, 1) for x over (0.0, 8.0)
"""
shear_force = self.shear_force()
if subs is None:
subs = {}
for sym in shear_force.atoms(Symbol):
if sym == self.variable:
continue
if sym not in subs:
raise ValueError('Value of %s was not passed.' %sym)
if self.length in subs:
length = subs[self.length]
else:
length = self.length
return plot(shear_force.subs(subs), (self.variable, 0, length), title='Shear Force',
xlabel=r'$\mathrm{x}$', ylabel=r'$\mathrm{V}$', line_color='g')
def plot_bending_moment(self, subs=None):
"""
Returns a plot for Bending moment present in the Beam object.
Parameters
==========
subs : dictionary
Python dictionary containing Symbols as key and their
corresponding values.
Examples
========
There is a beam of length 8 meters. A constant distributed load of 10 KN/m
is applied from half of the beam till the end. There are two simple supports
below the beam, one at the starting point and another at the ending point
of the beam. A pointload of magnitude 5 KN is also applied from top of the
beam, at a distance of 4 meters from the starting point.
Take E = 200 GPa and I = 400*(10**-6) meter**4.
Using the sign convention of downwards forces being positive.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(8, 200*(10**9), 400*(10**-6))
>>> b.apply_load(5000, 2, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 8, -1)
>>> b.apply_load(10000, 4, 0, end=8)
>>> b.bc_deflection = [(0, 0), (8, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.plot_bending_moment()
Plot object containing:
[0]: cartesian line: -13750*SingularityFunction(x, 0, 1) + 5000*SingularityFunction(x, 2, 1)
+ 5000*SingularityFunction(x, 4, 2) - 31250*SingularityFunction(x, 8, 1)
- 5000*SingularityFunction(x, 8, 2) for x over (0.0, 8.0)
"""
bending_moment = self.bending_moment()
if subs is None:
subs = {}
for sym in bending_moment.atoms(Symbol):
if sym == self.variable:
continue
if sym not in subs:
raise ValueError('Value of %s was not passed.' %sym)
if self.length in subs:
length = subs[self.length]
else:
length = self.length
return plot(bending_moment.subs(subs), (self.variable, 0, length), title='Bending Moment',
xlabel=r'$\mathrm{x}$', ylabel=r'$\mathrm{M}$', line_color='b')
def plot_slope(self, subs=None):
"""
Returns a plot for slope of deflection curve of the Beam object.
Parameters
==========
subs : dictionary
Python dictionary containing Symbols as key and their
corresponding values.
Examples
========
There is a beam of length 8 meters. A constant distributed load of 10 KN/m
is applied from half of the beam till the end. There are two simple supports
below the beam, one at the starting point and another at the ending point
of the beam. A pointload of magnitude 5 KN is also applied from top of the
beam, at a distance of 4 meters from the starting point.
Take E = 200 GPa and I = 400*(10**-6) meter**4.
Using the sign convention of downwards forces being positive.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(8, 200*(10**9), 400*(10**-6))
>>> b.apply_load(5000, 2, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 8, -1)
>>> b.apply_load(10000, 4, 0, end=8)
>>> b.bc_deflection = [(0, 0), (8, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.plot_slope()
Plot object containing:
[0]: cartesian line: -8.59375e-5*SingularityFunction(x, 0, 2) + 3.125e-5*SingularityFunction(x, 2, 2)
+ 2.08333333333333e-5*SingularityFunction(x, 4, 3) - 0.0001953125*SingularityFunction(x, 8, 2)
- 2.08333333333333e-5*SingularityFunction(x, 8, 3) + 0.00138541666666667 for x over (0.0, 8.0)
"""
slope = self.slope()
if subs is None:
subs = {}
for sym in slope.atoms(Symbol):
if sym == self.variable:
continue
if sym not in subs:
raise ValueError('Value of %s was not passed.' %sym)
if self.length in subs:
length = subs[self.length]
else:
length = self.length
return plot(slope.subs(subs), (self.variable, 0, length), title='Slope',
xlabel=r'$\mathrm{x}$', ylabel=r'$\theta$', line_color='m')
def plot_deflection(self, subs=None):
"""
Returns a plot for deflection curve of the Beam object.
Parameters
==========
subs : dictionary
Python dictionary containing Symbols as key and their
corresponding values.
Examples
========
There is a beam of length 8 meters. A constant distributed load of 10 KN/m
is applied from half of the beam till the end. There are two simple supports
below the beam, one at the starting point and another at the ending point
of the beam. A pointload of magnitude 5 KN is also applied from top of the
beam, at a distance of 4 meters from the starting point.
Take E = 200 GPa and I = 400*(10**-6) meter**4.
Using the sign convention of downwards forces being positive.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(8, 200*(10**9), 400*(10**-6))
>>> b.apply_load(5000, 2, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 8, -1)
>>> b.apply_load(10000, 4, 0, end=8)
>>> b.bc_deflection = [(0, 0), (8, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.plot_deflection()
Plot object containing:
[0]: cartesian line: 0.00138541666666667*x - 2.86458333333333e-5*SingularityFunction(x, 0, 3)
+ 1.04166666666667e-5*SingularityFunction(x, 2, 3) + 5.20833333333333e-6*SingularityFunction(x, 4, 4)
- 6.51041666666667e-5*SingularityFunction(x, 8, 3) - 5.20833333333333e-6*SingularityFunction(x, 8, 4)
for x over (0.0, 8.0)
"""
deflection = self.deflection()
if subs is None:
subs = {}
for sym in deflection.atoms(Symbol):
if sym == self.variable:
continue
if sym not in subs:
raise ValueError('Value of %s was not passed.' %sym)
if self.length in subs:
length = subs[self.length]
else:
length = self.length
return plot(deflection.subs(subs), (self.variable, 0, length),
title='Deflection', xlabel=r'$\mathrm{x}$', ylabel=r'$\delta$',
line_color='r')
def plot_loading_results(self, subs=None):
"""
Returns a subplot of Shear Force, Bending Moment,
Slope and Deflection of the Beam object.
Parameters
==========
subs : dictionary
Python dictionary containing Symbols as key and their
corresponding values.
Examples
========
There is a beam of length 8 meters. A constant distributed load of 10 KN/m
is applied from half of the beam till the end. There are two simple supports
below the beam, one at the starting point and another at the ending point
of the beam. A pointload of magnitude 5 KN is also applied from top of the
beam, at a distance of 4 meters from the starting point.
Take E = 200 GPa and I = 400*(10**-6) meter**4.
Using the sign convention of downwards forces being positive.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(8, 200*(10**9), 400*(10**-6))
>>> b.apply_load(5000, 2, -1)
>>> b.apply_load(R1, 0, -1)
>>> b.apply_load(R2, 8, -1)
>>> b.apply_load(10000, 4, 0, end=8)
>>> b.bc_deflection = [(0, 0), (8, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> axes = b.plot_loading_results()
"""
length = self.length
variable = self.variable
if subs is None:
subs = {}
for sym in self.deflection().atoms(Symbol):
if sym == self.variable:
continue
if sym not in subs:
raise ValueError('Value of %s was not passed.' %sym)
if self.length in subs:
length = subs[self.length]
else:
length = self.length
ax1 = plot(self.shear_force().subs(subs), (variable, 0, length),
title="Shear Force", xlabel=r'$\mathrm{x}$', ylabel=r'$\mathrm{V}$',
line_color='g', show=False)
ax2 = plot(self.bending_moment().subs(subs), (variable, 0, length),
title="Bending Moment", xlabel=r'$\mathrm{x}$', ylabel=r'$\mathrm{M}$',
line_color='b', show=False)
ax3 = plot(self.slope().subs(subs), (variable, 0, length),
title="Slope", xlabel=r'$\mathrm{x}$', ylabel=r'$\theta$',
line_color='m', show=False)
ax4 = plot(self.deflection().subs(subs), (variable, 0, length),
title="Deflection", xlabel=r'$\mathrm{x}$', ylabel=r'$\delta$',
line_color='r', show=False)
return PlotGrid(4, 1, ax1, ax2, ax3, ax4)
@doctest_depends_on(modules=('numpy',))
def draw(self, pictorial=True):
"""
Returns a plot object representing the beam diagram of the beam.
.. note::
The user must be careful while entering load values.
The draw function assumes a sign convention which is used
for plotting loads.
Given a right handed coordinate system with XYZ coordinates,
the beam's length is assumed to be along the positive X axis.
The draw function recognizes positive loads (with n > -2) as loads
acting along the negative Y direction and positive moments as acting
along the positive Z direction.
Parameters
==========
pictorial: Boolean (default=True)
Setting ``pictorial=True`` creates a schematic (scaled) view of the
beam diagram rather than one drawn to exact dimensions, whereas
setting ``pictorial=False`` creates a beam diagram with the exact
dimensions on the plot.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> R1, R2 = symbols('R1, R2')
>>> E, I = symbols('E, I')
>>> b = Beam(50, 20, 30)
>>> b.apply_load(10, 2, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(90, 5, 0, 23)
>>> b.apply_load(10, 30, 1, 50)
>>> b.apply_support(50, "pin")
>>> b.apply_support(0, "fixed")
>>> b.apply_support(20, "roller")
>>> p = b.draw()
>>> p
Plot object containing:
[0]: cartesian line: 25*SingularityFunction(x, 5, 0) - 25*SingularityFunction(x, 23, 0)
+ SingularityFunction(x, 30, 1) - 20*SingularityFunction(x, 50, 0)
- SingularityFunction(x, 50, 1) + 5 for x over (0.0, 50.0)
[1]: cartesian line: 5 for x over (0.0, 50.0)
>>> p.show()
"""
if not numpy:
raise ImportError("To use this function numpy module is required")
x = self.variable
# checking whether length is an expression in terms of any Symbol.
from sympy import Expr
if isinstance(self.length, Expr):
l = list(self.length.atoms(Symbol))
# assigning every Symbol a default value of 10
l = {i:10 for i in l}
length = self.length.subs(l)
else:
l = {}
length = self.length
height = length/10
rectangles = []
rectangles.append({'xy':(0, 0), 'width':length, 'height': height, 'facecolor':"brown"})
annotations, markers, load_eq,load_eq1, fill = self._draw_load(pictorial, length, l)
support_markers, support_rectangles = self._draw_supports(length, l)
rectangles += support_rectangles
markers += support_markers
sing_plot = plot(height + load_eq, height + load_eq1, (x, 0, length),
xlim=(-height, length + height), ylim=(-length, 1.25*length), annotations=annotations,
markers=markers, rectangles=rectangles, line_color='brown', fill=fill, axis=False, show=False)
return sing_plot
def _draw_load(self, pictorial, length, l):
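# Builds the plot ingredients for the applied loads: arrow annotations
# for point loads, circular-arrow markers for moment loads and, for
# loads of order >= 0, scaled SingularityFunction curves for the
# positive and negative loads together with a fill specification used
# to shade them.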
loads = list(set(self.applied_loads) - set(self._support_as_loads))
height = length/10
x = self.variable
annotations = []
markers = []
load_args = []
scaled_load = 0
load_args1 = []
scaled_load1 = 0
load_eq = 0 # For positive valued higher order loads
load_eq1 = 0 # For negative valued higher order loads
fill = None
plus = 0 # For positive valued higher order loads
minus = 0 # For negative valued higher order loads
for load in loads:
# check if the position of load is in terms of the beam length.
if l:
pos = load[1].subs(l)
else:
pos = load[1]
# point loads
if load[2] == -1:
if isinstance(load[0], Symbol) or load[0].is_negative:
annotations.append({'s':'', 'xy':(pos, 0), 'xytext':(pos, height - 4*height), 'arrowprops':dict(width= 1.5, headlength=5, headwidth=5, facecolor='black')})
else:
annotations.append({'s':'', 'xy':(pos, height), 'xytext':(pos, height*4), 'arrowprops':dict(width= 1.5, headlength=4, headwidth=4, facecolor='black')})
# moment loads
elif load[2] == -2:
if load[0].is_negative:
markers.append({'args':[[pos], [height/2]], 'marker': r'$\circlearrowright$', 'markersize':15})
else:
markers.append({'args':[[pos], [height/2]], 'marker': r'$\circlearrowleft$', 'markersize':15})
# higher order loads
elif load[2] >= 0:
# `fill` will be assigned only when higher order loads are present
value, start, order, end = load
# Positive loads have their separate equations
if(value>0):
plus = 1
# if pictorial is True we remake the load equation again with
# some constant magnitude values.
if pictorial:
value = 10**(1-order) if order > 0 else length/2
scaled_load += value*SingularityFunction(x, start, order)
if end:
f2 = 10**(1-order)*x**order if order > 0 else length/2*x**order
for i in range(0, order + 1):
scaled_load -= (f2.diff(x, i).subs(x, end - start)*
SingularityFunction(x, end, i)/factorial(i))
if pictorial:
if isinstance(scaled_load, Add):
load_args = scaled_load.args
else:
# when the load equation consists of only a single term
load_args = (scaled_load,)
load_eq = [i.subs(l) for i in load_args]
else:
if isinstance(self.load, Add):
load_args = self.load.args
else:
load_args = (self.load,)
load_eq = [i.subs(l) for i in load_args if list(i.atoms(SingularityFunction))[0].args[2] >= 0]
load_eq = Add(*load_eq)
# filling higher order loads with colour
expr = height + load_eq.rewrite(Piecewise)
y1 = lambdify(x, expr, 'numpy')
# For loads with negative value
else:
minus = 1
# if pictorial is True we remake the load equation again with
# some constant magnitude values.
if pictorial:
value = 10**(1-order) if order > 0 else length/2
scaled_load1 += value*SingularityFunction(x, start, order)
if end:
f2 = 10**(1-order)*x**order if order > 0 else length/2*x**order
for i in range(0, order + 1):
scaled_load1 -= (f2.diff(x, i).subs(x, end - start)*
SingularityFunction(x, end, i)/factorial(i))
if pictorial:
if isinstance(scaled_load1, Add):
load_args1 = scaled_load1.args
else:
# when the load equation consists of only a single term
load_args1 = (scaled_load1,)
load_eq1 = [i.subs(l) for i in load_args1]
else:
if isinstance(self.load, Add):
load_args1 = self.load.args
else:
load_args1 = (self.load,)
load_eq1 = [i.subs(l) for i in load_args1 if list(i.atoms(SingularityFunction))[0].args[2] >= 0]
load_eq1 = -Add(*load_eq1)-height
# filling higher order loads with colour
expr = height + load_eq1.rewrite(Piecewise)
y1_ = lambdify(x, expr, 'numpy')
y = numpy.arange(0, float(length), 0.001)
y2 = float(height)
if(plus == 1 and minus == 1):
fill = {'x': y, 'y1': y1(y), 'y2': y1_(y), 'color':'darkkhaki'}
elif(plus == 1):
fill = {'x': y, 'y1': y1(y), 'y2': y2, 'color':'darkkhaki'}
else:
fill = {'x': y, 'y1': y1_(y), 'y2': y2 , 'color':'darkkhaki'}
return annotations, markers, load_eq, load_eq1, fill
def _draw_supports(self, length, l):
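# Builds marker specifications for "pin" and "roller" supports and a
# hatched rectangle at the corresponding beam end for "fixed" supports.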
height = float(length/10)
support_markers = []
support_rectangles = []
for support in self._applied_supports:
if l:
pos = support[0].subs(l)
else:
pos = support[0]
if support[1] == "pin":
support_markers.append({'args':[pos, [0]], 'marker':6, 'markersize':13, 'color':"black"})
elif support[1] == "roller":
support_markers.append({'args':[pos, [-height/2.5]], 'marker':'o', 'markersize':11, 'color':"black"})
elif support[1] == "fixed":
if pos == 0:
support_rectangles.append({'xy':(0, -3*height), 'width':-length/20, 'height':6*height + height, 'fill':False, 'hatch':'/////'})
else:
support_rectangles.append({'xy':(length, -3*height), 'width':length/20, 'height': 6*height + height, 'fill':False, 'hatch':'/////'})
return support_markers, support_rectangles
class Beam3D(Beam):
"""
This class handles loads applied in any direction of a 3D space along
with unequal values of Second moment along different axes.
.. note::
While solving a beam bending problem, a user should choose its
own sign convention and should stick to it. The results will
automatically follow the chosen sign convention.
This class assumes that any kind of distributed load/moment is
applied throughout the span of a beam.
Examples
========
There is a beam of length l meters. A constant distributed load of magnitude q
is applied along the y-axis from the start till the end of the beam. A constant
distributed moment of magnitude m is also applied along the z-axis from the
start till the end of the beam. The beam is fixed at both of its ends, so the
deflection of the beam at both ends is restricted.
>>> from sympy.physics.continuum_mechanics.beam import Beam3D
>>> from sympy import symbols, simplify, collect, factor
>>> l, E, G, I, A = symbols('l, E, G, I, A')
>>> b = Beam3D(l, E, G, I, A)
>>> x, q, m = symbols('x, q, m')
>>> b.apply_load(q, 0, 0, dir="y")
>>> b.apply_moment_load(m, 0, -1, dir="z")
>>> b.shear_force()
[0, -q*x, 0]
>>> b.bending_moment()
[0, 0, -m*x + q*x**2/2]
>>> b.bc_slope = [(0, [0, 0, 0]), (l, [0, 0, 0])]
>>> b.bc_deflection = [(0, [0, 0, 0]), (l, [0, 0, 0])]
>>> b.solve_slope_deflection()
>>> factor(b.slope())
[0, 0, x*(-l + x)*(-A*G*l**3*q + 2*A*G*l**2*q*x - 12*E*I*l*q
- 72*E*I*m + 24*E*I*q*x)/(12*E*I*(A*G*l**2 + 12*E*I))]
>>> dx, dy, dz = b.deflection()
>>> dy = collect(simplify(dy), x)
>>> dx == dz == 0
True
>>> dy == (x*(12*A*E*G*I*l**3*q - 24*A*E*G*I*l**2*m + 144*E**2*I**2*l*q +
... x**3*(A**2*G**2*l**2*q + 12*A*E*G*I*q) +
... x**2*(-2*A**2*G**2*l**3*q - 24*A*E*G*I*l*q - 48*A*E*G*I*m) +
... x*(A**2*G**2*l**4*q + 72*A*E*G*I*l*m - 144*E**2*I**2*q)
... )/(24*A*E*G*I*(A*G*l**2 + 12*E*I)))
True
References
==========
.. [1] http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf
"""
def __init__(self, length, elastic_modulus, shear_modulus , second_moment, area, variable=Symbol('x')):
"""Initializes the class.
Parameters
==========
length : Sympifyable
A Symbol or value representing the Beam's length.
elastic_modulus : Sympifyable
A SymPy expression representing the Beam's Modulus of Elasticity.
It is a measure of the stiffness of the Beam material.
shear_modulus : Sympifyable
A SymPy expression representing the Beam's Modulus of rigidity.
It is a measure of rigidity of the Beam material.
second_moment : Sympifyable or list
A list of two elements having SymPy expressions representing the
Beam's second moment of area. The first value represents the second
moment about the y-axis and the second about the z-axis. A single
SymPy expression can be passed if both values are the same.
area : Sympifyable
A SymPy expression representing the Beam's cross-sectional area
in a plane perpendicular to the length of the Beam.
variable : Symbol, optional
A Symbol object that will be used as the variable along the beam
while representing the load, shear, moment, slope and deflection
curve. By default, it is set to ``Symbol('x')``.
"""
super().__init__(length, elastic_modulus, second_moment, variable)
self.shear_modulus = shear_modulus
self._area = area
self._load_vector = [0, 0, 0]
self._moment_load_vector = [0, 0, 0]
self._load_Singularity = [0, 0, 0]
self._slope = [0, 0, 0]
self._deflection = [0, 0, 0]
@property
def shear_modulus(self):
"""Young's Modulus of the Beam. """
return self._shear_modulus
@shear_modulus.setter
def shear_modulus(self, e):
self._shear_modulus = sympify(e)
@property
def second_moment(self):
"""Second moment of area of the Beam. """
return self._second_moment
@second_moment.setter
def second_moment(self, i):
if isinstance(i, list):
i = [sympify(x) for x in i]
self._second_moment = i
else:
self._second_moment = sympify(i)
@property
def area(self):
"""Cross-sectional area of the Beam. """
return self._area
@area.setter
def area(self, a):
self._area = sympify(a)
@property
def load_vector(self):
"""
Returns a three element list representing the load vector.
"""
return self._load_vector
@property
def moment_load_vector(self):
"""
Returns a three element list representing moment loads on Beam.
"""
return self._moment_load_vector
@property
def boundary_conditions(self):
"""
Returns a dictionary of boundary conditions applied on the beam.
        The dictionary has two keywords, namely ``slope`` and ``deflection``.
        The value of each keyword is a list of tuples, where each tuple
        contains the location and value of a boundary condition in the format
        (location, value). Further, each value is itself a list of the
        corresponding slope or deflection values along the three axes at that
        location.
Examples
========
        There is a beam of length 30 meters. The slope at 0 should be 4 along
        the x-axis and 0 along the others. At a distance of 4 meters from the
        start, the deflection along all the three axes should be zero.
>>> from sympy.physics.continuum_mechanics.beam import Beam3D
>>> from sympy import symbols
>>> l, E, G, I, A, x = symbols('l, E, G, I, A, x')
>>> b = Beam3D(30, E, G, I, A, x)
>>> b.bc_slope = [(0, (4, 0, 0))]
>>> b.bc_deflection = [(4, [0, 0, 0])]
>>> b.boundary_conditions
{'deflection': [(4, [0, 0, 0])], 'slope': [(0, (4, 0, 0))]}
Here the deflection of the beam should be ``0`` along all the three axes at ``4``.
Similarly, the slope of the beam should be ``4`` along x-axis and ``0``
along y and z axis at ``0``.
"""
return self._boundary_conditions
def polar_moment(self):
"""
Returns the polar moment of area of the beam
about the X axis with respect to the centroid.
Examples
========
>>> from sympy.physics.continuum_mechanics.beam import Beam3D
>>> from sympy import symbols
>>> l, E, G, I, A = symbols('l, E, G, I, A')
>>> b = Beam3D(l, E, G, I, A)
>>> b.polar_moment()
2*I
>>> I1 = [9, 15]
>>> b = Beam3D(l, E, G, I1, A)
>>> b.polar_moment()
24
"""
if not iterable(self.second_moment):
return 2*self.second_moment
return sum(self.second_moment)
def apply_load(self, value, start, order, dir="y"):
"""
This method adds up the force load to a particular beam object.
Parameters
==========
value : Sympifyable
The magnitude of an applied load.
dir : String
Axis along which load is applied.
order : Integer
The order of the applied load.
- For point loads, order=-1
- For constant distributed load, order=0
- For ramp loads, order=1
- For parabolic ramp loads, order=2
- ... so on.
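        Examples
        ========
        A small illustrative sketch (symbolic magnitudes, mirroring the
        class-level example; ``P`` is just an arbitrary symbol here):
        >>> from sympy.physics.continuum_mechanics.beam import Beam3D
        >>> from sympy import symbols
        >>> l, E, G, I, A, P = symbols('l, E, G, I, A, P')
        >>> b = Beam3D(l, E, G, I, A)
        >>> b.apply_load(P, start=0, order=0, dir="y")
        >>> b.load_vector
        [0, P, 0]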
"""
x = self.variable
value = sympify(value)
start = sympify(start)
order = sympify(order)
if dir == "x":
if not order == -1:
self._load_vector[0] += value
self._load_Singularity[0] += value*SingularityFunction(x, start, order)
elif dir == "y":
if not order == -1:
self._load_vector[1] += value
self._load_Singularity[1] += value*SingularityFunction(x, start, order)
else:
if not order == -1:
self._load_vector[2] += value
self._load_Singularity[2] += value*SingularityFunction(x, start, order)
def apply_moment_load(self, value, start, order, dir="y"):
"""
This method adds up the moment loads to a particular beam object.
Parameters
==========
value : Sympifyable
The magnitude of an applied moment.
dir : String
Axis along which moment is applied.
order : Integer
The order of the applied load.
- For point moments, order=-2
- For constant distributed moment, order=-1
- For ramp moments, order=0
- For parabolic ramp moments, order=1
- ... so on.
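        Examples
        ========
        A small illustrative sketch of a uniformly distributed moment about
        the x-axis (``T`` is just an arbitrary symbol here):
        >>> from sympy.physics.continuum_mechanics.beam import Beam3D
        >>> from sympy import symbols
        >>> l, E, G, I, A, T = symbols('l, E, G, I, A, T')
        >>> b = Beam3D(l, E, G, I, A)
        >>> b.apply_moment_load(T, start=0, order=-1, dir="x")
        >>> b.moment_load_vector
        [T, 0, 0]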
"""
x = self.variable
value = sympify(value)
start = sympify(start)
order = sympify(order)
if dir == "x":
if not order == -2:
self._moment_load_vector[0] += value
self._load_Singularity[0] += value*SingularityFunction(x, start, order)
elif dir == "y":
if not order == -2:
self._moment_load_vector[1] += value
self._load_Singularity[0] += value*SingularityFunction(x, start, order)
else:
if not order == -2:
self._moment_load_vector[2] += value
self._load_Singularity[0] += value*SingularityFunction(x, start, order)
def apply_support(self, loc, type="fixed"):
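        """
        This method applies a support to a particular beam object.
        Parameters
        ==========
        loc : Sympifyable
            Location of the point at which the support is applied.
        type : String
            Type of the support being applied: ``"pin"``, ``"roller"`` or
            ``"fixed"`` (default). Pin and roller supports restrict deflection
            at ``loc``, while a fixed support restricts both deflection and
            slope and introduces a reaction moment as well.
        """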
if type == "pin" or type == "roller":
reaction_load = Symbol('R_'+str(loc))
self._reaction_loads[reaction_load] = reaction_load
self.bc_deflection.append((loc, [0, 0, 0]))
else:
reaction_load = Symbol('R_'+str(loc))
reaction_moment = Symbol('M_'+str(loc))
self._reaction_loads[reaction_load] = [reaction_load, reaction_moment]
self.bc_deflection.append((loc, [0, 0, 0]))
self.bc_slope.append((loc, [0, 0, 0]))
def solve_for_reaction_loads(self, *reaction):
"""
Solves for the reaction forces.
Examples
========
        There is a beam of length 30 meters. It is supported by rollers at
        both of its ends. A constant distributed load of magnitude 8 N is
        applied from the start till its end along the y-axis. Another linear
        load having slope equal to 9 is applied along the z-axis.
>>> from sympy.physics.continuum_mechanics.beam import Beam3D
>>> from sympy import symbols
>>> l, E, G, I, A, x = symbols('l, E, G, I, A, x')
>>> b = Beam3D(30, E, G, I, A, x)
>>> b.apply_load(8, start=0, order=0, dir="y")
>>> b.apply_load(9*x, start=0, order=0, dir="z")
>>> b.bc_deflection = [(0, [0, 0, 0]), (30, [0, 0, 0])]
>>> R1, R2, R3, R4 = symbols('R1, R2, R3, R4')
>>> b.apply_load(R1, start=0, order=-1, dir="y")
>>> b.apply_load(R2, start=30, order=-1, dir="y")
>>> b.apply_load(R3, start=0, order=-1, dir="z")
>>> b.apply_load(R4, start=30, order=-1, dir="z")
>>> b.solve_for_reaction_loads(R1, R2, R3, R4)
>>> b.reaction_loads
{R1: -120, R2: -120, R3: -1350, R4: -2700}
"""
x = self.variable
l = self.length
q = self._load_Singularity
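        # Integrate each load singularity function once for the shear curve
        # and once more for the moment curve.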
shear_curves = [integrate(load, x) for load in q]
moment_curves = [integrate(shear, x) for shear in shear_curves]
for i in range(3):
react = [r for r in reaction if (shear_curves[i].has(r) or moment_curves[i].has(r))]
if len(react) == 0:
continue
shear_curve = limit(shear_curves[i], x, l)
moment_curve = limit(moment_curves[i], x, l)
sol = list((linsolve([shear_curve, moment_curve], react).args)[0])
sol_dict = dict(zip(react, sol))
reaction_loads = self._reaction_loads
            # Check if any of the evaluated reactions exists in another
            # direction; if it does, it should have the same value.
for key in sol_dict:
if key in reaction_loads and sol_dict[key] != reaction_loads[key]:
raise ValueError("Ambiguous solution for %s in different directions." % key)
self._reaction_loads.update(sol_dict)
def shear_force(self):
"""
Returns a list of three expressions which represents the shear force
curve of the Beam object along all three axes.
"""
x = self.variable
q = self._load_vector
return [integrate(-q[0], x), integrate(-q[1], x), integrate(-q[2], x)]
def axial_force(self):
"""
        Returns the expression of the axial force present inside the Beam object.
"""
return self.shear_force()[0]
def shear_stress(self):
"""
Returns a list of three expressions which represents the shear stress
curve of the Beam object along all three axes.
"""
return [self.shear_force()[0]/self._area, self.shear_force()[1]/self._area, self.shear_force()[2]/self._area]
def axial_stress(self):
"""
Returns expression of Axial stress present inside the Beam object.
"""
return self.axial_force()/self._area
def bending_moment(self):
"""
Returns a list of three expressions which represents the bending moment
curve of the Beam object along all three axes.
"""
x = self.variable
m = self._moment_load_vector
shear = self.shear_force()
return [integrate(-m[0], x), integrate(-m[1] + shear[2], x),
integrate(-m[2] - shear[1], x) ]
def torsional_moment(self):
"""
Returns expression of Torsional moment present inside the Beam object.
"""
return self.bending_moment()[0]
def solve_slope_deflection(self):
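        """
        Solves for the slope and deflection curves of the beam along all the
        three axes and stores them internally; they can then be retrieved via
        ``slope()`` and ``deflection()``. The constants of integration are
        determined by requiring zero slope and deflection at both ends of the
        beam, as in the class-level example.
        """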
from sympy import dsolve, Function, Derivative, Eq
x = self.variable
l = self.length
E = self.elastic_modulus
G = self.shear_modulus
I = self.second_moment
if isinstance(I, list):
I_y, I_z = I[0], I[1]
else:
I_y = I_z = I
A = self._area
load = self._load_vector
moment = self._moment_load_vector
defl = Function('defl')
theta = Function('theta')
# Finding deflection along x-axis(and corresponding slope value by differentiating it)
# Equation used: Derivative(E*A*Derivative(def_x(x), x), x) + load_x = 0
eq = Derivative(E*A*Derivative(defl(x), x), x) + load[0]
def_x = dsolve(Eq(eq, 0), defl(x)).args[1]
# Solving constants originated from dsolve
C1 = Symbol('C1')
C2 = Symbol('C2')
constants = list((linsolve([def_x.subs(x, 0), def_x.subs(x, l)], C1, C2).args)[0])
def_x = def_x.subs({C1:constants[0], C2:constants[1]})
slope_x = def_x.diff(x)
self._deflection[0] = def_x
self._slope[0] = slope_x
# Finding deflection along y-axis and slope across z-axis. System of equation involved:
# 1: Derivative(E*I_z*Derivative(theta_z(x), x), x) + G*A*(Derivative(defl_y(x), x) - theta_z(x)) + moment_z = 0
# 2: Derivative(G*A*(Derivative(defl_y(x), x) - theta_z(x)), x) + load_y = 0
C_i = Symbol('C_i')
# Substitute value of `G*A*(Derivative(defl_y(x), x) - theta_z(x))` from (2) in (1)
eq1 = Derivative(E*I_z*Derivative(theta(x), x), x) + (integrate(-load[1], x) + C_i) + moment[2]
slope_z = dsolve(Eq(eq1, 0)).args[1]
# Solve for constants originated from using dsolve on eq1
constants = list((linsolve([slope_z.subs(x, 0), slope_z.subs(x, l)], C1, C2).args)[0])
slope_z = slope_z.subs({C1:constants[0], C2:constants[1]})
# Put value of slope obtained back in (2) to solve for `C_i` and find deflection across y-axis
eq2 = G*A*(Derivative(defl(x), x)) + load[1]*x - C_i - G*A*slope_z
def_y = dsolve(Eq(eq2, 0), defl(x)).args[1]
# Solve for constants originated from using dsolve on eq2
constants = list((linsolve([def_y.subs(x, 0), def_y.subs(x, l)], C1, C_i).args)[0])
self._deflection[1] = def_y.subs({C1:constants[0], C_i:constants[1]})
self._slope[2] = slope_z.subs(C_i, constants[1])
# Finding deflection along z-axis and slope across y-axis. System of equation involved:
# 1: Derivative(E*I_y*Derivative(theta_y(x), x), x) - G*A*(Derivative(defl_z(x), x) + theta_y(x)) + moment_y = 0
# 2: Derivative(G*A*(Derivative(defl_z(x), x) + theta_y(x)), x) + load_z = 0
# Substitute value of `G*A*(Derivative(defl_y(x), x) + theta_z(x))` from (2) in (1)
eq1 = Derivative(E*I_y*Derivative(theta(x), x), x) + (integrate(load[2], x) - C_i) + moment[1]
slope_y = dsolve(Eq(eq1, 0)).args[1]
# Solve for constants originated from using dsolve on eq1
constants = list((linsolve([slope_y.subs(x, 0), slope_y.subs(x, l)], C1, C2).args)[0])
slope_y = slope_y.subs({C1:constants[0], C2:constants[1]})
# Put value of slope obtained back in (2) to solve for `C_i` and find deflection across z-axis
eq2 = G*A*(Derivative(defl(x), x)) + load[2]*x - C_i + G*A*slope_y
def_z = dsolve(Eq(eq2,0)).args[1]
# Solve for constants originated from using dsolve on eq2
constants = list((linsolve([def_z.subs(x, 0), def_z.subs(x, l)], C1, C_i).args)[0])
self._deflection[2] = def_z.subs({C1:constants[0], C_i:constants[1]})
self._slope[1] = slope_y.subs(C_i, constants[1])
def slope(self):
"""
Returns a three element list representing slope of deflection curve
along all the three axes.
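        The list is ``[0, 0, 0]`` until ``solve_slope_deflection()`` has been
        called.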
"""
return self._slope
def deflection(self):
"""
Returns a three element list representing deflection curve along all
the three axes.
"""
return self._deflection
|
0cd4b90a5574e5561ef5909080c9b41e2108b5f3df59b42a05573dc4ac0c133c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The module implements routines to model the polarization of optical fields
and can be used to calculate the effects of polarization optical elements on
the fields.
- Jones vectors.
- Stokes vectors.
- Jones matrices.
- Mueller matrices.
Examples
--------
We calculate a generic Jones vector:
>>> from sympy import symbols, pprint, zeros, simplify
>>> from sympy.physics.optics.polarization import (jones_vector, stokes_vector,
... half_wave_retarder, polarizing_beam_splitter, jones_2_stokes)
>>> psi, chi, p, I0 = symbols("psi, chi, p, I0", real=True)
>>> x0 = jones_vector(psi, chi)
>>> pprint(x0, use_unicode=True)
⎡-ⅈ⋅sin(χ)⋅sin(ψ) + cos(χ)⋅cos(ψ)⎤
⎢ ⎥
⎣ⅈ⋅sin(χ)⋅cos(ψ) + sin(ψ)⋅cos(χ) ⎦
And the more general Stokes vector:
>>> s0 = stokes_vector(psi, chi, p, I0)
>>> pprint(s0, use_unicode=True)
⎡ I₀ ⎤
⎢ ⎥
⎢I₀⋅p⋅cos(2⋅χ)⋅cos(2⋅ψ)⎥
⎢ ⎥
⎢I₀⋅p⋅sin(2⋅ψ)⋅cos(2⋅χ)⎥
⎢ ⎥
⎣ I₀⋅p⋅sin(2⋅χ) ⎦
We calculate how the Jones vector is modified by a half-wave plate:
>>> alpha = symbols("alpha", real=True)
>>> HWP = half_wave_retarder(alpha)
>>> x1 = simplify(HWP*x0)
We calculate the very common operation of passing a beam through a half-wave
plate and then through a polarizing beam-splitter. We do this by putting this
Jones vector as the first entry of a two-Jones-vector state that is transformed
by a 4x4 Jones matrix modelling the polarizing beam-splitter to get the
transmitted and reflected Jones vectors:
>>> PBS = polarizing_beam_splitter()
>>> X1 = zeros(4, 1)
>>> X1[:2, :] = x1
>>> X2 = PBS*X1
>>> transmitted_port = X2[:2, :]
>>> reflected_port = X2[2:, :]
This allows us to calculate how the power in both ports depends on the initial
polarization:
>>> transmitted_power = jones_2_stokes(transmitted_port)[0]
>>> reflected_power = jones_2_stokes(reflected_port)[0]
>>> print(transmitted_power)
cos(-2*alpha + chi + psi)**2/2 + cos(2*alpha + chi - psi)**2/2
>>> print(reflected_power)
sin(-2*alpha + chi + psi)**2/2 + sin(2*alpha + chi - psi)**2/2
Please see the description of the individual functions for further
details and examples.
References
==========
.. [1] https://en.wikipedia.org/wiki/Jones_calculus
.. [2] https://en.wikipedia.org/wiki/Mueller_calculus
.. [3] https://en.wikipedia.org/wiki/Stokes_parameters
"""
from sympy import sin, cos, exp, I, pi, sqrt, Matrix, Abs, re, im, simplify
from sympy.physics.quantum import TensorProduct
def jones_vector(psi, chi):
"""A Jones vector corresponding to a polarization ellipse with `psi` tilt,
and `chi` circularity.
Parameters
----------
``psi`` : numeric type or sympy Symbol
The tilt of the polarization relative to the `x` axis.
``chi`` : numeric type or sympy Symbol
        The angle adjacent to the major axis of the polarization ellipse.
Returns
-------
Matrix
A Jones vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, symbols, pi
>>> from sympy.physics.optics.polarization import jones_vector
>>> psi, chi = symbols("psi, chi", real=True)
A general Jones vector.
>>> pprint(jones_vector(psi, chi), use_unicode=True)
⎡-ⅈ⋅sin(χ)⋅sin(ψ) + cos(χ)⋅cos(ψ)⎤
⎢ ⎥
⎣ⅈ⋅sin(χ)⋅cos(ψ) + sin(ψ)⋅cos(χ) ⎦
Horizontal polarization.
>>> pprint(jones_vector(0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎣0⎦
Vertical polarization.
>>> pprint(jones_vector(pi/2, 0), use_unicode=True)
⎡0⎤
⎢ ⎥
⎣1⎦
Diagonal polarization.
>>> pprint(jones_vector(pi/4, 0), use_unicode=True)
⎡√2⎤
⎢──⎥
⎢2 ⎥
⎢ ⎥
⎢√2⎥
⎢──⎥
⎣2 ⎦
Anti-diagonal polarization.
>>> pprint(jones_vector(-pi/4, 0), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2 ⎥
⎢────⎥
⎣ 2 ⎦
Right-hand circular polarization.
>>> pprint(jones_vector(0, pi/4), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢√2⋅ⅈ⎥
⎢────⎥
⎣ 2 ⎦
Left-hand circular polarization.
>>> pprint(jones_vector(0, -pi/4), use_unicode=True)
⎡ √2 ⎤
⎢ ── ⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2⋅ⅈ ⎥
⎢──────⎥
⎣ 2 ⎦
"""
return Matrix([-I*sin(chi)*sin(psi) + cos(chi)*cos(psi),
I*sin(chi)*cos(psi) + sin(psi)*cos(chi)])
def stokes_vector(psi, chi, p=1, I=1):
"""A Stokes vector corresponding to a polarization ellipse with `psi`
tilt, and `chi` circularity.
Parameters
----------
``psi`` : numeric type or sympy Symbol
The tilt of the polarization relative to the `x` axis.
``chi`` : numeric type or sympy Symbol
        The angle adjacent to the major axis of the polarization ellipse.
``p`` : numeric type or sympy Symbol
The degree of polarization.
``I`` : numeric type or sympy Symbol
The intensity of the field.
Returns
-------
Matrix
A Stokes vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, symbols, pi
>>> from sympy.physics.optics.polarization import stokes_vector
>>> psi, chi, p, I = symbols("psi, chi, p, I", real=True)
>>> pprint(stokes_vector(psi, chi, p, I), use_unicode=True)
⎡ I ⎤
⎢ ⎥
⎢I⋅p⋅cos(2⋅χ)⋅cos(2⋅ψ)⎥
⎢ ⎥
⎢I⋅p⋅sin(2⋅ψ)⋅cos(2⋅χ)⎥
⎢ ⎥
⎣ I⋅p⋅sin(2⋅χ) ⎦
Horizontal polarization
>>> pprint(stokes_vector(0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢1⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣0⎦
Vertical polarization
>>> pprint(stokes_vector(pi/2, 0), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢-1⎥
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎣0 ⎦
Diagonal polarization
>>> pprint(stokes_vector(pi/4, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢1⎥
⎢ ⎥
⎣0⎦
Anti-diagonal polarization
>>> pprint(stokes_vector(-pi/4, 0), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎢-1⎥
⎢ ⎥
⎣0 ⎦
Right-hand circular polarization
>>> pprint(stokes_vector(0, pi/4), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣1⎦
Left-hand circular polarization
>>> pprint(stokes_vector(0, -pi/4), use_unicode=True)
⎡1 ⎤
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎢0 ⎥
⎢ ⎥
⎣-1⎦
Unpolarized light
>>> pprint(stokes_vector(0, 0, 0), use_unicode=True)
⎡1⎤
⎢ ⎥
⎢0⎥
⎢ ⎥
⎢0⎥
⎢ ⎥
⎣0⎦
"""
S0 = I
S1 = I*p*cos(2*psi)*cos(2*chi)
S2 = I*p*sin(2*psi)*cos(2*chi)
S3 = I*p*sin(2*chi)
return Matrix([S0, S1, S2, S3])
def jones_2_stokes(e):
"""Return the Stokes vector for a Jones vector `e`.
Parameters
----------
``e`` : sympy Matrix
A Jones vector.
Returns
-------
sympy Matrix
        A Stokes vector.
Examples
--------
The axes on the Poincaré sphere.
>>> from sympy import pprint, pi
>>> from sympy.physics.optics.polarization import jones_vector
>>> from sympy.physics.optics.polarization import jones_2_stokes
>>> H = jones_vector(0, 0)
>>> V = jones_vector(pi/2, 0)
>>> D = jones_vector(pi/4, 0)
>>> A = jones_vector(-pi/4, 0)
>>> R = jones_vector(0, pi/4)
>>> L = jones_vector(0, -pi/4)
>>> pprint([jones_2_stokes(e) for e in [H, V, D, A, R, L]],
... use_unicode=True)
⎡⎡1⎤ ⎡1 ⎤ ⎡1⎤ ⎡1 ⎤ ⎡1⎤ ⎡1 ⎤⎤
⎢⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥⎥
⎢⎢1⎥ ⎢-1⎥ ⎢0⎥ ⎢0 ⎥ ⎢0⎥ ⎢0 ⎥⎥
⎢⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥, ⎢ ⎥⎥
⎢⎢0⎥ ⎢0 ⎥ ⎢1⎥ ⎢-1⎥ ⎢0⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥ ⎢ ⎥⎥
⎣⎣0⎦ ⎣0 ⎦ ⎣0⎦ ⎣0 ⎦ ⎣1⎦ ⎣-1⎦⎦
"""
ex, ey = e
return Matrix([Abs(ex)**2 + Abs(ey)**2,
Abs(ex)**2 - Abs(ey)**2,
2*re(ex*ey.conjugate()),
-2*im(ex*ey.conjugate())])
def linear_polarizer(theta=0):
"""A linear polarizer Jones matrix with transmission axis at
an angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the transmission axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the polarizer.
Examples
--------
A generic polarizer.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import linear_polarizer
>>> theta = symbols("theta", real=True)
>>> J = linear_polarizer(theta)
>>> pprint(J, use_unicode=True)
⎡ 2 ⎤
⎢ cos (θ) sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ 2 ⎥
⎣sin(θ)⋅cos(θ) sin (θ) ⎦
"""
M = Matrix([[cos(theta)**2, sin(theta)*cos(theta)],
[sin(theta)*cos(theta), sin(theta)**2]])
return M
def phase_retarder(theta=0, delta=0):
"""A phase retarder Jones matrix with retardance `delta` at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
``delta`` : numeric type or sympy Symbol
The phase difference between the fast and slow axes of the
transmitted light.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic retarder.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import phase_retarder
>>> theta, delta = symbols("theta, delta", real=True)
>>> R = phase_retarder(theta, delta)
>>> pprint(R, use_unicode=True)
⎡ -ⅈ⋅δ -ⅈ⋅δ ⎤
⎢ ───── ───── ⎥
⎢⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎛ ⅈ⋅δ⎞ 2 ⎥
⎢⎝ℯ ⋅sin (θ) + cos (θ)⎠⋅ℯ ⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ -ⅈ⋅δ -ⅈ⋅δ ⎥
⎢ ───── ─────⎥
⎢⎛ ⅈ⋅δ⎞ 2 ⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎥
⎣⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝ℯ ⋅cos (θ) + sin (θ)⎠⋅ℯ ⎦
"""
R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,
(1-exp(I*delta))*cos(theta)*sin(theta)],
[(1-exp(I*delta))*cos(theta)*sin(theta),
sin(theta)**2 + exp(I*delta)*cos(theta)**2]])
return R*exp(-I*delta/2)
def half_wave_retarder(theta):
"""A half-wave retarder Jones matrix at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic half-wave plate.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import half_wave_retarder
>>> theta= symbols("theta", real=True)
>>> HWP = half_wave_retarder(theta)
>>> pprint(HWP, use_unicode=True)
⎡ ⎛ 2 2 ⎞ ⎤
⎢-ⅈ⋅⎝- sin (θ) + cos (θ)⎠ -2⋅ⅈ⋅sin(θ)⋅cos(θ) ⎥
⎢ ⎥
⎢ ⎛ 2 2 ⎞⎥
⎣ -2⋅ⅈ⋅sin(θ)⋅cos(θ) -ⅈ⋅⎝sin (θ) - cos (θ)⎠⎦
"""
return phase_retarder(theta, pi)
def quarter_wave_retarder(theta):
"""A quarter-wave retarder Jones matrix at angle `theta`.
Parameters
----------
``theta`` : numeric type or sympy Symbol
The angle of the fast axis relative to the horizontal plane.
Returns
-------
sympy Matrix
A Jones matrix representing the retarder.
Examples
--------
A generic quarter-wave plate.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import quarter_wave_retarder
>>> theta= symbols("theta", real=True)
>>> QWP = quarter_wave_retarder(theta)
>>> pprint(QWP, use_unicode=True)
⎡ -ⅈ⋅π -ⅈ⋅π ⎤
⎢ ───── ───── ⎥
⎢⎛ 2 2 ⎞ 4 4 ⎥
⎢⎝ⅈ⋅sin (θ) + cos (θ)⎠⋅ℯ (1 - ⅈ)⋅ℯ ⋅sin(θ)⋅cos(θ)⎥
⎢ ⎥
⎢ -ⅈ⋅π -ⅈ⋅π ⎥
⎢ ───── ─────⎥
⎢ 4 ⎛ 2 2 ⎞ 4 ⎥
⎣(1 - ⅈ)⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝sin (θ) + ⅈ⋅cos (θ)⎠⋅ℯ ⎦
"""
return phase_retarder(theta, pi/2)
def transmissive_filter(T):
"""An attenuator Jones matrix with transmittance `T`.
Parameters
----------
``T`` : numeric type or sympy Symbol
The transmittance of the attenuator.
Returns
-------
sympy Matrix
A Jones matrix representing the filter.
Examples
--------
A generic filter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import transmissive_filter
>>> T = symbols("T", real=True)
>>> NDF = transmissive_filter(T)
>>> pprint(NDF, use_unicode=True)
⎡√T 0 ⎤
⎢ ⎥
⎣0 √T⎦
"""
return Matrix([[sqrt(T), 0], [0, sqrt(T)]])
def reflective_filter(R):
"""A reflective filter Jones matrix with reflectance `R`.
Parameters
----------
``R`` : numeric type or sympy Symbol
The reflectance of the filter.
Returns
-------
sympy Matrix
A Jones matrix representing the filter.
Examples
--------
A generic filter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import reflective_filter
>>> R = symbols("R", real=True)
>>> pprint(reflective_filter(R), use_unicode=True)
⎡√R 0 ⎤
⎢ ⎥
⎣0 -√R⎦
"""
return Matrix([[sqrt(R), 0], [0, -sqrt(R)]])
def mueller_matrix(J):
"""The Mueller matrix corresponding to Jones matrix `J`.
Parameters
----------
``J`` : sympy Matrix
A Jones matrix.
Returns
-------
sympy Matrix
The corresponding Mueller matrix.
Examples
--------
Generic optical components.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import (mueller_matrix,
... linear_polarizer, half_wave_retarder, quarter_wave_retarder)
>>> theta = symbols("theta", real=True)
A linear_polarizer
>>> pprint(mueller_matrix(linear_polarizer(theta)), use_unicode=True)
⎡ cos(2⋅θ) sin(2⋅θ) ⎤
⎢ 1/2 ──────── ──────── 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢cos(2⋅θ) cos(4⋅θ) 1 sin(4⋅θ) ⎥
⎢──────── ──────── + ─ ──────── 0⎥
⎢ 2 4 4 4 ⎥
⎢ ⎥
⎢sin(2⋅θ) sin(4⋅θ) 1 cos(4⋅θ) ⎥
⎢──────── ──────── ─ - ──────── 0⎥
⎢ 2 4 4 4 ⎥
⎢ ⎥
⎣ 0 0 0 0⎦
A half-wave plate
>>> pprint(mueller_matrix(half_wave_retarder(theta)), use_unicode=True)
⎡1 0 0 0 ⎤
⎢ ⎥
⎢ 4 2 ⎥
⎢0 8⋅sin (θ) - 8⋅sin (θ) + 1 sin(4⋅θ) 0 ⎥
⎢ ⎥
⎢ 4 2 ⎥
⎢0 sin(4⋅θ) - 8⋅sin (θ) + 8⋅sin (θ) - 1 0 ⎥
⎢ ⎥
⎣0 0 0 -1⎦
A quarter-wave plate
>>> pprint(mueller_matrix(quarter_wave_retarder(theta)), use_unicode=True)
⎡1 0 0 0 ⎤
⎢ ⎥
⎢ cos(4⋅θ) 1 sin(4⋅θ) ⎥
⎢0 ──────── + ─ ──────── -sin(2⋅θ)⎥
⎢ 2 2 2 ⎥
⎢ ⎥
⎢ sin(4⋅θ) 1 cos(4⋅θ) ⎥
⎢0 ──────── ─ - ──────── cos(2⋅θ) ⎥
⎢ 2 2 2 ⎥
⎢ ⎥
⎣0 sin(2⋅θ) -cos(2⋅θ) 0 ⎦
"""
A = Matrix([[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 1, 1, 0],
[0, -I, I, 0]])
return simplify(A*TensorProduct(J, J.conjugate())*A.inv())
def polarizing_beam_splitter(Tp=1, Rs=1, Ts=0, Rp=0, phia=0, phib=0):
r"""A polarizing beam splitter Jones matrix at angle `theta`.
Parameters
----------
``J`` : sympy Matrix
A Jones matrix.
``Tp`` : numeric type or sympy Symbol
The transmissivity of the P-polarized component.
``Rs`` : numeric type or sympy Symbol
The reflectivity of the S-polarized component.
``Ts`` : numeric type or sympy Symbol
The transmissivity of the S-polarized component.
``Rp`` : numeric type or sympy Symbol
The reflectivity of the P-polarized component.
``phia`` : numeric type or sympy Symbol
The phase difference between transmitted and reflected component for
output mode a.
``phib`` : numeric type or sympy Symbol
The phase difference between transmitted and reflected component for
output mode b.
Returns
-------
sympy Matrix
A 4x4 matrix representing the PBS. This matrix acts on a 4x1 vector
whose first two entries are the Jones vector on one of the PBS ports,
and the last two entries the Jones vector on the other port.
Examples
--------
Generic polarizing beam-splitter.
>>> from sympy import pprint, symbols
>>> from sympy.physics.optics.polarization import polarizing_beam_splitter
>>> Ts, Rs, Tp, Rp = symbols(r"Ts, Rs, Tp, Rp", positive=True)
>>> phia, phib = symbols("phi_a, phi_b", real=True)
>>> PBS = polarizing_beam_splitter(Tp, Rs, Ts, Rp, phia, phib)
>>> pprint(PBS, use_unicode=False)
[ ____ ____ ]
[ \/ Tp 0 I*\/ Rp 0 ]
[ ]
[ ____ ____ I*phi_a]
[ 0 \/ Ts 0 -I*\/ Rs *e ]
[ ]
[ ____ ____ ]
[I*\/ Rp 0 \/ Tp 0 ]
[ ]
[ ____ I*phi_b ____ ]
[ 0 -I*\/ Rs *e 0 \/ Ts ]
"""
PBS = Matrix([[sqrt(Tp), 0, I*sqrt(Rp), 0],
[0, sqrt(Ts), 0, -I*sqrt(Rs)*exp(I*phia)],
[I*sqrt(Rp), 0, sqrt(Tp), 0],
[0, -I*sqrt(Rs)*exp(I*phib), 0, sqrt(Ts)]])
return PBS
|
08442178a965be7a1efb9be82314715484df77d6046bfaec00f50d54634d1fc6 | """
**Contains**
* Medium
"""
from sympy.physics.units import second, meter, kilogram, ampere
__all__ = ['Medium']
from sympy import Symbol, sympify, sqrt
from sympy.physics.units import speed_of_light, u0, e0
c = speed_of_light.convert_to(meter/second)
_e0mksa = e0.convert_to(ampere**2*second**4/(kilogram*meter**3))
_u0mksa = u0.convert_to(meter*kilogram/(ampere**2*second**2))
class Medium(Symbol):
"""
This class represents an optical medium. The prime reason to implement this is
to facilitate refraction, Fermat's principle, etc.
An optical medium is a material through which electromagnetic waves propagate.
The permittivity and permeability of the medium define how electromagnetic
waves propagate in it.
Parameters
==========
name: string
The display name of the Medium.
permittivity: Sympifyable
Electric permittivity of the space.
permeability: Sympifyable
Magnetic permeability of the space.
n: Sympifyable
Index of refraction of the medium.
Examples
========
>>> from sympy.abc import epsilon, mu
>>> from sympy.physics.optics import Medium
>>> m1 = Medium('m1')
>>> m2 = Medium('m2', epsilon, mu)
>>> m1.intrinsic_impedance
149896229*pi*kilogram*meter**2/(1250000*ampere**2*second**3)
>>> m2.refractive_index
299792458*meter*sqrt(epsilon*mu)/second
References
==========
.. [1] https://en.wikipedia.org/wiki/Optical_medium
"""
def __new__(cls, name, permittivity=None, permeability=None, n=None):
obj = super().__new__(cls, name)
obj._permittivity = sympify(permittivity)
obj._permeability = sympify(permeability)
obj._n = sympify(n)
if n is not None:
if permittivity is not None and permeability is None:
obj._permeability = n**2/(c**2*obj._permittivity)
if permeability is not None and permittivity is None:
obj._permittivity = n**2/(c**2*obj._permeability)
            if permittivity is not None and permeability is not None:
if abs(n - c*sqrt(obj._permittivity*obj._permeability)) > 1e-6:
raise ValueError("Values are not consistent.")
elif permittivity is not None and permeability is not None:
obj._n = c*sqrt(permittivity*permeability)
elif permittivity is None and permeability is None:
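            # Neither value given: default to the free-space (vacuum)
            # permittivity and permeability.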
obj._permittivity = _e0mksa
obj._permeability = _u0mksa
return obj
@property
def intrinsic_impedance(self):
"""
Returns intrinsic impedance of the medium.
The intrinsic impedance of a medium is the ratio of the
transverse components of the electric and magnetic fields
of the electromagnetic wave travelling in the medium.
        In a region with no electrical conductivity it simplifies
        to the square root of the ratio of magnetic permeability
        to electric permittivity.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.intrinsic_impedance
149896229*pi*kilogram*meter**2/(1250000*ampere**2*second**3)
"""
return sqrt(self._permeability/self._permittivity)
@property
def speed(self):
"""
Returns speed of the electromagnetic wave travelling in the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.speed
299792458*meter/second
>>> m2 = Medium('m2', n=1)
>>> m.speed == m2.speed
True
"""
if self._permittivity is not None and self._permeability is not None:
return 1/sqrt(self._permittivity*self._permeability)
else:
return c/self._n
@property
def refractive_index(self):
"""
Returns refractive index of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.refractive_index
1
"""
return (c/self.speed)
@property
def permittivity(self):
"""
Returns electric permittivity of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permittivity
625000*ampere**2*second**4/(22468879468420441*pi*kilogram*meter**3)
"""
return self._permittivity
@property
def permeability(self):
"""
Returns magnetic permeability of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permeability
pi*kilogram*meter/(2500000*ampere**2*second**2)
"""
return self._permeability
def __str__(self):
from sympy.printing import sstr
return type(self).__name__ + ': ' + sstr([self._permittivity,
self._permeability, self._n])
def __lt__(self, other):
"""
Compares based on refractive index of the medium.
"""
return self.refractive_index < other.refractive_index
def __gt__(self, other):
return not self < other
def __eq__(self, other):
return self.refractive_index == other.refractive_index
def __ne__(self, other):
return not self == other
|
28aea9b631848434f5804ee05552b0d4dd0a7f08b56fac1c0e7f0a46cc1c0546 | """
**Contains**
* refraction_angle
* fresnel_coefficients
* deviation
* brewster_angle
* critical_angle
* lens_makers_formula
* mirror_formula
* lens_formula
* hyperfocal_distance
* transverse_magnification
"""
__all__ = ['refraction_angle',
'deviation',
'fresnel_coefficients',
'brewster_angle',
'critical_angle',
'lens_makers_formula',
'mirror_formula',
'lens_formula',
'hyperfocal_distance',
'transverse_magnification'
]
from sympy import Symbol, sympify, sqrt, Matrix, acos, oo, Limit, atan2, asin,\
cos, sin, tan, I, cancel, pi, Float
from sympy.core.compatibility import is_sequence
from sympy.geometry.line import Ray3D
from sympy.geometry.util import intersection
from sympy.geometry.plane import Plane
from .medium import Medium
def refractive_index_of_medium(medium):
"""
Helper function that returns refractive index, given a medium
"""
if isinstance(medium, Medium):
n = medium.refractive_index
else:
n = sympify(medium)
return n
def refraction_angle(incident, medium1, medium2, normal=None, plane=None):
"""
This function calculates transmitted vector after refraction at planar
surface. `medium1` and `medium2` can be `Medium` or any sympifiable object.
    If `incident` is a number then it is treated as the angle of incidence
    (in radians), in which case the angle of refraction is returned.
If `incident` is an object of `Ray3D`, `normal` also has to be an instance
of `Ray3D` in order to get the output as a `Ray3D`. Please note that if
plane of separation is not provided and normal is an instance of `Ray3D`,
normal will be assumed to be intersecting incident ray at the plane of
separation. This will not be the case when `normal` is a `Matrix` or
any other sequence.
If `incident` is an instance of `Ray3D` and `plane` has not been provided
and `normal` is not `Ray3D`, output will be a `Matrix`.
Parameters
==========
incident : Matrix, Ray3D, sequence or a number
Incident vector or angle of incidence
medium1 : sympy.physics.optics.medium.Medium or sympifiable
Medium 1 or its refractive index
medium2 : sympy.physics.optics.medium.Medium or sympifiable
Medium 2 or its refractive index
normal : Matrix, Ray3D, or sequence
Normal vector
plane : Plane
Plane of separation of the two media.
Returns an angle of refraction or a refracted ray depending on inputs.
Examples
========
>>> from sympy.physics.optics import refraction_angle
>>> from sympy.geometry import Point3D, Ray3D, Plane
>>> from sympy.matrices import Matrix
>>> from sympy import symbols, pi
>>> n = Matrix([0, 0, 1])
>>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
>>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
>>> refraction_angle(r1, 1, 1, n)
Matrix([
[ 1],
[ 1],
[-1]])
>>> refraction_angle(r1, 1, 1, plane=P)
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
With different index of refraction of the two media
>>> n1, n2 = symbols('n1, n2')
>>> refraction_angle(r1, n1, n2, n)
Matrix([
[ n1/n2],
[ n1/n2],
[-sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)]])
>>> refraction_angle(r1, n1, n2, plane=P)
Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
>>> round(refraction_angle(pi/6, 1.2, 1.5), 5)
0.41152
"""
n1 = refractive_index_of_medium(medium1)
n2 = refractive_index_of_medium(medium2)
# check if an incidence angle was supplied instead of a ray
try:
angle_of_incidence = float(incident)
except TypeError:
angle_of_incidence = None
try:
critical_angle_ = critical_angle(medium1, medium2)
except (ValueError, TypeError):
critical_angle_ = None
if angle_of_incidence is not None:
if normal is not None or plane is not None:
raise ValueError('Normal/plane not allowed if incident is an angle')
if not 0.0 <= angle_of_incidence < pi*0.5:
raise ValueError('Angle of incidence not in range [0:pi/2)')
if critical_angle_ and angle_of_incidence > critical_angle_:
raise ValueError('Ray undergoes total internal reflection')
return asin(n1*sin(angle_of_incidence)/n2)
if angle_of_incidence and not 0 <= angle_of_incidence < pi*0.5:
raise ValueError
# Treat the incident as ray below
# A flag to check whether to return Ray3D or not
return_ray = False
if plane is not None and normal is not None:
raise ValueError("Either plane or normal is acceptable.")
if not isinstance(incident, Matrix):
if is_sequence(incident):
_incident = Matrix(incident)
elif isinstance(incident, Ray3D):
_incident = Matrix(incident.direction_ratio)
else:
raise TypeError(
"incident should be a Matrix, Ray3D, or sequence")
else:
_incident = incident
# If plane is provided, get direction ratios of the normal
# to the plane from the plane else go with `normal` param.
if plane is not None:
if not isinstance(plane, Plane):
raise TypeError("plane should be an instance of geometry.plane.Plane")
# If we have the plane, we can get the intersection
# point of incident ray and the plane and thus return
# an instance of Ray3D.
if isinstance(incident, Ray3D):
return_ray = True
intersection_pt = plane.intersection(incident)[0]
_normal = Matrix(plane.normal_vector)
else:
if not isinstance(normal, Matrix):
if is_sequence(normal):
_normal = Matrix(normal)
elif isinstance(normal, Ray3D):
_normal = Matrix(normal.direction_ratio)
if isinstance(incident, Ray3D):
intersection_pt = intersection(incident, normal)
if len(intersection_pt) == 0:
raise ValueError(
"Normal isn't concurrent with the incident ray.")
else:
return_ray = True
intersection_pt = intersection_pt[0]
else:
raise TypeError(
"Normal should be a Matrix, Ray3D, or sequence")
else:
_normal = normal
eta = n1/n2 # Relative index of refraction
# Calculating magnitude of the vectors
mag_incident = sqrt(sum([i**2 for i in _incident]))
mag_normal = sqrt(sum([i**2 for i in _normal]))
# Converting vectors to unit vectors by dividing
# them with their magnitudes
_incident /= mag_incident
_normal /= mag_normal
c1 = -_incident.dot(_normal) # cos(angle_of_incidence)
cs2 = 1 - eta**2*(1 - c1**2) # cos(angle_of_refraction)**2
if cs2.is_negative: # This is the case of total internal reflection(TIR).
return 0
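    # Refracted direction from the vector form of Snell's law:
    # t = eta*i + (eta*cos(theta_i) - cos(theta_t))*n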
drs = eta*_incident + (eta*c1 - sqrt(cs2))*_normal
# Multiplying unit vector by its magnitude
drs = drs*mag_incident
if not return_ray:
return drs
else:
return Ray3D(intersection_pt, direction_ratio=drs)
def fresnel_coefficients(angle_of_incidence, medium1, medium2):
"""
This function uses Fresnel equations to calculate reflection and
transmission coefficients. Those are obtained for both polarisations
when the electric field vector is in the plane of incidence (labelled 'p')
and when the electric field vector is perpendicular to the plane of
    incidence (labelled 's'). There are four real coefficients unless the
    incident ray undergoes total internal reflection, in which case there are
    two complex ones. The angle of incidence is the angle between the incident
    ray and the surface normal. ``medium1`` and ``medium2`` can be ``Medium``
    or any sympifiable object.
Parameters
==========
angle_of_incidence : sympifiable
medium1 : Medium or sympifiable
Medium 1 or its refractive index
medium2 : Medium or sympifiable
Medium 2 or its refractive index
Returns a list with four real Fresnel coefficients:
[reflection p (TM), reflection s (TE),
transmission p (TM), transmission s (TE)]
    If the ray undergoes total internal reflection, then it returns a
    list of two complex Fresnel coefficients:
[reflection p (TM), reflection s (TE)]
Examples
========
>>> from sympy.physics.optics import fresnel_coefficients
>>> fresnel_coefficients(0.3, 1, 2)
[0.317843553417859, -0.348645229818821,
0.658921776708929, 0.651354770181179]
>>> fresnel_coefficients(0.6, 2, 1)
[-0.235625382192159 - 0.971843958291041*I,
0.816477005968898 - 0.577377951366403*I]
References
==========
    .. [1] https://en.wikipedia.org/wiki/Fresnel_equations
"""
if not 0 <= 2*angle_of_incidence < pi:
raise ValueError('Angle of incidence not in range [0:pi/2)')
n1 = refractive_index_of_medium(medium1)
n2 = refractive_index_of_medium(medium2)
angle_of_refraction = asin(n1*sin(angle_of_incidence)/n2)
try:
angle_of_total_internal_reflection_onset = critical_angle(n1, n2)
except ValueError:
angle_of_total_internal_reflection_onset = None
if angle_of_total_internal_reflection_onset == None or\
angle_of_total_internal_reflection_onset > angle_of_incidence:
R_s = -sin(angle_of_incidence - angle_of_refraction)\
/sin(angle_of_incidence + angle_of_refraction)
R_p = tan(angle_of_incidence - angle_of_refraction)\
/tan(angle_of_incidence + angle_of_refraction)
T_s = 2*sin(angle_of_refraction)*cos(angle_of_incidence)\
/sin(angle_of_incidence + angle_of_refraction)
T_p = 2*sin(angle_of_refraction)*cos(angle_of_incidence)\
/(sin(angle_of_incidence + angle_of_refraction)\
*cos(angle_of_incidence - angle_of_refraction))
return [R_p, R_s, T_p, T_s]
else:
n = n2/n1
R_s = cancel((cos(angle_of_incidence)-\
I*sqrt(sin(angle_of_incidence)**2 - n**2))\
/(cos(angle_of_incidence)+\
I*sqrt(sin(angle_of_incidence)**2 - n**2)))
R_p = cancel((n**2*cos(angle_of_incidence)-\
I*sqrt(sin(angle_of_incidence)**2 - n**2))\
/(n**2*cos(angle_of_incidence)+\
I*sqrt(sin(angle_of_incidence)**2 - n**2)))
return [R_p, R_s]
def deviation(incident, medium1, medium2, normal=None, plane=None):
"""
This function calculates the angle of deviation of a ray
due to refraction at planar surface.
Parameters
==========
incident : Matrix, Ray3D, sequence or float
Incident vector or angle of incidence
medium1 : sympy.physics.optics.medium.Medium or sympifiable
Medium 1 or its refractive index
medium2 : sympy.physics.optics.medium.Medium or sympifiable
Medium 2 or its refractive index
normal : Matrix, Ray3D, or sequence
Normal vector
plane : Plane
Plane of separation of the two media.
Returns angular deviation between incident and refracted rays
Examples
========
>>> from sympy.physics.optics import deviation
>>> from sympy.geometry import Point3D, Ray3D, Plane
>>> from sympy.matrices import Matrix
>>> from sympy import symbols
>>> n1, n2 = symbols('n1, n2')
>>> n = Matrix([0, 0, 1])
>>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
>>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
>>> deviation(r1, 1, 1, n)
0
>>> deviation(r1, n1, n2, plane=P)
-acos(-sqrt(-2*n1**2/(3*n2**2) + 1)) + acos(-sqrt(3)/3)
>>> round(deviation(0.1, 1.2, 1.5), 5)
-0.02005
"""
refracted = refraction_angle(incident,
medium1,
medium2,
normal=normal,
plane=plane)
try:
angle_of_incidence = Float(incident)
except TypeError:
angle_of_incidence = None
if angle_of_incidence is not None:
return float(refracted) - angle_of_incidence
if refracted != 0:
if isinstance(refracted, Ray3D):
refracted = Matrix(refracted.direction_ratio)
if not isinstance(incident, Matrix):
if is_sequence(incident):
_incident = Matrix(incident)
elif isinstance(incident, Ray3D):
_incident = Matrix(incident.direction_ratio)
else:
raise TypeError(
"incident should be a Matrix, Ray3D, or sequence")
else:
_incident = incident
if plane is None:
if not isinstance(normal, Matrix):
if is_sequence(normal):
_normal = Matrix(normal)
elif isinstance(normal, Ray3D):
_normal = Matrix(normal.direction_ratio)
else:
raise TypeError(
"normal should be a Matrix, Ray3D, or sequence")
else:
_normal = normal
else:
_normal = Matrix(plane.normal_vector)
mag_incident = sqrt(sum([i**2 for i in _incident]))
mag_normal = sqrt(sum([i**2 for i in _normal]))
mag_refracted = sqrt(sum([i**2 for i in refracted]))
_incident /= mag_incident
_normal /= mag_normal
refracted /= mag_refracted
i = acos(_incident.dot(_normal))
r = acos(refracted.dot(_normal))
return i - r
def brewster_angle(medium1, medium2):
"""
    This function calculates Brewster's angle of incidence to Medium 2 from
    Medium 1, in radians.
Parameters
==========
    medium 1 : Medium or sympifiable
        Medium 1 or its refractive index
    medium 2 : Medium or sympifiable
        Medium 2 or its refractive index
Examples
========
>>> from sympy.physics.optics import brewster_angle
>>> brewster_angle(1, 1.33)
0.926093295503462
"""
n1 = refractive_index_of_medium(medium1)
n2 = refractive_index_of_medium(medium2)
return atan2(n2, n1)
def critical_angle(medium1, medium2):
"""
    This function calculates the critical angle of incidence (marking the onset
    of total internal reflection) to Medium 2 from Medium 1 in radians.
Parameters
==========
    medium 1 : Medium or sympifiable
        Medium 1 or its refractive index
    medium 2 : Medium or sympifiable
        Medium 2 or its refractive index
Examples
========
>>> from sympy.physics.optics import critical_angle
>>> critical_angle(1.33, 1)
0.850908514477849
"""
n1 = refractive_index_of_medium(medium1)
n2 = refractive_index_of_medium(medium2)
if n2 > n1:
raise ValueError('Total internal reflection impossible for n1 < n2')
else:
return asin(n2/n1)
def lens_makers_formula(n_lens, n_surr, r1, r2):
"""
This function calculates focal length of a thin lens.
It follows cartesian sign convention.
Parameters
==========
n_lens : Medium or sympifiable
Index of refraction of lens.
    n_surr : Medium or sympifiable
        Index of refraction of the surrounding medium.
r1 : sympifiable
Radius of curvature of first surface.
r2 : sympifiable
Radius of curvature of second surface.
Examples
========
>>> from sympy.physics.optics import lens_makers_formula
>>> lens_makers_formula(1.33, 1, 10, -10)
15.1515151515151
"""
if isinstance(n_lens, Medium):
n_lens = n_lens.refractive_index
else:
n_lens = sympify(n_lens)
if isinstance(n_surr, Medium):
n_surr = n_surr.refractive_index
else:
n_surr = sympify(n_surr)
r1 = sympify(r1)
r2 = sympify(r2)
return 1/((n_lens - n_surr)/n_surr*(1/r1 - 1/r2))
def mirror_formula(focal_length=None, u=None, v=None):
"""
This function provides one of the three parameters
when two of them are supplied.
This is valid only for paraxial rays.
Parameters
==========
focal_length : sympifiable
Focal length of the mirror.
u : sympifiable
Distance of object from the pole on
the principal axis.
v : sympifiable
Distance of the image from the pole
on the principal axis.
Examples
========
>>> from sympy.physics.optics import mirror_formula
>>> from sympy.abc import f, u, v
>>> mirror_formula(focal_length=f, u=u)
f*u/(-f + u)
>>> mirror_formula(focal_length=f, v=v)
f*v/(-f + v)
>>> mirror_formula(u=u, v=v)
u*v/(u + v)
"""
if focal_length and u and v:
raise ValueError("Please provide only two parameters")
focal_length = sympify(focal_length)
u = sympify(u)
v = sympify(v)
if u is oo:
_u = Symbol('u')
if v is oo:
_v = Symbol('v')
if focal_length is oo:
_f = Symbol('f')
if focal_length is None:
if u is oo and v is oo:
return Limit(Limit(_v*_u/(_v + _u), _u, oo), _v, oo).doit()
if u is oo:
return Limit(v*_u/(v + _u), _u, oo).doit()
if v is oo:
return Limit(_v*u/(_v + u), _v, oo).doit()
return v*u/(v + u)
if u is None:
if v is oo and focal_length is oo:
return Limit(Limit(_v*_f/(_v - _f), _v, oo), _f, oo).doit()
if v is oo:
return Limit(_v*focal_length/(_v - focal_length), _v, oo).doit()
if focal_length is oo:
return Limit(v*_f/(v - _f), _f, oo).doit()
return v*focal_length/(v - focal_length)
if v is None:
if u is oo and focal_length is oo:
return Limit(Limit(_u*_f/(_u - _f), _u, oo), _f, oo).doit()
if u is oo:
return Limit(_u*focal_length/(_u - focal_length), _u, oo).doit()
if focal_length is oo:
return Limit(u*_f/(u - _f), _f, oo).doit()
return u*focal_length/(u - focal_length)
def lens_formula(focal_length=None, u=None, v=None):
"""
This function provides one of the three parameters
when two of them are supplied.
This is valid only for paraxial rays.
Parameters
==========
focal_length : sympifiable
        Focal length of the lens.
u : sympifiable
Distance of object from the optical center on
the principal axis.
v : sympifiable
Distance of the image from the optical center
on the principal axis.
Examples
========
>>> from sympy.physics.optics import lens_formula
>>> from sympy.abc import f, u, v
>>> lens_formula(focal_length=f, u=u)
f*u/(f + u)
>>> lens_formula(focal_length=f, v=v)
f*v/(f - v)
>>> lens_formula(u=u, v=v)
u*v/(u - v)
"""
if focal_length and u and v:
raise ValueError("Please provide only two parameters")
focal_length = sympify(focal_length)
u = sympify(u)
v = sympify(v)
if u is oo:
_u = Symbol('u')
if v is oo:
_v = Symbol('v')
if focal_length is oo:
_f = Symbol('f')
if focal_length is None:
if u is oo and v is oo:
return Limit(Limit(_v*_u/(_u - _v), _u, oo), _v, oo).doit()
if u is oo:
return Limit(v*_u/(_u - v), _u, oo).doit()
if v is oo:
return Limit(_v*u/(u - _v), _v, oo).doit()
return v*u/(u - v)
if u is None:
if v is oo and focal_length is oo:
return Limit(Limit(_v*_f/(_f - _v), _v, oo), _f, oo).doit()
if v is oo:
return Limit(_v*focal_length/(focal_length - _v), _v, oo).doit()
if focal_length is oo:
return Limit(v*_f/(_f - v), _f, oo).doit()
return v*focal_length/(focal_length - v)
if v is None:
if u is oo and focal_length is oo:
return Limit(Limit(_u*_f/(_u + _f), _u, oo), _f, oo).doit()
if u is oo:
return Limit(_u*focal_length/(_u + focal_length), _u, oo).doit()
if focal_length is oo:
return Limit(u*_f/(u + _f), _f, oo).doit()
return u*focal_length/(u + focal_length)
def hyperfocal_distance(f, N, c):
"""
Parameters
==========
f: sympifiable
Focal length of a given lens
N: sympifiable
F-number of a given lens
c: sympifiable
Circle of Confusion (CoC) of a given image format
Example
=======
>>> from sympy.physics.optics import hyperfocal_distance
>>> round(hyperfocal_distance(f = 0.5, N = 8, c = 0.0033), 2)
9.47
"""
f = sympify(f)
N = sympify(N)
c = sympify(c)
return (1/(N * c))*(f**2)
def transverse_magnification(si, so):
"""
Calculates the transverse magnification, which is the ratio of the
image size to the object size.
Parameters
==========
so: sympifiable
Lens-object distance
si: sympifiable
Lens-image distance
Example
=======
>>> from sympy.physics.optics import transverse_magnification
>>> transverse_magnification(30, 15)
-2
"""
si = sympify(si)
so = sympify(so)
return (-(si/so))
|
3978f144a70aec06e969e22939f80b9734ddcc46c0eae8693e5431b3652b115c | """
This module has all the classes and functions related to waves in optics.
**Contains**
* TWave
"""
__all__ = ['TWave']
from sympy import (sympify, pi, sin, cos, sqrt, Symbol, S,
symbols, Derivative, atan2)
from sympy.core.expr import Expr
from sympy.physics.units import speed_of_light, meter, second
c = speed_of_light.convert_to(meter/second)
class TWave(Expr):
r"""
This is a simple transverse sine wave travelling in a one-dimensional space.
Basic properties are required at the time of creation of the object,
but they can be changed later with respective methods provided.
    It is represented as :math:`A \cos(k x - \omega t + \phi)`,
where :math:`A` is the amplitude, :math:`\omega` is the angular velocity,
:math:`k` is the wavenumber (spatial frequency), :math:`x` is a spatial variable
to represent the position on the dimension on which the wave propagates,
and :math:`\phi` is the phase angle of the wave.
Arguments
=========
amplitude : Sympifyable
Amplitude of the wave.
frequency : Sympifyable
Frequency of the wave.
phase : Sympifyable
Phase angle of the wave.
time_period : Sympifyable
Time period of the wave.
n : Sympifyable
Refractive index of the medium.
Raises
=======
ValueError : When neither frequency nor time period is provided
or they are not consistent.
TypeError : When anything other than TWave objects is added.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A1, phi1, A2, phi2, f = symbols('A1, phi1, A2, phi2, f')
>>> w1 = TWave(A1, f, phi1)
>>> w2 = TWave(A2, f, phi2)
>>> w3 = w1 + w2 # Superposition of two waves
>>> w3
TWave(sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2), f,
atan2(A1*cos(phi1) + A2*cos(phi2), A1*sin(phi1) + A2*sin(phi2)))
>>> w3.amplitude
sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2)
>>> w3.phase
atan2(A1*cos(phi1) + A2*cos(phi2), A1*sin(phi1) + A2*sin(phi2))
>>> w3.speed
299792458*meter/(second*n)
>>> w3.angular_velocity
2*pi*f
"""
def __init__(
self,
amplitude,
frequency=None,
phase=S.Zero,
time_period=None,
n=Symbol('n')):
frequency = sympify(frequency)
amplitude = sympify(amplitude)
phase = sympify(phase)
time_period = sympify(time_period)
n = sympify(n)
self._frequency = frequency
self._amplitude = amplitude
self._phase = phase
self._time_period = time_period
self._n = n
if time_period is not None:
self._frequency = 1/self._time_period
if frequency is not None:
self._time_period = 1/self._frequency
if time_period is not None:
if frequency != 1/time_period:
raise ValueError("frequency and time_period should be consistent.")
if frequency is None and time_period is None:
raise ValueError("Either frequency or time period is needed.")
@property
def frequency(self):
"""
Returns the frequency of the wave,
in cycles per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.frequency
f
"""
return self._frequency
@property
def time_period(self):
"""
Returns the temporal period of the wave,
in seconds per cycle.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.time_period
1/f
"""
return self._time_period
@property
def wavelength(self):
"""
Returns the wavelength (spatial period) of the wave,
in meters per cycle.
It depends on the medium of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavelength
299792458*meter/(second*f*n)
"""
return c/(self._frequency*self._n)
@property
def amplitude(self):
"""
Returns the amplitude of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.amplitude
A
"""
return self._amplitude
@property
def phase(self):
"""
Returns the phase angle of the wave,
in radians.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.phase
phi
"""
return self._phase
@property
def speed(self):
"""
Returns the propagation speed of the wave,
in meters per second.
It is dependent on the propagation medium.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.speed
299792458*meter/(second*n)
"""
return self.wavelength*self._frequency
@property
def angular_velocity(self):
"""
Returns the angular velocity of the wave,
in radians per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.angular_velocity
2*pi*f
"""
return 2*pi*self._frequency
@property
def wavenumber(self):
"""
Returns the wavenumber of the wave,
in radians per meter.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavenumber
pi*second*f*n/(149896229*meter)
"""
return 2*pi/self.wavelength
def __str__(self):
"""String representation of a TWave."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
__repr__ = __str__
def __add__(self, other):
"""
Addition of two waves will result in their superposition.
The type of interference will depend on their phase angles.
"""
if isinstance(other, TWave):
if self._frequency == other._frequency and self.wavelength == other.wavelength:
return TWave(sqrt(self._amplitude**2 + other._amplitude**2 + 2 *
self.amplitude*other.amplitude*cos(
self._phase - other.phase)),
self.frequency,
atan2(self._amplitude*cos(self._phase)
+other._amplitude*cos(other._phase),
self._amplitude*sin(self._phase)
+other._amplitude*sin(other._phase))
)
else:
raise NotImplementedError("Interference of waves with different frequencies"
" has not been implemented.")
else:
raise TypeError(type(other).__name__ + " and TWave objects can't be added.")
def _eval_rewrite_as_sin(self, *args, **kwargs):
return self._amplitude*sin(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase + pi/2, evaluate=False)
def _eval_rewrite_as_cos(self, *args, **kwargs):
return self._amplitude*cos(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase)
def _eval_rewrite_as_pde(self, *args, **kwargs):
from sympy import Function
mu, epsilon, x, t = symbols('mu, epsilon, x, t')
E = Function('E')
return Derivative(E(x, t), x, 2) + mu*epsilon*Derivative(E(x, t), t, 2)
def _eval_rewrite_as_exp(self, *args, **kwargs):
from sympy import exp, I
return self._amplitude*exp(I*(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase))
|
f8fe0f2ed068833ac5ed5dc75820ff4f1caf010e45dd2e1ff962d806764b9599 | """
Gaussian optics.
The module implements:
- Ray transfer matrices for geometrical and gaussian optics.
See RayTransferMatrix, GeometricRay and BeamParameter
- Conjugation relations for geometrical and gaussian optics.
See geometric_conj*, gauss_conj and conjugate_gauss_beams
The conventions for the distances are as follows:
focal distance
positive for convergent lenses
object distance
positive for real objects
image distance
positive for real images
"""
__all__ = [
'RayTransferMatrix',
'FreeSpace',
'FlatRefraction',
'CurvedRefraction',
'FlatMirror',
'CurvedMirror',
'ThinLens',
'GeometricRay',
'BeamParameter',
'waist2rayleigh',
'rayleigh2waist',
'geometric_conj_ab',
'geometric_conj_af',
'geometric_conj_bf',
'gaussian_conj',
'conjugate_gauss_beams',
]
from sympy import (atan2, Expr, I, im, Matrix, pi, re, sqrt, sympify,
together, MutableDenseMatrix)
from sympy.utilities.misc import filldedent
###
# A, B, C, D matrices
###
class RayTransferMatrix(MutableDenseMatrix):
"""
Base class for a Ray Transfer Matrix.
It should be used if there isn't already a more specific subclass mentioned
in See Also.
Parameters
==========
parameters : A, B, C and D or 2x2 matrix (Matrix(2, 2, [A, B, C, D]))
Examples
========
>>> from sympy.physics.optics import RayTransferMatrix, ThinLens
>>> from sympy import Symbol, Matrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat
Matrix([
[1, 2],
[3, 4]])
>>> RayTransferMatrix(Matrix([[1, 2], [3, 4]]))
Matrix([
[1, 2],
[3, 4]])
>>> mat.A
1
>>> f = Symbol('f')
>>> lens = ThinLens(f)
>>> lens
Matrix([
[ 1, 0],
[-1/f, 1]])
>>> lens.C
-1/f
See Also
========
GeometricRay, BeamParameter,
FreeSpace, FlatRefraction, CurvedRefraction,
FlatMirror, CurvedMirror, ThinLens
References
==========
.. [1] https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
"""
def __new__(cls, *args):
if len(args) == 4:
temp = ((args[0], args[1]), (args[2], args[3]))
elif len(args) == 1 \
and isinstance(args[0], Matrix) \
and args[0].shape == (2, 2):
temp = args[0]
else:
raise ValueError(filldedent('''
Expecting 2x2 Matrix or the 4 elements of
the Matrix but got %s''' % str(args)))
return Matrix.__new__(cls, temp)
def __mul__(self, other):
if isinstance(other, RayTransferMatrix):
return RayTransferMatrix(Matrix.__mul__(self, other))
elif isinstance(other, GeometricRay):
return GeometricRay(Matrix.__mul__(self, other))
elif isinstance(other, BeamParameter):
temp = self*Matrix(((other.q,), (1,)))
q = (temp[0]/temp[1]).expand(complex=True)
return BeamParameter(other.wavelen,
together(re(q)),
z_r=together(im(q)))
else:
return Matrix.__mul__(self, other)
@property
def A(self):
"""
The A parameter of the Matrix.
Examples
========
>>> from sympy.physics.optics import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.A
1
"""
return self[0, 0]
@property
def B(self):
"""
The B parameter of the Matrix.
Examples
========
>>> from sympy.physics.optics import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.B
2
"""
return self[0, 1]
@property
def C(self):
"""
The C parameter of the Matrix.
Examples
========
>>> from sympy.physics.optics import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.C
3
"""
return self[1, 0]
@property
def D(self):
"""
The D parameter of the Matrix.
Examples
========
>>> from sympy.physics.optics import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.D
4
"""
return self[1, 1]
class FreeSpace(RayTransferMatrix):
"""
Ray Transfer Matrix for free space.
Parameters
==========
distance
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import FreeSpace
>>> from sympy import symbols
>>> d = symbols('d')
>>> FreeSpace(d)
Matrix([
[1, d],
[0, 1]])
"""
def __new__(cls, d):
return RayTransferMatrix.__new__(cls, 1, d, 0, 1)
class FlatRefraction(RayTransferMatrix):
"""
Ray Transfer Matrix for refraction.
Parameters
==========
n1 : refractive index of one medium
n2 : refractive index of other medium
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import FlatRefraction
>>> from sympy import symbols
>>> n1, n2 = symbols('n1 n2')
>>> FlatRefraction(n1, n2)
Matrix([
[1, 0],
[0, n1/n2]])
"""
def __new__(cls, n1, n2):
n1, n2 = map(sympify, (n1, n2))
return RayTransferMatrix.__new__(cls, 1, 0, 0, n1/n2)
class CurvedRefraction(RayTransferMatrix):
"""
Ray Transfer Matrix for refraction on curved interface.
Parameters
==========
R : radius of curvature (positive for concave)
n1 : refractive index of one medium
n2 : refractive index of other medium
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import CurvedRefraction
>>> from sympy import symbols
>>> R, n1, n2 = symbols('R n1 n2')
>>> CurvedRefraction(R, n1, n2)
Matrix([
[ 1, 0],
[(n1 - n2)/(R*n2), n1/n2]])
"""
def __new__(cls, R, n1, n2):
R, n1, n2 = map(sympify, (R, n1, n2))
return RayTransferMatrix.__new__(cls, 1, 0, (n1 - n2)/R/n2, n1/n2)
class FlatMirror(RayTransferMatrix):
"""
Ray Transfer Matrix for reflection.
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import FlatMirror
>>> FlatMirror()
Matrix([
[1, 0],
[0, 1]])
"""
def __new__(cls):
return RayTransferMatrix.__new__(cls, 1, 0, 0, 1)
class CurvedMirror(RayTransferMatrix):
"""
Ray Transfer Matrix for reflection from curved surface.
Parameters
==========
R : radius of curvature (positive for concave)
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import CurvedMirror
>>> from sympy import symbols
>>> R = symbols('R')
>>> CurvedMirror(R)
Matrix([
[ 1, 0],
[-2/R, 1]])
"""
def __new__(cls, R):
R = sympify(R)
return RayTransferMatrix.__new__(cls, 1, 0, -2/R, 1)
class ThinLens(RayTransferMatrix):
"""
Ray Transfer Matrix for a thin lens.
Parameters
==========
f : the focal distance
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import ThinLens
>>> from sympy import symbols
>>> f = symbols('f')
>>> ThinLens(f)
Matrix([
[ 1, 0],
[-1/f, 1]])
"""
def __new__(cls, f):
f = sympify(f)
return RayTransferMatrix.__new__(cls, 1, 0, -1/f, 1)
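# Illustrative sketch (hypothetical helper, not part of the public API): the
# ABCD matrices defined above compose by ordinary matrix multiplication, with
# the element the ray meets first appearing as the right-most factor.
def _single_lens_system(d_obj, f, d_img):
    """System matrix for: free space of length d_obj, a thin lens of focal
    length f, then free space of length d_img, in the order the ray
    encounters them."""
    return FreeSpace(d_img)*ThinLens(f)*FreeSpace(d_obj)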
###
# Representation for geometric ray
###
class GeometricRay(MutableDenseMatrix):
"""
Representation for a geometric ray in the Ray Transfer Matrix formalism.
Parameters
==========
h : height, and
angle : angle, or
matrix : a 2x1 matrix (Matrix(2, 1, [height, angle]))
Examples
========
>>> from sympy.physics.optics import GeometricRay, FreeSpace
>>> from sympy import symbols, Matrix
>>> d, h, angle = symbols('d, h, angle')
>>> GeometricRay(h, angle)
Matrix([
[ h],
[angle]])
>>> FreeSpace(d)*GeometricRay(h, angle)
Matrix([
[angle*d + h],
[ angle]])
>>> GeometricRay( Matrix( ((h,), (angle,)) ) )
Matrix([
[ h],
[angle]])
See Also
========
RayTransferMatrix
"""
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], Matrix) \
and args[0].shape == (2, 1):
temp = args[0]
elif len(args) == 2:
temp = ((args[0],), (args[1],))
else:
raise ValueError(filldedent('''
Expecting 2x1 Matrix or the 2 elements of
the Matrix but got %s''' % str(args)))
return Matrix.__new__(cls, temp)
@property
def height(self):
"""
The distance from the optical axis.
Examples
========
>>> from sympy.physics.optics import GeometricRay
>>> from sympy import symbols
>>> h, angle = symbols('h, angle')
>>> gRay = GeometricRay(h, angle)
>>> gRay.height
h
"""
return self[0]
@property
def angle(self):
"""
The angle with the optical axis.
Examples
========
>>> from sympy.physics.optics import GeometricRay
>>> from sympy import symbols
>>> h, angle = symbols('h, angle')
>>> gRay = GeometricRay(h, angle)
>>> gRay.angle
angle
"""
return self[1]
###
# Representation for gauss beam
###
class BeamParameter(Expr):
"""
Representation for a gaussian ray in the Ray Transfer Matrix formalism.
Parameters
==========
wavelen : the wavelength,
z : the distance to waist, and
w : the waist, or
z_r : the rayleigh range
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
>>> p.q.n()
1.0 + 5.92753330865999*I
>>> p.w_0.n()
0.00100000000000000
>>> p.z_r.n()
5.92753330865999
>>> from sympy.physics.optics import FreeSpace
>>> fs = FreeSpace(10)
>>> p1 = fs*p
>>> p.w.n()
0.00101413072159615
>>> p1.w.n()
0.00210803120913829
See Also
========
RayTransferMatrix
References
==========
.. [1] https://en.wikipedia.org/wiki/Complex_beam_parameter
.. [2] https://en.wikipedia.org/wiki/Gaussian_beam
"""
#TODO A class Complex may be implemented. The BeamParameter may
# subclass it. See:
# https://groups.google.com/d/topic/sympy/7XkU07NRBEs/discussion
def __new__(cls, wavelen, z, z_r=None, w=None):
wavelen = sympify(wavelen)
z = sympify(z)
if z_r is not None and w is None:
z_r = sympify(z_r)
elif w is not None and z_r is None:
z_r = waist2rayleigh(sympify(w), wavelen)
else:
raise ValueError('Constructor expects exactly one named argument.')
return Expr.__new__(cls, wavelen, z, z_r)
@property
def wavelen(self):
return self.args[0]
@property
def z(self):
return self.args[1]
@property
def z_r(self):
return self.args[2]
@property
def q(self):
"""
The complex parameter representing the beam.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
"""
return self.z + I*self.z_r
@property
def radius(self):
"""
The radius of curvature of the phase front.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.radius
1 + 3.55998576005696*pi**2
"""
return self.z*(1 + (self.z_r/self.z)**2)
@property
def w(self):
"""
The beam radius at `1/e^2` intensity.
See Also
========
w_0 : the minimal radius of beam
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w
0.001*sqrt(0.2809/pi**2 + 1)
"""
return self.w_0*sqrt(1 + (self.z/self.z_r)**2)
@property
def w_0(self):
"""
The beam waist (minimal radius).
See Also
========
w : the beam radius at `1/e^2` intensity
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w_0
0.00100000000000000
"""
return sqrt(self.z_r/pi*self.wavelen)
@property
def divergence(self):
"""
Half of the total angular spread.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.divergence
0.00053/pi
"""
return self.wavelen/pi/self.w_0
@property
def gouy(self):
"""
The Gouy phase.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.gouy
atan(0.53/pi)
"""
return atan2(self.z, self.z_r)
@property
def waist_approximation_limit(self):
"""
The minimal waist for which the gauss beam approximation is valid.
The gauss beam is a solution to the paraxial equation. For curvatures
that are too great it is not a valid approximation.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.waist_approximation_limit
1.06e-6/pi
"""
return 2*self.wavelen/pi
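# Illustrative sketch (hypothetical helper, not part of the public API):
# RayTransferMatrix.__mul__ above applies the ABCD law
# q' = (A*q + B)/(C*q + D) when the right operand is a BeamParameter, so a
# gaussian beam is pushed through an optical train the same way as a ray.
def _lens_then_propagate(beam, f, d):
    """Beam parameter after a thin lens of focal length f followed by free
    space of length d."""
    return FreeSpace(d)*(ThinLens(f)*beam)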
###
# Utilities
###
def waist2rayleigh(w, wavelen):
"""
Calculate the rayleigh range from the waist of a gaussian beam.
See Also
========
rayleigh2waist, BeamParameter
Examples
========
>>> from sympy.physics.optics import waist2rayleigh
>>> from sympy import symbols
>>> w, wavelen = symbols('w wavelen')
>>> waist2rayleigh(w, wavelen)
pi*w**2/wavelen
"""
w, wavelen = map(sympify, (w, wavelen))
return w**2*pi/wavelen
def rayleigh2waist(z_r, wavelen):
"""Calculate the waist from the rayleigh range of a gaussian beam.
See Also
========
waist2rayleigh, BeamParameter
Examples
========
>>> from sympy.physics.optics import rayleigh2waist
>>> from sympy import symbols
>>> z_r, wavelen = symbols('z_r wavelen')
>>> rayleigh2waist(z_r, wavelen)
sqrt(wavelen*z_r)/sqrt(pi)
"""
z_r, wavelen = map(sympify, (z_r, wavelen))
return sqrt(z_r/pi*wavelen)
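# Round-trip sketch (illustrative, not part of the public API): for a
# positive waist the two helpers above are inverses of each other.
def _waist_roundtrip_residual():
    from sympy import symbols, simplify
    w, wavelen = symbols('w wavelen', positive=True)
    # rayleigh2waist(waist2rayleigh(w, wavelen), wavelen) reduces to w
    return simplify(rayleigh2waist(waist2rayleigh(w, wavelen), wavelen) - w)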
def geometric_conj_ab(a, b):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Takes the distances to the optical element and returns the needed
focal distance.
See Also
========
geometric_conj_af, geometric_conj_bf
Examples
========
>>> from sympy.physics.optics import geometric_conj_ab
>>> from sympy import symbols
>>> a, b = symbols('a b')
>>> geometric_conj_ab(a, b)
a*b/(a + b)
"""
a, b = map(sympify, (a, b))
if a.is_infinite or b.is_infinite:
return a if b.is_infinite else b
else:
return a*b/(a + b)
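# Quick algebraic sketch (illustrative, not part of the public API): the
# focal distance returned above satisfies the thin-lens conjugation relation
# 1/a + 1/b = 1/f for finite conjugates.
def _geometric_conj_ab_residual():
    from sympy import symbols
    a, b = symbols('a b', positive=True)
    # together(...) returns 0, confirming 1/a + 1/b == 1/f
    return together(1/a + 1/b - 1/geometric_conj_ab(a, b))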
def geometric_conj_af(a, f):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Takes the object distance (for geometric_conj_af) or the image distance
(for geometric_conj_bf) to the optical element and the focal distance.
Then it returns the other distance needed for conjugation.
See Also
========
geometric_conj_ab
Examples
========
>>> from sympy.physics.optics.gaussopt import geometric_conj_af, geometric_conj_bf
>>> from sympy import symbols
>>> a, b, f = symbols('a b f')
>>> geometric_conj_af(a, f)
a*f/(a - f)
>>> geometric_conj_bf(b, f)
b*f/(b - f)
"""
a, f = map(sympify, (a, f))
return -geometric_conj_ab(a, -f)
geometric_conj_bf = geometric_conj_af
def gaussian_conj(s_in, z_r_in, f):
"""
Conjugation relation for gaussian beams.
Parameters
==========
s_in : the distance to optical element from the waist
z_r_in : the rayleigh range of the incident beam
f : the focal length of the optical element
Returns
=======
a tuple containing (s_out, z_r_out, m)
s_out : the distance between the new waist and the optical element
z_r_out : the rayleigh range of the emergent beam
    m : the ratio between the new and the old waists
Examples
========
>>> from sympy.physics.optics import gaussian_conj
>>> from sympy import symbols
>>> s_in, z_r_in, f = symbols('s_in z_r_in f')
>>> gaussian_conj(s_in, z_r_in, f)[0]
1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
>>> gaussian_conj(s_in, z_r_in, f)[1]
z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
>>> gaussian_conj(s_in, z_r_in, f)[2]
1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
"""
s_in, z_r_in, f = map(sympify, (s_in, z_r_in, f))
s_out = 1 / ( -1/(s_in + z_r_in**2/(s_in - f)) + 1/f )
m = 1/sqrt((1 - (s_in/f)**2) + (z_r_in/f)**2)
z_r_out = z_r_in / ((1 - (s_in/f)**2) + (z_r_in/f)**2)
return (s_out, z_r_out, m)
def conjugate_gauss_beams(wavelen, waist_in, waist_out, **kwargs):
"""
Find the optical setup conjugating the object/image waists.
Parameters
==========
wavelen : the wavelength of the beam
waist_in and waist_out : the waists to be conjugated
f : the focal distance of the element used in the conjugation
Returns
=======
a tuple containing (s_in, s_out, f)
s_in : the distance before the optical element
s_out : the distance after the optical element
f : the focal distance of the optical element
Examples
========
>>> from sympy.physics.optics import conjugate_gauss_beams
>>> from sympy import symbols, factor
>>> l, w_i, w_o, f = symbols('l w_i w_o f')
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[0]
f*(1 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))
>>> factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1])
f*w_o**2*(w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 -
pi**2*w_i**4/(f**2*l**2)))/w_i**2
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[2]
f
"""
#TODO add the other possible arguments
wavelen, waist_in, waist_out = map(sympify, (wavelen, waist_in, waist_out))
m = waist_out / waist_in
z = waist2rayleigh(waist_in, wavelen)
if len(kwargs) != 1:
raise ValueError("The function expects only one named argument")
elif 'dist' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
elif 'f' in kwargs:
f = sympify(kwargs['f'])
s_in = f * (1 - sqrt(1/m**2 - z**2/f**2))
s_out = gaussian_conj(s_in, z, f)[0]
elif 's_in' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
else:
raise ValueError(filldedent('''
            The function expects the focal length as a named argument'''))
return (s_in, s_out, f)
#TODO
#def plot_beam():
# """Plot the beam radius as it propagates in space."""
# pass
#TODO
#def plot_beam_conjugation():
# """
# Plot the intersection of two beams.
#
# Represents the conjugation relation.
#
# See Also
# ========
#
# conjugate_gauss_beams
# """
# pass
|
2762a623822d2da367a0736bb31e5ddfc19d37c2ca5aee2c082843f09d638089 | from sympy.external import import_module
from sympy import Mul, Integer
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.gate import (X, Y, Z, H, CNOT,
IdentityGate, CGate, PhaseGate, TGate)
from sympy.physics.quantum.identitysearch import (generate_gate_rules,
generate_equivalent_ids, GateIdentity, bfs_identity_search,
is_scalar_sparse_matrix,
is_scalar_nonsparse_matrix, is_degenerate, is_reducible)
from sympy.testing.pytest import skip
def create_gate_sequence(qubit=0):
gates = (X(qubit), Y(qubit), Z(qubit), H(qubit))
return gates
def test_generate_gate_rules_1():
# Test with tuples
(x, y, z, h) = create_gate_sequence()
ph = PhaseGate(0)
cgate_t = CGate(0, TGate(1))
assert generate_gate_rules((x,)) == {((x,), ())}
gate_rules = {((x, x), ()),
((x,), (x,))}
assert generate_gate_rules((x, x)) == gate_rules
gate_rules = {((x, y, x), ()),
((y, x, x), ()),
((x, x, y), ()),
((y, x), (x,)),
((x, y), (x,)),
((y,), (x, x))}
assert generate_gate_rules((x, y, x)) == gate_rules
gate_rules = {((x, y, z), ()), ((y, z, x), ()), ((z, x, y), ()),
((), (x, z, y)), ((), (y, x, z)), ((), (z, y, x)),
((x,), (z, y)), ((y, z), (x,)), ((y,), (x, z)),
((z, x), (y,)), ((z,), (y, x)), ((x, y), (z,))}
actual = generate_gate_rules((x, y, z))
assert actual == gate_rules
gate_rules = {
((), (h, z, y, x)), ((), (x, h, z, y)), ((), (y, x, h, z)),
((), (z, y, x, h)), ((h,), (z, y, x)), ((x,), (h, z, y)),
((y,), (x, h, z)), ((z,), (y, x, h)), ((h, x), (z, y)),
((x, y), (h, z)), ((y, z), (x, h)), ((z, h), (y, x)),
((h, x, y), (z,)), ((x, y, z), (h,)), ((y, z, h), (x,)),
((z, h, x), (y,)), ((h, x, y, z), ()), ((x, y, z, h), ()),
((y, z, h, x), ()), ((z, h, x, y), ())}
actual = generate_gate_rules((x, y, z, h))
assert actual == gate_rules
gate_rules = {((), (cgate_t**(-1), ph**(-1), x)),
((), (ph**(-1), x, cgate_t**(-1))),
((), (x, cgate_t**(-1), ph**(-1))),
((cgate_t,), (ph**(-1), x)),
((ph,), (x, cgate_t**(-1))),
((x,), (cgate_t**(-1), ph**(-1))),
((cgate_t, x), (ph**(-1),)),
((ph, cgate_t), (x,)),
((x, ph), (cgate_t**(-1),)),
((cgate_t, x, ph), ()),
((ph, cgate_t, x), ()),
((x, ph, cgate_t), ())}
actual = generate_gate_rules((x, ph, cgate_t))
assert actual == gate_rules
gate_rules = {(Integer(1), cgate_t**(-1)*ph**(-1)*x),
(Integer(1), ph**(-1)*x*cgate_t**(-1)),
(Integer(1), x*cgate_t**(-1)*ph**(-1)),
(cgate_t, ph**(-1)*x),
(ph, x*cgate_t**(-1)),
(x, cgate_t**(-1)*ph**(-1)),
(cgate_t*x, ph**(-1)),
(ph*cgate_t, x),
(x*ph, cgate_t**(-1)),
(cgate_t*x*ph, Integer(1)),
(ph*cgate_t*x, Integer(1)),
(x*ph*cgate_t, Integer(1))}
actual = generate_gate_rules((x, ph, cgate_t), return_as_muls=True)
assert actual == gate_rules
def test_generate_gate_rules_2():
# Test with Muls
(x, y, z, h) = create_gate_sequence()
ph = PhaseGate(0)
cgate_t = CGate(0, TGate(1))
# Note: 1 (type int) is not the same as 1 (type One)
expected = {(x, Integer(1))}
assert generate_gate_rules((x,), return_as_muls=True) == expected
expected = {(Integer(1), Integer(1))}
assert generate_gate_rules(x*x, return_as_muls=True) == expected
expected = {((), ())}
assert generate_gate_rules(x*x, return_as_muls=False) == expected
gate_rules = {(x*y*x, Integer(1)),
(y, Integer(1)),
(y*x, x),
(x*y, x)}
assert generate_gate_rules(x*y*x, return_as_muls=True) == gate_rules
gate_rules = {(x*y*z, Integer(1)),
(y*z*x, Integer(1)),
(z*x*y, Integer(1)),
(Integer(1), x*z*y),
(Integer(1), y*x*z),
(Integer(1), z*y*x),
(x, z*y),
(y*z, x),
(y, x*z),
(z*x, y),
(z, y*x),
(x*y, z)}
actual = generate_gate_rules(x*y*z, return_as_muls=True)
assert actual == gate_rules
gate_rules = {(Integer(1), h*z*y*x),
(Integer(1), x*h*z*y),
(Integer(1), y*x*h*z),
(Integer(1), z*y*x*h),
(h, z*y*x), (x, h*z*y),
(y, x*h*z), (z, y*x*h),
(h*x, z*y), (z*h, y*x),
(x*y, h*z), (y*z, x*h),
(h*x*y, z), (x*y*z, h),
(y*z*h, x), (z*h*x, y),
(h*x*y*z, Integer(1)),
(x*y*z*h, Integer(1)),
(y*z*h*x, Integer(1)),
(z*h*x*y, Integer(1))}
actual = generate_gate_rules(x*y*z*h, return_as_muls=True)
assert actual == gate_rules
gate_rules = {(Integer(1), cgate_t**(-1)*ph**(-1)*x),
(Integer(1), ph**(-1)*x*cgate_t**(-1)),
(Integer(1), x*cgate_t**(-1)*ph**(-1)),
(cgate_t, ph**(-1)*x),
(ph, x*cgate_t**(-1)),
(x, cgate_t**(-1)*ph**(-1)),
(cgate_t*x, ph**(-1)),
(ph*cgate_t, x),
(x*ph, cgate_t**(-1)),
(cgate_t*x*ph, Integer(1)),
(ph*cgate_t*x, Integer(1)),
(x*ph*cgate_t, Integer(1))}
actual = generate_gate_rules(x*ph*cgate_t, return_as_muls=True)
assert actual == gate_rules
gate_rules = {((), (cgate_t**(-1), ph**(-1), x)),
((), (ph**(-1), x, cgate_t**(-1))),
((), (x, cgate_t**(-1), ph**(-1))),
((cgate_t,), (ph**(-1), x)),
((ph,), (x, cgate_t**(-1))),
((x,), (cgate_t**(-1), ph**(-1))),
((cgate_t, x), (ph**(-1),)),
((ph, cgate_t), (x,)),
((x, ph), (cgate_t**(-1),)),
((cgate_t, x, ph), ()),
((ph, cgate_t, x), ()),
((x, ph, cgate_t), ())}
actual = generate_gate_rules(x*ph*cgate_t)
assert actual == gate_rules
def test_generate_equivalent_ids_1():
# Test with tuples
(x, y, z, h) = create_gate_sequence()
assert generate_equivalent_ids((x,)) == {(x,)}
assert generate_equivalent_ids((x, x)) == {(x, x)}
assert generate_equivalent_ids((x, y)) == {(x, y), (y, x)}
gate_seq = (x, y, z)
gate_ids = {(x, y, z), (y, z, x), (z, x, y), (z, y, x),
(y, x, z), (x, z, y)}
assert generate_equivalent_ids(gate_seq) == gate_ids
gate_ids = {Mul(x, y, z), Mul(y, z, x), Mul(z, x, y),
Mul(z, y, x), Mul(y, x, z), Mul(x, z, y)}
assert generate_equivalent_ids(gate_seq, return_as_muls=True) == gate_ids
gate_seq = (x, y, z, h)
gate_ids = {(x, y, z, h), (y, z, h, x),
(h, x, y, z), (h, z, y, x),
(z, y, x, h), (y, x, h, z),
(z, h, x, y), (x, h, z, y)}
assert generate_equivalent_ids(gate_seq) == gate_ids
gate_seq = (x, y, x, y)
gate_ids = {(x, y, x, y), (y, x, y, x)}
assert generate_equivalent_ids(gate_seq) == gate_ids
cgate_y = CGate((1,), y)
gate_seq = (y, cgate_y, y, cgate_y)
gate_ids = {(y, cgate_y, y, cgate_y), (cgate_y, y, cgate_y, y)}
assert generate_equivalent_ids(gate_seq) == gate_ids
cnot = CNOT(1, 0)
cgate_z = CGate((0,), Z(1))
gate_seq = (cnot, h, cgate_z, h)
gate_ids = {(cnot, h, cgate_z, h), (h, cgate_z, h, cnot),
(h, cnot, h, cgate_z), (cgate_z, h, cnot, h)}
assert generate_equivalent_ids(gate_seq) == gate_ids
def test_generate_equivalent_ids_2():
# Test with Muls
(x, y, z, h) = create_gate_sequence()
assert generate_equivalent_ids((x,), return_as_muls=True) == {x}
gate_ids = {Integer(1)}
assert generate_equivalent_ids(x*x, return_as_muls=True) == gate_ids
gate_ids = {x*y, y*x}
assert generate_equivalent_ids(x*y, return_as_muls=True) == gate_ids
gate_ids = {(x, y), (y, x)}
assert generate_equivalent_ids(x*y) == gate_ids
circuit = Mul(*(x, y, z))
gate_ids = {x*y*z, y*z*x, z*x*y, z*y*x,
y*x*z, x*z*y}
assert generate_equivalent_ids(circuit, return_as_muls=True) == gate_ids
circuit = Mul(*(x, y, z, h))
gate_ids = {x*y*z*h, y*z*h*x,
h*x*y*z, h*z*y*x,
z*y*x*h, y*x*h*z,
z*h*x*y, x*h*z*y}
assert generate_equivalent_ids(circuit, return_as_muls=True) == gate_ids
circuit = Mul(*(x, y, x, y))
gate_ids = {x*y*x*y, y*x*y*x}
assert generate_equivalent_ids(circuit, return_as_muls=True) == gate_ids
cgate_y = CGate((1,), y)
circuit = Mul(*(y, cgate_y, y, cgate_y))
gate_ids = {y*cgate_y*y*cgate_y, cgate_y*y*cgate_y*y}
assert generate_equivalent_ids(circuit, return_as_muls=True) == gate_ids
cnot = CNOT(1, 0)
cgate_z = CGate((0,), Z(1))
circuit = Mul(*(cnot, h, cgate_z, h))
gate_ids = {cnot*h*cgate_z*h, h*cgate_z*h*cnot,
h*cnot*h*cgate_z, cgate_z*h*cnot*h}
assert generate_equivalent_ids(circuit, return_as_muls=True) == gate_ids
def test_is_scalar_nonsparse_matrix():
numqubits = 2
id_only = False
id_gate = (IdentityGate(1),)
actual = is_scalar_nonsparse_matrix(id_gate, numqubits, id_only)
assert actual is True
x0 = X(0)
xx_circuit = (x0, x0)
actual = is_scalar_nonsparse_matrix(xx_circuit, numqubits, id_only)
assert actual is True
x1 = X(1)
y1 = Y(1)
xy_circuit = (x1, y1)
actual = is_scalar_nonsparse_matrix(xy_circuit, numqubits, id_only)
assert actual is False
z1 = Z(1)
xyz_circuit = (x1, y1, z1)
actual = is_scalar_nonsparse_matrix(xyz_circuit, numqubits, id_only)
assert actual is True
cnot = CNOT(1, 0)
cnot_circuit = (cnot, cnot)
actual = is_scalar_nonsparse_matrix(cnot_circuit, numqubits, id_only)
assert actual is True
h = H(0)
hh_circuit = (h, h)
actual = is_scalar_nonsparse_matrix(hh_circuit, numqubits, id_only)
assert actual is True
h1 = H(1)
xhzh_circuit = (x1, h1, z1, h1)
actual = is_scalar_nonsparse_matrix(xhzh_circuit, numqubits, id_only)
assert actual is True
id_only = True
actual = is_scalar_nonsparse_matrix(xhzh_circuit, numqubits, id_only)
assert actual is True
actual = is_scalar_nonsparse_matrix(xyz_circuit, numqubits, id_only)
assert actual is False
actual = is_scalar_nonsparse_matrix(cnot_circuit, numqubits, id_only)
assert actual is True
actual = is_scalar_nonsparse_matrix(hh_circuit, numqubits, id_only)
assert actual is True
def test_is_scalar_sparse_matrix():
np = import_module('numpy')
if not np:
skip("numpy not installed.")
scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']})
if not scipy:
skip("scipy not installed.")
numqubits = 2
id_only = False
id_gate = (IdentityGate(1),)
assert is_scalar_sparse_matrix(id_gate, numqubits, id_only) is True
x0 = X(0)
xx_circuit = (x0, x0)
assert is_scalar_sparse_matrix(xx_circuit, numqubits, id_only) is True
x1 = X(1)
y1 = Y(1)
xy_circuit = (x1, y1)
assert is_scalar_sparse_matrix(xy_circuit, numqubits, id_only) is False
z1 = Z(1)
xyz_circuit = (x1, y1, z1)
assert is_scalar_sparse_matrix(xyz_circuit, numqubits, id_only) is True
cnot = CNOT(1, 0)
cnot_circuit = (cnot, cnot)
assert is_scalar_sparse_matrix(cnot_circuit, numqubits, id_only) is True
h = H(0)
hh_circuit = (h, h)
assert is_scalar_sparse_matrix(hh_circuit, numqubits, id_only) is True
# NOTE:
# The elements of the sparse matrix for the following circuit
    # are actually 1.0000000000000002+0.0j.
h1 = H(1)
xhzh_circuit = (x1, h1, z1, h1)
assert is_scalar_sparse_matrix(xhzh_circuit, numqubits, id_only) is True
id_only = True
assert is_scalar_sparse_matrix(xhzh_circuit, numqubits, id_only) is True
assert is_scalar_sparse_matrix(xyz_circuit, numqubits, id_only) is False
assert is_scalar_sparse_matrix(cnot_circuit, numqubits, id_only) is True
assert is_scalar_sparse_matrix(hh_circuit, numqubits, id_only) is True
def test_is_degenerate():
(x, y, z, h) = create_gate_sequence()
gate_id = GateIdentity(x, y, z)
ids = {gate_id}
another_id = (z, y, x)
assert is_degenerate(ids, another_id) is True
def test_is_reducible():
nqubits = 2
(x, y, z, h) = create_gate_sequence()
circuit = (x, y, y)
assert is_reducible(circuit, nqubits, 1, 3) is True
circuit = (x, y, x)
assert is_reducible(circuit, nqubits, 1, 3) is False
circuit = (x, y, y, x)
assert is_reducible(circuit, nqubits, 0, 4) is True
circuit = (x, y, y, x)
assert is_reducible(circuit, nqubits, 1, 3) is True
circuit = (x, y, z, y, y)
assert is_reducible(circuit, nqubits, 1, 5) is True
def test_bfs_identity_search():
assert bfs_identity_search([], 1) == set()
(x, y, z, h) = create_gate_sequence()
gate_list = [x]
id_set = {GateIdentity(x, x)}
assert bfs_identity_search(gate_list, 1, max_depth=2) == id_set
# Set should not contain degenerate quantum circuits
gate_list = [x, y, z]
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(x, y, z)}
assert bfs_identity_search(gate_list, 1) == id_set
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(x, y, z),
GateIdentity(x, y, x, y),
GateIdentity(x, z, x, z),
GateIdentity(y, z, y, z)}
assert bfs_identity_search(gate_list, 1, max_depth=4) == id_set
assert bfs_identity_search(gate_list, 1, max_depth=5) == id_set
gate_list = [x, y, z, h]
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(h, h),
GateIdentity(x, y, z),
GateIdentity(x, y, x, y),
GateIdentity(x, z, x, z),
GateIdentity(x, h, z, h),
GateIdentity(y, z, y, z),
GateIdentity(y, h, y, h)}
assert bfs_identity_search(gate_list, 1) == id_set
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(h, h)}
assert id_set == bfs_identity_search(gate_list, 1, max_depth=3,
identity_only=True)
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(h, h),
GateIdentity(x, y, z),
GateIdentity(x, y, x, y),
GateIdentity(x, z, x, z),
GateIdentity(x, h, z, h),
GateIdentity(y, z, y, z),
GateIdentity(y, h, y, h),
GateIdentity(x, y, h, x, h),
GateIdentity(x, z, h, y, h),
GateIdentity(y, z, h, z, h)}
assert bfs_identity_search(gate_list, 1, max_depth=5) == id_set
id_set = {GateIdentity(x, x),
GateIdentity(y, y),
GateIdentity(z, z),
GateIdentity(h, h),
GateIdentity(x, h, z, h)}
assert id_set == bfs_identity_search(gate_list, 1, max_depth=4,
identity_only=True)
cnot = CNOT(1, 0)
gate_list = [x, cnot]
id_set = {GateIdentity(x, x),
GateIdentity(cnot, cnot),
GateIdentity(x, cnot, x, cnot)}
assert bfs_identity_search(gate_list, 2, max_depth=4) == id_set
cgate_x = CGate((1,), x)
gate_list = [x, cgate_x]
id_set = {GateIdentity(x, x),
GateIdentity(cgate_x, cgate_x),
GateIdentity(x, cgate_x, x, cgate_x)}
assert bfs_identity_search(gate_list, 2, max_depth=4) == id_set
cgate_z = CGate((0,), Z(1))
gate_list = [cnot, cgate_z, h]
id_set = {GateIdentity(h, h),
GateIdentity(cgate_z, cgate_z),
GateIdentity(cnot, cnot),
GateIdentity(cnot, h, cgate_z, h)}
assert bfs_identity_search(gate_list, 2, max_depth=4) == id_set
s = PhaseGate(0)
t = TGate(0)
gate_list = [s, t]
id_set = {GateIdentity(s, s, s, s)}
assert bfs_identity_search(gate_list, 1, max_depth=4) == id_set
def test_bfs_identity_search_xfail():
s = PhaseGate(0)
t = TGate(0)
gate_list = [Dagger(s), t]
id_set = {GateIdentity(Dagger(s), t, t)}
assert bfs_identity_search(gate_list, 1, max_depth=3) == id_set
|
c8f324b59e734516caf4dd5a964937a2d6c23f901f5d4a595687040430ec6bea | from sympy.core.backend import (cos, expand, Matrix, sin, symbols, tan, sqrt, S,
zeros)
from sympy import simplify
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, KanesMethod, inertia, Particle,
dot)
def test_one_dof():
# This is for a 1 dof spring-mass-damper case.
# It is described in more detail in the KanesMethod docstring.
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, c, k = symbols('m c k')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, u * N.x)
kd = [qd - u]
FL = [(P, (-k * q - c * u) * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
assert expand(rhs[0]) == expand(-(q * k + u * c) / m)
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(2, 1)
assert (KM.linearize(A_and_B=True, )[0] == Matrix([[0, 1], [-k/m, -c/m]]))
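# Illustrative sketch (not one of the original tests): turning the symbolic
# equations of motion from the 1 dof case above into a numeric right-hand
# side with lambdify and stepping it with a crude explicit Euler loop. The
# parameter values, initial state and step size are arbitrary choices made
# for this example.
def _simulate_one_dof_sketch(steps=100, dt=0.01):
    from sympy import lambdify
    q, u = dynamicsymbols('q u')
    qd, ud = dynamicsymbols('q u', 1)
    m, c, k = symbols('m c k')
    N = ReferenceFrame('N')
    P = Point('P')
    P.set_vel(N, u * N.x)
    pa = Particle('pa', P, m)
    KM = KanesMethod(N, [q], [u], [qd - u])
    KM.kanes_equations([pa], [(P, (-k * q - c * u) * N.x)])
    # KM.rhs() gives [q', u'] in terms of the state and the parameters;
    # replace the time-dependent functions by plain symbols before lambdify.
    qs, us = symbols('qs us')
    rhs = KM.rhs().subs({q: qs, u: us})
    f = lambdify((qs, us, m, c, k), list(rhs), modules='math')
    state = [1.0, 0.0]  # initial displacement and speed
    for _ in range(steps):
        dq, du = f(state[0], state[1], 1.0, 0.5, 2.0)
        state = [state[0] + dt * dq, state[1] + dt * du]
    return state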
def test_two_dof():
# This is for a 2 d.o.f., 2 particle spring-mass-damper.
# The first coordinate is the displacement of the first particle, and the
# second is the relative displacement between the first and second
    # particles. Speeds are defined as the time derivatives of the coordinates.
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
q1d, q2d, u1d, u2d = dynamicsymbols('q1 q2 u1 u2', 1)
m, c1, c2, k1, k2 = symbols('m c1 c2 k1 k2')
N = ReferenceFrame('N')
P1 = Point('P1')
P2 = Point('P2')
P1.set_vel(N, u1 * N.x)
P2.set_vel(N, (u1 + u2) * N.x)
kd = [q1d - u1, q2d - u2]
# Now we create the list of forces, then assign properties to each
# particle, then create a list of all particles.
FL = [(P1, (-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2) * N.x), (P2, (-k2 *
q2 - c2 * u2) * N.x)]
pa1 = Particle('pa1', P1, m)
pa2 = Particle('pa2', P2, m)
BL = [pa1, pa2]
# Finally we create the KanesMethod object, specify the inertial frame,
# pass relevant information, and form Fr & Fr*. Then we calculate the mass
# matrix and forcing terms, and finally solve for the udots.
KM = KanesMethod(N, q_ind=[q1, q2], u_ind=[u1, u2], kd_eqs=kd)
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
assert expand(rhs[0]) == expand((-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2)/m)
assert expand(rhs[1]) == expand((k1 * q1 + c1 * u1 - 2 * k2 * q2 - 2 *
c2 * u2) / m)
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(4, 1)
def test_pend():
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, l, g = symbols('m l g')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, -l * u * sin(q) * N.x + l * u * cos(q) * N.y)
kd = [qd - u]
FL = [(P, m * g * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
rhs.simplify()
assert expand(rhs[0]) == expand(-g / l * sin(q))
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(2, 1)
def test_rolling_disc():
# Rolling Disc Example
    # Here the rolling disc is formed from the contact point up, removing the
    # need to introduce motion constraints. Only 3 configuration and 3
    # speed variables are needed to describe this system, along with the disc's
# mass and radius, and the local gravity (note that mass will drop out).
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
q1d, q2d, q3d, u1d, u2d, u3d = dynamicsymbols('q1 q2 q3 u1 u2 u3', 1)
r, m, g = symbols('r m g')
# The kinematics are formed by a series of simple rotations. Each simple
# rotation creates a new frame, and the next rotation is defined by the new
# frame's basis vectors. This example uses a 3-1-2 series of rotations, or
# Z, X, Y series of rotations. Angular velocity for this is defined using
# the second frame's basis (the lean frame).
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
w_R_N_qd = R.ang_vel_in(N)
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
# This is the translational kinematics. We create a point with no velocity
# in N; this is the contact point between the disc and ground. Next we form
# the position vector from the contact point to the disc's center of mass.
# Finally we form the velocity and acceleration of the disc.
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
# This is a simple way to form the inertia dyadic.
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
# Kinematic differential equations; how the generalized coordinate time
# derivatives relate to generalized speeds.
kd = [dot(R.ang_vel_in(N) - w_R_N_qd, uv) for uv in L]
# Creation of the force list; it is the gravitational force at the mass
# center of the disc. Then we create the disc by assigning a Point to the
# center of mass attribute, a ReferenceFrame to the frame attribute, and mass
# and inertia. Then we form the body list.
ForceList = [(Dmc, - m * g * Y.z)]
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyList = [BodyD]
# Finally we form the equations of motion, using the same steps we did
# before. Specify inertial frame, supply generalized speeds, supply
# kinematic differential equation dictionary, compute Fr from the force
# list and Fr* from the body list, compute the mass matrix and forcing
# terms, then solve for the u dots (time derivatives of the generalized
# speeds).
KM = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3], kd_eqs=kd)
KM.kanes_equations(BodyList, ForceList)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
kdd = KM.kindiffdict()
rhs = rhs.subs(kdd)
rhs.simplify()
assert rhs.expand() == Matrix([(6*u2*u3*r - u3**2*r*tan(q2) +
4*g*sin(q2))/(5*r), -2*u1*u3/3, u1*(-2*u2 + u3*tan(q2))]).expand()
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(6, 1)
# This code tests our output vs. benchmark values. When r=g=m=1, the
# critical speed (where all eigenvalues of the linearized equations are 0)
# is 1 / sqrt(3) for the upright case.
A = KM.linearize(A_and_B=True)[0]
A_upright = A.subs({r: 1, g: 1, m: 1}).subs({q1: 0, q2: 0, q3: 0, u1: 0, u3: 0})
import sympy
assert sympy.sympify(A_upright.subs({u2: 1 / sqrt(3)})).eigenvals() == {S.Zero: 6}
def test_aux():
    # Same as above, except we have 2 auxiliary speeds for the ground contact
    # point, whose velocity is known to be zero. In one case, we form the
    # equations and then substitute the auxiliary speeds (and their
    # derivatives, all zero) at the end; in the other case, we use the
    # built-in auxiliary speed support of KanesMethod. The equations from
    # each should be the same.
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
q1d, q2d, q3d, u1d, u2d, u3d = dynamicsymbols('q1 q2 q3 u1 u2 u3', 1)
u4, u5, f1, f2 = dynamicsymbols('u4, u5, f1, f2')
u4d, u5d = dynamicsymbols('u4, u5', 1)
r, m, g = symbols('r m g')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
w_R_N_qd = R.ang_vel_in(N)
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
C = Point('C')
C.set_vel(N, u4 * L.x + u5 * (Y.z ^ L.x))
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
Dmc.a2pt_theory(C, N, R)
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
kd = [dot(R.ang_vel_in(N) - w_R_N_qd, uv) for uv in L]
ForceList = [(Dmc, - m * g * Y.z), (C, f1 * L.x + f2 * (Y.z ^ L.x))]
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyList = [BodyD]
KM = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3, u4, u5],
kd_eqs=kd)
(fr, frstar) = KM.kanes_equations(BodyList, ForceList)
fr = fr.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar = frstar.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
KM2 = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3], kd_eqs=kd,
u_auxiliary=[u4, u5])
(fr2, frstar2) = KM2.kanes_equations(BodyList, ForceList)
fr2 = fr2.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar2 = frstar2.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar.simplify()
frstar2.simplify()
assert (fr - fr2).expand() == Matrix([0, 0, 0, 0, 0])
assert (frstar - frstar2).expand() == Matrix([0, 0, 0, 0, 0])
def test_parallel_axis():
# This is for a 2 dof inverted pendulum on a cart.
# This tests the parallel axis code in KanesMethod. The inertia of the
# pendulum is defined about the hinge, not about the center of mass.
# Defining the constants and knowns of the system
gravity = symbols('g')
k, ls = symbols('k ls')
a, mA, mC = symbols('a mA mC')
F = dynamicsymbols('F')
Ix, Iy, Iz = symbols('Ix Iy Iz')
# Declaring the Generalized coordinates and speeds
q1, q2 = dynamicsymbols('q1 q2')
q1d, q2d = dynamicsymbols('q1 q2', 1)
u1, u2 = dynamicsymbols('u1 u2')
u1d, u2d = dynamicsymbols('u1 u2', 1)
# Creating reference frames
N = ReferenceFrame('N')
A = ReferenceFrame('A')
A.orient(N, 'Axis', [-q2, N.z])
A.set_ang_vel(N, -u2 * N.z)
# Origin of Newtonian reference frame
O = Point('O')
# Creating and Locating the positions of the cart, C, and the
    # center of mass of the pendulum, Ao
C = O.locatenew('C', q1 * N.x)
Ao = C.locatenew('Ao', a * A.y)
# Defining velocities of the points
O.set_vel(N, 0)
C.set_vel(N, u1 * N.x)
Ao.v2pt_theory(C, N, A)
Cart = Particle('Cart', C, mC)
Pendulum = RigidBody('Pendulum', Ao, A, mA, (inertia(A, Ix, Iy, Iz), C))
# kinematical differential equations
kindiffs = [q1d - u1, q2d - u2]
bodyList = [Cart, Pendulum]
forceList = [(Ao, -N.y * gravity * mA),
(C, -N.y * gravity * mC),
(C, -N.x * k * (q1 - ls)),
(C, N.x * F)]
km = KanesMethod(N, [q1, q2], [u1, u2], kindiffs)
(fr, frstar) = km.kanes_equations(bodyList, forceList)
mm = km.mass_matrix_full
assert mm[3, 3] == Iz
def test_input_format():
# 1 dof problem from test_one_dof
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, c, k = symbols('m c k')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, u * N.x)
kd = [qd - u]
FL = [(P, (-k * q - c * u) * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
    # test for input format kane.kanes_equations((body1, body2, particle1))
    assert KM.kanes_equations(BL)[0] == Matrix([0])
    # test for input format kane.kanes_equations(bodies=(body1, body2), loads=None)
    assert KM.kanes_equations(bodies=BL, loads=None)[0] == Matrix([0])
    # test for input format kane.kanes_equations((body1, body2), loads=None)
    assert KM.kanes_equations(BL, loads=None)[0] == Matrix([0])
    # test for input format kane.kanes_equations((body1, body2))
    assert KM.kanes_equations(BL)[0] == Matrix([0])
# test for error raised when a wrong force list (in this case a string) is provided
from sympy.testing.pytest import raises
raises(ValueError, lambda: KM._form_fr('bad input'))
# 2 dof problem from test_two_dof
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
q1d, q2d, u1d, u2d = dynamicsymbols('q1 q2 u1 u2', 1)
m, c1, c2, k1, k2 = symbols('m c1 c2 k1 k2')
N = ReferenceFrame('N')
P1 = Point('P1')
P2 = Point('P2')
P1.set_vel(N, u1 * N.x)
P2.set_vel(N, (u1 + u2) * N.x)
kd = [q1d - u1, q2d - u2]
FL = ((P1, (-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2) * N.x), (P2, (-k2 *
q2 - c2 * u2) * N.x))
pa1 = Particle('pa1', P1, m)
pa2 = Particle('pa2', P2, m)
BL = (pa1, pa2)
KM = KanesMethod(N, q_ind=[q1, q2], u_ind=[u1, u2], kd_eqs=kd)
# test for input format
# kane.kanes_equations((body1, body2), (load1, load2))
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
assert expand(rhs[0]) == expand((-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2)/m)
assert expand(rhs[1]) == expand((k1 * q1 + c1 * u1 - 2 * k2 * q2 - 2 *
c2 * u2) / m)
|
885d238b5cb6ac561774fabc80223c2fd8a348f47e752da3b81e615f2b9285f9 | from sympy.core.backend import symbols, Matrix, cos, sin, atan, sqrt, Rational
from sympy import solve, simplify, sympify
from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point,\
dot, cross, inertia, KanesMethod, Particle, RigidBody, Lagrangian,\
LagrangesMethod
from sympy.testing.pytest import slow
@slow
def test_linearize_rolling_disc_kane():
# Symbols for time and constant parameters
t, r, m, g, v = symbols('t r m g v')
# Configuration variables and their time derivatives
q1, q2, q3, q4, q5, q6 = q = dynamicsymbols('q1:7')
q1d, q2d, q3d, q4d, q5d, q6d = qd = [qi.diff(t) for qi in q]
# Generalized speeds and their time derivatives
    u1, u2, u3, u4, u5, u6 = u = dynamicsymbols('u1:7')
u1d, u2d, u3d, u4d, u5d, u6d = [ui.diff(t) for ui in u]
# Reference frames
N = ReferenceFrame('N') # Inertial frame
NO = Point('NO') # Inertial origin
A = N.orientnew('A', 'Axis', [q1, N.z]) # Yaw intermediate frame
B = A.orientnew('B', 'Axis', [q2, A.x]) # Lean intermediate frame
C = B.orientnew('C', 'Axis', [q3, B.y]) # Disc fixed frame
CO = NO.locatenew('CO', q4*N.x + q5*N.y + q6*N.z) # Disc center
# Disc angular velocity in N expressed using time derivatives of coordinates
w_c_n_qd = C.ang_vel_in(N)
w_b_n_qd = B.ang_vel_in(N)
# Inertial angular velocity and angular acceleration of disc fixed frame
C.set_ang_vel(N, u1*B.x + u2*B.y + u3*B.z)
# Disc center velocity in N expressed using time derivatives of coordinates
v_co_n_qd = CO.pos_from(NO).dt(N)
# Disc center velocity in N expressed using generalized speeds
CO.set_vel(N, u4*C.x + u5*C.y + u6*C.z)
# Disc Ground Contact Point
P = CO.locatenew('P', r*B.z)
P.v2pt_theory(CO, N, C)
# Configuration constraint
f_c = Matrix([q6 - dot(CO.pos_from(P), N.z)])
# Velocity level constraints
f_v = Matrix([dot(P.vel(N), uv) for uv in C])
# Kinematic differential equations
kindiffs = Matrix([dot(w_c_n_qd - C.ang_vel_in(N), uv) for uv in B] +
[dot(v_co_n_qd - CO.vel(N), uv) for uv in N])
qdots = solve(kindiffs, qd)
# Set angular velocity of remaining frames
B.set_ang_vel(N, w_b_n_qd.subs(qdots))
C.set_ang_acc(N, C.ang_vel_in(N).dt(B) + cross(B.ang_vel_in(N), C.ang_vel_in(N)))
# Active forces
F_CO = m*g*A.z
# Create inertia dyadic of disc C about point CO
I = (m * r**2) / 4
J = (m * r**2) / 2
I_C_CO = inertia(C, I, J, I)
Disc = RigidBody('Disc', CO, C, m, (I_C_CO, CO))
BL = [Disc]
FL = [(CO, F_CO)]
KM = KanesMethod(N, [q1, q2, q3, q4, q5], [u1, u2, u3], kd_eqs=kindiffs,
q_dependent=[q6], configuration_constraints=f_c,
u_dependent=[u4, u5, u6], velocity_constraints=f_v)
(fr, fr_star) = KM.kanes_equations(BL, FL)
# Test generalized form equations
linearizer = KM.to_linearizer()
assert linearizer.f_c == f_c
assert linearizer.f_v == f_v
assert linearizer.f_a == f_v.diff(t).subs(KM.kindiffdict())
sol = solve(linearizer.f_0 + linearizer.f_1, qd)
for qi in qdots.keys():
assert sol[qi] == qdots[qi]
assert simplify(linearizer.f_2 + linearizer.f_3 - fr - fr_star) == Matrix([0, 0, 0])
# Perform the linearization
# Precomputed operating point
q_op = {q6: -r*cos(q2)}
u_op = {u1: 0,
u2: sin(q2)*q1d + q3d,
u3: cos(q2)*q1d,
u4: -r*(sin(q2)*q1d + q3d)*cos(q3),
u5: 0,
u6: -r*(sin(q2)*q1d + q3d)*sin(q3)}
qd_op = {q2d: 0,
q4d: -r*(sin(q2)*q1d + q3d)*cos(q1),
q5d: -r*(sin(q2)*q1d + q3d)*sin(q1),
q6d: 0}
ud_op = {u1d: 4*g*sin(q2)/(5*r) + sin(2*q2)*q1d**2/2 + 6*cos(q2)*q1d*q3d/5,
u2d: 0,
u3d: 0,
u4d: r*(sin(q2)*sin(q3)*q1d*q3d + sin(q3)*q3d**2),
u5d: r*(4*g*sin(q2)/(5*r) + sin(2*q2)*q1d**2/2 + 6*cos(q2)*q1d*q3d/5),
u6d: -r*(sin(q2)*cos(q3)*q1d*q3d + cos(q3)*q3d**2)}
A, B = linearizer.linearize(op_point=[q_op, u_op, qd_op, ud_op], A_and_B=True, simplify=True)
upright_nominal = {q1d: 0, q2: 0, m: 1, r: 1, g: 1}
# Precomputed solution
A_sol = Matrix([[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[sin(q1)*q3d, 0, 0, 0, 0, -sin(q1), -cos(q1), 0],
[-cos(q1)*q3d, 0, 0, 0, 0, cos(q1), -sin(q1), 0],
[0, Rational(4, 5), 0, 0, 0, 0, 0, 6*q3d/5],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -2*q3d, 0, 0]])
B_sol = Matrix([])
# Check that linearization is correct
assert A.subs(upright_nominal) == A_sol
assert B.subs(upright_nominal) == B_sol
# Check eigenvalues at critical speed are all zero:
assert sympify(A.subs(upright_nominal).subs(q3d, 1/sqrt(3))).eigenvals() == {0: 8}
def test_linearize_pendulum_kane_minimal():
q1 = dynamicsymbols('q1') # angle of pendulum
u1 = dynamicsymbols('u1') # Angular velocity
q1d = dynamicsymbols('q1', 1) # Angular velocity
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
A = N.orientnew('A', 'axis', [q1, N.z])
A.set_ang_vel(N, u1*N.z)
# Locate point P relative to the origin N*
P = pN.locatenew('P', L*A.x)
P.v2pt_theory(pN, N, A)
pP = Particle('pP', P, m)
# Create Kinematic Differential Equations
kde = Matrix([q1d - u1])
# Input the force resultant at P
R = m*g*N.x
# Solve for eom with kanes method
KM = KanesMethod(N, q_ind=[q1], u_ind=[u1], kd_eqs=kde)
(fr, frstar) = KM.kanes_equations([pP], [(P, R)])
# Linearize
A, B, inp_vec = KM.linearize(A_and_B=True, simplify=True)
assert A == Matrix([[0, 1], [-9.8*cos(q1)/L, 0]])
assert B == Matrix([])
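# Illustrative sketch (not one of the original tests): at the downward
# equilibrium the linearized state matrix checked above has purely imaginary
# eigenvalues +/- I*sqrt(g/L), i.e. the small-angle oscillation frequency.
# Rational(98, 10) stands in for the float 9.8 to keep the result exact.
def _pendulum_linearization_eigenvalues():
    L = symbols('L', positive=True)
    g = Rational(98, 10)
    # the matrix asserted in the test above, evaluated at q1 = 0
    A = Matrix([[0, 1], [-g/L, 0]])
    evals = list(A.eigenvals())
    # every eigenvalue satisfies lam**2 + g/L == 0, i.e. lam = +/- I*sqrt(g/L)
    assert all(simplify(lam**2 + g/L) == 0 for lam in evals)
    return evals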
def test_linearize_pendulum_kane_nonminimal():
# Create generalized coordinates and speeds for this non-minimal realization
# q1, q2 = N.x and N.y coordinates of pendulum
# u1, u2 = N.x and N.y velocities of pendulum
q1, q2 = dynamicsymbols('q1:3')
q1d, q2d = dynamicsymbols('q1:3', level=1)
u1, u2 = dynamicsymbols('u1:3')
u1d, u2d = dynamicsymbols('u1:3', level=1)
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
theta1 = atan(q2/q1)
A = N.orientnew('A', 'axis', [theta1, N.z])
# Locate the pendulum mass
P = pN.locatenew('P1', q1*N.x + q2*N.y)
pP = Particle('pP', P, m)
# Calculate the kinematic differential equations
kde = Matrix([q1d - u1,
q2d - u2])
dq_dict = solve(kde, [q1d, q2d])
# Set velocity of point P
P.set_vel(N, P.pos_from(pN).dt(N).subs(dq_dict))
# Configuration constraint is length of pendulum
f_c = Matrix([P.pos_from(pN).magnitude() - L])
# Velocity constraint is that the velocity in the A.x direction is
# always zero (the pendulum is never getting longer).
f_v = Matrix([P.vel(N).express(A).dot(A.x)])
f_v.simplify()
# Acceleration constraints is the time derivative of the velocity constraint
f_a = f_v.diff(t)
f_a.simplify()
# Input the force resultant at P
R = m*g*N.x
# Derive the equations of motion using the KanesMethod class.
KM = KanesMethod(N, q_ind=[q2], u_ind=[u2], q_dependent=[q1],
u_dependent=[u1], configuration_constraints=f_c,
velocity_constraints=f_v, acceleration_constraints=f_a, kd_eqs=kde)
(fr, frstar) = KM.kanes_equations([pP], [(P, R)])
# Set the operating point to be straight down, and non-moving
q_op = {q1: L, q2: 0}
u_op = {u1: 0, u2: 0}
ud_op = {u1d: 0, u2d: 0}
A, B, inp_vec = KM.linearize(op_point=[q_op, u_op, ud_op], A_and_B=True,
simplify=True)
assert A.expand() == Matrix([[0, 1], [-9.8/L, 0]])
assert B == Matrix([])
def test_linearize_pendulum_lagrange_minimal():
q1 = dynamicsymbols('q1') # angle of pendulum
q1d = dynamicsymbols('q1', 1) # Angular velocity
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
A = N.orientnew('A', 'axis', [q1, N.z])
A.set_ang_vel(N, q1d*N.z)
# Locate point P relative to the origin N*
P = pN.locatenew('P', L*A.x)
P.v2pt_theory(pN, N, A)
pP = Particle('pP', P, m)
# Solve for eom with Lagranges method
Lag = Lagrangian(N, pP)
LM = LagrangesMethod(Lag, [q1], forcelist=[(P, m*g*N.x)], frame=N)
LM.form_lagranges_equations()
# Linearize
A, B, inp_vec = LM.linearize([q1], [q1d], A_and_B=True)
assert A == Matrix([[0, 1], [-9.8*cos(q1)/L, 0]])
assert B == Matrix([])
def test_linearize_pendulum_lagrange_nonminimal():
q1, q2 = dynamicsymbols('q1:3')
q1d, q2d = dynamicsymbols('q1:3', level=1)
L, m, t = symbols('L, m, t')
g = 9.8
# Compose World Frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
theta1 = atan(q2/q1)
A = N.orientnew('A', 'axis', [theta1, N.z])
# Create point P, the pendulum mass
P = pN.locatenew('P1', q1*N.x + q2*N.y)
P.set_vel(N, P.pos_from(pN).dt(N))
pP = Particle('pP', P, m)
# Constraint Equations
f_c = Matrix([q1**2 + q2**2 - L**2])
# Calculate the lagrangian, and form the equations of motion
Lag = Lagrangian(N, pP)
LM = LagrangesMethod(Lag, [q1, q2], hol_coneqs=f_c, forcelist=[(P, m*g*N.x)], frame=N)
LM.form_lagranges_equations()
# Compose operating point
op_point = {q1: L, q2: 0, q1d: 0, q2d: 0, q1d.diff(t): 0, q2d.diff(t): 0}
# Solve for multiplier operating point
lam_op = LM.solve_multipliers(op_point=op_point)
op_point.update(lam_op)
# Perform the Linearization
A, B, inp_vec = LM.linearize([q2], [q2d], [q1], [q1d],
op_point=op_point, A_and_B=True)
assert A == Matrix([[0, 1], [-9.8/L, 0]])
assert B == Matrix([])
def test_linearize_rolling_disc_lagrange():
q1, q2, q3 = q = dynamicsymbols('q1 q2 q3')
q1d, q2d, q3d = qd = dynamicsymbols('q1 q2 q3', 1)
r, m, g = symbols('r m g')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyD.potential_energy = - m * g * r * cos(q2)
Lag = Lagrangian(N, BodyD)
l = LagrangesMethod(Lag, q)
l.form_lagranges_equations()
# Linearize about steady-state upright rolling
op_point = {q1: 0, q2: 0, q3: 0,
q1d: 0, q2d: 0,
q1d.diff(): 0, q2d.diff(): 0, q3d.diff(): 0}
A = l.linearize(q_ind=q, qd_ind=qd, op_point=op_point, A_and_B=True)[0]
sol = Matrix([[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, -6*q3d, 0],
[0, -4*g/(5*r), 0, 6*q3d/5, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert A == sol
|
40afb42e2e953a98c29c1211153f0785ea20fa6a2c6d4152995e46b16059b862 | from sympy.core.backend import symbols, Matrix, atan, zeros
from sympy import simplify
from sympy.physics.mechanics import (dynamicsymbols, Particle, Point,
ReferenceFrame, SymbolicSystem)
from sympy.testing.pytest import raises
# This class is going to be tested using a simple pendulum set up in x and y
# coordinates
x, y, u, v, lam = dynamicsymbols('x y u v lambda')
m, l, g = symbols('m l g')
# Set up the different forms the equations can take
# [1] Explicit form where the kinematics and dynamics are combined
# x' = F(x, t, r, p)
#
# [2] Implicit form where the kinematics and dynamics are combined
# M(x, p) x' = F(x, t, r, p)
#
# [3] Implicit form where the kinematics and dynamics are separate
# M(q, p) u' = F(q, u, t, r, p)
# q' = G(q, u, t, r, p)
dyn_implicit_mat = Matrix([[1, 0, -x/m],
[0, 1, -y/m],
[0, 0, l**2/m]])
dyn_implicit_rhs = Matrix([0, 0, u**2 + v**2 - g*y])
comb_implicit_mat = Matrix([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, -x/m],
[0, 0, 0, 1, -y/m],
[0, 0, 0, 0, l**2/m]])
comb_implicit_rhs = Matrix([u, v, 0, 0, u**2 + v**2 - g*y])
kin_explicit_rhs = Matrix([u, v])
comb_explicit_rhs = comb_implicit_mat.LUsolve(comb_implicit_rhs)
# Set up a body and load to pass into the system
theta = atan(x/y)
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [theta, N.z])
O = Point('O')
P = O.locatenew('P', l * A.x)
Pa = Particle('Pa', P, m)
bodies = [Pa]
loads = [(P, g * m * N.x)]
# Set up some output equations to be given to SymbolicSystem
# Change to make these fit the pendulum
PE = symbols("PE")
out_eqns = {PE: m*g*(l+y)}
# Set up remaining arguments that can be passed to SymbolicSystem
alg_con = [2]
alg_con_full = [4]
coordinates = (x, y, lam)
speeds = (u, v)
states = (x, y, u, v, lam)
coord_idxs = (0, 1)
speed_idxs = (2, 3)
def test_form_1():
symsystem1 = SymbolicSystem(states, comb_explicit_rhs,
alg_con=alg_con_full, output_eqns=out_eqns,
coord_idxs=coord_idxs, speed_idxs=speed_idxs,
bodies=bodies, loads=loads)
assert symsystem1.coordinates == Matrix([x, y])
assert symsystem1.speeds == Matrix([u, v])
assert symsystem1.states == Matrix([x, y, u, v, lam])
assert symsystem1.alg_con == [4]
inter = comb_explicit_rhs
assert simplify(symsystem1.comb_explicit_rhs - inter) == zeros(5, 1)
assert set(symsystem1.dynamic_symbols()) == {y, v, lam, u, x}
assert type(symsystem1.dynamic_symbols()) == tuple
assert set(symsystem1.constant_symbols()) == {l, g, m}
assert type(symsystem1.constant_symbols()) == tuple
assert symsystem1.output_eqns == out_eqns
assert symsystem1.bodies == (Pa,)
assert symsystem1.loads == ((P, g * m * N.x),)
def test_form_2():
symsystem2 = SymbolicSystem(coordinates, comb_implicit_rhs, speeds=speeds,
mass_matrix=comb_implicit_mat,
alg_con=alg_con_full, output_eqns=out_eqns,
bodies=bodies, loads=loads)
assert symsystem2.coordinates == Matrix([x, y, lam])
assert symsystem2.speeds == Matrix([u, v])
assert symsystem2.states == Matrix([x, y, lam, u, v])
assert symsystem2.alg_con == [4]
inter = comb_implicit_rhs
assert simplify(symsystem2.comb_implicit_rhs - inter) == zeros(5, 1)
assert simplify(symsystem2.comb_implicit_mat-comb_implicit_mat) == zeros(5)
assert set(symsystem2.dynamic_symbols()) == {y, v, lam, u, x}
assert type(symsystem2.dynamic_symbols()) == tuple
assert set(symsystem2.constant_symbols()) == {l, g, m}
assert type(symsystem2.constant_symbols()) == tuple
inter = comb_explicit_rhs
symsystem2.compute_explicit_form()
assert simplify(symsystem2.comb_explicit_rhs - inter) == zeros(5, 1)
assert symsystem2.output_eqns == out_eqns
assert symsystem2.bodies == (Pa,)
assert symsystem2.loads == ((P, g * m * N.x),)
def test_form_3():
symsystem3 = SymbolicSystem(states, dyn_implicit_rhs,
mass_matrix=dyn_implicit_mat,
coordinate_derivatives=kin_explicit_rhs,
alg_con=alg_con, coord_idxs=coord_idxs,
speed_idxs=speed_idxs, bodies=bodies,
loads=loads)
assert symsystem3.coordinates == Matrix([x, y])
assert symsystem3.speeds == Matrix([u, v])
assert symsystem3.states == Matrix([x, y, u, v, lam])
assert symsystem3.alg_con == [4]
inter1 = kin_explicit_rhs
inter2 = dyn_implicit_rhs
assert simplify(symsystem3.kin_explicit_rhs - inter1) == zeros(2, 1)
assert simplify(symsystem3.dyn_implicit_mat - dyn_implicit_mat) == zeros(3)
assert simplify(symsystem3.dyn_implicit_rhs - inter2) == zeros(3, 1)
inter = comb_implicit_rhs
assert simplify(symsystem3.comb_implicit_rhs - inter) == zeros(5, 1)
assert simplify(symsystem3.comb_implicit_mat-comb_implicit_mat) == zeros(5)
inter = comb_explicit_rhs
symsystem3.compute_explicit_form()
assert simplify(symsystem3.comb_explicit_rhs - inter) == zeros(5, 1)
assert set(symsystem3.dynamic_symbols()) == {y, v, lam, u, x}
assert type(symsystem3.dynamic_symbols()) == tuple
assert set(symsystem3.constant_symbols()) == {l, g, m}
assert type(symsystem3.constant_symbols()) == tuple
assert symsystem3.output_eqns == {}
assert symsystem3.bodies == (Pa,)
assert symsystem3.loads == ((P, g * m * N.x),)
def test_property_attributes():
symsystem = SymbolicSystem(states, comb_explicit_rhs,
alg_con=alg_con_full, output_eqns=out_eqns,
coord_idxs=coord_idxs, speed_idxs=speed_idxs,
bodies=bodies, loads=loads)
with raises(AttributeError):
symsystem.bodies = 42
with raises(AttributeError):
symsystem.coordinates = 42
with raises(AttributeError):
symsystem.dyn_implicit_rhs = 42
with raises(AttributeError):
symsystem.comb_implicit_rhs = 42
with raises(AttributeError):
symsystem.loads = 42
with raises(AttributeError):
symsystem.dyn_implicit_mat = 42
with raises(AttributeError):
symsystem.comb_implicit_mat = 42
with raises(AttributeError):
symsystem.kin_explicit_rhs = 42
with raises(AttributeError):
symsystem.comb_explicit_rhs = 42
with raises(AttributeError):
symsystem.speeds = 42
with raises(AttributeError):
symsystem.states = 42
with raises(AttributeError):
symsystem.alg_con = 42
def test_not_specified_errors():
"""This test will cover errors that arise from trying to access attributes
that were not specified upon object creation or were specified on creation
and the user tries to recalculate them."""
# Trying to access form 2 when form 1 given
# Trying to access form 3 when form 2 given
symsystem1 = SymbolicSystem(states, comb_explicit_rhs)
with raises(AttributeError):
symsystem1.comb_implicit_mat
with raises(AttributeError):
symsystem1.comb_implicit_rhs
with raises(AttributeError):
symsystem1.dyn_implicit_mat
with raises(AttributeError):
symsystem1.dyn_implicit_rhs
with raises(AttributeError):
symsystem1.kin_explicit_rhs
with raises(AttributeError):
symsystem1.compute_explicit_form()
symsystem2 = SymbolicSystem(coordinates, comb_implicit_rhs, speeds=speeds,
mass_matrix=comb_implicit_mat)
with raises(AttributeError):
symsystem2.dyn_implicit_mat
with raises(AttributeError):
symsystem2.dyn_implicit_rhs
with raises(AttributeError):
symsystem2.kin_explicit_rhs
# Attribute error when trying to access coordinates and speeds when only the
# states were given.
with raises(AttributeError):
symsystem1.coordinates
with raises(AttributeError):
symsystem1.speeds
# Attribute error when trying to access bodies and loads when they are not
# given
with raises(AttributeError):
symsystem1.bodies
with raises(AttributeError):
symsystem1.loads
# Attribute error when trying to access comb_explicit_rhs before it was
# calculated
with raises(AttributeError):
symsystem2.comb_explicit_rhs
|
c89c75e12a4fa4bd0d53e1e7aff895e114b45a2878a54b6cf7726e841e8314b6 | from sympy import evalf, symbols, pi, sin, cos, sqrt, acos, Matrix
from sympy.physics.mechanics import (ReferenceFrame, dynamicsymbols, inertia,
KanesMethod, RigidBody, Point, dot, msubs)
from sympy.testing.pytest import slow, ON_TRAVIS, skip
@slow
def test_bicycle():
if ON_TRAVIS:
skip("Too slow for travis.")
# Code to get equations of motion for a bicycle modeled as in:
# J.P Meijaard, Jim M Papadopoulos, Andy Ruina and A.L Schwab. Linearized
# dynamics equations for the balance and steer of a bicycle: a benchmark
# and review. Proceedings of The Royal Society (2007) 463, 1955-1982
# doi: 10.1098/rspa.2007.1857
# Note that this code has been crudely ported from Autolev, which is the
# reason for some of the unusual naming conventions. It was purposefully kept
# as similar as possible in order to aid debugging.
# Declare Coordinates & Speeds
# Simple definitions for qdots - qd = u
# Speeds are: yaw frame ang. rate, roll frame ang. rate, rear wheel frame
# ang. rate (spinning motion), frame ang. rate (pitching motion), steering
# frame ang. rate, and front wheel ang. rate (spinning motion).
# Wheel positions are ignorable coordinates, so they are not introduced.
q1, q2, q4, q5 = dynamicsymbols('q1 q2 q4 q5')
q1d, q2d, q4d, q5d = dynamicsymbols('q1 q2 q4 q5', 1)
u1, u2, u3, u4, u5, u6 = dynamicsymbols('u1 u2 u3 u4 u5 u6')
u1d, u2d, u3d, u4d, u5d, u6d = dynamicsymbols('u1 u2 u3 u4 u5 u6', 1)
# Declare System's Parameters
WFrad, WRrad, htangle, forkoffset = symbols('WFrad WRrad htangle forkoffset')
forklength, framelength, forkcg1 = symbols('forklength framelength forkcg1')
forkcg3, framecg1, framecg3, Iwr11 = symbols('forkcg3 framecg1 framecg3 Iwr11')
Iwr22, Iwf11, Iwf22, Iframe11 = symbols('Iwr22 Iwf11 Iwf22 Iframe11')
Iframe22, Iframe33, Iframe31, Ifork11 = symbols('Iframe22 Iframe33 Iframe31 Ifork11')
Ifork22, Ifork33, Ifork31, g = symbols('Ifork22 Ifork33 Ifork31 g')
mframe, mfork, mwf, mwr = symbols('mframe mfork mwf mwr')
# Set up reference frames for the system
# N - inertial
# Y - yaw
# R - roll
# WR - rear wheel, rotation angle is ignorable coordinate so not oriented
# Frame - bicycle frame
# TempFrame - statically rotated frame for easier reference inertia definition
# Fork - bicycle fork
# TempFork - statically rotated frame for easier reference inertia definition
# WF - front wheel, again possesses an ignorable coordinate
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
R = Y.orientnew('R', 'Axis', [q2, Y.x])
Frame = R.orientnew('Frame', 'Axis', [q4 + htangle, R.y])
WR = ReferenceFrame('WR')
TempFrame = Frame.orientnew('TempFrame', 'Axis', [-htangle, Frame.y])
Fork = Frame.orientnew('Fork', 'Axis', [q5, Frame.x])
TempFork = Fork.orientnew('TempFork', 'Axis', [-htangle, Fork.y])
WF = ReferenceFrame('WF')
# Kinematics of the Bicycle First block of code is forming the positions of
# the relevant points
# rear wheel contact -> rear wheel mass center -> frame mass center +
# frame/fork connection -> fork mass center + front wheel mass center ->
# front wheel contact point
WR_cont = Point('WR_cont')
WR_mc = WR_cont.locatenew('WR_mc', WRrad * R.z)
Steer = WR_mc.locatenew('Steer', framelength * Frame.z)
Frame_mc = WR_mc.locatenew('Frame_mc', - framecg1 * Frame.x
+ framecg3 * Frame.z)
Fork_mc = Steer.locatenew('Fork_mc', - forkcg1 * Fork.x
+ forkcg3 * Fork.z)
WF_mc = Steer.locatenew('WF_mc', forklength * Fork.x + forkoffset * Fork.z)
WF_cont = WF_mc.locatenew('WF_cont', WFrad * (dot(Fork.y, Y.z) * Fork.y -
Y.z).normalize())
# Set the angular velocity of each frame.
# Angular accelerations end up being calculated automatically by
# differentiating the angular velocities when first needed.
# u1 is yaw rate
# u2 is roll rate
# u3 is rear wheel rate
# u4 is frame pitch rate
# u5 is fork steer rate
# u6 is front wheel rate
Y.set_ang_vel(N, u1 * Y.z)
R.set_ang_vel(Y, u2 * R.x)
WR.set_ang_vel(Frame, u3 * Frame.y)
Frame.set_ang_vel(R, u4 * Frame.y)
Fork.set_ang_vel(Frame, u5 * Fork.x)
WF.set_ang_vel(Fork, u6 * Fork.y)
# Form the velocities of the previously defined points, using the 2 - point
# theorem (written out by hand here). Accelerations again are calculated
# automatically when first needed.
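# For reference (standard kinematics, not specific to this test): the
# two-point theorem applied by v2pt_theory is ^N v^P = ^N v^O + ^N w^B x r^{O->P},
# where O and P are both fixed in body B.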
WR_cont.set_vel(N, 0)
WR_mc.v2pt_theory(WR_cont, N, WR)
Steer.v2pt_theory(WR_mc, N, Frame)
Frame_mc.v2pt_theory(WR_mc, N, Frame)
Fork_mc.v2pt_theory(Steer, N, Fork)
WF_mc.v2pt_theory(Steer, N, Fork)
WF_cont.v2pt_theory(WF_mc, N, WF)
# Sets the inertias of each body. Uses the inertia frame to construct the
# inertia dyadics. Wheel inertias are only defined by principal moments of
# inertia, and are in fact constant in the frame and fork reference frames;
# it is for this reason that the orientations of the wheels do not need
# to be defined. The frame and fork inertias are defined in the 'Temp'
# frames which are fixed to the appropriate body frames; this is to allow
# easier input of the reference values of the benchmark paper. Note that
# due to slightly different orientations, the products of inertia need to
# have their signs flipped; this is done later when entering the numerical
# value.
Frame_I = (inertia(TempFrame, Iframe11, Iframe22, Iframe33, 0, 0, Iframe31), Frame_mc)
Fork_I = (inertia(TempFork, Ifork11, Ifork22, Ifork33, 0, 0, Ifork31), Fork_mc)
WR_I = (inertia(Frame, Iwr11, Iwr22, Iwr11), WR_mc)
WF_I = (inertia(Fork, Iwf11, Iwf22, Iwf11), WF_mc)
# Declaration of the RigidBody containers. ::
BodyFrame = RigidBody('BodyFrame', Frame_mc, Frame, mframe, Frame_I)
BodyFork = RigidBody('BodyFork', Fork_mc, Fork, mfork, Fork_I)
BodyWR = RigidBody('BodyWR', WR_mc, WR, mwr, WR_I)
BodyWF = RigidBody('BodyWF', WF_mc, WF, mwf, WF_I)
# The kinematic differential equations; they are defined quite simply. Each
# entry in this list is equal to zero.
kd = [q1d - u1, q2d - u2, q4d - u4, q5d - u5]
# The nonholonomic constraints are the velocity of the front wheel contact
# point dotted into the X, Y, and Z directions; the yaw frame is used as it
# is "closer" to the front wheel (1 less DCM connecting them). These
# constraints force the velocity of the front wheel contact point to be 0
# in the inertial frame; the X and Y direction constraints enforce a
# "no-slip" condition, and the Z direction constraint forces the front
# wheel contact point to not move away from the ground frame, essentially
# replicating the holonomic constraint which does not allow the frame pitch
# to change in an invalid fashion.
conlist_speed = [WF_cont.vel(N) & Y.x, WF_cont.vel(N) & Y.y, WF_cont.vel(N) & Y.z]
# The holonomic constraint is that the position from the rear wheel contact
# point to the front wheel contact point when dotted into the
# normal-to-ground plane direction must be zero; effectively that the front
# and rear wheel contact points are always touching the ground plane. This
# is actually not part of the dynamic equations, but instead is necessary
# for the linearization process.
conlist_coord = [WF_cont.pos_from(WR_cont) & Y.z]
# The force list; each body has the appropriate gravitational force applied
# at its mass center.
FL = [(Frame_mc, -mframe * g * Y.z),
(Fork_mc, -mfork * g * Y.z),
(WF_mc, -mwf * g * Y.z),
(WR_mc, -mwr * g * Y.z)]
BL = [BodyFrame, BodyFork, BodyWR, BodyWF]
# The N frame is the inertial frame, coordinates are supplied in the order
# of independent, dependent coordinates, as are the speeds. The kinematic
# differential equation are also entered here. Here the dependent speeds
# are specified, in the same order they were provided in earlier, along
# with the non-holonomic constraints. The dependent coordinate is also
# provided, with the holonomic constraint. Again, this is only provided
# for the linearization process.
KM = KanesMethod(N, q_ind=[q1, q2, q5],
q_dependent=[q4], configuration_constraints=conlist_coord,
u_ind=[u2, u3, u5],
u_dependent=[u1, u4, u6], velocity_constraints=conlist_speed,
kd_eqs=kd)
(fr, frstar) = KM.kanes_equations(BL, FL)
# This is the start of entering in the numerical values from the benchmark
# paper to validate the eigen values of the linearized equations from this
# model to the reference eigen values. Look at the aforementioned paper for
# more information. Some of these are intermediate values, used to
# transform values from the paper into the coordinate systems used in this
# model.
PaperRadRear = 0.3
PaperRadFront = 0.35
HTA = evalf.N(pi / 2 - pi / 10)
TrailPaper = 0.08
rake = evalf.N(-(TrailPaper*sin(HTA)-(PaperRadFront*cos(HTA))))
PaperWb = 1.02
PaperFrameCgX = 0.3
PaperFrameCgZ = 0.9
PaperForkCgX = 0.9
PaperForkCgZ = 0.7
FrameLength = evalf.N(PaperWb*sin(HTA)-(rake-(PaperRadFront-PaperRadRear)*cos(HTA)))
FrameCGNorm = evalf.N((PaperFrameCgZ - PaperRadRear-(PaperFrameCgX/sin(HTA))*cos(HTA))*sin(HTA))
FrameCGPar = evalf.N(PaperFrameCgX / sin(HTA) + (PaperFrameCgZ - PaperRadRear - PaperFrameCgX / sin(HTA) * cos(HTA)) * cos(HTA))
tempa = evalf.N(PaperForkCgZ - PaperRadFront)
tempb = evalf.N(PaperWb-PaperForkCgX)
tempc = evalf.N(sqrt(tempa**2+tempb**2))
PaperForkL = evalf.N(PaperWb*cos(HTA)-(PaperRadFront-PaperRadRear)*sin(HTA))
ForkCGNorm = evalf.N(rake+(tempc * sin(pi/2-HTA-acos(tempa/tempc))))
ForkCGPar = evalf.N(tempc * cos((pi/2-HTA)-acos(tempa/tempc))-PaperForkL)
# Here is the final assembly of the numerical values. The symbol 'v' is the
# forward speed of the bicycle (a concept which only makes sense in the
# upright, static equilibrium case?). These are in a dictionary which will
# later be substituted in. Again the sign on the *product* of inertia
# values is flipped here, due to different orientations of coordinate
# systems.
v = symbols('v')
val_dict = {WFrad: PaperRadFront,
WRrad: PaperRadRear,
htangle: HTA,
forkoffset: rake,
forklength: PaperForkL,
framelength: FrameLength,
forkcg1: ForkCGPar,
forkcg3: ForkCGNorm,
framecg1: FrameCGNorm,
framecg3: FrameCGPar,
Iwr11: 0.0603,
Iwr22: 0.12,
Iwf11: 0.1405,
Iwf22: 0.28,
Ifork11: 0.05892,
Ifork22: 0.06,
Ifork33: 0.00708,
Ifork31: 0.00756,
Iframe11: 9.2,
Iframe22: 11,
Iframe33: 2.8,
Iframe31: -2.4,
mfork: 4,
mframe: 85,
mwf: 3,
mwr: 2,
g: 9.81,
q1: 0,
q2: 0,
q4: 0,
q5: 0,
u1: 0,
u2: 0,
u3: v / PaperRadRear,
u4: 0,
u5: 0,
u6: v / PaperRadFront}
# Linearizes the forcing vector; the equations are set up as MM udot =
# forcing, where MM is the mass matrix, udot is the vector representing the
# time derivatives of the generalized speeds, and forcing is a vector which
# contains both external forcing terms and internal forcing terms, such as
# centripetal or Coriolis forces. This actually returns a matrix with as
# many rows as *total* coordinates and speeds, but only as many columns as
# independent coordinates and speeds.
forcing_lin = KM.linearize()[0]
# As mentioned above, the size of the linearized forcing terms is expanded
# to include both q's and u's, so the mass matrix must have this done as
# well. This will likely be changed to be part of the linearization process
# in the future.
MM_full = KM.mass_matrix_full
MM_full_s = msubs(MM_full, val_dict)
forcing_lin_s = msubs(forcing_lin, KM.kindiffdict(), val_dict)
MM_full_s = MM_full_s.evalf()
forcing_lin_s = forcing_lin_s.evalf()
# Finally, we construct an "A" matrix for the form xdot = A x (x being the
# state vector, although in this case, the sizes are a little off). The
# following line extracts only the minimum entries required for eigenvalue
# analysis, which correspond to rows and columns for lean, steer, lean
# rate, and steer rate.
Amat = MM_full_s.inv() * forcing_lin_s
A = Amat.extract([1, 2, 4, 6], [1, 2, 3, 5])
# Precomputed for comparison
Res = Matrix([[ 0, 0, 1.0, 0],
[ 0, 0, 0, 1.0],
[9.48977444677355, -0.891197738059089*v**2 - 0.571523173729245, -0.105522449805691*v, -0.330515398992311*v],
[11.7194768719633, -1.97171508499972*v**2 + 30.9087533932407, 3.67680523332152*v, -3.08486552743311*v]])
# Actual eigenvalue comparison
eps = 1.e-12
for i in range(6):
error = Res.subs(v, i) - A.subs(v, i)
assert all(abs(x) < eps for x in error)
|
f3c4e5329fc8dc0a68d2ad722f2b970ee3475830cd0fff156b21a07878c60642 | from sympy.core.backend import cos, Matrix, sin, zeros, tan, pi, symbols
from sympy import trigsimp, simplify, solve
from sympy.physics.mechanics import (cross, dot, dynamicsymbols,
find_dynamicsymbols, KanesMethod, inertia,
inertia_of_point_mass, Point,
ReferenceFrame, RigidBody)
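# A note for orientation (a standard statement of Kane's method, not taken
# from this file): Fr denotes the generalized active forces, Fr* the
# generalized inertia forces, and the dynamical equations are Fr + Fr* = 0.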
def test_aux_dep():
# This test is about rolling disc dynamics, comparing the results found
# with KanesMethod to those found when deriving the equations "manually"
# with SymPy.
# The terms Fr, Fr*, and Fr*_steady are all compared between the two
# methods. Here, Fr*_steady refers to the generalized inertia forces for an
# equilibrium configuration.
# Note: compared with the test_rolling_disc() test in test_kane.py, this
# test also exercises auxiliary speeds and configuration and motion
# constraints, seen in the generalized dependent coordinate q[3] and the
# dependent speeds u[3], u[4] and u[5].
# First, manual derivation of Fr, Fr_star, Fr_star_steady.
# Symbols for time and constant parameters.
# Symbols for contact forces: Fx, Fy, Fz.
t, r, m, g, I, J = symbols('t r m g I J')
Fx, Fy, Fz = symbols('Fx Fy Fz')
# Configuration variables and their time derivatives:
# q[0] -- yaw
# q[1] -- lean
# q[2] -- spin
# q[3] -- dot(-r*B.z, A.z) -- distance from ground plane to disc center in
# A.z direction
# Generalized speeds and their time derivatives:
# u[0] -- disc angular velocity component, disc fixed x direction
# u[1] -- disc angular velocity component, disc fixed y direction
# u[2] -- disc angular velocity component, disc fixed z direction
# u[3] -- disc velocity component, A.x direction
# u[4] -- disc velocity component, A.y direction
# u[5] -- disc velocity component, A.z direction
# Auxiliary generalized speeds:
# ua[0] -- contact point auxiliary generalized speed, A.x direction
# ua[1] -- contact point auxiliary generalized speed, A.y direction
# ua[2] -- contact point auxiliary generalized speed, A.z direction
q = dynamicsymbols('q:4')
qd = [qi.diff(t) for qi in q]
u = dynamicsymbols('u:6')
ud = [ui.diff(t) for ui in u]
ud_zero = dict(zip(ud, [0.]*len(ud)))
ua = dynamicsymbols('ua:3')
ua_zero = dict(zip(ua, [0.]*len(ua))) # noqa:F841
# Reference frames:
# Yaw intermediate frame: A.
# Lean intermediate frame: B.
# Disc fixed frame: C.
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q[0], N.z])
B = A.orientnew('B', 'Axis', [q[1], A.x])
C = B.orientnew('C', 'Axis', [q[2], B.y])
# Angular velocity and angular acceleration of disc fixed frame
# u[0], u[1] and u[2] are generalized independent speeds.
C.set_ang_vel(N, u[0]*B.x + u[1]*B.y + u[2]*B.z)
C.set_ang_acc(N, C.ang_vel_in(N).diff(t, B)
+ cross(B.ang_vel_in(N), C.ang_vel_in(N)))
# Velocity and acceleration of points:
# Disc-ground contact point: P.
# Center of disc: O, defined from point P with the dependent coordinate q[3]
# u[3], u[4] and u[5] are generalized dependent speeds.
P = Point('P')
P.set_vel(N, ua[0]*A.x + ua[1]*A.y + ua[2]*A.z)
O = P.locatenew('O', q[3]*A.z + r*sin(q[1])*A.y)
O.set_vel(N, u[3]*A.x + u[4]*A.y + u[5]*A.z)
O.set_acc(N, O.vel(N).diff(t, A) + cross(A.ang_vel_in(N), O.vel(N)))
# Kinematic differential equations:
# Two equalities: one is w_c_n_qd = C.ang_vel_in(N) in three coordinates
# directions of B, for qd0, qd1 and qd2.
# the other is v_o_n_qd = O.vel(N) in A.z direction for qd3.
# Then, solve for dq/dt's in terms of u's: qd_kd.
w_c_n_qd = qd[0]*A.z + qd[1]*B.x + qd[2]*B.y
v_o_n_qd = O.pos_from(P).diff(t, A) + cross(A.ang_vel_in(N), O.pos_from(P))
kindiffs = Matrix([dot(w_c_n_qd - C.ang_vel_in(N), uv) for uv in B] +
[dot(v_o_n_qd - O.vel(N), A.z)])
qd_kd = solve(kindiffs, qd) # noqa:F841
# Values of generalized speeds during a steady turn for later substitution
# into the Fr_star_steady.
steady_conditions = solve(kindiffs.subs({qd[1] : 0, qd[3] : 0}), u)
steady_conditions.update({qd[1] : 0, qd[3] : 0})
# Partial angular velocities and velocities.
partial_w_C = [C.ang_vel_in(N).diff(ui, N) for ui in u + ua]
partial_v_O = [O.vel(N).diff(ui, N) for ui in u + ua]
partial_v_P = [P.vel(N).diff(ui, N) for ui in u + ua]
# Configuration constraint: f_c, the projection of radius r in A.z direction
# is q[3].
# Velocity constraints: f_v, for u3, u4 and u5.
# Acceleration constraints: f_a.
f_c = Matrix([dot(-r*B.z, A.z) - q[3]])
f_v = Matrix([dot(O.vel(N) - (P.vel(N) + cross(C.ang_vel_in(N),
O.pos_from(P))), ai).expand() for ai in A])
v_o_n = cross(C.ang_vel_in(N), O.pos_from(P))
a_o_n = v_o_n.diff(t, A) + cross(A.ang_vel_in(N), v_o_n)
f_a = Matrix([dot(O.acc(N) - a_o_n, ai) for ai in A]) # noqa:F841
# Solve for constraint equations in the form of
# u_dependent = A_rs * [u_i; u_aux].
# First, obtain constraint coefficient matrix: M_v * [u; ua] = 0;
# Second, taking u[0], u[1], u[2] as independent,
# taking u[3], u[4], u[5] as dependent,
# rearranging the matrix of M_v to be A_rs for u_dependent.
# Third, set u_aux = 0 in u_dep, giving the resulting dictionary u_dep_dict.
M_v = zeros(3, 9)
for i in range(3):
for j, ui in enumerate(u + ua):
M_v[i, j] = f_v[i].diff(ui)
M_v_i = M_v[:, :3]
M_v_d = M_v[:, 3:6]
M_v_aux = M_v[:, 6:]
M_v_i_aux = M_v_i.row_join(M_v_aux)
A_rs = - M_v_d.inv() * M_v_i_aux
u_dep = A_rs[:, :3] * Matrix(u[:3])
u_dep_dict = dict(zip(u[3:], u_dep))
# Active forces: F_O acting on point O; F_P acting on point P.
# Generalized active forces (unconstrained): Fr_u = F_point * pv_point.
F_O = m*g*A.z
F_P = Fx * A.x + Fy * A.y + Fz * A.z
Fr_u = Matrix([dot(F_O, pv_o) + dot(F_P, pv_p) for pv_o, pv_p in
zip(partial_v_O, partial_v_P)])
# Inertia force: R_star_O.
# Inertia of disc: I_C_O, where J is an inertia component about the principal axis.
# Inertia torque: T_star_C.
# Generalized inertia forces (unconstrained): Fr_star_u.
R_star_O = -m*O.acc(N)
I_C_O = inertia(B, I, J, I)
T_star_C = -(dot(I_C_O, C.ang_acc_in(N)) \
+ cross(C.ang_vel_in(N), dot(I_C_O, C.ang_vel_in(N))))
Fr_star_u = Matrix([dot(R_star_O, pv) + dot(T_star_C, pav) for pv, pav in
zip(partial_v_O, partial_w_C)])
# Form nonholonomic Fr: Fr_c, and nonholonomic Fr_star: Fr_star_c.
# Also, nonholonomic Fr_star in steady turning condition: Fr_star_steady.
Fr_c = Fr_u[:3, :].col_join(Fr_u[6:, :]) + A_rs.T * Fr_u[3:6, :]
Fr_star_c = Fr_star_u[:3, :].col_join(Fr_star_u[6:, :])\
+ A_rs.T * Fr_star_u[3:6, :]
Fr_star_steady = Fr_star_c.subs(ud_zero).subs(u_dep_dict)\
.subs(steady_conditions).subs({q[3]: -r*cos(q[1])}).expand()
# Second, using KanesMethod in mechanics for fr, frstar and frstar_steady.
# Rigid Bodies: disc, with inertia I_C_O.
iner_tuple = (I_C_O, O)
disc = RigidBody('disc', O, C, m, iner_tuple)
bodyList = [disc]
# Generalized forces: Gravity: F_o; Auxiliary forces: F_p.
F_o = (O, F_O)
F_p = (P, F_P)
forceList = [F_o, F_p]
# KanesMethod.
kane = KanesMethod(
N, q_ind= q[:3], u_ind= u[:3], kd_eqs=kindiffs,
q_dependent=q[3:], configuration_constraints = f_c,
u_dependent=u[3:], velocity_constraints= f_v,
u_auxiliary=ua
)
# fr, frstar, frstar_steady and kdd(kinematic differential equations).
(fr, frstar)= kane.kanes_equations(bodyList, forceList)
frstar_steady = frstar.subs(ud_zero).subs(u_dep_dict).subs(steady_conditions)\
.subs({q[3]: -r*cos(q[1])}).expand()
kdd = kane.kindiffdict()
assert Matrix(Fr_c).expand() == fr.expand()
assert Matrix(Fr_star_c.subs(kdd)).expand() == frstar.expand()
assert (simplify(Matrix(Fr_star_steady).expand()) ==
simplify(frstar_steady.expand()))
syms_in_forcing = find_dynamicsymbols(kane.forcing)
for qdi in qd:
assert qdi not in syms_in_forcing
def test_non_central_inertia():
# This tests that the calculation of Fr* does not depend on the point
# about which the inertia of a rigid body is defined. This test solves
# exercises 8.12, 8.17 from Kane 1985.
# Declare symbols
q1, q2, q3 = dynamicsymbols('q1:4')
q1d, q2d, q3d = dynamicsymbols('q1:4', level=1)
u1, u2, u3, u4, u5 = dynamicsymbols('u1:6')
u_prime, R, M, g, e, f, theta = symbols('u\' R, M, g, e, f, theta')
a, b, mA, mB, IA, J, K, t = symbols('a b mA mB IA J K t')
Q1, Q2, Q3 = symbols('Q1, Q2 Q3')
IA22, IA23, IA33 = symbols('IA22 IA23 IA33')
# Reference Frames
F = ReferenceFrame('F')
P = F.orientnew('P', 'axis', [-theta, F.y])
A = P.orientnew('A', 'axis', [q1, P.x])
A.set_ang_vel(F, u1*A.x + u3*A.z)
# define frames for wheels
B = A.orientnew('B', 'axis', [q2, A.z])
C = A.orientnew('C', 'axis', [q3, A.z])
B.set_ang_vel(A, u4 * A.z)
C.set_ang_vel(A, u5 * A.z)
# define points D, S*, Q on frame A and their velocities
pD = Point('D')
pD.set_vel(A, 0)
# u3 will not change v_D_F since wheels are still assumed to roll without slip.
pD.set_vel(F, u2 * A.y)
pS_star = pD.locatenew('S*', e*A.y)
pQ = pD.locatenew('Q', f*A.y - R*A.x)
for p in [pS_star, pQ]:
p.v2pt_theory(pD, F, A)
# masscenters of bodies A, B, C
pA_star = pD.locatenew('A*', a*A.y)
pB_star = pD.locatenew('B*', b*A.z)
pC_star = pD.locatenew('C*', -b*A.z)
for p in [pA_star, pB_star, pC_star]:
p.v2pt_theory(pD, F, A)
# points of B, C touching the plane P
pB_hat = pB_star.locatenew('B^', -R*A.x)
pC_hat = pC_star.locatenew('C^', -R*A.x)
pB_hat.v2pt_theory(pB_star, F, B)
pC_hat.v2pt_theory(pC_star, F, C)
# the velocities of B^, C^ are zero since B, C are assumed to roll without slip
kde = [q1d - u1, q2d - u4, q3d - u5]
vc = [dot(p.vel(F), A.y) for p in [pB_hat, pC_hat]]
# inertias of bodies A, B, C
# IA22, IA23, IA33 are not specified in the problem statement, but are
# necessary to define an inertia object. Although the values of
# IA22, IA23, IA33 are not known in terms of the variables given in the
# problem statement, they do not appear in the general inertia terms.
inertia_A = inertia(A, IA, IA22, IA33, 0, IA23, 0)
inertia_B = inertia(B, K, K, J)
inertia_C = inertia(C, K, K, J)
# define the rigid bodies A, B, C
rbA = RigidBody('rbA', pA_star, A, mA, (inertia_A, pA_star))
rbB = RigidBody('rbB', pB_star, B, mB, (inertia_B, pB_star))
rbC = RigidBody('rbC', pC_star, C, mB, (inertia_C, pC_star))
km = KanesMethod(F, q_ind=[q1, q2, q3], u_ind=[u1, u2], kd_eqs=kde,
u_dependent=[u4, u5], velocity_constraints=vc,
u_auxiliary=[u3])
forces = [(pS_star, -M*g*F.x), (pQ, Q1*A.x + Q2*A.y + Q3*A.z)]
bodies = [rbA, rbB, rbC]
fr, fr_star = km.kanes_equations(bodies, forces)
vc_map = solve(vc, [u4, u5])
# KanesMethod returns the negative of Fr, Fr* as defined in Kane1985.
fr_star_expected = Matrix([
-(IA + 2*J*b**2/R**2 + 2*K +
mA*a**2 + 2*mB*b**2) * u1.diff(t) - mA*a*u1*u2,
-(mA + 2*mB +2*J/R**2) * u2.diff(t) + mA*a*u1**2,
0])
t = trigsimp(fr_star.subs(vc_map).subs({u3: 0})).doit().expand()
assert ((fr_star_expected - t).expand() == zeros(3, 1))
# define inertias of rigid bodies A, B, C about point D
# I_S/O = I_S/S* + I_S*/O
bodies2 = []
for rb, I_star in zip([rbA, rbB, rbC], [inertia_A, inertia_B, inertia_C]):
I = I_star + inertia_of_point_mass(rb.mass,
rb.masscenter.pos_from(pD),
rb.frame)
bodies2.append(RigidBody('', rb.masscenter, rb.frame, rb.mass,
(I, pD)))
fr2, fr_star2 = km.kanes_equations(bodies2, forces)
t = trigsimp(fr_star2.subs(vc_map).subs({u3: 0})).doit()
assert (fr_star_expected - t).expand() == zeros(3, 1)
def test_sub_qdot():
# This test solves exercises 8.12, 8.17 from Kane 1985 and defines
# some velocities in terms of q, qdot.
## --- Declare symbols ---
q1, q2, q3 = dynamicsymbols('q1:4')
q1d, q2d, q3d = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
u_prime, R, M, g, e, f, theta = symbols('u\' R, M, g, e, f, theta')
a, b, mA, mB, IA, J, K, t = symbols('a b mA mB IA J K t')
IA22, IA23, IA33 = symbols('IA22 IA23 IA33')
Q1, Q2, Q3 = symbols('Q1 Q2 Q3')
# --- Reference Frames ---
F = ReferenceFrame('F')
P = F.orientnew('P', 'axis', [-theta, F.y])
A = P.orientnew('A', 'axis', [q1, P.x])
A.set_ang_vel(F, u1*A.x + u3*A.z)
# define frames for wheels
B = A.orientnew('B', 'axis', [q2, A.z])
C = A.orientnew('C', 'axis', [q3, A.z])
## --- define points D, S*, Q on frame A and their velocities ---
pD = Point('D')
pD.set_vel(A, 0)
# u3 will not change v_D_F since wheels are still assumed to roll w/o slip
pD.set_vel(F, u2 * A.y)
pS_star = pD.locatenew('S*', e*A.y)
pQ = pD.locatenew('Q', f*A.y - R*A.x)
# masscenters of bodies A, B, C
pA_star = pD.locatenew('A*', a*A.y)
pB_star = pD.locatenew('B*', b*A.z)
pC_star = pD.locatenew('C*', -b*A.z)
for p in [pS_star, pQ, pA_star, pB_star, pC_star]:
p.v2pt_theory(pD, F, A)
# points of B, C touching the plane P
pB_hat = pB_star.locatenew('B^', -R*A.x)
pC_hat = pC_star.locatenew('C^', -R*A.x)
pB_hat.v2pt_theory(pB_star, F, B)
pC_hat.v2pt_theory(pC_star, F, C)
# --- relate qdot, u ---
# the velocities of B^, C^ are zero since B, C are assumed to roll w/o slip
kde = [dot(p.vel(F), A.y) for p in [pB_hat, pC_hat]]
kde += [u1 - q1d]
kde_map = solve(kde, [q1d, q2d, q3d])
for k, v in list(kde_map.items()):
kde_map[k.diff(t)] = v.diff(t)
# inertias of bodies A, B, C
# IA22, IA23, IA33 are not specified in the problem statement, but are
# necessary to define an inertia object. Although the values of
# IA22, IA23, IA33 are not known in terms of the variables given in the
# problem statement, they do not appear in the general inertia terms.
inertia_A = inertia(A, IA, IA22, IA33, 0, IA23, 0)
inertia_B = inertia(B, K, K, J)
inertia_C = inertia(C, K, K, J)
# define the rigid bodies A, B, C
rbA = RigidBody('rbA', pA_star, A, mA, (inertia_A, pA_star))
rbB = RigidBody('rbB', pB_star, B, mB, (inertia_B, pB_star))
rbC = RigidBody('rbC', pC_star, C, mB, (inertia_C, pC_star))
## --- use kanes method ---
km = KanesMethod(F, [q1, q2, q3], [u1, u2], kd_eqs=kde, u_auxiliary=[u3])
forces = [(pS_star, -M*g*F.x), (pQ, Q1*A.x + Q2*A.y + Q3*A.z)]
bodies = [rbA, rbB, rbC]
# Q2 = -u_prime * u2 * Q1 / sqrt(u2**2 + f**2 * u1**2)
# -u_prime * R * u2 / sqrt(u2**2 + f**2 * u1**2) = R / Q1 * Q2
fr_expected = Matrix([
f*Q3 + M*g*e*sin(theta)*cos(q1),
Q2 + M*g*sin(theta)*sin(q1),
e*M*g*cos(theta) - Q1*f - Q2*R])
#Q1 * (f - u_prime * R * u2 / sqrt(u2**2 + f**2 * u1**2)))])
fr_star_expected = Matrix([
-(IA + 2*J*b**2/R**2 + 2*K +
mA*a**2 + 2*mB*b**2) * u1.diff(t) - mA*a*u1*u2,
-(mA + 2*mB +2*J/R**2) * u2.diff(t) + mA*a*u1**2,
0])
fr, fr_star = km.kanes_equations(bodies, forces)
assert (fr.expand() == fr_expected.expand())
assert ((fr_star_expected - trigsimp(fr_star)).expand() == zeros(3, 1))
def test_sub_qdot2():
# This test solves exercises 8.3 from Kane 1985 and defines
# all velocities in terms of q, qdot. We check that the generalized active
# forces are correctly computed if u terms are only defined in the
# kinematic differential equations.
#
# This functionality was added in PR 8948. Without qdot/u substitution, the
# KanesMethod constructor will fail during the constraint initialization as
# the B matrix will be poorly formed and inversion of the dependent part
# will fail.
g, m, Px, Py, Pz, R, t = symbols('g m Px Py Pz R t')
q = dynamicsymbols('q:5')
qd = dynamicsymbols('q:5', level=1)
u = dynamicsymbols('u:5')
## Define inertial, intermediate, and rigid body reference frames
A = ReferenceFrame('A')
B_prime = A.orientnew('B_prime', 'Axis', [q[0], A.z])
B = B_prime.orientnew('B', 'Axis', [pi/2 - q[1], B_prime.x])
C = B.orientnew('C', 'Axis', [q[2], B.z])
## Define points of interest and their velocities
pO = Point('O')
pO.set_vel(A, 0)
# R is the point in plane H that comes into contact with disk C.
pR = pO.locatenew('R', q[3]*A.x + q[4]*A.y)
pR.set_vel(A, pR.pos_from(pO).diff(t, A))
pR.set_vel(B, 0)
# C^ is the point in disk C that comes into contact with plane H.
pC_hat = pR.locatenew('C^', 0)
pC_hat.set_vel(C, 0)
# C* is the point at the center of disk C.
pCs = pC_hat.locatenew('C*', R*B.y)
pCs.set_vel(C, 0)
pCs.set_vel(B, 0)
# calculate velocities of points C* and C^ in frame A
pCs.v2pt_theory(pR, A, B) # points C* and R are fixed in frame B
pC_hat.v2pt_theory(pCs, A, C) # points C* and C^ are fixed in frame C
## Define forces on each point of the system
R_C_hat = Px*A.x + Py*A.y + Pz*A.z
R_Cs = -m*g*A.z
forces = [(pC_hat, R_C_hat), (pCs, R_Cs)]
## Define kinematic differential equations
# let ui = omega_C_A & bi (i = 1, 2, 3)
# u4 = qd4, u5 = qd5
u_expr = [C.ang_vel_in(A) & uv for uv in B]
u_expr += qd[3:]
kde = [ui - e for ui, e in zip(u, u_expr)]
km1 = KanesMethod(A, q, u, kde)
fr1, _ = km1.kanes_equations([], forces)
## Calculate generalized active forces if we impose the condition that the
# disk C is rolling without slipping
u_indep = u[:3]
u_dep = list(set(u) - set(u_indep))
vc = [pC_hat.vel(A) & uv for uv in [A.x, A.y]]
km2 = KanesMethod(A, q, u_indep, kde,
u_dependent=u_dep, velocity_constraints=vc)
fr2, _ = km2.kanes_equations([], forces)
fr1_expected = Matrix([
-R*g*m*sin(q[1]),
-R*(Px*cos(q[0]) + Py*sin(q[0]))*tan(q[1]),
R*(Px*cos(q[0]) + Py*sin(q[0])),
Px,
Py])
fr2_expected = Matrix([
-R*g*m*sin(q[1]),
0,
0])
assert (trigsimp(fr1.expand()) == trigsimp(fr1_expected.expand()))
assert (trigsimp(fr2.expand()) == trigsimp(fr2_expected.expand()))
|
4dffed77a43518f14ead9037122cb59df72e0c3efa07059351949adea4ceff40 | """
MKS unit system.
MKS stands for "meter, kilogram, second".
"""
from sympy.physics.units import UnitSystem, DimensionSystem
from sympy.physics.units.definitions import G, Hz, J, N, Pa, W, c, g, kg, m, s
from sympy.physics.units.definitions.dimension_definitions import (
acceleration, action, energy, force, frequency, momentum,
power, pressure, velocity, length, mass, time)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.systems.length_weight_time import dimsys_length_weight_time
dims = (velocity, acceleration, momentum, force, energy, power, pressure,
frequency, action)
units = [m, g, s, J, N, W, Pa, Hz]
all_units = []
# Prefixes of units like g, J, N, etc. get added using `prefix_unit`
# in the for loop, but the actual units have to be added manually.
all_units.extend([g, J, N, W, Pa, Hz])
for u in units:
all_units.extend(prefix_unit(u, PREFIXES))
all_units.extend([G, c])
# unit system
MKS = UnitSystem(base_units=(m, kg, s), units=all_units, name="MKS", dimension_system=dimsys_length_weight_time)
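# Illustrative usage (a sketch, not executed here; it assumes the public
# convert_to helper from sympy.physics.units accepts a UnitSystem argument):
#     >>> from sympy.physics.units import convert_to, newton, kilogram, meter, second
#     >>> convert_to(newton, [kilogram, meter, second], MKS)
#     kilogram*meter/second**2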
__all__ = [
'force', 'DimensionSystem', 'energy', 'Pa', 'MKS',
'dimsys_length_weight_time', 'Hz', 'power', 's', 'UnitSystem', 'units',
'mass', 'momentum', 'acceleration', 'G', 'J', 'N', 'pressure', 'W',
'all_units', 'c', 'kg', 'g', 'dims', 'prefix_unit', 'm', 'PREFIXES',
'length', 'frequency', 'u', 'time', 'action', 'velocity',
]
|
aab8c618923d1f82f1847112104abbda4f4d7ba77a00b12de829fb1701c3b411 | """
MKSA unit system.
MKSA stands for "meter, kilogram, second, ampere".
"""
from typing import List
from sympy.physics.units.definitions import Z0, A, C, F, H, S, T, V, Wb, ohm
from sympy.physics.units.definitions.dimension_definitions import (
capacitance, charge, conductance, current, impedance, inductance,
magnetic_density, magnetic_flux, voltage)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.systems.mks import MKS, dimsys_length_weight_time
from sympy.physics.units.quantities import Quantity
dims = (voltage, impedance, conductance, current, capacitance, inductance, charge,
magnetic_density, magnetic_flux)
units = [A, V, ohm, S, F, H, C, T, Wb]
all_units = [] # type: List[Quantity]
for u in units:
all_units.extend(prefix_unit(u, PREFIXES))
all_units.extend([Z0])
dimsys_MKSA = dimsys_length_weight_time.extend([
# Dimensional dependencies for base dimensions (MKSA not in MKS)
current,
], new_dim_deps=dict(
# Dimensional dependencies for derived dimensions
voltage=dict(mass=1, length=2, current=-1, time=-3),
impedance=dict(mass=1, length=2, current=-2, time=-3),
conductance=dict(mass=-1, length=-2, current=2, time=3),
capacitance=dict(mass=-1, length=-2, current=2, time=4),
inductance=dict(mass=1, length=2, current=-2, time=-2),
charge=dict(current=1, time=1),
magnetic_density=dict(mass=1, current=-1, time=-2),
magnetic_flux=dict(length=2, mass=1, current=-1, time=-2),
))
MKSA = MKS.extend(base=(A,), units=all_units, name='MKSA', dimension_system=dimsys_MKSA)
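# Illustrative usage (a sketch, not executed here; it assumes convert_to
# accepts a UnitSystem as its third argument):
#     >>> from sympy.physics.units import convert_to, ohm, volt, ampere
#     >>> convert_to(ohm, [volt, ampere], MKSA)
#     volt/ampere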
|
e3739dbe6b717696f3f14716ff1a1113d02b25555d17b7607403760720c05f1e | """
Natural unit system.
The natural system comes from "setting c = 1, hbar = 1". From the
computational point of view it means that we use velocity and action instead
of length and time. Moreover, instead of mass we use energy.
"""
from sympy.physics.units import DimensionSystem
from sympy.physics.units.definitions import c, eV, hbar
from sympy.physics.units.definitions.dimension_definitions import (
action, energy, force, frequency, length, mass, momentum,
power, time, velocity)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.unitsystem import UnitSystem
# dimension system
_natural_dim = DimensionSystem(
base_dims=(action, energy, velocity),
derived_dims=(length, mass, time, momentum, force, power, frequency)
)
units = prefix_unit(eV, PREFIXES)
# unit system
natural = UnitSystem(base_units=(hbar, eV, c), units=units, name="Natural system")
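# In this system hbar, eV and c are the base units; a length, for instance,
# is expressed as a multiple of hbar*c/eV. Note that _natural_dim is defined
# above but is not passed to the UnitSystem constructor here.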
|
c0c74d5efa4392c33f83a3e83d92bbdf500aa13357ce0d28f59dae9c1d73c3e6 | """
SI unit system.
Based on MKSA, which stands for "meter, kilogram, second, ampere".
Added kelvin, candela and mole.
"""
from typing import List
from sympy.physics.units import DimensionSystem, Dimension, dHg0
from sympy.physics.units.quantities import Quantity
from sympy import Rational, pi, sqrt, S
from sympy.physics.units.definitions.dimension_definitions import (
acceleration, action, current, impedance, length, mass, time, velocity,
amount_of_substance, temperature, information, frequency, force, pressure,
energy, power, charge, voltage, capacitance, conductance, magnetic_flux,
magnetic_density, inductance, luminous_intensity
)
from sympy.physics.units.definitions import (
kilogram, newton, second, meter, gram, cd, K, joule, watt, pascal, hertz,
coulomb, volt, ohm, siemens, farad, henry, tesla, weber, dioptre, lux,
katal, gray, becquerel, inch, liter, julian_year, gravitational_constant,
speed_of_light, elementary_charge, planck, hbar, electronvolt,
avogadro_number, avogadro_constant, boltzmann_constant,
stefan_boltzmann_constant, atomic_mass_constant, molar_gas_constant,
faraday_constant, josephson_constant, von_klitzing_constant,
acceleration_due_to_gravity, magnetic_constant, vacuum_permittivity,
vacuum_impedance, coulomb_constant, atmosphere, bar, pound, psi, mmHg,
milli_mass_unit, quart, lightyear, astronomical_unit, planck_mass,
planck_time, planck_temperature, planck_length, planck_charge, planck_area,
planck_volume, planck_momentum, planck_energy, planck_force, planck_power,
planck_density, planck_energy_density, planck_intensity,
planck_angular_frequency, planck_pressure, planck_current, planck_voltage,
planck_impedance, planck_acceleration, bit, byte, kibibyte, mebibyte,
gibibyte, tebibyte, pebibyte, exbibyte, curie, rutherford, radian, degree,
steradian, angular_mil, atomic_mass_unit, gee, kPa, ampere, u0, c, kelvin,
mol, mole, candela, m, kg, s, electric_constant, G, boltzmann
)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.systems.mksa import MKSA, dimsys_MKSA
derived_dims = (frequency, force, pressure, energy, power, charge, voltage,
capacitance, conductance, magnetic_flux,
magnetic_density, inductance, luminous_intensity)
base_dims = (amount_of_substance, luminous_intensity, temperature)
units = [mol, cd, K, lux, hertz, newton, pascal, joule, watt, coulomb, volt,
farad, ohm, siemens, weber, tesla, henry, candela, lux, becquerel,
gray, katal]
all_units = [] # type: List[Quantity]
for u in units:
all_units.extend(prefix_unit(u, PREFIXES))
all_units.extend([mol, cd, K, lux])
dimsys_SI = dimsys_MKSA.extend(
[
# Dimensional dependencies for other base dimensions:
temperature,
amount_of_substance,
luminous_intensity,
])
dimsys_default = dimsys_SI.extend(
[information],
)
SI = MKSA.extend(base=(mol, cd, K), units=all_units, name='SI', dimension_system=dimsys_SI)
One = S.One
SI.set_quantity_dimension(radian, One)
SI.set_quantity_scale_factor(ampere, One)
SI.set_quantity_scale_factor(kelvin, One)
SI.set_quantity_scale_factor(mole, One)
SI.set_quantity_scale_factor(candela, One)
# MKSA extension to MKS: derived units
SI.set_quantity_scale_factor(coulomb, One)
SI.set_quantity_scale_factor(volt, joule/coulomb)
SI.set_quantity_scale_factor(ohm, volt/ampere)
SI.set_quantity_scale_factor(siemens, ampere/volt)
SI.set_quantity_scale_factor(farad, coulomb/volt)
SI.set_quantity_scale_factor(henry, volt*second/ampere)
SI.set_quantity_scale_factor(tesla, volt*second/meter**2)
SI.set_quantity_scale_factor(weber, joule/ampere)
SI.set_quantity_dimension(lux, luminous_intensity / length ** 2)
SI.set_quantity_scale_factor(lux, steradian*candela/meter**2)
# katal is the SI unit of catalytic activity
SI.set_quantity_dimension(katal, amount_of_substance / time)
SI.set_quantity_scale_factor(katal, mol/second)
# gray is the SI unit of absorbed dose
SI.set_quantity_dimension(gray, energy / mass)
SI.set_quantity_scale_factor(gray, meter**2/second**2)
# becquerel is the SI unit of radioactivity
SI.set_quantity_dimension(becquerel, 1 / time)
SI.set_quantity_scale_factor(becquerel, 1/second)
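# The same two-call pattern (dimension first, then scale factor) registers any
# further derived unit. Hypothetical sketch only -- ``some_unit`` is not a
# quantity defined in this module:
#     SI.set_quantity_dimension(some_unit, energy / time)
#     SI.set_quantity_scale_factor(some_unit, joule / second)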
#### CONSTANTS ####
# elementary charge
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(elementary_charge, charge)
SI.set_quantity_scale_factor(elementary_charge, 1.602176634e-19*coulomb)
# Electronvolt
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(electronvolt, energy)
SI.set_quantity_scale_factor(electronvolt, 1.602176634e-19*joule)
# Avogadro number
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(avogadro_number, One)
SI.set_quantity_scale_factor(avogadro_number, 6.02214076e23)
# Avogadro constant
SI.set_quantity_dimension(avogadro_constant, amount_of_substance ** -1)
SI.set_quantity_scale_factor(avogadro_constant, avogadro_number / mol)
# Boltzmann constant
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(boltzmann_constant, energy / temperature)
SI.set_quantity_scale_factor(boltzmann_constant, 1.380649e-23*joule/kelvin)
# Stefan-Boltzmann constant
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(stefan_boltzmann_constant, energy * time ** -1 * length ** -2 * temperature ** -4)
SI.set_quantity_scale_factor(stefan_boltzmann_constant, pi**2 * boltzmann_constant**4 / (60 * hbar**3 * speed_of_light ** 2))
# Atomic mass
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(atomic_mass_constant, mass)
SI.set_quantity_scale_factor(atomic_mass_constant, 1.66053906660e-24*gram)
# Molar gas constant
# REF: NIST SP 959 (June 2019)
SI.set_quantity_dimension(molar_gas_constant, energy / (temperature * amount_of_substance))
SI.set_quantity_scale_factor(molar_gas_constant, boltzmann_constant * avogadro_constant)
# Faraday constant
SI.set_quantity_dimension(faraday_constant, charge / amount_of_substance)
SI.set_quantity_scale_factor(faraday_constant, elementary_charge * avogadro_constant)
# Josephson constant
SI.set_quantity_dimension(josephson_constant, frequency / voltage)
SI.set_quantity_scale_factor(josephson_constant, 0.5 * planck / elementary_charge)
# Von Klitzing constant
SI.set_quantity_dimension(von_klitzing_constant, voltage / current)
SI.set_quantity_scale_factor(von_klitzing_constant, hbar / elementary_charge ** 2)
# Acceleration due to gravity (on the Earth surface)
SI.set_quantity_dimension(acceleration_due_to_gravity, acceleration)
SI.set_quantity_scale_factor(acceleration_due_to_gravity, 9.80665*meter/second**2)
# magnetic constant:
SI.set_quantity_dimension(magnetic_constant, force / current ** 2)
SI.set_quantity_scale_factor(magnetic_constant, 4*pi/10**7 * newton/ampere**2)
# electric constant:
SI.set_quantity_dimension(vacuum_permittivity, capacitance / length)
SI.set_quantity_scale_factor(vacuum_permittivity, 1/(u0 * c**2))
# vacuum impedance:
SI.set_quantity_dimension(vacuum_impedance, impedance)
SI.set_quantity_scale_factor(vacuum_impedance, u0 * c)
# Coulomb's constant:
SI.set_quantity_dimension(coulomb_constant, force * length ** 2 / charge ** 2)
SI.set_quantity_scale_factor(coulomb_constant, 1/(4*pi*vacuum_permittivity))
SI.set_quantity_dimension(psi, pressure)
SI.set_quantity_scale_factor(psi, pound * gee / inch ** 2)
SI.set_quantity_dimension(mmHg, pressure)
SI.set_quantity_scale_factor(mmHg, dHg0 * acceleration_due_to_gravity * kilogram / meter**2)
SI.set_quantity_dimension(milli_mass_unit, mass)
SI.set_quantity_scale_factor(milli_mass_unit, atomic_mass_unit/1000)
SI.set_quantity_dimension(quart, length ** 3)
SI.set_quantity_scale_factor(quart, Rational(231, 4) * inch**3)
# Other convenient units and magnitudes
SI.set_quantity_dimension(lightyear, length)
SI.set_quantity_scale_factor(lightyear, speed_of_light*julian_year)
SI.set_quantity_dimension(astronomical_unit, length)
SI.set_quantity_scale_factor(astronomical_unit, 149597870691*meter)
# Fundamental Planck units:
SI.set_quantity_dimension(planck_mass, mass)
SI.set_quantity_scale_factor(planck_mass, sqrt(hbar*speed_of_light/G))
SI.set_quantity_dimension(planck_time, time)
SI.set_quantity_scale_factor(planck_time, sqrt(hbar*G/speed_of_light**5))
SI.set_quantity_dimension(planck_temperature, temperature)
SI.set_quantity_scale_factor(planck_temperature, sqrt(hbar*speed_of_light**5/G/boltzmann**2))
SI.set_quantity_dimension(planck_length, length)
SI.set_quantity_scale_factor(planck_length, sqrt(hbar*G/speed_of_light**3))
SI.set_quantity_dimension(planck_charge, charge)
SI.set_quantity_scale_factor(planck_charge, sqrt(4*pi*electric_constant*hbar*speed_of_light))
# Derived Planck units:
SI.set_quantity_dimension(planck_area, length ** 2)
SI.set_quantity_scale_factor(planck_area, planck_length**2)
SI.set_quantity_dimension(planck_volume, length ** 3)
SI.set_quantity_scale_factor(planck_volume, planck_length**3)
SI.set_quantity_dimension(planck_momentum, mass * velocity)
SI.set_quantity_scale_factor(planck_momentum, planck_mass * speed_of_light)
SI.set_quantity_dimension(planck_energy, energy)
SI.set_quantity_scale_factor(planck_energy, planck_mass * speed_of_light**2)
SI.set_quantity_dimension(planck_force, force)
SI.set_quantity_scale_factor(planck_force, planck_energy / planck_length)
SI.set_quantity_dimension(planck_power, power)
SI.set_quantity_scale_factor(planck_power, planck_energy / planck_time)
SI.set_quantity_dimension(planck_density, mass / length ** 3)
SI.set_quantity_scale_factor(planck_density, planck_mass / planck_length**3)
SI.set_quantity_dimension(planck_energy_density, energy / length ** 3)
SI.set_quantity_scale_factor(planck_energy_density, planck_energy / planck_length**3)
SI.set_quantity_dimension(planck_intensity, mass * time ** (-3))
SI.set_quantity_scale_factor(planck_intensity, planck_energy_density * speed_of_light)
SI.set_quantity_dimension(planck_angular_frequency, 1 / time)
SI.set_quantity_scale_factor(planck_angular_frequency, 1 / planck_time)
SI.set_quantity_dimension(planck_pressure, pressure)
SI.set_quantity_scale_factor(planck_pressure, planck_force / planck_length**2)
SI.set_quantity_dimension(planck_current, current)
SI.set_quantity_scale_factor(planck_current, planck_charge / planck_time)
SI.set_quantity_dimension(planck_voltage, voltage)
SI.set_quantity_scale_factor(planck_voltage, planck_energy / planck_charge)
SI.set_quantity_dimension(planck_impedance, impedance)
SI.set_quantity_scale_factor(planck_impedance, planck_voltage / planck_current)
SI.set_quantity_dimension(planck_acceleration, acceleration)
SI.set_quantity_scale_factor(planck_acceleration, speed_of_light / planck_time)
# Older units for radioactivity
SI.set_quantity_dimension(curie, 1 / time)
SI.set_quantity_scale_factor(curie, 37000000000*becquerel)
SI.set_quantity_dimension(rutherford, 1 / time)
SI.set_quantity_scale_factor(rutherford, 1000000*becquerel)
# check that scale factors are the right SI dimensions:
for _scale_factor, _dimension in zip(
SI._quantity_scale_factors.values(),
SI._quantity_dimension_map.values()
):
dimex = SI.get_dimensional_expr(_scale_factor)
if dimex != 1:
# XXX: equivalent_dims is an instance method taking two arguments in
# addition to self so this can not work:
if not DimensionSystem.equivalent_dims(_dimension, Dimension(dimex)): # type: ignore
raise ValueError("quantity value and dimension mismatch")
del _scale_factor, _dimension
__all__ = [
'mmHg', 'atmosphere', 'inductance', 'newton', 'meter',
'vacuum_permittivity', 'pascal', 'magnetic_constant', 'voltage',
'angular_mil', 'luminous_intensity', 'all_units',
'julian_year', 'weber', 'exbibyte', 'liter',
'molar_gas_constant', 'faraday_constant', 'avogadro_constant',
'lightyear', 'planck_density', 'gee', 'mol', 'bit', 'gray',
'planck_momentum', 'bar', 'magnetic_density', 'prefix_unit', 'PREFIXES',
'planck_time', 'dimex', 'gram', 'candela', 'force', 'planck_intensity',
'energy', 'becquerel', 'planck_acceleration', 'speed_of_light',
'conductance', 'frequency', 'coulomb_constant', 'degree', 'lux', 'planck',
'current', 'planck_current', 'tebibyte', 'planck_power', 'MKSA', 'power',
'K', 'planck_volume', 'quart', 'pressure', 'amount_of_substance',
'joule', 'boltzmann_constant', 'Dimension', 'c', 'planck_force', 'length',
'watt', 'action', 'hbar', 'gibibyte', 'DimensionSystem', 'cd', 'volt',
'planck_charge', 'dioptre', 'vacuum_impedance', 'dimsys_default', 'farad',
'charge', 'gravitational_constant', 'temperature', 'u0', 'hertz',
'capacitance', 'tesla', 'steradian', 'planck_mass', 'josephson_constant',
'planck_area', 'stefan_boltzmann_constant', 'base_dims',
'astronomical_unit', 'radian', 'planck_voltage', 'impedance',
'planck_energy', 'atomic_mass_constant', 'rutherford', 'second', 'inch',
'elementary_charge', 'SI', 'electronvolt', 'dimsys_SI', 'henry',
'planck_angular_frequency', 'ohm', 'pound', 'planck_pressure', 'G', 'psi',
'dHg0', 'von_klitzing_constant', 'planck_length', 'avogadro_number',
'mole', 'acceleration', 'information', 'planck_energy_density',
'mebibyte', 's', 'acceleration_due_to_gravity',
'planck_temperature', 'units', 'mass', 'dimsys_MKSA', 'kelvin', 'kPa',
'boltzmann', 'milli_mass_unit', 'planck_impedance', 'electric_constant',
'derived_dims', 'kg', 'coulomb', 'siemens', 'byte', 'magnetic_flux',
'atomic_mass_unit', 'm', 'kibibyte', 'kilogram', 'One', 'curie', 'u',
'time', 'pebibyte', 'velocity', 'ampere', 'katal',
]
|
b6c2dc7aa0d9150a6493ae6efe5e705f019c1d6bc7fbdbf2069ea6160e1956cd | from sympy.physics.vector import dynamicsymbols, Point, ReferenceFrame
from sympy.testing.pytest import raises, ignore_warnings
import warnings
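# For reference when reading the tests below (standard kinematics, not a
# claim about the sympy internals):
#   two-point theorem: ^N v^P = ^N v^O + ^N w^B x r^{O->P}, with O and P both
#                      fixed in frame B
#   one-point theorem: ^N v^P = ^B v^P + ^N v^O + ^N w^B x r^{O->P}, with O
#                      fixed in frame B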
def test_point_v1pt_theorys():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, qd * B.z)
O = Point('O')
P = O.locatenew('P', B.x)
P.set_vel(B, 0)
O.set_vel(N, 0)
assert P.v1pt_theory(O, N, B) == qd * B.y
O.set_vel(N, N.x)
assert P.v1pt_theory(O, N, B) == N.x + qd * B.y
P.set_vel(B, B.z)
assert P.v1pt_theory(O, N, B) == B.z + N.x + qd * B.y
def test_point_a1pt_theorys():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, qd * B.z)
O = Point('O')
P = O.locatenew('P', B.x)
P.set_vel(B, 0)
O.set_vel(N, 0)
assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y
P.set_vel(B, q2d * B.z)
assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y + q2dd * B.z
O.set_vel(N, q2d * B.x)
assert P.a1pt_theory(O, N, B) == ((q2dd - qd**2) * B.x + (q2d * qd + qdd) * B.y +
q2dd * B.z)
def test_point_v2pt_theorys():
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 0)
O.set_vel(N, 0)
assert P.v2pt_theory(O, N, B) == 0
P = O.locatenew('P', B.x)
assert P.v2pt_theory(O, N, B) == (qd * B.z ^ B.x)
O.set_vel(N, N.x)
assert P.v2pt_theory(O, N, B) == N.x + qd * B.y
def test_point_a2pt_theorys():
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
qdd = dynamicsymbols('q', 2)
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 0)
O.set_vel(N, 0)
assert P.a2pt_theory(O, N, B) == 0
P.set_pos(O, B.x)
assert P.a2pt_theory(O, N, B) == (-qd**2) * B.x + (qdd) * B.y
def test_point_funcs():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, 5 * B.y)
O = Point('O')
P = O.locatenew('P', q * B.x)
assert P.pos_from(O) == q * B.x
P.set_vel(B, qd * B.x + q2d * B.y)
assert P.vel(B) == qd * B.x + q2d * B.y
O.set_vel(N, 0)
assert O.vel(N) == 0
assert P.a1pt_theory(O, N, B) == ((-25 * q + qdd) * B.x + (q2dd) * B.y +
(-10 * qd) * B.z)
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 10 * B.x)
O.set_vel(N, 5 * N.x)
assert O.vel(N) == 5 * N.x
assert P.a2pt_theory(O, N, B) == (-10 * qd**2) * B.x + (10 * qdd) * B.y
B.set_ang_vel(N, 5 * B.y)
O = Point('O')
P = O.locatenew('P', q * B.x)
P.set_vel(B, qd * B.x + q2d * B.y)
O.set_vel(N, 0)
assert P.v1pt_theory(O, N, B) == qd * B.x + q2d * B.y - 5 * q * B.z
def test_point_pos():
q = dynamicsymbols('q')
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 10 * N.x + 5 * B.x)
assert P.pos_from(O) == 10 * N.x + 5 * B.x
Q = P.locatenew('Q', 10 * N.y + 5 * B.y)
assert Q.pos_from(P) == 10 * N.y + 5 * B.y
assert Q.pos_from(O) == 10 * N.x + 10 * N.y + 5 * B.x + 5 * B.y
assert O.pos_from(Q) == -10 * N.x - 10 * N.y - 5 * B.x - 5 * B.y
def test_point_partial_velocity():
N = ReferenceFrame('N')
A = ReferenceFrame('A')
p = Point('p')
u1, u2 = dynamicsymbols('u1, u2')
p.set_vel(N, u1 * A.x + u2 * N.y)
assert p.partial_velocity(N, u1) == A.x
assert p.partial_velocity(N, u1, u2) == (A.x, N.y)
raises(ValueError, lambda: p.partial_velocity(A, u1))
def test_point_vel(): #Basic functionality
q1, q2 = dynamicsymbols('q1 q2')
N = ReferenceFrame('N')
B = ReferenceFrame('B')
Q = Point('Q')
O = Point('O')
Q.set_pos(O, q1 * N.x)
raises(ValueError, lambda: Q.vel(N)) # Velocity of O in N is not defined
O.set_vel(N, q2 * N.y)
assert O.vel(N) == q2 * N.y
raises(ValueError, lambda: O.vel(B)) # Velocity of O is not defined in B
def test_auto_point_vel():
t = dynamicsymbols._t
q1, q2 = dynamicsymbols('q1 q2')
N = ReferenceFrame('N')
B = ReferenceFrame('B')
O = Point('O')
Q = Point('Q')
Q.set_pos(O, q1 * N.x)
O.set_vel(N, q2 * N.y)
assert Q.vel(N) == q1.diff(t) * N.x + q2 * N.y # Velocity of Q using O
P1 = Point('P1')
P1.set_pos(O, q1 * B.x)
P2 = Point('P2')
P2.set_pos(P1, q2 * B.z)
raises(ValueError, lambda: P2.vel(B)) # O's velocity is defined in a different frame, and no
# point in between has its velocity defined
raises(ValueError, lambda: P2.vel(N)) # Velocity of O not defined in N
def test_auto_point_vel_multiple_point_path():
t = dynamicsymbols._t
q1, q2 = dynamicsymbols('q1 q2')
B = ReferenceFrame('B')
P = Point('P')
P.set_vel(B, q1 * B.x)
P1 = Point('P1')
P1.set_pos(P, q2 * B.y)
P1.set_vel(B, q1 * B.z)
P2 = Point('P2')
P2.set_pos(P1, q1 * B.z)
P3 = Point('P3')
P3.set_pos(P2, 10 * q1 * B.y)
assert P3.vel(B) == 10 * q1.diff(t) * B.y + (q1 + q1.diff(t)) * B.z
def test_auto_vel_dont_overwrite():
t = dynamicsymbols._t
q1, q2, u1 = dynamicsymbols('q1, q2, u1')
N = ReferenceFrame('N')
P = Point('P1')
P.set_vel(N, u1 * N.x)
P1 = Point('P1')
P1.set_pos(P, q2 * N.y)
assert P1.vel(N) == q2.diff(t) * N.y + u1 * N.x
assert P.vel(N) == u1 * N.x
P1.set_vel(N, u1 * N.z)
assert P1.vel(N) == u1 * N.z
def test_auto_point_vel_if_tree_has_vel_but_inappropriate_pos_vector():
q1, q2 = dynamicsymbols('q1 q2')
B = ReferenceFrame('B')
S = ReferenceFrame('S')
P = Point('P')
P.set_vel(B, q1 * B.x)
P1 = Point('P1')
P1.set_pos(P, S.y)
raises(ValueError, lambda: P1.vel(B)) # P1.pos_from(P) can't be expressed in B
raises(ValueError, lambda: P1.vel(S)) # P.vel(S) not defined
def test_auto_point_vel_shortest_path():
t = dynamicsymbols._t
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
B = ReferenceFrame('B')
P = Point('P')
P.set_vel(B, u1 * B.x)
P1 = Point('P1')
P1.set_pos(P, q2 * B.y)
P1.set_vel(B, q1 * B.z)
P2 = Point('P2')
P2.set_pos(P1, q1 * B.z)
P3 = Point('P3')
P3.set_pos(P2, 10 * q1 * B.y)
P4 = Point('P4')
P4.set_pos(P3, q1 * B.x)
O = Point('O')
O.set_vel(B, u2 * B.y)
O1 = Point('O1')
O1.set_pos(O, q2 * B.z)
P4.set_pos(O1, q1 * B.x + q2 * B.z)
with warnings.catch_warnings(): #There are two possible paths in this point tree, thus a warning is raised
warnings.simplefilter('error')
with ignore_warnings(UserWarning):
assert P4.vel(B) == q1.diff(t) * B.x + u2 * B.y + 2 * q2.diff(t) * B.z
def test_auto_point_vel_connected_frames():
t = dynamicsymbols._t
q, q1, q2, u = dynamicsymbols('q q1 q2 u')
N = ReferenceFrame('N')
B = ReferenceFrame('B')
O = Point('O')
O.set_vel(N, u * N.x)
P = Point('P')
P.set_pos(O, q1 * N.x + q2 * B.y)
raises(ValueError, lambda: P.vel(N))
N.orient(B, 'Axis', (q, B.x))
assert P.vel(N) == (u + q1.diff(t)) * N.x + q2.diff(t) * B.y - q2 * q.diff(t) * B.z
def test_auto_point_vel_multiple_paths_warning_arises():
q, u = dynamicsymbols('q u')
N = ReferenceFrame('N')
O = Point('O')
P = Point('P')
Q = Point('Q')
R = Point('R')
P.set_vel(N, u * N.x)
Q.set_vel(N, u *N.y)
R.set_vel(N, u * N.z)
O.set_pos(P, q * N.z)
O.set_pos(Q, q * N.y)
O.set_pos(R, q * N.x)
with warnings.catch_warnings(): #There are two possible paths in this point tree, thus a warning is raised
warnings.simplefilter("error")
raises(UserWarning, lambda: O.vel(N))
def test_auto_vel_cyclic_warning_arises():
P = Point('P')
P1 = Point('P1')
P2 = Point('P2')
P3 = Point('P3')
N = ReferenceFrame('N')
P.set_vel(N, N.x)
P1.set_pos(P, N.x)
P2.set_pos(P1, N.y)
P3.set_pos(P2, N.z)
P1.set_pos(P3, N.x + N.y)
with warnings.catch_warnings(): #The path is cyclic at P1, thus a warning is raised
warnings.simplefilter("error")
raises(UserWarning, lambda: P2.vel(N))
def test_auto_vel_cyclic_warning_msg():
P = Point('P')
P1 = Point('P1')
P2 = Point('P2')
P3 = Point('P3')
N = ReferenceFrame('N')
P.set_vel(N, N.x)
P1.set_pos(P, N.x)
P2.set_pos(P1, N.y)
P3.set_pos(P2, N.z)
P1.set_pos(P3, N.x + N.y)
with warnings.catch_warnings(record = True) as w: #The path is cyclic at P1, thus a warning is raised
warnings.simplefilter("always")
P2.vel(N)
assert issubclass(w[-1].category, UserWarning)
assert 'Kinematic loops are defined among the positions of points. This is likely not desired and may cause errors in your calculations.' in str(w[-1].message)
def test_auto_vel_multiple_path_warning_msg():
N = ReferenceFrame('N')
O = Point('O')
P = Point('P')
Q = Point('Q')
P.set_vel(N, N.x)
Q.set_vel(N, N.y)
O.set_pos(P, N.z)
O.set_pos(Q, N.y)
with warnings.catch_warnings(record = True) as w: #There are two possible paths in this point tree, thus a warning is raised
warnings.simplefilter("always")
O.vel(N)
assert issubclass(w[-1].category, UserWarning)
assert 'Velocity automatically calculated based on point' in str(w[-1].message)
assert 'Velocities from these points are not necessarily the same. This may cause errors in your calculations.' in str(w[-1].message)
|
cbc7d6dfb28e598f5bb7bc60b8149fa46a888364970319876276cc63862af540 | from typing import Optional
from sympy import Derivative, Integer, Expr
from sympy.matrices.common import MatrixCommon
from .ndim_array import NDimArray
from .arrayop import derive_by_array
from sympy import MatrixExpr
from sympy import ZeroMatrix
from sympy.matrices.expressions.matexpr import _matrix_derivative
class ArrayDerivative(Derivative):
is_scalar = False
def __new__(cls, expr, *variables, **kwargs):
obj = super().__new__(cls, expr, *variables, **kwargs)
if isinstance(obj, ArrayDerivative):
obj._shape = obj._get_shape()
return obj
def _get_shape(self):
shape = ()
for v, count in self.variable_count:
if hasattr(v, "shape"):
for i in range(count):
shape += v.shape
if hasattr(self.expr, "shape"):
shape += self.expr.shape
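# The resulting shape is the concatenation of the shapes of the
# differentiation variables (each repeated ``count`` times) followed by the
# shape of the differentiated expression, e.g. d(3x3 matrix)/d(2x1 matrix)
# has shape (2, 1, 3, 3).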
return shape
@property
def shape(self):
return self._shape
@classmethod
def _get_zero_with_shape_like(cls, expr):
if isinstance(expr, (MatrixCommon, NDimArray)):
return expr.zeros(*expr.shape)
elif isinstance(expr, MatrixExpr):
return ZeroMatrix(*expr.shape)
else:
raise RuntimeError("Unable to determine shape of array-derivative.")
@staticmethod
def _call_derive_scalar_by_matrix(expr, v): # type: (Expr, MatrixCommon) -> Expr
return v.applyfunc(lambda x: expr.diff(x))
@staticmethod
def _call_derive_scalar_by_matexpr(expr, v): # type: (Expr, MatrixExpr) -> Expr
if expr.has(v):
return _matrix_derivative(expr, v)
else:
return ZeroMatrix(*v.shape)
@staticmethod
def _call_derive_scalar_by_array(expr, v): # type: (Expr, NDimArray) -> Expr
return v.applyfunc(lambda x: expr.diff(x))
@staticmethod
def _call_derive_matrix_by_scalar(expr, v): # type: (MatrixCommon, Expr) -> Expr
return _matrix_derivative(expr, v)
@staticmethod
def _call_derive_matexpr_by_scalar(expr, v): # type: (MatrixExpr, Expr) -> Expr
return expr._eval_derivative(v)
@staticmethod
def _call_derive_array_by_scalar(expr, v): # type: (NDimArray, Expr) -> Expr
return expr.applyfunc(lambda x: x.diff(v))
@staticmethod
def _call_derive_default(expr, v): # type: (Expr, Expr) -> Optional[Expr]
if expr.has(v):
return _matrix_derivative(expr, v)
else:
return None
@classmethod
def _dispatch_eval_derivative_n_times(cls, expr, v, count):
# Evaluate the derivative `n` times. If
# `_eval_derivative_n_times` is not overridden by the current
# object, the default in `Basic` will call a loop over
# `_eval_derivative`:
if not isinstance(count, (int, Integer)) or ((count <= 0) == True):
return None
# TODO: this could be done with multiple-dispatching:
if expr.is_scalar:
if isinstance(v, MatrixCommon):
result = cls._call_derive_scalar_by_matrix(expr, v)
elif isinstance(v, MatrixExpr):
result = cls._call_derive_scalar_by_matexpr(expr, v)
elif isinstance(v, NDimArray):
result = cls._call_derive_scalar_by_array(expr, v)
elif v.is_scalar:
                # scalar by scalar has special handling in the base class:
return super()._dispatch_eval_derivative_n_times(expr, v, count)
else:
return None
elif v.is_scalar:
if isinstance(expr, MatrixCommon):
result = cls._call_derive_matrix_by_scalar(expr, v)
elif isinstance(expr, MatrixExpr):
result = cls._call_derive_matexpr_by_scalar(expr, v)
elif isinstance(expr, NDimArray):
result = cls._call_derive_array_by_scalar(expr, v)
else:
return None
else:
# Both `expr` and `v` are some array/matrix type:
            if isinstance(expr, MatrixCommon) or isinstance(v, MatrixCommon):
result = derive_by_array(expr, v)
elif isinstance(expr, MatrixExpr) and isinstance(v, MatrixExpr):
result = cls._call_derive_default(expr, v)
elif isinstance(expr, MatrixExpr) or isinstance(v, MatrixExpr):
# if one expression is a symbolic matrix expression while the other isn't, don't evaluate:
return None
else:
result = derive_by_array(expr, v)
if result is None:
return None
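        # One differentiation has been performed; recurse for the remaining count - 1.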
if count == 1:
return result
else:
return cls._dispatch_eval_derivative_n_times(result, v, count - 1)
|
ae7cf0ebdb928116d0831782c23535f5a723d9e8b58779f36472c1771df7a3bf | import functools, itertools
from sympy.core.sympify import sympify
from sympy.core.expr import Expr
from sympy.core import Basic
from sympy.tensor.array import ImmutableDenseNDimArray
from sympy import Symbol
from sympy.core.numbers import Integer
class ArrayComprehension(Basic):
"""
    Generate a list comprehension.
    If there is a symbolic dimension, for example [i for i in range(1, N)] where
    N is a Symbol, then the expression will not be expanded to an array. Otherwise,
    calling the doit() method will trigger the expansion.
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a
ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.doit()
[[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]]
>>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k))
>>> b.doit()
ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k))
"""
def __new__(cls, function, *symbols, **assumptions):
        if any(len(l) != 3 for l in symbols):
            raise ValueError('ArrayComprehension requires a lower and an upper bound'
                             ' for each index variable')
arglist = [sympify(function)]
arglist.extend(cls._check_limits_validity(function, symbols))
obj = Basic.__new__(cls, *arglist, **assumptions)
obj._limits = obj._args[1:]
obj._shape = cls._calculate_shape_from_limits(obj._limits)
obj._rank = len(obj._shape)
obj._loop_size = cls._calculate_loop_size(obj._shape)
return obj
@property
def function(self):
"""The function applied across limits
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j = symbols('i j')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.function
10*i + j
"""
return self._args[0]
@property
def limits(self):
"""
The list of limits that will be applied while expanding the array
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j = symbols('i j')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.limits
((i, 1, 4), (j, 1, 3))
"""
return self._limits
@property
def free_symbols(self):
"""
The set of the free_symbols in the array
        Variables appearing in the bounds are excluded from the
        free symbol set.
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.free_symbols
set()
>>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3))
>>> b.free_symbols
{k}
"""
expr_free_sym = self.function.free_symbols
for var, inf, sup in self._limits:
expr_free_sym.discard(var)
curr_free_syms = inf.free_symbols.union(sup.free_symbols)
expr_free_sym = expr_free_sym.union(curr_free_syms)
return expr_free_sym
@property
def variables(self):
"""The tuples of the variables in the limits
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.variables
[i, j]
"""
return [l[0] for l in self._limits]
@property
def bound_symbols(self):
"""The list of dummy variables
Note
====
Note that all variables are dummy variables since a limit without
lower bound or upper bound is not accepted.
"""
return [l[0] for l in self._limits if len(l) != 1]
@property
def shape(self):
"""
The shape of the expanded array, which may have symbols
Note
====
Both the lower and the upper bounds are included while
calculating the shape.
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.shape
(4, 3)
>>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3))
>>> b.shape
(4, k + 3)
"""
return self._shape
@property
def is_shape_numeric(self):
"""
        Test if the array is shape-numeric, i.e. whether its shape contains
        no symbolic dimension
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.is_shape_numeric
True
>>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3))
>>> b.is_shape_numeric
False
"""
for _, inf, sup in self._limits:
if Basic(inf, sup).atoms(Symbol):
return False
return True
def rank(self):
"""The rank of the expanded array
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.rank()
2
"""
return self._rank
def __len__(self):
"""
        The length of the expanded array, i.e. the number of
        elements in the array.
Raises
======
ValueError : When the length of the array is symbolic
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j = symbols('i j')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> len(a)
12
"""
if self._loop_size.free_symbols:
raise ValueError('Symbolic length is not supported')
return self._loop_size
@classmethod
def _check_limits_validity(cls, function, limits):
limits = sympify(limits)
for var, inf, sup in limits:
if any((not isinstance(i, Expr)) or i.atoms(Symbol, Integer) != i.atoms()
for i in [inf, sup]):
                raise TypeError('Bounds should be an Expression (a combination of Integers and Symbols)')
            if (inf > sup) == True:
                raise ValueError('Lower bound should be less than or equal to the upper bound')
if var in inf.free_symbols or var in sup.free_symbols:
raise ValueError('Variable should not be part of its bounds')
return limits
@classmethod
def _calculate_shape_from_limits(cls, limits):
return tuple([sup - inf + 1 for _, inf, sup in limits])
@classmethod
def _calculate_loop_size(cls, shape):
if not shape:
return 0
loop_size = 1
for l in shape:
loop_size = loop_size * l
return loop_size
def doit(self):
if not self.is_shape_numeric:
return self
return self._expand_array()
def _expand_array(self):
res = []
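        # Substitute every combination of index values (bounds inclusive) into the function.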
for values in itertools.product(*[range(inf, sup+1)
for var, inf, sup
in self._limits]):
res.append(self._get_element(values))
return ImmutableDenseNDimArray(res, self.shape)
def _get_element(self, values):
temp = self.function
for var, val in zip(self.variables, values):
temp = temp.subs(var, val)
return temp
def tolist(self):
"""Transform the expanded array to a list
Raises
======
ValueError : When there is a symbolic dimension
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j = symbols('i j')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.tolist()
[[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]]
"""
if self.is_shape_numeric:
return self._expand_array().tolist()
raise ValueError("A symbolic array cannot be expanded to a list")
def tomatrix(self):
"""Transform the expanded array to a matrix
Raises
======
ValueError : When there is a symbolic dimension
ValueError : When the rank of the expanded array is not equal to 2
Examples
========
>>> from sympy.tensor.array import ArrayComprehension
>>> from sympy import symbols
>>> i, j = symbols('i j')
>>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3))
>>> a.tomatrix()
Matrix([
[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43]])
"""
from sympy.matrices import Matrix
if not self.is_shape_numeric:
raise ValueError("A symbolic array cannot be expanded to a matrix")
if self._rank != 2:
raise ValueError('Dimensions must be of size of 2')
return Matrix(self._expand_array().tomatrix())
def isLambda(v):
LAMBDA = lambda: 0
return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__
class ArrayComprehensionMap(ArrayComprehension):
'''
    A subclass of ArrayComprehension dedicated to mapping an external lambda function.
Notes
=====
    Only lambda functions are accepted.
    The lambda may take at most one argument, in order to avoid ambiguity
    in value assignment.
Examples
========
>>> from sympy.tensor.array import ArrayComprehensionMap
>>> from sympy import symbols
>>> i, j, k = symbols('i j k')
>>> a = ArrayComprehensionMap(lambda: 1, (i, 1, 4))
>>> a.doit()
[1, 1, 1, 1]
>>> b = ArrayComprehensionMap(lambda a: a+1, (j, 1, 4))
>>> b.doit()
[2, 3, 4, 5]
'''
def __new__(cls, function, *symbols, **assumptions):
        if any(len(l) != 3 for l in symbols):
            raise ValueError('ArrayComprehension requires a lower and an upper bound'
                             ' for each index variable')
if not isLambda(function):
raise ValueError('Data type not supported')
arglist = cls._check_limits_validity(function, symbols)
obj = Basic.__new__(cls, *arglist, **assumptions)
obj._limits = obj._args
obj._shape = cls._calculate_shape_from_limits(obj._limits)
obj._rank = len(obj._shape)
obj._loop_size = cls._calculate_loop_size(obj._shape)
obj._lambda = function
return obj
@property
def func(self):
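        # The lambda is not stored in .args, so return a callable class that
        # rebuilds the object with the same lambda and new limits.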
class _(ArrayComprehensionMap):
def __new__(cls, *args, **kwargs):
return ArrayComprehensionMap(self._lambda, *args, **kwargs)
return _
def _get_element(self, values):
temp = self._lambda
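        # A zero-argument lambda is simply called; a one-argument lambda
        # receives the product of the current index values.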
if self._lambda.__code__.co_argcount == 0:
temp = temp()
elif self._lambda.__code__.co_argcount == 1:
temp = temp(functools.reduce(lambda a, b: a*b, values))
return temp
|
d7b1adfe40d0ea004c0089c81c480c8cf0b99475c3bddc1aacf399b7ed3778fd | import itertools
from sympy import S, Tuple, diff, Basic
from sympy.core.compatibility import Iterable
from sympy.tensor.array.ndim_array import NDimArray
from sympy.tensor.array.dense_ndim_array import DenseNDimArray, ImmutableDenseNDimArray
from sympy.tensor.array.sparse_ndim_array import SparseNDimArray
def _arrayfy(a):
from sympy.matrices import MatrixBase
if isinstance(a, NDimArray):
return a
if isinstance(a, (MatrixBase, list, tuple, Tuple)):
return ImmutableDenseNDimArray(a)
return a
def tensorproduct(*args):
"""
Tensor product among scalars or array-like objects.
Examples
========
>>> from sympy.tensor.array import tensorproduct, Array
>>> from sympy.abc import x, y, z, t
>>> A = Array([[1, 2], [3, 4]])
>>> B = Array([x, y])
>>> tensorproduct(A, B)
[[[x, y], [2*x, 2*y]], [[3*x, 3*y], [4*x, 4*y]]]
>>> tensorproduct(A, x)
[[x, 2*x], [3*x, 4*x]]
>>> tensorproduct(A, B, B)
[[[[x**2, x*y], [x*y, y**2]], [[2*x**2, 2*x*y], [2*x*y, 2*y**2]]], [[[3*x**2, 3*x*y], [3*x*y, 3*y**2]], [[4*x**2, 4*x*y], [4*x*y, 4*y**2]]]]
Applying this function on two matrices will result in a rank 4 array.
>>> from sympy import Matrix, eye
>>> m = Matrix([[x, y], [z, t]])
>>> p = tensorproduct(eye(3), m)
>>> p
[[[[x, y], [z, t]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[x, y], [z, t]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[x, y], [z, t]]]]
"""
from sympy.tensor.array import SparseNDimArray, ImmutableSparseNDimArray
if len(args) == 0:
return S.One
if len(args) == 1:
return _arrayfy(args[0])
if len(args) > 2:
return tensorproduct(tensorproduct(args[0], args[1]), *args[2:])
# length of args is 2:
a, b = map(_arrayfy, args)
if not isinstance(a, NDimArray) or not isinstance(b, NDimArray):
return a*b
if isinstance(a, SparseNDimArray) and isinstance(b, SparseNDimArray):
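        # Keep the result sparse: the flat index of the product entry built
        # from flat indices k1 of `a` and k2 of `b` is k1*len(b) + k2.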
lp = len(b)
new_array = {k1*lp + k2: v1*v2 for k1, v1 in a._sparse_array.items() for k2, v2 in b._sparse_array.items()}
return ImmutableSparseNDimArray(new_array, a.shape + b.shape)
product_list = [i*j for i in Flatten(a) for j in Flatten(b)]
return ImmutableDenseNDimArray(product_list, a.shape + b.shape)
def tensorcontraction(array, *contraction_axes):
"""
Contraction of an array-like object on the specified axes.
Examples
========
>>> from sympy import Array, tensorcontraction
>>> from sympy import Matrix, eye
>>> tensorcontraction(eye(3), (0, 1))
3
>>> A = Array(range(18), (3, 2, 3))
>>> A
[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]]
>>> tensorcontraction(A, (0, 2))
[21, 30]
Matrix multiplication may be emulated with a proper combination of
``tensorcontraction`` and ``tensorproduct``
>>> from sympy import tensorproduct
>>> from sympy.abc import a,b,c,d,e,f,g,h
>>> m1 = Matrix([[a, b], [c, d]])
>>> m2 = Matrix([[e, f], [g, h]])
>>> p = tensorproduct(m1, m2)
>>> p
[[[[a*e, a*f], [a*g, a*h]], [[b*e, b*f], [b*g, b*h]]], [[[c*e, c*f], [c*g, c*h]], [[d*e, d*f], [d*g, d*h]]]]
>>> tensorcontraction(p, (1, 2))
[[a*e + b*g, a*f + b*h], [c*e + d*g, c*f + d*h]]
>>> m1*m2
Matrix([
[a*e + b*g, a*f + b*h],
[c*e + d*g, c*f + d*h]])
"""
array = _arrayfy(array)
# Verify contraction_axes:
taken_dims = set()
for axes_group in contraction_axes:
if not isinstance(axes_group, Iterable):
raise ValueError("collections of contraction axes expected")
dim = array.shape[axes_group[0]]
for d in axes_group:
if d in taken_dims:
raise ValueError("dimension specified more than once")
if dim != array.shape[d]:
raise ValueError("cannot contract between axes of different dimension")
taken_dims.add(d)
rank = array.rank()
remaining_shape = [dim for i, dim in enumerate(array.shape) if i not in taken_dims]
cum_shape = [0]*rank
_cumul = 1
for i in range(rank):
cum_shape[rank - i - 1] = _cumul
_cumul *= int(array.shape[rank - i - 1])
# DEFINITION: by absolute position it is meant the position along the one
# dimensional array containing all the tensor components.
# Possible future work on this module: move computation of absolute
# positions to a class method.
# Determine absolute positions of the uncontracted indices:
remaining_indices = [[cum_shape[i]*j for j in range(array.shape[i])]
for i in range(rank) if i not in taken_dims]
# Determine absolute positions of the contracted indices:
summed_deltas = []
for axes_group in contraction_axes:
lidx = []
for js in range(array.shape[axes_group[0]]):
lidx.append(sum([cum_shape[ig] * js for ig in axes_group]))
summed_deltas.append(lidx)
# Compute the contracted array:
#
# 1. external for loops on all uncontracted indices.
# Uncontracted indices are determined by the combinatorial product of
# the absolute positions of the remaining indices.
# 2. internal loop on all contracted indices.
    # It sums the values of the absolute contracted index and the absolute
# uncontracted index for the external loop.
contracted_array = []
for icontrib in itertools.product(*remaining_indices):
index_base_position = sum(icontrib)
isum = S.Zero
for sum_to_index in itertools.product(*summed_deltas):
idx = array._get_tuple_index(index_base_position + sum(sum_to_index))
isum += array[idx]
contracted_array.append(isum)
if len(remaining_indices) == 0:
assert len(contracted_array) == 1
return contracted_array[0]
return type(array)(contracted_array, remaining_shape)
def derive_by_array(expr, dx):
r"""
Derivative by arrays. Supports both arrays and scalars.
Given the array `A_{i_1, \ldots, i_N}` and the array `X_{j_1, \ldots, j_M}`
this function will return a new array `B` defined by
`B_{j_1,\ldots,j_M,i_1,\ldots,i_N} := \frac{\partial A_{i_1,\ldots,i_N}}{\partial X_{j_1,\ldots,j_M}}`
Examples
========
>>> from sympy import derive_by_array
>>> from sympy.abc import x, y, z, t
>>> from sympy import cos
>>> derive_by_array(cos(x*t), x)
-t*sin(t*x)
>>> derive_by_array(cos(x*t), [x, y, z, t])
[-t*sin(t*x), 0, 0, -x*sin(t*x)]
>>> derive_by_array([x, y**2*z], [[x, y], [z, t]])
[[[1, 0], [0, 2*y*z]], [[0, y**2], [0, 0]]]
"""
from sympy.matrices import MatrixBase
from sympy.tensor.array import SparseNDimArray
array_types = (Iterable, MatrixBase, NDimArray)
if isinstance(dx, array_types):
dx = ImmutableDenseNDimArray(dx)
for i in dx:
if not i._diff_wrt:
raise ValueError("cannot derive by this array")
if isinstance(expr, array_types):
if isinstance(expr, NDimArray):
expr = expr.as_immutable()
else:
expr = ImmutableDenseNDimArray(expr)
if isinstance(dx, array_types):
if isinstance(expr, SparseNDimArray):
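                # The result shape is dx.shape + expr.shape, so the flat index
                # of the entry (i, k) is i*len(expr) + k.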
lp = len(expr)
new_array = {k + i*lp: v
for i, x in enumerate(Flatten(dx))
for k, v in expr.diff(x)._sparse_array.items()}
else:
new_array = [[y.diff(x) for y in Flatten(expr)] for x in Flatten(dx)]
return type(expr)(new_array, dx.shape + expr.shape)
else:
return expr.diff(dx)
else:
if isinstance(dx, array_types):
return ImmutableDenseNDimArray([expr.diff(i) for i in Flatten(dx)], dx.shape)
else:
return diff(expr, dx)
def permutedims(expr, perm):
"""
Permutes the indices of an array.
Parameter specifies the permutation of the indices.
Examples
========
>>> from sympy.abc import x, y, z, t
>>> from sympy import sin
>>> from sympy import Array, permutedims
>>> a = Array([[x, y, z], [t, sin(x), 0]])
>>> a
[[x, y, z], [t, sin(x), 0]]
>>> permutedims(a, (1, 0))
[[x, t], [y, sin(x)], [z, 0]]
If the array is of second order, ``transpose`` can be used:
>>> from sympy import transpose
>>> transpose(a)
[[x, t], [y, sin(x)], [z, 0]]
Examples on higher dimensions:
>>> b = Array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> permutedims(b, (2, 1, 0))
[[[1, 5], [3, 7]], [[2, 6], [4, 8]]]
>>> permutedims(b, (1, 2, 0))
[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]
``Permutation`` objects are also allowed:
>>> from sympy.combinatorics import Permutation
>>> permutedims(b, Permutation([1, 2, 0]))
[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]
"""
from sympy.tensor.array import SparseNDimArray
if not isinstance(expr, NDimArray):
raise TypeError("expression has to be an N-dim array")
from sympy.combinatorics import Permutation
if not isinstance(perm, Permutation):
perm = Permutation(list(perm))
if perm.size != expr.rank():
raise ValueError("wrong permutation size")
# Get the inverse permutation:
iperm = ~perm
new_shape = perm(expr.shape)
if isinstance(expr, SparseNDimArray):
return type(expr)({tuple(perm(expr._get_tuple_index(k))): v
for k, v in expr._sparse_array.items()}, new_shape)
indices_span = perm([range(i) for i in expr.shape])
new_array = [None]*len(expr)
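    # Enumerate the indices of the permuted array in row-major order and fetch
    # each element from the original array at the inverse-permuted index.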
for i, idx in enumerate(itertools.product(*indices_span)):
t = iperm(idx)
new_array[i] = expr[t]
return type(expr)(new_array, new_shape)
class Flatten(Basic):
'''
Flatten an iterable object to a list in a lazy-evaluation way.
Notes
=====
    This class is an iterator, so the memory cost of flattening stays low.
    Iteration is optimised for some specific data types such as
    DenseNDimArray and SparseNDimArray.
Examples
========
>>> from sympy.tensor.array.arrayop import Flatten
>>> from sympy.tensor.array import Array
>>> A = Array(range(6)).reshape(2, 3)
>>> Flatten(A)
Flatten([[0, 1, 2], [3, 4, 5]])
>>> [i for i in Flatten(A)]
[0, 1, 2, 3, 4, 5]
'''
def __init__(self, iterable):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array import NDimArray
if not isinstance(iterable, (Iterable, MatrixBase)):
raise NotImplementedError("Data type not yet supported")
if isinstance(iterable, list):
iterable = NDimArray(iterable)
self._iter = iterable
self._idx = 0
def __iter__(self):
return self
def __next__(self):
from sympy.matrices.matrices import MatrixBase
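        # Dispatch on the concrete container type so elements are fetched
        # without building an intermediate flat list.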
if len(self._iter) > self._idx:
if isinstance(self._iter, DenseNDimArray):
result = self._iter._array[self._idx]
elif isinstance(self._iter, SparseNDimArray):
if self._idx in self._iter._sparse_array:
result = self._iter._sparse_array[self._idx]
else:
result = 0
elif isinstance(self._iter, MatrixBase):
result = self._iter[self._idx]
elif hasattr(self._iter, '__next__'):
result = next(self._iter)
else:
result = self._iter[self._idx]
else:
raise StopIteration
self._idx += 1
return result
def next(self):
return self.__next__()
|
c47bd464039176d09df3bba9c7cad9e0a7d898af97ed0a9387c178a40af76cb9 | from sympy import S, Dict, Basic, Tuple
from sympy.core.sympify import _sympify
from sympy.tensor.array.mutable_ndim_array import MutableNDimArray
from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray
import functools
class SparseNDimArray(NDimArray):
def __new__(self, *args, **kwargs):
return ImmutableSparseNDimArray(*args, **kwargs)
def __getitem__(self, index):
"""
Get an element from a sparse N-dim array.
Examples
========
>>> from sympy import MutableSparseNDimArray
>>> a = MutableSparseNDimArray(range(4), (2, 2))
>>> a
[[0, 1], [2, 3]]
>>> a[0, 0]
0
>>> a[1, 1]
3
>>> a[0]
[0, 1]
>>> a[1]
[2, 3]
Symbolic indexing:
>>> from sympy.abc import i, j
>>> a[i, j]
[[0, 1], [2, 3]][i, j]
Replace `i` and `j` to get element `(0, 0)`:
>>> a[i, j].subs({i: 0, j: 0})
0
"""
syindex = self._check_symbolic_index(index)
if syindex is not None:
return syindex
index = self._check_index_for_getitem(index)
# `index` is a tuple with one or more slices:
if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]):
sl_factors, eindices = self._get_slice_data_for_array_access(index)
array = [self._sparse_array.get(self._parse_index(i), S.Zero) for i in eindices]
nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)]
return type(self)(array, nshape)
else:
index = self._parse_index(index)
return self._sparse_array.get(index, S.Zero)
@classmethod
def zeros(cls, *shape):
"""
Return a sparse N-dim array of zeros.
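        Examples
        ========
        >>> from sympy import MutableSparseNDimArray
        >>> MutableSparseNDimArray.zeros(2, 3)
        [[0, 0, 0], [0, 0, 0]]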
"""
return cls({}, shape)
def tomatrix(self):
"""
        Converts the sparse N-dim array to a Matrix. Can convert only 2-dim arrays, else will raise an error.
Examples
========
>>> from sympy import MutableSparseNDimArray
>>> a = MutableSparseNDimArray([1 for i in range(9)], (3, 3))
>>> b = a.tomatrix()
>>> b
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
from sympy.matrices import SparseMatrix
if self.rank() != 2:
raise ValueError('Dimensions must be of size of 2')
mat_sparse = {}
for key, value in self._sparse_array.items():
mat_sparse[self._get_tuple_index(key)] = value
return SparseMatrix(self.shape[0], self.shape[1], mat_sparse)
def reshape(self, *newshape):
new_total_size = functools.reduce(lambda x,y: x*y, newshape)
if new_total_size != self._loop_size:
raise ValueError("Invalid reshape parameters " + newshape)
return type(self)(self._sparse_array, newshape)
class ImmutableSparseNDimArray(SparseNDimArray, ImmutableNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
from sympy.utilities.iterables import flatten
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
shape = Tuple(*map(_sympify, shape))
cls._check_special_bounds(flat_list, shape)
loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
# Sparse array:
if isinstance(flat_list, (dict, Dict)):
sparse_array = Dict(flat_list)
else:
sparse_array = {}
for i, el in enumerate(flatten(flat_list)):
if el != 0:
sparse_array[i] = _sympify(el)
sparse_array = Dict(sparse_array)
self = Basic.__new__(cls, sparse_array, shape, **kwargs)
self._shape = shape
self._rank = len(shape)
self._loop_size = loop_size
self._sparse_array = sparse_array
return self
def __setitem__(self, index, value):
raise TypeError("immutable N-dim array")
def as_mutable(self):
return MutableSparseNDimArray(self)
class MutableSparseNDimArray(MutableNDimArray, SparseNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
from sympy.utilities.iterables import flatten
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
self = object.__new__(cls)
self._shape = shape
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
# Sparse array:
if isinstance(flat_list, (dict, Dict)):
self._sparse_array = dict(flat_list)
return self
self._sparse_array = {}
for i, el in enumerate(flatten(flat_list)):
if el != 0:
self._sparse_array[i] = _sympify(el)
return self
def __setitem__(self, index, value):
"""Allows to set items to MutableDenseNDimArray.
Examples
========
>>> from sympy import MutableSparseNDimArray
>>> a = MutableSparseNDimArray.zeros(2, 2)
>>> a[0, 0] = 1
>>> a[1, 1] = 1
>>> a
[[1, 0], [0, 1]]
"""
if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]):
value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value)
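            # Keep the representation sparse: store only nonzero values and
            # drop keys whose value becomes zero.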
for i in eindices:
other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None]
other_value = value[other_i]
complete_index = self._parse_index(i)
if other_value != 0:
self._sparse_array[complete_index] = other_value
elif complete_index in self._sparse_array:
self._sparse_array.pop(complete_index)
else:
index = self._parse_index(index)
value = _sympify(value)
if value == 0 and index in self._sparse_array:
self._sparse_array.pop(index)
else:
self._sparse_array[index] = value
def as_immutable(self):
return ImmutableSparseNDimArray(self)
@property
def free_symbols(self):
return {i for j in self._sparse_array.values() for i in j.free_symbols}
|
6170354f73775901d186a7af389fe78098c8385f4a738319fa196316344ebb84 | from sympy import Basic
from sympy import S
from sympy.core.expr import Expr
from sympy.core.numbers import Integer
from sympy.core.sympify import sympify
from sympy.core.compatibility import SYMPY_INTS, Iterable
from sympy.printing.defaults import Printable
import itertools
class NDimArray(Printable):
"""
Examples
========
Create an N-dim array of zeros:
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(2, 3, 4)
>>> a
[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
    Create an N-dim array from a list:
>>> a = MutableDenseNDimArray([[2, 3], [4, 5]])
>>> a
[[2, 3], [4, 5]]
>>> b = MutableDenseNDimArray([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]])
>>> b
[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]
Create an N-dim array from a flat list with dimension shape:
>>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))
>>> a
[[1, 2, 3], [4, 5, 6]]
Create an N-dim array from a matrix:
>>> from sympy import Matrix
>>> a = Matrix([[1,2],[3,4]])
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> b = MutableDenseNDimArray(a)
>>> b
[[1, 2], [3, 4]]
Arithmetic operations on N-dim arrays
>>> a = MutableDenseNDimArray([1, 1, 1, 1], (2, 2))
>>> b = MutableDenseNDimArray([4, 4, 4, 4], (2, 2))
>>> c = a + b
>>> c
[[5, 5], [5, 5]]
>>> a - b
[[-3, -3], [-3, -3]]
"""
_diff_wrt = True
is_scalar = False
def __new__(cls, iterable, shape=None, **kwargs):
from sympy.tensor.array import ImmutableDenseNDimArray
return ImmutableDenseNDimArray(iterable, shape, **kwargs)
def _parse_index(self, index):
if isinstance(index, (SYMPY_INTS, Integer)):
raise ValueError("Only a tuple index is accepted")
if self._loop_size == 0:
raise ValueError("Index not valide with an empty array")
if len(index) != self._rank:
raise ValueError('Wrong number of array axes')
real_index = 0
# check if input index can exist in current indexing
for i in range(self._rank):
if (index[i] >= self.shape[i]) or (index[i] < -self.shape[i]):
raise ValueError('Index ' + str(index) + ' out of border')
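            # A negative index wraps around: the extra 1 added here turns this
            # axis' contribution below into shape[i] + index[i].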
if index[i] < 0:
real_index += 1
real_index = real_index*self.shape[i] + index[i]
return real_index
def _get_tuple_index(self, integer_index):
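        # Recover the tuple index from a flat (row-major) index by repeated
        # modulo and floor division, starting from the last axis.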
index = []
for i, sh in enumerate(reversed(self.shape)):
index.append(integer_index % sh)
integer_index //= sh
index.reverse()
return tuple(index)
def _check_symbolic_index(self, index):
# Check if any index is symbolic:
tuple_index = (index if isinstance(index, tuple) else (index,))
if any([(isinstance(i, Expr) and (not i.is_number)) for i in tuple_index]):
for i, nth_dim in zip(tuple_index, self.shape):
if ((i < 0) == True) or ((i >= nth_dim) == True):
raise ValueError("index out of range")
from sympy.tensor import Indexed
return Indexed(self, *tuple_index)
return None
def _setter_iterable_check(self, value):
from sympy.matrices.matrices import MatrixBase
if isinstance(value, (Iterable, MatrixBase, NDimArray)):
raise NotImplementedError
@classmethod
def _scan_iterable_shape(cls, iterable):
def f(pointer):
if not isinstance(pointer, Iterable):
return [pointer], ()
result = []
elems, shapes = zip(*[f(i) for i in pointer])
if len(set(shapes)) != 1:
raise ValueError("could not determine shape unambiguously")
for i in elems:
result.extend(i)
return result, (len(shapes),)+shapes[0]
return f(iterable)
@classmethod
def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array import SparseNDimArray
from sympy import Dict, Tuple
if shape is None:
if iterable is None:
shape = ()
iterable = ()
# Construction of a sparse array from a sparse array
elif isinstance(iterable, SparseNDimArray):
return iterable._shape, iterable._sparse_array
# Construct N-dim array from an iterable (numpy arrays included):
elif isinstance(iterable, Iterable):
iterable, shape = cls._scan_iterable_shape(iterable)
# Construct N-dim array from a Matrix:
elif isinstance(iterable, MatrixBase):
shape = iterable.shape
# Construct N-dim array from another N-dim array:
elif isinstance(iterable, NDimArray):
shape = iterable.shape
else:
shape = ()
iterable = (iterable,)
if isinstance(iterable, (Dict, dict)) and shape is not None:
new_dict = iterable.copy()
for k, v in new_dict.items():
if isinstance(k, (tuple, Tuple)):
new_key = 0
for i, idx in enumerate(k):
new_key = new_key * shape[i] + idx
iterable[new_key] = iterable[k]
del iterable[k]
if isinstance(shape, (SYMPY_INTS, Integer)):
shape = (shape,)
if any([not isinstance(dim, (SYMPY_INTS, Integer)) for dim in shape]):
raise TypeError("Shape should contain integers only.")
return tuple(shape), iterable
def __len__(self):
"""Overload common function len(). Returns number of elements in array.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(3, 3)
>>> a
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
>>> len(a)
9
"""
return self._loop_size
@property
def shape(self):
"""
Returns array shape (dimension).
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(3, 3)
>>> a.shape
(3, 3)
"""
return self._shape
def rank(self):
"""
Returns rank of array.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(3,4,5,6,3)
>>> a.rank()
5
"""
return self._rank
def diff(self, *args, **kwargs):
"""
Calculate the derivative of each element in the array.
Examples
========
>>> from sympy import ImmutableDenseNDimArray
>>> from sympy.abc import x, y
>>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]])
>>> M.diff(x)
[[1, 0], [0, y]]
"""
from sympy.tensor.array.array_derivatives import ArrayDerivative
kwargs.setdefault('evaluate', True)
return ArrayDerivative(self.as_immutable(), *args, **kwargs)
def _eval_derivative(self, base):
# Types are (base: scalar, self: array)
return self.applyfunc(lambda x: base.diff(x))
def _eval_derivative_n_times(self, s, n):
return Basic._eval_derivative_n_times(self, s, n)
def applyfunc(self, f):
"""Apply a function to each element of the N-dim array.
Examples
========
>>> from sympy import ImmutableDenseNDimArray
>>> m = ImmutableDenseNDimArray([i*2+j for i in range(2) for j in range(2)], (2, 2))
>>> m
[[0, 1], [2, 3]]
>>> m.applyfunc(lambda i: 2*i)
[[0, 2], [4, 6]]
"""
from sympy.tensor.array import SparseNDimArray
from sympy.tensor.array.arrayop import Flatten
if isinstance(self, SparseNDimArray) and f(S.Zero) == 0:
return type(self)({k: f(v) for k, v in self._sparse_array.items() if f(v) != 0}, self.shape)
return type(self)(map(f, Flatten(self)), self.shape)
def _sympystr(self, printer):
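        # Print nested brackets recursively: at each level the flat data is
        # split into shape_left[0] contiguous chunks.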
def f(sh, shape_left, i, j):
if len(shape_left) == 1:
return "["+", ".join([printer._print(self[self._get_tuple_index(e)]) for e in range(i, j)])+"]"
sh //= shape_left[0]
return "[" + ", ".join([f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh) for e in range(shape_left[0])]) + "]" # + "\n"*len(shape_left)
if self.rank() == 0:
return printer._print(self[()])
return f(self._loop_size, self.shape, 0, self._loop_size)
def tolist(self):
"""
        Converts the N-dim array to a nested list
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1, 2, 3, 4], (2, 2))
>>> a
[[1, 2], [3, 4]]
>>> b = a.tolist()
>>> b
[[1, 2], [3, 4]]
"""
def f(sh, shape_left, i, j):
if len(shape_left) == 1:
return [self[self._get_tuple_index(e)] for e in range(i, j)]
result = []
sh //= shape_left[0]
for e in range(shape_left[0]):
result.append(f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh))
return result
return f(self._loop_size, self.shape, 0, self._loop_size)
def __add__(self, other):
from sympy.tensor.array.arrayop import Flatten
if not isinstance(other, NDimArray):
return NotImplemented
if self.shape != other.shape:
raise ValueError("array shape mismatch")
result_list = [i+j for i,j in zip(Flatten(self), Flatten(other))]
return type(self)(result_list, self.shape)
def __sub__(self, other):
from sympy.tensor.array.arrayop import Flatten
if not isinstance(other, NDimArray):
return NotImplemented
if self.shape != other.shape:
raise ValueError("array shape mismatch")
result_list = [i-j for i,j in zip(Flatten(self), Flatten(other))]
return type(self)(result_list, self.shape)
def __mul__(self, other):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array import SparseNDimArray
from sympy.tensor.array.arrayop import Flatten
if isinstance(other, (Iterable, NDimArray, MatrixBase)):
raise ValueError("scalar expected, use tensorproduct(...) for tensorial product")
other = sympify(other)
if isinstance(self, SparseNDimArray):
if other.is_zero:
return type(self)({}, self.shape)
return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape)
result_list = [i*other for i in Flatten(self)]
return type(self)(result_list, self.shape)
def __rmul__(self, other):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array import SparseNDimArray
from sympy.tensor.array.arrayop import Flatten
if isinstance(other, (Iterable, NDimArray, MatrixBase)):
raise ValueError("scalar expected, use tensorproduct(...) for tensorial product")
other = sympify(other)
if isinstance(self, SparseNDimArray):
if other.is_zero:
return type(self)({}, self.shape)
return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape)
result_list = [other*i for i in Flatten(self)]
return type(self)(result_list, self.shape)
def __truediv__(self, other):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array import SparseNDimArray
from sympy.tensor.array.arrayop import Flatten
if isinstance(other, (Iterable, NDimArray, MatrixBase)):
raise ValueError("scalar expected")
other = sympify(other)
if isinstance(self, SparseNDimArray) and other != S.Zero:
return type(self)({k: v/other for (k, v) in self._sparse_array.items()}, self.shape)
result_list = [i/other for i in Flatten(self)]
return type(self)(result_list, self.shape)
def __rtruediv__(self, other):
raise NotImplementedError('unsupported operation on NDimArray')
def __neg__(self):
from sympy.tensor.array import SparseNDimArray
from sympy.tensor.array.arrayop import Flatten
if isinstance(self, SparseNDimArray):
return type(self)({k: -v for (k, v) in self._sparse_array.items()}, self.shape)
result_list = [-i for i in Flatten(self)]
return type(self)(result_list, self.shape)
def __iter__(self):
def iterator():
if self._shape:
for i in range(self._shape[0]):
yield self[i]
else:
yield self[()]
return iterator()
def __eq__(self, other):
"""
NDimArray instances can be compared to each other.
        Instances are equal if they have the same shape and data.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(2, 3)
>>> b = MutableDenseNDimArray.zeros(2, 3)
>>> a == b
True
>>> c = a.reshape(3, 2)
>>> c == b
False
>>> a[0,0] = 1
>>> b[0,0] = 2
>>> a == b
False
"""
from sympy.tensor.array import SparseNDimArray
if not isinstance(other, NDimArray):
return False
if not self.shape == other.shape:
return False
if isinstance(self, SparseNDimArray) and isinstance(other, SparseNDimArray):
return dict(self._sparse_array) == dict(other._sparse_array)
return list(self) == list(other)
def __ne__(self, other):
return not self == other
def _eval_transpose(self):
if self.rank() != 2:
raise ValueError("array rank not 2")
from .arrayop import permutedims
return permutedims(self, (1, 0))
def transpose(self):
return self._eval_transpose()
def _eval_conjugate(self):
from sympy.tensor.array.arrayop import Flatten
return self.func([i.conjugate() for i in Flatten(self)], self.shape)
def conjugate(self):
return self._eval_conjugate()
def _eval_adjoint(self):
return self.transpose().conjugate()
def adjoint(self):
return self._eval_adjoint()
def _slice_expand(self, s, dim):
if not isinstance(s, slice):
return (s,)
start, stop, step = s.indices(dim)
return [start + i*step for i in range((stop-start)//step)]
def _get_slice_data_for_array_access(self, index):
sl_factors = [self._slice_expand(i, dim) for (i, dim) in zip(index, self.shape)]
eindices = itertools.product(*sl_factors)
return sl_factors, eindices
def _get_slice_data_for_array_assignment(self, index, value):
if not isinstance(value, NDimArray):
value = type(self)(value)
sl_factors, eindices = self._get_slice_data_for_array_access(index)
slice_offsets = [min(i) if isinstance(i, list) else None for i in sl_factors]
# TODO: add checks for dimensions for `value`?
return value, eindices, slice_offsets
@classmethod
def _check_special_bounds(cls, flat_list, shape):
if shape == () and len(flat_list) != 1:
raise ValueError("arrays without shape need one scalar value")
if shape == (0,) and len(flat_list) > 0:
raise ValueError("if array shape is (0,) there cannot be elements")
def _check_index_for_getitem(self, index):
if isinstance(index, (SYMPY_INTS, Integer, slice)):
index = (index, )
if len(index) < self.rank():
index = tuple([i for i in index] + \
[slice(None) for i in range(len(index), self.rank())])
if len(index) > self.rank():
raise ValueError('Dimension of index greater than rank of array')
return index
class ImmutableNDimArray(NDimArray, Basic):
_op_priority = 11.0
def __hash__(self):
return Basic.__hash__(self)
def as_immutable(self):
return self
def as_mutable(self):
raise NotImplementedError("abstract method")
|
6cb3f331286d35d029c34ede44eee7768af97b45885ce3a12733736b1e9146e5 | import functools
from sympy import Basic, Tuple, S
from sympy.core.sympify import _sympify
from sympy.tensor.array.mutable_ndim_array import MutableNDimArray
from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray
from sympy.simplify.simplify import simplify
class DenseNDimArray(NDimArray):
def __new__(self, *args, **kwargs):
return ImmutableDenseNDimArray(*args, **kwargs)
def __getitem__(self, index):
"""
        Allows getting items from the N-dim array.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([0, 1, 2, 3], (2, 2))
>>> a
[[0, 1], [2, 3]]
>>> a[0, 0]
0
>>> a[1, 1]
3
>>> a[0]
[0, 1]
>>> a[1]
[2, 3]
Symbolic index:
>>> from sympy.abc import i, j
>>> a[i, j]
[[0, 1], [2, 3]][i, j]
Replace `i` and `j` to get element `(1, 1)`:
>>> a[i, j].subs({i: 1, j: 1})
3
"""
syindex = self._check_symbolic_index(index)
if syindex is not None:
return syindex
index = self._check_index_for_getitem(index)
if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]):
sl_factors, eindices = self._get_slice_data_for_array_access(index)
array = [self._array[self._parse_index(i)] for i in eindices]
nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)]
return type(self)(array, nshape)
else:
index = self._parse_index(index)
return self._array[index]
@classmethod
def zeros(cls, *shape):
list_length = functools.reduce(lambda x, y: x*y, shape, S.One)
return cls._new(([0]*list_length,), shape)
def tomatrix(self):
"""
        Converts the N-dim array to a Matrix. Can convert only 2-dim arrays, else will raise an error.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1 for i in range(9)], (3, 3))
>>> b = a.tomatrix()
>>> b
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
from sympy.matrices import Matrix
if self.rank() != 2:
raise ValueError('Dimensions must be of size of 2')
return Matrix(self.shape[0], self.shape[1], self._array)
def reshape(self, *newshape):
"""
        Returns a MutableDenseNDimArray instance with the new shape. The total
        number of elements must be compatible with the new shape, which is
        given as the arguments of the method.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))
>>> a.shape
(2, 3)
>>> a
[[1, 2, 3], [4, 5, 6]]
>>> b = a.reshape(3, 2)
>>> b.shape
(3, 2)
>>> b
[[1, 2], [3, 4], [5, 6]]
"""
new_total_size = functools.reduce(lambda x,y: x*y, newshape)
if new_total_size != self._loop_size:
raise ValueError("Invalid reshape parameters " + newshape)
# there is no `.func` as this class does not subtype `Basic`:
return type(self)(self._array, newshape)
class ImmutableDenseNDimArray(DenseNDimArray, ImmutableNDimArray):
"""
"""
def __new__(cls, iterable, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
from sympy.utilities.iterables import flatten
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
shape = Tuple(*map(_sympify, shape))
cls._check_special_bounds(flat_list, shape)
flat_list = flatten(flat_list)
flat_list = Tuple(*flat_list)
self = Basic.__new__(cls, flat_list, shape, **kwargs)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape, 1)
return self
def __setitem__(self, index, value):
raise TypeError('immutable N-dim array')
def as_mutable(self):
return MutableDenseNDimArray(self)
def _eval_simplify(self, **kwargs):
return self.applyfunc(simplify)
class MutableDenseNDimArray(DenseNDimArray, MutableNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
from sympy.utilities.iterables import flatten
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
flat_list = flatten(flat_list)
self = object.__new__(cls)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
return self
def __setitem__(self, index, value):
"""Allows to set items to MutableDenseNDimArray.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(2, 2)
>>> a[0,0] = 1
>>> a[1,1] = 1
>>> a
[[1, 0], [0, 1]]
"""
if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]):
value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value)
for i in eindices:
other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None]
self._array[self._parse_index(i)] = value[other_i]
else:
index = self._parse_index(index)
self._setter_iterable_check(value)
value = _sympify(value)
self._array[index] = value
def as_immutable(self):
return ImmutableDenseNDimArray(self)
@property
def free_symbols(self):
return {i for j in self._array for i in j.free_symbols}
|
017d807c581192abde2f504acc5f0f79dbf7d2abd07368a712acb4d510a69080 | from functools import wraps
from sympy import Matrix, eye, Integer, expand, Indexed, Sum
from sympy.combinatorics import Permutation
from sympy.core import S, Rational, Symbol, Basic, Add
from sympy.core.containers import Tuple
from sympy.core.symbol import symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.tensor.array import Array
from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, \
get_symmetric_group_sgs, TensorIndex, tensor_mul, TensAdd, \
riemann_cyclic_replace, riemann_cyclic, TensMul, tensor_heads, \
TensorManager, TensExpr, TensorHead, canon_bp, \
tensorhead, tensorsymmetry, TensorType, substitute_indices
from sympy.testing.pytest import raises, XFAIL, warns_deprecated_sympy, ignore_warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.matrices import diag
def filter_warnings_decorator(f):
@wraps(f)
def wrapper():
with ignore_warnings(SymPyDeprecationWarning):
f()
return wrapper
def _is_equal(arg1, arg2):
if isinstance(arg1, TensExpr):
return arg1.equals(arg2)
elif isinstance(arg2, TensExpr):
return arg2.equals(arg1)
return arg1 == arg2
#################### Tests from tensor_can.py #######################
def test_canonicalize_no_slot_sym():
# A_d0 * B^d0; T_c = A^d0*B_d0
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, d0, d1 = tensor_indices('a,b,d0,d1', Lorentz)
A, B = tensor_heads('A,B', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(-d0)*B(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*B(-L_0)'
# A^a * B^b; T_c = T
t = A(a)*B(b)
tc = t.canon_bp()
assert tc == t
# B^b * A^a
t1 = B(b)*A(a)
tc = t1.canon_bp()
assert str(tc) == 'A(a)*B(b)'
# A symmetric
# A^{b}_{d0}*A^{d0, a}; T_c = A^{a d0}*A{b}_{d0}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(b, -d0)*A(d0, a)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0)*A(b, -L_0)'
# A^{d1}_{d0}*B^d0*C_d1
# T_c = A^{d0 d1}*B_d0*C_d1
B, C = tensor_heads('B,C', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(d1, -d0)*B(d0)*C(-d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_0)*C(-L_1)'
# A without symmetry
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5]
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*B(d0)*C(-d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_1)*C(-L_0)'
# A, B without symmetry
# A^{d1}_{d0}*B_{d1}^{d0}
# T_c = A^{d0 d1}*B_{d0 d1}
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*B(-d1, d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_0, -L_1)'
# A_{d0}^{d1}*B_{d1}^{d0}
# T_c = A^{d0 d1}*B_{d1 d0}
t = A(-d0, d1)*B(-d1, d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_1, -L_0)'
# A, B, C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b}
# T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}
C = TensorHead('C', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_1)*C(-L_0, -b)'
# A symmetric, B and C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b}
# T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-L_1, -b)'
# A and C symmetric, B without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# T_c = A^{d0 d1}*B_{a d0}*C_{b d1}
C = TensorHead('C', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-b, -L_1)'
def test_canonicalize_no_dummies():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a, b, c, d', Lorentz)
# A commuting
# A^c A^b A^a
# T_c = A^a A^b A^c
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(c)*A(b)*A(a)
tc = t.canon_bp()
assert str(tc) == 'A(a)*A(b)*A(c)'
# A anticommuting
# A^c A^b A^a
# T_c = -A^a A^b A^c
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1)
t = A(c)*A(b)*A(a)
tc = t.canon_bp()
assert str(tc) == '-A(a)*A(b)*A(c)'
# A commuting and symmetric
# A^{b,d}*A^{c,a}
# T_c = A^{a c}*A^{b d}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(b, d)*A(c, a)
tc = t.canon_bp()
assert str(tc) == 'A(a, c)*A(b, d)'
# A anticommuting and symmetric
# A^{b,d}*A^{c,a}
# T_c = -A^{a c}*A^{b d}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2), 1)
t = A(b, d)*A(c, a)
tc = t.canon_bp()
assert str(tc) == '-A(a, c)*A(b, d)'
# A^{c,a}*A^{b,d}
# T_c = A^{a c}*A^{b d}
t = A(c, a)*A(b, d)
tc = t.canon_bp()
assert str(tc) == 'A(a, c)*A(b, d)'
def test_tensorhead_construction_without_symmetry():
L = TensorIndexType('Lorentz')
A1 = TensorHead('A', [L, L])
A2 = TensorHead('A', [L, L], TensorSymmetry.no_symmetry(2))
assert A1 == A2
A3 = TensorHead('A', [L, L], TensorSymmetry.fully_symmetric(2)) # Symmetric
assert A1 != A3
def test_no_metric_symmetry():
# no metric symmetry; A no symmetry
# A^d1_d0 * A^d0_d1
# T_c = A^d0_d1 * A^d1_d0
Lorentz = TensorIndexType('Lorentz', dummy_name='L', metric_symmetry=0)
d0, d1, d2, d3 = tensor_indices('d:4', Lorentz)
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*A(d0, -d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)'
# A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0
# T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2
t = A(d1, -d2)*A(d0, -d3)*A(d2, -d1)*A(d3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)*A(L_2, -L_3)*A(L_3, -L_2)'
# A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1
# T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0
t = A(d0, -d1)*A(d1, -d2)*A(d2, -d3)*A(d3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_2)*A(L_2, -L_3)*A(L_3, -L_0)'
def test_canonicalize1():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Lorentz)
# A_d0*A^d0; ord = [d0,-d0]
# T_c = A^d0*A_d0
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(-d0)*A(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*A(-L_0)'
# A commuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
# T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2
t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*A(-L_0)*A(L_1)*A(-L_1)*A(L_2)*A(-L_2)'
# A anticommuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
# T_c 0
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1)
t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
tc = t.canon_bp()
assert tc == 0
# A commuting symmetric
# A^{d0 b}*A^a_d1*A^d1_d0
# T_c = A^{a d0}*A^{b d1}*A_{d0 d1}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d0, b)*A(a, -d1)*A(d1, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0)*A(b, L_1)*A(-L_0, -L_1)'
# A, B commuting symmetric
# A^{d0 b}*A^d1_d0*B^a_d1
# T_c = A^{b d0}*A_d0^d1*B^a_d1
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d0, b)*A(d1, -d0)*B(a, -d1)
tc = t.canon_bp()
assert str(tc) == 'A(b, L_0)*A(-L_0, L_1)*B(a, -L_1)'
# A commuting symmetric
# A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1]
# T_c = A^{a d0 d1}*A^{b}_{d0 d1}
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3))
t = A(d1, d0, b)*A(a, -d1, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0, L_1)*A(b, -L_0, -L_1)'
# A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0
# T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3}
t = A(d3, d0, d2)*A(a0, -d1, -d2)*A(d1, -d3, a1)*A(a2, a3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a0, L_0, L_1)*A(a1, -L_0, L_2)*A(a2, a3, L_3)*A(-L_1, -L_2, -L_3)'
# A commuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# in this esxample and in the next three,
# renaming dummy indices and using symmetry of A,
# T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = 0
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3))
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert tc == 0
# A anticommuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1, L_2)*A(-L_0, -L_1, L_3)*B(-L_2, -L_3)'
# A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
Spinor = TensorIndexType('Spinor', dummy_name='S', metric_symmetry=-1)
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Spinor)
A = TensorHead('A', [Spinor]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Spinor]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == '-A(S_0, S_1, S_2)*A(-S_0, -S_1, S_3)*B(-S_2, -S_3)'
# A anticommuting symmetric, B antisymmetric anticommuting,
# no metric symmetry
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
Mat = TensorIndexType('Mat', metric_symmetry=0, dummy_name='M')
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Mat)
A = TensorHead('A', [Mat]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Mat]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == 'A(M_0, M_1, M_2)*A(-M_0, -M_1, -M_3)*B(-M_2, M_3)'
# Gamma anticommuting
# Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha}
# T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu}
alpha, beta, gamma, mu, nu, rho = \
tensor_indices('alpha,beta,gamma,mu,nu,rho', Lorentz)
Gamma = TensorHead('Gamma', [Lorentz],
TensorSymmetry.fully_symmetric(1), 2)
Gamma2 = TensorHead('Gamma', [Lorentz]*2,
TensorSymmetry.fully_symmetric(-2), 2)
Gamma3 = TensorHead('Gamma', [Lorentz]*3,
TensorSymmetry.fully_symmetric(-3), 2)
t = Gamma2(-mu, -nu)*Gamma(rho)*Gamma3(nu, mu, alpha)
tc = t.canon_bp()
assert str(tc) == '-Gamma(L_0, L_1)*Gamma(rho)*Gamma(alpha, -L_0, -L_1)'
# Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha}
# T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu}
t = Gamma2(mu, nu)*Gamma2(beta, gamma)*Gamma(-rho)*Gamma3(alpha, -mu, -nu)
tc = t.canon_bp()
assert str(tc) == 'Gamma(L_0, L_1)*Gamma(beta, gamma)*Gamma(-rho)*Gamma(alpha, -L_0, -L_1)'
# f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry
# f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b}
# g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]
# T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e}
Flavor = TensorIndexType('Flavor', dummy_name='F')
a, b, c, d, e, ff = tensor_indices('a,b,c,d,e,f', Flavor)
mu, nu = tensor_indices('mu,nu', Lorentz)
f = TensorHead('f', [Flavor]*3, TensorSymmetry.direct_product(1, -2))
A = TensorHead('A', [Lorentz, Flavor], TensorSymmetry.no_symmetry(2))
t = f(c, -d, -a)*f(-c, -e, -b)*A(-mu, d)*A(-nu, a)*A(nu, e)*A(mu, b)
tc = t.canon_bp()
assert str(tc) == '-f(F_0, F_1, F_2)*f(-F_0, F_3, F_4)*A(L_0, -F_1)*A(-L_0, -F_3)*A(L_1, -F_2)*A(-L_1, -F_4)'
def test_bug_correction_tensor_indices():
# to make sure that tensor_indices does not return a list if creating
# only one index:
A = TensorIndexType("A")
i = tensor_indices('i', A)
assert not isinstance(i, (tuple, list))
assert isinstance(i, TensorIndex)
def test_riemann_invariants():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11 = \
tensor_indices('d0:12', Lorentz)
# R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]
# T_c = -R^{d0 d1}_{d0 d1}
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(d0, d1, -d1, -d0)
tc = t.canon_bp()
assert str(tc) == '-R(L_0, L_1, -L_0, -L_1)'
# R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} *
# R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10}
# can = [0,2,4,6, 1,3,8,10, 5,7,12,14, 9,11,16,18, 13,15,20,22,
    #         17,19,21,23, 24,25]
# T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} *
# R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11}
t = R(-d11,d1,-d0,d5)*R(d6,d4,d0,-d5)*R(-d7,-d2,-d8,-d9)* \
R(-d10,-d3,-d6,-d4)*R(d2,d7,d11,-d1)*R(d8,d9,d3,d10)
tc = t.canon_bp()
assert str(tc) == 'R(L_0, L_1, L_2, L_3)*R(-L_0, -L_1, L_4, L_5)*R(-L_2, -L_3, L_6, L_7)*R(-L_4, -L_5, L_8, L_9)*R(-L_6, -L_7, L_10, L_11)*R(-L_8, -L_9, -L_10, -L_11)'
def test_riemann_products():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
d0, d1, d2, d3, d4, d5, d6 = tensor_indices('d0:7', Lorentz)
a0, a1, a2, a3, a4, a5 = tensor_indices('a0:6', Lorentz)
a, b = tensor_indices('a,b', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
# R^{a b d0}_d0 = 0
t = R(a, b, d0, -d0)
tc = t.canon_bp()
assert tc == 0
# R^{d0 b a}_d0
# T_c = -R^{a d0 b}_d0
t = R(d0, b, a, -d0)
tc = t.canon_bp()
assert str(tc) == '-R(a, L_0, b, -L_0)'
# R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2]
# T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2}
t = R(d1, -d2, b, -d0)*R(d0, a, -d1, d2)
tc = t.canon_bp()
assert str(tc) == '-R(a, L_0, L_1, L_2)*R(b, -L_0, -L_1, -L_2)'
# A symmetric commuting
# R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5}
# g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15]
# T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6}
V = TensorHead('V', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = R(d6, d5, -d2, d1)*R(d4, d0, d2, d3)*V(-d6, -d0)*V(-d3, -d1)*V(-d4, -d5)
tc = t.canon_bp()
assert str(tc) == '-R(L_0, L_1, L_2, L_3)*R(-L_0, L_4, L_5, L_6)*V(-L_1, -L_4)*V(-L_2, -L_5)*V(-L_3, -L_6)'
# R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1}
# T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2}
t = R(d2, a0, a2, d0)*R(d1, -d2, a1, a3)*R(a4, a5, -d0, -d1)
tc = t.canon_bp()
assert str(tc) == 'R(a0, L_0, a2, L_1)*R(a1, a3, -L_0, L_2)*R(a4, a5, -L_1, -L_2)'
######################################################################
def test_canonicalize2():
D = Symbol('D')
Eucl = TensorIndexType('Eucl', metric_symmetry=1, dim=D, dummy_name='E')
i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14 = \
tensor_indices('i0:15', Eucl)
A = TensorHead('A', [Eucl]*3, TensorSymmetry.fully_symmetric(-3))
# two examples from Cvitanovic, Group Theory page 59
# of identities for antisymmetric tensors of rank 3
# contracted according to the Kuratowski graph eq.(6.59)
t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i3,i7,i5)*A(-i2,-i5,i6)*A(-i4,-i6,i8)
t1 = t.canon_bp()
assert t1 == 0
# eq.(6.60)
#t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*
# A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i3,-i12,i14)
t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*\
A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i9,-i12,i14)
t1 = t.canon_bp()
assert t1 == 0
def test_canonicalize3():
D = Symbol('D')
Spinor = TensorIndexType('Spinor', dim=D, metric_symmetry=-1, dummy_name='S')
a0,a1,a2,a3,a4 = tensor_indices('a0:5', Spinor)
chi, psi = tensor_heads('chi,psi', [Spinor], TensorSymmetry.no_symmetry(1), 1)
t = chi(a1)*psi(a0)
t1 = t.canon_bp()
assert t1 == t
t = psi(a1)*chi(a0)
t1 = t.canon_bp()
assert t1 == -chi(a0)*psi(a1)
def test_TensorIndexType():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', metric_name='g', metric_symmetry=1,
dim=D, dummy_name='L')
m0, m1, m2, m3, m4 = tensor_indices('m0:5', Lorentz)
sym2 = TensorSymmetry.fully_symmetric(2)
sym2n = TensorSymmetry(*get_symmetric_group_sgs(2))
assert sym2 == sym2n
g = Lorentz.metric
assert str(g) == 'g(Lorentz,Lorentz)'
assert Lorentz.eps_dim == Lorentz.dim
TSpace = TensorIndexType('TSpace', dummy_name = 'TSpace')
i0, i1 = tensor_indices('i0 i1', TSpace)
g = TSpace.metric
A = TensorHead('A', [TSpace]*2, sym2)
assert str(A(i0,-i0).canon_bp()) == 'A(TSpace_0, -TSpace_0)'
def test_indices():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
assert a.tensor_index_type == Lorentz
assert a != -a
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(a,b)*B(-b,c)
indices = t.get_indices()
L_0 = TensorIndex('L_0', Lorentz)
assert indices == [a, L_0, -L_0, c]
raises(ValueError, lambda: tensor_indices(3, Lorentz))
raises(ValueError, lambda: A(a,b,c))
A = TensorHead('A', [Lorentz, Lorentz])
assert A('a', 'b') == A(TensorIndex('a', Lorentz),
TensorIndex('b', Lorentz))
assert A('a', '-b') == A(TensorIndex('a', Lorentz),
TensorIndex('b', Lorentz, is_up=False))
assert A('a', TensorIndex('b', Lorentz)) == A(TensorIndex('a', Lorentz),
TensorIndex('b', Lorentz))
def test_TensorSymmetry():
assert TensorSymmetry.fully_symmetric(2) == \
TensorSymmetry(get_symmetric_group_sgs(2))
assert TensorSymmetry.fully_symmetric(-3) == \
TensorSymmetry(get_symmetric_group_sgs(3, True))
assert TensorSymmetry.direct_product(-4) == \
TensorSymmetry.fully_symmetric(-4)
assert TensorSymmetry.fully_symmetric(-1) == \
TensorSymmetry.fully_symmetric(1)
assert TensorSymmetry.direct_product(1, -1, 1) == \
TensorSymmetry.no_symmetry(3)
assert TensorSymmetry(get_symmetric_group_sgs(2)) == \
TensorSymmetry(*get_symmetric_group_sgs(2))
# TODO: add check for *get_symmetric_group_sgs(0)
sym = TensorSymmetry.fully_symmetric(-3)
assert sym.rank == 3
assert sym.base == Tuple(0, 1)
assert sym.generators == Tuple(Permutation(0, 1)(3, 4), Permutation(1, 2)(3, 4))
def test_TensExpr():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
g = Lorentz.metric
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
raises(ValueError, lambda: g(c, d)/g(a, b))
raises(ValueError, lambda: S.One/g(a, b))
raises(ValueError, lambda: (A(c, d) + g(c, d))/g(a, b))
raises(ValueError, lambda: S.One/(A(c, d) + g(c, d)))
raises(ValueError, lambda: A(a, b) + A(a, c))
    A(a, b) + B(a, b)  # would be assigned to t for the commented-out checks below
#raises(NotImplementedError, lambda: TensExpr.__mul__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__add__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__radd__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__sub__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__rsub__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__truediv__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__rtruediv__(t, 'a'))
with ignore_warnings(SymPyDeprecationWarning):
# DO NOT REMOVE THIS AFTER DEPRECATION REMOVED:
raises(ValueError, lambda: A(a, b)**2)
raises(NotImplementedError, lambda: 2**A(a, b))
raises(NotImplementedError, lambda: abs(A(a, b)))
def test_TensorHead():
# simple example of algebraic expression
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
A = TensorHead('A', [Lorentz]*2)
assert A.name == 'A'
assert A.index_types == [Lorentz, Lorentz]
assert A.rank == 2
assert A.symmetry == TensorSymmetry.no_symmetry(2)
assert A.comm == 0
def test_add1():
assert TensAdd().args == ()
assert TensAdd().doit() == 0
# simple example of algebraic expression
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a,b,d0,d1,i,j,k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
# A, B symmetric
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t1 = A(b, -d0)*B(d0, a)
assert TensAdd(t1).equals(t1)
t2a = B(d0, a) + A(d0, a)
t2 = A(b, -d0)*t2a
assert str(t2) == 'A(b, -L_0)*(A(L_0, a) + B(L_0, a))'
t2 = t2.expand()
assert str(t2) == 'A(b, -L_0)*A(L_0, a) + A(b, -L_0)*B(L_0, a)'
t2 = t2.canon_bp()
assert str(t2) == 'A(a, L_0)*A(b, -L_0) + A(b, L_0)*B(a, -L_0)'
t2b = t2 + t1
assert str(t2b) == 'A(a, L_0)*A(b, -L_0) + A(b, -L_0)*B(L_0, a) + A(b, L_0)*B(a, -L_0)'
t2b = t2b.canon_bp()
assert str(t2b) == 'A(a, L_0)*A(b, -L_0) + 2*A(b, L_0)*B(a, -L_0)'
p, q, r = tensor_heads('p,q,r', [Lorentz])
t = q(d0)*2
assert str(t) == '2*q(d0)'
t = 2*q(d0)
assert str(t) == '2*q(d0)'
t1 = p(d0) + 2*q(d0)
assert str(t1) == '2*q(d0) + p(d0)'
t2 = p(-d0) + 2*q(-d0)
assert str(t2) == '2*q(-d0) + p(-d0)'
t1 = p(d0)
t3 = t1*t2
assert str(t3) == 'p(L_0)*(2*q(-L_0) + p(-L_0))'
t3 = t3.expand()
assert str(t3) == 'p(L_0)*p(-L_0) + 2*p(L_0)*q(-L_0)'
t3 = t2*t1
t3 = t3.expand()
assert str(t3) == 'p(-L_0)*p(L_0) + 2*q(-L_0)*p(L_0)'
t3 = t3.canon_bp()
assert str(t3) == 'p(L_0)*p(-L_0) + 2*p(L_0)*q(-L_0)'
t1 = p(d0) + 2*q(d0)
t3 = t1*t2
t3 = t3.canon_bp()
assert str(t3) == 'p(L_0)*p(-L_0) + 4*p(L_0)*q(-L_0) + 4*q(L_0)*q(-L_0)'
t1 = p(d0) - 2*q(d0)
assert str(t1) == '-2*q(d0) + p(d0)'
t2 = p(-d0) + 2*q(-d0)
t3 = t1*t2
t3 = t3.canon_bp()
assert t3 == p(d0)*p(-d0) - 4*q(d0)*q(-d0)
t = p(i)*p(j)*(p(k) + q(k)) + p(i)*(p(j) + q(j))*(p(k) - 3*q(k))
t = t.canon_bp()
assert t == 2*p(i)*p(j)*p(k) - 2*p(i)*p(j)*q(k) + p(i)*p(k)*q(j) - 3*p(i)*q(j)*q(k)
t1 = (p(i) + q(i) + 2*r(i))*(p(j) - q(j))
t2 = (p(j) + q(j) + 2*r(j))*(p(i) - q(i))
t = t1 + t2
t = t.canon_bp()
assert t == 2*p(i)*p(j) + 2*p(i)*r(j) + 2*p(j)*r(i) - 2*q(i)*q(j) - 2*q(i)*r(j) - 2*q(j)*r(i)
t = p(i)*q(j)/2
assert 2*t == p(i)*q(j)
t = (p(i) + q(i))/2
assert 2*t == p(i) + q(i)
t = S.One - p(i)*p(-i)
t = t.canon_bp()
tz1 = t + p(-j)*p(j)
assert tz1 != 1
tz1 = tz1.canon_bp()
assert tz1.equals(1)
t = S.One + p(i)*p(-i)
assert (t - p(-j)*p(j)).canon_bp().equals(1)
t = A(a, b) + B(a, b)
assert t.rank == 2
t1 = t - A(a, b) - B(a, b)
assert t1 == 0
t = 1 - (A(a, -a) + B(a, -a))
t1 = 1 + (A(a, -a) + B(a, -a))
assert (t + t1).expand().equals(2)
t2 = 1 + A(a, -a)
assert t1 != t2
assert t2 != TensMul.from_data(0, [], [], [])
def test_special_eq_ne():
# test special equality cases:
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, d0, d1, i, j, k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
# A, B symmetric
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
p, q, r = tensor_heads('p,q,r', [Lorentz])
t = 0*A(a, b)
assert _is_equal(t, 0)
assert _is_equal(t, S.Zero)
assert p(i) != A(a, b)
assert A(a, -a) != A(a, b)
assert 0*(A(a, b) + B(a, b)) == 0
assert 0*(A(a, b) + B(a, b)) is S.Zero
assert 3*(A(a, b) - A(a, b)) is S.Zero
assert p(i) + q(i) != A(a, b)
assert p(i) + q(i) != A(a, b) + B(a, b)
assert p(i) - p(i) == 0
assert p(i) - p(i) is S.Zero
assert _is_equal(A(a, b), A(b, a))
def test_add2():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m, n, p, q = tensor_indices('m,n,p,q', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(-3))
t1 = 2*R(m, n, p, q) - R(m, q, n, p) + R(m, p, n, q)
t2 = t1*A(-n, -p, -q)
t2 = t2.canon_bp()
assert t2 == 0
t1 = Rational(2, 3)*R(m,n,p,q) - Rational(1, 3)*R(m,q,n,p) + Rational(1, 3)*R(m,p,n,q)
t2 = t1*A(-n, -p, -q)
t2 = t2.canon_bp()
assert t2 == 0
t = A(m, -m, n) + A(n, p, -p)
t = t.canon_bp()
assert t == 0
def test_add3():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i0, i1 = tensor_indices('i0:2', Lorentz)
E, px, py, pz = symbols('E px py pz')
A = TensorHead('A', [Lorentz])
B = TensorHead('B', [Lorentz])
expr1 = A(i0)*A(-i0) - (E**2 - px**2 - py**2 - pz**2)
assert expr1.args == (-E**2, px**2, py**2, pz**2, A(i0)*A(-i0))
expr2 = E**2 - px**2 - py**2 - pz**2 - A(i0)*A(-i0)
assert expr2.args == (E**2, -px**2, -py**2, -pz**2, -A(i0)*A(-i0))
expr3 = A(i0)*A(-i0) - E**2 + px**2 + py**2 + pz**2
assert expr3.args == (-E**2, px**2, py**2, pz**2, A(i0)*A(-i0))
expr4 = B(i1)*B(-i1) + 2*E**2 - 2*px**2 - 2*py**2 - 2*pz**2 - A(i0)*A(-i0)
assert expr4.args == (2*E**2, -2*px**2, -2*py**2, -2*pz**2, B(i1)*B(-i1), -A(i0)*A(-i0))
def test_mul():
from sympy.abc import x
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
t = TensMul.from_data(S.One, [], [], [])
assert str(t) == '1'
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = (1 + x)*A(a, b)
assert str(t) == '(x + 1)*A(a, b)'
assert t.index_types == [Lorentz, Lorentz]
assert t.rank == 2
assert t.dum == []
assert t.coeff == 1 + x
assert sorted(t.free) == [(a, 0), (b, 1)]
assert t.components == [A]
ts = A(a, b)
assert str(ts) == 'A(a, b)'
assert ts.index_types == [Lorentz, Lorentz]
assert ts.rank == 2
assert ts.dum == []
assert ts.coeff == 1
assert sorted(ts.free) == [(a, 0), (b, 1)]
assert ts.components == [A]
t = A(-b, a)*B(-a, c)*A(-c, d)
t1 = tensor_mul(*t.split())
assert t == t1
assert tensor_mul(*[]) == TensMul.from_data(S.One, [], [], [])
t = TensMul.from_data(1, [], [], [])
C = TensorHead('C', [])
assert str(C()) == 'C'
assert str(t) == '1'
assert t == 1
raises(ValueError, lambda: A(a, b)*A(a, c))
def test_substitute_indices():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
p = TensorHead('p', [Lorentz])
t = p(i)
t1 = t.substitute_indices((j, k))
assert t1 == t
t1 = t.substitute_indices((i, j))
assert t1 == p(j)
t1 = t.substitute_indices((i, -j))
assert t1 == p(-j)
t1 = t.substitute_indices((-i, j))
assert t1 == p(-j)
t1 = t.substitute_indices((-i, -j))
assert t1 == p(j)
t = A(m, n)
t1 = t.substitute_indices((m, i), (n, -i))
assert t1 == A(n, -n)
t1 = substitute_indices(t, (m, i), (n, -i))
assert t1 == A(n, -n)
t = A(i, k)*B(-k, -j)
t1 = t.substitute_indices((i, j), (j, k))
t1a = A(j, l)*B(-l, -k)
assert t1 == t1a
t1 = substitute_indices(t, (i, j), (j, k))
assert t1 == t1a
t = A(i, j) + B(i, j)
t1 = t.substitute_indices((j, -i))
t1a = A(i, -i) + B(i, -i)
assert t1 == t1a
t1 = substitute_indices(t, (j, -i))
assert t1 == t1a
def test_riemann_cyclic_replace():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m0, m1, m2, m3 = tensor_indices('m:4', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(m0, m2, m1, m3)
t1 = riemann_cyclic_replace(t)
t1a = Rational(-1, 3)*R(m0, m3, m2, m1) + Rational(1, 3)*R(m0, m1, m2, m3) + Rational(2, 3)*R(m0, m2, m1, m3)
assert t1 == t1a
def test_riemann_cyclic():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(i,j,k,l) + R(i,l,j,k) + R(i,k,l,j) - \
R(i,j,l,k) - R(i,l,k,j) - R(i,k,j,l)
t2 = t*R(-i,-j,-k,-l)
t3 = riemann_cyclic(t2)
assert t3 == 0
t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
t1 = riemann_cyclic(t)
assert t1 == 0
t = R(i,j,k,l)
t1 = riemann_cyclic(t)
assert t1 == Rational(-1, 3)*R(i, l, j, k) + Rational(1, 3)*R(i, k, j, l) + Rational(2, 3)*R(i, j, k, l)
t = R(i,j,k,l)*R(-k,-l,m,n)*(R(-m,-n,-i,-j) + 2*R(-m,-j,-n,-i))
t1 = riemann_cyclic(t)
assert t1 == 0
@XFAIL
def test_div():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m0, m1, m2, m3 = tensor_indices('m0:4', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(m0,m1,-m1,m3)
t1 = t/S(4)
assert str(t1) == '(1/4)*R(m0, L_0, -L_0, m3)'
t = t.canon_bp()
assert not t1._is_canon_bp
t1 = t*4
assert t1._is_canon_bp
t1 = t1/4
assert t1._is_canon_bp
def test_contract_metric1():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p = TensorHead('p', [Lorentz])
t = g(a, b)*p(-b)
t1 = t.contract_metric(g)
assert t1 == p(a)
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
    # case of g with all free indices
t1 = A(a,b)*B(-b,c)*g(d, e)
t2 = t1.contract_metric(g)
assert t1 == t2
# case of g(d, -d)
t1 = A(a,b)*B(-b,c)*g(-d, d)
t2 = t1.contract_metric(g)
assert t2 == D*A(a, d)*B(-d, c)
# g with one free index
t1 = A(a,b)*B(-b,-c)*g(c, d)
t2 = t1.contract_metric(g)
assert t2 == A(a, c)*B(-c, d)
# g with both indices contracted with another tensor
t1 = A(a,b)*B(-b,-c)*g(c, -a)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a, b)*B(-b, -a))
t1 = A(a,b)*B(-b,-c)*g(c, d)*g(-a, -d)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a,b)*B(-b,-a))
t1 = A(a,b)*g(-a,-b)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a, -a))
assert not t2.free
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b = tensor_indices('a,b', Lorentz)
g = Lorentz.metric
assert _is_equal(g(a, -a).contract_metric(g), Lorentz.dim) # no dim
def test_contract_metric2():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e, L_0 = tensor_indices('a,b,c,d,e,L_0', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p,q', [Lorentz])
t1 = g(a,b)*p(c)*p(-c)
t2 = 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
assert t == 3*D*p(a)*p(-a)*q(b)*q(-b)
t1 = g(a,b)*p(c)*p(-c)
t2 = 3*q(-a)*q(-b)
t = t1*t2
t = t.contract_metric(g)
t = t.canon_bp()
assert t == 3*p(a)*p(-a)*q(b)*q(-b)
t1 = 2*g(a,b)*p(c)*p(-c)
t2 = - 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
t = 6*g(a,b)*g(-a,-b)*p(c)*p(-c)*q(d)*q(-d)
t = t.contract_metric(g)
t1 = 2*g(a,b)*p(c)*p(-c)
t2 = q(-a)*q(-b) + 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
assert t == (2 + 6*D)*p(a)*p(-a)*q(b)*q(-b)
t1 = p(a)*p(b) + p(a)*q(b) + 2*g(a,b)*p(c)*p(-c)
t2 = q(-a)*q(-b) - g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
t1 = (1 - 2*D)*p(a)*p(-a)*q(b)*q(-b) + p(a)*q(-a)*p(b)*q(-b)
assert canon_bp(t - t1) == 0
t = g(a,b)*g(c,d)*g(-b,-c)
t1 = t.contract_metric(g)
assert t1 == g(a, d)
t1 = g(a,b)*g(c,d) + g(a,c)*g(b,d) + g(a,d)*g(b,c)
t2 = t1.substitute_indices((a,-a),(b,-b),(c,-c),(d,-d))
t = t1*t2
t = t.contract_metric(g)
assert t.equals(3*D**2 + 6*D)
t = 2*p(a)*g(b,-b)
t1 = t.contract_metric(g)
assert t1.equals(2*D*p(a))
t = 2*p(a)*g(b,-a)
t1 = t.contract_metric(g)
assert t1 == 2*p(b)
M = Symbol('M')
t = (p(a)*p(b) + g(a, b)*M**2)*g(-a, -b) - D*M**2
t1 = t.contract_metric(g)
assert t1 == p(a)*p(-a)
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(a, b)*p(L_0)*g(-a, -b)
t1 = t.contract_metric(g)
assert str(t1) == 'A(L_1, -L_1)*p(L_0)' or str(t1) == 'A(-L_1, L_1)*p(L_0)'
def test_metric_contract3():
D = Symbol('D')
Spinor = TensorIndexType('Spinor', dim=D, metric_symmetry=-1, dummy_name='S')
a0, a1, a2, a3, a4 = tensor_indices('a0:5', Spinor)
C = Spinor.metric
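    # with metric_symmetry=-1 the metric C is antisymmetric, which is what
    # produces the alternating signs in the contractions below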
chi, psi = tensor_heads('chi,psi', [Spinor], TensorSymmetry.no_symmetry(1), 1)
B = TensorHead('B', [Spinor]*2, TensorSymmetry.no_symmetry(2))
t = C(a0,-a0)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(-a0,a0)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a0,a1)*C(-a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a1,a0)*C(-a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(-a0,a1)*C(a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(a1,-a0)*C(a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a0,a1)*B(-a1,-a0)
t1 = t.contract_metric(C)
t1 = t1.canon_bp()
assert _is_equal(t1, B(a0,-a0))
t = C(a1,a0)*B(-a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(a0,-a1)*B(a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(-a0,a1)*B(-a1,a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(-a0,-a1)*B(a1,a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(a0,-a0))
t = C(-a1, a0)*B(a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(a0,-a0))
t = C(a0,a1)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, psi(a0))
t = C(a1,a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -psi(a0))
t = C(a0,a1)*chi(-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(a1)*psi(-a1))
t = C(a1,a0)*chi(-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(a1)*psi(-a1))
t = C(-a1,a0)*chi(-a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(-a1)*psi(a1))
t = C(a0,-a1)*chi(-a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(-a1)*psi(a1))
t = C(-a0,-a1)*chi(a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(-a1)*psi(a1))
t = C(-a1,-a0)*chi(a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(-a1)*psi(a1))
t = C(-a1,-a0)*B(a0,a2)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(-a1,a2)*psi(a1))
t = C(a1,a0)*B(-a2,-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(-a2,a1)*psi(-a1))
def test_epsilon():
Lorentz = TensorIndexType('Lorentz', dim=4, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
epsilon = Lorentz.epsilon
p, q, r, s = tensor_heads('p,q,r,s', [Lorentz])
t = epsilon(b,a,c,d)
t1 = t.canon_bp()
assert t1 == -epsilon(a,b,c,d)
t = epsilon(c,b,d,a)
t1 = t.canon_bp()
assert t1 == epsilon(a,b,c,d)
t = epsilon(c,a,d,b)
t1 = t.canon_bp()
assert t1 == -epsilon(a,b,c,d)
t = epsilon(a,b,c,d)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == epsilon(c,d,a,b)*p(-a)*q(-b)
t = epsilon(c,b,d,a)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == epsilon(c,d,a,b)*p(-a)*q(-b)
t = epsilon(c,a,d,b)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == -epsilon(c,d,a,b)*p(-a)*q(-b)
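    # contracting the totally antisymmetric epsilon with the symmetric
    # product p(-a)*p(-b) gives zero: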
t = epsilon(c,a,d,b)*p(-a)*p(-b)
t1 = t.canon_bp()
assert t1 == 0
t = epsilon(c,a,d,b)*p(-a)*q(-b) + epsilon(a,b,c,d)*p(-b)*q(-a)
t1 = t.canon_bp()
assert t1 == -2*epsilon(c,d,a,b)*p(-a)*q(-b)
    # Test that epsilon can be created with a SymPy Integer:
Lorentz = TensorIndexType('Lorentz', dim=Integer(4), dummy_name='L')
epsilon = Lorentz.epsilon
assert isinstance(epsilon, TensorHead)
def test_contract_delta1():
# see Group Theory by Cvitanovic page 9
n = Symbol('n')
Color = TensorIndexType('Color', dim=n, dummy_name='C')
a, b, c, d, e, f = tensor_indices('a,b,c,d,e,f', Color)
delta = Color.delta
def idn(a, b, d, c):
assert a.is_up and d.is_up
assert not (b.is_up or c.is_up)
return delta(a,c)*delta(d,b)
def T(a, b, d, c):
assert a.is_up and d.is_up
assert not (b.is_up or c.is_up)
return delta(a,b)*delta(d,c)
def P1(a, b, c, d):
return idn(a,b,c,d) - 1/n*T(a,b,c,d)
def P2(a, b, c, d):
return 1/n*T(a,b,c,d)
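    # P1 and P2 are complementary projectors; the contractions below check
    # P1*P1 = P1, P2*P2 = P2 and P1*P2 = 0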
t = P1(a, -b, e, -f)*P1(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert canon_bp(t1 - P1(a, -b, d, -c)) == 0
t = P2(a, -b, e, -f)*P2(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert t1 == P2(a, -b, d, -c)
t = P1(a, -b, e, -f)*P2(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert t1 == 0
t = P1(a, -b, b, -a)
t1 = t.contract_delta(delta)
assert t1.equals(n**2 - 1)
@filter_warnings_decorator
def test_fun():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p q', [Lorentz])
t = q(c)*p(a)*q(b) + g(a,b)*g(c,d)*q(-d)
assert t(a,b,c) == t
assert canon_bp(t - t(b,a,c) - q(c)*p(a)*q(b) + q(c)*p(b)*q(a)) == 0
assert t(b,c,d) == q(d)*p(b)*q(c) + g(b,c)*g(d,e)*q(-e)
t1 = t.substitute_indices((a,b),(b,a))
assert canon_bp(t1 - q(c)*p(b)*q(a) - g(a,b)*g(c,d)*q(-d)) == 0
# check that g_{a b; c} = 0
# example taken from L. Brewin
# "A brief introduction to Cadabra" arxiv:0903.2085
# dg_{a b c} = \partial_{a} g_{b c} is symmetric in b, c
dg = TensorHead('dg', [Lorentz]*3, TensorSymmetry.direct_product(1, 2))
# gamma^a_{b c} is the Christoffel symbol
gamma = S.Half*g(a,d)*(dg(-b,-d,-c) + dg(-c,-b,-d) - dg(-d,-b,-c))
# t = g_{a b; c}
t = dg(-c,-a,-b) - g(-a,-d)*gamma(d,-b,-c) - g(-b,-d)*gamma(d,-a,-c)
t = t.contract_metric(g)
assert t == 0
t = q(c)*p(a)*q(b)
assert t(b,c,d) == q(d)*p(b)*q(c)
def test_TensorManager():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
LorentzH = TensorIndexType('LorentzH', dummy_name='LH')
i, j = tensor_indices('i,j', Lorentz)
ih, jh = tensor_indices('ih,jh', LorentzH)
p, q = tensor_heads('p q', [Lorentz])
ph, qh = tensor_heads('ph qh', [LorentzH])
Gsymbol = Symbol('Gsymbol')
GHsymbol = Symbol('GHsymbol')
TensorManager.set_comm(Gsymbol, GHsymbol, 0)
G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), Gsymbol)
assert TensorManager._comm_i2symbol[G.comm] == Gsymbol
GH = TensorHead('GH', [LorentzH], TensorSymmetry.no_symmetry(1), GHsymbol)
ps = G(i)*p(-i)
psh = GH(ih)*ph(-ih)
t = ps + psh
t1 = t*t
assert canon_bp(t1 - ps*ps - 2*ps*psh - psh*psh) == 0
qs = G(i)*q(-i)
qsh = GH(ih)*qh(-ih)
assert _is_equal(ps*qsh, qsh*ps)
assert not _is_equal(ps*qs, qs*ps)
n = TensorManager.comm_symbols2i(Gsymbol)
assert TensorManager.comm_i2symbol(n) == Gsymbol
assert GHsymbol in TensorManager._comm_symbols2i
raises(ValueError, lambda: TensorManager.set_comm(GHsymbol, 1, 2))
TensorManager.set_comms((Gsymbol,GHsymbol,0),(Gsymbol,1,1))
assert TensorManager.get_comm(n, 1) == TensorManager.get_comm(1, n) == 1
TensorManager.clear()
assert TensorManager.comm == [{0:0, 1:0, 2:0}, {0:0, 1:1, 2:None}, {0:0, 1:None}]
assert GHsymbol not in TensorManager._comm_symbols2i
nh = TensorManager.comm_symbols2i(GHsymbol)
assert TensorManager.comm_i2symbol(nh) == GHsymbol
assert GHsymbol in TensorManager._comm_symbols2i
def test_hash():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p q', [Lorentz])
p_type = p.args[1]
t1 = p(a)*q(b)
t2 = p(a)*p(b)
assert hash(t1) != hash(t2)
t3 = p(a)*p(b) + g(a,b)
t4 = p(a)*p(b) - g(a,b)
assert hash(t3) != hash(t4)
assert a.func(*a.args) == a
assert Lorentz.func(*Lorentz.args) == Lorentz
assert g.func(*g.args) == g
assert p.func(*p.args) == p
assert p_type.func(*p_type.args) == p_type
assert p(a).func(*(p(a)).args) == p(a)
assert t1.func(*t1.args) == t1
assert t2.func(*t2.args) == t2
assert t3.func(*t3.args) == t3
assert t4.func(*t4.args) == t4
assert hash(a.func(*a.args)) == hash(a)
assert hash(Lorentz.func(*Lorentz.args)) == hash(Lorentz)
assert hash(g.func(*g.args)) == hash(g)
assert hash(p.func(*p.args)) == hash(p)
assert hash(p_type.func(*p_type.args)) == hash(p_type)
assert hash(p(a).func(*(p(a)).args)) == hash(p(a))
assert hash(t1.func(*t1.args)) == hash(t1)
assert hash(t2.func(*t2.args)) == hash(t2)
assert hash(t3.func(*t3.args)) == hash(t3)
assert hash(t4.func(*t4.args)) == hash(t4)
def check_all(obj):
return all([isinstance(_, Basic) for _ in obj.args])
assert check_all(a)
assert check_all(Lorentz)
assert check_all(g)
assert check_all(p)
assert check_all(p_type)
assert check_all(p(a))
assert check_all(t1)
assert check_all(t2)
assert check_all(t3)
assert check_all(t4)
tsymmetry = TensorSymmetry.direct_product(-2, 1, 3)
assert tsymmetry.func(*tsymmetry.args) == tsymmetry
assert hash(tsymmetry.func(*tsymmetry.args)) == hash(tsymmetry)
assert check_all(tsymmetry)
### TEST VALUED TENSORS ###
def _get_valued_base_test_variables():
minkowski = Matrix((
(1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1),
))
Lorentz = TensorIndexType('Lorentz', dim=4)
Lorentz.data = minkowski
i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
E, px, py, pz = symbols('E px py pz')
A = TensorHead('A', [Lorentz])
A.data = [E, px, py, pz]
B = TensorHead('B', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm')
B.data = range(4)
AB = TensorHead("AB", [Lorentz]*2)
AB.data = minkowski
ba_matrix = Matrix((
(1, 2, 3, 4),
(5, 6, 7, 8),
(9, 0, -1, -2),
(-3, -4, -5, -6),
))
BA = TensorHead("BA", [Lorentz]*2)
BA.data = ba_matrix
    # Let's test a diagonal metric with the opposite-signature Minkowski form:
LorentzD = TensorIndexType('LorentzD')
LorentzD.data = [-1, 1, 1, 1]
mu0, mu1, mu2 = tensor_indices('mu0:3', LorentzD)
C = TensorHead('C', [LorentzD])
C.data = [E, px, py, pz]
### non-diagonal metric ###
ndm_matrix = (
(1, 1, 0,),
(1, 0, 1),
(0, 1, 0,),
)
ndm = TensorIndexType("ndm")
ndm.data = ndm_matrix
n0, n1, n2 = tensor_indices('n0:3', ndm)
NA = TensorHead('NA', [ndm])
NA.data = range(10, 13)
NB = TensorHead('NB', [ndm]*2)
NB.data = [[i+j for j in range(10, 13)] for i in range(10, 13)]
NC = TensorHead('NC', [ndm]*3)
NC.data = [[[i+j+k for k in range(4, 7)] for j in range(1, 4)] for i in range(2, 5)]
return (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4)
@filter_warnings_decorator
def test_valued_tensor_iter():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
list_BA = [Array([1, 2, 3, 4]), Array([5, 6, 7, 8]), Array([9, 0, -1, -2]), Array([-3, -4, -5, -6])]
# iteration on VTensorHead
assert list(A) == [E, px, py, pz]
assert list(ba_matrix) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, -1, -2, -3, -4, -5, -6]
assert list(BA) == list_BA
# iteration on VTensMul
assert list(A(i1)) == [E, px, py, pz]
assert list(BA(i1, i2)) == list_BA
assert list(3 * BA(i1, i2)) == [3 * i for i in list_BA]
assert list(-5 * BA(i1, i2)) == [-5 * i for i in list_BA]
# iteration on VTensAdd
# A(i1) + A(i1)
assert list(A(i1) + A(i1)) == [2*E, 2*px, 2*py, 2*pz]
assert BA(i1, i2) - BA(i1, i2) == 0
assert list(BA(i1, i2) - 2 * BA(i1, i2)) == [-i for i in list_BA]
@filter_warnings_decorator
def test_valued_tensor_covariant_contravariant_elements():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
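    # with the mostly-minus Minkowski metric, lowering an index keeps the
    # time component and flips the sign of the spatial components: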
assert A(-i0)[0] == A(i0)[0]
assert A(-i0)[1] == -A(i0)[1]
assert AB(i0, i1)[1, 1] == -1
assert AB(i0, -i1)[1, 1] == 1
assert AB(-i0, -i1)[1, 1] == -1
assert AB(-i0, i1)[1, 1] == 1
@filter_warnings_decorator
def test_valued_tensor_get_matrix():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
matab = AB(i0, i1).get_matrix()
assert matab == Matrix([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1],
])
# when alternating contravariant/covariant with [1, -1, -1, -1] metric
# it becomes the identity matrix:
assert AB(i0, -i1).get_matrix() == eye(4)
# covariant and contravariant forms:
assert A(i0).get_matrix() == Matrix([E, px, py, pz])
assert A(-i0).get_matrix() == Matrix([E, -px, -py, -pz])
@filter_warnings_decorator
def test_valued_tensor_contraction():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert (A(i0) * A(-i0)).data == E ** 2 - px ** 2 - py ** 2 - pz ** 2
assert (A(i0) * A(-i0)).data == A ** 2
assert (A(i0) * A(-i0)).data == A(i0) ** 2
assert (A(i0) * B(-i0)).data == -px - 2 * py - 3 * pz
for i in range(4):
for j in range(4):
assert (A(i0) * B(-i1))[i, j] == [E, px, py, pz][i] * [0, -1, -2, -3][j]
# test contraction on the alternative Minkowski metric: [-1, 1, 1, 1]
assert (C(mu0) * C(-mu0)).data == -E ** 2 + px ** 2 + py ** 2 + pz ** 2
contrexp = A(i0) * AB(i1, -i0)
assert A(i0).rank == 1
assert AB(i1, -i0).rank == 2
assert contrexp.rank == 1
for i in range(4):
assert contrexp[i] == [E, px, py, pz][i]
@filter_warnings_decorator
def test_valued_tensor_self_contraction():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert AB(i0, -i0).data == 4
assert BA(i0, -i0).data == 2
@filter_warnings_decorator
def test_valued_tensor_pow():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert C**2 == -E**2 + px**2 + py**2 + pz**2
assert C**1 == sqrt(-E**2 + px**2 + py**2 + pz**2)
assert C(mu0)**2 == C**2
assert C(mu0)**1 == C**1
@filter_warnings_decorator
def test_valued_tensor_expressions():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
x1, x2, x3 = symbols('x1:4')
# test coefficient in contraction:
rank2coeff = x1 * A(i3) * B(i2)
assert rank2coeff[1, 1] == x1 * px
assert rank2coeff[3, 3] == 3 * pz * x1
coeff_expr = ((x1 * A(i4)) * (B(-i4) / x2)).data
assert coeff_expr.expand() == -px*x1/x2 - 2*py*x1/x2 - 3*pz*x1/x2
add_expr = A(i0) + B(i0)
assert add_expr[0] == E
assert add_expr[1] == px + 1
assert add_expr[2] == py + 2
assert add_expr[3] == pz + 3
sub_expr = A(i0) - B(i0)
assert sub_expr[0] == E
assert sub_expr[1] == px - 1
assert sub_expr[2] == py - 2
assert sub_expr[3] == pz - 3
assert (add_expr * B(-i0)).data == -px - 2*py - 3*pz - 14
expr1 = x1*A(i0) + x2*B(i0)
expr2 = expr1 * B(i1) * (-4)
expr3 = expr2 + 3*x3*AB(i0, i1)
expr4 = expr3 / 2
assert expr4 * 2 == expr3
expr5 = (expr4 * BA(-i1, -i0))
assert expr5.data.expand() == 28*E*x1 + 12*px*x1 + 20*py*x1 + 28*pz*x1 + 136*x2 + 3*x3
@filter_warnings_decorator
def test_valued_tensor_add_scalar():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
# one scalar summand after the contracted tensor
expr1 = A(i0)*A(-i0) - (E**2 - px**2 - py**2 - pz**2)
assert expr1.data == 0
# multiple scalar summands in front of the contracted tensor
expr2 = E**2 - px**2 - py**2 - pz**2 - A(i0)*A(-i0)
assert expr2.data == 0
# multiple scalar summands after the contracted tensor
expr3 = A(i0)*A(-i0) - E**2 + px**2 + py**2 + pz**2
assert expr3.data == 0
# multiple scalar summands and multiple tensors
expr4 = C(mu0)*C(-mu0) + 2*E**2 - 2*px**2 - 2*py**2 - 2*pz**2 - A(i0)*A(-i0)
assert expr4.data == 0
@filter_warnings_decorator
def test_noncommuting_components():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
euclid = TensorIndexType('Euclidean')
euclid.data = [1, 1]
i1, i2, i3 = tensor_indices('i1:4', euclid)
a, b, c, d = symbols('a b c d', commutative=False)
V1 = TensorHead('V1', [euclid]*2)
V1.data = [[a, b], (c, d)]
V2 = TensorHead('V2', [euclid]*2)
V2.data = [[a, c], [b, d]]
vtp = V1(i1, i2) * V2(-i2, -i1)
assert vtp.data == a**2 + b**2 + c**2 + d**2
assert vtp.data != a**2 + 2*b*c + d**2
vtp2 = V1(i1, i2)*V1(-i2, -i1)
assert vtp2.data == a**2 + b*c + c*b + d**2
assert vtp2.data != a**2 + 2*b*c + d**2
Vc = (b * V1(i1, -i1)).data
assert Vc.expand() == b * a + b * d
@filter_warnings_decorator
def test_valued_non_diagonal_metric():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
mmatrix = Matrix(ndm_matrix)
assert (NA(n0)*NA(-n0)).data == (NA(n0).get_matrix().T * mmatrix * NA(n0).get_matrix())[0, 0]
@filter_warnings_decorator
def test_valued_assign_numpy_ndarray():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
# this is needed to make sure that a numpy.ndarray can be assigned to a
# tensor.
arr = [E+1, px-1, py, pz]
A.data = Array(arr)
for i in range(4):
assert A(i0).data[i] == arr[i]
qx, qy, qz = symbols('qx qy qz')
A(-i0).data = Array([E, qx, qy, qz])
for i in range(4):
assert A(i0).data[i] == [E, -qx, -qy, -qz][i]
assert A.data[i] == [E, -qx, -qy, -qz][i]
# test on multi-indexed tensors.
random_4x4_data = [[(i**3-3*i**2)%(j+7) for i in range(4)] for j in range(4)]
AB(-i0, -i1).data = random_4x4_data
for i in range(4):
for j in range(4):
assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]
AB(-i0, i1).data = random_4x4_data
for i in range(4):
for j in range(4):
assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]
assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
@filter_warnings_decorator
def test_valued_metric_inverse():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
    # assign an arbitrary symmetric matrix as the metric, just to exercise
    # the machinery (it has no physical meaning):
md = [[2, 2, 2, 1], [2, 3, 1, 0], [2, 1, 2, 3], [1, 0, 3, 2]]
Lorentz.data = md
m = Matrix(md)
metric = Lorentz.metric
minv = m.inv()
meye = eye(4)
# the Kronecker Delta:
KD = Lorentz.get_kronecker_delta()
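    # the metric contracted with its inverse must give the identity,
    # i.e. the same components as the Kronecker delta: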
for i in range(4):
for j in range(4):
assert metric(i0, i1).data[i, j] == m[i, j]
assert metric(-i0, -i1).data[i, j] == minv[i, j]
assert metric(i0, -i1).data[i, j] == meye[i, j]
assert metric(-i0, i1).data[i, j] == meye[i, j]
assert metric(i0, i1)[i, j] == m[i, j]
assert metric(-i0, -i1)[i, j] == minv[i, j]
assert metric(i0, -i1)[i, j] == meye[i, j]
assert metric(-i0, i1)[i, j] == meye[i, j]
assert KD(i0, -i1)[i, j] == meye[i, j]
@filter_warnings_decorator
def test_valued_canon_bp_swapaxes():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
e1 = A(i1)*A(i0)
e2 = e1.canon_bp()
assert e2 == A(i0)*A(i1)
for i in range(4):
for j in range(4):
assert e1[i, j] == e2[j, i]
o1 = B(i2)*A(i1)*B(i0)
o2 = o1.canon_bp()
for i in range(4):
for j in range(4):
for k in range(4):
assert o1[i, j, k] == o2[j, i, k]
@filter_warnings_decorator
def test_valued_components_with_wrong_symmetry():
IT = TensorIndexType('IT', dim=3)
i0, i1, i2, i3 = tensor_indices('i0:4', IT)
IT.data = [1, 1, 1]
A_nosym = TensorHead('A', [IT]*2)
A_sym = TensorHead('A', [IT]*2, TensorSymmetry.fully_symmetric(2))
A_antisym = TensorHead('A', [IT]*2, TensorSymmetry.fully_symmetric(-2))
mat_nosym = Matrix([[1,2,3],[4,5,6],[7,8,9]])
mat_sym = mat_nosym + mat_nosym.T
mat_antisym = mat_nosym - mat_nosym.T
A_nosym.data = mat_nosym
A_nosym.data = mat_sym
A_nosym.data = mat_antisym
def assign(A, dat):
A.data = dat
A_sym.data = mat_sym
raises(ValueError, lambda: assign(A_sym, mat_nosym))
raises(ValueError, lambda: assign(A_sym, mat_antisym))
A_antisym.data = mat_antisym
raises(ValueError, lambda: assign(A_antisym, mat_sym))
raises(ValueError, lambda: assign(A_antisym, mat_nosym))
A_sym.data = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
A_antisym.data = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
@filter_warnings_decorator
def test_issue_10972_TensMul_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='i', dim=2)
Lorentz.data = [-1, 1]
mu, nu, alpha, beta = tensor_indices('\\mu, \\nu, \\alpha, \\beta',
Lorentz)
u = TensorHead('u', [Lorentz])
u.data = [1, 0]
F = TensorHead('F', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
F.data = [[0, 1],
[-1, 0]]
mul_1 = F(mu, alpha) * u(-alpha) * F(nu, beta) * u(-beta)
assert (mul_1.data == Array([[0, 0], [0, 1]]))
mul_2 = F(mu, alpha) * F(nu, beta) * u(-alpha) * u(-beta)
assert (mul_2.data == mul_1.data)
assert ((mul_1 + mul_1).data == 2 * mul_1.data)
@filter_warnings_decorator
def test_TensMul_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='L', dim=4)
Lorentz.data = [-1, 1, 1, 1]
mu, nu, alpha, beta = tensor_indices('\\mu, \\nu, \\alpha, \\beta',
Lorentz)
u = TensorHead('u', [Lorentz])
u.data = [1, 0, 0, 0]
F = TensorHead('F', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
F.data = [
[0, Ex, Ey, Ez],
[-Ex, 0, Bz, -By],
[-Ey, -Bz, 0, Bx],
[-Ez, By, -Bx, 0]]
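    # F has the index structure of the electromagnetic field strength
    # tensor, with electric components E_i and magnetic components B_i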
E = F(mu, nu) * u(-nu)
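    # E(mu) is the electric field seen by the observer with four-velocity u;
    # its time component vanishes, as the data below confirms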
assert ((E(mu) * E(nu)).data ==
Array([[0, 0, 0, 0],
[0, Ex ** 2, Ex * Ey, Ex * Ez],
[0, Ex * Ey, Ey ** 2, Ey * Ez],
[0, Ex * Ez, Ey * Ez, Ez ** 2]])
)
assert ((E(mu) * E(nu)).canon_bp().data == (E(mu) * E(nu)).data)
assert ((F(mu, alpha) * F(beta, nu) * u(-alpha) * u(-beta)).data ==
- (E(mu) * E(nu)).data
)
assert ((F(alpha, mu) * F(beta, nu) * u(-alpha) * u(-beta)).data ==
(E(mu) * E(nu)).data
)
g = TensorHead('g', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
g.data = Lorentz.data
# tensor 'perp' is orthogonal to vector 'u'
perp = u(mu) * u(nu) + g(mu, nu)
mul_1 = u(-mu) * perp(mu, nu)
assert (mul_1.data == Array([0, 0, 0, 0]))
mul_2 = u(-mu) * perp(mu, alpha) * perp(nu, beta)
assert (mul_2.data == Array.zeros(4, 4, 4))
Fperp = perp(mu, alpha) * perp(nu, beta) * F(-alpha, -beta)
assert (Fperp.data[0, :] == Array([0, 0, 0, 0]))
assert (Fperp.data[:, 0] == Array([0, 0, 0, 0]))
mul_3 = u(-mu) * Fperp(mu, nu)
assert (mul_3.data == Array([0, 0, 0, 0]))
@filter_warnings_decorator
def test_issue_11020_TensAdd_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='i', dim=2)
Lorentz.data = [-1, 1]
a, b, c, d = tensor_indices('a, b, c, d', Lorentz)
i0, i1 = tensor_indices('i_0:2', Lorentz)
# metric tensor
g = TensorHead('g', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
g.data = Lorentz.data
u = TensorHead('u', [Lorentz])
u.data = [1, 0]
add_1 = g(b, c) * g(d, i0) * u(-i0) - g(b, c) * u(d)
assert (add_1.data == Array.zeros(2, 2, 2))
# Now let us replace index `d` with `a`:
add_2 = g(b, c) * g(a, i0) * u(-i0) - g(b, c) * u(a)
assert (add_2.data == Array.zeros(2, 2, 2))
# some more tests
    # perp is a tensor orthogonal to u^\mu
perp = u(a) * u(b) + g(a, b)
mul_1 = u(-a) * perp(a, b)
assert (mul_1.data == Array([0, 0]))
mul_2 = u(-c) * perp(c, a) * perp(d, b)
assert (mul_2.data == Array.zeros(2, 2, 2))
def test_index_iteration():
L = TensorIndexType("Lorentz", dummy_name="L")
i0, i1, i2, i3, i4 = tensor_indices('i0:5', L)
L0 = tensor_indices('L_0', L)
L1 = tensor_indices('L_1', L)
A = TensorHead("A", [L, L])
B = TensorHead("B", [L, L], TensorSymmetry.fully_symmetric(2))
e1 = A(i0,i2)
e2 = A(i0,-i0)
e3 = A(i0,i1)*B(i2,i3)
e4 = A(i0,i1)*B(i2,-i1)
e5 = A(i0,i1)*B(-i0,-i1)
e6 = e1 + e4
assert list(e1._iterate_free_indices) == [(i0, (1, 0)), (i2, (1, 1))]
assert list(e1._iterate_dummy_indices) == []
assert list(e1._iterate_indices) == [(i0, (1, 0)), (i2, (1, 1))]
assert list(e2._iterate_free_indices) == []
assert list(e2._iterate_dummy_indices) == [(L0, (1, 0)), (-L0, (1, 1))]
assert list(e2._iterate_indices) == [(L0, (1, 0)), (-L0, (1, 1))]
assert list(e3._iterate_free_indices) == [(i0, (0, 1, 0)), (i1, (0, 1, 1)), (i2, (1, 1, 0)), (i3, (1, 1, 1))]
assert list(e3._iterate_dummy_indices) == []
assert list(e3._iterate_indices) == [(i0, (0, 1, 0)), (i1, (0, 1, 1)), (i2, (1, 1, 0)), (i3, (1, 1, 1))]
assert list(e4._iterate_free_indices) == [(i0, (0, 1, 0)), (i2, (1, 1, 0))]
assert list(e4._iterate_dummy_indices) == [(L0, (0, 1, 1)), (-L0, (1, 1, 1))]
assert list(e4._iterate_indices) == [(i0, (0, 1, 0)), (L0, (0, 1, 1)), (i2, (1, 1, 0)), (-L0, (1, 1, 1))]
assert list(e5._iterate_free_indices) == []
assert list(e5._iterate_dummy_indices) == [(L0, (0, 1, 0)), (L1, (0, 1, 1)), (-L0, (1, 1, 0)), (-L1, (1, 1, 1))]
assert list(e5._iterate_indices) == [(L0, (0, 1, 0)), (L1, (0, 1, 1)), (-L0, (1, 1, 0)), (-L1, (1, 1, 1))]
assert list(e6._iterate_free_indices) == [(i0, (0, 0, 1, 0)), (i2, (0, 1, 1, 0)), (i0, (1, 1, 0)), (i2, (1, 1, 1))]
assert list(e6._iterate_dummy_indices) == [(L0, (0, 0, 1, 1)), (-L0, (0, 1, 1, 1))]
assert list(e6._iterate_indices) == [(i0, (0, 0, 1, 0)), (L0, (0, 0, 1, 1)), (i2, (0, 1, 1, 0)), (-L0, (0, 1, 1, 1)), (i0, (1, 1, 0)), (i2, (1, 1, 1))]
assert e1.get_indices() == [i0, i2]
assert e1.get_free_indices() == [i0, i2]
assert e2.get_indices() == [L0, -L0]
assert e2.get_free_indices() == []
assert e3.get_indices() == [i0, i1, i2, i3]
assert e3.get_free_indices() == [i0, i1, i2, i3]
assert e4.get_indices() == [i0, L0, i2, -L0]
assert e4.get_free_indices() == [i0, i2]
assert e5.get_indices() == [L0, L1, -L0, -L1]
assert e5.get_free_indices() == []
def test_tensor_expand():
L = TensorIndexType("L")
i, j, k = tensor_indices("i j k", L)
L_0 = TensorIndex("L_0", L)
A, B, C, D = tensor_heads("A B C D", [L])
assert isinstance(Add(A(i), B(i)), TensAdd)
assert isinstance(expand(A(i)+B(i)), TensAdd)
expr = A(i)*(A(-i)+B(-i))
assert expr.args == (A(L_0), A(-L_0) + B(-L_0))
assert expr != A(i)*A(-i) + A(i)*B(-i)
assert expr.expand() == A(i)*A(-i) + A(i)*B(-i)
assert str(expr) == "A(L_0)*(A(-L_0) + B(-L_0))"
expr = A(i)*A(j) + A(i)*B(j)
assert str(expr) == "A(i)*A(j) + A(i)*B(j)"
expr = A(-i)*(A(i)*A(j) + A(i)*B(j)*C(k)*C(-k))
assert expr != A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)*C(k)*C(-k)
assert expr.expand() == A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)*C(k)*C(-k)
assert str(expr) == "A(-L_0)*(A(L_0)*A(j) + A(L_0)*B(j)*C(L_1)*C(-L_1))"
assert str(expr.canon_bp()) == 'A(j)*A(L_0)*A(-L_0) + A(L_0)*A(-L_0)*B(j)*C(L_1)*C(-L_1)'
expr = A(-i)*(2*A(i)*A(j) + A(i)*B(j))
assert expr.expand() == 2*A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)
expr = 2*A(i)*A(-i)
assert expr.coeff == 2
expr = A(i)*(B(j)*C(k) + C(j)*(A(k) + D(k)))
assert str(expr) == "A(i)*(B(j)*C(k) + C(j)*(A(k) + D(k)))"
assert str(expr.expand()) == "A(i)*B(j)*C(k) + A(i)*C(j)*A(k) + A(i)*C(j)*D(k)"
assert isinstance(TensMul(3), TensMul)
tm = TensMul(3).doit()
assert tm == 3
assert isinstance(tm, Integer)
p1 = B(j)*B(-j) + B(j)*C(-j)
p2 = C(-i)*p1
p3 = A(i)*p2
assert p3.expand() == A(i)*C(-i)*B(j)*B(-j) + A(i)*C(-i)*B(j)*C(-j)
expr = A(i)*(B(-i) + C(-i)*(B(j)*B(-j) + B(j)*C(-j)))
assert expr.expand() == A(i)*B(-i) + A(i)*C(-i)*B(j)*B(-j) + A(i)*C(-i)*B(j)*C(-j)
expr = C(-i)*(B(j)*B(-j) + B(j)*C(-j))
assert expr.expand() == C(-i)*B(j)*B(-j) + C(-i)*B(j)*C(-j)
def test_tensor_alternative_construction():
L = TensorIndexType("L")
i0, i1, i2, i3 = tensor_indices('i0:4', L)
A = TensorHead("A", [L])
x, y = symbols("x y")
assert A(i0) == A(Symbol("i0"))
assert A(-i0) == A(-Symbol("i0"))
raises(TypeError, lambda: A(x+y))
raises(ValueError, lambda: A(2*x))
def test_tensor_replacement():
L = TensorIndexType("L")
L2 = TensorIndexType("L2", dim=2)
i, j, k, l = tensor_indices("i j k l", L)
A, B, C, D = tensor_heads("A B C D", [L])
H = TensorHead("H", [L, L])
K = TensorHead("K", [L]*4)
expr = H(i, j)
repl = {H(i,-j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, j], Array([[1, -2], [3, -4]]))
assert expr.replace_with_arrays(repl) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [i, j]) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [i, -j]) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [-i, j]) == Array([[1, -2], [-3, 4]])
assert expr.replace_with_arrays(repl, [-i, -j]) == Array([[1, 2], [-3, -4]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[1, 3], [-2, -4]])
assert expr.replace_with_arrays(repl, [j, -i]) == Array([[1, -3], [-2, 4]])
assert expr.replace_with_arrays(repl, [-j, i]) == Array([[1, 3], [2, 4]])
assert expr.replace_with_arrays(repl, [-j, -i]) == Array([[1, -3], [2, -4]])
# Test stability of optional parameter 'indices'
assert expr.replace_with_arrays(repl) == Array([[1, -2], [3, -4]])
expr = H(i,j)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, j], Array([[1, 2], [3, 4]]))
assert expr.replace_with_arrays(repl) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [i, j]) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [i, -j]) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [-i, j]) == Array([[1, 2], [-3, -4]])
assert expr.replace_with_arrays(repl, [-i, -j]) == Array([[1, -2], [-3, 4]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[1, 3], [2, 4]])
assert expr.replace_with_arrays(repl, [j, -i]) == Array([[1, -3], [2, -4]])
assert expr.replace_with_arrays(repl, [-j, i]) == Array([[1, 3], [-2, -4]])
assert expr.replace_with_arrays(repl, [-j, -i]) == Array([[1, -3], [-2, 4]])
# Not the same indices:
expr = H(i,k)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, k], Array([[1, 2], [3, 4]]))
expr = A(i)*A(-i)
repl = {A(i): [1,2], L: diag(1, -1)}
assert expr._extract_data(repl) == ([], -3)
assert expr.replace_with_arrays(repl, []) == -3
expr = K(i, j, -j, k)*A(-i)*A(-k)
repl = {A(i): [1, 2], K(i,j,k,l): Array([1]*2**4).reshape(2,2,2,2), L: diag(1, -1)}
assert expr._extract_data(repl)
expr = H(j, k)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
raises(ValueError, lambda: expr._extract_data(repl))
expr = A(i)
repl = {B(i): [1, 2]}
raises(ValueError, lambda: expr._extract_data(repl))
expr = A(i)
repl = {A(i): [[1, 2], [3, 4]]}
raises(ValueError, lambda: expr._extract_data(repl))
# TensAdd:
expr = A(k)*H(i, j) + B(k)*H(i, j)
repl = {A(k): [1], B(k): [1], H(i, j): [[1, 2],[3,4]], L:diag(1,1)}
assert expr._extract_data(repl) == ([k, i, j], Array([[[2, 4], [6, 8]]]))
assert expr.replace_with_arrays(repl, [k, i, j]) == Array([[[2, 4], [6, 8]]])
assert expr.replace_with_arrays(repl, [k, j, i]) == Array([[[2, 6], [4, 8]]])
expr = A(k)*A(-k) + 100
repl = {A(k): [2, 3], L: diag(1, 1)}
assert expr.replace_with_arrays(repl, []) == 113
## Symmetrization:
expr = H(i, j) + H(j, i)
repl = {H(i, j): [[1, 2], [3, 4]]}
assert expr._extract_data(repl) == ([i, j], Array([[2, 5], [5, 8]]))
assert expr.replace_with_arrays(repl, [i, j]) == Array([[2, 5], [5, 8]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[2, 5], [5, 8]])
## Anti-symmetrization:
expr = H(i, j) - H(j, i)
repl = {H(i, j): [[1, 2], [3, 4]]}
assert expr.replace_with_arrays(repl, [i, j]) == Array([[0, -1], [1, 0]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[0, 1], [-1, 0]])
# Tensors with contractions in replacements:
expr = K(i, j, k, -k)
repl = {K(i, j, k, -k): [[1, 2], [3, 4]]}
assert expr._extract_data(repl) == ([i, j], Array([[1, 2], [3, 4]]))
expr = H(i, -i)
repl = {H(i, -i): 42}
assert expr._extract_data(repl) == ([], 42)
expr = H(i, -i)
repl = {
H(-i, -j): Array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]),
L: Array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]),
}
assert expr._extract_data(repl) == ([], 4)
# Replace with array, raise exception if indices are not compatible:
expr = A(i)*A(j)
repl = {A(i): [1, 2]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [j]))
# Raise exception if array dimension is not compatible:
expr = A(i)
repl = {A(i): [[1, 2]]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [i]))
# TensorIndexType with dimension, wrong dimension in replacement array:
u1, u2, u3 = tensor_indices("u1:4", L2)
U = TensorHead("U", [L2])
expr = U(u1)*U(-u2)
repl = {U(u1): [[1]]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [u1, -u2]))
def test_rewrite_tensor_to_Indexed():
L = TensorIndexType("L", dim=4)
A = TensorHead("A", [L]*4)
B = TensorHead("B", [L])
i0, i1, i2, i3 = symbols("i0:4")
L_0, L_1 = symbols("L_0:2")
a1 = A(i0, i1, i2, i3)
assert a1.rewrite(Indexed) == Indexed(Symbol("A"), i0, i1, i2, i3)
a2 = A(i0, -i0, i2, i3)
assert a2.rewrite(Indexed) == Sum(Indexed(Symbol("A"), L_0, L_0, i2, i3), (L_0, 0, 3))
a3 = a2 + A(i2, i3, i0, -i0)
assert a3.rewrite(Indexed) == \
Sum(Indexed(Symbol("A"), L_0, L_0, i2, i3), (L_0, 0, 3)) +\
Sum(Indexed(Symbol("A"), i2, i3, L_0, L_0), (L_0, 0, 3))
b1 = B(-i0)*a1
assert b1.rewrite(Indexed) == Sum(Indexed(Symbol("B"), L_0)*Indexed(Symbol("A"), L_0, i1, i2, i3), (L_0, 0, 3))
b2 = B(-i3)*a2
assert b2.rewrite(Indexed) == Sum(Indexed(Symbol("B"), L_1)*Indexed(Symbol("A"), L_0, L_0, i2, L_1), (L_0, 0, 3), (L_1, 0, 3))
def test_tensorsymmetry():
with warns_deprecated_sympy():
tensorsymmetry([1]*2)
def test_tensorhead():
with warns_deprecated_sympy():
tensorhead('A', [])
def test_TensorType():
with warns_deprecated_sympy():
sym2 = TensorSymmetry.fully_symmetric(2)
Lorentz = TensorIndexType('Lorentz')
S2 = TensorType([Lorentz]*2, sym2)
assert isinstance(S2, TensorType)
|
7aa1eeeb82beb561be415dc918238a4120a68411db1b3c9874585875e432bbb1 | from sympy.core import symbols, S, Pow, Function
from sympy.functions import exp
from sympy.testing.pytest import raises
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.tensor.index_methods import IndexConformanceException
from sympy import get_contraction_structure, get_indices
def test_trivial_indices():
x, y = symbols('x y')
assert get_indices(x) == (set(), {})
assert get_indices(x*y) == (set(), {})
assert get_indices(x + y) == (set(), {})
assert get_indices(x**y) == (set(), {})
def test_get_indices_Indexed():
x = IndexedBase('x')
i, j = Idx('i'), Idx('j')
assert get_indices(x[i, j]) == ({i, j}, {})
assert get_indices(x[j, i]) == ({j, i}, {})
def test_get_indices_Idx():
f = Function('f')
i, j = Idx('i'), Idx('j')
assert get_indices(f(i)*j) == ({i, j}, {})
assert get_indices(f(j, i)) == ({j, i}, {})
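    # the repeated index in f(i)*i is interpreted as a contraction, so there
    # are no free indices: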
assert get_indices(f(i)*i) == (set(), {})
def test_get_indices_mul():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_indices(x[j]*y[i]) == ({i, j}, {})
assert get_indices(x[i]*y[j]) == ({i, j}, {})
def test_get_indices_exceptions():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
raises(IndexConformanceException, lambda: get_indices(x[i] + y[j]))
def test_scalar_broadcast():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
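    # y[i, i] and y[j, j] are fully contracted scalars, so they broadcast
    # over the free index of x[i]: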
assert get_indices(x[i] + y[i, i]) == ({i}, {})
assert get_indices(x[i] + y[j, j]) == ({i}, {})
def test_get_indices_add():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(x[i] + 2*y[i]) == ({i}, {})
assert get_indices(y[i] + 2*A[i, j]*x[j]) == ({i}, {})
assert get_indices(y[i] + 2*(x[i] + A[i, j]*x[j])) == ({i}, {})
assert get_indices(y[i] + x[i]*(A[j, j] + 1)) == ({i}, {})
assert get_indices(
y[i] + x[i]*x[j]*(y[j] + A[j, k]*x[k])) == ({i}, {})
def test_get_indices_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(Pow(x[i], y[j])) == ({i, j}, {})
assert get_indices(Pow(x[i, k], y[j, k])) == ({i, j, k}, {})
assert get_indices(Pow(A[i, k], y[k] + A[k, j]*x[j])) == ({i, k}, {})
assert get_indices(Pow(2, x[i])) == get_indices(exp(x[i]))
# test of a design decision, this may change:
assert get_indices(Pow(x[i], 2)) == ({i}, {})
def test_get_contraction_structure_basic():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_contraction_structure(x[i]*y[j]) == {None: {x[i]*y[j]}}
assert get_contraction_structure(x[i] + y[j]) == {None: {x[i], y[j]}}
assert get_contraction_structure(x[i]*y[i]) == {(i,): {x[i]*y[i]}}
assert get_contraction_structure(
1 + x[i]*y[i]) == {None: {S.One}, (i,): {x[i]*y[i]}}
assert get_contraction_structure(x[i]**y[i]) == {None: {x[i]**y[i]}}
def test_get_contraction_structure_complex():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
expr1 = y[i] + A[i, j]*x[j]
d1 = {None: {y[i]}, (j,): {A[i, j]*x[j]}}
assert get_contraction_structure(expr1) == d1
expr2 = expr1*A[k, i] + x[k]
d2 = {None: {x[k]}, (i,): {expr1*A[k, i]}, expr1*A[k, i]: [d1]}
assert get_contraction_structure(expr2) == d2
def test_contraction_structure_simple_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj = x[i, i]**y[j, j]
assert get_contraction_structure(ii_jj) == {
None: {ii_jj},
ii_jj: [
{(i,): {x[i, i]}},
{(j,): {y[j, j]}}
]
}
ii_jk = x[i, i]**y[j, k]
assert get_contraction_structure(ii_jk) == {
None: {x[i, i]**y[j, k]},
x[i, i]**y[j, k]: [
{(i,): {x[i, i]}}
]
}
def test_contraction_structure_Mul_and_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
i_ji = x[i]**(y[j]*x[i])
assert get_contraction_structure(i_ji) == {None: {i_ji}}
ij_i = (x[i]*y[j])**(y[i])
assert get_contraction_structure(ij_i) == {None: {ij_i}}
j_ij_i = x[j]*(x[i]*y[j])**(y[i])
assert get_contraction_structure(j_ij_i) == {(j,): {j_ij_i}}
j_i_ji = x[j]*x[i]**(y[j]*x[i])
assert get_contraction_structure(j_i_ji) == {(j,): {j_i_ji}}
ij_exp_kki = x[i]*y[j]*exp(y[i]*y[k, k])
result = get_contraction_structure(ij_exp_kki)
expected = {
(i,): {ij_exp_kki},
ij_exp_kki: [{
None: {exp(y[i]*y[k, k])},
exp(y[i]*y[k, k]): [{
None: {y[i]*y[k, k]},
y[i]*y[k, k]: [{(k,): {y[k, k]}}]
}]}
]
}
assert result == expected
def test_contraction_structure_Add_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
s_ii_jj_s = (1 + x[i, i])**(1 + y[j, j])
expected = {
None: {s_ii_jj_s},
s_ii_jj_s: [
{None: {S.One}, (i,): {x[i, i]}},
{None: {S.One}, (j,): {y[j, j]}}
]
}
result = get_contraction_structure(s_ii_jj_s)
assert result == expected
s_ii_jk_s = (1 + x[i, i]) ** (1 + y[j, k])
expected_2 = {
None: {(x[i, i] + 1)**(y[j, k] + 1)},
s_ii_jk_s: [
{None: {S.One}, (i,): {x[i, i]}}
]
}
result_2 = get_contraction_structure(s_ii_jk_s)
assert result_2 == expected_2
def test_contraction_structure_Pow_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj_kk = x[i, i]**y[j, j]**z[k, k]
expected = {
None: {ii_jj_kk},
ii_jj_kk: [
{(i,): {x[i, i]}},
{
None: {y[j, j]**z[k, k]},
y[j, j]**z[k, k]: [
{(j,): {y[j, j]}},
{(k,): {z[k, k]}}
]
}
]
}
assert get_contraction_structure(ii_jj_kk) == expected
def test_ufunc_support():
f = Function('f')
g = Function('g')
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
a = symbols('a')
assert get_indices(f(x[i])) == ({i}, {})
assert get_indices(f(x[i], y[j])) == ({i, j}, {})
assert get_indices(f(y[i])*g(x[i])) == (set(), {})
assert get_indices(f(a, x[i])) == ({i}, {})
assert get_indices(f(a, y[i], x[j])*g(x[i])) == ({j}, {})
assert get_indices(g(f(x[i]))) == ({i}, {})
assert get_contraction_structure(f(x[i])) == {None: {f(x[i])}}
assert get_contraction_structure(
f(y[i])*g(x[i])) == {(i,): {f(y[i])*g(x[i])}}
assert get_contraction_structure(
f(y[i])*g(f(x[i]))) == {(i,): {f(y[i])*g(f(x[i]))}}
assert get_contraction_structure(
f(x[j], y[i])*g(x[i])) == {(i,): {f(x[j], y[i])*g(x[i])}}
|
3cb39e500c579d2be7d1a08a4d0b6f4c0680ea905b956397b18366e4fdfe69e0 | from sympy.testing.pytest import raises
from sympy import (
Array, ImmutableDenseNDimArray, ImmutableSparseNDimArray,
MutableDenseNDimArray, MutableSparseNDimArray, sin, cos,
simplify, Matrix
)
from sympy.abc import x, y
array_types = [
ImmutableDenseNDimArray,
ImmutableSparseNDimArray,
MutableDenseNDimArray,
MutableSparseNDimArray
]
def test_array_negative_indices():
for ArrayType in array_types:
test_array = ArrayType([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
assert test_array[:, -1] == Array([5, 10])
assert test_array[:, -2] == Array([4, 9])
assert test_array[:, -3] == Array([3, 8])
assert test_array[:, -4] == Array([2, 7])
assert test_array[:, -5] == Array([1, 6])
assert test_array[:, 0] == Array([1, 6])
assert test_array[:, 1] == Array([2, 7])
assert test_array[:, 2] == Array([3, 8])
assert test_array[:, 3] == Array([4, 9])
assert test_array[:, 4] == Array([5, 10])
raises(ValueError, lambda: test_array[:, -6])
raises(ValueError, lambda: test_array[-3, :])
assert test_array[-1, -1] == 10
def test_issue_18361():
A = Array([sin(2 * x) - 2 * sin(x) * cos(x)])
B = Array([sin(x)**2 + cos(x)**2, 0])
C = Array([(x + x**2)/(x*sin(y)**2 + x*cos(y)**2), 2*sin(x)*cos(x)])
assert simplify(A) == Array([0])
assert simplify(B) == Array([1, 0])
assert simplify(C) == Array([x + 1, sin(2*x)])
def test_issue_20222():
A = Array([[1, 2], [3, 4]])
    B = Matrix([[1, 2], [3, 4]])
raises(TypeError, lambda: A - B)
|
752a30598bb7acd05e0e67df5f7c6ebcc194880a2cd9b882fa5ac0fd3e391548 | from sympy.assumptions import Q
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.function import Function
from sympy.core.numbers import I, Integer, oo, pi, Rational
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, symbols
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.matrices.common import (ShapeError, NonSquareMatrixError,
_MinimalMatrix, _CastableMatrix, MatrixShaping, MatrixProperties,
MatrixOperations, MatrixArithmetic, MatrixSpecial)
from sympy.matrices.matrices import MatrixCalculus
from sympy.matrices import (Matrix, diag, eye,
matrix_multiply_elementwise, ones, zeros, SparseMatrix, banded,
MutableDenseMatrix, MutableSparseMatrix, ImmutableDenseMatrix,
ImmutableSparseMatrix)
from sympy.polys.polytools import Poly
from sympy.utilities.iterables import flatten
from sympy.testing.pytest import raises, XFAIL, warns_deprecated_sympy
from sympy import Array
from sympy.abc import x, y, z
# classes to test the basic matrix classes
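# Each *OnlyMatrix class below combines _MinimalMatrix and _CastableMatrix with
# a single mixin (shaping, properties, operations, arithmetic, special
# constructors or calculus) so that one group of methods can be tested in
# isolation from the full Matrix class.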
class ShapingOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixShaping):
pass
def eye_Shaping(n):
return ShapingOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Shaping(n):
return ShapingOnlyMatrix(n, n, lambda i, j: 0)
class PropertiesOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixProperties):
pass
def eye_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: 0)
class OperationsOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixOperations):
pass
def eye_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: 0)
class ArithmeticOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixArithmetic):
pass
def eye_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: 0)
class SpecialOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixSpecial):
pass
class CalculusOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixCalculus):
pass
def test__MinimalMatrix():
x = _MinimalMatrix(2, 3, [1, 2, 3, 4, 5, 6])
assert x.rows == 2
assert x.cols == 3
assert x[2] == 3
assert x[1, 1] == 5
assert list(x) == [1, 2, 3, 4, 5, 6]
assert list(x[1, :]) == [4, 5, 6]
assert list(x[:, 1]) == [2, 5]
assert list(x[:, :]) == list(x)
assert x[:, :] == x
assert _MinimalMatrix(x) == x
assert _MinimalMatrix([[1, 2, 3], [4, 5, 6]]) == x
assert _MinimalMatrix(([1, 2, 3], [4, 5, 6])) == x
assert _MinimalMatrix([(1, 2, 3), (4, 5, 6)]) == x
assert _MinimalMatrix(((1, 2, 3), (4, 5, 6))) == x
assert not (_MinimalMatrix([[1, 2], [3, 4], [5, 6]]) == x)
# ShapingOnlyMatrix tests
def test_vec():
m = ShapingOnlyMatrix(2, 2, [1, 3, 2, 4])
m_vec = m.vec()
assert m_vec.cols == 1
for i in range(4):
assert m_vec[i] == i + 1
def test_todok():
a, b, c, d = symbols('a:d')
m1 = MutableDenseMatrix([[a, b], [c, d]])
m2 = ImmutableDenseMatrix([[a, b], [c, d]])
m3 = MutableSparseMatrix([[a, b], [c, d]])
m4 = ImmutableSparseMatrix([[a, b], [c, d]])
assert m1.todok() == m2.todok() == m3.todok() == m4.todok() == \
{(0, 0): a, (0, 1): b, (1, 0): c, (1, 1): d}
def test_tolist():
lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
flat_lst = [S.One, S.Half, x*y, S.Zero, x, y, z, x**2, y, -S.One, z*x, 3]
m = ShapingOnlyMatrix(3, 4, flat_lst)
assert m.tolist() == lst
def test_row_col_del():
e = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
raises(IndexError, lambda: e.row_del(5))
raises(IndexError, lambda: e.row_del(-5))
raises(IndexError, lambda: e.col_del(5))
raises(IndexError, lambda: e.col_del(-5))
assert e.row_del(2) == e.row_del(-1) == Matrix([[1, 2, 3], [4, 5, 6]])
assert e.col_del(2) == e.col_del(-1) == Matrix([[1, 2], [4, 5], [7, 8]])
assert e.row_del(1) == e.row_del(-2) == Matrix([[1, 2, 3], [7, 8, 9]])
assert e.col_del(1) == e.col_del(-2) == Matrix([[1, 3], [4, 6], [7, 9]])
def test_get_diag_blocks1():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert a.get_diag_blocks() == [a]
assert b.get_diag_blocks() == [b]
assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
A, B, C, D = diag(a, b, b), diag(a, b, c), diag(a, c, b), diag(c, c, b)
A = ShapingOnlyMatrix(A.rows, A.cols, A)
B = ShapingOnlyMatrix(B.rows, B.cols, B)
C = ShapingOnlyMatrix(C.rows, C.cols, C)
D = ShapingOnlyMatrix(D.rows, D.cols, D)
assert A.get_diag_blocks() == [a, b, b]
assert B.get_diag_blocks() == [a, b, c]
assert C.get_diag_blocks() == [a, c, b]
assert D.get_diag_blocks() == [c, c, b]
def test_shape():
m = ShapingOnlyMatrix(1, 2, [0, 0])
    assert m.shape == (1, 2)
def test_reshape():
m0 = eye_Shaping(3)
assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = ShapingOnlyMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(
4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))
assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))
def test_row_col():
m = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert m.row(0) == Matrix(1, 3, [1, 2, 3])
assert m.col(0) == Matrix(3, 1, [1, 4, 7])
def test_row_join():
assert eye_Shaping(3).row_join(Matrix([7, 7, 7])) == \
Matrix([[1, 0, 0, 7],
[0, 1, 0, 7],
[0, 0, 1, 7]])
def test_col_join():
assert eye_Shaping(3).col_join(Matrix([[7, 7, 7]])) == \
Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[7, 7, 7]])
def test_row_insert():
r4 = Matrix([[4, 4, 4]])
for i in range(-4, 5):
l = [1, 0, 0]
l.insert(i, 4)
assert flatten(eye_Shaping(3).row_insert(i, r4).col(0).tolist()) == l
def test_col_insert():
c4 = Matrix([4, 4, 4])
for i in range(-4, 5):
l = [0, 0, 0]
l.insert(i, 4)
assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l
# issue 13643
assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \
Matrix([[1, 0, 0, 2, 2, 0, 0, 0],
[0, 1, 0, 2, 2, 0, 0, 0],
[0, 0, 1, 2, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 1, 0, 0],
[0, 0, 0, 2, 2, 0, 1, 0],
[0, 0, 0, 2, 2, 0, 0, 1]])
def test_extract():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])
assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])
assert m.extract(range(4), range(3)) == m
raises(IndexError, lambda: m.extract([4], [0]))
raises(IndexError, lambda: m.extract([0], [3]))
def test_hstack():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
assert m == m.hstack(m)
assert m.hstack(m, m, m) == ShapingOnlyMatrix.hstack(m, m, m) == Matrix([
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11]])
raises(ShapeError, lambda: m.hstack(m, m2))
assert Matrix.hstack() == Matrix()
# test regression #12938
M1 = Matrix.zeros(0, 0)
M2 = Matrix.zeros(0, 1)
M3 = Matrix.zeros(0, 2)
M4 = Matrix.zeros(0, 3)
m = ShapingOnlyMatrix.hstack(M1, M2, M3, M4)
assert m.rows == 0 and m.cols == 6
def test_vstack():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
assert m == m.vstack(m)
assert m.vstack(m, m, m) == ShapingOnlyMatrix.vstack(m, m, m) == Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
raises(ShapeError, lambda: m.vstack(m, m2))
assert Matrix.vstack() == Matrix()
# PropertiesOnlyMatrix tests
def test_atoms():
m = PropertiesOnlyMatrix(2, 2, [1, 2, x, 1 - 1/x])
assert m.atoms() == {S.One, S(2), S.NegativeOne, x}
assert m.atoms(Symbol) == {x}
def test_free_symbols():
assert PropertiesOnlyMatrix([[x], [0]]).free_symbols == {x}
def test_has():
A = PropertiesOnlyMatrix(((x, y), (2, 3)))
assert A.has(x)
assert not A.has(z)
assert A.has(Symbol)
A = PropertiesOnlyMatrix(((2, y), (2, 3)))
assert not A.has(x)
def test_is_anti_symmetric():
x = symbols('x')
assert PropertiesOnlyMatrix(2, 1, [1, 2]).is_anti_symmetric() is False
m = PropertiesOnlyMatrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
assert m.is_anti_symmetric() is True
assert m.is_anti_symmetric(simplify=False) is False
assert m.is_anti_symmetric(simplify=lambda x: x) is False
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in m])
assert m.is_anti_symmetric(simplify=False) is True
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in [S.One] + list(m)[1:]])
assert m.is_anti_symmetric() is False
def test_diagonal_symmetrical():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert not m.is_diagonal()
assert m.is_symmetric()
assert m.is_symmetric(simplify=False)
m = PropertiesOnlyMatrix(2, 2, [1, 0, 0, 1])
assert m.is_diagonal()
m = PropertiesOnlyMatrix(3, 3, diag(1, 2, 3))
assert m.is_diagonal()
assert m.is_symmetric()
m = PropertiesOnlyMatrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
assert m == diag(1, 2, 3)
m = PropertiesOnlyMatrix(2, 3, zeros(2, 3))
assert not m.is_symmetric()
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0), (0, 6), (0, 0)))
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0, 0), (0, 6, 0)))
assert m.is_diagonal()
m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
assert m.is_symmetric()
assert not m.is_symmetric(simplify=False)
assert m.expand().is_symmetric(simplify=False)
def test_is_hermitian():
a = PropertiesOnlyMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a = PropertiesOnlyMatrix([[2*I, I], [-I, 1]])
assert a.is_hermitian is False
a = PropertiesOnlyMatrix([[x, I], [-I, 1]])
assert a.is_hermitian is None
a = PropertiesOnlyMatrix([[x, 1], [-I, 1]])
assert a.is_hermitian is False
def test_is_Identity():
assert eye_Properties(3).is_Identity
assert not PropertiesOnlyMatrix(zeros(3)).is_Identity
assert not PropertiesOnlyMatrix(ones(3)).is_Identity
# issue 6242
assert not PropertiesOnlyMatrix([[1, 0, 0]]).is_Identity
def test_is_symbolic():
a = PropertiesOnlyMatrix([[x, x], [x, x]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, 7, 8]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, x, 8]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, x, 3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1], [x], [3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_symbolic() is False
def test_is_upper():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_upper is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_upper is False
def test_is_lower():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_lower is False
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_lower is True
def test_is_square():
m = PropertiesOnlyMatrix([[1], [1]])
m2 = PropertiesOnlyMatrix([[2, 2], [2, 2]])
assert not m.is_square
assert m2.is_square
def test_is_symmetric():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert m.is_symmetric()
m = PropertiesOnlyMatrix(2, 2, [0, 1, 0, 1])
assert not m.is_symmetric()
def test_is_hessenberg():
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
assert A.is_upper_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, 0, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, -1, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg is False
assert A.is_upper_hessenberg is False
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
assert not A.is_upper_hessenberg
def test_is_zero():
assert PropertiesOnlyMatrix(0, 0, []).is_zero_matrix
assert PropertiesOnlyMatrix([[0, 0], [0, 0]]).is_zero_matrix
assert PropertiesOnlyMatrix(zeros(3, 4)).is_zero_matrix
assert not PropertiesOnlyMatrix(eye(3)).is_zero_matrix
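    # is_zero_matrix uses three-valued logic: None below means it cannot be
    # determined, since x may or may not be zero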
    assert PropertiesOnlyMatrix([[x, 0], [0, 0]]).is_zero_matrix is None
    assert PropertiesOnlyMatrix([[x, 1], [0, 0]]).is_zero_matrix is False
    a = Symbol('a', nonzero=True)
    assert PropertiesOnlyMatrix([[a, 0], [0, 0]]).is_zero_matrix is False
def test_values():
assert set(PropertiesOnlyMatrix(2, 2, [0, 1, 2, 3]
).values()) == {1, 2, 3}
x = Symbol('x', real=True)
assert set(PropertiesOnlyMatrix(2, 2, [x, 0, 0, 1]
).values()) == {x, 1}
# OperationsOnlyMatrix tests
def test_applyfunc():
m0 = OperationsOnlyMatrix(eye(3))
assert m0.applyfunc(lambda x: 2*x) == eye(3)*2
assert m0.applyfunc(lambda x: 0) == zeros(3)
assert m0.applyfunc(lambda x: 1) == ones(3)
def test_adjoint():
dat = [[0, I], [1, 0]]
ans = OperationsOnlyMatrix([[0, 1], [-I, 0]])
assert ans.adjoint() == Matrix(dat)
def test_as_real_imag():
m1 = OperationsOnlyMatrix(2, 2, [1, 2, 3, 4])
m3 = OperationsOnlyMatrix(2, 2,
[1 + S.ImaginaryUnit, 2 + 2*S.ImaginaryUnit,
3 + 3*S.ImaginaryUnit, 4 + 4*S.ImaginaryUnit])
a, b = m3.as_real_imag()
assert a == m1
assert b == m1
def test_conjugate():
M = OperationsOnlyMatrix([[0, I, 5],
[1, 2, 0]])
assert M.T == Matrix([[0, 1],
[I, 2],
[5, 0]])
assert M.C == Matrix([[0, -I, 5],
[1, 2, 0]])
assert M.C == M.conjugate()
assert M.H == M.T.C
assert M.H == Matrix([[ 0, 1],
[-I, 2],
[ 5, 0]])
def test_doit():
a = OperationsOnlyMatrix([[Add(x, x, evaluate=False)]])
assert a[0] != 2*x
assert a.doit() == Matrix([[2*x]])
def test_evalf():
a = OperationsOnlyMatrix(2, 1, [sqrt(5), 6])
assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_expand():
m0 = OperationsOnlyMatrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
# Test if expand() returns a matrix
m1 = m0.expand()
assert m1 == Matrix(
[[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
a = Symbol('a', real=True)
assert OperationsOnlyMatrix(1, 1, [exp(I*a)]).expand(complex=True) == \
Matrix([cos(a) + I*sin(a)])
def test_refine():
m0 = OperationsOnlyMatrix([[Abs(x)**2, sqrt(x**2)],
[sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])
m1 = m0.refine(Q.real(x) & Q.real(y))
assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])
m1 = m0.refine(Q.positive(x) & Q.positive(y))
assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])
m1 = m0.refine(Q.negative(x) & Q.negative(y))
assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])
def test_replace():
F, G = symbols('F, G', cls=Function)
K = OperationsOnlyMatrix(2, 2, lambda i, j: G(i+j))
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G)
assert N == K
def test_replace_map():
F, G = symbols('F, G', cls=Function)
    K = OperationsOnlyMatrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}),
                                    (G(1), {F(1): G(1)}), (G(2), {F(2): G(2)})])
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G, True)
assert N == K
def test_rot90():
A = Matrix([[1, 2], [3, 4]])
assert A == A.rot90(0) == A.rot90(4)
assert A.rot90(2) == A.rot90(-2) == A.rot90(6) == Matrix(((4, 3), (2, 1)))
assert A.rot90(3) == A.rot90(-1) == A.rot90(7) == Matrix(((2, 4), (1, 3)))
assert A.rot90() == A.rot90(-7) == A.rot90(-3) == Matrix(((3, 1), (4, 2)))
def test_simplify():
n = Symbol('n')
f = Function('f')
M = OperationsOnlyMatrix([[ 1/x + 1/y, (x + x*y) / x ],
[ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
assert M.simplify() == Matrix([[ (x + y)/(x * y), 1 + y ],
[ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
eq = (1 + x)**2
M = OperationsOnlyMatrix([[eq]])
assert M.simplify() == Matrix([[eq]])
assert M.simplify(ratio=oo) == Matrix([[eq.simplify(ratio=oo)]])
def test_subs():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x*y]]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
Matrix([[(x - 1)*(y - 1)]])
def test_trace():
M = OperationsOnlyMatrix([[1, 0, 0],
[0, 5, 0],
[0, 0, 8]])
assert M.trace() == 14
def test_xreplace():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).xreplace({x: 5}) == \
Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
def test_permute():
a = OperationsOnlyMatrix(3, 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
raises(IndexError, lambda: a.permute([[0, 5]]))
raises(ValueError, lambda: a.permute(Symbol('x')))
b = a.permute_rows([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]]) == b == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
b = a.permute_cols([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]], orientation='cols') == b ==\
Matrix([
[ 2, 3, 1, 4],
[ 6, 7, 5, 8],
[10, 11, 9, 12]])
b = a.permute_cols([[0, 2], [0, 1]], direction='backward')
assert a.permute([[0, 2], [0, 1]], orientation='cols', direction='backward') == b ==\
Matrix([
[ 3, 1, 2, 4],
[ 7, 5, 6, 8],
[11, 9, 10, 12]])
assert a.permute([1, 2, 0, 3]) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
from sympy.combinatorics import Permutation
assert a.permute(Permutation([1, 2, 0, 3])) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
# ArithmeticOnlyMatrix tests
def test_abs():
m = ArithmeticOnlyMatrix([[1, -2], [x, y]])
assert abs(m) == ArithmeticOnlyMatrix([[1, 2], [Abs(x), Abs(y)]])
def test_add():
m = ArithmeticOnlyMatrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
assert m + m == ArithmeticOnlyMatrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
raises(ShapeError, lambda: m + n)
def test_multiplication():
a = ArithmeticOnlyMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = ArithmeticOnlyMatrix((
(1, 2),
(3, 0),
))
raises(ShapeError, lambda: b*a)
raises(TypeError, lambda: a*{})
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
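    # Note: eval() only compiles expressions, so eval('c = a @ b') always raises
    # SyntaxError and the else branch is skipped; presumably this was meant as a
    # guard for Python versions without the @ operator (< 3.5)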
try:
eval('c = a @ b')
except SyntaxError:
pass
else:
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
h = a.multiply_elementwise(c)
assert h == matrix_multiply_elementwise(a, c)
assert h[0, 0] == 7
assert h[0, 1] == 4
assert h[1, 0] == 18
assert h[1, 1] == 6
assert h[2, 0] == 0
assert h[2, 1] == 0
raises(ShapeError, lambda: a.multiply_elementwise(b))
c = b * Symbol("x")
assert isinstance(c, ArithmeticOnlyMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c2 = x * b
assert c == c2
c = 5 * b
assert isinstance(c, ArithmeticOnlyMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
try:
eval('c = 5 @ b')
except SyntaxError:
pass
else:
assert isinstance(c, ArithmeticOnlyMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
def test_matmul():
a = Matrix([[1, 2], [3, 4]])
assert a.__matmul__(2) == NotImplemented
assert a.__rmatmul__(2) == NotImplemented
    # This is done this way because @ is only supported in Python 3.5+
    # To check the 2 @ a case
try:
eval('2 @ a')
except SyntaxError:
pass
    except TypeError:  # TypeError is raised when NotImplemented is returned
        pass
    # Check the a @ 2 case
try:
eval('a @ 2')
except SyntaxError:
pass
    except TypeError:  # TypeError is raised when NotImplemented is returned
pass
def test_non_matmul():
"""
Test that if explicitly specified as non-matrix, mul reverts
to scalar multiplication.
"""
class foo(Expr):
is_Matrix=False
is_MatrixLike=False
shape = (1, 1)
A = Matrix([[1, 2], [3, 4]])
b = foo()
assert b*A == Matrix([[b, 2*b], [3*b, 4*b]])
assert A*b == Matrix([[b, 2*b], [3*b, 4*b]])
def test_power():
raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
A = ArithmeticOnlyMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == (6140, 8097, 10796, 14237)
A = ArithmeticOnlyMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == (290, 262, 251, 448, 440, 368, 702, 954, 433)
assert A**0 == eye(3)
assert A**1 == A
assert (ArithmeticOnlyMatrix([[2]]) ** 100)[0, 0] == 2**100
assert ArithmeticOnlyMatrix([[1, 2], [3, 4]])**Integer(2) == ArithmeticOnlyMatrix([[7, 10], [15, 22]])
    A = Matrix([[1, 2], [4, 5]])
assert A.pow(20, method='cayley') == A.pow(20, method='multiply')
def test_neg():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert -n == ArithmeticOnlyMatrix(1, 2, [-1, -2])
def test_sub():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert n - n == ArithmeticOnlyMatrix(1, 2, [0, 0])
def test_div():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert n/2 == ArithmeticOnlyMatrix(1, 2, [S.Half, S(2)/2])
# SpecialOnlyMatrix tests
def test_eye():
assert list(SpecialOnlyMatrix.eye(2, 2)) == [1, 0, 0, 1]
assert list(SpecialOnlyMatrix.eye(2)) == [1, 0, 0, 1]
assert type(SpecialOnlyMatrix.eye(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.eye(2, cls=Matrix)) == Matrix
def test_ones():
assert list(SpecialOnlyMatrix.ones(2, 2)) == [1, 1, 1, 1]
assert list(SpecialOnlyMatrix.ones(2)) == [1, 1, 1, 1]
assert SpecialOnlyMatrix.ones(2, 3) == Matrix([[1, 1, 1], [1, 1, 1]])
assert type(SpecialOnlyMatrix.ones(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.ones(2, cls=Matrix)) == Matrix
def test_zeros():
assert list(SpecialOnlyMatrix.zeros(2, 2)) == [0, 0, 0, 0]
assert list(SpecialOnlyMatrix.zeros(2)) == [0, 0, 0, 0]
assert SpecialOnlyMatrix.zeros(2, 3) == Matrix([[0, 0, 0], [0, 0, 0]])
assert type(SpecialOnlyMatrix.zeros(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.zeros(2, cls=Matrix)) == Matrix
def test_diag_make():
diag = SpecialOnlyMatrix.diag
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert diag(a, b, b) == Matrix([
[1, 2, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0],
[0, 0, 3, x, 0, 0],
[0, 0, y, 3, 0, 0],
[0, 0, 0, 0, 3, x],
[0, 0, 0, 0, y, 3],
])
assert diag(a, b, c) == Matrix([
[1, 2, 0, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0, 0],
[0, 0, 3, x, 0, 0, 0],
[0, 0, y, 3, 0, 0, 0],
[0, 0, 0, 0, 3, x, 3],
[0, 0, 0, 0, y, 3, z],
[0, 0, 0, 0, x, y, z],
])
assert diag(a, c, b) == Matrix([
[1, 2, 0, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0, 0],
[0, 0, 3, x, 3, 0, 0],
[0, 0, y, 3, z, 0, 0],
[0, 0, x, y, z, 0, 0],
[0, 0, 0, 0, 0, 3, x],
[0, 0, 0, 0, 0, y, 3],
])
a = Matrix([x, y, z])
b = Matrix([[1, 2], [3, 4]])
c = Matrix([[5, 6]])
# this "wandering diagonal" is what makes this
# a block diagonal where each block is independent
# of the others
assert diag(a, 7, b, c) == Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
raises(ValueError, lambda: diag(a, 7, b, c, rows=5))
assert diag(1) == Matrix([[1]])
assert diag(1, rows=2) == Matrix([[1, 0], [0, 0]])
assert diag(1, cols=2) == Matrix([[1, 0], [0, 0]])
assert diag(1, rows=3, cols=2) == Matrix([[1, 0], [0, 0], [0, 0]])
assert diag(*[2, 3]) == Matrix([
[2, 0],
[0, 3]])
assert diag(Matrix([2, 3])) == Matrix([
[2],
[3]])
assert diag([1, [2, 3], 4], unpack=False) == \
diag([[1], [2, 3], [4]], unpack=False) == Matrix([
[1, 0],
[2, 3],
[4, 0]])
assert type(diag(1)) == SpecialOnlyMatrix
assert type(diag(1, cls=Matrix)) == Matrix
assert Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
assert Matrix.diag([1, 2, 3], unpack=False).shape == (3, 1)
assert Matrix.diag([[1, 2, 3]]).shape == (3, 1)
assert Matrix.diag([[1, 2, 3]], unpack=False).shape == (1, 3)
assert Matrix.diag([[[1, 2, 3]]]).shape == (1, 3)
# kerning can be used to move the starting point
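    # (a block with zero rows such as ones(0, 2) adds no rows but shifts the
    # following blocks two columns to the right; ones(2, 0) shifts them two
    # rows down)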
assert Matrix.diag(ones(0, 2), 1, 2) == Matrix([
[0, 0, 1, 0],
[0, 0, 0, 2]])
assert Matrix.diag(ones(2, 0), 1, 2) == Matrix([
[0, 0],
[0, 0],
[1, 0],
[0, 2]])
def test_diagonal():
m = Matrix(3, 3, range(9))
d = m.diagonal()
assert d == m.diagonal(0)
assert tuple(d) == (0, 4, 8)
assert tuple(m.diagonal(1)) == (1, 5)
assert tuple(m.diagonal(-1)) == (3, 7)
assert tuple(m.diagonal(2)) == (2,)
assert type(m.diagonal()) == type(m)
s = SparseMatrix(3, 3, {(1, 1): 1})
assert type(s.diagonal()) == type(s)
assert type(m) != type(s)
raises(ValueError, lambda: m.diagonal(3))
raises(ValueError, lambda: m.diagonal(-3))
raises(ValueError, lambda: m.diagonal(pi))
M = ones(2, 3)
assert banded({i: list(M.diagonal(i))
for i in range(1-M.rows, M.cols)}) == M
def test_jordan_block():
assert SpecialOnlyMatrix.jordan_block(3, 2) == SpecialOnlyMatrix.jordan_block(3, eigenvalue=2) \
== SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) \
== SpecialOnlyMatrix.jordan_block(3, 2, band='upper') \
== SpecialOnlyMatrix.jordan_block(
size=3, eigenval=2, eigenvalue=2) \
== Matrix([
[2, 1, 0],
[0, 2, 1],
[0, 0, 2]])
assert SpecialOnlyMatrix.jordan_block(3, 2, band='lower') == Matrix([
[2, 0, 0],
[1, 2, 0],
[0, 1, 2]])
# missing eigenvalue
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(2))
# non-integral size
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(3.5, 2))
# size not specified
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2))
# inconsistent eigenvalue
raises(ValueError,
lambda: SpecialOnlyMatrix.jordan_block(
eigenvalue=2, eigenval=4))
# Deprecated feature
with warns_deprecated_sympy():
assert (SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) ==
SpecialOnlyMatrix(3, 3, (2, 1, 0, 0, 2, 1, 0, 0, 2)))
with warns_deprecated_sympy():
assert (SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2) ==
SpecialOnlyMatrix(3, 3, (2, 1, 0, 0, 2, 1, 0, 0, 2)))
with warns_deprecated_sympy():
assert SpecialOnlyMatrix.jordan_block(3, 2) == \
SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) == \
SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2)
with warns_deprecated_sympy():
assert SpecialOnlyMatrix.jordan_block(
rows=4, cols=3, eigenvalue=2) == \
Matrix([
[2, 1, 0],
[0, 2, 1],
[0, 0, 2],
[0, 0, 0]])
# Using alias keyword
assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == \
SpecialOnlyMatrix.jordan_block(size=3, eigenval=2)
def test_orthogonalize():
m = Matrix([[1, 2], [3, 4]])
assert m.orthogonalize(Matrix([[2], [1]])) == [Matrix([[2], [1]])]
assert m.orthogonalize(Matrix([[2], [1]]), normalize=True) == \
[Matrix([[2*sqrt(5)/5], [sqrt(5)/5]])]
assert m.orthogonalize(Matrix([[1], [2]]), Matrix([[-1], [4]])) == \
[Matrix([[1], [2]]), Matrix([[Rational(-12, 5)], [Rational(6, 5)]])]
assert m.orthogonalize(Matrix([[0], [0]]), Matrix([[-1], [4]])) == \
[Matrix([[-1], [4]])]
assert m.orthogonalize(Matrix([[0], [0]])) == []
n = Matrix([[9, 1, 9], [3, 6, 10], [8, 5, 2]])
vecs = [Matrix([[-5], [1]]), Matrix([[-5], [2]]), Matrix([[-5], [-2]])]
assert n.orthogonalize(*vecs) == \
[Matrix([[-5], [1]]), Matrix([[Rational(5, 26)], [Rational(25, 26)]])]
vecs = [Matrix([0, 0, 0]), Matrix([1, 2, 3]), Matrix([1, 4, 5])]
raises(ValueError, lambda: Matrix.orthogonalize(*vecs, rankcheck=True))
vecs = [Matrix([1, 2, 3]), Matrix([4, 5, 6]), Matrix([7, 8, 9])]
raises(ValueError, lambda: Matrix.orthogonalize(*vecs, rankcheck=True))
# CalculusOnlyMatrix tests
@XFAIL
def test_diff():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
    # TODO: currently not working because ``_MinimalMatrix`` cannot be sympified
assert m.diff(x) == Matrix(2, 1, [1, 0])
def test_integrate():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
assert m.integrate(x) == Matrix(2, 1, [x**2/2, y*x])
def test_jacobian2():
rho, phi = symbols("rho,phi")
X = CalculusOnlyMatrix(3, 1, [rho*cos(phi), rho*sin(phi), rho**2])
Y = CalculusOnlyMatrix(2, 1, [rho, phi])
J = Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0],
])
assert X.jacobian(Y) == J
m = CalculusOnlyMatrix(2, 2, [1, 2, 3, 4])
m2 = CalculusOnlyMatrix(4, 1, [1, 2, 3, 4])
raises(TypeError, lambda: m.jacobian(Matrix([1, 2])))
raises(TypeError, lambda: m2.jacobian(m))
def test_limit():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [1/x, y])
assert m.limit(x, 5) == Matrix(2, 1, [Rational(1, 5), y])
def test_issue_13774():
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
v = [1, 1, 1]
raises(TypeError, lambda: M*v)
raises(TypeError, lambda: v*M)
def test_companion():
x = Symbol('x')
y = Symbol('y')
raises(ValueError, lambda: Matrix.companion(1))
raises(ValueError, lambda: Matrix.companion(Poly([1], x)))
raises(ValueError, lambda: Matrix.companion(Poly([2, 1], x)))
raises(ValueError, lambda: Matrix.companion(Poly(x*y, [x, y])))
c0, c1, c2 = symbols('c0:3')
assert Matrix.companion(Poly([1, c0], x)) == Matrix([-c0])
assert Matrix.companion(Poly([1, c1, c0], x)) == \
Matrix([[0, -c0], [1, -c1]])
assert Matrix.companion(Poly([1, c2, c1, c0], x)) == \
Matrix([[0, 0, -c0], [1, 0, -c1], [0, 1, -c2]])
def test_issue_10589():
    x, y, z = symbols("x, y, z")
M1 = Matrix([x, y, z])
M1 = M1.subs(zip([x, y, z], [1, 2, 3]))
assert M1 == Matrix([[1], [2], [3]])
M2 = Matrix([[x, x, x, x, x], [x, x, x, x, x], [x, x, x, x, x]])
M2 = M2.subs(zip([x], [1]))
assert M2 == Matrix([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]])
def test_rmul_pr19860():
class Foo(ImmutableDenseMatrix):
_op_priority = MutableDenseMatrix._op_priority + 0.01
a = Matrix(2, 2, [1, 2, 3, 4])
b = Foo(2, 2, [1, 2, 3, 4])
    # Without the fix this would hit a RecursionError (maximum recursion depth
    # exceeded), since b always has higher priority even after a.as_mutable()
c = a*b
assert isinstance(c, Foo)
assert c == Matrix([[7, 10], [15, 22]])
def test_issue_18956():
A = Array([[1, 2], [3, 4]])
    B = Matrix([[1, 2], [3, 4]])
raises(TypeError, lambda: B + A)
raises(TypeError, lambda: A + B)
|
090d640420696def66f419d0c66b405331dc1c332ae9a998d7eb81b386ce1883 | import random
from sympy.core.numbers import I
from sympy import symbols, Symbol, Rational, sqrt, Poly
from sympy.matrices import Matrix, eye, ones
from sympy.abc import x, y, z
from sympy.testing.pytest import raises
from sympy.matrices.matrices import MatrixDeterminant
from sympy.matrices.common import NonSquareMatrixError, _MinimalMatrix, _CastableMatrix
from sympy.functions.combinatorial.factorials import factorial, subfactorial
class DeterminantOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixDeterminant):
pass
def test_determinant():
for M in [Matrix(), Matrix([[1]])]:
assert (
M.det() ==
M._eval_det_bareiss() ==
M._eval_det_berkowitz() ==
M._eval_det_lu() ==
1)
M = Matrix(( (-3, 2),
( 8, -5) ))
assert M.det(method="bareiss") == -1
assert M.det(method="berkowitz") == -1
assert M.det(method="lu") == -1
M = Matrix(( (x, 1),
(y, 2*y) ))
assert M.det(method="bareiss") == 2*x*y - y
assert M.det(method="berkowitz") == 2*x*y - y
assert M.det(method="lu") == 2*x*y - y
M = Matrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) ))
assert M.det(method="bareiss") == 1
assert M.det(method="berkowitz") == 1
assert M.det(method="lu") == 1
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == -289
assert M.det(method="berkowitz") == -289
assert M.det(method="lu") == -289
M = Matrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 275
assert M.det(method="berkowitz") == 275
assert M.det(method="lu") == 275
M = Matrix(( ( 3, 0, 0, 0),
(-2, 1, 0, 0),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == 60
assert M.det(method="berkowitz") == 60
assert M.det(method="lu") == 60
M = Matrix(( ( 1, 0, 0, 0),
( 5, 0, 0, 0),
( 9, 10, 11, 0),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(0, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 243
assert M.det(method="berkowitz") == 243
assert M.det(method="lu") == 243
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="bareiss") == -55
assert M.det(method="berkowitz") == -55
assert M.det(method="lu") == -55
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="bareiss") == 11664
assert M.det(method="berkowitz") == 11664
assert M.det(method="lu") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="bareiss") == 123
assert M.det(method="berkowitz") == 123
assert M.det(method="lu") == 123
M = Matrix(( (x, y, z),
(1, 0, 0),
(y, z, x) ))
assert M.det(method="bareiss") == z**2 - x*y
assert M.det(method="berkowitz") == z**2 - x*y
assert M.det(method="lu") == z**2 - x*y
# issue 13835
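    # each row of M(n) differs from the first by a constant multiple of the
    # all-ones row, so for n >= 3 the rows are linearly dependent and the
    # determinant vanishes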
a = symbols('a')
M = lambda n: Matrix([[i + a*j for i in range(n)]
for j in range(n)])
assert M(5).det() == 0
assert M(6).det() == 0
assert M(7).det() == 0
def test_issue_14517():
M = Matrix([
[ 0, 10*I, 10*I, 0],
[10*I, 0, 0, 10*I],
[10*I, 0, 5 + 2*I, 10*I],
[ 0, 10*I, 10*I, 5 + 2*I]])
ev = M.eigenvals()
# test one random eigenvalue, the computation is a little slow
test_ev = random.choice(list(ev.keys()))
assert (M - test_ev*eye(4)).det() == 0
def test_legacy_det():
# Minimal support for legacy keys for 'method' in det()
# Partially copied from test_determinant()
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareis") == -289
assert M.det(method="det_lu") == -289
assert M.det(method="det_LU") == -289
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="bareis") == 275
assert M.det(method="det_lu") == 275
assert M.det(method="Bareis") == 275
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="bareis") == -55
assert M.det(method="det_lu") == -55
assert M.det(method="BAREISS") == -55
M = Matrix(( ( 3, 0, 0, 0),
(-2, 1, 0, 0),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == 60
assert M.det(method="berkowitz") == 60
assert M.det(method="lu") == 60
M = Matrix(( ( 1, 0, 0, 0),
( 5, 0, 0, 0),
( 9, 10, 11, 0),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(0, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 243
assert M.det(method="berkowitz") == 243
assert M.det(method="lu") == 243
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="bareis") == 11664
assert M.det(method="det_lu") == 11664
assert M.det(method="BERKOWITZ") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="bareis") == 123
assert M.det(method="det_lu") == 123
assert M.det(method="LU") == 123
def eye_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: 0)
def test_det():
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
raises(NonSquareMatrixError, lambda: a.det())
z = zeros_Determinant(2)
ey = eye_Determinant(2)
assert z.det() == 0
assert ey.det() == 1
x = Symbol('x')
a = DeterminantOnlyMatrix(0, 0, [])
b = DeterminantOnlyMatrix(1, 1, [5])
c = DeterminantOnlyMatrix(2, 2, [1, 2, 3, 4])
d = DeterminantOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8])
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
from sympy.abc import i, j, k, l, m, n
f = DeterminantOnlyMatrix(3, 3, [i, l, m, 0, j, n, 0, 0, k])
g = DeterminantOnlyMatrix(3, 3, [i, 0, 0, l, j, 0, m, n, k])
h = DeterminantOnlyMatrix(3, 3, [x**3, 0, 0, i, x**-1, 0, j, k, x**-2])
# the method keyword for `det` doesn't kick in until 4x4 matrices,
# so there is no need to test all methods on smaller ones
assert a.det() == 1
assert b.det() == 5
assert c.det() == -2
assert d.det() == 3
assert e.det() == 4*x - 24
assert e.det(method='bareiss') == 4*x - 24
assert e.det(method='berkowitz') == 4*x - 24
assert f.det() == i*j*k
assert g.det() == i*j*k
assert h.det() == 1
raises(ValueError, lambda: e.det(iszerofunc="test"))
def test_permanent():
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert M.per() == 450
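    # the permanent of the n x n all-ones matrix counts all permutations of n
    # elements (n!), while zeroing its diagonal counts derangements
    # (subfactorial(n))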
for i in range(1, 12):
assert ones(i, i).per() == ones(i, i).T.per() == factorial(i)
assert (ones(i, i)-eye(i)).per() == (ones(i, i)-eye(i)).T.per() == subfactorial(i)
a1, a2, a3, a4, a5 = symbols('a_1 a_2 a_3 a_4 a_5')
M = Matrix([a1, a2, a3, a4, a5])
assert M.per() == M.T.per() == a1 + a2 + a3 + a4 + a5
def test_adjugate():
x = Symbol('x')
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
adj = Matrix([
[ 4, -8, 4, 0],
[ 76, -14*x - 68, 14*x - 8, -4*x + 24],
[-122, 17*x + 142, -21*x + 4, 8*x - 48],
[ 48, -4*x - 72, 8*x, -4*x + 24]])
assert e.adjugate() == adj
assert e.adjugate(method='bareiss') == adj
assert e.adjugate(method='berkowitz') == adj
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
raises(NonSquareMatrixError, lambda: a.adjugate())
def test_util():
R = Rational
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.norm() == sqrt(14)
assert v1.project(v2) == Matrix(1, 3, [R(39)/25, R(52)/25, R(13)/5])
assert Matrix.zeros(1, 2) == Matrix(1, 2, [0, 0])
assert ones(1, 2) == Matrix(1, 2, [1, 1])
assert v1.copy() == v1
# cofactor
assert eye(3) == eye(3).cofactor_matrix()
test = Matrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactor_matrix() == \
Matrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactor_matrix() == \
Matrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
def test_cofactor_and_minors():
x = Symbol('x')
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
m = Matrix([
[ x, 1, 3],
[ 2, 9, 11],
[12, 13, 14]])
cm = Matrix([
[ 4, 76, -122, 48],
[-8, -14*x - 68, 17*x + 142, -4*x - 72],
[ 4, 14*x - 8, -21*x + 4, 8*x],
[ 0, -4*x + 24, 8*x - 48, -4*x + 24]])
sub = Matrix([
[x, 1, 2],
[4, 5, 6],
[2, 9, 10]])
assert e.minor_submatrix(1, 2) == m
assert e.minor_submatrix(-1, -1) == sub
assert e.minor(1, 2) == -17*x - 142
assert e.cofactor(1, 2) == 17*x + 142
assert e.cofactor_matrix() == cm
assert e.cofactor_matrix(method="bareiss") == cm
assert e.cofactor_matrix(method="berkowitz") == cm
raises(ValueError, lambda: e.cofactor(4, 5))
raises(ValueError, lambda: e.minor(4, 5))
raises(ValueError, lambda: e.minor_submatrix(4, 5))
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
assert a.minor_submatrix(0, 0) == Matrix([[5, 6]])
raises(ValueError, lambda:
DeterminantOnlyMatrix(0, 0, []).minor_submatrix(0, 0))
raises(NonSquareMatrixError, lambda: a.cofactor(0, 0))
raises(NonSquareMatrixError, lambda: a.minor(0, 0))
raises(NonSquareMatrixError, lambda: a.cofactor_matrix())
def test_charpoly():
x, y = Symbol('x'), Symbol('y')
z, t = Symbol('z'), Symbol('t')
    from sympy.abc import a, b, c
m = DeterminantOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x)
assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y)
assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x)
raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly())
n = DeterminantOnlyMatrix(4, 4, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert n.charpoly() == Poly(x**4, x)
n = DeterminantOnlyMatrix(4, 4, [45, 0, 0, 0, 0, 23, 0, 0, 0, 0, 87, 0, 0, 0, 0, 12])
assert n.charpoly() == Poly(x**4 - 167*x**3 + 8811*x**2 - 173457*x + 1080540, x)
n = DeterminantOnlyMatrix(3, 3, [x, 0, 0, a, y, 0, b, c, z])
    assert n.charpoly() == Poly(t**3 - (x + y + z)*t**2 + t*(x*y + y*z + x*z) - x*y*z, t)
|
ba3c7a6e551c7e83bd530d6e68d78363206939c006334f5b7f847f27a17030b6 | from sympy.testing.pytest import ignore_warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.matrices import Matrix, SparseMatrix, ImmutableMatrix
with ignore_warnings(SymPyDeprecationWarning):
from sympy.matrices.densetools import eye
from sympy.matrices.densearith import add, sub, mulmatmat, mulmatscaler
from sympy import ZZ
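# These tests work on the low-level dense representation used by densearith:
# plain nested Python lists whose entries are elements of the ZZ integer ring.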
def test_add():
a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]
assert add(a, b, ZZ) == [[ZZ(8), ZZ(11), ZZ(13)], [ZZ(5), ZZ(11), ZZ(6)], [ZZ(18), ZZ(15), ZZ(17)]]
assert add(c, d, ZZ) == [[ZZ(15)], [ZZ(21)], [ZZ(26)]]
assert add(e, f, ZZ) == e
def test_sub():
a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]
assert sub(a, b, ZZ) == [[ZZ(-2), ZZ(3), ZZ(-5)], [ZZ(-1), ZZ(-3), ZZ(4)], [ZZ(-6), ZZ(-11), ZZ(-11)]]
assert sub(c, d, ZZ) == [[ZZ(9)], [ZZ(13)], [ZZ(16)]]
assert sub(e, f, ZZ) == e
def test_mulmatmat():
a = [[ZZ(3), ZZ(4)], [ZZ(5), ZZ(6)]]
b = [[ZZ(1), ZZ(2)], [ZZ(7), ZZ(8)]]
c = eye(2, ZZ)
d = [[ZZ(6)], [ZZ(7)]]
assert mulmatmat(a, b, ZZ) == [[ZZ(31), ZZ(38)], [ZZ(47), ZZ(58)]]
assert mulmatmat(a, c, ZZ) == [[ZZ(3), ZZ(4)], [ZZ(5), ZZ(6)]]
assert mulmatmat(b, d, ZZ) == [[ZZ(20)], [ZZ(98)]]
def test_mulmatscaler():
a = eye(3, ZZ)
b = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
assert mulmatscaler(a, ZZ(4), ZZ) == [[ZZ(4), ZZ(0), ZZ(0)], [ZZ(0), ZZ(4), ZZ(0)], [ZZ(0), ZZ(0), ZZ(4)]]
assert mulmatscaler(b, ZZ(1), ZZ) == [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
def test_eq():
A = Matrix([[1]])
B = ImmutableMatrix([[1]])
C = SparseMatrix([[1]])
assert A != object()
assert A != "Matrix([[1]])"
assert A == B
assert A == C
|
6181d322587b90cfc8fa488541b238caaaa2cb77036ae224b191092d9e32ebdb | import random
import concurrent.futures
from sympy import (
Abs, Add, E, Float, I, Integer, Max, Min, Poly, Pow, PurePoly, Rational,
S, Symbol, cos, exp, log, oo, pi, signsimp, simplify, sin,
sqrt, symbols, sympify, trigsimp, tan, sstr, diff, Function, expand)
from sympy.matrices.matrices import (ShapeError, MatrixError,
NonSquareMatrixError, DeferredVector, _find_reasonable_pivot_naive,
_simplify)
from sympy.matrices import (
GramSchmidt, ImmutableMatrix, ImmutableSparseMatrix, Matrix,
SparseMatrix, casoratian, diag, eye, hessian,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,
rot_axis3, wronskian, zeros, MutableDenseMatrix, ImmutableDenseMatrix,
MatrixSymbol, dotprodsimp)
from sympy.matrices.utilities import _dotprodsimp_state
from sympy.core.compatibility import iterable, Hashable
from sympy.core import Tuple, Wild
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.utilities.iterables import flatten, capture
from sympy.testing.pytest import raises, XFAIL, skip, warns_deprecated_sympy
from sympy.assumptions import Q
from sympy.tensor.array import Array
from sympy.matrices.expressions import MatPow
from sympy.abc import a, b, c, d, x, y, z, t
# don't re-order this list
classes = (Matrix, SparseMatrix, ImmutableMatrix, ImmutableSparseMatrix)
def test_args():
for n, cls in enumerate(classes):
m = cls.zeros(3, 2)
# all should give back the same type of arguments, e.g. ints for shape
assert m.shape == (3, 2) and all(type(i) is int for i in m.shape)
assert m.rows == 3 and type(m.rows) is int
assert m.cols == 2 and type(m.cols) is int
if not n % 2:
assert type(m._mat) in (list, tuple, Tuple)
else:
assert type(m._smat) is dict
def test_division():
v = Matrix(1, 2, [x, y])
assert v/z == Matrix(1, 2, [x/z, y/z])
def test_sum():
m = Matrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
assert m + m == Matrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
n = Matrix(1, 2, [1, 2])
raises(ShapeError, lambda: m + n)
def test_abs():
m = Matrix(1, 2, [-3, x])
n = Matrix(1, 2, [3, Abs(x)])
assert abs(m) == n
def test_addition():
a = Matrix((
(1, 2),
(3, 1),
))
b = Matrix((
(1, 2),
(3, 0),
))
assert a + b == a.add(b) == Matrix([[2, 4], [6, 1]])
def test_fancy_index_matrix():
for M in (Matrix, SparseMatrix):
a = M(3, 3, range(9))
assert a == a[:, :]
assert a[1, :] == Matrix(1, 3, [3, 4, 5])
assert a[:, 1] == Matrix([1, 4, 7])
assert a[[0, 1], :] == Matrix([[0, 1, 2], [3, 4, 5]])
assert a[[0, 1], 2] == a[[0, 1], [2]]
assert a[2, [0, 1]] == a[[2], [0, 1]]
assert a[:, [0, 1]] == Matrix([[0, 1], [3, 4], [6, 7]])
assert a[0, 0] == 0
assert a[0:2, :] == Matrix([[0, 1, 2], [3, 4, 5]])
assert a[:, 0:2] == Matrix([[0, 1], [3, 4], [6, 7]])
assert a[::2, 1] == a[[0, 2], 1]
assert a[1, ::2] == a[1, [0, 2]]
a = M(3, 3, range(9))
assert a[[0, 2, 1, 2, 1], :] == Matrix([
[0, 1, 2],
[6, 7, 8],
[3, 4, 5],
[6, 7, 8],
[3, 4, 5]])
assert a[:, [0,2,1,2,1]] == Matrix([
[0, 2, 1, 2, 1],
[3, 5, 4, 5, 4],
[6, 8, 7, 8, 7]])
a = SparseMatrix.zeros(3)
a[1, 2] = 2
a[0, 1] = 3
a[2, 0] = 4
assert a.extract([1, 1], [2]) == Matrix([
[2],
[2]])
assert a.extract([1, 0], [2, 2, 2]) == Matrix([
[2, 2, 2],
[0, 0, 0]])
assert a.extract([1, 0, 1, 2], [2, 0, 1, 0]) == Matrix([
[2, 0, 0, 0],
[0, 0, 3, 0],
[2, 0, 0, 0],
[0, 4, 0, 4]])
def test_multiplication():
a = Matrix((
(1, 2),
(3, 1),
(0, 6),
))
b = Matrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
try:
eval('c = a @ b')
except SyntaxError:
pass
else:
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
h = matrix_multiply_elementwise(a, c)
assert h == a.multiply_elementwise(c)
assert h[0, 0] == 7
assert h[0, 1] == 4
assert h[1, 0] == 18
assert h[1, 1] == 6
assert h[2, 0] == 0
assert h[2, 1] == 0
raises(ShapeError, lambda: matrix_multiply_elementwise(a, b))
c = b * Symbol("x")
assert isinstance(c, Matrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c2 = x * b
assert c == c2
c = 5 * b
assert isinstance(c, Matrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
try:
eval('c = 5 @ b')
except SyntaxError:
pass
else:
assert isinstance(c, Matrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
def test_power():
raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
R = Rational
A = Matrix([[2, 3], [4, 5]])
assert (A**-3)[:] == [R(-269)/8, R(153)/8, R(51)/2, R(-29)/2]
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = Matrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
assert A**0 == eye(3)
assert A**1 == A
assert (Matrix([[2]]) ** 100)[0, 0] == 2**100
assert eye(2)**10000000 == eye(2)
assert Matrix([[1, 2], [3, 4]])**Integer(2) == Matrix([[7, 10], [15, 22]])
A = Matrix([[33, 24], [48, 57]])
assert (A**S.Half)[:] == [5, 2, 4, 7]
A = Matrix([[0, 4], [-1, 5]])
assert (A**S.Half)**2 == A
assert Matrix([[1, 0], [1, 1]])**S.Half == Matrix([[1, 0], [S.Half, 1]])
assert Matrix([[1, 0], [1, 1]])**0.5 == Matrix([[1.0, 0], [0.5, 1.0]])
from sympy.abc import a, b, n
assert Matrix([[1, a], [0, 1]])**n == Matrix([[1, a*n], [0, 1]])
assert Matrix([[b, a], [0, b]])**n == Matrix([[b**n, a*b**(n-1)*n], [0, b**n]])
    assert Matrix([[a, 1, 0], [0, a, 1], [0, 0, a]])**n == Matrix([
        [a**n, a**(n - 1)*n, (a**n*n**2 - a**n*n)/(2*a**2)],
        [    0,        a**n,                 a**(n - 1)*n],
        [    0,           0,                         a**n]])
assert Matrix([[a, 1, 0], [0, a, 0], [0, 0, b]])**n == Matrix([
[a**n, a**(n-1)*n, 0],
[0, a**n, 0],
[0, 0, b**n]])
A = Matrix([[1, 0], [1, 7]])
assert A._matrix_pow_by_jordan_blocks(S(3)) == A._eval_pow_by_recursion(3)
A = Matrix([[2]])
assert A**10 == Matrix([[2**10]]) == A._matrix_pow_by_jordan_blocks(S(10)) == \
A._eval_pow_by_recursion(10)
# testing a matrix that cannot be jordan blocked issue 11766
m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m._matrix_pow_by_jordan_blocks(S(10)))
# test issue 11964
raises(MatrixError, lambda: Matrix([[1, 1], [3, 3]])._matrix_pow_by_jordan_blocks(S(-10)))
A = Matrix([[0, 1, 0], [0, 0, 1], [0, 0, 0]]) # Nilpotent jordan block size 3
assert A**10.0 == Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
raises(ValueError, lambda: A**2.1)
raises(ValueError, lambda: A**Rational(3, 2))
A = Matrix([[8, 1], [3, 2]])
assert A**10.0 == Matrix([[1760744107, 272388050], [817164150, 126415807]])
A = Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) # Nilpotent jordan block size 1
assert A**10.0 == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
A = Matrix([[0, 1, 0], [0, 0, 1], [0, 0, 1]]) # Nilpotent jordan block size 2
assert A**10.0 == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
n = Symbol('n', integer=True)
assert isinstance(A**n, MatPow)
n = Symbol('n', integer=True, negative=True)
raises(ValueError, lambda: A**n)
n = Symbol('n', integer=True, nonnegative=True)
assert A**n == Matrix([
[KroneckerDelta(0, n), KroneckerDelta(1, n), -KroneckerDelta(0, n) - KroneckerDelta(1, n) + 1],
[ 0, KroneckerDelta(0, n), 1 - KroneckerDelta(0, n)],
[ 0, 0, 1]])
assert A**(n + 2) == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
raises(ValueError, lambda: A**Rational(3, 2))
A = Matrix([[0, 0, 1], [3, 0, 1], [4, 3, 1]])
assert A**5.0 == Matrix([[168, 72, 89], [291, 144, 161], [572, 267, 329]])
assert A**5.0 == A**5
    A = Matrix([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])
n = Symbol("n")
An = A**n
assert An.subs(n, 2).doit() == A**2
raises(ValueError, lambda: An.subs(n, -2).doit())
assert An * An == A**(2*n)
# concretizing behavior for non-integer and complex powers
    A = Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
n = Symbol('n', integer=True, positive=True)
assert A**n == A
n = Symbol('n', integer=True, nonnegative=True)
assert A**n == diag(0**n, 0**n, 0**n)
assert (A**n).subs(n, 0) == eye(3)
assert (A**n).subs(n, 1) == zeros(3)
    A = Matrix([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
    assert A**2.1 == diag(2**2.1, 2**2.1, 2**2.1)
    assert A**I == diag(2**I, 2**I, 2**I)
A = Matrix([[0, 1, 0], [0, 0, 1], [0, 0, 1]])
raises(ValueError, lambda: A**2.1)
raises(ValueError, lambda: A**I)
A = Matrix([[S.Half, S.Half], [S.Half, S.Half]])
assert A**S.Half == A
    A = Matrix([[1, 1], [3, 3]])
    assert A**S.Half == Matrix([[S.Half, S.Half], [3*S.Half, 3*S.Half]])
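# The issue 17247 tests below run inside dotprodsimp(True), which turns on
# intermediate simplification during matrix multiplication so that the
# expressions do not blow up.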
def test_issue_17247_expression_blowup_1():
M = Matrix([[1+x, 1-x], [1-x, 1+x]])
with dotprodsimp(True):
assert M.exp().expand() == Matrix([
[ (exp(2*x) + exp(2))/2, (-exp(2*x) + exp(2))/2],
[(-exp(2*x) + exp(2))/2, (exp(2*x) + exp(2))/2]])
def test_issue_17247_expression_blowup_2():
M = Matrix([[1+x, 1-x], [1-x, 1+x]])
with dotprodsimp(True):
P, J = M.jordan_form ()
assert P*J*P.inv()
def test_issue_17247_expression_blowup_3():
M = Matrix([[1+x, 1-x], [1-x, 1+x]])
with dotprodsimp(True):
assert M**100 == Matrix([
[633825300114114700748351602688*x**100 + 633825300114114700748351602688, 633825300114114700748351602688 - 633825300114114700748351602688*x**100],
[633825300114114700748351602688 - 633825300114114700748351602688*x**100, 633825300114114700748351602688*x**100 + 633825300114114700748351602688]])
def test_issue_17247_expression_blowup_4():
    # This matrix takes extremely long on current master even with intermediate
    # simplification, so an abbreviated version is used. The full version is left
    # here (commented out) for testing in case of future optimizations.
# M = Matrix(S('''[
# [ -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64, -9/32 - I/16, 183/256 - 97*I/128, 3/64 + 13*I/64, -23/32 - 59*I/256, 15/128 - 3*I/32, 19/256 + 551*I/1024],
# [-149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512, -219/128 + 115*I/256, 6301/4096 - 6609*I/1024, 119/128 + 143*I/128, -10879/2048 + 4343*I/4096, 129/256 - 549*I/512, 42533/16384 + 29103*I/8192],
# [ 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64, -9/32 - I/16, 183/256 - 97*I/128, 3/64 + 13*I/64, -23/32 - 59*I/256],
# [ -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512, -219/128 + 115*I/256, 6301/4096 - 6609*I/1024, 119/128 + 143*I/128, -10879/2048 + 4343*I/4096],
# [ 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64, -9/32 - I/16, 183/256 - 97*I/128],
# [ 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512, -219/128 + 115*I/256, 6301/4096 - 6609*I/1024],
# [ -2, 17/4 - 13*I/2, 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64],
# [ 1/4 + 13*I/4, -825/64 - 147*I/32, 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512],
# [ -4*I, 27/2 + 6*I, -2, 17/4 - 13*I/2, 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64],
# [ 1/4 + 5*I/2, -23/8 - 57*I/16, 1/4 + 13*I/4, -825/64 - 147*I/32, 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128],
# [ -4, 9 - 5*I, -4*I, 27/2 + 6*I, -2, 17/4 - 13*I/2, 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16],
# [ -2*I, 119/8 + 29*I/4, 1/4 + 5*I/2, -23/8 - 57*I/16, 1/4 + 13*I/4, -825/64 - 147*I/32, 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128]]'''))
# assert M**10 == Matrix([
# [ 7*(-221393644768594642173548179825793834595 - 1861633166167425978847110897013541127952*I)/9671406556917033397649408, 15*(31670992489131684885307005100073928751695 + 10329090958303458811115024718207404523808*I)/77371252455336267181195264, 7*(-3710978679372178839237291049477017392703 + 1377706064483132637295566581525806894169*I)/19342813113834066795298816, (9727707023582419994616144751727760051598 - 59261571067013123836477348473611225724433*I)/9671406556917033397649408, (31896723509506857062605551443641668183707 + 54643444538699269118869436271152084599580*I)/38685626227668133590597632, (-2024044860947539028275487595741003997397402 + 130959428791783397562960461903698670485863*I)/309485009821345068724781056, 3*(26190251453797590396533756519358368860907 - 27221191754180839338002754608545400941638*I)/77371252455336267181195264, (1154643595139959842768960128434994698330461 + 3385496216250226964322872072260446072295634*I)/618970019642690137449562112, 3*(-31849347263064464698310044805285774295286 - 11877437776464148281991240541742691164309*I)/77371252455336267181195264, (4661330392283532534549306589669150228040221 - 4171259766019818631067810706563064103956871*I)/1237940039285380274899124224, (9598353794289061833850770474812760144506 + 358027153990999990968244906482319780943983*I)/309485009821345068724781056, (-9755135335127734571547571921702373498554177 - 4837981372692695195747379349593041939686540*I)/2475880078570760549798248448],
# [(-379516731607474268954110071392894274962069 - 422272153179747548473724096872271700878296*I)/77371252455336267181195264, (41324748029613152354787280677832014263339501 - 12715121258662668420833935373453570749288074*I)/1237940039285380274899124224, (-339216903907423793947110742819264306542397 + 494174755147303922029979279454787373566517*I)/77371252455336267181195264, (-18121350839962855576667529908850640619878381 - 37413012454129786092962531597292531089199003*I)/1237940039285380274899124224, (2489661087330511608618880408199633556675926 + 1137821536550153872137379935240732287260863*I)/309485009821345068724781056, (-136644109701594123227587016790354220062972119 + 110130123468183660555391413889600443583585272*I)/4951760157141521099596496896, (1488043981274920070468141664150073426459593 - 9691968079933445130866371609614474474327650*I)/1237940039285380274899124224, 27*(4636797403026872518131756991410164760195942 + 3369103221138229204457272860484005850416533*I)/4951760157141521099596496896, (-8534279107365915284081669381642269800472363 + 2241118846262661434336333368511372725482742*I)/1237940039285380274899124224, (60923350128174260992536531692058086830950875 - 263673488093551053385865699805250505661590126*I)/9903520314283042199192993792, (18520943561240714459282253753348921824172569 + 24846649186468656345966986622110971925703604*I)/4951760157141521099596496896, (-232781130692604829085973604213529649638644431 + 35981505277760667933017117949103953338570617*I)/9903520314283042199192993792],
# [ (8742968295129404279528270438201520488950 + 3061473358639249112126847237482570858327*I)/4835703278458516698824704, (-245657313712011778432792959787098074935273 + 253113767861878869678042729088355086740856*I)/38685626227668133590597632, (1947031161734702327107371192008011621193 - 19462330079296259148177542369999791122762*I)/9671406556917033397649408, (552856485625209001527688949522750288619217 + 392928441196156725372494335248099016686580*I)/77371252455336267181195264, (-44542866621905323121630214897126343414629 + 3265340021421335059323962377647649632959*I)/19342813113834066795298816, (136272594005759723105646069956434264218730 - 330975364731707309489523680957584684763587*I)/38685626227668133590597632, (27392593965554149283318732469825168894401 + 75157071243800133880129376047131061115278*I)/38685626227668133590597632, 7*(-357821652913266734749960136017214096276154 - 45509144466378076475315751988405961498243*I)/309485009821345068724781056, (104485001373574280824835174390219397141149 - 99041000529599568255829489765415726168162*I)/77371252455336267181195264, (1198066993119982409323525798509037696321291 + 4249784165667887866939369628840569844519936*I)/618970019642690137449562112, (-114985392587849953209115599084503853611014 - 52510376847189529234864487459476242883449*I)/77371252455336267181195264, (6094620517051332877965959223269600650951573 - 4683469779240530439185019982269137976201163*I)/1237940039285380274899124224],
# [ (611292255597977285752123848828590587708323 - 216821743518546668382662964473055912169502*I)/77371252455336267181195264, (-1144023204575811464652692396337616594307487 + 12295317806312398617498029126807758490062855*I)/309485009821345068724781056, (-374093027769390002505693378578475235158281 - 573533923565898290299607461660384634333639*I)/77371252455336267181195264, (47405570632186659000138546955372796986832987 - 2837476058950808941605000274055970055096534*I)/1237940039285380274899124224, (-571573207393621076306216726219753090535121 + 533381457185823100878764749236639320783831*I)/77371252455336267181195264, (-7096548151856165056213543560958582513797519 - 24035731898756040059329175131592138642195366*I)/618970019642690137449562112, (2396762128833271142000266170154694033849225 + 1448501087375679588770230529017516492953051*I)/309485009821345068724781056, (-150609293845161968447166237242456473262037053 + 92581148080922977153207018003184520294188436*I)/4951760157141521099596496896, 5*(270278244730804315149356082977618054486347 - 1997830155222496880429743815321662710091562*I)/1237940039285380274899124224, (62978424789588828258068912690172109324360330 + 44803641177219298311493356929537007630129097*I)/2475880078570760549798248448, 19*(-451431106327656743945775812536216598712236 + 114924966793632084379437683991151177407937*I)/1237940039285380274899124224, (63417747628891221594106738815256002143915995 - 261508229397507037136324178612212080871150958*I)/9903520314283042199192993792],
# [ (-2144231934021288786200752920446633703357 + 2305614436009705803670842248131563850246*I)/1208925819614629174706176, (-90720949337459896266067589013987007078153 - 221951119475096403601562347412753844534569*I)/19342813113834066795298816, (11590973613116630788176337262688659880376 + 6514520676308992726483494976339330626159*I)/4835703278458516698824704, 3*(-131776217149000326618649542018343107657237 + 79095042939612668486212006406818285287004*I)/38685626227668133590597632, (10100577916793945997239221374025741184951 - 28631383488085522003281589065994018550748*I)/9671406556917033397649408, 67*(10090295594251078955008130473573667572549 + 10449901522697161049513326446427839676762*I)/77371252455336267181195264, (-54270981296988368730689531355811033930513 - 3413683117592637309471893510944045467443*I)/19342813113834066795298816, (440372322928679910536575560069973699181278 - 736603803202303189048085196176918214409081*I)/77371252455336267181195264, (33220374714789391132887731139763250155295 + 92055083048787219934030779066298919603554*I)/38685626227668133590597632, 5*(-594638554579967244348856981610805281527116 - 82309245323128933521987392165716076704057*I)/309485009821345068724781056, (128056368815300084550013708313312073721955 - 114619107488668120303579745393765245911404*I)/77371252455336267181195264, 21*(59839959255173222962789517794121843393573 + 241507883613676387255359616163487405826334*I)/618970019642690137449562112],
# [ (-13454485022325376674626653802541391955147 + 184471402121905621396582628515905949793486*I)/19342813113834066795298816, (-6158730123400322562149780662133074862437105 - 3416173052604643794120262081623703514107476*I)/154742504910672534362390528, (770558003844914708453618983120686116100419 - 127758381209767638635199674005029818518766*I)/77371252455336267181195264, (-4693005771813492267479835161596671660631703 + 12703585094750991389845384539501921531449948*I)/309485009821345068724781056, (-295028157441149027913545676461260860036601 - 841544569970643160358138082317324743450770*I)/77371252455336267181195264, (56716442796929448856312202561538574275502893 + 7216818824772560379753073185990186711454778*I)/1237940039285380274899124224, 15*(-87061038932753366532685677510172566368387 + 61306141156647596310941396434445461895538*I)/154742504910672534362390528, (-3455315109680781412178133042301025723909347 - 24969329563196972466388460746447646686670670*I)/618970019642690137449562112, (2453418854160886481106557323699250865361849 + 1497886802326243014471854112161398141242514*I)/309485009821345068724781056, (-151343224544252091980004429001205664193082173 + 90471883264187337053549090899816228846836628*I)/4951760157141521099596496896, (1652018205533026103358164026239417416432989 - 9959733619236515024261775397109724431400162*I)/1237940039285380274899124224, 3*(40676374242956907656984876692623172736522006 + 31023357083037817469535762230872667581366205*I)/4951760157141521099596496896],
# [ (-1226990509403328460274658603410696548387 - 4131739423109992672186585941938392788458*I)/1208925819614629174706176, (162392818524418973411975140074368079662703 + 23706194236915374831230612374344230400704*I)/9671406556917033397649408, (-3935678233089814180000602553655565621193 + 2283744757287145199688061892165659502483*I)/1208925819614629174706176, (-2400210250844254483454290806930306285131 - 315571356806370996069052930302295432758205*I)/19342813113834066795298816, (13365917938215281056563183751673390817910 + 15911483133819801118348625831132324863881*I)/4835703278458516698824704, 3*(-215950551370668982657516660700301003897855 + 51684341999223632631602864028309400489378*I)/38685626227668133590597632, (20886089946811765149439844691320027184765 - 30806277083146786592790625980769214361844*I)/9671406556917033397649408, (562180634592713285745940856221105667874855 + 1031543963988260765153550559766662245114916*I)/77371252455336267181195264, (-65820625814810177122941758625652476012867 - 12429918324787060890804395323920477537595*I)/19342813113834066795298816, (319147848192012911298771180196635859221089 - 402403304933906769233365689834404519960394*I)/38685626227668133590597632, (23035615120921026080284733394359587955057 + 115351677687031786114651452775242461310624*I)/38685626227668133590597632, (-3426830634881892756966440108592579264936130 - 1022954961164128745603407283836365128598559*I)/309485009821345068724781056],
# [ (-192574788060137531023716449082856117537757 - 69222967328876859586831013062387845780692*I)/19342813113834066795298816, (2736383768828013152914815341491629299773262 - 2773252698016291897599353862072533475408743*I)/77371252455336267181195264, (-23280005281223837717773057436155921656805 + 214784953368021840006305033048142888879224*I)/19342813113834066795298816, (-3035247484028969580570400133318947903462326 - 2195168903335435855621328554626336958674325*I)/77371252455336267181195264, (984552428291526892214541708637840971548653 - 64006622534521425620714598573494988589378*I)/77371252455336267181195264, (-3070650452470333005276715136041262898509903 + 7286424705750810474140953092161794621989080*I)/154742504910672534362390528, (-147848877109756404594659513386972921139270 - 416306113044186424749331418059456047650861*I)/38685626227668133590597632, (55272118474097814260289392337160619494260781 + 7494019668394781211907115583302403519488058*I)/1237940039285380274899124224, (-581537886583682322424771088996959213068864 + 542191617758465339135308203815256798407429*I)/77371252455336267181195264, (-6422548983676355789975736799494791970390991 - 23524183982209004826464749309156698827737702*I)/618970019642690137449562112, 7*(180747195387024536886923192475064903482083 + 84352527693562434817771649853047924991804*I)/154742504910672534362390528, (-135485179036717001055310712747643466592387031 + 102346575226653028836678855697782273460527608*I)/4951760157141521099596496896],
# [ (3384238362616083147067025892852431152105 + 156724444932584900214919898954874618256*I)/604462909807314587353088, (-59558300950677430189587207338385764871866 + 114427143574375271097298201388331237478857*I)/4835703278458516698824704, (-1356835789870635633517710130971800616227 - 7023484098542340388800213478357340875410*I)/1208925819614629174706176, (234884918567993750975181728413524549575881 + 79757294640629983786895695752733890213506*I)/9671406556917033397649408, (-7632732774935120473359202657160313866419 + 2905452608512927560554702228553291839465*I)/1208925819614629174706176, (52291747908702842344842889809762246649489 - 520996778817151392090736149644507525892649*I)/19342813113834066795298816, (17472406829219127839967951180375981717322 + 23464704213841582137898905375041819568669*I)/4835703278458516698824704, (-911026971811893092350229536132730760943307 + 150799318130900944080399439626714846752360*I)/38685626227668133590597632, (26234457233977042811089020440646443590687 - 45650293039576452023692126463683727692890*I)/9671406556917033397649408, 3*(288348388717468992528382586652654351121357 + 454526517721403048270274049572136109264668*I)/77371252455336267181195264, (-91583492367747094223295011999405657956347 - 12704691128268298435362255538069612411331*I)/19342813113834066795298816, (411208730251327843849027957710164064354221 - 569898526380691606955496789378230959965898*I)/38685626227668133590597632],
# [ (27127513117071487872628354831658811211795 - 37765296987901990355760582016892124833857*I)/4835703278458516698824704, (1741779916057680444272938534338833170625435 + 3083041729779495966997526404685535449810378*I)/77371252455336267181195264, 3*(-60642236251815783728374561836962709533401 - 24630301165439580049891518846174101510744*I)/19342813113834066795298816, 3*(445885207364591681637745678755008757483408 - 350948497734812895032502179455610024541643*I)/38685626227668133590597632, (-47373295621391195484367368282471381775684 + 219122969294089357477027867028071400054973*I)/19342813113834066795298816, (-2801565819673198722993348253876353741520438 - 2250142129822658548391697042460298703335701*I)/77371252455336267181195264, (801448252275607253266997552356128790317119 - 50890367688077858227059515894356594900558*I)/77371252455336267181195264, (-5082187758525931944557763799137987573501207 + 11610432359082071866576699236013484487676124*I)/309485009821345068724781056, (-328925127096560623794883760398247685166830 - 643447969697471610060622160899409680422019*I)/77371252455336267181195264, 15*(2954944669454003684028194956846659916299765 + 33434406416888505837444969347824812608566*I)/1237940039285380274899124224, (-415749104352001509942256567958449835766827 + 479330966144175743357171151440020955412219*I)/77371252455336267181195264, 3*(-4639987285852134369449873547637372282914255 - 11994411888966030153196659207284951579243273*I)/1237940039285380274899124224],
# [ (-478846096206269117345024348666145495601 + 1249092488629201351470551186322814883283*I)/302231454903657293676544, (-17749319421930878799354766626365926894989 - 18264580106418628161818752318217357231971*I)/1208925819614629174706176, (2801110795431528876849623279389579072819 + 363258850073786330770713557775566973248*I)/604462909807314587353088, (-59053496693129013745775512127095650616252 + 78143588734197260279248498898321500167517*I)/4835703278458516698824704, (-283186724922498212468162690097101115349 - 6443437753863179883794497936345437398276*I)/1208925819614629174706176, (188799118826748909206887165661384998787543 + 84274736720556630026311383931055307398820*I)/9671406556917033397649408, (-5482217151670072904078758141270295025989 + 1818284338672191024475557065444481298568*I)/1208925819614629174706176, (56564463395350195513805521309731217952281 - 360208541416798112109946262159695452898431*I)/19342813113834066795298816, 11*(1259539805728870739006416869463689438068 + 1409136581547898074455004171305324917387*I)/4835703278458516698824704, 5*(-123701190701414554945251071190688818343325 + 30997157322590424677294553832111902279712*I)/38685626227668133590597632, (16130917381301373033736295883982414239781 - 32752041297570919727145380131926943374516*I)/9671406556917033397649408, (650301385108223834347093740500375498354925 + 899526407681131828596801223402866051809258*I)/77371252455336267181195264],
# [ (9011388245256140876590294262420614839483 + 8167917972423946282513000869327525382672*I)/1208925819614629174706176, (-426393174084720190126376382194036323028924 + 180692224825757525982858693158209545430621*I)/9671406556917033397649408, (24588556702197802674765733448108154175535 - 45091766022876486566421953254051868331066*I)/4835703278458516698824704, (1872113939365285277373877183750416985089691 + 3030392393733212574744122057679633775773130*I)/77371252455336267181195264, (-222173405538046189185754954524429864167549 - 75193157893478637039381059488387511299116*I)/19342813113834066795298816, (2670821320766222522963689317316937579844558 - 2645837121493554383087981511645435472169191*I)/77371252455336267181195264, 5*(-2100110309556476773796963197283876204940 + 41957457246479840487980315496957337371937*I)/19342813113834066795298816, (-5733743755499084165382383818991531258980593 - 3328949988392698205198574824396695027195732*I)/154742504910672534362390528, (707827994365259025461378911159398206329247 - 265730616623227695108042528694302299777294*I)/77371252455336267181195264, (-1442501604682933002895864804409322823788319 + 11504137805563265043376405214378288793343879*I)/309485009821345068724781056, (-56130472299445561499538726459719629522285 - 61117552419727805035810982426639329818864*I)/9671406556917033397649408, (39053692321126079849054272431599539429908717 - 10209127700342570953247177602860848130710666*I)/1237940039285380274899124224]])
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512],
[ 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64],
[ -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128],
[ 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16],
[ 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M**10 == Matrix(S('''[
[ 7369525394972778926719607798014571861/604462909807314587353088 - 229284202061790301477392339912557559*I/151115727451828646838272, -19704281515163975949388435612632058035/1208925819614629174706176 + 14319858347987648723768698170712102887*I/302231454903657293676544, -3623281909451783042932142262164941211/604462909807314587353088 - 6039240602494288615094338643452320495*I/604462909807314587353088, 109260497799140408739847239685705357695/2417851639229258349412352 - 7427566006564572463236368211555511431*I/2417851639229258349412352, -16095803767674394244695716092817006641/2417851639229258349412352 + 10336681897356760057393429626719177583*I/1208925819614629174706176, -42207883340488041844332828574359769743/2417851639229258349412352 - 182332262671671273188016400290188468499*I/4835703278458516698824704],
[50566491050825573392726324995779608259/1208925819614629174706176 - 90047007594468146222002432884052362145*I/2417851639229258349412352, 74273703462900000967697427843983822011/1208925819614629174706176 + 265947522682943571171988741842776095421*I/1208925819614629174706176, -116900341394390200556829767923360888429/2417851639229258349412352 - 53153263356679268823910621474478756845*I/2417851639229258349412352, 195407378023867871243426523048612490249/1208925819614629174706176 - 1242417915995360200584837585002906728929*I/9671406556917033397649408, -863597594389821970177319682495878193/302231454903657293676544 + 476936100741548328800725360758734300481*I/9671406556917033397649408, -3154451590535653853562472176601754835575/19342813113834066795298816 - 232909875490506237386836489998407329215*I/2417851639229258349412352],
[ -1715444997702484578716037230949868543/302231454903657293676544 + 5009695651321306866158517287924120777*I/302231454903657293676544, -30551582497996879620371947949342101301/604462909807314587353088 - 7632518367986526187139161303331519629*I/151115727451828646838272, 312680739924495153190604170938220575/18889465931478580854784 - 108664334509328818765959789219208459*I/75557863725914323419136, -14693696966703036206178521686918865509/604462909807314587353088 + 72345386220900843930147151999899692401*I/1208925819614629174706176, -8218872496728882299722894680635296519/1208925819614629174706176 - 16776782833358893712645864791807664983*I/1208925819614629174706176, 143237839169380078671242929143670635137/2417851639229258349412352 + 2883817094806115974748882735218469447*I/2417851639229258349412352],
[ 3087979417831061365023111800749855987/151115727451828646838272 + 34441942370802869368851419102423997089*I/604462909807314587353088, -148309181940158040917731426845476175667/604462909807314587353088 - 263987151804109387844966835369350904919*I/9671406556917033397649408, 50259518594816377378747711930008883165/1208925819614629174706176 - 95713974916869240305450001443767979653*I/2417851639229258349412352, 153466447023875527996457943521467271119/2417851639229258349412352 + 517285524891117105834922278517084871349*I/2417851639229258349412352, -29184653615412989036678939366291205575/604462909807314587353088 - 27551322282526322041080173287022121083*I/1208925819614629174706176, 196404220110085511863671393922447671649/1208925819614629174706176 - 1204712019400186021982272049902206202145*I/9671406556917033397649408],
[ -2632581805949645784625606590600098779/151115727451828646838272 - 589957435912868015140272627522612771*I/37778931862957161709568, 26727850893953715274702844733506310247/302231454903657293676544 - 10825791956782128799168209600694020481*I/302231454903657293676544, -1036348763702366164044671908440791295/151115727451828646838272 + 3188624571414467767868303105288107375*I/151115727451828646838272, -36814959939970644875593411585393242449/604462909807314587353088 - 18457555789119782404850043842902832647*I/302231454903657293676544, 12454491297984637815063964572803058647/604462909807314587353088 - 340489532842249733975074349495329171*I/302231454903657293676544, -19547211751145597258386735573258916681/604462909807314587353088 + 87299583775782199663414539883938008933*I/1208925819614629174706176],
[ -40281994229560039213253423262678393183/604462909807314587353088 - 2939986850065527327299273003299736641*I/604462909807314587353088, 331940684638052085845743020267462794181/2417851639229258349412352 - 284574901963624403933361315517248458969*I/1208925819614629174706176, 6453843623051745485064693628073010961/302231454903657293676544 + 36062454107479732681350914931391590957*I/604462909807314587353088, -147665869053634695632880753646441962067/604462909807314587353088 - 305987938660447291246597544085345123927*I/9671406556917033397649408, 107821369195275772166593879711259469423/2417851639229258349412352 - 11645185518211204108659001435013326687*I/302231454903657293676544, 64121228424717666402009446088588091619/1208925819614629174706176 + 265557133337095047883844369272389762133*I/1208925819614629174706176]]'''))
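
# The remaining test_issue_17247_expression_blowup_* tests run their matrix
# operations under dotprodsimp(True), which simplifies intermediate products
# during matrix arithmetic so the symbolic entries stay small (regression
# tests for issue 17247).  The 6x6 matrix 1 + (-1)**(i+j)*I used in _5, _7
# and _10 through _12 is a checkerboard of 1 + I and 1 - I with only two
# distinct rows, so it has rank 2: its charpoly collapses to
# x**4*(x - 6)*(x - 6*I), its determinant and cofactors vanish, and its
# eigenvalues are 6, 6*I and 0 (with multiplicity 4).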
def test_issue_17247_expression_blowup_5():
M = Matrix(6, 6, lambda i, j: 1 + (-1)**(i+j)*I)
with dotprodsimp(True):
assert M.charpoly('x') == PurePoly(x**6 + (-6 - 6*I)*x**5 + 36*I*x**4, x, domain='EX')
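
# Tests _6, _8, _9 and _17 use an 8x8 matrix whose entries x + i increase by
# one along each row and by eight down each column, so every row equals the
# first row plus a constant and the rank is 2.  Hence all determinant
# algorithms return 0, rref() has exactly two pivots and the nullspace is
# 6-dimensional.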
def test_issue_17247_expression_blowup_6():
    M = Matrix(8, 8, [x + i for i in range(64)])
with dotprodsimp(True):
assert M.det('bareiss') == 0
def test_issue_17247_expression_blowup_7():
M = Matrix(6, 6, lambda i, j: 1 + (-1)**(i+j)*I)
with dotprodsimp(True):
assert M.det('berkowitz') == 0
def test_issue_17247_expression_blowup_8():
    M = Matrix(8, 8, [x + i for i in range(64)])
with dotprodsimp(True):
assert M.det('lu') == 0
def test_issue_17247_expression_blowup_9():
    M = Matrix(8, 8, [x + i for i in range(64)])
with dotprodsimp(True):
assert M.rref() == (Matrix([
[1, 0, -1, -2, -3, -4, -5, -6],
[0, 1, 2, 3, 4, 5, 6, 7],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]), (0, 1))
def test_issue_17247_expression_blowup_10():
M = Matrix(6, 6, lambda i, j: 1 + (-1)**(i+j)*I)
with dotprodsimp(True):
assert M.cofactor(0, 0) == 0
def test_issue_17247_expression_blowup_11():
M = Matrix(6, 6, lambda i, j: 1 + (-1)**(i+j)*I)
with dotprodsimp(True):
assert M.cofactor_matrix() == Matrix(6, 6, [0]*36)
def test_issue_17247_expression_blowup_12():
M = Matrix(6, 6, lambda i, j: 1 + (-1)**(i+j)*I)
with dotprodsimp(True):
assert M.eigenvals() == {6: 1, 6*I: 1, 0: 4}
def test_issue_17247_expression_blowup_13():
M = Matrix([
[ 0, 1 - x, x + 1, 1 - x],
[1 - x, x + 1, 0, x + 1],
[ 0, 1 - x, x + 1, 1 - x],
[ 0, 0, 1 - x, 0]])
with dotprodsimp(True):
ev = M.eigenvects()
assert ev[0][:2] == (0, 2)
assert ev[0][2][0] == Matrix([[0],[-1],[0],[1]])
assert ev[1][:2] == (x - sqrt(2)*(x - 1) + 1, 1)
assert (ev[1][2][0] - Matrix([
[-(-17*x**4 + 12*sqrt(2)*x**4 - 4*sqrt(2)*x**3 + 6*x**3 - 6*x - 4*sqrt(2)*x + 12*sqrt(2) + 17)/(-7*x**4 + 5*sqrt(2)*x**4 - 6*sqrt(2)*x**3 + 8*x**3 - 2*x**2 + 8*x + 6*sqrt(2)*x - 5*sqrt(2) - 7)],
[ (-7*x**3 + 5*sqrt(2)*x**3 - x**2 + sqrt(2)*x**2 - sqrt(2)*x - x - 5*sqrt(2) - 7)/(-3*x**3 + 2*sqrt(2)*x**3 - 2*sqrt(2)*x**2 + 3*x**2 + 2*sqrt(2)*x + 3*x - 3 - 2*sqrt(2))],
[ -(-3*x**2 + 2*sqrt(2)*x**2 + 2*x - 3 - 2*sqrt(2))/(-x**2 + sqrt(2)*x**2 - 2*sqrt(2)*x + 1 + sqrt(2))],
[ 1]])).expand() == Matrix([[0],[0],[0],[0]])
assert ev[2][:2] == (x + sqrt(2)*(x - 1) + 1, 1)
assert (ev[2][2][0] - Matrix([
[-(12*sqrt(2)*x**4 + 17*x**4 - 6*x**3 - 4*sqrt(2)*x**3 - 4*sqrt(2)*x + 6*x - 17 + 12*sqrt(2))/(7*x**4 + 5*sqrt(2)*x**4 - 6*sqrt(2)*x**3 - 8*x**3 + 2*x**2 - 8*x + 6*sqrt(2)*x - 5*sqrt(2) + 7)],
[ (7*x**3 + 5*sqrt(2)*x**3 + x**2 + sqrt(2)*x**2 - sqrt(2)*x + x - 5*sqrt(2) + 7)/(2*sqrt(2)*x**3 + 3*x**3 - 3*x**2 - 2*sqrt(2)*x**2 - 3*x + 2*sqrt(2)*x - 2*sqrt(2) + 3)],
[ -(2*sqrt(2)*x**2 + 3*x**2 - 2*x - 2*sqrt(2) + 3)/(x**2 + sqrt(2)*x**2 - 2*sqrt(2)*x - 1 + sqrt(2))],
[ 1]])).expand() == Matrix([[0],[0],[0],[0]])
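
# Tests _14 through _16 use an 8x8 matrix whose rows alternate between
# [1 + x, 1 - x, ...] and [1 - x, 1 + x, ...]; with only two distinct rows it
# has rank 2, so the echelon form has two nonzero rows and both the row space
# and the column space are two-dimensional.  Test _18 uses the analogous
# 6x6 matrix.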
def test_issue_17247_expression_blowup_14():
M = Matrix(8, 8, ([1+x, 1-x]*4 + [1-x, 1+x]*4)*4)
with dotprodsimp(True):
assert M.echelon_form() == Matrix([
[x + 1, 1 - x, x + 1, 1 - x, x + 1, 1 - x, x + 1, 1 - x],
[ 0, 4*x, 0, 4*x, 0, 4*x, 0, 4*x],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0]])
def test_issue_17247_expression_blowup_15():
M = Matrix(8, 8, ([1+x, 1-x]*4 + [1-x, 1+x]*4)*4)
with dotprodsimp(True):
assert M.rowspace() == [Matrix([[x + 1, 1 - x, x + 1, 1 - x, x + 1, 1 - x, x + 1, 1 - x]]), Matrix([[0, 4*x, 0, 4*x, 0, 4*x, 0, 4*x]])]
def test_issue_17247_expression_blowup_16():
M = Matrix(8, 8, ([1+x, 1-x]*4 + [1-x, 1+x]*4)*4)
with dotprodsimp(True):
assert M.columnspace() == [Matrix([[x + 1],[1 - x],[x + 1],[1 - x],[x + 1],[1 - x],[x + 1],[1 - x]]), Matrix([[1 - x],[x + 1],[1 - x],[x + 1],[1 - x],[x + 1],[1 - x],[x + 1]])]
def test_issue_17247_expression_blowup_17():
    M = Matrix(8, 8, [x + i for i in range(64)])
with dotprodsimp(True):
assert M.nullspace() == [
Matrix([[1],[-2],[1],[0],[0],[0],[0],[0]]),
Matrix([[2],[-3],[0],[1],[0],[0],[0],[0]]),
Matrix([[3],[-4],[0],[0],[1],[0],[0],[0]]),
Matrix([[4],[-5],[0],[0],[0],[1],[0],[0]]),
Matrix([[5],[-6],[0],[0],[0],[0],[1],[0]]),
Matrix([[6],[-7],[0],[0],[0],[0],[0],[1]])]
def test_issue_17247_expression_blowup_18():
M = Matrix(6, 6, ([1+x, 1-x]*3 + [1-x, 1+x]*3)*3)
with dotprodsimp(True):
assert not M.is_nilpotent()
def test_issue_17247_expression_blowup_19():
M = Matrix(S('''[
[ -3/4, 0, 1/4 + I/2, 0],
[ 0, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 1/2 - I, 0, 0, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert not M.is_diagonalizable()
def test_issue_17247_expression_blowup_20():
M = Matrix([
[x + 1, 1 - x, 0, 0],
[1 - x, x + 1, 0, x + 1],
[ 0, 1 - x, x + 1, 0],
[ 0, 0, 0, x + 1]])
with dotprodsimp(True):
assert M.diagonalize() == (Matrix([
[1, 1, 0, (x + 1)/(x - 1)],
[1, -1, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 1]]),
Matrix([
[2, 0, 0, 0],
[0, 2*x, 0, 0],
[0, 0, x + 1, 0],
[0, 0, 0, x + 1]]))
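
# Tests _21 through _25 invert the same 4x4 complex matrix with each
# available inversion method (GE, LU, ADJ, CH, LDL) and expect the identical
# exact result; _24 and _25 start from a SparseMatrix.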
def test_issue_17247_expression_blowup_21():
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.inv(method='GE') == Matrix(S('''[
[-26194832/3470993 - 31733264*I/3470993, 156352/3470993 + 10325632*I/3470993, 0, -7741283181072/3306971225785 + 2999007604624*I/3306971225785],
[4408224/3470993 - 9675328*I/3470993, -2422272/3470993 + 1523712*I/3470993, 0, -1824666489984/3306971225785 - 1401091949952*I/3306971225785],
[-26406945676288/22270005630769 + 10245925485056*I/22270005630769, 7453523312640/22270005630769 + 1601616519168*I/22270005630769, 633088/6416033 - 140288*I/6416033, 872209227109521408/21217636514687010905 + 6066405081802389504*I/21217636514687010905],
[0, 0, 0, -11328/952745 + 87616*I/952745]]'''))
def test_issue_17247_expression_blowup_22():
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.inv(method='LU') == Matrix(S('''[
[-26194832/3470993 - 31733264*I/3470993, 156352/3470993 + 10325632*I/3470993, 0, -7741283181072/3306971225785 + 2999007604624*I/3306971225785],
[4408224/3470993 - 9675328*I/3470993, -2422272/3470993 + 1523712*I/3470993, 0, -1824666489984/3306971225785 - 1401091949952*I/3306971225785],
[-26406945676288/22270005630769 + 10245925485056*I/22270005630769, 7453523312640/22270005630769 + 1601616519168*I/22270005630769, 633088/6416033 - 140288*I/6416033, 872209227109521408/21217636514687010905 + 6066405081802389504*I/21217636514687010905],
[0, 0, 0, -11328/952745 + 87616*I/952745]]'''))
def test_issue_17247_expression_blowup_23():
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.inv(method='ADJ').expand() == Matrix(S('''[
[-26194832/3470993 - 31733264*I/3470993, 156352/3470993 + 10325632*I/3470993, 0, -7741283181072/3306971225785 + 2999007604624*I/3306971225785],
[4408224/3470993 - 9675328*I/3470993, -2422272/3470993 + 1523712*I/3470993, 0, -1824666489984/3306971225785 - 1401091949952*I/3306971225785],
[-26406945676288/22270005630769 + 10245925485056*I/22270005630769, 7453523312640/22270005630769 + 1601616519168*I/22270005630769, 633088/6416033 - 140288*I/6416033, 872209227109521408/21217636514687010905 + 6066405081802389504*I/21217636514687010905],
[0, 0, 0, -11328/952745 + 87616*I/952745]]'''))
def test_issue_17247_expression_blowup_24():
M = SparseMatrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.inv(method='CH') == Matrix(S('''[
[-26194832/3470993 - 31733264*I/3470993, 156352/3470993 + 10325632*I/3470993, 0, -7741283181072/3306971225785 + 2999007604624*I/3306971225785],
[4408224/3470993 - 9675328*I/3470993, -2422272/3470993 + 1523712*I/3470993, 0, -1824666489984/3306971225785 - 1401091949952*I/3306971225785],
[-26406945676288/22270005630769 + 10245925485056*I/22270005630769, 7453523312640/22270005630769 + 1601616519168*I/22270005630769, 633088/6416033 - 140288*I/6416033, 872209227109521408/21217636514687010905 + 6066405081802389504*I/21217636514687010905],
[0, 0, 0, -11328/952745 + 87616*I/952745]]'''))
def test_issue_17247_expression_blowup_25():
M = SparseMatrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.inv(method='LDL') == Matrix(S('''[
[-26194832/3470993 - 31733264*I/3470993, 156352/3470993 + 10325632*I/3470993, 0, -7741283181072/3306971225785 + 2999007604624*I/3306971225785],
[4408224/3470993 - 9675328*I/3470993, -2422272/3470993 + 1523712*I/3470993, 0, -1824666489984/3306971225785 - 1401091949952*I/3306971225785],
[-26406945676288/22270005630769 + 10245925485056*I/22270005630769, 7453523312640/22270005630769 + 1601616519168*I/22270005630769, 633088/6416033 - 140288*I/6416033, 872209227109521408/21217636514687010905 + 6066405081802389504*I/21217636514687010905],
[0, 0, 0, -11328/952745 + 87616*I/952745]]'''))
def test_issue_17247_expression_blowup_26():
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64, -9/32 - I/16, 183/256 - 97*I/128],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512, -219/128 + 115*I/256, 6301/4096 - 6609*I/1024],
[ 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64, 1/4 - 5*I/16, 65/128 + 87*I/64],
[ -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128, 85/256 - 33*I/16, 805/128 + 2415*I/512],
[ 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16, 1/4 + I/2, -129/64 - 9*I/64],
[ 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128, 125/64 + 87*I/64, -2063/256 + 541*I/128],
[ -2, 17/4 - 13*I/2, 1 + I, -19/4 + 5*I/4, 1/2 - I, 9/4 + 55*I/16, -3/4, 45/32 - 37*I/16],
[ 1/4 + 13*I/4, -825/64 - 147*I/32, 21/8 + I, -537/64 + 143*I/16, -5/8 - 39*I/16, 2473/256 + 137*I/64, -149/64 + 49*I/32, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.rank() == 4
def test_issue_17247_expression_blowup_27():
M = Matrix([
[ 0, 1 - x, x + 1, 1 - x],
[1 - x, x + 1, 0, x + 1],
[ 0, 1 - x, x + 1, 1 - x],
[ 0, 0, 1 - x, 0]])
with dotprodsimp(True):
P, J = M.jordan_form()
assert P.expand() == Matrix(S('''[
[ 0, 4*x/(x**2 - 2*x + 1), -(-17*x**4 + 12*sqrt(2)*x**4 - 4*sqrt(2)*x**3 + 6*x**3 - 6*x - 4*sqrt(2)*x + 12*sqrt(2) + 17)/(-7*x**4 + 5*sqrt(2)*x**4 - 6*sqrt(2)*x**3 + 8*x**3 - 2*x**2 + 8*x + 6*sqrt(2)*x - 5*sqrt(2) - 7), -(12*sqrt(2)*x**4 + 17*x**4 - 6*x**3 - 4*sqrt(2)*x**3 - 4*sqrt(2)*x + 6*x - 17 + 12*sqrt(2))/(7*x**4 + 5*sqrt(2)*x**4 - 6*sqrt(2)*x**3 - 8*x**3 + 2*x**2 - 8*x + 6*sqrt(2)*x - 5*sqrt(2) + 7)],
[x - 1, x/(x - 1) + 1/(x - 1), (-7*x**3 + 5*sqrt(2)*x**3 - x**2 + sqrt(2)*x**2 - sqrt(2)*x - x - 5*sqrt(2) - 7)/(-3*x**3 + 2*sqrt(2)*x**3 - 2*sqrt(2)*x**2 + 3*x**2 + 2*sqrt(2)*x + 3*x - 3 - 2*sqrt(2)), (7*x**3 + 5*sqrt(2)*x**3 + x**2 + sqrt(2)*x**2 - sqrt(2)*x + x - 5*sqrt(2) + 7)/(2*sqrt(2)*x**3 + 3*x**3 - 3*x**2 - 2*sqrt(2)*x**2 - 3*x + 2*sqrt(2)*x - 2*sqrt(2) + 3)],
[ 0, 1, -(-3*x**2 + 2*sqrt(2)*x**2 + 2*x - 3 - 2*sqrt(2))/(-x**2 + sqrt(2)*x**2 - 2*sqrt(2)*x + 1 + sqrt(2)), -(2*sqrt(2)*x**2 + 3*x**2 - 2*x - 2*sqrt(2) + 3)/(x**2 + sqrt(2)*x**2 - 2*sqrt(2)*x - 1 + sqrt(2))],
[1 - x, 0, 1, 1]]''')).expand()
assert J == Matrix(S('''[
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, x - sqrt(2)*(x - 1) + 1, 0],
[0, 0, 0, x + sqrt(2)*(x - 1) + 1]]'''))
def test_issue_17247_expression_blowup_28():
M = Matrix(S('''[
[ -3/4, 45/32 - 37*I/16, 0, 0],
[-149/64 + 49*I/32, -177/128 - 1369*I/128, 0, -2063/256 + 541*I/128],
[ 0, 9/4 + 55*I/16, 2473/256 + 137*I/64, 0],
[ 0, 0, 0, -177/128 - 1369*I/128]]'''))
with dotprodsimp(True):
assert M.singular_values() == S('''[
sqrt(14609315/131072 + sqrt(64789115132571/2147483648 - 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3) + 76627253330829751075/(35184372088832*sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))) - 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)))/2 + sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))/2),
sqrt(14609315/131072 - sqrt(64789115132571/2147483648 - 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3) + 76627253330829751075/(35184372088832*sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))) - 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)))/2 + sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))/2),
sqrt(14609315/131072 - sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))/2 + sqrt(64789115132571/2147483648 - 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3) - 76627253330829751075/(35184372088832*sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))) - 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)))/2),
sqrt(14609315/131072 - sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))/2 - sqrt(64789115132571/2147483648 - 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3) - 76627253330829751075/(35184372088832*sqrt(64789115132571/4294967296 + 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)) + 2*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3))) - 3546944054712886603889144627/(110680464442257309696*(25895222463957462655758224991455280215303/633825300114114700748351602688 + sqrt(1213909058710955930446995195883114969038524625997915131236390724543989220134670)*I/22282920707136844948184236032)**(1/3)))/2)]''')
def test_issue_16823():
# This still needs to be fixed if not using dotprodsimp.
M = Matrix(S('''[
[1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I,1/4+1/2*I,-129/64-9/64*I,1/4-5/16*I,65/128+87/64*I,-9/32-1/16*I,183/256-97/128*I,3/64+13/64*I,-23/32-59/256*I,15/128-3/32*I,19/256+551/1024*I],
[21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I,125/64+87/64*I,-2063/256+541/128*I,85/256-33/16*I,805/128+2415/512*I,-219/128+115/256*I,6301/4096-6609/1024*I,119/128+143/128*I,-10879/2048+4343/4096*I,129/256-549/512*I,42533/16384+29103/8192*I],
[-2,17/4-13/2*I,1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I,1/4+1/2*I,-129/64-9/64*I,1/4-5/16*I,65/128+87/64*I,-9/32-1/16*I,183/256-97/128*I,3/64+13/64*I,-23/32-59/256*I],
[1/4+13/4*I,-825/64-147/32*I,21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I,125/64+87/64*I,-2063/256+541/128*I,85/256-33/16*I,805/128+2415/512*I,-219/128+115/256*I,6301/4096-6609/1024*I,119/128+143/128*I,-10879/2048+4343/4096*I],
[-4*I,27/2+6*I,-2,17/4-13/2*I,1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I,1/4+1/2*I,-129/64-9/64*I,1/4-5/16*I,65/128+87/64*I,-9/32-1/16*I,183/256-97/128*I],
[1/4+5/2*I,-23/8-57/16*I,1/4+13/4*I,-825/64-147/32*I,21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I,125/64+87/64*I,-2063/256+541/128*I,85/256-33/16*I,805/128+2415/512*I,-219/128+115/256*I,6301/4096-6609/1024*I],
[-4,9-5*I,-4*I,27/2+6*I,-2,17/4-13/2*I,1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I,1/4+1/2*I,-129/64-9/64*I,1/4-5/16*I,65/128+87/64*I],
[-2*I,119/8+29/4*I,1/4+5/2*I,-23/8-57/16*I,1/4+13/4*I,-825/64-147/32*I,21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I,125/64+87/64*I,-2063/256+541/128*I,85/256-33/16*I,805/128+2415/512*I],
[0,-6,-4,9-5*I,-4*I,27/2+6*I,-2,17/4-13/2*I,1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I,1/4+1/2*I,-129/64-9/64*I],
[1,-9/4+3*I,-2*I,119/8+29/4*I,1/4+5/2*I,-23/8-57/16*I,1/4+13/4*I,-825/64-147/32*I,21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I,125/64+87/64*I,-2063/256+541/128*I],
[0,-4*I,0,-6,-4,9-5*I,-4*I,27/2+6*I,-2,17/4-13/2*I,1+I,-19/4+5/4*I,1/2-I,9/4+55/16*I,-3/4,45/32-37/16*I],
[0,1/4+1/2*I,1,-9/4+3*I,-2*I,119/8+29/4*I,1/4+5/2*I,-23/8-57/16*I,1/4+13/4*I,-825/64-147/32*I,21/8+I,-537/64+143/16*I,-5/8-39/16*I,2473/256+137/64*I,-149/64+49/32*I,-177/128-1369/128*I]]'''))
with dotprodsimp(True):
assert M.rank() == 8
def test_issue_18531():
# solve_linear_system still needs fixing but the rref works.
M = Matrix([
[1, 1, 1, 1, 1, 0, 1, 0, 0],
[1 + sqrt(2), -1 + sqrt(2), 1 - sqrt(2), -sqrt(2) - 1, 1, 1, -1, 1, 1],
[-5 + 2*sqrt(2), -5 - 2*sqrt(2), -5 - 2*sqrt(2), -5 + 2*sqrt(2), -7, 2, -7, -2, 0],
[-3*sqrt(2) - 1, 1 - 3*sqrt(2), -1 + 3*sqrt(2), 1 + 3*sqrt(2), -7, -5, 7, -5, 3],
[7 - 4*sqrt(2), 4*sqrt(2) + 7, 4*sqrt(2) + 7, 7 - 4*sqrt(2), 7, -12, 7, 12, 0],
[-1 + 3*sqrt(2), 1 + 3*sqrt(2), -3*sqrt(2) - 1, 1 - 3*sqrt(2), 7, -5, -7, -5, 3],
[-3 + 2*sqrt(2), -3 - 2*sqrt(2), -3 - 2*sqrt(2), -3 + 2*sqrt(2), -1, 2, -1, -2, 0],
[1 - sqrt(2), -sqrt(2) - 1, 1 + sqrt(2), -1 + sqrt(2), -1, 1, 1, 1, 1]
])
with dotprodsimp(True):
assert M.rref() == (Matrix([
[1, 0, 0, 0, 0, 0, 0, 0, 1/2],
[0, 1, 0, 0, 0, 0, 0, 0, -1/2],
[0, 0, 1, 0, 0, 0, 0, 0, 1/2],
[0, 0, 0, 1, 0, 0, 0, 0, -1/2],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, -1/2],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, -1/2]]), (0, 1, 2, 3, 4, 5, 6, 7))
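
# End of the dotprodsimp regression tests; the tests below exercise the
# general Matrix API (construction, slicing, elementwise operations,
# inverses, calculus helpers, ...).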
def test_creation():
raises(ValueError, lambda: Matrix(5, 5, range(20)))
raises(ValueError, lambda: Matrix(5, -1, []))
raises(IndexError, lambda: Matrix((1, 2))[2])
with raises(IndexError):
Matrix((1, 2))[1:2] = 5
with raises(IndexError):
Matrix((1, 2))[3] = 5
assert Matrix() == Matrix([]) == Matrix([[]]) == Matrix(0, 0, [])
# anything can go into a matrix (laplace_transform uses tuples)
assert Matrix([[[], ()]]).tolist() == [[[], ()]]
assert Matrix([[[], ()]]).T.tolist() == [[[]], [()]]
a = Matrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = Matrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
assert Matrix(b) == b
c23 = Matrix(2, 3, range(1, 7))
c13 = Matrix(1, 3, range(7, 10))
c = Matrix([c23, c13])
assert c.cols == 3
assert c.rows == 3
assert c[:] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
assert Matrix(eye(2)) == eye(2)
assert ImmutableMatrix(ImmutableMatrix(eye(2))) == ImmutableMatrix(eye(2))
assert ImmutableMatrix(c) == c.as_immutable()
assert Matrix(ImmutableMatrix(c)) == ImmutableMatrix(c).as_mutable()
assert c is not Matrix(c)
dat = [[ones(3,2), ones(3,3)*2], [ones(2,3)*3, ones(2,2)*4]]
M = Matrix(dat)
assert M == Matrix([
[1, 1, 2, 2, 2],
[1, 1, 2, 2, 2],
[1, 1, 2, 2, 2],
[3, 3, 3, 4, 4],
[3, 3, 3, 4, 4]])
assert M.tolist() != dat
# keep block form if evaluate=False
assert Matrix(dat, evaluate=False).tolist() == dat
A = MatrixSymbol("A", 2, 2)
dat = [ones(2), A]
assert Matrix(dat) == Matrix([
[ 1, 1],
[ 1, 1],
[A[0, 0], A[0, 1]],
[A[1, 0], A[1, 1]]])
assert Matrix(dat, evaluate=False).tolist() == [[i] for i in dat]
    # 0-dim tolerance: a completely empty (0 x 0) block is ignored, but a
    # block with zero rows or columns and a mismatched other dimension is an
    # error
assert Matrix([ones(2), ones(0)]) == Matrix([ones(2)])
raises(ValueError, lambda: Matrix([ones(2), ones(0, 3)]))
raises(ValueError, lambda: Matrix([ones(2), ones(3, 0)]))
def test_irregular_block():
assert Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3,
ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7) == Matrix([
[1, 2, 2, 2, 3, 3],
[1, 2, 2, 2, 3, 3],
[4, 2, 2, 2, 5, 5],
[6, 6, 7, 7, 5, 5]])
def test_tolist():
lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
m = Matrix(lst)
assert m.tolist() == lst
def test_as_mutable():
assert zeros(0, 3).as_mutable() == zeros(0, 3)
assert zeros(0, 3).as_immutable() == ImmutableMatrix(zeros(0, 3))
assert zeros(3, 0).as_immutable() == ImmutableMatrix(zeros(3, 0))
def test_slicing():
m0 = eye(4)
assert m0[:3, :3] == eye(3)
assert m0[2:4, 0:2] == zeros(2)
m1 = Matrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == Matrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == Matrix(2, 1, (2, 3))
m2 = Matrix([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == Matrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == Matrix([[8, 9, 10, 11], [12, 13, 14, 15]])
def test_submatrix_assignment():
m = zeros(4)
m[2:4, 2:4] = eye(2)
assert m == Matrix(((0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)))
m[:2, :2] = eye(2)
assert m == eye(4)
m[:, 0] = Matrix(4, 1, (1, 2, 3, 4))
assert m == Matrix(((1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)))
m[:, :] = zeros(4)
assert m == zeros(4)
m[:, :] = [(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)]
assert m == Matrix(((1, 2, 3, 4),
(5, 6, 7, 8),
(9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == Matrix(((0, 2, 3, 4),
(0, 6, 7, 8),
(9, 10, 11, 12),
(13, 14, 15, 16)))
def test_extract():
m = Matrix(4, 3, lambda i, j: i*3 + j)
assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])
assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])
assert m.extract(range(4), range(3)) == m
raises(IndexError, lambda: m.extract([4], [0]))
raises(IndexError, lambda: m.extract([0], [3]))
def test_reshape():
m0 = eye(3)
assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = Matrix(3, 4, lambda i, j: i + j)
assert m1.reshape(
4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))
assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))
def test_applyfunc():
m0 = eye(3)
assert m0.applyfunc(lambda x: 2*x) == eye(3)*2
assert m0.applyfunc(lambda x: 0) == zeros(3)
def test_expand():
m0 = Matrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
# Test if expand() returns a matrix
m1 = m0.expand()
assert m1 == Matrix(
[[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
a = Symbol('a', real=True)
assert Matrix([exp(I*a)]).expand(complex=True) == \
Matrix([cos(a) + I*sin(a)])
assert Matrix([[0, 1, 2], [0, 0, -1], [0, 0, 0]]).exp() == Matrix([
[1, 1, Rational(3, 2)],
[0, 1, -1],
[0, 0, 1]]
)
def test_refine():
m0 = Matrix([[Abs(x)**2, sqrt(x**2)],
[sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])
m1 = m0.refine(Q.real(x) & Q.real(y))
assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])
m1 = m0.refine(Q.positive(x) & Q.positive(y))
assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])
m1 = m0.refine(Q.negative(x) & Q.negative(y))
assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])
def test_random():
M = randMatrix(3, 3)
M = randMatrix(3, 3, seed=3)
assert M == randMatrix(3, 3, seed=3)
M = randMatrix(3, 4, 0, 150)
M = randMatrix(3, seed=4, symmetric=True)
assert M == randMatrix(3, seed=4, symmetric=True)
S = M.copy()
S.simplify()
assert S == M # doesn't fail when elements are Numbers, not int
rng = random.Random(4)
assert M == randMatrix(3, symmetric=True, prng=rng)
# Ensure symmetry
for size in (10, 11): # Test odd and even
for percent in (100, 70, 30):
M = randMatrix(size, symmetric=True, percent=percent, prng=rng)
assert M == M.T
M = randMatrix(10, min=1, percent=70)
zero_count = 0
for i in range(M.shape[0]):
for j in range(M.shape[1]):
if M[i, j] == 0:
zero_count += 1
assert zero_count == 30
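
# inv() is checked against every supported dense method (LU, ADJ, CH, LDL,
# QR), plus the BLOCK method on a larger 0/1 matrix, and the immutable
# matrix classes are expected to return an inverse of their own type.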
def test_inverse():
A = eye(4)
assert A.inv() == eye(4)
assert A.inv(method="LU") == eye(4)
assert A.inv(method="ADJ") == eye(4)
assert A.inv(method="CH") == eye(4)
assert A.inv(method="LDL") == eye(4)
assert A.inv(method="QR") == eye(4)
A = Matrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
Ainv = A.inv()
assert A*Ainv == eye(3)
assert A.inv(method="LU") == Ainv
assert A.inv(method="ADJ") == Ainv
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
assert A.inv(method="QR") == Ainv
AA = Matrix([[0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1],
[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0]])
assert AA.inv(method="BLOCK") * AA == eye(AA.shape[0])
# test that immutability is not a problem
cls = ImmutableMatrix
m = cls([[48, 49, 31],
[ 9, 71, 94],
[59, 28, 65]])
assert all(type(m.inv(s)) is cls for s in 'GE ADJ LU CH LDL QR'.split())
cls = ImmutableSparseMatrix
m = cls([[48, 49, 31],
[ 9, 71, 94],
[59, 28, 65]])
assert all(type(m.inv(s)) is cls for s in 'GE ADJ LU CH LDL QR'.split())
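
# inv_mod(m) computes the matrix inverse over the integers modulo m; it
# raises NonSquareMatrixError for non-square input and ValueError when the
# determinant is not invertible modulo m.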
def test_matrix_inverse_mod():
A = Matrix(2, 1, [1, 0])
raises(NonSquareMatrixError, lambda: A.inv_mod(2))
A = Matrix(2, 2, [1, 0, 0, 0])
raises(ValueError, lambda: A.inv_mod(2))
A = Matrix(2, 2, [1, 2, 3, 4])
Ai = Matrix(2, 2, [1, 1, 0, 1])
assert A.inv_mod(3) == Ai
A = Matrix(2, 2, [1, 0, 0, 1])
assert A.inv_mod(2) == A
A = Matrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
raises(ValueError, lambda: A.inv_mod(5))
A = Matrix(3, 3, [5, 1, 3, 2, 6, 0, 2, 1, 1])
Ai = Matrix(3, 3, [6, 8, 0, 1, 5, 6, 5, 6, 4])
assert A.inv_mod(9) == Ai
A = Matrix(3, 3, [1, 6, -3, 4, 1, -5, 3, -5, 5])
Ai = Matrix(3, 3, [4, 3, 3, 1, 2, 5, 1, 5, 1])
assert A.inv_mod(6) == Ai
A = Matrix(3, 3, [1, 6, 1, 4, 1, 5, 3, 2, 5])
Ai = Matrix(3, 3, [6, 0, 3, 6, 6, 4, 1, 6, 1])
assert A.inv_mod(7) == Ai
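
# jacobian(): for a row vector of expressions L and symbols s, entry (i, j)
# is diff(L[i], s[j]).  hessian(f, s) is the matrix of second derivatives
# diff(f, s[i], s[j]); constraint functions passed as the third argument are
# bordered onto the Hessian, their gradients filling the first row and
# column.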
def test_jacobian_hessian():
L = Matrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = Matrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == Matrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
f = x**2*y
syms = [x, y]
assert hessian(f, syms) == Matrix([[2*y, 2*x], [2*x, 0]])
f = x**2*y**3
assert hessian(f, syms) == \
Matrix([[2*y**3, 6*x*y**2], [6*x*y**2, 6*x**2*y]])
f = z + x*y**2
g = x**2 + 2*y**3
ans = Matrix([[0, 2*y],
[2*y, 2*x]])
assert ans == hessian(f, Matrix([x, y]))
assert ans == hessian(f, Matrix([x, y]).T)
assert hessian(f, (y, x), [g]) == Matrix([
[ 0, 6*y**2, 2*x],
[6*y**2, 2*x, 2*y],
[ 2*x, 2*y, 0]])
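
# wronskian([f1, ..., fn], x) is the determinant of the n x n matrix whose
# (i, j) entry is the i-th derivative of fj with respect to x (i starting at
# 0); the Wronskian of an empty list is defined as 1.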
def test_wronskian():
assert wronskian([cos(x), sin(x)], x) == cos(x)**2 + sin(x)**2
assert wronskian([exp(x), exp(2*x)], x) == exp(3*x)
assert wronskian([exp(x), x], x) == exp(x) - x*exp(x)
assert wronskian([1, x, x**2], x) == 2
w1 = -6*exp(x)*sin(x)*x + 6*cos(x)*exp(x)*x**2 - 6*exp(x)*cos(x)*x - \
exp(x)*cos(x)*x**3 + exp(x)*sin(x)*x**3
assert wronskian([exp(x), cos(x), x**3], x).expand() == w1
assert wronskian([exp(x), cos(x), x**3], x, method='berkowitz').expand() \
== w1
w2 = -x**3*cos(x)**2 - x**3*sin(x)**2 - 6*x*cos(x)**2 - 6*x*sin(x)**2
assert wronskian([sin(x), cos(x), x**3], x).expand() == w2
assert wronskian([sin(x), cos(x), x**3], x, method='berkowitz').expand() \
== w2
assert wronskian([], x) == 1
def test_subs():
assert Matrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
assert Matrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
Matrix([[-1, 2], [-3, 4]])
assert Matrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
Matrix([[-1, 2], [-3, 4]])
assert Matrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
assert Matrix([x*y]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
Matrix([(x - 1)*(y - 1)])
for cls in classes:
assert Matrix([[2, 0], [0, 2]]) == cls.eye(2).subs(1, 2)
def test_xreplace():
assert Matrix([[1, x], [x, 4]]).xreplace({x: 5}) == \
Matrix([[1, 5], [5, 4]])
assert Matrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
for cls in classes:
assert Matrix([[2, 0], [0, 2]]) == cls.eye(2).xreplace({1: 2})
def test_simplify():
n = Symbol('n')
f = Function('f')
M = Matrix([[ 1/x + 1/y, (x + x*y) / x ],
[ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
M.simplify()
assert M == Matrix([[ (x + y)/(x * y), 1 + y ],
[ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
eq = (1 + x)**2
M = Matrix([[eq]])
M.simplify()
assert M == Matrix([[eq]])
    M.simplify(ratio=oo)  # in-place; the mutable simplify() returns None
assert M == Matrix([[eq.simplify(ratio=oo)]])
def test_transpose():
M = Matrix([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]])
assert M.T == Matrix( [ [1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 5],
[6, 6],
[7, 7],
[8, 8],
[9, 9],
[0, 0] ])
assert M.T.T == M
assert M.T == M.transpose()
def test_conjugate():
M = Matrix([[0, I, 5],
[1, 2, 0]])
assert M.T == Matrix([[0, 1],
[I, 2],
[5, 0]])
assert M.C == Matrix([[0, -I, 5],
[1, 2, 0]])
assert M.C == M.conjugate()
assert M.H == M.T.C
assert M.H == Matrix([[ 0, 1],
[-I, 2],
[ 5, 0]])
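
# M.D is the Dirac conjugate M.H * gamma0 with gamma0 = diag(1, 1, -1, -1)
# in the Dirac representation; it is only defined when the matrix has four
# rows, hence the AttributeError for eye(3).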
def test_conj_dirac():
raises(AttributeError, lambda: eye(3).D)
M = Matrix([[1, I, I, I],
[0, 1, I, I],
[0, 0, 1, I],
[0, 0, 0, 1]])
assert M.D == Matrix([[ 1, 0, 0, 0],
[-I, 1, 0, 0],
[-I, -I, -1, 0],
[-I, -I, I, -1]])
def test_trace():
M = Matrix([[1, 0, 0],
[0, 5, 0],
[0, 0, 8]])
assert M.trace() == 14
def test_shape():
M = Matrix([[x, 0, 0],
[0, y, 0]])
assert M.shape == (2, 3)
def test_col_row_op():
M = Matrix([[x, 0, 0],
[0, y, 0]])
M.row_op(1, lambda r, j: r + j + 1)
assert M == Matrix([[x, 0, 0],
[1, y + 2, 3]])
M.col_op(0, lambda c, j: c + y**j)
assert M == Matrix([[x + 1, 0, 0],
[1 + y, y + 2, 3]])
    # neither row nor slice gives copies that allow the original matrix to
    # be changed
assert M.row(0) == Matrix([[x + 1, 0, 0]])
r1 = M.row(0)
r1[0] = 42
assert M[0, 0] == x + 1
r1 = M[0, :-1] # also testing negative slice
r1[0] = 42
assert M[0, 0] == x + 1
c1 = M.col(0)
assert c1 == Matrix([x + 1, 1 + y])
c1[0] = 0
assert M[0, 0] == x + 1
c1 = M[:, 0]
c1[0] = 42
assert M[0, 0] == x + 1
def test_zip_row_op():
for cls in classes[:2]: # XXX: immutable matrices don't support row ops
M = cls.eye(3)
M.zip_row_op(1, 0, lambda v, u: v + 2*u)
assert M == cls([[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
M = cls.eye(3)*2
M[0, 1] = -1
        M.zip_row_op(1, 0, lambda v, u: v + 2*u)
assert M == cls([[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
def test_issue_3950():
m = Matrix([1, 2, 3])
a = Matrix([1, 2, 3])
b = Matrix([2, 2, 3])
assert not (m in [])
assert not (m in [1])
assert m != 1
assert m == a
assert m != b
def test_issue_3981():
class Index1:
def __index__(self):
return 1
class Index2:
def __index__(self):
return 2
index1 = Index1()
index2 = Index2()
m = Matrix([1, 2, 3])
assert m[index2] == 3
m[index2] = 5
assert m[2] == 5
m = Matrix([[1, 2, 3], [4, 5, 6]])
assert m[index1, index2] == 6
assert m[1, index2] == 6
assert m[index1, 2] == 6
m[index1, index2] = 4
assert m[1, 2] == 4
m[1, index2] = 6
assert m[1, 2] == 6
m[index1, 2] = 8
assert m[1, 2] == 8
def test_evalf():
a = Matrix([sqrt(5), 6])
assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_is_symbolic():
a = Matrix([[x, x], [x, x]])
assert a.is_symbolic() is True
a = Matrix([[1, 2, 3, 4], [5, 6, 7, 8]])
assert a.is_symbolic() is False
a = Matrix([[1, 2, 3, 4], [5, 6, x, 8]])
assert a.is_symbolic() is True
a = Matrix([[1, x, 3]])
assert a.is_symbolic() is True
a = Matrix([[1, 2, 3]])
assert a.is_symbolic() is False
a = Matrix([[1], [x], [3]])
assert a.is_symbolic() is True
a = Matrix([[1], [2], [3]])
assert a.is_symbolic() is False
def test_is_upper():
a = Matrix([[1, 2, 3]])
assert a.is_upper is True
a = Matrix([[1], [2], [3]])
assert a.is_upper is False
a = zeros(4, 2)
assert a.is_upper is True
def test_is_lower():
a = Matrix([[1, 2, 3]])
assert a.is_lower is False
a = Matrix([[1], [2], [3]])
assert a.is_lower is True
def test_is_nilpotent():
a = Matrix(4, 4, [0, 2, 1, 6, 0, 0, 1, 2, 0, 0, 0, 3, 0, 0, 0, 0])
assert a.is_nilpotent()
a = Matrix([[1, 0], [0, 1]])
assert not a.is_nilpotent()
a = Matrix([])
assert a.is_nilpotent()
def test_zeros_ones_fill():
n, m = 3, 5
a = zeros(n, m)
a.fill( 5 )
b = 5 * ones(n, m)
assert a == b
assert a.rows == b.rows == 3
assert a.cols == b.cols == 5
assert a.shape == b.shape == (3, 5)
assert zeros(2) == zeros(2, 2)
assert ones(2) == ones(2, 2)
assert zeros(2, 3) == Matrix(2, 3, [0]*6)
assert ones(2, 3) == Matrix(2, 3, [1]*6)
def test_empty_zeros():
a = zeros(0)
assert a == Matrix()
a = zeros(0, 2)
assert a.rows == 0
assert a.cols == 2
a = zeros(2, 0)
assert a.rows == 2
assert a.cols == 0
def test_issue_3749():
a = Matrix([[x**2, x*y], [x*sin(y), x*cos(y)]])
assert a.diff(x) == Matrix([[2*x, y], [sin(y), cos(y)]])
assert Matrix([
[x, -x, x**2],
[exp(x), 1/x - exp(-x), x + 1/x]]).limit(x, oo) == \
Matrix([[oo, -oo, oo], [oo, 0, oo]])
assert Matrix([
[(exp(x) - 1)/x, 2*x + y*x, x**x ],
[1/x, abs(x), abs(sin(x + 1))]]).limit(x, 0) == \
Matrix([[1, 0, 1], [oo, 0, sin(1)]])
assert a.integrate(x) == Matrix([
[Rational(1, 3)*x**3, y*x**2/2],
[x**2*sin(y)/2, x**2*cos(y)/2]])
def test_inv_iszerofunc():
A = eye(4)
A.col_swap(0, 1)
for method in "GE", "LU":
assert A.inv(method=method, iszerofunc=lambda x: x == 0) == \
A.inv(method="ADJ")
def test_jacobian_metrics():
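    # For the polar-coordinate map (rho, phi) -> (rho*cos(phi), rho*sin(phi)),
    # the induced metric J.T*J simplifies to diag(1, rho**2).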
rho, phi = symbols("rho,phi")
X = Matrix([rho*cos(phi), rho*sin(phi)])
Y = Matrix([rho, phi])
J = X.jacobian(Y)
assert J == X.jacobian(Y.T)
assert J == (X.T).jacobian(Y)
assert J == (X.T).jacobian(Y.T)
g = J.T*eye(J.shape[0])*J
g = g.applyfunc(trigsimp)
assert g == Matrix([[1, 0], [0, rho**2]])
def test_jacobian2():
rho, phi = symbols("rho,phi")
X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
Y = Matrix([rho, phi])
J = Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0],
])
assert X.jacobian(Y) == J
def test_issue_4564():
X = Matrix([exp(x + y + z), exp(x + y + z), exp(x + y + z)])
Y = Matrix([x, y, z])
for i in range(1, 3):
for j in range(1, 3):
X_slice = X[:i, :]
Y_slice = Y[:j, :]
J = X_slice.jacobian(Y_slice)
assert J.rows == i
assert J.cols == j
for k in range(j):
assert J[:, k] == X_slice
def test_nonvectorJacobian():
X = Matrix([[exp(x + y + z), exp(x + y + z)],
[exp(x + y + z), exp(x + y + z)]])
raises(TypeError, lambda: X.jacobian(Matrix([x, y, z])))
X = X[0, :]
Y = Matrix([[x, y], [x, z]])
raises(TypeError, lambda: X.jacobian(Y))
raises(TypeError, lambda: X.jacobian(Matrix([ [x, y], [x, z] ])))
def test_vec():
m = Matrix([[1, 3], [2, 4]])
m_vec = m.vec()
assert m_vec.cols == 1
for i in range(4):
assert m_vec[i] == i + 1
def test_vech():
m = Matrix([[1, 2], [2, 3]])
m_vech = m.vech()
assert m_vech.cols == 1
for i in range(3):
assert m_vech[i] == i + 1
m_vech = m.vech(diagonal=False)
assert m_vech[0] == 2
m = Matrix([[1, x*(x + y)], [y*x + x**2, 1]])
m_vech = m.vech(diagonal=False)
assert m_vech[0] == y*x + x**2
m = Matrix([[1, x*(x + y)], [y*x, 1]])
m_vech = m.vech(diagonal=False, check_symmetry=False)
assert m_vech[0] == y*x
raises(ShapeError, lambda: Matrix([[1, 3]]).vech())
raises(ValueError, lambda: Matrix([[1, 3], [2, 4]]).vech())
raises(ShapeError, lambda: Matrix([[1, 3]]).vech())
raises(ValueError, lambda: Matrix([[1, 3], [2, 4]]).vech())
def test_diag():
    # mostly tested in test_commonmatrix.py
assert diag([1, 2, 3]) == Matrix([1, 2, 3])
m = [1, 2, [3]]
raises(ValueError, lambda: diag(m))
assert diag(m, strict=False) == Matrix([1, 2, 3])
def test_get_diag_blocks1():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert a.get_diag_blocks() == [a]
assert b.get_diag_blocks() == [b]
assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert diag(a, b, b).get_diag_blocks() == [a, b, b]
assert diag(a, b, c).get_diag_blocks() == [a, b, c]
assert diag(a, c, b).get_diag_blocks() == [a, c, b]
assert diag(c, c, b).get_diag_blocks() == [c, c, b]
def test_inv_block():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
A = diag(a, b, b)
assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), b.inv())
A = diag(a, b, c)
assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), c.inv())
A = diag(a, c, b)
assert A.inv(try_block_diag=True) == diag(a.inv(), c.inv(), b.inv())
A = diag(a, a, b, a, c, a)
assert A.inv(try_block_diag=True) == diag(
a.inv(), a.inv(), b.inv(), a.inv(), c.inv(), a.inv())
assert A.inv(try_block_diag=True, method="ADJ") == diag(
a.inv(method="ADJ"), a.inv(method="ADJ"), b.inv(method="ADJ"),
a.inv(method="ADJ"), c.inv(method="ADJ"), a.inv(method="ADJ"))
def test_creation_args():
"""
Check that matrix dimensions can be specified using any reasonable type
(see issue 4614).
"""
raises(ValueError, lambda: zeros(3, -1))
raises(TypeError, lambda: zeros(1, 2, 3, 4))
assert zeros(int(3)) == zeros(3)
assert zeros(Integer(3)) == zeros(3)
raises(ValueError, lambda: zeros(3.))
assert eye(int(3)) == eye(3)
assert eye(Integer(3)) == eye(3)
raises(ValueError, lambda: eye(3.))
assert ones(int(3), Integer(4)) == ones(3, 4)
raises(TypeError, lambda: Matrix(5))
raises(TypeError, lambda: Matrix(1, 2))
raises(ValueError, lambda: Matrix([1, [2]]))
def test_diagonal_symmetrical():
m = Matrix(2, 2, [0, 1, 1, 0])
assert not m.is_diagonal()
assert m.is_symmetric()
assert m.is_symmetric(simplify=False)
m = Matrix(2, 2, [1, 0, 0, 1])
assert m.is_diagonal()
m = diag(1, 2, 3)
assert m.is_diagonal()
assert m.is_symmetric()
m = Matrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
assert m == diag(1, 2, 3)
m = Matrix(2, 3, zeros(2, 3))
assert not m.is_symmetric()
assert m.is_diagonal()
m = Matrix(((5, 0), (0, 6), (0, 0)))
assert m.is_diagonal()
m = Matrix(((5, 0, 0), (0, 6, 0)))
assert m.is_diagonal()
m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
assert m.is_symmetric()
assert not m.is_symmetric(simplify=False)
assert m.expand().is_symmetric(simplify=False)
def test_diagonalization():
m = Matrix([[1, 2+I], [2-I, 3]])
assert m.is_diagonalizable()
m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
assert not m.is_diagonalizable()
assert not m.is_symmetric()
raises(NonSquareMatrixError, lambda: m.diagonalize())
# diagonalizable
m = diag(1, 2, 3)
(P, D) = m.diagonalize()
assert P == eye(3)
assert D == m
m = Matrix(2, 2, [0, 1, 1, 0])
assert m.is_symmetric()
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
m = Matrix(2, 2, [1, 0, 0, 3])
assert m.is_symmetric()
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
assert P == eye(2)
assert D == m
m = Matrix(2, 2, [1, 1, 0, 0])
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
for i in P:
assert i.as_numer_denom()[1] == 1
m = Matrix(2, 2, [1, 0, 0, 0])
assert m.is_diagonal()
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
assert P == Matrix([[0, 1], [1, 0]])
# diagonalizable, complex only
m = Matrix(2, 2, [0, 1, -1, 0])
assert not m.is_diagonalizable(True)
raises(MatrixError, lambda: m.diagonalize(True))
assert m.is_diagonalizable()
(P, D) = m.diagonalize()
assert P.inv() * m * P == D
# not diagonalizable
m = Matrix(2, 2, [0, 1, 0, 0])
assert not m.is_diagonalizable()
raises(MatrixError, lambda: m.diagonalize())
m = Matrix(3, 3, [-3, 1, -3, 20, 3, 10, 2, -2, 4])
assert not m.is_diagonalizable()
raises(MatrixError, lambda: m.diagonalize())
# symbolic
a, b, c, d = symbols('a b c d')
m = Matrix(2, 2, [a, c, c, b])
assert m.is_symmetric()
assert m.is_diagonalizable()
def test_issue_15887():
# Mutable matrix should not use cache
a = MutableDenseMatrix([[0, 1], [1, 0]])
assert a.is_diagonalizable() is True
a[1, 0] = 0
assert a.is_diagonalizable() is False
a = MutableDenseMatrix([[0, 1], [1, 0]])
a.diagonalize()
a[1, 0] = 0
raises(MatrixError, lambda: a.diagonalize())
# Test deprecated cache and kwargs
with warns_deprecated_sympy():
a.is_diagonalizable(clear_cache=True)
with warns_deprecated_sympy():
a.is_diagonalizable(clear_subproducts=True)
def test_jordan_form():
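    # jordan_form() returns (P, J) with m == P*J*P**-1; each Jordan block has a
    # single eigenvalue on its diagonal and ones on its superdiagonal.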
m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
raises(NonSquareMatrixError, lambda: m.jordan_form())
# diagonalizable
m = Matrix(3, 3, [7, -12, 6, 10, -19, 10, 12, -24, 13])
Jmust = Matrix(3, 3, [-1, 0, 0, 0, 1, 0, 0, 0, 1])
P, J = m.jordan_form()
assert Jmust == J
assert Jmust == m.diagonalize()[1]
# m = Matrix(3, 3, [0, 6, 3, 1, 3, 1, -2, 2, 1])
# m.jordan_form() # very long
# m.jordan_form() #
# diagonalizable, complex only
# Jordan cells
    # complexity: one of the eigenvalues is zero
m = Matrix(3, 3, [0, 1, 0, -4, 4, 0, -2, 1, 2])
# The blocks are ordered according to the value of their eigenvalues,
# in order to make the matrix compatible with .diagonalize()
Jmust = Matrix(3, 3, [2, 1, 0, 0, 2, 0, 0, 0, 2])
P, J = m.jordan_form()
assert Jmust == J
    # complexity: all of the eigenvalues are equal
m = Matrix(3, 3, [2, 6, -15, 1, 1, -5, 1, 2, -6])
# Jmust = Matrix(3, 3, [-1, 0, 0, 0, -1, 1, 0, 0, -1])
# same here see 1456ff
Jmust = Matrix(3, 3, [-1, 1, 0, 0, -1, 0, 0, 0, -1])
P, J = m.jordan_form()
assert Jmust == J
    # complexity: two of the eigenvalues are zero
m = Matrix(3, 3, [4, -5, 2, 5, -7, 3, 6, -9, 4])
Jmust = Matrix(3, 3, [0, 1, 0, 0, 0, 0, 0, 0, 1])
P, J = m.jordan_form()
assert Jmust == J
m = Matrix(4, 4, [6, 5, -2, -3, -3, -1, 3, 3, 2, 1, -2, -3, -1, 1, 5, 5])
Jmust = Matrix(4, 4, [2, 1, 0, 0,
0, 2, 0, 0,
0, 0, 2, 1,
0, 0, 0, 2]
)
P, J = m.jordan_form()
assert Jmust == J
m = Matrix(4, 4, [6, 2, -8, -6, -3, 2, 9, 6, 2, -2, -8, -6, -1, 0, 3, 4])
# Jmust = Matrix(4, 4, [2, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, -2])
# same here see 1456ff
Jmust = Matrix(4, 4, [-2, 0, 0, 0,
0, 2, 1, 0,
0, 0, 2, 0,
0, 0, 0, 2])
P, J = m.jordan_form()
assert Jmust == J
m = Matrix(4, 4, [5, 4, 2, 1, 0, 1, -1, -1, -1, -1, 3, 0, 1, 1, -1, 2])
assert not m.is_diagonalizable()
Jmust = Matrix(4, 4, [1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 4, 1, 0, 0, 0, 4])
P, J = m.jordan_form()
assert Jmust == J
# checking for maximum precision to remain unchanged
m = Matrix([[Float('1.0', precision=110), Float('2.0', precision=110)],
[Float('3.14159265358979323846264338327', precision=110), Float('4.0', precision=110)]])
P, J = m.jordan_form()
for term in J._mat:
if isinstance(term, Float):
assert term._prec == 110
def test_jordan_form_complex_issue_9274():
A = Matrix([[ 2, 4, 1, 0],
[-4, 2, 0, 1],
[ 0, 0, 2, 4],
[ 0, 0, -4, 2]])
    p = 2 - 4*I
    q = 2 + 4*I
Jmust1 = Matrix([[p, 1, 0, 0],
[0, p, 0, 0],
[0, 0, q, 1],
[0, 0, 0, q]])
Jmust2 = Matrix([[q, 1, 0, 0],
[0, q, 0, 0],
[0, 0, p, 1],
[0, 0, 0, p]])
P, J = A.jordan_form()
assert J == Jmust1 or J == Jmust2
assert simplify(P*J*P.inv()) == A
def test_issue_10220():
# two non-orthogonal Jordan blocks with eigenvalue 1
M = Matrix([[1, 0, 0, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1]])
P, J = M.jordan_form()
assert P == Matrix([[0, 1, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
assert J == Matrix([
[1, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def test_jordan_form_issue_15858():
A = Matrix([
[1, 1, 1, 0],
[-2, -1, 0, -1],
[0, 0, -1, -1],
[0, 0, 2, 1]])
(P, J) = A.jordan_form()
assert P.expand() == Matrix([
[ -I, -I/2, I, I/2],
[-1 + I, 0, -1 - I, 0],
[ 0, -S(1)/2 - I/2, 0, -S(1)/2 + I/2],
[ 0, 1, 0, 1]])
assert J == Matrix([
[-I, 1, 0, 0],
[0, -I, 0, 0],
[0, 0, I, 1],
[0, 0, 0, I]])
def test_Matrix_berkowitz_charpoly():
UA, K_i, K_w = symbols('UA K_i K_w')
A = Matrix([[-K_i - UA + K_i**2/(K_i + K_w), K_i*K_w/(K_i + K_w)],
[ K_i*K_w/(K_i + K_w), -K_w + K_w**2/(K_i + K_w)]])
charpoly = A.charpoly(x)
assert charpoly == \
Poly(x**2 + (K_i*UA + K_w*UA + 2*K_i*K_w)/(K_i + K_w)*x +
K_i*K_w*UA/(K_i + K_w), x, domain='ZZ(K_i,K_w,UA)')
assert type(charpoly) is PurePoly
A = Matrix([[1, 3], [2, 0]])
assert A.charpoly() == A.charpoly(x) == PurePoly(x**2 - x - 6)
A = Matrix([[1, 2], [x, 0]])
p = A.charpoly(x)
assert p.gen != x
assert p.as_expr().subs(p.gen, x) == x**2 - 3*x
def test_exp_jordan_block():
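    # For a Jordan block J(lamda, n), exp(J) is upper triangular with
    # exp(lamda)/k! on the k-th superdiagonal, as the 3x3 case below shows.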
l = Symbol('lamda')
m = Matrix.jordan_block(1, l)
assert m._eval_matrix_exp_jblock() == Matrix([[exp(l)]])
m = Matrix.jordan_block(3, l)
assert m._eval_matrix_exp_jblock() == \
Matrix([
[exp(l), exp(l), exp(l)/2],
[0, exp(l), exp(l)],
[0, 0, exp(l)]])
def test_exp():
m = Matrix([[3, 4], [0, -2]])
m_exp = Matrix([[exp(3), -4*exp(-2)/5 + 4*exp(3)/5], [0, exp(-2)]])
assert m.exp() == m_exp
assert exp(m) == m_exp
m = Matrix([[1, 0], [0, 1]])
assert m.exp() == Matrix([[E, 0], [0, E]])
assert exp(m) == Matrix([[E, 0], [0, E]])
m = Matrix([[1, -1], [1, 1]])
assert m.exp() == Matrix([[E*cos(1), -E*sin(1)], [E*sin(1), E*cos(1)]])
def test_log():
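    # For a Jordan block J(lamda, n), log(J) has log(lamda) on the diagonal and
    # (-1)**(k+1)/(k*lamda**k) on the k-th superdiagonal (k >= 1).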
l = Symbol('lamda')
m = Matrix.jordan_block(1, l)
assert m._eval_matrix_log_jblock() == Matrix([[log(l)]])
m = Matrix.jordan_block(4, l)
assert m._eval_matrix_log_jblock() == \
Matrix(
[
[log(l), 1/l, -1/(2*l**2), 1/(3*l**3)],
[0, log(l), 1/l, -1/(2*l**2)],
[0, 0, log(l), 1/l],
[0, 0, 0, log(l)]
]
)
m = Matrix(
[[0, 0, 1],
[0, 0, 0],
[-1, 0, 0]]
)
raises(MatrixError, lambda: m.log())
def test_has():
A = Matrix(((x, y), (2, 3)))
assert A.has(x)
assert not A.has(z)
assert A.has(Symbol)
A = A.subs(x, 2)
assert not A.has(x)
def test_find_reasonable_pivot_naive_finds_guaranteed_nonzero1():
# Test if matrices._find_reasonable_pivot_naive()
    # finds a guaranteed non-zero pivot when some of the candidate
    # pivots are symbolic expressions.
# Keyword argument: simpfunc=None indicates that no simplifications
# should be performed during the search.
x = Symbol('x')
column = Matrix(3, 1, [x, cos(x)**2 + sin(x)**2, S.Half])
pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\
_find_reasonable_pivot_naive(column)
assert pivot_val == S.Half
def test_find_reasonable_pivot_naive_finds_guaranteed_nonzero2():
# Test if matrices._find_reasonable_pivot_naive()
    # finds a guaranteed non-zero pivot when some of the candidate
    # pivots are symbolic expressions.
# Keyword argument: simpfunc=_simplify indicates that the search
# should attempt to simplify candidate pivots.
x = Symbol('x')
column = Matrix(3, 1,
[x,
cos(x)**2+sin(x)**2+x**2,
cos(x)**2+sin(x)**2])
pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\
_find_reasonable_pivot_naive(column, simpfunc=_simplify)
assert pivot_val == 1
def test_find_reasonable_pivot_naive_simplifies():
# Test if matrices._find_reasonable_pivot_naive()
# simplifies candidate pivots, and reports
# their offsets correctly.
x = Symbol('x')
column = Matrix(3, 1,
[x,
cos(x)**2+sin(x)**2+x,
cos(x)**2+sin(x)**2])
pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\
_find_reasonable_pivot_naive(column, simpfunc=_simplify)
assert len(simplified) == 2
assert simplified[0][0] == 1
assert simplified[0][1] == 1+x
assert simplified[1][0] == 2
assert simplified[1][1] == 1
def test_errors():
raises(ValueError, lambda: Matrix([[1, 2], [1]]))
raises(IndexError, lambda: Matrix([[1, 2]])[1.2, 5])
raises(IndexError, lambda: Matrix([[1, 2]])[1, 5.2])
raises(ValueError, lambda: randMatrix(3, c=4, symmetric=True))
raises(ValueError, lambda: Matrix([1, 2]).reshape(4, 6))
raises(ShapeError,
lambda: Matrix([[1, 2], [3, 4]]).copyin_matrix([1, 0], Matrix([1, 2])))
raises(TypeError, lambda: Matrix([[1, 2], [3, 4]]).copyin_list([0,
1], set()))
raises(NonSquareMatrixError, lambda: Matrix([[1, 2, 3], [2, 3, 0]]).inv())
raises(ShapeError,
lambda: Matrix(1, 2, [1, 2]).row_join(Matrix([[1, 2], [3, 4]])))
raises(
ShapeError, lambda: Matrix([1, 2]).col_join(Matrix([[1, 2], [3, 4]])))
raises(ShapeError, lambda: Matrix([1]).row_insert(1, Matrix([[1,
2], [3, 4]])))
raises(ShapeError, lambda: Matrix([1]).col_insert(1, Matrix([[1,
2], [3, 4]])))
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).trace())
raises(TypeError, lambda: Matrix([1]).applyfunc(1))
raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minor(4, 5))
raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minor_submatrix(4, 5))
raises(TypeError, lambda: Matrix([1, 2, 3]).cross(1))
raises(TypeError, lambda: Matrix([1, 2, 3]).dot(1))
raises(ShapeError, lambda: Matrix([1, 2, 3]).dot(Matrix([1, 2])))
raises(ShapeError, lambda: Matrix([1, 2]).dot([]))
raises(TypeError, lambda: Matrix([1, 2]).dot('a'))
with warns_deprecated_sympy():
Matrix([[1, 2], [3, 4]]).dot(Matrix([[4, 3], [1, 2]]))
raises(ShapeError, lambda: Matrix([1, 2]).dot([1, 2, 3]))
raises(NonSquareMatrixError, lambda: Matrix([1, 2, 3]).exp())
raises(ShapeError, lambda: Matrix([[1, 2], [3, 4]]).normalized())
raises(ValueError, lambda: Matrix([1, 2]).inv(method='not a method'))
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_GE())
raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_GE())
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_ADJ())
raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_ADJ())
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_LU())
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).is_nilpotent())
raises(NonSquareMatrixError, lambda: Matrix([1, 2]).det())
raises(ValueError,
lambda: Matrix([[1, 2], [3, 4]]).det(method='Not a real method'))
raises(ValueError,
lambda: Matrix([[1, 2, 3, 4], [5, 6, 7, 8],
[9, 10, 11, 12], [13, 14, 15, 16]]).det(iszerofunc="Not function"))
raises(ValueError,
lambda: Matrix([[1, 2, 3, 4], [5, 6, 7, 8],
[9, 10, 11, 12], [13, 14, 15, 16]]).det(iszerofunc=False))
raises(ValueError,
lambda: hessian(Matrix([[1, 2], [3, 4]]), Matrix([[1, 2], [2, 1]])))
raises(ValueError, lambda: hessian(Matrix([[1, 2], [3, 4]]), []))
raises(ValueError, lambda: hessian(Symbol('x')**2, 'a'))
raises(IndexError, lambda: eye(3)[5, 2])
raises(IndexError, lambda: eye(3)[2, 5])
M = Matrix(((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)))
raises(ValueError, lambda: M.det('method=LU_decomposition()'))
V = Matrix([[10, 10, 10]])
M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(ValueError, lambda: M.row_insert(4.7, V))
M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(ValueError, lambda: M.col_insert(-4.2, V))
def test_len():
assert len(Matrix()) == 0
assert len(Matrix([[1, 2]])) == len(Matrix([[1], [2]])) == 2
assert len(Matrix(0, 2, lambda i, j: 0)) == \
len(Matrix(2, 0, lambda i, j: 0)) == 0
assert len(Matrix([[0, 1, 2], [3, 4, 5]])) == 6
assert Matrix([1]) == Matrix([[1]])
assert not Matrix()
assert Matrix() == Matrix([])
def test_integrate():
A = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2)))
assert A.integrate(x) == \
Matrix(((x, 4*x, x**2/2), (x*y, 2*x, 4*x), (10*x, 5*x, x**3/3)))
assert A.integrate(y) == \
Matrix(((y, 4*y, x*y), (y**2/2, 2*y, 4*y), (10*y, 5*y, y*x**2)))
def test_limit():
A = Matrix(((1, 4, sin(x)/x), (y, 2, 4), (10, 5, x**2 + 1)))
assert A.limit(x, 0) == Matrix(((1, 4, 1), (y, 2, 4), (10, 5, 1)))
def test_diff():
A = MutableDenseMatrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))
assert isinstance(A.diff(x), type(A))
assert A.diff(x) == MutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
assert A.diff(y) == MutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))
assert diff(A, x) == MutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
assert diff(A, y) == MutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))
A_imm = A.as_immutable()
assert isinstance(A_imm.diff(x), type(A_imm))
assert A_imm.diff(x) == ImmutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
assert A_imm.diff(y) == ImmutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))
assert diff(A_imm, x) == ImmutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
assert diff(A_imm, y) == ImmutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))
def test_diff_by_matrix():
# Derive matrix by matrix:
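    # The result is a rank-4 array D with D[i, j, k, l] = d(A[i, j])/d(A[k, l]),
    # i.e. 1 exactly when (i, j) == (k, l) and 0 otherwise.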
A = MutableDenseMatrix([[x, y], [z, t]])
assert A.diff(A) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
assert diff(A, A) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
A_imm = A.as_immutable()
assert A_imm.diff(A_imm) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
assert diff(A_imm, A_imm) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
# Derive a constant matrix:
assert A.diff(a) == MutableDenseMatrix([[0, 0], [0, 0]])
B = ImmutableDenseMatrix([a, b])
assert A.diff(B) == Array.zeros(2, 1, 2, 2)
assert A.diff(A) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
# Test diff with tuples:
dB = B.diff([[a, b]])
assert dB.shape == (2, 2, 1)
assert dB == Array([[[1], [0]], [[0], [1]]])
f = Function("f")
fxyz = f(x, y, z)
assert fxyz.diff([[x, y, z]]) == Array([fxyz.diff(x), fxyz.diff(y), fxyz.diff(z)])
assert fxyz.diff(([x, y, z], 2)) == Array([
[fxyz.diff(x, 2), fxyz.diff(x, y), fxyz.diff(x, z)],
[fxyz.diff(x, y), fxyz.diff(y, 2), fxyz.diff(y, z)],
[fxyz.diff(x, z), fxyz.diff(z, y), fxyz.diff(z, 2)],
])
expr = sin(x)*exp(y)
assert expr.diff([[x, y]]) == Array([cos(x)*exp(y), sin(x)*exp(y)])
assert expr.diff(y, ((x, y),)) == Array([cos(x)*exp(y), sin(x)*exp(y)])
assert expr.diff(x, ((x, y),)) == Array([-sin(x)*exp(y), cos(x)*exp(y)])
assert expr.diff(((y, x),), [[x, y]]) == Array([[cos(x)*exp(y), -sin(x)*exp(y)], [sin(x)*exp(y), cos(x)*exp(y)]])
# Test different notations:
fxyz.diff(x).diff(y).diff(x) == fxyz.diff(((x, y, z),), 3)[0, 1, 0]
fxyz.diff(z).diff(y).diff(x) == fxyz.diff(((x, y, z),), 3)[2, 1, 0]
fxyz.diff([[x, y, z]], ((z, y, x),)) == Array([[fxyz.diff(i).diff(j) for i in (x, y, z)] for j in (z, y, x)])
# Test scalar derived by matrix remains matrix:
res = x.diff(Matrix([[x, y]]))
assert isinstance(res, ImmutableDenseMatrix)
assert res == Matrix([[1, 0]])
res = (x**3).diff(Matrix([[x, y]]))
assert isinstance(res, ImmutableDenseMatrix)
assert res == Matrix([[3*x**2, 0]])
def test_getattr():
A = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))
raises(AttributeError, lambda: A.nonexistantattribute)
assert getattr(A, 'diff')(x) == Matrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))
def test_hessenberg():
A = Matrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
assert A.is_upper_hessenberg
A = A.T
assert A.is_lower_hessenberg
A[0, -1] = 1
assert A.is_lower_hessenberg is False
A = Matrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
assert not A.is_upper_hessenberg
A = zeros(5, 2)
assert A.is_upper_hessenberg
def test_cholesky():
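    # cholesky() factors a Hermitian positive-definite A as L*L.H with L lower
    # triangular; hermitian=False only requires A to be symmetric and gives
    # A == L*L.T, possibly with complex entries in L.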
raises(NonSquareMatrixError, lambda: Matrix((1, 2)).cholesky())
raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).cholesky())
raises(ValueError, lambda: Matrix(((5 + I, 0), (0, 1))).cholesky())
raises(ValueError, lambda: Matrix(((1, 5), (5, 1))).cholesky())
raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).cholesky(hermitian=False))
assert Matrix(((5 + I, 0), (0, 1))).cholesky(hermitian=False) == Matrix([
[sqrt(5 + I), 0], [0, 1]])
A = Matrix(((1, 5), (5, 1)))
L = A.cholesky(hermitian=False)
assert L == Matrix([[1, 0], [5, 2*sqrt(6)*I]])
assert L*L.T == A
A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L = A.cholesky()
assert L * L.T == A
assert L.is_lower
assert L == Matrix([[5, 0, 0], [3, 3, 0], [-1, 1, 3]])
A = Matrix(((4, -2*I, 2 + 2*I), (2*I, 2, -1 + I), (2 - 2*I, -1 - I, 11)))
assert A.cholesky().expand() == Matrix(((2, 0, 0), (I, 1, 0), (1 - I, 0, 3)))
raises(NonSquareMatrixError, lambda: SparseMatrix((1, 2)).cholesky())
raises(ValueError, lambda: SparseMatrix(((1, 2), (3, 4))).cholesky())
raises(ValueError, lambda: SparseMatrix(((5 + I, 0), (0, 1))).cholesky())
raises(ValueError, lambda: SparseMatrix(((1, 5), (5, 1))).cholesky())
raises(ValueError, lambda: SparseMatrix(((1, 2), (3, 4))).cholesky(hermitian=False))
assert SparseMatrix(((5 + I, 0), (0, 1))).cholesky(hermitian=False) == Matrix([
[sqrt(5 + I), 0], [0, 1]])
A = SparseMatrix(((1, 5), (5, 1)))
L = A.cholesky(hermitian=False)
assert L == Matrix([[1, 0], [5, 2*sqrt(6)*I]])
assert L*L.T == A
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L = A.cholesky()
assert L * L.T == A
assert L.is_lower
assert L == Matrix([[5, 0, 0], [3, 3, 0], [-1, 1, 3]])
A = SparseMatrix(((4, -2*I, 2 + 2*I), (2*I, 2, -1 + I), (2 - 2*I, -1 - I, 11)))
assert A.cholesky() == Matrix(((2, 0, 0), (I, 1, 0), (1 - I, 0, 3)))
def test_matrix_norm():
# Vector Tests
# Test columns and symbols
x = Symbol('x', real=True)
v = Matrix([cos(x), sin(x)])
assert trigsimp(v.norm(2)) == 1
assert v.norm(10) == Pow(cos(x)**10 + sin(x)**10, Rational(1, 10))
# Test Rows
A = Matrix([[5, Rational(3, 2)]])
assert A.norm() == Pow(25 + Rational(9, 4), S.Half)
assert A.norm(oo) == max(A._mat)
assert A.norm(-oo) == min(A._mat)
# Matrix Tests
# Intuitive test
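    # The 2-norm is the largest singular value, norm(-2) the smallest, and the
    # Frobenius norm is the square root of the sum of squared absolute entries.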
A = Matrix([[1, 1], [1, 1]])
assert A.norm(2) == 2
assert A.norm(-2) == 0
assert A.norm('frobenius') == 2
assert eye(10).norm(2) == eye(10).norm(-2) == 1
assert A.norm(oo) == 2
# Test with Symbols and more complex entries
A = Matrix([[3, y, y], [x, S.Half, -pi]])
assert (A.norm('fro')
== sqrt(Rational(37, 4) + 2*abs(y)**2 + pi**2 + x**2))
# Check non-square
A = Matrix([[1, 2, -3], [4, 5, Rational(13, 2)]])
assert A.norm(2) == sqrt(Rational(389, 8) + sqrt(78665)/8)
assert A.norm(-2) is S.Zero
assert A.norm('frobenius') == sqrt(389)/2
# Test properties of matrix norms
# https://en.wikipedia.org/wiki/Matrix_norm#Definition
# Two matrices
A = Matrix([[1, 2], [3, 4]])
B = Matrix([[5, 5], [-2, 2]])
C = Matrix([[0, -I], [I, 0]])
D = Matrix([[1, 0], [0, -1]])
L = [A, B, C, D]
alpha = Symbol('alpha', real=True)
for order in ['fro', 2, -2]:
# Zero Check
assert zeros(3).norm(order) is S.Zero
# Check Triangle Inequality for all Pairs of Matrices
for X in L:
for Y in L:
dif = (X.norm(order) + Y.norm(order) -
(X + Y).norm(order))
assert (dif >= 0)
# Scalar multiplication linearity
for M in [A, B, C, D]:
dif = simplify((alpha*M).norm(order) -
abs(alpha) * M.norm(order))
assert dif == 0
# Test Properties of Vector Norms
# https://en.wikipedia.org/wiki/Vector_norm
# Two column vectors
a = Matrix([1, 1 - 1*I, -3])
b = Matrix([S.Half, 1*I, 1])
c = Matrix([-1, -1, -1])
d = Matrix([3, 2, I])
e = Matrix([Integer(1e2), Rational(1, 1e2), 1])
L = [a, b, c, d, e]
alpha = Symbol('alpha', real=True)
for order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity, pi]:
# Zero Check
if order > 0:
assert Matrix([0, 0, 0]).norm(order) is S.Zero
# Triangle inequality on all pairs
if order >= 1: # Triangle InEq holds only for these norms
for X in L:
for Y in L:
dif = (X.norm(order) + Y.norm(order) -
(X + Y).norm(order))
assert simplify(dif >= 0) is S.true
        # Scalar multiplication linearity
if order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity]:
for X in L:
dif = simplify((alpha*X).norm(order) -
(abs(alpha) * X.norm(order)))
assert dif == 0
# ord=1
M = Matrix(3, 3, [1, 3, 0, -2, -1, 0, 3, 9, 6])
assert M.norm(1) == 13
def test_condition_number():
x = Symbol('x', real=True)
A = eye(3)
A[0, 0] = 10
A[2, 2] = Rational(1, 10)
assert A.condition_number() == 100
A[1, 1] = x
assert A.condition_number() == Max(10, Abs(x)) / Min(Rational(1, 10), Abs(x))
M = Matrix([[cos(x), sin(x)], [-sin(x), cos(x)]])
Mc = M.condition_number()
assert all(Float(1.).epsilon_eq(Mc.subs(x, val).evalf()) for val in
[Rational(1, 5), S.Half, Rational(1, 10), pi/2, pi, pi*Rational(7, 4) ])
#issue 10782
assert Matrix([]).condition_number() == 0
def test_equality():
A = Matrix(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
B = Matrix(((9, 8, 7), (6, 5, 4), (3, 2, 1)))
assert A == A[:, :]
assert not A != A[:, :]
assert not A == B
assert A != B
assert A != 10
assert not A == 10
# A SparseMatrix can be equal to a Matrix
C = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
D = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
assert C == D
assert not C != D
def test_col_join():
assert eye(3).col_join(Matrix([[7, 7, 7]])) == \
Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[7, 7, 7]])
def test_row_insert():
r4 = Matrix([[4, 4, 4]])
for i in range(-4, 5):
l = [1, 0, 0]
l.insert(i, 4)
assert flatten(eye(3).row_insert(i, r4).col(0).tolist()) == l
def test_col_insert():
c4 = Matrix([4, 4, 4])
for i in range(-4, 5):
l = [0, 0, 0]
l.insert(i, 4)
assert flatten(zeros(3).col_insert(i, c4).row(0).tolist()) == l
def test_normalized():
assert Matrix([3, 4]).normalized() == \
Matrix([Rational(3, 5), Rational(4, 5)])
# Zero vector trivial cases
assert Matrix([0, 0, 0]).normalized() == Matrix([0, 0, 0])
# Machine precision error truncation trivial cases
m = Matrix([0,0,1.e-100])
assert m.normalized(
iszerofunc=lambda x: x.evalf(n=10, chop=True).is_zero
) == Matrix([0, 0, 0])
def test_print_nonzero():
assert capture(lambda: eye(3).print_nonzero()) == \
'[X ]\n[ X ]\n[ X]\n'
assert capture(lambda: eye(3).print_nonzero('.')) == \
'[. ]\n[ . ]\n[ .]\n'
def test_zeros_eye():
assert Matrix.eye(3) == eye(3)
assert Matrix.zeros(3) == zeros(3)
assert ones(3, 4) == Matrix(3, 4, [1]*12)
i = Matrix([[1, 0], [0, 1]])
z = Matrix([[0, 0], [0, 0]])
for cls in classes:
m = cls.eye(2)
assert i == m # but m == i will fail if m is immutable
assert i == eye(2, cls=cls)
assert type(m) == cls
m = cls.zeros(2)
assert z == m
assert z == zeros(2, cls=cls)
assert type(m) == cls
def test_is_zero():
assert Matrix().is_zero_matrix
assert Matrix([[0, 0], [0, 0]]).is_zero_matrix
assert zeros(3, 4).is_zero_matrix
assert not eye(3).is_zero_matrix
assert Matrix([[x, 0], [0, 0]]).is_zero_matrix == None
assert SparseMatrix([[x, 0], [0, 0]]).is_zero_matrix == None
assert ImmutableMatrix([[x, 0], [0, 0]]).is_zero_matrix == None
assert ImmutableSparseMatrix([[x, 0], [0, 0]]).is_zero_matrix == None
assert Matrix([[x, 1], [0, 0]]).is_zero_matrix == False
a = Symbol('a', nonzero=True)
assert Matrix([[a, 0], [0, 0]]).is_zero_matrix == False
def test_rotation_matrices():
# This tests the rotation matrices by rotating about an axis and back.
theta = pi/3
r3_plus = rot_axis3(theta)
r3_minus = rot_axis3(-theta)
r2_plus = rot_axis2(theta)
r2_minus = rot_axis2(-theta)
r1_plus = rot_axis1(theta)
r1_minus = rot_axis1(-theta)
assert r3_minus*r3_plus*eye(3) == eye(3)
assert r2_minus*r2_plus*eye(3) == eye(3)
assert r1_minus*r1_plus*eye(3) == eye(3)
# Check the correctness of the trace of the rotation matrix
assert r1_plus.trace() == 1 + 2*cos(theta)
assert r2_plus.trace() == 1 + 2*cos(theta)
assert r3_plus.trace() == 1 + 2*cos(theta)
# Check that a rotation with zero angle doesn't change anything.
assert rot_axis1(0) == eye(3)
assert rot_axis2(0) == eye(3)
assert rot_axis3(0) == eye(3)
def test_DeferredVector():
assert str(DeferredVector("vector")[4]) == "vector[4]"
assert sympify(DeferredVector("d")) == DeferredVector("d")
raises(IndexError, lambda: DeferredVector("d")[-1])
assert str(DeferredVector("d")) == "d"
assert repr(DeferredVector("test")) == "DeferredVector('test')"
def test_DeferredVector_not_iterable():
assert not iterable(DeferredVector('X'))
def test_DeferredVector_Matrix():
raises(TypeError, lambda: Matrix(DeferredVector("V")))
def test_GramSchmidt():
R = Rational
m1 = Matrix(1, 2, [1, 2])
m2 = Matrix(1, 2, [2, 3])
assert GramSchmidt([m1, m2]) == \
[Matrix(1, 2, [1, 2]), Matrix(1, 2, [R(2)/5, R(-1)/5])]
assert GramSchmidt([m1.T, m2.T]) == \
[Matrix(2, 1, [1, 2]), Matrix(2, 1, [R(2)/5, R(-1)/5])]
# from wikipedia
assert GramSchmidt([Matrix([3, 1]), Matrix([2, 2])], True) == [
Matrix([3*sqrt(10)/10, sqrt(10)/10]),
Matrix([-sqrt(10)/10, 3*sqrt(10)/10])]
def test_casoratian():
assert casoratian([1, 2, 3, 4], 1) == 0
assert casoratian([1, 2, 3, 4], 1, zero=False) == 0
def test_zero_dimension_multiply():
assert (Matrix()*zeros(0, 3)).shape == (0, 3)
assert zeros(3, 0)*zeros(0, 3) == zeros(3, 3)
assert zeros(0, 3)*zeros(3, 0) == Matrix()
def test_slice_issue_2884():
m = Matrix(2, 2, range(4))
assert m[1, :] == Matrix([[2, 3]])
assert m[-1, :] == Matrix([[2, 3]])
assert m[:, 1] == Matrix([[1, 3]]).T
assert m[:, -1] == Matrix([[1, 3]]).T
raises(IndexError, lambda: m[2, :])
raises(IndexError, lambda: m[2, 2])
def test_slice_issue_3401():
assert zeros(0, 3)[:, -1].shape == (0, 1)
assert zeros(3, 0)[0, :] == Matrix(1, 0, [])
def test_copyin():
s = zeros(3, 3)
s[3] = 1
assert s[:, 0] == Matrix([0, 1, 0])
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == Matrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == Matrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == Matrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == Matrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == Matrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == Matrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == Matrix([1, 1, 1])
def test_invertible_check():
# sometimes a singular matrix will have a pivot vector shorter than
# the number of rows in a matrix...
assert Matrix([[1, 2], [1, 2]]).rref() == (Matrix([[1, 2], [0, 0]]), (0,))
raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inv())
m = Matrix([
[-1, -1, 0],
[ x, 1, 1],
[ 1, x, -1],
])
assert len(m.rref()[1]) != m.rows
# in addition, unless simplify=True in the call to rref, the identity
# matrix will be returned even though m is not invertible
assert m.rref()[0] != eye(3)
assert m.rref(simplify=signsimp)[0] != eye(3)
raises(ValueError, lambda: m.inv(method="ADJ"))
raises(ValueError, lambda: m.inv(method="GE"))
raises(ValueError, lambda: m.inv(method="LU"))
def test_issue_3959():
x, y = symbols('x, y')
e = x*y
assert e.subs(x, Matrix([3, 5, 3])) == Matrix([3, 5, 3])*y
def test_issue_5964():
assert str(Matrix([[1, 2], [3, 4]])) == 'Matrix([[1, 2], [3, 4]])'
def test_issue_7604():
x, y = symbols("x y")
assert sstr(Matrix([[x, 2*y], [y**2, x + 3]])) == \
'Matrix([\n[ x, 2*y],\n[y**2, x + 3]])'
def test_is_Identity():
assert eye(3).is_Identity
assert eye(3).as_immutable().is_Identity
assert not zeros(3).is_Identity
assert not ones(3).is_Identity
# issue 6242
assert not Matrix([[1, 0, 0]]).is_Identity
# issue 8854
assert SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1}).is_Identity
assert not SparseMatrix(2,3, range(6)).is_Identity
assert not SparseMatrix(3,3, {(0,0):1, (1,1):1}).is_Identity
assert not SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1, (0,1):2, (0,2):3}).is_Identity
def test_dot():
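    # For a hermitian dot product one argument is conjugated:
    # conjugate_convention="left" (also the hermitian=True default) conjugates
    # the first vector, while "right"/"physics" conjugates the second.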
assert ones(1, 3).dot(ones(3, 1)) == 3
assert ones(1, 3).dot([1, 1, 1]) == 3
assert Matrix([1, 2, 3]).dot(Matrix([1, 2, 3])) == 14
assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I])) == -5 + I
assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), hermitian=False) == -5 + I
assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), hermitian=True) == 13 + I
assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), hermitian=True, conjugate_convention="physics") == 13 - I
assert Matrix([1, 2, 3*I]).dot(Matrix([4, 5*I, 6]), hermitian=True, conjugate_convention="right") == 4 + 8*I
assert Matrix([1, 2, 3*I]).dot(Matrix([4, 5*I, 6]), hermitian=True, conjugate_convention="left") == 4 - 8*I
assert Matrix([I, 2*I]).dot(Matrix([I, 2*I]), hermitian=False, conjugate_convention="left") == -5
assert Matrix([I, 2*I]).dot(Matrix([I, 2*I]), conjugate_convention="left") == 5
raises(ValueError, lambda: Matrix([1, 2]).dot(Matrix([3, 4]), hermitian=True, conjugate_convention="test"))
def test_dual():
B_x, B_y, B_z, E_x, E_y, E_z = symbols(
'B_x B_y B_z E_x E_y E_z', real=True)
F = Matrix((
( 0, E_x, E_y, E_z),
(-E_x, 0, B_z, -B_y),
(-E_y, -B_z, 0, B_x),
(-E_z, B_y, -B_x, 0)
))
Fd = Matrix((
( 0, -B_x, -B_y, -B_z),
(B_x, 0, E_z, -E_y),
(B_y, -E_z, 0, E_x),
(B_z, E_y, -E_x, 0)
))
assert F.dual().equals(Fd)
assert eye(3).dual().equals(zeros(3))
assert F.dual().dual().equals(-F)
def test_anti_symmetric():
assert Matrix([1, 2]).is_anti_symmetric() is False
m = Matrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
assert m.is_anti_symmetric() is True
assert m.is_anti_symmetric(simplify=False) is False
assert m.is_anti_symmetric(simplify=lambda x: x) is False
# tweak to fail
m[2, 1] = -m[2, 1]
assert m.is_anti_symmetric() is False
# untweak
m[2, 1] = -m[2, 1]
m = m.expand()
assert m.is_anti_symmetric(simplify=False) is True
m[0, 0] = 1
assert m.is_anti_symmetric() is False
def test_normalize_sort_diagonalization():
A = Matrix(((1, 2), (2, 1)))
P, Q = A.diagonalize(normalize=True)
assert P*P.T == P.T*P == eye(P.cols)
P, Q = A.diagonalize(normalize=True, sort=True)
assert P*P.T == P.T*P == eye(P.cols)
assert P*Q*P.inv() == A
def test_issue_5321():
raises(ValueError, lambda: Matrix([[1, 2, 3], Matrix(0, 1, [])]))
def test_issue_5320():
assert Matrix.hstack(eye(2), 2*eye(2)) == Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]
])
assert Matrix.vstack(eye(2), 2*eye(2)) == Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]
])
cls = SparseMatrix
assert cls.hstack(cls(eye(2)), cls(2*eye(2))) == Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]
])
def test_issue_11944():
A = Matrix([[1]])
AIm = sympify(A)
assert Matrix.hstack(AIm, A) == Matrix([[1, 1]])
assert Matrix.vstack(AIm, A) == Matrix([[1], [1]])
def test_cross():
a = [1, 2, 3]
b = [3, 4, 5]
col = Matrix([-2, 4, -2])
row = col.T
def test(M, ans):
assert ans == M
assert type(M) == cls
for cls in classes:
A = cls(a)
B = cls(b)
test(A.cross(B), col)
test(A.cross(B.T), col)
test(A.T.cross(B.T), row)
test(A.T.cross(B), row)
raises(ShapeError, lambda:
Matrix(1, 2, [1, 1]).cross(Matrix(1, 2, [1, 1])))
def test_hash():
for cls in classes[-2:]:
s = {cls.eye(1), cls.eye(1)}
assert len(s) == 1 and s.pop() == cls.eye(1)
# issue 3979
for cls in classes[:2]:
assert not isinstance(cls.eye(1), Hashable)
@XFAIL
def test_issue_3979():
# when this passes, delete this and change the [1:2]
# to [:2] in the test_hash above for issue 3979
cls = classes[0]
raises(AttributeError, lambda: hash(cls.eye(1)))
def test_adjoint():
dat = [[0, I], [1, 0]]
ans = Matrix([[0, 1], [-I, 0]])
for cls in classes:
assert ans == cls(dat).adjoint()
def test_simplify_immutable():
from sympy import simplify, sin, cos
assert simplify(ImmutableMatrix([[sin(x)**2 + cos(x)**2]])) == \
ImmutableMatrix([[1]])
def test_replace():
from sympy import symbols, Function, Matrix
F, G = symbols('F, G', cls=Function)
K = Matrix(2, 2, lambda i, j: G(i+j))
M = Matrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G)
assert N == K
def test_replace_map():
from sympy import symbols, Function, Matrix
F, G = symbols('F, G', cls=Function)
K = Matrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1)\
: G(1)}), (G(2), {F(2): G(2)})])
M = Matrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G, True)
assert N == K
def test_atoms():
m = Matrix([[1, 2], [x, 1 - 1/x]])
assert m.atoms() == {S.One,S(2),S.NegativeOne, x}
assert m.atoms(Symbol) == {x}
def test_pinv():
# Pseudoinverse of an invertible matrix is the inverse.
A1 = Matrix([[a, b], [c, d]])
assert simplify(A1.pinv(method="RD")) == simplify(A1.inv())
# Test the four properties of the pseudoinverse for various matrices.
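    # The four Moore-Penrose conditions: A*A_pinv*A == A, A_pinv*A*A_pinv == A_pinv,
    # and both A*A_pinv and A_pinv*A are Hermitian.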
As = [Matrix([[13, 104], [2212, 3], [-3, 5]]),
Matrix([[1, 7, 9], [11, 17, 19]]),
Matrix([a, b])]
for A in As:
A_pinv = A.pinv(method="RD")
AAp = A * A_pinv
ApA = A_pinv * A
assert simplify(AAp * A) == A
assert simplify(ApA * A_pinv) == A_pinv
assert AAp.H == AAp
assert ApA.H == ApA
# XXX Pinv with diagonalization makes expression too complicated.
for A in As:
A_pinv = simplify(A.pinv(method="ED"))
AAp = A * A_pinv
ApA = A_pinv * A
assert simplify(AAp * A) == A
assert simplify(ApA * A_pinv) == A_pinv
assert AAp.H == AAp
assert ApA.H == ApA
# XXX Computing pinv using diagonalization makes an expression that
# is too complicated to simplify.
# A1 = Matrix([[a, b], [c, d]])
# assert simplify(A1.pinv(method="ED")) == simplify(A1.inv())
# so this is tested numerically at a fixed random point
from sympy.core.numbers import comp
q = A1.pinv(method="ED")
w = A1.inv()
reps = {a: -73633, b: 11362, c: 55486, d: 62570}
assert all(
comp(i.n(), j.n())
for i, j in zip(q.subs(reps), w.subs(reps))
)
@XFAIL
def test_pinv_rank_deficient_when_diagonalization_fails():
# Test the four properties of the pseudoinverse for matrices when
# diagonalization of A.H*A fails.
As = [Matrix([
[61, 89, 55, 20, 71, 0],
[62, 96, 85, 85, 16, 0],
[69, 56, 17, 4, 54, 0],
[10, 54, 91, 41, 71, 0],
[ 7, 30, 10, 48, 90, 0],
[0,0,0,0,0,0]])]
for A in As:
A_pinv = A.pinv(method="ED")
AAp = A * A_pinv
ApA = A_pinv * A
assert simplify(AAp * A) == A
assert simplify(ApA * A_pinv) == A_pinv
assert AAp.H == AAp
assert ApA.H == ApA
def test_issue_7201():
assert ones(0, 1) + ones(0, 1) == Matrix(0, 1, [])
assert ones(1, 0) + ones(1, 0) == Matrix(1, 0, [])
def test_free_symbols():
for M in ImmutableMatrix, ImmutableSparseMatrix, Matrix, SparseMatrix:
assert M([[x], [0]]).free_symbols == {x}
def test_from_ndarray():
"""See issue 7465."""
try:
from numpy import array
except ImportError:
skip('NumPy must be available to test creating matrices from ndarrays')
assert Matrix(array([1, 2, 3])) == Matrix([1, 2, 3])
assert Matrix(array([[1, 2, 3]])) == Matrix([[1, 2, 3]])
assert Matrix(array([[1, 2, 3], [4, 5, 6]])) == \
Matrix([[1, 2, 3], [4, 5, 6]])
assert Matrix(array([x, y, z])) == Matrix([x, y, z])
raises(NotImplementedError,
lambda: Matrix(array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])))
assert Matrix([array([1, 2]), array([3, 4])]) == Matrix([[1, 2], [3, 4]])
assert Matrix([array([1, 2]), [3, 4]]) == Matrix([[1, 2], [3, 4]])
assert Matrix([array([]), array([])]) == Matrix([])
def test_17522_numpy():
from sympy.matrices.common import _matrixify
try:
from numpy import array, matrix
except ImportError:
skip('NumPy must be available to test indexing matrixified NumPy ndarrays and matrices')
m = _matrixify(array([[1, 2], [3, 4]]))
assert m[3] == 4
assert list(m) == [1, 2, 3, 4]
m = _matrixify(matrix([[1, 2], [3, 4]]))
assert m[3] == 4
assert list(m) == [1, 2, 3, 4]
def test_17522_mpmath():
from sympy.matrices.common import _matrixify
try:
from mpmath import matrix
except ImportError:
skip('mpmath must be available to test indexing matrixified mpmath matrices')
m = _matrixify(matrix([[1, 2], [3, 4]]))
assert m[3] == 4
assert list(m) == [1, 2, 3, 4]
def test_17522_scipy():
from sympy.matrices.common import _matrixify
try:
from scipy.sparse import csr_matrix
except ImportError:
skip('SciPy must be available to test indexing matrixified SciPy sparse matrices')
m = _matrixify(csr_matrix([[1, 2], [3, 4]]))
assert m[3] == 4
assert list(m) == [1, 2, 3, 4]
def test_hermitian():
a = Matrix([[1, I], [-I, 1]])
assert a.is_hermitian
a[0, 0] = 2*I
assert a.is_hermitian is False
a[0, 0] = x
assert a.is_hermitian is None
a[0, 1] = a[1, 0]*I
assert a.is_hermitian is False
def test_doit():
a = Matrix([[Add(x,x, evaluate=False)]])
assert a[0] != 2*x
assert a.doit() == Matrix([[2*x]])
def test_issue_9457_9467_9876():
# for row_del(index)
M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
M.row_del(1)
assert M == Matrix([[1, 2, 3], [3, 4, 5]])
N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
N.row_del(-2)
assert N == Matrix([[1, 2, 3], [3, 4, 5]])
O = Matrix([[1, 2, 3], [5, 6, 7], [9, 10, 11]])
O.row_del(-1)
assert O == Matrix([[1, 2, 3], [5, 6, 7]])
P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(IndexError, lambda: P.row_del(10))
Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(IndexError, lambda: Q.row_del(-10))
# for col_del(index)
M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
M.col_del(1)
assert M == Matrix([[1, 3], [2, 4], [3, 5]])
N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
N.col_del(-2)
assert N == Matrix([[1, 3], [2, 4], [3, 5]])
P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(IndexError, lambda: P.col_del(10))
Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
raises(IndexError, lambda: Q.col_del(-10))
def test_issue_9422():
x, y = symbols('x y', commutative=False)
a, b = symbols('a b')
M = eye(2)
M1 = Matrix(2, 2, [x, y, y, z])
assert y*x*M != x*y*M
assert b*a*M == a*b*M
assert x*M1 != M1*x
assert a*M1 == M1*a
assert y*x*M == Matrix([[y*x, 0], [0, y*x]])
def test_issue_10770():
M = Matrix([])
a = ['col_insert', 'row_join'], Matrix([9, 6, 3])
b = ['row_insert', 'col_join'], a[1].T
c = ['row_insert', 'col_insert'], Matrix([[1, 2], [3, 4]])
for ops, m in (a, b, c):
for op in ops:
f = getattr(M, op)
new = f(m) if 'join' in op else f(42, m)
assert new == m and id(new) != id(m)
def test_issue_10658():
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert A.extract([0, 1, 2], [True, True, False]) == \
Matrix([[1, 2], [4, 5], [7, 8]])
assert A.extract([0, 1, 2], [True, False, False]) == Matrix([[1], [4], [7]])
assert A.extract([True, False, False], [0, 1, 2]) == Matrix([[1, 2, 3]])
assert A.extract([True, False, True], [0, 1, 2]) == \
Matrix([[1, 2, 3], [7, 8, 9]])
assert A.extract([0, 1, 2], [False, False, False]) == Matrix(3, 0, [])
assert A.extract([False, False, False], [0, 1, 2]) == Matrix(0, 3, [])
assert A.extract([True, False, True], [False, True, False]) == \
Matrix([[2], [8]])
def test_opportunistic_simplification():
# this test relates to issue #10718, #9480, #11434
# issue #9480
m = Matrix([[-5 + 5*sqrt(2), -5], [-5*sqrt(2)/2 + 5, -5*sqrt(2)/2]])
assert m.rank() == 1
# issue #10781
m = Matrix([[3+3*sqrt(3)*I, -9],[4,-3+3*sqrt(3)*I]])
assert simplify(m.rref()[0] - Matrix([[1, -9/(3 + 3*sqrt(3)*I)], [0, 0]])) == zeros(2, 2)
# issue #11434
ax,ay,bx,by,cx,cy,dx,dy,ex,ey,t0,t1 = symbols('a_x a_y b_x b_y c_x c_y d_x d_y e_x e_y t_0 t_1')
m = Matrix([[ax,ay,ax*t0,ay*t0,0],[bx,by,bx*t0,by*t0,0],[cx,cy,cx*t0,cy*t0,1],[dx,dy,dx*t0,dy*t0,1],[ex,ey,2*ex*t1-ex*t0,2*ey*t1-ey*t0,0]])
assert m.rank() == 4
def test_partial_pivoting():
# example from https://en.wikipedia.org/wiki/Pivot_element
# partial pivoting with back substitution gives a perfect result
    # naive pivoting gives an error ~1e-13, so anything better than
# 1e-15 is good
mm=Matrix([[0.003 ,59.14, 59.17],[ 5.291, -6.13,46.78]])
assert (mm.rref()[0] - Matrix([[1.0, 0, 10.0], [ 0, 1.0, 1.0]])).norm() < 1e-15
# issue #11549
m_mixed = Matrix([[6e-17, 1.0, 4],[ -1.0, 0, 8],[ 0, 0, 1]])
m_float = Matrix([[6e-17, 1.0, 4.],[ -1.0, 0., 8.],[ 0., 0., 1.]])
m_inv = Matrix([[ 0, -1.0, 8.0],[1.0, 6.0e-17, -4.0],[ 0, 0, 1]])
# this example is numerically unstable and involves a matrix with a norm >= 8,
    # so comparing the difference of the results with 1e-15 is numerically sound.
assert (m_mixed.inv() - m_inv).norm() < 1e-15
assert (m_float.inv() - m_inv).norm() < 1e-15
def test_iszero_substitution():
""" When doing numerical computations, all elements that pass
the iszerofunc test should be set to numerically zero if they
aren't already. """
# Matrix from issue #9060
m = Matrix([[0.9, -0.1, -0.2, 0],[-0.8, 0.9, -0.4, 0],[-0.1, -0.8, 0.6, 0]])
m_rref = m.rref(iszerofunc=lambda x: abs(x)<6e-15)[0]
m_correct = Matrix([[1.0, 0, -0.301369863013699, 0],[ 0, 1.0, -0.712328767123288, 0],[ 0, 0, 0, 0]])
m_diff = m_rref - m_correct
assert m_diff.norm() < 1e-15
# if a zero-substitution wasn't made, this entry will be -1.11022302462516e-16
assert m_rref[2,2] == 0
def test_issue_11238():
from sympy import Point
xx = 8*tan(pi*Rational(13, 45))/(tan(pi*Rational(13, 45)) + sqrt(3))
yy = (-8*sqrt(3)*tan(pi*Rational(13, 45))**2 + 24*tan(pi*Rational(13, 45)))/(-3 + tan(pi*Rational(13, 45))**2)
p1 = Point(0, 0)
p2 = Point(1, -sqrt(3))
p0 = Point(xx,yy)
m1 = Matrix([p1 - simplify(p0), p2 - simplify(p0)])
m2 = Matrix([p1 - p0, p2 - p0])
m3 = Matrix([simplify(p1 - p0), simplify(p2 - p0)])
# This system has expressions which are zero and
# cannot be easily proved to be such, so without
# numerical testing, these assertions will fail.
Z = lambda x: abs(x.n()) < 1e-20
assert m1.rank(simplify=True, iszerofunc=Z) == 1
assert m2.rank(simplify=True, iszerofunc=Z) == 1
assert m3.rank(simplify=True, iszerofunc=Z) == 1
def test_as_real_imag():
m1 = Matrix(2,2,[1,2,3,4])
m2 = m1*S.ImaginaryUnit
m3 = m1 + m2
for kls in classes:
a,b = kls(m3).as_real_imag()
assert list(a) == list(m1)
assert list(b) == list(m1)
def test_deprecated():
# Maintain tests for deprecated functions. We must capture
# the deprecation warnings. When the deprecated functionality is
# removed, the corresponding tests should be removed.
m = Matrix(3, 3, [0, 1, 0, -4, 4, 0, -2, 1, 2])
P, Jcells = m.jordan_cells()
assert Jcells[1] == Matrix(1, 1, [2])
assert Jcells[0] == Matrix(2, 2, [2, 1, 0, 2])
with warns_deprecated_sympy():
assert Matrix([[1,2],[3,4]]).dot(Matrix([[1,3],[4,5]])) == [10, 19, 14, 28]
def test_issue_14489():
from sympy import Mod
A = Matrix([-1, 1, 2])
B = Matrix([10, 20, -15])
assert Mod(A, 3) == Matrix([2, 1, 2])
assert Mod(B, 4) == Matrix([2, 0, 1])
def test_issue_14943():
# Test that __array__ accepts the optional dtype argument
try:
from numpy import array
except ImportError:
skip('NumPy must be available to test creating matrices from ndarrays')
M = Matrix([[1,2], [3,4]])
assert array(M, dtype=float).dtype.name == 'float64'
def test_case_6913():
m = MatrixSymbol('m', 1, 1)
a = Symbol("a")
a = m[0, 0]>0
assert str(a) == 'm[0, 0] > 0'
def test_issue_11948():
A = MatrixSymbol('A', 3, 3)
a = Wild('a')
assert A.match(a) == {a: A}
def test_gramschmidt_conjugate_dot():
vecs = [Matrix([1, I]), Matrix([1, -I])]
assert Matrix.orthogonalize(*vecs) == \
[Matrix([[1], [I]]), Matrix([[1], [-I]])]
mat = Matrix([[1, I], [1, -I]])
Q, R = mat.QRdecomposition()
assert Q * Q.H == Matrix.eye(2)
def test_issue_8207():
a = Matrix(MatrixSymbol('a', 3, 1))
b = Matrix(MatrixSymbol('b', 3, 1))
c = a.dot(b)
d = diff(c, a[0, 0])
e = diff(d, a[0, 0])
assert d == b[0, 0]
assert e == 0
def test_func():
from sympy.simplify.simplify import nthroot
A = Matrix([[1, 2],[0, 3]])
assert A.analytic_func(sin(x*t), x) == Matrix([[sin(t), sin(3*t) - sin(t)], [0, sin(3*t)]])
A = Matrix([[2, 1],[1, 2]])
assert (pi * A / 6).analytic_func(cos(x), x) == Matrix([[sqrt(3)/4, -sqrt(3)/4], [-sqrt(3)/4, sqrt(3)/4]])
raises(ValueError, lambda : zeros(5).analytic_func(log(x), x))
raises(ValueError, lambda : (A*x).analytic_func(log(x), x))
A = Matrix([[0, -1, -2, 3], [0, -1, -2, 3], [0, 1, 0, -1], [0, 0, -1, 1]])
assert A.analytic_func(exp(x), x) == A.exp()
raises(ValueError, lambda : A.analytic_func(sqrt(x), x))
A = Matrix([[41, 12],[12, 34]])
assert simplify(A.analytic_func(sqrt(x), x)**2) == A
A = Matrix([[3, -12, 4], [-1, 0, -2], [-1, 5, -1]])
assert simplify(A.analytic_func(nthroot(x, 3), x)**3) == A
A = Matrix([[2, 0, 0, 0], [1, 2, 0, 0], [0, 1, 3, 0], [0, 0, 1, 3]])
assert A.analytic_func(exp(x), x) == A.exp()
A = Matrix([[0, 2, 1, 6], [0, 0, 1, 2], [0, 0, 0, 3], [0, 0, 0, 0]])
assert A.analytic_func(exp(x*t), x) == expand(simplify((A*t).exp()))
def test_issue_19809():
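    # The dotprodsimp flag is tracked per thread, so a worker thread started
    # while dotprodsimp(True) is active in the main thread should still see the
    # default (None) state.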
def f():
assert _dotprodsimp_state.state == None
m = Matrix([[1]])
m = m * m
return True
with dotprodsimp(True):
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(f)
assert future.result()
|
4fa2a0ef6ebf7ff705992fd434e56968de3073bb54cb585a2cc883b52eacddb4 | from sympy import Basic, Expr, S, sympify
from sympy.matrices.common import NonSquareMatrixError
class Determinant(Expr):
"""Matrix Determinant
Represents the determinant of a matrix expression.
Examples
========
>>> from sympy import MatrixSymbol, Determinant, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Determinant(A)
Determinant(A)
>>> Determinant(eye(3)).doit()
1
"""
is_commutative = True
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Determinant, %s, not a matrix" % str(mat))
if not mat.is_square:
raise NonSquareMatrixError("Det of a non-square matrix")
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
def doit(self, expand=False):
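        # Evaluate via the argument's _eval_determinant hook when available;
        # otherwise return the unevaluated Determinant.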
try:
return self.arg._eval_determinant()
except (AttributeError, NotImplementedError):
return self
def det(matexpr):
""" Matrix Determinant
Examples
========
>>> from sympy import MatrixSymbol, det, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> det(A)
Determinant(A)
>>> det(eye(3))
1
"""
return Determinant(matexpr).doit()
class Permanent(Expr):
"""Matrix Permanent
Represents the permanent of a matrix expression.
Examples
========
>>> from sympy import MatrixSymbol, Permanent, ones
>>> A = MatrixSymbol('A', 3, 3)
>>> Permanent(A)
Permanent(A)
>>> Permanent(ones(3, 3)).doit()
6
"""
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Permanent, %s, not a matrix" % str(mat))
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
def doit(self, expand=False):
try:
return self.arg.per()
except (AttributeError, NotImplementedError):
return self
def per(matexpr):
""" Matrix Permanent
Examples
========
>>> from sympy import MatrixSymbol, Matrix, per, ones
>>> A = MatrixSymbol('A', 3, 3)
>>> per(A)
Permanent(A)
>>> per(ones(5, 5))
120
>>> M = Matrix([1, 2, 5])
>>> per(M)
8
"""
return Permanent(matexpr).doit()
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_Determinant(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine, det
>>> X = MatrixSymbol('X', 2, 2)
>>> det(X)
Determinant(X)
>>> with assuming(Q.orthogonal(X)):
... print(refine(det(X)))
1
"""
if ask(Q.orthogonal(expr.arg), assumptions):
return S.One
elif ask(Q.singular(expr.arg), assumptions):
return S.Zero
elif ask(Q.unit_triangular(expr.arg), assumptions):
return S.One
return expr
handlers_dict['Determinant'] = refine_Determinant
|
5a08be1360ff803c3a1236e17abe787d75e8891d8eef7184744eb830c0879844 | from sympy.matrices.expressions import MatrixExpr
from sympy import Q
class Factorization(MatrixExpr):
arg = property(lambda self: self.args[0])
shape = property(lambda self: self.arg.shape) # type: ignore
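# Each subclass below is a symbolic placeholder for one factor of a matrix
# factorization; its `predicates` attribute records the assumption predicates
# associated with that factor.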
class LofLU(Factorization):
predicates = Q.lower_triangular,
class UofLU(Factorization):
predicates = Q.upper_triangular,
class LofCholesky(LofLU): pass
class UofCholesky(UofLU): pass
class QofQR(Factorization):
predicates = Q.orthogonal,
class RofQR(Factorization):
predicates = Q.upper_triangular,
class EigenVectors(Factorization):
predicates = Q.orthogonal,
class EigenValues(Factorization):
predicates = Q.diagonal,
class UofSVD(Factorization):
predicates = Q.orthogonal,
class SofSVD(Factorization):
predicates = Q.diagonal,
class VofSVD(Factorization):
predicates = Q.orthogonal,
def lu(expr):
return LofLU(expr), UofLU(expr)
def qr(expr):
return QofQR(expr), RofQR(expr)
def eig(expr):
return EigenValues(expr), EigenVectors(expr)
def svd(expr):
return UofSVD(expr), SofSVD(expr), VofSVD(expr)
|
20ca97092b0f874569fcc0bd1d94ee740e4318ce1606aff207e06cb746b3c8cf | """ A module which handles Matrix Expressions """
from .slice import MatrixSlice
from .blockmatrix import BlockMatrix, BlockDiagMatrix, block_collapse, blockcut
from .companion import CompanionMatrix
from .funcmatrix import FunctionMatrix
from .inverse import Inverse
from .matadd import MatAdd
from .matexpr import MatrixExpr, MatrixSymbol, matrix_symbols
from .matmul import MatMul
from .matpow import MatPow
from .trace import Trace, trace
from .determinant import Determinant, det, Permanent, per
from .transpose import Transpose
from .adjoint import Adjoint
from .hadamard import hadamard_product, HadamardProduct, hadamard_power, HadamardPower
from .diagonal import DiagonalMatrix, DiagonalOf, DiagMatrix, diagonalize_vector
from .dotproduct import DotProduct
from .kronecker import kronecker_product, KroneckerProduct, combine_kronecker
from .permutation import PermutationMatrix, MatrixPermute
from .sets import MatrixSet
from .special import ZeroMatrix, Identity, OneMatrix
__all__ = [
'MatrixSlice',
'BlockMatrix', 'BlockDiagMatrix', 'block_collapse', 'blockcut',
'FunctionMatrix',
'CompanionMatrix',
'Inverse',
'MatAdd',
'Identity', 'MatrixExpr', 'MatrixSymbol', 'ZeroMatrix', 'OneMatrix',
'matrix_symbols', 'MatrixSet',
'MatMul',
'MatPow',
'Trace', 'trace',
'Determinant', 'det',
'Transpose',
'Adjoint',
'hadamard_product', 'HadamardProduct', 'hadamard_power', 'HadamardPower',
'DiagonalMatrix', 'DiagonalOf', 'DiagMatrix', 'diagonalize_vector',
'DotProduct',
'kronecker_product', 'KroneckerProduct', 'combine_kronecker',
'PermutationMatrix', 'MatrixPermute',
'Permanent', 'per'
]
|
c6b0c9c87292a5629fae73512a497bc7bdab2518e8c4b006b44fb5b978a92708 | from sympy import Number
from sympy.core import Mul, Basic, sympify, S
from sympy.core.mul import mul
from sympy.functions import adjoint
from sympy.strategies import (rm_id, unpack, typed, flatten, exhaust,
do_one, new)
from sympy.matrices.common import ShapeError, NonInvertibleMatrixError
from sympy.matrices.matrices import MatrixBase
from .inverse import Inverse
from .matexpr import MatrixExpr
from .matpow import MatPow
from .transpose import transpose
from .permutation import PermutationMatrix
from .special import ZeroMatrix, Identity, GenericIdentity, OneMatrix
# XXX: MatMul should perhaps not subclass directly from Mul
class MatMul(MatrixExpr, Mul):
"""
A product of matrix expressions
Examples
========
>>> from sympy import MatMul, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 4)
>>> B = MatrixSymbol('B', 4, 3)
>>> C = MatrixSymbol('C', 3, 6)
>>> MatMul(A, B, C)
A*B*C
"""
is_MatMul = True
identity = GenericIdentity()
def __new__(cls, *args, evaluate=False, check=True, _sympify=True):
if not args:
return cls.identity
# This must be removed aggressively in the constructor to avoid
# TypeErrors from GenericIdentity().shape
args = list(filter(lambda i: cls.identity != i, args))
if _sympify:
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
factor, matrices = obj.as_coeff_matrices()
if check:
validate(*matrices)
if not matrices:
# Should it be
#
# return Basic.__new__(cls, factor, GenericIdentity()) ?
return factor
if evaluate:
return canonicalize(obj)
return obj
@property
def shape(self):
matrices = [arg for arg in self.args if arg.is_Matrix]
return (matrices[0].rows, matrices[-1].cols)
def _entry(self, i, j, expand=True, **kwargs):
from sympy import Dummy, Sum, Mul, ImmutableMatrix, Integer
coeff, matrices = self.as_coeff_matrices()
if len(matrices) == 1: # situation like 2*X, matmul is just X
return coeff * matrices[0][i, j]
indices = [None]*(len(matrices) + 1)
ind_ranges = [None]*(len(matrices) - 1)
indices[0] = i
indices[-1] = j
def f():
counter = 1
while True:
yield Dummy("i_%i" % counter)
counter += 1
dummy_generator = kwargs.get("dummy_generator", f())
for i in range(1, len(matrices)):
indices[i] = next(dummy_generator)
for i, arg in enumerate(matrices[:-1]):
ind_ranges[i] = arg.shape[1] - 1
matrices = [arg._entry(indices[i], indices[i+1], dummy_generator=dummy_generator) for i, arg in enumerate(matrices)]
expr_in_sum = Mul.fromiter(matrices)
if any(v.has(ImmutableMatrix) for v in matrices):
expand = True
result = coeff*Sum(
expr_in_sum,
*zip(indices[1:-1], [0]*len(ind_ranges), ind_ranges)
)
# Don't waste time in result.doit() if the sum bounds are symbolic
if not any(isinstance(v, (Integer, int)) for v in ind_ranges):
expand = False
return result.doit() if expand else result
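# Hedged illustration (added comment): for purely symbolic operands ``_entry``
# builds the defining sum over the contracted index, roughly
#
# >>> from sympy import MatrixSymbol, Symbol
# >>> k = Symbol('k', integer=True)
# >>> A = MatrixSymbol('A', 2, k); B = MatrixSymbol('B', k, 2)
# >>> (A*B)[0, 0]
# Sum(A[0, i_1]*B[i_1, 0], (i_1, 0, k - 1))   # up to the dummy-index name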
def as_coeff_matrices(self):
scalars = [x for x in self.args if not x.is_Matrix]
matrices = [x for x in self.args if x.is_Matrix]
coeff = Mul(*scalars)
if coeff.is_commutative is False:
raise NotImplementedError("noncommutative scalars in MatMul are not supported.")
return coeff, matrices
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, MatMul(*matrices)
def _eval_transpose(self):
"""Transposition of matrix multiplication.
Notes
=====
The following rules are applied.
Transposition for matrix multiplied with another matrix:
`\\left(A B\\right)^{T} = B^{T} A^{T}`
Transposition for matrix multiplied with scalar:
`\\left(c A\\right)^{T} = c A^{T}`
References
==========
.. [1] https://en.wikipedia.org/wiki/Transpose
"""
coeff, matrices = self.as_coeff_matrices()
return MatMul(
coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()
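# Brief hedged illustration (added comment) of the rules documented above:
# for conformable MatrixSymbols the transpose distributes in reverse order,
#
# >>> from sympy import MatrixSymbol
# >>> A = MatrixSymbol('A', 2, 3); B = MatrixSymbol('B', 3, 4)
# >>> (A*B).T == B.T*A.T
# True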
def _eval_adjoint(self):
return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()
def _eval_trace(self):
factor, mmul = self.as_coeff_mmul()
if factor != 1:
from .trace import trace
return factor * trace(mmul.doit())
else:
raise NotImplementedError("Can't simplify any further")
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import Determinant
factor, matrices = self.as_coeff_matrices()
square_matrices = only_squares(*matrices)
return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))
def _eval_inverse(self):
try:
return MatMul(*[
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]]).doit()
except ShapeError:
return Inverse(self)
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
# treat scalar*MatrixSymbol or scalar*MatPow separately
expr = canonicalize(MatMul(*args))
return expr
# Needed for partial compatibility with Mul
def args_cnc(self, **kwargs):
coeff_c = [x for x in self.args if x.is_commutative]
coeff_nc = [x for x in self.args if not x.is_commutative]
return [coeff_c, coeff_nc]
def _eval_derivative_matrix_lines(self, x):
from .transpose import Transpose
with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
lines = []
for ind in with_x_ind:
left_args = self.args[:ind]
right_args = self.args[ind+1:]
if right_args:
right_mat = MatMul.fromiter(right_args)
else:
right_mat = Identity(self.shape[1])
if left_args:
left_rev = MatMul.fromiter([Transpose(i).doit() if i.is_Matrix else i for i in reversed(left_args)])
else:
left_rev = Identity(self.shape[0])
d = self.args[ind]._eval_derivative_matrix_lines(x)
for i in d:
i.append_first(left_rev)
i.append_second(right_mat)
lines.append(i)
return lines
mul.register_handlerclass((Mul, MatMul), MatMul)
def validate(*matrices):
""" Checks for valid shapes for args of MatMul """
for i in range(len(matrices)-1):
A, B = matrices[i:i+2]
if A.cols != B.rows:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
# Rules
def newmul(*args):
if args[0] == 1:
args = args[1:]
return new(MatMul, *args)
def any_zeros(mul):
if any([arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
for arg in mul.args]):
matrices = [arg for arg in mul.args if arg.is_Matrix]
return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
[1 1] [1 2]
A*[ ]*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[4 6]
A*[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, (MatrixBase, Number)) and isinstance(last, (MatrixBase, Number)):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
This is necessary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
========
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
factor, matrices = mul.as_coeff_matrices()
if factor != 1:
return newmul(factor, *matrices)
return mul
def combine_powers(mul):
"""Combine consecutive powers with the same base into one
e.g. A*A**2 -> A**3
This also cancels out the possible matrix inverses using the
knowledgebase of ``Inverse``.
e.g. Y * X * X.I -> Y
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for B in args[1:]:
A = new_args[-1]
if A.is_square == False or B.is_square == False:
new_args.append(B)
continue
if isinstance(A, MatPow):
A_base, A_exp = A.args
else:
A_base, A_exp = A, S.One
if isinstance(B, MatPow):
B_base, B_exp = B.args
else:
B_base, B_exp = B, S.One
if A_base == B_base:
new_exp = A_exp + B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
elif not isinstance(B_base, MatrixBase):
try:
B_base_inv = B_base.inverse()
except NonInvertibleMatrixError:
B_base_inv = None
if B_base_inv is not None and A_base == B_base_inv:
new_exp = A_exp - B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
new_args.append(B)
return newmul(factor, *new_args)
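# Hedged sketch of the rule above (added comment): consecutive powers of the
# same base are merged and inverses cancel during canonicalization, e.g.
#
# >>> from sympy import MatrixSymbol
# >>> X = MatrixSymbol('X', 3, 3); Y = MatrixSymbol('Y', 3, 3)
# >>> X*X**2
# X**3
# >>> Y*X*X.I
# Y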
def combine_permutations(mul):
"""Refine products of permutation matrices as the products of cycles.
"""
args = mul.args
l = len(args)
if l < 2:
return mul
result = [args[0]]
for i in range(1, l):
A = result[-1]
B = args[i]
if isinstance(A, PermutationMatrix) and \
isinstance(B, PermutationMatrix):
cycle_1 = A.args[0]
cycle_2 = B.args[0]
result[-1] = PermutationMatrix(cycle_1 * cycle_2)
else:
result.append(B)
return MatMul(*result)
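# Hedged sketch (added comment): adjacent permutation matrices collapse into a
# single PermutationMatrix of the composed permutation (the exact composition
# order follows sympy's Permutation multiplication), e.g.
#
# >>> from sympy.combinatorics import Permutation
# >>> P1 = PermutationMatrix(Permutation([1, 0, 2]))
# >>> P2 = PermutationMatrix(Permutation([0, 2, 1]))
# >>> isinstance(P1*P2, PermutationMatrix)
# True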
def combine_one_matrices(mul):
"""
Combine products of OneMatrix
e.g. OneMatrix(2, 3) * OneMatrix(3, 4) -> 3 * OneMatrix(2, 4)
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for B in args[1:]:
A = new_args[-1]
if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):
new_args.append(B)
continue
new_args.pop()
new_args.append(OneMatrix(A.shape[0], B.shape[1]))
factor *= A.shape[1]
return newmul(factor, *new_args)
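# Hedged sketch (added comment), mirroring the docstring above: the inner
# dimension becomes a scalar factor, roughly
#
# >>> MatMul(OneMatrix(2, 3), OneMatrix(3, 4)).doit()
# 3*OneMatrix(2, 4)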
rules = (
any_zeros, remove_ids, combine_one_matrices, combine_powers, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten, combine_permutations)
canonicalize = exhaust(typed({MatMul: do_one(*rules)}))
def only_squares(*matrices):
"""factor matrices only if they are square"""
if matrices[0].rows != matrices[-1].cols:
raise RuntimeError("Invalid matrices being multiplied")
out = []
start = 0
for i, M in enumerate(matrices):
if M.cols == matrices[start].rows:
out.append(MatMul(*matrices[start:i+1]).doit())
start = i+1
return out
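# Worked sketch (added comment, hedged): the cut points fall wherever the
# running product becomes square, which is what lets _eval_determinant above
# write det(A*B*C) as Det(A*B)*Det(C) when the full chain is square, e.g.
#
# >>> from sympy import MatrixSymbol
# >>> A = MatrixSymbol('A', 2, 3); B = MatrixSymbol('B', 3, 2); C = MatrixSymbol('C', 2, 2)
# >>> only_squares(A, B, C)
# [A*B, C]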
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_MatMul(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine
>>> X = MatrixSymbol('X', 2, 2)
>>> expr = X * X.T
>>> print(expr)
X*X.T
>>> with assuming(Q.orthogonal(X)):
... print(refine(expr))
I
"""
newargs = []
exprargs = []
for args in expr.args:
if args.is_Matrix:
exprargs.append(args)
else:
newargs.append(args)
last = exprargs[0]
for arg in exprargs[1:]:
if arg == last.T and ask(Q.orthogonal(arg), assumptions):
last = Identity(arg.shape[0])
elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
last = Identity(arg.shape[0])
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
handlers_dict['MatMul'] = refine_MatMul
|
4900a05c117d21ea2f1df0a09d810b734cfd0752786a24c75bee39d66e052a28 | from typing import Tuple as tTuple
from sympy.core.logic import FuzzyBool
from functools import wraps, reduce
import collections
from sympy.core import S, Symbol, Integer, Basic, Expr, Mul, Add
from sympy.core.decorators import call_highest_priority
from sympy.core.compatibility import SYMPY_INTS, default_sort_key
from sympy.core.symbol import Str
from sympy.core.sympify import SympifyError, _sympify
from sympy.functions import conjugate, adjoint
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.common import NonSquareMatrixError
from sympy.simplify import simplify
from sympy.utilities.misc import filldedent
from sympy.multipledispatch import dispatch
def _sympifyit(arg, retval=None):
# This version of _sympifyit sympifies MutableMatrix objects
def deco(func):
@wraps(func)
def __sympifyit_wrapper(a, b):
try:
b = _sympify(b)
return func(a, b)
except SympifyError:
return retval
return __sympifyit_wrapper
return deco
class MatrixExpr(Expr):
"""Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).I * A * y
See Also
========
MatrixSymbol, MatAdd, MatMul, Transpose, Inverse
"""
# Should not be considered iterable by the
# sympy.core.compatibility.iterable function. Subclass that actually are
# iterable (i.e., explicit matrices) should set this to True.
_iterable = False
_op_priority = 11.0
is_Matrix = True # type: bool
is_MatrixExpr = True # type: bool
is_Identity = None # type: FuzzyBool
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
is_commutative = False
is_number = False
is_symbol = False
is_scalar = False
def __new__(cls, *args, **kwargs):
args = map(_sympify, args)
return Basic.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
@property
def shape(self) -> tTuple[Expr, Expr]:
raise NotImplementedError
@property
def _add_handler(self):
return MatAdd
@property
def _mul_handler(self):
return MatMul
def __neg__(self):
return MatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return MatAdd(self, other, check=True).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return MatAdd(other, self, check=True).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return MatAdd(self, -other, check=True).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return MatAdd(other, -self, check=True).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __matmul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmatmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return MatPow(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * other**S.NegativeOne
@_sympifyit('other', NotImplemented)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
raise NotImplementedError()
#return MatMul(other, Pow(self, S.NegativeOne))
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from sympy.matrices.expressions.adjoint import Adjoint
from sympy.matrices.expressions.transpose import Transpose
return Adjoint(Transpose(self))
def as_real_imag(self, deep=True, **hints):
from sympy import I
real = S.Half * (self + self._eval_conjugate())
im = (self - self._eval_conjugate())/(2*I)
return (real, im)
def _eval_inverse(self):
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
return Transpose(self)
def _eval_power(self, exp):
"""
Override this in sub-classes to implement simplification of powers. The cases where the exponent
is -1, 0, 1 are already covered in MatPow.doit(), so implementations can exclude these cases.
"""
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
return self.func(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(self)
def _eval_derivative_n_times(self, x, n):
return Basic._eval_derivative_n_times(self, x, n)
def _eval_derivative(self, x):
# `x` is a scalar:
if self.has(x):
# See if there are other methods using it:
return super()._eval_derivative(x)
else:
return ZeroMatrix(*self.shape)
@classmethod
def _check_dim(cls, dim):
"""Helper function to check invalid matrix dimensions"""
from sympy.core.assumptions import check_assumptions
ok = check_assumptions(dim, integer=True, nonnegative=True)
if ok is False:
raise ValueError(
"The dimension specification {} should be "
"a nonnegative integer.".format(dim))
def _entry(self, i, j, **kwargs):
raise NotImplementedError(
"Indexing not implemented for %s" % self.__class__.__name__)
def adjoint(self):
return adjoint(self)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return S.One, self
def conjugate(self):
return conjugate(self)
def transpose(self):
from sympy.matrices.expressions.transpose import transpose
return transpose(self)
@property
def T(self):
'''Matrix transposition'''
return self.transpose()
def inverse(self):
if not self.is_square:
raise NonSquareMatrixError('Inverse of non-square matrix')
return self._eval_inverse()
def inv(self):
return self.inverse()
@property
def I(self):
return self.inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(self.rows is None or
(0 <= i) != False and (i < self.rows) != False) and
(0 <= j) != False and (j < self.cols) != False)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = _sympify(i), _sympify(j)
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid indices (%s, %s)" % (i, j))
elif isinstance(key, (SYMPY_INTS, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
# allow single indexing if number of columns is known
if not isinstance(cols, Integer):
raise IndexError(filldedent('''
Single indexing is only supported when the number
of columns is known.'''))
key = _sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid index %s" % key)
elif isinstance(key, (Symbol, Expr)):
raise IndexError(filldedent('''
Only integers may be used when addressing the matrix
with a single index.'''))
raise IndexError("Invalid index, wanted %s[i,j]" % self)
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableDenseMatrix.
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
if (not isinstance(self.rows, (SYMPY_INTS, Integer))
or not isinstance(self.cols, (SYMPY_INTS, Integer))):
raise ValueError(
'Matrix with symbolic shape '
'cannot be represented explicitly.')
from sympy.matrices.immutable import ImmutableDenseMatrix
return ImmutableDenseMatrix([[self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableDenseMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> from sympy import Identity, eye
>>> Identity(3).equals(eye(3))
True
"""
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
return 1, MatMul(self)
@staticmethod
def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r"""
Parse expression of matrices with explicitly summed indices into a
matrix expression without indices, if possible.
This transformation expressed in mathematical notation:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Optional parameter ``first_index``: specify which free index to use as
the index starting the expression.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum
>>> from sympy.abc import i, j, k, l, N
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A.T*B
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
Trace(A)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B.T*A.T
"""
from sympy import Sum, Mul, Add, MatMul, transpose, trace
from sympy.strategies.traverse import bottom_up
def remove_matelement(expr, i1, i2):
def repl_match(pos):
def func(x):
if not isinstance(x, MatrixElement):
return False
if x.args[pos] != i1:
return False
if x.args[3-pos] == 0:
if x.args[0].shape[2-pos] == 1:
return True
else:
return False
return True
return func
expr = expr.replace(repl_match(1),
lambda x: x.args[0])
expr = expr.replace(repl_match(2),
lambda x: transpose(x.args[0]))
# Make sure that all Mul are transformed to MatMul and that they
# are flattened:
rule = bottom_up(lambda x: reduce(lambda a, b: a*b, x.args) if isinstance(x, (Mul, MatMul)) else x)
return rule(expr)
def recurse_expr(expr, index_ranges={}):
if expr.is_Mul:
nonmatargs = []
pos_arg = []
pos_ind = []
dlinks = {}
link_ind = []
counter = 0
args_ind = []
for arg in expr.args:
retvals = recurse_expr(arg, index_ranges)
assert isinstance(retvals, list)
if isinstance(retvals, list):
for i in retvals:
args_ind.append(i)
else:
args_ind.append(retvals)
for arg_symbol, arg_indices in args_ind:
if arg_indices is None:
nonmatargs.append(arg_symbol)
continue
if isinstance(arg_symbol, MatrixElement):
arg_symbol = arg_symbol.args[0]
pos_arg.append(arg_symbol)
pos_ind.append(arg_indices)
link_ind.append([None]*len(arg_indices))
for i, ind in enumerate(arg_indices):
if ind in dlinks:
other_i = dlinks[ind]
link_ind[counter][i] = other_i
link_ind[other_i[0]][other_i[1]] = (counter, i)
dlinks[ind] = (counter, i)
counter += 1
counter2 = 0
lines = {}
while counter2 < len(link_ind):
for i, e in enumerate(link_ind):
if None in e:
line_start_index = (i, e.index(None))
break
cur_ind_pos = line_start_index
cur_line = []
index1 = pos_ind[cur_ind_pos[0]][cur_ind_pos[1]]
while True:
d, r = cur_ind_pos
if pos_arg[d] != 1:
if r % 2 == 1:
cur_line.append(transpose(pos_arg[d]))
else:
cur_line.append(pos_arg[d])
next_ind_pos = link_ind[d][1-r]
counter2 += 1
# Mark as visited, there will be no `None` anymore:
link_ind[d] = (-1, -1)
if next_ind_pos is None:
index2 = pos_ind[d][1-r]
lines[(index1, index2)] = cur_line
break
cur_ind_pos = next_ind_pos
lines = {k: MatMul.fromiter(v) if len(v) != 1 else v[0] for k, v in lines.items()}
return [(Mul.fromiter(nonmatargs), None)] + [
(MatrixElement(a, i, j), (i, j)) for (i, j), a in lines.items()
]
elif expr.is_Add:
res = [recurse_expr(i) for i in expr.args]
d = collections.defaultdict(list)
for res_addend in res:
scalar = 1
for elem, indices in res_addend:
if indices is None:
scalar = elem
continue
indices = tuple(sorted(indices, key=default_sort_key))
d[indices].append(scalar*remove_matelement(elem, *indices))
scalar = 1
return [(MatrixElement(Add.fromiter(v), *k), k) for k, v in d.items()]
elif isinstance(expr, KroneckerDelta):
i1, i2 = expr.args
if dimensions is not None:
identity = Identity(dimensions[0])
else:
identity = S.One
return [(MatrixElement(identity, i1, i2), (i1, i2))]
elif isinstance(expr, MatrixElement):
matrix_symbol, i1, i2 = expr.args
if i1 in index_ranges:
r1, r2 = index_ranges[i1]
if r1 != 0 or matrix_symbol.shape[0] != r2+1:
raise ValueError("index range mismatch: {} vs. (0, {})".format(
(r1, r2), matrix_symbol.shape[0]))
if i2 in index_ranges:
r1, r2 = index_ranges[i2]
if r1 != 0 or matrix_symbol.shape[1] != r2+1:
raise ValueError("index range mismatch: {} vs. (0, {})".format(
(r1, r2), matrix_symbol.shape[1]))
if (i1 == i2) and (i1 in index_ranges):
return [(trace(matrix_symbol), None)]
return [(MatrixElement(matrix_symbol, i1, i2), (i1, i2))]
elif isinstance(expr, Sum):
return recurse_expr(
expr.args[0],
index_ranges={i[0]: i[1:] for i in expr.args[1:]}
)
else:
return [(expr, None)]
retvals = recurse_expr(expr)
factors, indices = zip(*retvals)
retexpr = Mul.fromiter(factors)
if len(indices) == 0 or list(set(indices)) == [None]:
return retexpr
if first_index is None:
for i in indices:
if i is not None:
ind0 = i
break
return remove_matelement(retexpr, *ind0)
else:
return remove_matelement(retexpr, first_index, last_index)
def applyfunc(self, func):
from .applyfunc import ElementwiseApplyFunction
return ElementwiseApplyFunction(func, self)
@dispatch(MatrixExpr, Expr)
def _eval_is_eq(lhs, rhs): # noqa:F811
return False
@dispatch(MatrixExpr, MatrixExpr) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
if lhs.shape != rhs.shape:
return False
if (lhs - rhs).is_ZeroMatrix:
return True
def get_postprocessor(cls):
def _postprocessor(expr):
# To avoid circular imports, we can't have MatMul/MatAdd on the top level
mat_class = {Mul: MatMul, Add: MatAdd}[cls]
nonmatrices = []
matrices = []
for term in expr.args:
if isinstance(term, MatrixExpr):
matrices.append(term)
else:
nonmatrices.append(term)
if not matrices:
return cls._from_args(nonmatrices)
if nonmatrices:
if cls == Mul:
for i in range(len(matrices)):
if not matrices[i].is_MatrixExpr:
# If one of the matrices explicit, absorb the scalar into it
# (doit will combine all explicit matrices into one, so it
# doesn't matter which)
matrices[i] = matrices[i].__mul__(cls._from_args(nonmatrices))
nonmatrices = []
break
else:
# Maintain the ability to create Add(scalar, matrix) without
# raising an exception. That way different algorithms can
# replace matrix expressions with non-commutative symbols to
# manipulate them like non-commutative scalars.
return cls._from_args(nonmatrices + [mat_class(*matrices).doit(deep=False)])
if mat_class == MatAdd:
return mat_class(*matrices).doit(deep=False)
return mat_class(cls._from_args(nonmatrices), *matrices).doit(deep=False)
return _postprocessor
Basic._constructor_postprocessor_mapping[MatrixExpr] = {
"Mul": [get_postprocessor(Mul)],
"Add": [get_postprocessor(Add)],
}
def _matrix_derivative(expr, x):
from sympy.tensor.array.array_derivatives import ArrayDerivative
lines = expr._eval_derivative_matrix_lines(x)
parts = [i.build() for i in lines]
from sympy.codegen.array_utils import recognize_matrix_expression
parts = [[recognize_matrix_expression(j).doit() for j in i] for i in parts]
def _get_shape(elem):
if isinstance(elem, MatrixExpr):
return elem.shape
return 1, 1
def get_rank(parts):
return sum([j not in (1, None) for i in parts for j in _get_shape(i)])
ranks = [get_rank(i) for i in parts]
rank = ranks[0]
def contract_one_dims(parts):
if len(parts) == 1:
return parts[0]
else:
p1, p2 = parts[:2]
if p2.is_Matrix:
p2 = p2.T
if p1 == Identity(1):
pbase = p2
elif p2 == Identity(1):
pbase = p1
else:
pbase = p1*p2
if len(parts) == 2:
return pbase
else: # len(parts) > 2
if pbase.is_Matrix:
raise ValueError("cannot contract parts: the leading factor is still a matrix")
return pbase*Mul.fromiter(parts[2:])
if rank <= 2:
return Add.fromiter([contract_one_dims(i) for i in parts])
return ArrayDerivative(expr, x)
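# Hedged sketch (added comment): the machinery above backs matrix-calculus
# derivatives such as the standard identity d/dX tr(A*X) = A.T.  Under that
# identity one would expect, roughly,
#
# >>> from sympy import MatrixSymbol, Trace
# >>> A = MatrixSymbol('A', 3, 3); X = MatrixSymbol('X', 3, 3)
# >>> Trace(A*X).diff(X)   # expected to simplify to A.T
# A.T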
class MatrixElement(Expr):
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
is_symbol = True
is_commutative = True
def __new__(cls, name, n, m):
n, m = map(_sympify, (n, m))
from sympy import MatrixBase
if isinstance(name, (MatrixBase,)):
if n.is_Integer and m.is_Integer:
return name[n, m]
if isinstance(name, str):
name = Symbol(name)
name = _sympify(name)
obj = Expr.__new__(cls, name, n, m)
return obj
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return args[0][args[1], args[2]]
@property
def indices(self):
return self.args[1:]
def _eval_derivative(self, v):
from sympy import Sum, symbols, Dummy
if not isinstance(v, MatrixElement):
from sympy import MatrixBase
if isinstance(self.parent, MatrixBase):
return self.parent.diff(v)[self.i, self.j]
return S.Zero
M = self.args[0]
m, n = self.parent.shape
if M == v.args[0]:
return KroneckerDelta(self.args[1], v.args[1], (0, m-1)) * \
KroneckerDelta(self.args[2], v.args[2], (0, n-1))
if isinstance(M, Inverse):
i, j = self.args[1:]
i1, i2 = symbols("z1, z2", cls=Dummy)
Y = M.args[0]
r1, r2 = Y.shape
return -Sum(M[i, i1]*Y[i1, i2].diff(v)*M[i2, j], (i1, 0, r1-1), (i2, 0, r2-1))
if self.has(v.args[0]):
return None
return S.Zero
class MatrixSymbol(MatrixExpr):
"""Symbolic representation of a Matrix object
Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
Examples
========
>>> from sympy import MatrixSymbol, Identity
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_commutative = False
is_symbol = True
_diff_wrt = True
def __new__(cls, name, n, m):
n, m = _sympify(n), _sympify(m)
cls._check_dim(m)
cls._check_dim(n)
if isinstance(name, str):
name = Str(name)
obj = Basic.__new__(cls, name, n, m)
return obj
@property
def shape(self):
return self.args[1], self.args[2]
@property
def name(self):
return self.args[0].name
def _entry(self, i, j, **kwargs):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return {self}
def _eval_simplify(self, **kwargs):
return self
def _eval_derivative(self, x):
# x is a scalar:
return ZeroMatrix(self.shape[0], self.shape[1])
def _eval_derivative_matrix_lines(self, x):
if self != x:
first = ZeroMatrix(x.shape[0], self.shape[0]) if self.shape[0] != 1 else S.Zero
second = ZeroMatrix(x.shape[1], self.shape[1]) if self.shape[1] != 1 else S.Zero
return [_LeftRightArgs(
[first, second],
)]
else:
first = Identity(self.shape[0]) if self.shape[0] != 1 else S.One
second = Identity(self.shape[1]) if self.shape[1] != 1 else S.One
return [_LeftRightArgs(
[first, second],
)]
def matrix_symbols(expr):
return [sym for sym in expr.free_symbols if sym.is_Matrix]
class _LeftRightArgs:
r"""
Helper class to compute matrix derivatives.
The logic: when an expression is derived by a matrix `X_{mn}`, two lines of
matrix multiplications are created: the one contracted to `m` (first line),
and the one contracted to `n` (second line).
Transposition flips the side by which new matrices are connected to the
lines.
The trace connects the end of the two lines.
"""
def __init__(self, lines, higher=S.One):
self._lines = [i for i in lines]
self._first_pointer_parent = self._lines
self._first_pointer_index = 0
self._first_line_index = 0
self._second_pointer_parent = self._lines
self._second_pointer_index = 1
self._second_line_index = 1
self.higher = higher
@property
def first_pointer(self):
return self._first_pointer_parent[self._first_pointer_index]
@first_pointer.setter
def first_pointer(self, value):
self._first_pointer_parent[self._first_pointer_index] = value
@property
def second_pointer(self):
return self._second_pointer_parent[self._second_pointer_index]
@second_pointer.setter
def second_pointer(self, value):
self._second_pointer_parent[self._second_pointer_index] = value
def __repr__(self):
built = [self._build(i) for i in self._lines]
return "_LeftRightArgs(lines=%s, higher=%s)" % (
built,
self.higher,
)
def transpose(self):
self._first_pointer_parent, self._second_pointer_parent = self._second_pointer_parent, self._first_pointer_parent
self._first_pointer_index, self._second_pointer_index = self._second_pointer_index, self._first_pointer_index
self._first_line_index, self._second_line_index = self._second_line_index, self._first_line_index
return self
@staticmethod
def _build(expr):
from sympy.core.expr import ExprBuilder
if isinstance(expr, ExprBuilder):
return expr.build()
if isinstance(expr, list):
if len(expr) == 1:
return expr[0]
else:
return expr[0](*[_LeftRightArgs._build(i) for i in expr[1]])
else:
return expr
def build(self):
data = [self._build(i) for i in self._lines]
if self.higher != 1:
data += [self._build(self.higher)]
data = [i.doit() for i in data]
return data
def matrix_form(self):
if self.first != 1 and self.higher != 1:
raise ValueError("higher dimensional array cannot be represented")
def _get_shape(elem):
if isinstance(elem, MatrixExpr):
return elem.shape
return (None, None)
if _get_shape(self.first)[1] != _get_shape(self.second)[1]:
# Remove one-dimensional identity matrices:
# (this is needed by `a.diff(a)` where `a` is a vector)
if _get_shape(self.second) == (1, 1):
return self.first*self.second[0, 0]
if _get_shape(self.first) == (1, 1):
return self.first[0, 0]*self.second.T
raise ValueError("incompatible shapes")
if self.first != 1:
return self.first*self.second.T
else:
return self.higher
def rank(self):
"""
Number of dimensions different from trivial (warning: not related to
matrix rank).
"""
rank = 0
if self.first != 1:
rank += sum([i != 1 for i in self.first.shape])
if self.second != 1:
rank += sum([i != 1 for i in self.second.shape])
if self.higher != 1:
rank += 2
return rank
def _multiply_pointer(self, pointer, other):
from sympy.core.expr import ExprBuilder
from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
subexpr = ExprBuilder(
CodegenArrayContraction,
[
ExprBuilder(
CodegenArrayTensorProduct,
[
pointer,
other
]
),
(1, 2)
],
validator=CodegenArrayContraction._validate
)
return subexpr
def append_first(self, other):
self.first_pointer *= other
def append_second(self, other):
self.second_pointer *= other
def _make_matrix(x):
from sympy import ImmutableDenseMatrix
if isinstance(x, MatrixExpr):
return x
return ImmutableDenseMatrix([[x]])
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
from .special import ZeroMatrix, Identity
|
d51ce6d593e859fbba20b3c59938dc53c53185c10b1f6f6e28b21ecbc6e57f02 | from sympy.core.sympify import _sympify
from sympy.matrices.expressions import MatrixExpr
from sympy import S, I, sqrt, exp
class DFT(MatrixExpr):
""" Discrete Fourier Transform """
def __new__(cls, n):
n = _sympify(n)
cls._check_dim(n)
obj = super().__new__(cls, n)
return obj
n = property(lambda self: self.args[0]) # type: ignore
shape = property(lambda self: (self.n, self.n)) # type: ignore
def _entry(self, i, j, **kwargs):
w = exp(-2*S.Pi*I/self.n)
return w**(i*j) / sqrt(self.n)
def _eval_inverse(self):
return IDFT(self.n)
class IDFT(DFT):
""" Inverse Discrete Fourier Transform """
def _entry(self, i, j, **kwargs):
w = exp(-2*S.Pi*I/self.n)
return w**(-i*j) / sqrt(self.n)
def _eval_inverse(self):
return DFT(self.n)
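# Hedged numeric check (added comment): with w = exp(-2*pi*I/n) the entries
# above give the standard unitary DFT matrix; for n = 2, w = -1 and one would
# expect, roughly,
#
# >>> from sympy import Matrix, sqrt
# >>> DFT(2).as_explicit() == Matrix([[1, 1], [1, -1]])/sqrt(2)
# True
#
# IDFT(n) uses the opposite sign in the exponent and is the inverse of DFT(n).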
|
e3fbd47ae7c28822115a44a41fa7b74bb8a8a530c50fa94d507eed3c369b666f | from sympy.core.sympify import _sympify
from sympy.matrices.expressions import MatrixExpr
from sympy.core import S, Eq, Ge
from sympy.functions.special.tensor_functions import KroneckerDelta
class DiagonalMatrix(MatrixExpr):
"""DiagonalMatrix(M) will create a matrix expression that
behaves as though all off-diagonal elements,
`M[i, j]` where `i != j`, are zero.
Examples
========
>>> from sympy import MatrixSymbol, DiagonalMatrix, Symbol
>>> n = Symbol('n', integer=True)
>>> m = Symbol('m', integer=True)
>>> D = DiagonalMatrix(MatrixSymbol('x', 2, 3))
>>> D[1, 2]
0
>>> D[1, 1]
x[1, 1]
The length of the diagonal -- the lesser of the two dimensions of `M` --
is accessed through the `diagonal_length` property:
>>> D.diagonal_length
2
>>> DiagonalMatrix(MatrixSymbol('x', n + 1, n)).diagonal_length
n
When one of the dimensions is symbolic the other will be treated as
though it is smaller:
>>> tall = DiagonalMatrix(MatrixSymbol('x', n, 3))
>>> tall.diagonal_length
3
>>> tall[10, 1]
0
When the size of the diagonal is not known, a value of None will
be returned:
>>> DiagonalMatrix(MatrixSymbol('x', n, m)).diagonal_length is None
True
"""
arg = property(lambda self: self.args[0])
shape = property(lambda self: self.arg.shape) # type:ignore
@property
def diagonal_length(self):
r, c = self.shape
if r.is_Integer and c.is_Integer:
m = min(r, c)
elif r.is_Integer and not c.is_Integer:
m = r
elif c.is_Integer and not r.is_Integer:
m = c
elif r == c:
m = r
else:
try:
m = min(r, c)
except TypeError:
m = None
return m
def _entry(self, i, j, **kwargs):
if self.diagonal_length is not None:
if Ge(i, self.diagonal_length) is S.true:
return S.Zero
elif Ge(j, self.diagonal_length) is S.true:
return S.Zero
eq = Eq(i, j)
if eq is S.true:
return self.arg[i, i]
elif eq is S.false:
return S.Zero
return self.arg[i, j]*KroneckerDelta(i, j)
class DiagonalOf(MatrixExpr):
"""DiagonalOf(M) will create a matrix expression that
is equivalent to the diagonal of `M`, represented as
a single column matrix.
Examples
========
>>> from sympy import MatrixSymbol, DiagonalOf, Symbol
>>> n = Symbol('n', integer=True)
>>> m = Symbol('m', integer=True)
>>> x = MatrixSymbol('x', 2, 3)
>>> diag = DiagonalOf(x)
>>> diag.shape
(2, 1)
The diagonal can be addressed like a matrix or vector and will
return the corresponding element of the original matrix:
>>> diag[1, 0] == diag[1] == x[1, 1]
True
The length of the diagonal -- the lesser of the two dimensions of `M` --
is accessed through the `diagonal_length` property:
>>> diag.diagonal_length
2
>>> DiagonalOf(MatrixSymbol('x', n + 1, n)).diagonal_length
n
When only one of the dimensions is symbolic the other will be
treated as though it is smaller:
>>> dtall = DiagonalOf(MatrixSymbol('x', n, 3))
>>> dtall.diagonal_length
3
When the size of the diagonal is not known, a value of None will
be returned:
>>> DiagonalOf(MatrixSymbol('x', n, m)).diagonal_length is None
True
"""
arg = property(lambda self: self.args[0])
@property
def shape(self):
r, c = self.arg.shape
if r.is_Integer and c.is_Integer:
m = min(r, c)
elif r.is_Integer and not c.is_Integer:
m = r
elif c.is_Integer and not r.is_Integer:
m = c
elif r == c:
m = r
else:
try:
m = min(r, c)
except TypeError:
m = None
return m, S.One
@property
def diagonal_length(self):
return self.shape[0]
def _entry(self, i, j, **kwargs):
return self.arg._entry(i, i, **kwargs)
class DiagMatrix(MatrixExpr):
"""
Turn a vector into a diagonal matrix.
"""
def __new__(cls, vector):
vector = _sympify(vector)
obj = MatrixExpr.__new__(cls, vector)
shape = vector.shape
dim = shape[1] if shape[0] == 1 else shape[0]
if vector.shape[0] != 1:
obj._iscolumn = True
else:
obj._iscolumn = False
obj._shape = (dim, dim)
obj._vector = vector
return obj
@property
def shape(self):
return self._shape
def _entry(self, i, j, **kwargs):
if self._iscolumn:
result = self._vector._entry(i, 0, **kwargs)
else:
result = self._vector._entry(0, j, **kwargs)
if i != j:
result *= KroneckerDelta(i, j)
return result
def _eval_transpose(self):
return self
def as_explicit(self):
from sympy import diag
return diag(*list(self._vector.as_explicit()))
def doit(self, **hints):
from sympy.assumptions import ask, Q
from sympy import Transpose, Mul, MatMul
from sympy import MatrixBase, eye
vector = self._vector
# This accounts for shape (1, 1) and identity matrices, among others:
if ask(Q.diagonal(vector)):
return vector
if isinstance(vector, MatrixBase):
ret = eye(max(vector.shape))
for i in range(ret.shape[0]):
ret[i, i] = vector[i]
return type(vector)(ret)
if vector.is_MatMul:
matrices = [arg for arg in vector.args if arg.is_Matrix]
scalars = [arg for arg in vector.args if arg not in matrices]
if scalars:
return Mul.fromiter(scalars)*DiagMatrix(MatMul.fromiter(matrices).doit()).doit()
if isinstance(vector, Transpose):
vector = vector.arg
return DiagMatrix(vector)
def diagonalize_vector(vector):
return DiagMatrix(vector).doit()
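# Hedged example (added comment): for an explicit column vector the helper
# returns the corresponding explicit diagonal matrix,
#
# >>> from sympy import Matrix
# >>> diagonalize_vector(Matrix([1, 2, 3]))
# Matrix([
# [1, 0, 0],
# [0, 2, 0],
# [0, 0, 3]])
#
# while for a symbolic vector it stays as an unevaluated DiagMatrix.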
|
c1231dc8afb1e7f5bde949d91a86ae348154d973f7621d93b748aa2bf92e3ff7 | from sympy.core.compatibility import reduce
import operator
from sympy.core import Add, Basic, sympify
from sympy.core.add import add
from sympy.functions import adjoint
from sympy.matrices.common import ShapeError
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, flatten, sort, condition,
exhaust, do_one, glom)
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices.expressions.special import ZeroMatrix, GenericZeroMatrix
from sympy.utilities import default_sort_key, sift
# XXX: MatAdd should perhaps not subclass directly from Add
class MatAdd(MatrixExpr, Add):
"""A Sum of Matrix Expressions
MatAdd inherits from and operates like SymPy Add
Examples
========
>>> from sympy import MatAdd, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> C = MatrixSymbol('C', 5, 5)
>>> MatAdd(A, B, C)
A + B + C
"""
is_MatAdd = True
identity = GenericZeroMatrix()
def __new__(cls, *args, evaluate=False, check=False, _sympify=True):
if not args:
return cls.identity
# This must be removed aggressively in the constructor to avoid
# TypeErrors from GenericZeroMatrix().shape
args = list(filter(lambda i: cls.identity != i, args))
if _sympify:
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
if check:
if all(not isinstance(i, MatrixExpr) for i in args):
return Add.fromiter(args)
validate(*args)
if evaluate:
if all(not isinstance(i, MatrixExpr) for i in args):
return Add(*args, evaluate=True)
obj = canonicalize(obj)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j, **kwargs):
return Add(*[arg._entry(i, j, **kwargs) for arg in self.args])
def _eval_transpose(self):
return MatAdd(*[transpose(arg) for arg in self.args]).doit()
def _eval_adjoint(self):
return MatAdd(*[adjoint(arg) for arg in self.args]).doit()
def _eval_trace(self):
from .trace import trace
return Add(*[trace(arg) for arg in self.args]).doit()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return canonicalize(MatAdd(*args))
def _eval_derivative_matrix_lines(self, x):
add_lines = [arg._eval_derivative_matrix_lines(x) for arg in self.args]
return [j for i in add_lines for j in i]
add.register_handlerclass((Add, MatAdd), MatAdd)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
factor_of = lambda arg: arg.as_coeff_mmul()[0]
matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1])
def combine(cnt, mat):
if cnt == 1:
return mat
else:
return cnt * mat
def merge_explicit(matadd):
""" Merge explicit MatrixBase arguments
Examples
========
>>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint
>>> from sympy.matrices.expressions.matadd import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = eye(2)
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatAdd(A, B, C)
>>> pprint(X)
[1 0] [1 2]
A + [ ] + [ ]
[0 1] [3 4]
>>> pprint(merge_explicit(X))
[2 2]
A + [ ]
[3 5]
"""
groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
if len(groups[True]) > 1:
return MatAdd(*(groups[False] + [reduce(operator.add, groups[True])]))
else:
return matadd
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
unpack,
flatten,
glom(matrix_of, factor_of, combine),
merge_explicit,
sort(default_sort_key))
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
do_one(*rules)))
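# Hedged sketch (added comment): the rule pipeline above removes zero terms,
# merges explicit matrices and collects repeated terms via ``glom``, e.g.
#
# >>> from sympy import MatrixSymbol, ZeroMatrix
# >>> A = MatrixSymbol('A', 2, 2)
# >>> MatAdd(A, ZeroMatrix(2, 2), A).doit()
# 2*A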
|
f821c771797418542182b9d24479c1cec1b6b99859dbbb928bfa02ac90fc5ad1 | from sympy import Trace
from sympy.testing.pytest import raises, slow
from sympy.matrices.expressions.blockmatrix import (
block_collapse, bc_matmul, bc_block_plus_ident, BlockDiagMatrix,
BlockMatrix, bc_dist, bc_matadd, bc_transpose, bc_inverse,
blockcut, reblock_2x2, deblock)
from sympy.matrices.expressions import (MatrixSymbol, Identity,
Inverse, trace, Transpose, det, ZeroMatrix)
from sympy.matrices.common import NonInvertibleMatrixError
from sympy.matrices import (
Matrix, ImmutableMatrix, ImmutableSparseMatrix)
from sympy.core import Tuple, symbols, Expr
from sympy.functions import transpose
i, j, k, l, m, n, p = symbols('i:n, p', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
C = MatrixSymbol('C', n, n)
D = MatrixSymbol('D', n, n)
G = MatrixSymbol('G', n, n)
H = MatrixSymbol('H', n, n)
b1 = BlockMatrix([[G, H]])
b2 = BlockMatrix([[G], [H]])
def test_bc_matmul():
assert bc_matmul(H*b1*b2*G) == BlockMatrix([[(H*G*G + H*H*H)*G]])
def test_bc_matadd():
assert bc_matadd(BlockMatrix([[G, H]]) + BlockMatrix([[H, H]])) == \
BlockMatrix([[G+H, H+H]])
def test_bc_transpose():
assert bc_transpose(Transpose(BlockMatrix([[A, B], [C, D]]))) == \
BlockMatrix([[A.T, C.T], [B.T, D.T]])
def test_bc_dist_diag():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
C = MatrixSymbol('C', l, l)
X = BlockDiagMatrix(A, B, C)
assert bc_dist(X+X).equals(BlockDiagMatrix(2*A, 2*B, 2*C))
def test_block_plus_ident():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', m, m)
X = BlockMatrix([[A, B], [C, D]])
Z = MatrixSymbol('Z', n + m, n + m)
assert bc_block_plus_ident(X + Identity(m + n) + Z) == \
BlockDiagMatrix(Identity(n), Identity(m)) + X + Z
def test_BlockMatrix():
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', n, k)
C = MatrixSymbol('C', l, m)
D = MatrixSymbol('D', l, k)
M = MatrixSymbol('M', m + k, p)
N = MatrixSymbol('N', l + n, k + m)
X = BlockMatrix(Matrix([[A, B], [C, D]]))
assert X.__class__(*X.args) == X
# block_collapse does nothing on normal inputs
E = MatrixSymbol('E', n, m)
assert block_collapse(A + 2*E) == A + 2*E
F = MatrixSymbol('F', m, m)
assert block_collapse(E.T*A*F) == E.T*A*F
assert X.shape == (l + n, k + m)
assert X.blockshape == (2, 2)
assert transpose(X) == BlockMatrix(Matrix([[A.T, C.T], [B.T, D.T]]))
assert transpose(X).shape == X.shape[::-1]
# Test that BlockMatrices and MatrixSymbols can still mix
assert (X*M).is_MatMul
assert X._blockmul(M).is_MatMul
assert (X*M).shape == (n + l, p)
assert (X + N).is_MatAdd
assert X._blockadd(N).is_MatAdd
assert (X + N).shape == X.shape
E = MatrixSymbol('E', m, 1)
F = MatrixSymbol('F', k, 1)
Y = BlockMatrix(Matrix([[E], [F]]))
assert (X*Y).shape == (l + n, 1)
assert block_collapse(X*Y).blocks[0, 0] == A*E + B*F
assert block_collapse(X*Y).blocks[1, 0] == C*E + D*F
# block_collapse passes down into container objects, transposes, and inverse
assert block_collapse(transpose(X*Y)) == transpose(block_collapse(X*Y))
assert block_collapse(Tuple(X*Y, 2*X)) == (
block_collapse(X*Y), block_collapse(2*X))
# Make sure that MatrixSymbols will enter 1x1 BlockMatrix if it simplifies
Ab = BlockMatrix([[A]])
Z = MatrixSymbol('Z', *A.shape)
assert block_collapse(Ab + Z) == A + Z
def test_block_collapse_explicit_matrices():
A = Matrix([[1, 2], [3, 4]])
assert block_collapse(BlockMatrix([[A]])) == A
A = ImmutableSparseMatrix([[1, 2], [3, 4]])
assert block_collapse(BlockMatrix([[A]])) == A
def test_issue_17624():
a = MatrixSymbol("a", 2, 2)
z = ZeroMatrix(2, 2)
b = BlockMatrix([[a, z], [z, z]])
assert block_collapse(b * b) == BlockMatrix([[a**2, z], [z, z]])
assert block_collapse(b * b * b) == BlockMatrix([[a**3, z], [z, z]])
def test_issue_18618():
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert A == Matrix(BlockDiagMatrix(A))
def test_BlockMatrix_trace():
A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']
X = BlockMatrix([[A, B], [C, D]])
assert trace(X) == trace(A) + trace(D)
assert trace(BlockMatrix([ZeroMatrix(n, n)])) == 0
def test_BlockMatrix_Determinant():
A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']
X = BlockMatrix([[A, B], [C, D]])
from sympy import assuming, Q
with assuming(Q.invertible(A)):
assert det(X) == det(A) * det(D - C*A.I*B)
assert isinstance(det(X), Expr)
assert det(BlockMatrix([A])) == det(A)
assert det(BlockMatrix([ZeroMatrix(n, n)])) == 0
def test_squareBlockMatrix():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', m, m)
X = BlockMatrix([[A, B], [C, D]])
Y = BlockMatrix([[A]])
assert X.is_square
Q = X + Identity(m + n)
assert (block_collapse(Q) ==
BlockMatrix([[A + Identity(n), B], [C, D + Identity(m)]]))
assert (X + MatrixSymbol('Q', n + m, n + m)).is_MatAdd
assert (X * MatrixSymbol('Q', n + m, n + m)).is_MatMul
assert block_collapse(Y.I) == A.I
assert isinstance(X.inverse(), Inverse)
assert not X.is_Identity
Z = BlockMatrix([[Identity(n), B], [C, D]])
assert not Z.is_Identity
def test_BlockMatrix_2x2_inverse_symbolic():
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', n, k - m)
C = MatrixSymbol('C', k - n, m)
D = MatrixSymbol('D', k - n, k - m)
X = BlockMatrix([[A, B], [C, D]])
assert X.is_square and X.shape == (k, k)
assert isinstance(block_collapse(X.I), Inverse) # Can't invert when none of the blocks is square
# test code path where only A is invertible
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = ZeroMatrix(m, m)
X = BlockMatrix([[A, B], [C, D]])
assert block_collapse(X.inverse()) == BlockMatrix([
[A.I + A.I * B * (D - C * A.I * B).I * C * A.I, -A.I * B * (D - C * A.I * B).I],
[-(D - C * A.I * B).I * C * A.I, (D - C * A.I * B).I],
])
# test code path where only B is invertible
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', n, n)
C = ZeroMatrix(m, m)
D = MatrixSymbol('D', m, n)
X = BlockMatrix([[A, B], [C, D]])
assert block_collapse(X.inverse()) == BlockMatrix([
[-(C - D * B.I * A).I * D * B.I, (C - D * B.I * A).I],
[B.I + B.I * A * (C - D * B.I * A).I * D * B.I, -B.I * A * (C - D * B.I * A).I],
])
# test code path where only C is invertible
A = MatrixSymbol('A', n, m)
B = ZeroMatrix(n, n)
C = MatrixSymbol('C', m, m)
D = MatrixSymbol('D', m, n)
X = BlockMatrix([[A, B], [C, D]])
assert block_collapse(X.inverse()) == BlockMatrix([
[-C.I * D * (B - A * C.I * D).I, C.I + C.I * D * (B - A * C.I * D).I * A * C.I],
[(B - A * C.I * D).I, -(B - A * C.I * D).I * A * C.I],
])
# test code path where only D is invertible
A = ZeroMatrix(n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', m, m)
X = BlockMatrix([[A, B], [C, D]])
assert block_collapse(X.inverse()) == BlockMatrix([
[(A - B * D.I * C).I, -(A - B * D.I * C).I * B * D.I],
[-D.I * C * (A - B * D.I * C).I, D.I + D.I * C * (A - B * D.I * C).I * B * D.I],
])
def test_BlockMatrix_2x2_inverse_numeric():
"""Test 2x2 block matrix inversion numerically for all 4 formulas"""
M = Matrix([[1, 2], [3, 4]])
# rank deficient matrices that have full rank when two of them combined
D1 = Matrix([[1, 2], [2, 4]])
D2 = Matrix([[1, 3], [3, 9]])
D3 = Matrix([[1, 4], [4, 16]])
assert D1.rank() == D2.rank() == D3.rank() == 1
assert (D1 + D2).rank() == (D2 + D3).rank() == (D3 + D1).rank() == 2
# Only A is invertible
K = BlockMatrix([[M, D1], [D2, D3]])
assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()
# Only B is invertible
K = BlockMatrix([[D1, M], [D2, D3]])
assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()
# Only C is invertible
K = BlockMatrix([[D1, D2], [M, D3]])
assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()
# Only D is invertible
K = BlockMatrix([[D1, D2], [D3, M]])
assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()
@slow
def test_BlockMatrix_3x3_symbolic():
# Only test one of these, instead of all permutations, because it's slow
rowblocksizes = (n, m, k)
colblocksizes = (m, k, n)
K = BlockMatrix([
[MatrixSymbol('M%s%s' % (rows, cols), rows, cols) for cols in colblocksizes]
for rows in rowblocksizes
])
collapse = block_collapse(K.I)
assert isinstance(collapse, BlockMatrix)
def test_BlockDiagMatrix():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
C = MatrixSymbol('C', l, l)
M = MatrixSymbol('M', n + m + l, n + m + l)
X = BlockDiagMatrix(A, B, C)
Y = BlockDiagMatrix(A, 2*B, 3*C)
assert X.blocks[1, 1] == B
assert X.shape == (n + m + l, n + m + l)
assert all(X.blocks[i, j].is_ZeroMatrix if i != j else X.blocks[i, j] in [A, B, C]
for i in range(3) for j in range(3))
assert X.__class__(*X.args) == X
assert X.get_diag_blocks() == (A, B, C)
assert isinstance(block_collapse(X.I * X), Identity)
assert bc_matmul(X*X) == BlockDiagMatrix(A*A, B*B, C*C)
assert block_collapse(X*X) == BlockDiagMatrix(A*A, B*B, C*C)
#XXX: should be == ??
assert block_collapse(X + X).equals(BlockDiagMatrix(2*A, 2*B, 2*C))
assert block_collapse(X*Y) == BlockDiagMatrix(A*A, 2*B*B, 3*C*C)
assert block_collapse(X + Y) == BlockDiagMatrix(2*A, 3*B, 4*C)
# Ensure that BlockDiagMatrices can still interact with normal MatrixExprs
assert (X*(2*M)).is_MatMul
assert (X + (2*M)).is_MatAdd
assert (X._blockmul(M)).is_MatMul
assert (X._blockadd(M)).is_MatAdd
def test_BlockDiagMatrix_nonsquare():
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', k, l)
X = BlockDiagMatrix(A, B)
assert X.shape == (n + k, m + l)
assert X.rowblocksizes == [n, k]
assert X.colblocksizes == [m, l]
C = MatrixSymbol('C', n, m)
D = MatrixSymbol('D', k, l)
Y = BlockDiagMatrix(C, D)
assert block_collapse(X + Y) == BlockDiagMatrix(A + C, B + D)
assert block_collapse(X * Y.T) == BlockDiagMatrix(A * C.T, B * D.T)
raises(NonInvertibleMatrixError, lambda: BlockDiagMatrix(A, C.T).inverse())
def test_BlockDiagMatrix_determinant():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
assert det(BlockDiagMatrix()) == 1
assert det(BlockDiagMatrix(A)) == det(A)
assert det(BlockDiagMatrix(A, B)) == det(A) * det(B)
# non-square blocks
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', n, m)
assert det(BlockDiagMatrix(C, D)) == 0
def test_BlockDiagMatrix_trace():
assert trace(BlockDiagMatrix()) == 0
assert trace(BlockDiagMatrix(ZeroMatrix(n, n))) == 0
A = MatrixSymbol('A', n, n)
assert trace(BlockDiagMatrix(A)) == trace(A)
B = MatrixSymbol('B', m, m)
assert trace(BlockDiagMatrix(A, B)) == trace(A) + trace(B)
# non-square blocks
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', n, m)
assert isinstance(trace(BlockDiagMatrix(C, D)), Trace)
def test_BlockDiagMatrix_transpose():
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', k, l)
assert transpose(BlockDiagMatrix()) == BlockDiagMatrix()
assert transpose(BlockDiagMatrix(A)) == BlockDiagMatrix(A.T)
assert transpose(BlockDiagMatrix(A, B)) == BlockDiagMatrix(A.T, B.T)
def test_issue_2460():
bdm1 = BlockDiagMatrix(Matrix([i]), Matrix([j]))
bdm2 = BlockDiagMatrix(Matrix([k]), Matrix([l]))
assert block_collapse(bdm1 + bdm2) == BlockDiagMatrix(Matrix([i + k]), Matrix([j + l]))
def test_blockcut():
A = MatrixSymbol('A', n, m)
B = blockcut(A, (n/2, n/2), (m/2, m/2))
assert B == BlockMatrix([[A[:n/2, :m/2], A[:n/2, m/2:]],
[A[n/2:, :m/2], A[n/2:, m/2:]]])
M = ImmutableMatrix(4, 4, range(16))
B = blockcut(M, (2, 2), (2, 2))
assert M == ImmutableMatrix(B)
B = blockcut(M, (1, 3), (2, 2))
assert ImmutableMatrix(B.blocks[0, 1]) == ImmutableMatrix([[2, 3]])
def test_reblock_2x2():
B = BlockMatrix([[MatrixSymbol('A_%d%d'%(i,j), 2, 2)
for j in range(3)]
for i in range(3)])
assert B.blocks.shape == (3, 3)
BB = reblock_2x2(B)
assert BB.blocks.shape == (2, 2)
assert B.shape == BB.shape
assert B.as_explicit() == BB.as_explicit()
def test_deblock():
B = BlockMatrix([[MatrixSymbol('A_%d%d'%(i,j), n, n)
for j in range(4)]
for i in range(4)])
assert deblock(reblock_2x2(B)) == B
def test_block_collapse_type():
bm1 = BlockDiagMatrix(ImmutableMatrix([1]), ImmutableMatrix([2]))
bm2 = BlockDiagMatrix(ImmutableMatrix([3]), ImmutableMatrix([4]))
assert bm1.T.__class__ == BlockDiagMatrix
assert block_collapse(bm1 - bm2).__class__ == BlockDiagMatrix
assert block_collapse(Inverse(bm1)).__class__ == BlockDiagMatrix
assert block_collapse(Transpose(bm1)).__class__ == BlockDiagMatrix
assert bc_transpose(Transpose(bm1)).__class__ == BlockDiagMatrix
assert bc_inverse(Inverse(bm1)).__class__ == BlockDiagMatrix
def test_invalid_block_matrix():
raises(ValueError, lambda: BlockMatrix([
[Identity(2), Identity(5)],
]))
raises(ValueError, lambda: BlockMatrix([
[Identity(n), Identity(m)],
]))
raises(ValueError, lambda: BlockMatrix([
[ZeroMatrix(n, n), ZeroMatrix(n, n)],
[ZeroMatrix(n, n - 1), ZeroMatrix(n, n + 1)],
]))
raises(ValueError, lambda: BlockMatrix([
[ZeroMatrix(n - 1, n), ZeroMatrix(n, n)],
[ZeroMatrix(n + 1, n), ZeroMatrix(n, n)],
]))
|
e02ffc6b73f11b516e8733a509e8e0de6c086473220a6138287d5e4f2fff4cab | from sympy.core.relational import Eq, is_eq
from sympy.core.basic import Basic
from sympy.core.logic import fuzzy_and, fuzzy_bool
from sympy.logic.boolalg import And
from sympy.multipledispatch import dispatch
from sympy.sets.sets import tfn, ProductSet, Interval, FiniteSet, Set
@dispatch(Interval, FiniteSet) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
return False
@dispatch(FiniteSet, Interval) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
return False
@dispatch(Interval, Interval) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
return And(Eq(lhs.left, rhs.left),
Eq(lhs.right, rhs.right),
lhs.left_open == rhs.left_open,
lhs.right_open == rhs.right_open)
@dispatch(FiniteSet, FiniteSet) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
def all_in_both():
s_set = set(lhs.args)
o_set = set(rhs.args)
yield fuzzy_and(lhs._contains(e) for e in o_set - s_set)
yield fuzzy_and(rhs._contains(e) for e in s_set - o_set)
return tfn[fuzzy_and(all_in_both())]
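# Illustrative note: with free symbols the mutual-containment check above is
# indeterminate, so e.g. Eq(FiniteSet(Symbol('x')), FiniteSet(1)) is left
# unevaluated rather than reduced to true or false.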
@dispatch(ProductSet, ProductSet) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
if len(lhs.sets) != len(rhs.sets):
return False
eqs = (is_eq(x, y) for x, y in zip(lhs.sets, rhs.sets))
return tfn[fuzzy_and(map(fuzzy_bool, eqs))]
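# Illustrative note: ProductSets of different arity compare unequal outright,
# while equal-length products reduce to a fuzzy AND of pairwise is_eq checks
# on their factors.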
@dispatch(Set, Basic) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
return False
@dispatch(Set, Set) # type:ignore
def _eval_is_eq(lhs, rhs): # noqa: F811
return None
|
ca4b52ab5e959cd32f5cc96800c37771dbd39d6979474a3bbe7604e671540555 | from sympy import symbols, S, oo
from sympy.core import Basic, Expr
from sympy.core.numbers import Infinity, NegativeInfinity
from sympy.multipledispatch import dispatch
from sympy.sets import Interval, FiniteSet
# XXX: The functions in this module are clearly not tested and are broken in a
# number of ways.
_x, _y = symbols("x y")
@dispatch(Basic, Basic) # type: ignore # noqa:F811
def _set_add(x, y): # noqa:F811
return None
@dispatch(Expr, Expr) # type: ignore # noqa:F811
def _set_add(x, y): # noqa:F811
return x+y
@dispatch(Interval, Interval) # type: ignore # noqa:F811
def _set_add(x, y): # noqa:F811
"""
Additions in interval arithmetic
https://en.wikipedia.org/wiki/Interval_arithmetic
"""
return Interval(x.start + y.start, x.end + y.end,
x.left_open or y.left_open, x.right_open or y.right_open)
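# Worked example (illustrative only): endpoints add and openness propagates,
# e.g. _set_add(Interval(0, 1), Interval(2, 3, True, False)) gives
# Interval(2, 4, True, False).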
@dispatch(Interval, Infinity) # type: ignore # noqa:F811
def _set_add(x, y): # noqa:F811
if x.start is S.NegativeInfinity:
return Interval(-oo, oo)
    return FiniteSet(S.Infinity)  # adding +oo sends every finite element to +oo
@dispatch(Interval, NegativeInfinity) # type: ignore # noqa:F811
def _set_add(x, y): # noqa:F811
if x.end is S.Infinity:
return Interval(-oo, oo)
    return FiniteSet(S.NegativeInfinity)  # adding -oo sends every finite element to -oo
@dispatch(Basic, Basic) # type: ignore
def _set_sub(x, y): # noqa:F811
return None
@dispatch(Expr, Expr) # type: ignore # noqa:F811
def _set_sub(x, y): # noqa:F811
return x-y
@dispatch(Interval, Interval) # type: ignore # noqa:F811
def _set_sub(x, y): # noqa:F811
"""
Subtractions in interval arithmetic
https://en.wikipedia.org/wiki/Interval_arithmetic
"""
return Interval(x.start - y.end, x.end - y.start,
x.left_open or y.right_open, x.right_open or y.left_open)
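# Worked example (illustrative only): [a, b] - [c, d] = [a - d, b - c],
# e.g. _set_sub(Interval(0, 1), Interval(2, 3)) gives Interval(-3, -1).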
@dispatch(Interval, Infinity) # type: ignore # noqa:F811
def _set_sub(x, y): # noqa:F811
if x.start is S.NegativeInfinity:
return Interval(-oo, oo)
return FiniteSet(-oo)
@dispatch(Interval, NegativeInfinity) # type: ignore # noqa:F811
def _set_sub(x, y): # noqa:F811
if x.start is S.NegativeInfinity:
return Interval(-oo, oo)
    return FiniteSet(oo)  # subtracting -oo sends every finite element to +oo
|
3b1e03721fff6ebdd54bace8920fb58b4151259eee45aaf32369d032c870eb9d | from sympy import (Interval, Intersection, Set, EmptySet, S, sympify,
FiniteSet, Union, ComplexRegion, ProductSet)
from sympy.multipledispatch import dispatch
from sympy.sets.fancysets import (Naturals, Naturals0, Integers, Rationals,
Reals)
from sympy.sets.sets import UniversalSet
@dispatch(Naturals0, Naturals) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Rationals, Naturals) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Rationals, Naturals0) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Reals, Naturals) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Reals, Naturals0) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Reals, Rationals) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(Integers, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
intersect = Intersection(a, b)
if intersect == a:
return b
elif intersect == b:
return a
@dispatch(ComplexRegion, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
if b.is_subset(S.Reals):
# treat a subset of reals as a complex region
b = ComplexRegion.from_real(b)
if b.is_ComplexRegion:
# a in rectangular form
if (not a.polar) and (not b.polar):
return ComplexRegion(Union(a.sets, b.sets))
# a in polar form
elif a.polar and b.polar:
return ComplexRegion(Union(a.sets, b.sets), polar=True)
return None
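# Illustrative note: two rectangular (or two polar) regions are merged by
# unioning their underlying ProductSets; a mix of polar and rectangular falls
# through to None, leaving the Union unevaluated.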
@dispatch(type(EmptySet), Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return b
@dispatch(UniversalSet, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return a
@dispatch(ProductSet, ProductSet) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
if b.is_subset(a):
return a
if len(b.sets) != len(a.sets):
return None
if len(a.sets) == 2:
a1, a2 = a.sets
b1, b2 = b.sets
if a1 == b1:
return a1 * Union(a2, b2)
if a2 == b2:
return Union(a1, b1) * a2
return None
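# Illustrative note: this factors a union of products with a common factor,
# e.g. (A x C) U (B x C) -> (A U B) x C for two-factor ProductSets.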
@dispatch(ProductSet, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
if b.is_subset(a):
return a
return None
@dispatch(Interval, Interval) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
if a._is_comparable(b):
from sympy.functions.elementary.miscellaneous import Min, Max
# Non-overlapping intervals
end = Min(a.end, b.end)
start = Max(a.start, b.start)
if (end < start or
(end == start and (end not in a and end not in b))):
return None
else:
start = Min(a.start, b.start)
end = Max(a.end, b.end)
left_open = ((a.start != start or a.left_open) and
(b.start != start or b.left_open))
right_open = ((a.end != end or a.right_open) and
(b.end != end or b.right_open))
return Interval(start, end, left_open, right_open)
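# Illustrative note: touching intervals merge when they share an endpoint,
# e.g. Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3), whereas
# Interval(1, 2, False, True) and Interval(2, 3, True) share no point, so the
# handler returns None and that Union is not collapsed.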
@dispatch(Interval, UniversalSet) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return S.UniversalSet
@dispatch(Interval, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
    # If `a` has open endpoints that are contained in `b`, close them.
    # This only matters for finite endpoints, because an interval never
    # actually contains oo or -oo.
open_left_in_b_and_finite = (a.left_open and
sympify(b.contains(a.start)) is S.true and
a.start.is_finite)
open_right_in_b_and_finite = (a.right_open and
sympify(b.contains(a.end)) is S.true and
a.end.is_finite)
if open_left_in_b_and_finite or open_right_in_b_and_finite:
        # Close those endpoints and return the updated pieces
open_left = a.left_open and a.start not in b
open_right = a.right_open and a.end not in b
new_a = Interval(a.start, a.end, open_left, open_right)
return {new_a, b}
return None
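# Illustrative note: an open endpoint that `b` supplies gets closed here, e.g.
# Interval(1, 3, True, True) + FiniteSet(3) collapses to
# Interval(1, 3, True, False).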
@dispatch(FiniteSet, FiniteSet) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return FiniteSet(*(a._elements | b._elements))
@dispatch(FiniteSet, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
    # If `b` already contains some elements of `a`, drop them from `a`
if any(b.contains(x) == True for x in a):
return {
FiniteSet(*[x for x in a if b.contains(x) != True]), b}
return None
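# Illustrative note: elements of `a` already covered by `b` are dropped, e.g.
# Union(Interval(0, 1), FiniteSet(1, x)) reduces to
# Union(Interval(0, 1), FiniteSet(x)).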
@dispatch(Set, Set) # type: ignore # noqa:F811
def union_sets(a, b): # noqa:F811
return None
|
da239d31ec439392f729ca113f704e46d6da38db2a9f8623d5648ef2316cfc83 | from sympy import Symbol, Contains, S, Interval, FiniteSet, oo, Eq
from sympy.core.expr import unchanged
from sympy.testing.pytest import raises
def test_contains_basic():
raises(TypeError, lambda: Contains(S.Integers, 1))
assert Contains(2, S.Integers) is S.true
assert Contains(-2, S.Naturals) is S.false
i = Symbol('i', integer=True)
assert Contains(i, S.Naturals) == Contains(i, S.Naturals, evaluate=False)
def test_issue_6194():
x = Symbol('x')
assert unchanged(Contains, x, Interval(0, 1))
assert Interval(0, 1).contains(x) == (S.Zero <= x) & (x <= 1)
assert Contains(x, FiniteSet(0)) != S.false
assert Contains(x, Interval(1, 1)) != S.false
assert Contains(x, S.Integers) != S.false
def test_issue_10326():
assert Contains(oo, Interval(-oo, oo)) == False
assert Contains(-oo, Interval(-oo, oo)) == False
def test_binary_symbols():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
assert Contains(x, FiniteSet(y, Eq(z, True))
).binary_symbols == {y, z}
def test_as_set():
x = Symbol('x')
y = Symbol('y')
# Contains is a BooleanFunction whose value depends on an arg's
# containment in a Set -- rewriting as a Set is not yet implemented
raises(NotImplementedError, lambda:
Contains(x, FiniteSet(y)).as_set())
|
1ec812f6de32cc32d4dd6d1cbf66aca61411fa4c4d4beb97280447b1a3f77fd4 | from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan,
Max, Min, Float, DisjointUnion,
FiniteSet, Intersection, imageset, I, true, false, ProductSet,
sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi,
Pow, Contains, Sum, rootof, SymmetricDifference, Piecewise,
Matrix, Range, Add, symbols, zoo, Rational)
from mpmath import mpi
from sympy.core.expr import unchanged
from sympy.core.relational import Eq, Ne, Le, Lt, LessThan
from sympy.logic import And, Or, Xor
from sympy.testing.pytest import raises, XFAIL, warns_deprecated_sympy
from sympy.abc import x, y, z, m, n
def test_imageset():
ints = S.Integers
assert imageset(x, x - 1, S.Naturals) is S.Naturals0
assert imageset(x, x + 1, S.Naturals0) is S.Naturals
assert imageset(x, abs(x), S.Naturals0) is S.Naturals0
assert imageset(x, abs(x), S.Naturals) is S.Naturals
assert imageset(x, abs(x), S.Integers) is S.Naturals0
# issue 16878a
r = symbols('r', real=True)
assert imageset(x, (x, x), S.Reals)._contains((1, r)) == None
assert imageset(x, (x, x), S.Reals)._contains((1, 2)) == False
assert (r, r) in imageset(x, (x, x), S.Reals)
assert 1 + I in imageset(x, x + I, S.Reals)
assert {1} not in imageset(x, (x,), S.Reals)
    assert (1, 1) not in imageset(x, (x,), S.Reals)
raises(TypeError, lambda: imageset(x, ints))
raises(ValueError, lambda: imageset(x, y, z, ints))
raises(ValueError, lambda: imageset(Lambda(x, cos(x)), y))
assert (1, 2) in imageset(Lambda((x, y), (x, y)), ints, ints)
raises(ValueError, lambda: imageset(Lambda(x, x), ints, ints))
assert imageset(cos, ints) == ImageSet(Lambda(x, cos(x)), ints)
def f(x):
return cos(x)
assert imageset(f, ints) == imageset(x, cos(x), ints)
f = lambda x: cos(x)
assert imageset(f, ints) == ImageSet(Lambda(x, cos(x)), ints)
assert imageset(x, 1, ints) == FiniteSet(1)
assert imageset(x, y, ints) == {y}
assert imageset((x, y), (1, z), ints, S.Reals) == {(1, z)}
clash = Symbol('x', integer=true)
assert (str(imageset(lambda x: x + clash, Interval(-2, 1)).lamda.expr)
in ('x0 + x', 'x + x0'))
x1, x2 = symbols("x1, x2")
assert imageset(lambda x, y:
Add(x, y), Interval(1, 2), Interval(2, 3)).dummy_eq(
ImageSet(Lambda((x1, x2), x1 + x2),
Interval(1, 2), Interval(2, 3)))
def test_is_empty():
for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals,
S.UniversalSet]:
assert s.is_empty is False
assert S.EmptySet.is_empty is True
def test_is_finiteset():
for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals,
S.UniversalSet]:
assert s.is_finite_set is False
assert S.EmptySet.is_finite_set is True
assert FiniteSet(1, 2).is_finite_set is True
assert Interval(1, 2).is_finite_set is False
assert Interval(x, y).is_finite_set is None
assert ProductSet(FiniteSet(1), FiniteSet(2)).is_finite_set is True
assert ProductSet(FiniteSet(1), Interval(1, 2)).is_finite_set is False
assert ProductSet(FiniteSet(1), Interval(x, y)).is_finite_set is None
assert Union(Interval(0, 1), Interval(2, 3)).is_finite_set is False
assert Union(FiniteSet(1), Interval(2, 3)).is_finite_set is False
assert Union(FiniteSet(1), FiniteSet(2)).is_finite_set is True
assert Union(FiniteSet(1), Interval(x, y)).is_finite_set is None
assert Intersection(Interval(x, y), FiniteSet(1)).is_finite_set is True
assert Intersection(Interval(x, y), Interval(1, 2)).is_finite_set is None
assert Intersection(FiniteSet(x), FiniteSet(y)).is_finite_set is True
assert Complement(FiniteSet(1), Interval(x, y)).is_finite_set is True
assert Complement(Interval(x, y), FiniteSet(1)).is_finite_set is None
assert Complement(Interval(1, 2), FiniteSet(x)).is_finite_set is False
assert DisjointUnion(Interval(-5, 3), FiniteSet(x, y)).is_finite_set is False
assert DisjointUnion(S.EmptySet, FiniteSet(x, y), S.EmptySet).is_finite_set is True
def test_deprecated_is_EmptySet():
with warns_deprecated_sympy():
S.EmptySet.is_EmptySet
def test_interval_arguments():
assert Interval(0, oo) == Interval(0, oo, False, True)
assert Interval(0, oo).right_open is true
assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
assert Interval(-oo, 0).left_open is true
assert Interval(oo, -oo) == S.EmptySet
assert Interval(oo, oo) == S.EmptySet
assert Interval(-oo, -oo) == S.EmptySet
assert Interval(oo, x) == S.EmptySet
assert Interval(oo, oo) == S.EmptySet
assert Interval(x, -oo) == S.EmptySet
assert Interval(x, x) == {x}
assert isinstance(Interval(1, 1), FiniteSet)
e = Sum(x, (x, 1, 3))
assert isinstance(Interval(e, e), FiniteSet)
assert Interval(1, 0) == S.EmptySet
assert Interval(1, 1).measure == 0
assert Interval(1, 1, False, True) == S.EmptySet
assert Interval(1, 1, True, False) == S.EmptySet
assert Interval(1, 1, True, True) == S.EmptySet
assert isinstance(Interval(0, Symbol('a')), Interval)
assert Interval(Symbol('a', real=True, positive=True), 0) == S.EmptySet
raises(ValueError, lambda: Interval(0, S.ImaginaryUnit))
raises(ValueError, lambda: Interval(0, Symbol('z', extended_real=False)))
raises(ValueError, lambda: Interval(x, x + S.ImaginaryUnit))
raises(NotImplementedError, lambda: Interval(0, 1, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y)))
def test_interval_symbolic_end_points():
a = Symbol('a', real=True)
assert Union(Interval(0, a), Interval(0, 3)).sup == Max(a, 3)
assert Union(Interval(a, 0), Interval(-3, 0)).inf == Min(-3, a)
assert Interval(0, a).contains(1) == LessThan(1, a)
def test_interval_is_empty():
x, y = symbols('x, y')
r = Symbol('r', real=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
nn = Symbol('nn', nonnegative=True)
assert Interval(1, 2).is_empty == False
assert Interval(3, 3).is_empty == False # FiniteSet
assert Interval(r, r).is_empty == False # FiniteSet
assert Interval(r, r + nn).is_empty == False
assert Interval(x, x).is_empty == False
assert Interval(1, oo).is_empty == False
assert Interval(-oo, oo).is_empty == False
assert Interval(-oo, 1).is_empty == False
assert Interval(x, y).is_empty == None
assert Interval(r, oo).is_empty == False # real implies finite
assert Interval(n, 0).is_empty == False
assert Interval(n, 0, left_open=True).is_empty == False
assert Interval(p, 0).is_empty == True # EmptySet
assert Interval(nn, 0).is_empty == None
assert Interval(n, p).is_empty == False
assert Interval(0, p, left_open=True).is_empty == False
assert Interval(0, p, right_open=True).is_empty == False
assert Interval(0, nn, left_open=True).is_empty == None
assert Interval(0, nn, right_open=True).is_empty == None
def test_union():
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
Interval(1, 3, False, True)
assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
Interval(1, 3, True, True)
assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
Interval(1, 3)
assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
Interval(1, 3)
assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2)
assert Union(S.EmptySet) == S.EmptySet
assert Union(Interval(0, 1), *[FiniteSet(1.0/n) for n in range(1, 10)]) == \
Interval(0, 1)
# issue #18241:
x = Symbol('x')
assert Union(Interval(0, 1), FiniteSet(1, x)) == Union(
Interval(0, 1), FiniteSet(x))
assert unchanged(Union, Interval(0, 1), FiniteSet(2, x))
assert Interval(1, 2).union(Interval(2, 3)) == \
Interval(1, 2) + Interval(2, 3)
assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
assert Union(Set()) == Set()
assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3)
assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs')
assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3)
assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3)
assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4)
assert FiniteSet(1, 2, 3) & S.EmptySet == S.EmptySet
assert FiniteSet(1, 2, 3) | S.EmptySet == FiniteSet(1, 2, 3)
x = Symbol("x")
y = Symbol("y")
z = Symbol("z")
assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \
FiniteSet(x, FiniteSet(y, z))
# Test that Intervals and FiniteSets play nicely
assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3)
assert Interval(1, 3, True, True) + FiniteSet(3) == \
Interval(1, 3, True, False)
X = Interval(1, 3) + FiniteSet(5)
Y = Interval(1, 2) + FiniteSet(3)
XandY = X.intersect(Y)
assert 2 in X and 3 in X and 3 in XandY
assert XandY.is_subset(X) and XandY.is_subset(Y)
raises(TypeError, lambda: Union(1, 2, 3))
assert X.is_iterable is False
# issue 7843
assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \
FiniteSet(-sqrt(-I), sqrt(-I))
assert Union(S.Reals, S.Integers) == S.Reals
def test_union_iter():
# Use Range because it is ordered
u = Union(Range(3), Range(5), Range(4), evaluate=False)
# Round robin
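    # Range(3), Range(5) and Range(4) are interleaved one element at a time,
    # so shorter ranges simply drop out once exhausted.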
assert list(u) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4]
def test_union_is_empty():
assert (Interval(x, y) + FiniteSet(1)).is_empty == False
assert (Interval(x, y) + Interval(-x, y)).is_empty == None
def test_difference():
assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True)
assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True)
assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True)
assert Interval(1, 3, True) - Interval(2, 3, True) == \
Interval(1, 2, True, False)
assert Interval(0, 2) - FiniteSet(1) == \
Union(Interval(0, 1, False, True), Interval(1, 2, True, False))
# issue #18119
assert S.Reals - FiniteSet(I) == S.Reals
assert S.Reals - FiniteSet(-I, I) == S.Reals
assert Interval(0, 10) - FiniteSet(-I, I) == Interval(0, 10)
assert Interval(0, 10) - FiniteSet(1, I) == Union(
Interval.Ropen(0, 1), Interval.Lopen(1, 10))
assert S.Reals - FiniteSet(1, 2 + I, x, y**2) == Complement(
Union(Interval.open(-oo, 1), Interval.open(1, oo)), FiniteSet(x, y**2),
evaluate=False)
assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3)
assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \
FiniteSet(1, 2)
assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4)
assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
assert -1 in S.Reals - S.Naturals
def test_Complement():
A = FiniteSet(1, 3, 4)
B = FiniteSet(3, 4)
C = Interval(1, 3)
D = Interval(1, 2)
assert Complement(A, B, evaluate=False).is_iterable is True
assert Complement(A, C, evaluate=False).is_iterable is True
assert Complement(C, D, evaluate=False).is_iterable is None
assert FiniteSet(*Complement(A, B, evaluate=False)) == FiniteSet(1)
assert FiniteSet(*Complement(A, C, evaluate=False)) == FiniteSet(4)
raises(TypeError, lambda: FiniteSet(*Complement(C, A, evaluate=False)))
assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
assert Complement(Union(Interval(0, 2), FiniteSet(2, 3, 4)),
Interval(1, 3)) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
    assert 3 not in Complement(Interval(0, 5), Interval(1, 4), evaluate=False)
assert -1 in Complement(S.Reals, S.Naturals, evaluate=False)
    assert 1 not in Complement(S.Reals, S.Naturals, evaluate=False)
assert Complement(S.Integers, S.UniversalSet) == EmptySet
assert S.UniversalSet.complement(S.Integers) == EmptySet
    assert 0 not in S.Reals.intersect(S.Integers - FiniteSet(0))
assert S.EmptySet - S.Integers == S.EmptySet
assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1)
assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \
Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi))
# issue 12712
assert Complement(FiniteSet(x, y, 2), Interval(-10, 10)) == \
Complement(FiniteSet(x, y), Interval(-10, 10))
A = FiniteSet(*symbols('a:c'))
B = FiniteSet(*symbols('d:f'))
assert unchanged(Complement, ProductSet(A, A), B)
A2 = ProductSet(A, A)
B3 = ProductSet(B, B, B)
assert A2 - B3 == A2
assert B3 - A2 == B3
def test_set_operations_nonsets():
'''Tests that e.g. FiniteSet(1) * 2 raises TypeError'''
ops = [
lambda a, b: a + b,
lambda a, b: a - b,
lambda a, b: a * b,
lambda a, b: a / b,
lambda a, b: a // b,
lambda a, b: a | b,
lambda a, b: a & b,
lambda a, b: a ^ b,
# FiniteSet(1) ** 2 gives a ProductSet
#lambda a, b: a ** b,
]
Sx = FiniteSet(x)
Sy = FiniteSet(y)
sets = [
{1},
FiniteSet(1),
Interval(1, 2),
Union(Sx, Interval(1, 2)),
Intersection(Sx, Sy),
Complement(Sx, Sy),
ProductSet(Sx, Sy),
S.EmptySet,
]
nums = [0, 1, 2, S(0), S(1), S(2)]
for si in sets:
for ni in nums:
for op in ops:
raises(TypeError, lambda : op(si, ni))
raises(TypeError, lambda : op(ni, si))
raises(TypeError, lambda: si ** object())
raises(TypeError, lambda: si ** {1})
def test_complement():
assert Interval(0, 1).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True))
assert Interval(0, 1, True, False).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True))
assert Interval(0, 1, False, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True))
assert Interval(0, 1, True, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True))
assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet
assert S.UniversalSet.complement(S.Reals) == S.EmptySet
assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet
assert S.EmptySet.complement(S.Reals) == S.Reals
assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True),
Interval(3, oo, True, True))
assert FiniteSet(0).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True))
assert (FiniteSet(5) + Interval(S.NegativeInfinity,
0)).complement(S.Reals) == \
Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True)
assert FiniteSet(1, 2, 3).complement(S.Reals) == \
Interval(S.NegativeInfinity, 1, True, True) + \
Interval(1, 2, True, True) + Interval(2, 3, True, True) +\
Interval(3, S.Infinity, True, True)
assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x))
assert FiniteSet(0, x).complement(S.Reals) == Complement(Interval(-oo, 0, True, True) +
Interval(0, oo, True, True)
, FiniteSet(x), evaluate=False)
square = Interval(0, 1) * Interval(0, 1)
notsquare = square.complement(S.Reals*S.Reals)
assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(
pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)])
assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)])
def test_intersect1():
assert all(S.Integers.intersection(i) is i for i in
(S.Naturals, S.Naturals0))
assert all(i.intersection(S.Integers) is i for i in
(S.Naturals, S.Naturals0))
s = S.Naturals0
assert S.Naturals.intersection(s) is S.Naturals
assert s.intersection(S.Naturals) is S.Naturals
x = Symbol('x')
assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2)
assert Interval(0, 2).intersect(Interval(1, 2, True)) == \
Interval(1, 2, True)
assert Interval(0, 2, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, False)
assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, True)
assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \
Union(Interval(0, 1), Interval(2, 2))
assert FiniteSet(1, 2).intersect(FiniteSet(1, 2, 3)) == FiniteSet(1, 2)
assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x)
assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \
FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet
assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \
Union(Interval(0, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \
S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \
S.EmptySet
assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \
Intersection(FiniteSet(2, 3, 4, 5, 6), Union(FiniteSet('ham'), Interval(0, 5)))
assert Intersection(FiniteSet(1, 2, 3), Interval(2, x), Interval(3, y)) == \
Intersection(FiniteSet(3), Interval(2, x), Interval(3, y), evaluate=False)
assert Intersection(FiniteSet(1, 2), Interval(0, 3), Interval(x, y)) == \
Intersection({1, 2}, Interval(x, y), evaluate=False)
assert Intersection(FiniteSet(1, 2, 4), Interval(0, 3), Interval(x, y)) == \
Intersection({1, 2}, Interval(x, y), evaluate=False)
# XXX: Is the real=True necessary here?
# https://github.com/sympy/sympy/issues/17532
m, n = symbols('m, n', real=True)
assert Intersection(FiniteSet(m), FiniteSet(m, n), Interval(m, m+1)) == \
FiniteSet(m)
# issue 8217
assert Intersection(FiniteSet(x), FiniteSet(y)) == \
Intersection(FiniteSet(x), FiniteSet(y), evaluate=False)
assert FiniteSet(x).intersect(S.Reals) == \
Intersection(S.Reals, FiniteSet(x), evaluate=False)
# tests for the intersection alias
assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
def test_intersection():
# iterable
i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False)
assert i.is_iterable
assert set(i) == {S(2), S(3)}
# challenging intervals
x = Symbol('x', real=True)
i = Intersection(Interval(0, 3), Interval(x, 6))
assert (5 in i) is False
raises(TypeError, lambda: 2 in i)
# Singleton special cases
assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet
assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x)
# Products
line = Interval(0, 5)
i = Intersection(line**2, line**3, evaluate=False)
assert (2, 2) not in i
assert (2, 2, 2) not in i
raises(TypeError, lambda: list(i))
a = Intersection(Intersection(S.Integers, S.Naturals, evaluate=False), S.Reals, evaluate=False)
assert a._argset == frozenset([Intersection(S.Naturals, S.Integers, evaluate=False), S.Reals])
assert Intersection(S.Complexes, FiniteSet(S.ComplexInfinity)) == S.EmptySet
# issue 12178
assert Intersection() == S.UniversalSet
# issue 16987
assert Intersection({1}, {1}, {x}) == Intersection({1}, {x})
def test_issue_9623():
n = Symbol('n')
a = S.Reals
b = Interval(0, oo)
c = FiniteSet(n)
assert Intersection(a, b, c) == Intersection(b, c)
assert Intersection(Interval(1, 2), Interval(3, 4), FiniteSet(n)) == EmptySet
def test_is_disjoint():
assert Interval(0, 2).is_disjoint(Interval(1, 2)) == False
assert Interval(0, 2).is_disjoint(Interval(3, 4)) == True
def test_ProductSet__len__():
A = FiniteSet(1, 2)
B = FiniteSet(1, 2, 3)
assert ProductSet(A).__len__() == 2
assert ProductSet(A).__len__() is not S(2)
assert ProductSet(A, B).__len__() == 6
assert ProductSet(A, B).__len__() is not S(6)
def test_ProductSet():
# ProductSet is always a set of Tuples
assert ProductSet(S.Reals) == S.Reals ** 1
assert ProductSet(S.Reals, S.Reals) == S.Reals ** 2
assert ProductSet(S.Reals, S.Reals, S.Reals) == S.Reals ** 3
assert ProductSet(S.Reals) != S.Reals
assert ProductSet(S.Reals, S.Reals) == S.Reals * S.Reals
assert ProductSet(S.Reals, S.Reals, S.Reals) != S.Reals * S.Reals * S.Reals
assert ProductSet(S.Reals, S.Reals, S.Reals) == (S.Reals * S.Reals * S.Reals).flatten()
assert 1 not in ProductSet(S.Reals)
assert (1,) in ProductSet(S.Reals)
assert 1 not in ProductSet(S.Reals, S.Reals)
assert (1, 2) in ProductSet(S.Reals, S.Reals)
assert (1, I) not in ProductSet(S.Reals, S.Reals)
assert (1, 2, 3) in ProductSet(S.Reals, S.Reals, S.Reals)
assert (1, 2, 3) in S.Reals ** 3
assert (1, 2, 3) not in S.Reals * S.Reals * S.Reals
assert ((1, 2), 3) in S.Reals * S.Reals * S.Reals
assert (1, (2, 3)) not in S.Reals * S.Reals * S.Reals
assert (1, (2, 3)) in S.Reals * (S.Reals * S.Reals)
assert ProductSet() == FiniteSet(())
assert ProductSet(S.Reals, S.EmptySet) == S.EmptySet
# See GH-17458
for ni in range(5):
Rn = ProductSet(*(S.Reals,) * ni)
assert (1,) * ni in Rn
assert 1 not in Rn
assert (S.Reals * S.Reals) * S.Reals != S.Reals * (S.Reals * S.Reals)
S1 = S.Reals
S2 = S.Integers
x1 = pi
x2 = 3
assert x1 in S1
assert x2 in S2
assert (x1, x2) in S1 * S2
S3 = S1 * S2
x3 = (x1, x2)
assert x3 in S3
assert (x3, x3) in S3 * S3
assert x3 + x3 not in S3 * S3
raises(ValueError, lambda: S.Reals**-1)
with warns_deprecated_sympy():
ProductSet(FiniteSet(s) for s in range(2))
raises(TypeError, lambda: ProductSet(None))
S1 = FiniteSet(1, 2)
S2 = FiniteSet(3, 4)
S3 = ProductSet(S1, S2)
assert (S3.as_relational(x, y)
== And(S1.as_relational(x), S2.as_relational(y))
== And(Or(Eq(x, 1), Eq(x, 2)), Or(Eq(y, 3), Eq(y, 4))))
raises(ValueError, lambda: S3.as_relational(x))
raises(ValueError, lambda: S3.as_relational(x, 1))
raises(ValueError, lambda: ProductSet(Interval(0, 1)).as_relational(x, y))
Z2 = ProductSet(S.Integers, S.Integers)
assert Z2.contains((1, 2)) is S.true
assert Z2.contains((1,)) is S.false
assert Z2.contains(x) == Contains(x, Z2, evaluate=False)
assert Z2.contains(x).subs(x, 1) is S.false
assert Z2.contains((x, 1)).subs(x, 2) is S.true
assert Z2.contains((x, y)) == Contains((x, y), Z2, evaluate=False)
assert unchanged(Contains, (x, y), Z2)
assert Contains((1, 2), Z2) is S.true
def test_ProductSet_of_single_arg_is_not_arg():
assert unchanged(ProductSet, Interval(0, 1))
assert unchanged(ProductSet, ProductSet(Interval(0, 1)))
def test_ProductSet_is_empty():
assert ProductSet(S.Integers, S.Reals).is_empty == False
assert ProductSet(Interval(x, 1), S.Reals).is_empty == None
def test_interval_subs():
a = Symbol('a', real=True)
assert Interval(0, a).subs(a, 2) == Interval(0, 2)
assert Interval(a, 0).subs(a, 2) == S.EmptySet
def test_interval_to_mpi():
assert Interval(0, 1).to_mpi() == mpi(0, 1)
assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1)
assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1))
def test_set_evalf():
assert Interval(S(11)/64, S.Half).evalf() == Interval(
Float('0.171875'), Float('0.5'))
assert Interval(x, S.Half, right_open=True).evalf() == Interval(
x, Float('0.5'), right_open=True)
assert Interval(-oo, S.Half).evalf() == Interval(-oo, Float('0.5'))
assert FiniteSet(2, x).evalf() == FiniteSet(Float('2.0'), x)
def test_measure():
a = Symbol('a', real=True)
assert Interval(1, 3).measure == 2
assert Interval(0, a).measure == a
assert Interval(1, a).measure == a - 1
assert Union(Interval(1, 2), Interval(3, 4)).measure == 2
assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \
== 2
assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0
assert S.EmptySet.measure == 0
square = Interval(0, 10) * Interval(0, 10)
offsetsquare = Interval(5, 15) * Interval(5, 15)
band = Interval(-oo, oo) * Interval(2, 4)
assert square.measure == offsetsquare.measure == 100
assert (square + offsetsquare).measure == 175 # there is some overlap
assert (square - offsetsquare).measure == 75
assert (square * FiniteSet(1, 2, 3)).measure == 0
assert (square.intersect(band)).measure == 20
assert (square + band).measure is oo
assert (band * FiniteSet(1, 2, 3)).measure is nan
def test_is_subset():
assert Interval(0, 1).is_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_subset(Interval(0, 2)) is False
assert Interval(0, 1).is_subset(FiniteSet(0, 1)) is False
assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4))
assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False
assert FiniteSet(1).is_subset(Interval(0, 2))
assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False
assert (Interval(1, 2) + FiniteSet(3)).is_subset(
Interval(0, 2, False, True) + FiniteSet(2, 3))
assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True
assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False
assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True
assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True
assert Interval(0, 1).is_subset(S.EmptySet) is False
assert S.EmptySet.is_subset(S.EmptySet) is True
raises(ValueError, lambda: S.EmptySet.is_subset(1))
# tests for the issubset alias
assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True
assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True
assert S.Naturals.is_subset(S.Integers)
assert S.Naturals0.is_subset(S.Integers)
assert FiniteSet(x).is_subset(FiniteSet(y)) is None
assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x)) is True
assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x+1)) is False
assert Interval(0, 1).is_subset(Interval(0, 1, left_open=True)) is False
assert Interval(-2, 3).is_subset(Union(Interval(-oo, -2), Interval(3, oo))) is False
n = Symbol('n', integer=True)
assert Range(-3, 4, 1).is_subset(FiniteSet(-10, 10)) is False
assert Range(S(10)**100).is_subset(FiniteSet(0, 1, 2)) is False
assert Range(6, 0, -2).is_subset(FiniteSet(2, 4, 6)) is True
assert Range(1, oo).is_subset(FiniteSet(1, 2)) is False
assert Range(-oo, 1).is_subset(FiniteSet(1)) is False
assert Range(3).is_subset(FiniteSet(0, 1, n)) is None
assert Range(n, n + 2).is_subset(FiniteSet(n, n + 1)) is True
assert Range(5).is_subset(Interval(0, 4, right_open=True)) is False
def test_is_proper_subset():
assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False
assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0))
def test_is_superset():
assert Interval(0, 1).is_superset(Interval(0, 2)) == False
assert Interval(0, 3).is_superset(Interval(0, 2))
assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(1).is_superset(Interval(0, 2)) == False
assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False
assert (Interval(1, 2) + FiniteSet(3)).is_superset(
Interval(0, 2, False, True) + FiniteSet(2, 3)) == False
assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False
assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False
assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False
assert Interval(0, 1).is_superset(S.EmptySet) == True
assert S.EmptySet.is_superset(S.EmptySet) == True
raises(ValueError, lambda: S.EmptySet.is_superset(1))
# tests for the issuperset alias
assert Interval(0, 1).issuperset(S.EmptySet) == True
assert S.EmptySet.issuperset(S.EmptySet) == True
def test_is_proper_superset():
assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False
assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True
assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0))
def test_contains():
assert Interval(0, 2).contains(1) is S.true
assert Interval(0, 2).contains(3) is S.false
assert Interval(0, 2, True, False).contains(0) is S.false
assert Interval(0, 2, True, False).contains(2) is S.true
assert Interval(0, 2, False, True).contains(0) is S.true
assert Interval(0, 2, False, True).contains(2) is S.false
assert Interval(0, 2, True, True).contains(0) is S.false
assert Interval(0, 2, True, True).contains(2) is S.false
assert (Interval(0, 2) in Interval(0, 2)) is False
assert FiniteSet(1, 2, 3).contains(2) is S.true
assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true
assert FiniteSet(y)._contains(x) is None
raises(TypeError, lambda: x in FiniteSet(y))
assert FiniteSet({x, y})._contains({x}) is None
assert FiniteSet({x, y}).subs(y, x)._contains({x}) is True
assert FiniteSet({x, y}).subs(y, x+1)._contains({x}) is False
# issue 8197
from sympy.abc import a, b
assert isinstance(FiniteSet(b).contains(-a), Contains)
assert isinstance(FiniteSet(b).contains(a), Contains)
assert isinstance(FiniteSet(a).contains(1), Contains)
raises(TypeError, lambda: 1 in FiniteSet(a))
# issue 8209
rad1 = Pow(Pow(2, Rational(1, 3)) - 1, Rational(1, 3))
rad2 = Pow(Rational(1, 9), Rational(1, 3)) - Pow(Rational(2, 9), Rational(1, 3)) + Pow(Rational(4, 9), Rational(1, 3))
s1 = FiniteSet(rad1)
s2 = FiniteSet(rad2)
assert s1 - s2 == S.EmptySet
items = [1, 2, S.Infinity, S('ham'), -1.1]
fset = FiniteSet(*items)
assert all(item in fset for item in items)
assert all(fset.contains(item) is S.true for item in items)
assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true
assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false
assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false
assert S.EmptySet.contains(1) is S.false
assert FiniteSet(rootof(x**3 + x - 1, 0)).contains(S.Infinity) is S.false
assert rootof(x**5 + x**3 + 1, 0) in S.Reals
    assert rootof(x**5 + x**3 + 1, 1) not in S.Reals
# non-bool results
assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \
Or(And(S.One <= x, x <= 2), And(S(3) <= x, x <= 4))
assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \
And(y <= 3, y <= x, S.One <= y, S(2) <= y)
assert (S.Complexes).contains(S.ComplexInfinity) == S.false
def test_interval_symbolic():
x = Symbol('x')
e = Interval(0, 1)
assert e.contains(x) == And(S.Zero <= x, x <= 1)
raises(TypeError, lambda: x in e)
e = Interval(0, 1, True, True)
assert e.contains(x) == And(S.Zero < x, x < 1)
c = Symbol('c', real=False)
assert Interval(x, x + 1).contains(c) == False
e = Symbol('e', extended_real=True)
assert Interval(-oo, oo).contains(e) == And(
S.NegativeInfinity < e, e < S.Infinity)
def test_union_contains():
x = Symbol('x')
i1 = Interval(0, 1)
i2 = Interval(2, 3)
i3 = Union(i1, i2)
assert i3.as_relational(x) == Or(And(S.Zero <= x, x <= 1), And(S(2) <= x, x <= 3))
raises(TypeError, lambda: x in i3)
e = i3.contains(x)
assert e == i3.as_relational(x)
assert e.subs(x, -0.5) is false
assert e.subs(x, 0.5) is true
assert e.subs(x, 1.5) is false
assert e.subs(x, 2.5) is true
assert e.subs(x, 3.5) is false
U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6)
assert all(el not in U for el in [0, 4, -oo])
assert all(el in U for el in [2, 5, 10])
def test_is_number():
assert Interval(0, 1).is_number is False
assert Set().is_number is False
def test_Interval_is_left_unbounded():
assert Interval(3, 4).is_left_unbounded is False
assert Interval(-oo, 3).is_left_unbounded is True
assert Interval(Float("-inf"), 3).is_left_unbounded is True
def test_Interval_is_right_unbounded():
assert Interval(3, 4).is_right_unbounded is False
assert Interval(3, oo).is_right_unbounded is True
assert Interval(3, Float("+inf")).is_right_unbounded is True
def test_Interval_as_relational():
x = Symbol('x')
assert Interval(-1, 2, False, False).as_relational(x) == \
And(Le(-1, x), Le(x, 2))
assert Interval(-1, 2, True, False).as_relational(x) == \
And(Lt(-1, x), Le(x, 2))
assert Interval(-1, 2, False, True).as_relational(x) == \
And(Le(-1, x), Lt(x, 2))
assert Interval(-1, 2, True, True).as_relational(x) == \
And(Lt(-1, x), Lt(x, 2))
assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2))
assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2))
assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo))
assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo))
assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo))
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert Interval(x, y).as_relational(x) == (x <= y)
assert Interval(y, x).as_relational(x) == (y <= x)
def test_Finite_as_relational():
x = Symbol('x')
y = Symbol('y')
assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2))
assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5))
def test_Union_as_relational():
x = Symbol('x')
assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \
Or(And(Le(0, x), Le(x, 1)), Eq(x, 2))
assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \
And(Lt(0, x), Le(x, 1))
def test_Intersection_as_relational():
x = Symbol('x')
assert (Intersection(Interval(0, 1), FiniteSet(2),
evaluate=False).as_relational(x)
== And(And(Le(0, x), Le(x, 1)), Eq(x, 2)))
def test_Complement_as_relational():
x = Symbol('x')
expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False)
assert expr.as_relational(x) == \
And(Le(0, x), Le(x, 1), Ne(x, 2))
@XFAIL
def test_Complement_as_relational_fail():
x = Symbol('x')
expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False)
# XXX This example fails because 0 <= x changes to x >= 0
# during the evaluation.
assert expr.as_relational(x) == \
(0 <= x) & (x <= 1) & Ne(x, 2)
def test_SymmetricDifference_as_relational():
x = Symbol('x')
expr = SymmetricDifference(Interval(0, 1), FiniteSet(2), evaluate=False)
assert expr.as_relational(x) == Xor(Eq(x, 2), Le(0, x) & Le(x, 1))
def test_EmptySet():
assert S.EmptySet.as_relational(Symbol('x')) is S.false
assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet
assert S.EmptySet.boundary == S.EmptySet
def test_finite_basic():
x = Symbol('x')
A = FiniteSet(1, 2, 3)
B = FiniteSet(3, 4, 5)
AorB = Union(A, B)
AandB = A.intersect(B)
assert A.is_subset(AorB) and B.is_subset(AorB)
assert AandB.is_subset(A)
assert AandB == FiniteSet(3)
assert A.inf == 1 and A.sup == 3
assert AorB.inf == 1 and AorB.sup == 5
assert FiniteSet(x, 1, 5).sup == Max(x, 5)
assert FiniteSet(x, 1, 5).inf == Min(x, 1)
# issue 7335
assert FiniteSet(S.EmptySet) != S.EmptySet
assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3)
# Ensure a variety of types can exist in a FiniteSet
assert FiniteSet((1, 2), Float, A, -5, x, 'eggs', x**2, Interval)
assert (A > B) is False
assert (A >= B) is False
assert (A < B) is False
assert (A <= B) is False
assert AorB > A and AorB > B
assert AorB >= A and AorB >= B
assert A >= A and A <= A
assert A >= AandB and B >= AandB
assert A > AandB and B > AandB
assert FiniteSet(1.0) == FiniteSet(1)
def test_product_basic():
H, T = 'H', 'T'
unit_line = Interval(0, 1)
d6 = FiniteSet(1, 2, 3, 4, 5, 6)
d4 = FiniteSet(1, 2, 3, 4)
coin = FiniteSet(H, T)
square = unit_line * unit_line
assert (0, 0) in square
assert 0 not in square
assert (H, T) in coin ** 2
assert (.5, .5, .5) in (square * unit_line).flatten()
assert ((.5, .5), .5) in square * unit_line
assert (H, 3, 3) in (coin * d6 * d6).flatten()
assert ((H, 3), 3) in coin * d6 * d6
HH, TT = sympify(H), sympify(T)
assert set(coin**2) == {(HH, HH), (HH, TT), (TT, HH), (TT, TT)}
assert (d4*d4).is_subset(d6*d6)
assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union(
(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True))*Interval(-oo, oo),
Interval(-oo, oo)*(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True)))
assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3)
assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3)
assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3)
assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square
assert len(coin*coin*coin) == 8
assert len(S.EmptySet*S.EmptySet) == 0
assert len(S.EmptySet*coin) == 0
raises(TypeError, lambda: len(coin*Interval(0, 2)))
def test_real():
x = Symbol('x', real=True, finite=True)
I = Interval(0, 5)
J = Interval(10, 20)
A = FiniteSet(1, 2, 30, x, S.Pi)
B = FiniteSet(-4, 0)
C = FiniteSet(100)
D = FiniteSet('Ham', 'Eggs')
assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C])
assert not D.is_subset(S.Reals)
assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C])
assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D])
assert not (I + A + D).is_subset(S.Reals)
def test_supinf():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert (Interval(0, 1) + FiniteSet(2)).sup == 2
assert (Interval(0, 1) + FiniteSet(2)).inf == 0
assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x)
assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x)
assert FiniteSet(5, 1, x).sup == Max(5, x)
assert FiniteSet(5, 1, x).inf == Min(1, x)
assert FiniteSet(5, 1, x, y).sup == Max(5, x, y)
assert FiniteSet(5, 1, x, y).inf == Min(1, x, y)
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \
S.Infinity
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \
S.NegativeInfinity
assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs')
def test_universalset():
U = S.UniversalSet
x = Symbol('x')
assert U.as_relational(x) is S.true
assert U.union(Interval(2, 4)) == U
assert U.intersect(Interval(2, 4)) == Interval(2, 4)
assert U.measure is S.Infinity
assert U.boundary == S.EmptySet
assert U.contains(0) is S.true
def test_Union_of_ProductSets_shares():
line = Interval(0, 2)
points = FiniteSet(0, 1, 2)
assert Union(line * line, line * points) == line * line
def test_Interval_free_symbols():
# issue 6211
assert Interval(0, 1).free_symbols == set()
x = Symbol('x', real=True)
assert Interval(0, x).free_symbols == {x}
def test_image_interval():
from sympy.core.numbers import Rational
x = Symbol('x', real=True)
a = Symbol('a', real=True)
assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \
Interval(-4, 2, True, False)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1, True, True)) == \
Interval(0, 4, False, True)
assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1)
assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \
Interval(-35, 0) # Multiple Maxima
assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \
+ Interval(2, oo) # Single Infinite discontinuity
assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \
Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities
# Test for Python lambda
assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(Lambda(x, a*x), Interval(0, 1)) == \
ImageSet(Lambda(x, a*x), Interval(0, 1))
assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \
ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1))
def test_image_piecewise():
f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True))
f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True))
assert imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(Rational(1, 25), oo))
assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1)
@XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826
def test_image_Intersection():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \
Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2)))
def test_image_FiniteSet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6)
def test_image_Union():
x = Symbol('x', real=True)
assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \
(Interval(0, 4) + FiniteSet(9))
def test_image_EmptySet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, S.EmptySet) == S.EmptySet
def test_issue_5724_7680():
assert I not in S.Reals # issue 7680
assert Interval(-oo, oo).contains(I) is S.false
def test_boundary():
assert FiniteSet(1).boundary == FiniteSet(1)
assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1)
for left_open in (true, false) for right_open in (true, false))
def test_boundary_Union():
assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3)
assert ((Interval(0, 1, False, True)
+ Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2))
assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2)
assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \
== FiniteSet(0, 10)
assert Union(Interval(0, 10, True, True),
Interval(10, 15, True, True), evaluate=False).boundary \
== FiniteSet(0, 10, 15)
@XFAIL
def test_union_boundary_of_joining_sets():
""" Testing the boundary of unions is a hard problem """
assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
def test_boundary_ProductSet():
open_square = Interval(0, 1, True, True) ** 2
assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1))
second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True)
assert (open_square + second_square).boundary == (
FiniteSet(0, 1) * Interval(0, 1)
+ FiniteSet(1, 2) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1)
+ Interval(1, 2) * FiniteSet(0, 1))
def test_boundary_ProductSet_line():
line_in_r2 = Interval(0, 1) * FiniteSet(0)
assert line_in_r2.boundary == line_in_r2
def test_is_open():
assert Interval(0, 1, False, False).is_open is False
assert Interval(0, 1, True, False).is_open is False
assert Interval(0, 1, True, True).is_open is True
assert FiniteSet(1, 2, 3).is_open is False
def test_is_closed():
assert Interval(0, 1, False, False).is_closed is True
assert Interval(0, 1, True, False).is_closed is False
assert FiniteSet(1, 2, 3).is_closed is True
def test_closure():
assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False)
def test_interior():
assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True)
def test_issue_7841():
raises(TypeError, lambda: x in S.Reals)
def test_Eq():
assert Eq(Interval(0, 1), Interval(0, 1))
assert Eq(Interval(0, 1), Interval(0, 2)) == False
s1 = FiniteSet(0, 1)
s2 = FiniteSet(1, 2)
assert Eq(s1, s1)
assert Eq(s1, s2) == False
assert Eq(s1*s2, s1*s2)
assert Eq(s1*s2, s2*s1) == False
assert unchanged(Eq, FiniteSet({x, y}), FiniteSet({x}))
assert Eq(FiniteSet({x, y}).subs(y, x), FiniteSet({x})) is S.true
assert Eq(FiniteSet({x, y}), FiniteSet({x})).subs(y, x) is S.true
assert Eq(FiniteSet({x, y}).subs(y, x+1), FiniteSet({x})) is S.false
assert Eq(FiniteSet({x, y}), FiniteSet({x})).subs(y, x+1) is S.false
assert Eq(ProductSet({1}, {2}), Interval(1, 2)) not in (S.true, S.false)
assert Eq(ProductSet({1}), ProductSet({1}, {2})) is S.false
assert Eq(FiniteSet(()), FiniteSet(1)) is S.false
assert Eq(ProductSet(), FiniteSet(1)) is S.false
i1 = Interval(0, 1)
i2 = Interval(x, y)
assert unchanged(Eq, ProductSet(i1, i1), ProductSet(i2, i2))
def test_SymmetricDifference():
A = FiniteSet(0, 1, 2, 3, 4, 5)
B = FiniteSet(2, 4, 6, 8, 10)
C = Interval(8, 10)
assert SymmetricDifference(A, B, evaluate=False).is_iterable is True
assert SymmetricDifference(A, C, evaluate=False).is_iterable is None
assert FiniteSet(*SymmetricDifference(A, B, evaluate=False)) == \
FiniteSet(0, 1, 3, 5, 6, 8, 10)
raises(TypeError,
lambda: FiniteSet(*SymmetricDifference(A, C, evaluate=False)))
assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \
FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10)
    assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3, 4, 5)) \
== FiniteSet(5)
assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \
FiniteSet(3, 4, 6)
    assert Set(1, 2, 3) ^ Set(2, 3, 4) == Union(Set(1, 2, 3) - Set(2, 3, 4), \
Set(2, 3, 4) - Set(1, 2, 3))
assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \
Interval(2, 5), Interval(2, 5) - Interval(0, 4))
def test_issue_9536():
from sympy.functions.elementary.exponential import log
a = Symbol('a', real=True)
assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a)))
def test_issue_9637():
n = Symbol('n')
a = FiniteSet(n)
b = FiniteSet(2, n)
assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False)
assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False)
assert Complement(Interval(1, 3), b) == \
Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a)
assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False)
assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False)
def test_issue_9808():
# See https://github.com/sympy/sympy/issues/16342
assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False)
assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \
Complement(FiniteSet(1), FiniteSet(y), evaluate=False)
def test_issue_9956():
assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo)
assert Interval(-oo, oo).contains(1) is S.true
def test_issue_Symbol_inter():
i = Interval(0, oo)
r = S.Reals
mat = Matrix([0, 0, 0])
assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \
Intersection(i, FiniteSet(m))
assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \
Intersection(i, FiniteSet(m, n))
assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \
Intersection(Intersection({m, z}, {m, n, x}), r)
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \
Intersection(FiniteSet(3, m, n), FiniteSet(m, n, x), r, evaluate=False)
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \
Intersection(FiniteSet(3, m, n), r)
assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \
Intersection(r, FiniteSet(n))
assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \
Intersection(r, FiniteSet(sin(x), cos(x)))
assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \
Intersection(r, FiniteSet(x**2, sin(x)))
def test_issue_11827():
assert S.Naturals0**4
def test_issue_10113():
f = x**2/(x**2 - 4)
assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True))
assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0)
assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(Rational(9, 5), oo))
def test_issue_10248():
raises(
TypeError, lambda: list(Intersection(S.Reals, FiniteSet(x)))
)
A = Symbol('A', real=True)
assert list(Intersection(S.Reals, FiniteSet(A))) == [A]
def test_issue_9447():
a = Interval(0, 1) + Interval(2, 3)
assert Complement(S.UniversalSet, a) == Complement(
S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
assert Complement(S.Naturals, a) == Complement(
S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
def test_issue_10337():
assert (FiniteSet(2) == 3) is False
assert (FiniteSet(2) != 3) is True
raises(TypeError, lambda: FiniteSet(2) < 3)
raises(TypeError, lambda: FiniteSet(2) <= 3)
raises(TypeError, lambda: FiniteSet(2) > 3)
raises(TypeError, lambda: FiniteSet(2) >= 3)
def test_issue_10326():
bad = [
EmptySet,
FiniteSet(1),
Interval(1, 2),
S.ComplexInfinity,
S.ImaginaryUnit,
S.Infinity,
S.NaN,
S.NegativeInfinity,
]
interval = Interval(0, 5)
for i in bad:
assert i not in interval
x = Symbol('x', real=True)
nr = Symbol('nr', extended_real=False)
assert x + 1 in Interval(x, x + 4)
assert nr not in Interval(x, x + 4)
assert Interval(1, 2) in FiniteSet(Interval(0, 5), Interval(1, 2))
assert Interval(-oo, oo).contains(oo) is S.false
assert Interval(-oo, oo).contains(-oo) is S.false
def test_issue_2799():
U = S.UniversalSet
a = Symbol('a', real=True)
inf_interval = Interval(a, oo)
R = S.Reals
assert U + inf_interval == inf_interval + U
assert U + R == R + U
assert R + inf_interval == inf_interval + R
def test_issue_9706():
assert Interval(-oo, 0).closure == Interval(-oo, 0, True, False)
assert Interval(0, oo).closure == Interval(0, oo, False, True)
assert Interval(-oo, oo).closure == Interval(-oo, oo)
def test_issue_8257():
reals_plus_infinity = Union(Interval(-oo, oo), FiniteSet(oo))
reals_plus_negativeinfinity = Union(Interval(-oo, oo), FiniteSet(-oo))
assert Interval(-oo, oo) + FiniteSet(oo) == reals_plus_infinity
assert FiniteSet(oo) + Interval(-oo, oo) == reals_plus_infinity
assert Interval(-oo, oo) + FiniteSet(-oo) == reals_plus_negativeinfinity
assert FiniteSet(-oo) + Interval(-oo, oo) == reals_plus_negativeinfinity
def test_issue_10931():
assert S.Integers - S.Integers == EmptySet
assert S.Integers - S.Reals == EmptySet
def test_issue_11174():
soln = Intersection(Interval(-oo, oo), FiniteSet(-x), evaluate=False)
assert Intersection(FiniteSet(-x), S.Reals) == soln
soln = Intersection(S.Reals, FiniteSet(x), evaluate=False)
assert Intersection(FiniteSet(x), S.Reals) == soln
def test_issue_18505():
assert ImageSet(Lambda(n, sqrt(pi*n/2 - 1 + pi/2)), S.Integers).contains(0) == \
Contains(0, ImageSet(Lambda(n, sqrt(pi*n/2 - 1 + pi/2)), S.Integers))
def test_finite_set_intersection():
# The following should not produce recursion errors
# Note: some of these are not completely correct. See
# https://github.com/sympy/sympy/issues/16342.
assert Intersection(FiniteSet(-oo, x), FiniteSet(x)) == FiniteSet(x)
assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(0, x)]) == FiniteSet(x)
assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(x)]) == FiniteSet(x)
assert Intersection._handle_finite_sets([FiniteSet(2, 3, x, y), FiniteSet(1, 2, x)]) == \
Intersection._handle_finite_sets([FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)]) == \
Intersection(FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)) == \
Intersection(FiniteSet(1, 2, x), FiniteSet(2, x, y))
assert FiniteSet(1+x-y) & FiniteSet(1) == \
FiniteSet(1) & FiniteSet(1+x-y) == \
Intersection(FiniteSet(1+x-y), FiniteSet(1), evaluate=False)
assert FiniteSet(1) & FiniteSet(x) == FiniteSet(x) & FiniteSet(1) == \
Intersection(FiniteSet(1), FiniteSet(x), evaluate=False)
assert FiniteSet({x}) & FiniteSet({x, y}) == \
Intersection(FiniteSet({x}), FiniteSet({x, y}), evaluate=False)
def test_union_intersection_constructor():
# The actual exception does not matter here, so long as these fail
sets = [FiniteSet(1), FiniteSet(2)]
raises(Exception, lambda: Union(sets))
raises(Exception, lambda: Intersection(sets))
raises(Exception, lambda: Union(tuple(sets)))
raises(Exception, lambda: Intersection(tuple(sets)))
raises(Exception, lambda: Union(i for i in sets))
raises(Exception, lambda: Intersection(i for i in sets))
# Python sets are treated the same as FiniteSet
# The union of a single set (of sets) is the set (of sets) itself
assert Union(set(sets)) == FiniteSet(*sets)
assert Intersection(set(sets)) == FiniteSet(*sets)
assert Union({1}, {2}) == FiniteSet(1, 2)
assert Intersection({1, 2}, {2, 3}) == FiniteSet(2)
def test_Union_contains():
assert zoo not in Union(
Interval.open(-oo, 0), Interval.open(0, oo))
@XFAIL
def test_issue_16878b():
# in intersection_sets for (ImageSet, Set) there is no code
# that handles the base_set of S.Reals like there is
# for Integers
assert imageset(x, (x, x), S.Reals).is_subset(S.Reals**2) is True
def test_DisjointUnion():
assert DisjointUnion(FiniteSet(1, 2, 3), FiniteSet(1, 2, 3), FiniteSet(1, 2, 3)).rewrite(Union) == (FiniteSet(1, 2, 3) * FiniteSet(0, 1, 2))
assert DisjointUnion(Interval(1, 3), Interval(2, 4)).rewrite(Union) == Union(Interval(1, 3) * FiniteSet(0), Interval(2, 4) * FiniteSet(1))
assert DisjointUnion(Interval(0, 5), Interval(0, 5)).rewrite(Union) == Union(Interval(0, 5) * FiniteSet(0), Interval(0, 5) * FiniteSet(1))
assert DisjointUnion(Interval(-1, 2), S.EmptySet, S.EmptySet).rewrite(Union) == Interval(-1, 2) * FiniteSet(0)
assert DisjointUnion(Interval(-1, 2)).rewrite(Union) == Interval(-1, 2) * FiniteSet(0)
assert DisjointUnion(S.EmptySet, Interval(-1, 2), S.EmptySet).rewrite(Union) == Interval(-1, 2) * FiniteSet(1)
assert DisjointUnion(Interval(-oo, oo)).rewrite(Union) == Interval(-oo, oo) * FiniteSet(0)
assert DisjointUnion(S.EmptySet).rewrite(Union) == S.EmptySet
assert DisjointUnion().rewrite(Union) == S.EmptySet
raises(TypeError, lambda: DisjointUnion(Symbol('n')))
x = Symbol("x")
y = Symbol("y")
z = Symbol("z")
assert DisjointUnion(FiniteSet(x), FiniteSet(y, z)).rewrite(Union) == (FiniteSet(x) * FiniteSet(0)) + (FiniteSet(y, z) * FiniteSet(1))
def test_DisjointUnion_is_empty():
assert DisjointUnion(S.EmptySet).is_empty is True
assert DisjointUnion(S.EmptySet, S.EmptySet).is_empty is True
assert DisjointUnion(S.EmptySet, FiniteSet(1, 2, 3)).is_empty is False
def test_DisjointUnion_is_iterable():
assert DisjointUnion(S.Integers, S.Naturals, S.Rationals).is_iterable is True
assert DisjointUnion(S.EmptySet, S.Reals).is_iterable is False
assert DisjointUnion(FiniteSet(1, 2, 3), S.EmptySet, FiniteSet(x, y)).is_iterable is True
assert DisjointUnion(S.EmptySet, S.EmptySet).is_iterable is False
def test_DisjointUnion_contains():
assert (0, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (0, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (0, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (1, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (1, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (1, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (2, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (2, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (2, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (0, 1, 2) not in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (0, 0.5) not in DisjointUnion(FiniteSet(0.5))
assert (0, 5) not in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2))
assert (x, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y))
assert (y, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y))
assert (z, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y))
assert (y, 2) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y))
assert (0.5, 0) in DisjointUnion(Interval(0, 1), Interval(0, 2))
assert (0.5, 1) in DisjointUnion(Interval(0, 1), Interval(0, 2))
assert (1.5, 0) not in DisjointUnion(Interval(0, 1), Interval(0, 2))
assert (1.5, 1) in DisjointUnion(Interval(0, 1), Interval(0, 2))
def test_DisjointUnion_iter():
D = DisjointUnion(FiniteSet(3, 5, 7, 9), FiniteSet(x, y, z))
it = iter(D)
L1 = [(x, 1), (y, 1), (z, 1)]
L2 = [(3, 0), (5, 0), (7, 0), (9, 0)]
nxt = next(it)
assert nxt in L2
L2.remove(nxt)
nxt = next(it)
assert nxt in L1
L1.remove(nxt)
nxt = next(it)
assert nxt in L2
L2.remove(nxt)
nxt = next(it)
assert nxt in L1
L1.remove(nxt)
nxt = next(it)
assert nxt in L2
L2.remove(nxt)
nxt = next(it)
assert nxt in L1
L1.remove(nxt)
nxt = next(it)
assert nxt in L2
L2.remove(nxt)
raises(StopIteration, lambda: next(it))
raises(ValueError, lambda: iter(DisjointUnion(Interval(0, 1), S.EmptySet)))
def test_DisjointUnion_len():
assert len(DisjointUnion(FiniteSet(3, 5, 7, 9), FiniteSet(x, y, z))) == 7
assert len(DisjointUnion(S.EmptySet, S.EmptySet, FiniteSet(x, y, z), S.EmptySet)) == 3
raises(ValueError, lambda: len(DisjointUnion(Interval(0, 1), S.EmptySet)))
def test_issue_20089():
B = FiniteSet(FiniteSet(1, 2), FiniteSet(1))
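    # B is a set *of sets*: its elements are FiniteSet(1, 2) and FiniteSet(1),
    # so the plain numbers 1 and 2 are not themselves members of B.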
    assert 1 not in B
    assert 1.0 not in B
assert not Eq(1, FiniteSet(1, 2))
assert FiniteSet(1) in B
A = FiniteSet(1, 2)
assert A in B
assert B.issubset(B)
assert not A.issubset(B)
assert 1 in A
C = FiniteSet(FiniteSet(1, 2), FiniteSet(1), 1, 2)
assert A.issubset(C)
assert B.issubset(C)
|
797fdba2ff24d6530d8b5bae791e4fe636e2696d8fd8a01e3b7493cc55909dcf | from sympy import Symbol, sympify
from sympy.core.compatibility import is_sequence
from sympy.geometry.entity import GeometryEntity
from .plot_interval import PlotInterval
from .plot_object import PlotObject
from .util import parse_option_string
class PlotMode(PlotObject):
"""
Grandparent class for plotting
modes. Serves as interface for
registration, lookup, and init
of modes.
To create a new plot mode,
inherit from PlotModeBase
or one of its children, such
as PlotSurface or PlotCurve.
"""
## Class-level attributes
## used to register and lookup
## plot modes. See PlotModeBase
## for descriptions and usage.
i_vars, d_vars = '', ''
intervals = []
aliases = []
is_default = False
## Draw is the only method here which
## is meant to be overridden in child
## classes, and PlotModeBase provides
## a base implementation.
def draw(self):
raise NotImplementedError()
## Everything else in this file has to
## do with registration and retrieval
## of plot modes. This is where I've
## hidden much of the ugliness of automatic
## plot mode divination...
## Plot mode registry data structures
_mode_alias_list = []
_mode_map = {
1: {1: {}, 2: {}},
2: {1: {}, 2: {}},
3: {1: {}, 2: {}},
} # [d][i][alias_str]: class
_mode_default_map = {
1: {},
2: {},
3: {},
} # [d][i]: class
_i_var_max, _d_var_max = 2, 3
def __new__(cls, *args, **kwargs):
"""
This is the function which interprets
arguments given to Plot.__init__ and
Plot.__setattr__. Returns an initialized
instance of the appropriate child class.
"""
newargs, newkwargs = PlotMode._extract_options(args, kwargs)
mode_arg = newkwargs.get('mode', '')
# Interpret the arguments
d_vars, intervals = PlotMode._interpret_args(newargs)
i_vars = PlotMode._find_i_vars(d_vars, intervals)
i, d = max([len(i_vars), len(intervals)]), len(d_vars)
# Find the appropriate mode
subcls = PlotMode._get_mode(mode_arg, i, d)
# Create the object
o = object.__new__(subcls)
# Do some setup for the mode instance
o.d_vars = d_vars
o._fill_i_vars(i_vars)
o._fill_intervals(intervals)
o.options = newkwargs
return o
@staticmethod
def _get_mode(mode_arg, i_var_count, d_var_count):
"""
Tries to return an appropriate mode class.
Intended to be called only by __new__.
mode_arg
Can be a string or a class. If it is a
PlotMode subclass, it is simply returned.
            If it is a string, it can be an alias for
a mode or an empty string. In the latter
case, we try to find a default mode for
the i_var_count and d_var_count.
i_var_count
The number of independent variables
needed to evaluate the d_vars.
d_var_count
The number of dependent variables;
usually the number of functions to
be evaluated in plotting.
For example, a Cartesian function y = f(x) has
one i_var (x) and one d_var (y). A parametric
form x,y,z = f(u,v), f(u,v), f(u,v) has two
        i_vars (u,v) and three d_vars (x,y,z).
"""
# if the mode_arg is simply a PlotMode class,
# check that the mode supports the numbers
# of independent and dependent vars, then
# return it
try:
m = None
if issubclass(mode_arg, PlotMode):
m = mode_arg
except TypeError:
pass
if m:
if not m._was_initialized:
raise ValueError(("To use unregistered plot mode %s "
"you must first call %s._init_mode().")
% (m.__name__, m.__name__))
if d_var_count != m.d_var_count:
raise ValueError(("%s can only plot functions "
"with %i dependent variables.")
% (m.__name__,
m.d_var_count))
if i_var_count > m.i_var_count:
raise ValueError(("%s cannot plot functions "
"with more than %i independent "
"variables.")
% (m.__name__,
m.i_var_count))
return m
# If it is a string, there are two possibilities.
if isinstance(mode_arg, str):
i, d = i_var_count, d_var_count
if i > PlotMode._i_var_max:
raise ValueError(var_count_error(True, True))
if d > PlotMode._d_var_max:
raise ValueError(var_count_error(False, True))
# If the string is '', try to find a suitable
# default mode
if not mode_arg:
return PlotMode._get_default_mode(i, d)
# Otherwise, interpret the string as a mode
# alias (e.g. 'cartesian', 'parametric', etc)
else:
return PlotMode._get_aliased_mode(mode_arg, i, d)
else:
raise ValueError("PlotMode argument must be "
"a class or a string")
@staticmethod
def _get_default_mode(i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
try:
return PlotMode._mode_default_map[d][i]
except KeyError:
# Keep looking for modes in higher i var counts
# which support the given d var count until we
# reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_default_mode(i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a default mode "
"for %i independent and %i "
"dependent variables.") % (i_vars, d))
@staticmethod
def _get_aliased_mode(alias, i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
if alias not in PlotMode._mode_alias_list:
raise ValueError(("Couldn't find a mode called"
" %s. Known modes: %s.")
% (alias, ", ".join(PlotMode._mode_alias_list)))
try:
return PlotMode._mode_map[d][i][alias]
        except KeyError:
# Keep looking for modes in higher i var counts
# which support the given d var count and alias
# until we reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_aliased_mode(alias, i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a %s mode "
"for %i independent and %i "
"dependent variables.")
% (alias, i_vars, d))
@classmethod
def _register(cls):
"""
Called once for each user-usable plot mode.
For Cartesian2D, it is invoked after the
class definition: Cartesian2D._register()
"""
name = cls.__name__
cls._init_mode()
try:
i, d = cls.i_var_count, cls.d_var_count
# Add the mode to _mode_map under all
# given aliases
for a in cls.aliases:
if a not in PlotMode._mode_alias_list:
# Also track valid aliases, so
# we can quickly know when given
# an invalid one in _get_mode.
PlotMode._mode_alias_list.append(a)
PlotMode._mode_map[d][i][a] = cls
if cls.is_default:
# If this mode was marked as the
# default for this d,i combination,
# also set that.
PlotMode._mode_default_map[d][i] = cls
except Exception as e:
raise RuntimeError(("Failed to register "
"plot mode %s. Reason: %s")
% (name, (str(e))))
@classmethod
def _init_mode(cls):
"""
Initializes the plot mode based on
the 'mode-specific parameters' above.
Only intended to be called by
PlotMode._register(). To use a mode without
registering it, you can directly call
ModeSubclass._init_mode().
"""
def symbols_list(symbol_str):
return [Symbol(s) for s in symbol_str]
# Convert the vars strs into
# lists of symbols.
cls.i_vars = symbols_list(cls.i_vars)
cls.d_vars = symbols_list(cls.d_vars)
# Var count is used often, calculate
# it once here
cls.i_var_count = len(cls.i_vars)
cls.d_var_count = len(cls.d_vars)
if cls.i_var_count > PlotMode._i_var_max:
raise ValueError(var_count_error(True, False))
if cls.d_var_count > PlotMode._d_var_max:
raise ValueError(var_count_error(False, False))
# Try to use first alias as primary_alias
if len(cls.aliases) > 0:
cls.primary_alias = cls.aliases[0]
else:
cls.primary_alias = cls.__name__
di = cls.intervals
if len(di) != cls.i_var_count:
raise ValueError("Plot mode must provide a "
"default interval for each i_var.")
for i in range(cls.i_var_count):
# default intervals must be given [min,max,steps]
# (no var, but they must be in the same order as i_vars)
if len(di[i]) != 3:
raise ValueError("length should be equal to 3")
# Initialize an incomplete interval,
# to later be filled with a var when
# the mode is instantiated.
di[i] = PlotInterval(None, *di[i])
# To prevent people from using modes
# without these required fields set up.
cls._was_initialized = True
_was_initialized = False
## Initializer Helper Methods
@staticmethod
def _find_i_vars(functions, intervals):
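        # Example (illustrative): for functions [x*y] with intervals for x and
        # y given, this returns [x, y] -- interval variables first, in the
        # order given, then any remaining free symbols of the functions.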
i_vars = []
# First, collect i_vars in the
# order they are given in any
# intervals.
for i in intervals:
if i.v is None:
continue
elif i.v in i_vars:
raise ValueError(("Multiple intervals given "
"for %s.") % (str(i.v)))
i_vars.append(i.v)
# Then, find any remaining
# i_vars in given functions
# (aka d_vars)
for f in functions:
for a in f.free_symbols:
if a not in i_vars:
i_vars.append(a)
return i_vars
def _fill_i_vars(self, i_vars):
# copy default i_vars
self.i_vars = [Symbol(str(i)) for i in self.i_vars]
# replace with given i_vars
for i in range(len(i_vars)):
self.i_vars[i] = i_vars[i]
def _fill_intervals(self, intervals):
# copy default intervals
self.intervals = [PlotInterval(i) for i in self.intervals]
# track i_vars used so far
v_used = []
# fill copy of default
# intervals with given info
for i in range(len(intervals)):
self.intervals[i].fill_from(intervals[i])
if self.intervals[i].v is not None:
v_used.append(self.intervals[i].v)
# Find any orphan intervals and
# assign them i_vars
for i in range(len(self.intervals)):
if self.intervals[i].v is None:
u = [v for v in self.i_vars if v not in v_used]
if len(u) == 0:
raise ValueError("length should not be equal to 0")
self.intervals[i].v = u[0]
v_used.append(u[0])
@staticmethod
def _interpret_args(args):
interval_wrong_order = "PlotInterval %s was given before any function(s)."
interpret_error = "Could not interpret %s as a function or interval."
functions, intervals = [], []
if isinstance(args[0], GeometryEntity):
for coords in list(args[0].arbitrary_point()):
functions.append(coords)
intervals.append(PlotInterval.try_parse(args[0].plot_interval()))
else:
for a in args:
i = PlotInterval.try_parse(a)
if i is not None:
if len(functions) == 0:
raise ValueError(interval_wrong_order % (str(i)))
else:
intervals.append(i)
else:
if is_sequence(a, include=str):
raise ValueError(interpret_error % (str(a)))
try:
f = sympify(a)
functions.append(f)
except TypeError:
raise ValueError(interpret_error % str(a))
return functions, intervals
@staticmethod
def _extract_options(args, kwargs):
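        # Example (illustrative): _extract_options((x**2, 'mode=cartesian'), {})
        # returns ([x**2], {'mode': 'cartesian'}), since string positional
        # arguments are parsed as option strings.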
newkwargs, newargs = {}, []
for a in args:
if isinstance(a, str):
newkwargs = dict(newkwargs, **parse_option_string(a))
else:
newargs.append(a)
newkwargs = dict(newkwargs, **kwargs)
return newargs, newkwargs
def var_count_error(is_independent, is_plotting):
"""
Used to format an error message which differs
slightly in 4 places.
"""
if is_plotting:
v = "Plotting"
else:
v = "Registering plot modes"
if is_independent:
n, s = PlotMode._i_var_max, "independent"
else:
n, s = PlotMode._d_var_max, "dependent"
return ("%s with more than %i %s variables "
"is not supported.") % (v, n, s)
|
c79bdfab3b44abeccdafa71b89dc654c995b03e9cc31aec95ee3c008129d5f47 | from pyglet.window import Window
from pyglet.clock import Clock
from threading import Thread, Lock
gl_lock = Lock()
class ManagedWindow(Window):
"""
A pyglet window with an event loop which executes automatically
in a separate thread. Behavior is added by creating a subclass
which overrides setup, update, and/or draw.
"""
fps_limit = 30
default_win_args = dict(width=600,
height=500,
vsync=False,
resizable=True)
def __init__(self, **win_args):
"""
It is best not to override this function in the child
class, unless you need to take additional arguments.
Do any OpenGL initialization calls in setup().
"""
# check if this is run from the doctester
if win_args.get('runfromdoctester', False):
return
self.win_args = dict(self.default_win_args, **win_args)
self.Thread = Thread(target=self.__event_loop__)
self.Thread.start()
def __event_loop__(self, **win_args):
"""
The event loop thread function. Do not override or call
directly (it is called by __init__).
"""
gl_lock.acquire()
try:
try:
super().__init__(**self.win_args)
self.switch_to()
self.setup()
except Exception as e:
print("Window initialization failed: %s" % (str(e)))
self.has_exit = True
finally:
gl_lock.release()
clock = Clock()
clock.fps_limit = self.fps_limit
while not self.has_exit:
dt = clock.tick()
gl_lock.acquire()
try:
try:
self.switch_to()
self.dispatch_events()
self.clear()
self.update(dt)
self.draw()
self.flip()
except Exception as e:
print("Uncaught exception in event loop: %s" % str(e))
self.has_exit = True
finally:
gl_lock.release()
super().close()
def close(self):
"""
Closes the window.
"""
self.has_exit = True
def setup(self):
"""
Called once before the event loop begins.
Override this method in a child class. This
is the best place to put things like OpenGL
initialization calls.
"""
pass
def update(self, dt):
"""
Called before draw during each iteration of
the event loop. dt is the elapsed time in
seconds since the last update. OpenGL rendering
calls are best put in draw() rather than here.
"""
pass
def draw(self):
"""
Called after update during each iteration of
the event loop. Put OpenGL rendering calls
here.
"""
pass
if __name__ == '__main__':
ManagedWindow()
|
2bc332b9d6114b617e92ee5b973916837ab78283033d4b17ab39872c353652a7 | try:
from ctypes import c_float
except ImportError:
pass
import pyglet.gl as pgl
from math import sqrt as _sqrt, acos as _acos, pi as _pi
def cross(a, b):
return (a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0])
def dot(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def mag(a):
return _sqrt(a[0]**2 + a[1]**2 + a[2]**2)
def norm(a):
m = mag(a)
return (a[0] / m, a[1] / m, a[2] / m)
def get_sphere_mapping(x, y, width, height):
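    # Map a 2D window coordinate onto a virtual sphere centered in the window;
    # points falling outside the sphere are projected onto its silhouette.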
x = min([max([x, 0]), width])
y = min([max([y, 0]), height])
sr = _sqrt((width/2)**2 + (height/2)**2)
sx = ((x - width / 2) / sr)
sy = ((y - height / 2) / sr)
sz = 1.0 - sx**2 - sy**2
if sz > 0.0:
sz = _sqrt(sz)
return (sx, sy, sz)
else:
sz = 0
return norm((sx, sy, sz))
rad2deg = 180.0 / _pi
def get_spherical_rotatation(p1, p2, width, height, theta_multiplier):
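    # Arcball-style rotation: map both screen points onto the virtual sphere,
    # rotate about the axis perpendicular to the two mapped vectors by the
    # angle between them (scaled by theta_multiplier), and return the result
    # as a 4x4 modelview matrix built on the GL matrix stack.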
v1 = get_sphere_mapping(p1[0], p1[1], width, height)
v2 = get_sphere_mapping(p2[0], p2[1], width, height)
d = min(max([dot(v1, v2), -1]), 1)
if abs(d - 1.0) < 0.000001:
return None
raxis = norm( cross(v1, v2) )
rtheta = theta_multiplier * rad2deg * _acos(d)
pgl.glPushMatrix()
pgl.glLoadIdentity()
pgl.glRotatef(rtheta, *raxis)
mat = (c_float*16)()
pgl.glGetFloatv(pgl.GL_MODELVIEW_MATRIX, mat)
pgl.glPopMatrix()
return mat
|
dbaab33b0c7fd30952014930757acef69ce4fb15d03d3fbdbf22acebf5505a00 | from threading import RLock
# it is sufficient to import "pyglet" here once
try:
import pyglet.gl as pgl
except ImportError:
raise ImportError("pyglet is required for plotting.\n "
"visit http://www.pyglet.org/")
from sympy.core.compatibility import is_sequence, SYMPY_INTS
from sympy.core.numbers import Integer
from sympy.geometry.entity import GeometryEntity
from sympy.plotting.pygletplot.plot_axes import PlotAxes
from sympy.plotting.pygletplot.plot_mode import PlotMode
from sympy.plotting.pygletplot.plot_object import PlotObject
from sympy.plotting.pygletplot.plot_window import PlotWindow
from sympy.plotting.pygletplot.util import parse_option_string
from sympy.utilities.decorator import doctest_depends_on
from time import sleep
from os import getcwd, listdir
import ctypes
@doctest_depends_on(modules=('pyglet',))
class PygletPlot:
"""
Plot Examples
=============
See examples/advanced/pyglet_plotting.py for many more examples.
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy.abc import x, y, z
>>> Plot(x*y**3-y*x**3)
[0]: -x**3*y + x*y**3, 'mode=cartesian'
>>> p = Plot()
>>> p[1] = x*y
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p = Plot()
>>> p[1] = x**2+y**2
>>> p[2] = -x**2-y**2
Variable Intervals
==================
The basic format is [var, min, max, steps], but the
syntax is flexible and arguments left out are taken
from the defaults for the current coordinate mode:
>>> Plot(x**2) # implies [x,-5,5,100]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100]
[0]: x**2 - y**2, 'mode=cartesian'
>>> Plot(x**2, [x,-13,13,100])
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [-13,13]) # [x,-13,13,100]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [x,-13,13]) # [x,-13,13,10]
[0]: x**2, 'mode=cartesian'
>>> Plot(1*x, [], [x], mode='cylindrical')
... # [unbound_theta,0,2*Pi,40], [x,-1,1,20]
[0]: x, 'mode=cartesian'
Coordinate Modes
================
Plot supports several curvilinear coordinate modes, and
    they are independent for each plotted function. You can specify
a coordinate mode explicitly with the 'mode' named argument,
but it can be automatically determined for Cartesian or
parametric plots, and therefore must only be specified for
polar, cylindrical, and spherical modes.
Specifically, Plot(function arguments) and Plot[n] =
(function arguments) will interpret your arguments as a
Cartesian plot if you provide one function and a parametric
plot if you provide two or three functions. Similarly, the
arguments will be interpreted as a curve if one variable is
used, and a surface if two are used.
Supported mode names by number of variables:
1: parametric, cartesian, polar
2: parametric, cartesian, cylindrical = polar, spherical
>>> Plot(1, mode='spherical')
Calculator-like Interface
=========================
>>> p = Plot(visible=False)
>>> f = x**2
>>> p[1] = f
>>> p[2] = f.diff(x)
>>> p[3] = f.diff(x).diff(x)
>>> p
[1]: x**2, 'mode=cartesian'
[2]: 2*x, 'mode=cartesian'
[3]: 2, 'mode=cartesian'
>>> p.show()
>>> p.clear()
>>> p
<blank plot>
>>> p[1] = x**2+y**2
>>> p[1].style = 'solid'
>>> p[2] = -x**2-y**2
>>> p[2].style = 'wireframe'
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p[1].style = 'both'
>>> p[2].style = 'both'
>>> p.close()
Plot Window Keyboard Controls
=============================
Screen Rotation:
X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2
Z axis Q,E, Numpad 7,9
Model Rotation:
Z axis Z,C, Numpad 1,3
Zoom: R,F, PgUp,PgDn, Numpad +,-
Reset Camera: X, Numpad 5
Camera Presets:
XY F1
XZ F2
YZ F3
Perspective F4
Sensitivity Modifier: SHIFT
Axes Toggle:
Visible F5
Colors F6
Close Window: ESCAPE
=============================
"""
@doctest_depends_on(modules=('pyglet',))
def __init__(self, *fargs, **win_args):
"""
Positional Arguments
====================
Any given positional arguments are used to
initialize a plot function at index 1. In
other words...
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy.abc import x
>>> p = Plot(x**2, visible=False)
...is equivalent to...
>>> p = Plot(visible=False)
>>> p[1] = x**2
Note that in earlier versions of the plotting
module, you were able to specify multiple
functions in the initializer. This functionality
has been dropped in favor of better automatic
        plot_mode detection.
Named Arguments
===============
axes
An option string of the form
"key1=value1; key2 = value2" which
can use the following options:
style = ordinate
none OR frame OR box OR ordinate
stride = 0.25
val OR (val_x, val_y, val_z)
overlay = True (draw on top of plot)
True OR False
colored = False (False uses Black,
True uses colors
R,G,B = X,Y,Z)
True OR False
label_axes = False (display axis names
at endpoints)
True OR False
            visible = True (show immediately)
True OR False
The following named arguments are passed as
arguments to window initialization:
antialiasing = True
True OR False
ortho = False
True OR False
invert_mouse_zoom = False
True OR False
"""
# Register the plot modes
from . import plot_modes # noqa
self._win_args = win_args
self._window = None
self._render_lock = RLock()
self._functions = {}
self._pobjects = []
self._screenshot = ScreenShot(self)
axe_options = parse_option_string(win_args.pop('axes', ''))
self.axes = PlotAxes(**axe_options)
self._pobjects.append(self.axes)
self[0] = fargs
if win_args.get('visible', True):
self.show()
## Window Interfaces
def show(self):
"""
Creates and displays a plot window, or activates it
(gives it focus) if it has already been created.
"""
if self._window and not self._window.has_exit:
self._window.activate()
else:
self._win_args['visible'] = True
self.axes.reset_resources()
#if hasattr(self, '_doctest_depends_on'):
# self._win_args['runfromdoctester'] = True
self._window = PlotWindow(self, **self._win_args)
def close(self):
"""
Closes the plot window.
"""
if self._window:
self._window.close()
def saveimage(self, outfile=None, format='', size=(600, 500)):
"""
Saves a screen capture of the plot window to an
image file.
If outfile is given, it can either be a path
or a file object. Otherwise a png image will
be saved to the current working directory.
If the format is omitted, it is determined from
the filename extension.
"""
self._screenshot.save(outfile, format, size)
## Function List Interfaces
def clear(self):
"""
Clears the function list of this plot.
"""
self._render_lock.acquire()
self._functions = {}
self.adjust_all_bounds()
self._render_lock.release()
def __getitem__(self, i):
"""
Returns the function at position i in the
function list.
"""
return self._functions[i]
def __setitem__(self, i, args):
"""
Parses and adds a PlotMode to the function
list.
"""
if not (isinstance(i, (SYMPY_INTS, Integer)) and i >= 0):
raise ValueError("Function index must "
"be an integer >= 0.")
if isinstance(args, PlotObject):
f = args
else:
if (not is_sequence(args)) or isinstance(args, GeometryEntity):
args = [args]
if len(args) == 0:
return # no arguments given
kwargs = dict(bounds_callback=self.adjust_all_bounds)
f = PlotMode(*args, **kwargs)
if f:
self._render_lock.acquire()
self._functions[i] = f
self._render_lock.release()
else:
raise ValueError("Failed to parse '%s'."
% ', '.join(str(a) for a in args))
def __delitem__(self, i):
"""
Removes the function in the function list at
position i.
"""
self._render_lock.acquire()
del self._functions[i]
self.adjust_all_bounds()
self._render_lock.release()
def firstavailableindex(self):
"""
Returns the first unused index in the function list.
"""
i = 0
self._render_lock.acquire()
while i in self._functions:
i += 1
self._render_lock.release()
return i
def append(self, *args):
"""
Parses and adds a PlotMode to the function
list at the first available index.
"""
self.__setitem__(self.firstavailableindex(), args)
def __len__(self):
"""
Returns the number of functions in the function list.
"""
return len(self._functions)
def __iter__(self):
"""
Allows iteration of the function list.
"""
        return iter(self._functions.values())
def __repr__(self):
return str(self)
def __str__(self):
"""
Returns a string containing a new-line separated
list of the functions in the function list.
"""
s = ""
if len(self._functions) == 0:
s += "<blank plot>"
else:
self._render_lock.acquire()
s += "\n".join(["%s[%i]: %s" % ("", i, str(self._functions[i]))
for i in self._functions])
self._render_lock.release()
return s
def adjust_all_bounds(self):
self._render_lock.acquire()
self.axes.reset_bounding_box()
for f in self._functions:
self.axes.adjust_bounds(self._functions[f].bounds)
self._render_lock.release()
def wait_for_calculations(self):
sleep(0)
self._render_lock.acquire()
for f in self._functions:
a = self._functions[f]._get_calculating_verts
b = self._functions[f]._get_calculating_cverts
while a() or b():
sleep(0)
self._render_lock.release()
class ScreenShot:
def __init__(self, plot):
self._plot = plot
self.screenshot_requested = False
self.outfile = None
self.format = ''
self.invisibleMode = False
self.flag = 0
def __bool__(self):
return self.screenshot_requested
def _execute_saving(self):
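        # Wait a few frames (self.flag counts up to 3) so the scene has been
        # fully rendered before reading the pixels back from the framebuffer.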
if self.flag < 3:
self.flag += 1
return
size_x, size_y = self._plot._window.get_size()
size = size_x*size_y*4*ctypes.sizeof(ctypes.c_ubyte)
image = ctypes.create_string_buffer(size)
pgl.glReadPixels(0, 0, size_x, size_y, pgl.GL_RGBA, pgl.GL_UNSIGNED_BYTE, image)
from PIL import Image
im = Image.frombuffer('RGBA', (size_x, size_y),
image.raw, 'raw', 'RGBA', 0, 1)
im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile, self.format)
self.flag = 0
self.screenshot_requested = False
if self.invisibleMode:
self._plot._window.close()
def save(self, outfile=None, format='', size=(600, 500)):
self.outfile = outfile
self.format = format
self.size = size
self.screenshot_requested = True
if not self._plot._window or self._plot._window.has_exit:
self._plot._win_args['visible'] = False
self._plot._win_args['width'] = size[0]
self._plot._win_args['height'] = size[1]
self._plot.axes.reset_resources()
self._plot._window = PlotWindow(self._plot, **self._plot._win_args)
self.invisibleMode = True
if self.outfile is None:
self.outfile = self._create_unique_path()
print(self.outfile)
def _create_unique_path(self):
cwd = getcwd()
l = listdir(cwd)
path = ''
i = 0
while True:
            if 'plot_%s.png' % i not in l:
path = cwd + '/plot_%s.png' % i
break
i += 1
return path
|
d1c69f8769784c7f287a79eae63aa3e95ddaff564747a9dcf0d695b5958a1ee7 | import pyglet.gl as pgl
from sympy.core import S
from sympy.core.compatibility import is_sequence
from sympy.plotting.pygletplot.color_scheme import ColorScheme
from sympy.plotting.pygletplot.plot_mode import PlotMode
from time import sleep
from threading import Thread, Event, RLock
import warnings
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
A list of the render styles. Do not modify.
"""
styles = {'wireframe': 1, 'solid': 2, 'both': 3}
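    # The values act as bit flags: draw() tests (style & 2) for the solid pass
    # and (style & 1) for the wireframe pass, so 'both' (3) enables both.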
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85, 0.85, 0.85)
default_solid_color = (0.6, 0.6, 0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except Exception:
warnings.warn("\nWarning: creating lambda evaluator failed. "
"Falling back on sympy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, bounds_callback=None, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
self.cbounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
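        # Render stacks: -1 marks "nothing to draw"; pushed callables are
        # compiled lazily into GL display lists by _render_stack_top and
        # replaced in place with (display_list_id, callable) pairs.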
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = bounds_callback
self._on_calculate()
def synchronized(f):
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
def _create_display_list(self, function):
dl = pgl.glGenLists(1)
pgl.glNewList(dl, pgl.GL_COMPILE)
function()
pgl.glEndList()
return dl
def _render_stack_top(self, render_stack):
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if pgl.GL_TRUE == pgl.glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
def _draw_solid_display_list(self, dl):
pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT)
pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_FILL)
pgl.glCallList(dl)
pgl.glPopAttrib()
def _draw_wireframe_display_list(self, dl):
pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT)
pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_LINE)
pgl.glEnable(pgl.GL_POLYGON_OFFSET_LINE)
pgl.glPolygonOffset(-0.005, -50.0)
pgl.glCallList(dl)
pgl.glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f):
f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f):
f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
        if self._calculating_verts.is_set():
return
self._calculating_verts.set()
try:
self._on_calculate_verts()
finally:
self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
        if self._calculating_verts.is_set():
            return
        while self._calculating_cverts.is_set():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try:
self._on_calculate_cverts()
finally:
self._calculating_cverts.clear()
def _get_calculating_verts(self):
        return self._calculating_verts.is_set()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
        return self._calculating_cverts.is_set()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None:
return
if v == '':
step_max = 0
for i in self.intervals:
if i.v_steps is None:
continue
step_max = max([step_max, int(i.v_steps)])
v = ['both', 'solid'][step_max > 40]
if v not in self.styles:
raise ValueError("v should be there in self.styles")
if v == self._style:
return
self._style = v
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if is_sequence(v):
v = ColorScheme(*v)
else:
v = ColorScheme(v)
if repr(v) == repr(self._color):
return
self._on_change_color(v)
self._color = v
except Exception as e:
raise RuntimeError("Color change failed. "
"Reason: %s" % (str(e)))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [('mode', self.primary_alias),
('color', str(self.color)),
('style', str(self.style))]
o = "'%s'" % ("; ".join("%s=%s" % (k, v)
for k, v in d if v != 'None'))
return ", ".join([f, i, o])
|
c9f90683b4c5df23e185150386f3cc4f5b898126e84f08003ad28e25d3fb77c1 | import pyglet.gl as pgl
from sympy.plotting.pygletplot.plot_rotation import get_spherical_rotatation
from sympy.plotting.pygletplot.util import get_model_matrix, model_to_screen, \
screen_to_model, vec_subs
class PlotCamera:
min_dist = 0.05
max_dist = 500.0
min_ortho_dist = 100.0
max_ortho_dist = 10000.0
_default_dist = 6.0
_default_ortho_dist = 600.0
rot_presets = {
'xy': (0, 0, 0),
'xz': (-90, 0, 0),
'yz': (0, 90, 0),
'perspective': (-45, 0, -45)
}
def __init__(self, window, ortho=False):
self.window = window
self.axes = self.window.plot.axes
self.ortho = ortho
self.reset()
def init_rot_matrix(self):
pgl.glPushMatrix()
pgl.glLoadIdentity()
self._rot = get_model_matrix()
pgl.glPopMatrix()
def set_rot_preset(self, preset_name):
self.init_rot_matrix()
try:
r = self.rot_presets[preset_name]
        except KeyError:
raise ValueError(
"%s is not a valid rotation preset." % preset_name)
try:
self.euler_rotate(r[0], 1, 0, 0)
self.euler_rotate(r[1], 0, 1, 0)
self.euler_rotate(r[2], 0, 0, 1)
except AttributeError:
pass
def reset(self):
self._dist = 0.0
self._x, self._y = 0.0, 0.0
self._rot = None
if self.ortho:
self._dist = self._default_ortho_dist
else:
self._dist = self._default_dist
self.init_rot_matrix()
def mult_rot_matrix(self, rot):
pgl.glPushMatrix()
pgl.glLoadMatrixf(rot)
pgl.glMultMatrixf(self._rot)
self._rot = get_model_matrix()
pgl.glPopMatrix()
def setup_projection(self):
pgl.glMatrixMode(pgl.GL_PROJECTION)
pgl.glLoadIdentity()
if self.ortho:
# yep, this is pseudo ortho (don't tell anyone)
pgl.gluPerspective(
0.3, float(self.window.width)/float(self.window.height),
self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01)
else:
pgl.gluPerspective(
30.0, float(self.window.width)/float(self.window.height),
self.min_dist - 0.01, self.max_dist + 0.01)
pgl.glMatrixMode(pgl.GL_MODELVIEW)
def _get_scale(self):
return 1.0, 1.0, 1.0
def apply_transformation(self):
pgl.glLoadIdentity()
pgl.glTranslatef(self._x, self._y, -self._dist)
if self._rot is not None:
pgl.glMultMatrixf(self._rot)
pgl.glScalef(*self._get_scale())
def spherical_rotate(self, p1, p2, sensitivity=1.0):
mat = get_spherical_rotatation(p1, p2, self.window.width,
self.window.height, sensitivity)
if mat is not None:
self.mult_rot_matrix(mat)
def euler_rotate(self, angle, x, y, z):
pgl.glPushMatrix()
pgl.glLoadMatrixf(self._rot)
pgl.glRotatef(angle, x, y, z)
self._rot = get_model_matrix()
pgl.glPopMatrix()
def zoom_relative(self, clicks, sensitivity):
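        # Positive clicks zoom in (reduce camera distance), negative clicks
        # zoom out; the new distance is applied only while it stays within the
        # limits for the current projection (ortho or perspective).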
if self.ortho:
dist_d = clicks * sensitivity * 50.0
min_dist = self.min_ortho_dist
max_dist = self.max_ortho_dist
else:
dist_d = clicks * sensitivity
min_dist = self.min_dist
max_dist = self.max_dist
new_dist = (self._dist - dist_d)
if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist:
self._dist = new_dist
def mouse_translate(self, x, y, dx, dy):
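        # Convert the screen-space drag (dx, dy) into a model-space offset at
        # the depth of the model origin, so the plot follows the cursor.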
pgl.glPushMatrix()
pgl.glLoadIdentity()
pgl.glTranslatef(0, 0, -self._dist)
z = model_to_screen(0, 0, 0)[2]
d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z))
pgl.glPopMatrix()
self._x += d[0]
self._y += d[1]
|
52475dd7fbaa7505cf9dd0a34481984e005e2d995a9824d8f1283d2ae44df792 | from sympy import Basic, Symbol, symbols, lambdify
from .util import interpolate, rinterpolate, create_bounds, update_bounds
from sympy.utilities.iterables import sift
class ColorGradient:
colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9]
intervals = 0.0, 1.0
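    # Construction (illustrative): ColorGradient(c1, c2) spans [0.0, 1.0]
    # between two colors, while ColorGradient(i0, c0, i1, c1, ...) alternates
    # interval positions and colors for a multi-step gradient, as in the
    # 'zfade3' scheme defined at the bottom of this file.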
def __init__(self, *args):
if len(args) == 2:
self.colors = list(args)
self.intervals = [0.0, 1.0]
elif len(args) > 0:
if len(args) % 2 != 0:
raise ValueError("len(args) should be even")
self.colors = [args[i] for i in range(1, len(args), 2)]
self.intervals = [args[i] for i in range(0, len(args), 2)]
assert len(self.colors) == len(self.intervals)
def copy(self):
c = ColorGradient()
c.colors = [e[::] for e in self.colors]
c.intervals = self.intervals[::]
return c
def _find_interval(self, v):
m = len(self.intervals)
i = 0
while i < m - 1 and self.intervals[i] <= v:
i += 1
return i
def _interpolate_axis(self, axis, v):
i = self._find_interval(v)
v = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v)
def __call__(self, r, g, b):
c = self._interpolate_axis
return c(0, r), c(1, g), c(2, b)
default_color_schemes = {} # defined at the bottom of this file
class ColorScheme:
def __init__(self, *args, **kwargs):
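        # Accepted argument forms (illustrative): the name of a predefined
        # scheme such as 'rainbow', a callable f(x, y, z, u, v) -> (r, g, b),
        # or one or three sympy expressions optionally followed by color
        # tuples defining the gradient.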
self.args = args
self.f, self.gradient = None, ColorGradient()
if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]):
self.f = args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0] in default_color_schemes:
cs = default_color_schemes[args[0]]
self.f, self.gradient = cs.f, cs.gradient.copy()
else:
self.f = lambdify('x,y,z,u,v', args[0])
else:
self.f, self.gradient = self._interpret_args(args)
self._test_color_function()
if not isinstance(self.gradient, ColorGradient):
raise ValueError("Color gradient not properly initialized. "
"(Not a ColorGradient instance.)")
def _interpret_args(self, args):
f, gradient = None, self.gradient
atoms, lists = self._sort_args(args)
s = self._pop_symbol_list(lists)
s = self._fill_in_vars(s)
# prepare the error message for lambdification failure
f_str = ', '.join(str(fa) for fa in atoms)
s_str = (str(sa) for sa in s)
s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0)
f_error = ValueError("Could not interpret arguments "
"%s as functions of %s." % (f_str, s_str))
# try to lambdify args
if len(atoms) == 1:
fv = atoms[0]
try:
f = lambdify(s, [fv, fv, fv])
except TypeError:
raise f_error
elif len(atoms) == 3:
fr, fg, fb = atoms
try:
f = lambdify(s, [fr, fg, fb])
except TypeError:
raise f_error
else:
raise ValueError("A ColorScheme must provide 1 or 3 "
"functions in x, y, z, u, and/or v.")
# try to intrepret any given color information
if len(lists) == 0:
gargs = []
elif len(lists) == 1:
gargs = lists[0]
elif len(lists) == 2:
try:
(r1, g1, b1), (r2, g2, b2) = lists
            except (TypeError, ValueError):
raise ValueError("If two color arguments are given, "
"they must be given in the format "
"(r1, g1, b1), (r2, g2, b2).")
gargs = lists
elif len(lists) == 3:
try:
(r1, r2), (g1, g2), (b1, b2) = lists
except Exception:
raise ValueError("If three color arguments are given, "
"they must be given in the format "
"(r1, r2), (g1, g2), (b1, b2). To create "
"a multi-step gradient, use the syntax "
"[0, colorStart, step1, color1, ..., 1, "
"colorEnd].")
gargs = [[r1, g1, b1], [r2, g2, b2]]
else:
raise ValueError("Don't know what to do with collection "
"arguments %s." % (', '.join(str(l) for l in lists)))
if gargs:
try:
gradient = ColorGradient(*gargs)
except Exception as ex:
raise ValueError(("Could not initialize a gradient "
"with arguments %s. Inner "
"exception: %s") % (gargs, str(ex)))
return f, gradient
def _pop_symbol_list(self, lists):
symbol_lists = []
for l in lists:
mark = True
for s in l:
if s is not None and not isinstance(s, Symbol):
mark = False
break
if mark:
lists.remove(l)
symbol_lists.append(l)
if len(symbol_lists) == 1:
return symbol_lists[0]
elif len(symbol_lists) == 0:
return []
else:
raise ValueError("Only one list of Symbols "
"can be given for a color scheme.")
def _fill_in_vars(self, args):
defaults = symbols('x,y,z,u,v')
v_error = ValueError("Could not find what to plot.")
if len(args) == 0:
return defaults
if not isinstance(args, (tuple, list)):
raise v_error
if len(args) == 0:
return defaults
for s in args:
if s is not None and not isinstance(s, Symbol):
raise v_error
# when vars are given explicitly, any vars
# not given are marked 'unbound' as to not
# be accidentally used in an expression
vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)]
# interpret as t
if len(args) == 1:
vars[3] = args[0]
# interpret as u,v
elif len(args) == 2:
if args[0] is not None:
vars[3] = args[0]
if args[1] is not None:
vars[4] = args[1]
# interpret as x,y,z
elif len(args) >= 3:
# allow some of x,y,z to be
# left unbound if not given
if args[0] is not None:
vars[0] = args[0]
if args[1] is not None:
vars[1] = args[1]
if args[2] is not None:
vars[2] = args[2]
# interpret the rest as t
if len(args) >= 4:
vars[3] = args[3]
# ...or u,v
if len(args) >= 5:
vars[4] = args[4]
return vars
def _sort_args(self, args):
lists, atoms = sift(args,
lambda a: isinstance(a, (tuple, list)), binary=True)
return atoms, lists
def _test_color_function(self):
if not callable(self.f):
raise ValueError("Color function is not callable.")
        try:
            result = self.f(0, 0, 0, 0, 0)
        except TypeError:
            raise ValueError("Color function needs to accept x,y,z,u,v, "
                             "as arguments even if it doesn't use all of them.")
        except Exception:
            return  # color function probably not valid at 0,0,0,0,0
        try:
            valid = len(result) == 3
        except TypeError:
            valid = False
        if not valid:
            raise ValueError("Color function needs to return 3-tuple r,g,b.")
def __call__(self, x, y, z, u, v):
try:
return self.f(x, y, z, u, v)
except Exception:
return None
def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over a single
independent variable u.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
if verts[_u] is None:
cverts.append(None)
else:
x, y, z = verts[_u]
u, v = u_set[_u], None
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
cverts.append(c)
if callable(inc_pos):
inc_pos()
# scale and apply gradient
for _u in range(len(u_set)):
if cverts[_u] is not None:
for _c in range(3):
# scale from [f_min, f_max] to [0,1]
cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1],
cverts[_u][_c])
# apply gradient
cverts[_u] = self.gradient(*cverts[_u])
if callable(inc_pos):
inc_pos()
return cverts
def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over two
independent variables u and v.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*len(v_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
column = list()
for _v in range(len(v_set)):
if verts[_u][_v] is None:
column.append(None)
else:
x, y, z = verts[_u][_v]
u, v = u_set[_u], v_set[_v]
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
column.append(c)
if callable(inc_pos):
inc_pos()
cverts.append(column)
# scale and apply gradient
for _u in range(len(u_set)):
for _v in range(len(v_set)):
if cverts[_u][_v] is not None:
# scale from [f_min, f_max] to [0,1]
for _c in range(3):
cverts[_u][_v][_c] = rinterpolate(bounds[_c][0],
bounds[_c][1], cverts[_u][_v][_c])
# apply gradient
cverts[_u][_v] = self.gradient(*cverts[_u][_v])
if callable(inc_pos):
inc_pos()
return cverts
def str_base(self):
return ", ".join(str(a) for a in self.args)
def __repr__(self):
return "%s" % (self.str_base())
x, y, z, t, u, v = symbols('x,y,z,t,u,v')
default_color_schemes['rainbow'] = ColorScheme(z, y, x)
default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97),
(0.97, 0.4, 0.4), (None, None, z))
default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z),
[0.00, (0.2, 0.2, 1.0),
0.35, (0.2, 0.8, 0.4),
0.50, (0.3, 0.9, 0.3),
0.65, (0.4, 0.8, 0.2),
1.00, (1.0, 0.2, 0.2)])
default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z),
[0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)])
|
32ba092523c26c10d3f201acfc676aa55fca1ec96530d269c71d0c537bca255f | from pyglet.window import key
from pyglet.window.mouse import LEFT, RIGHT, MIDDLE
from sympy.plotting.pygletplot.util import get_direction_vectors, get_basis_vectors
class PlotController:
normal_mouse_sensitivity = 4.0
modified_mouse_sensitivity = 1.0
normal_key_sensitivity = 160.0
modified_key_sensitivity = 40.0
keymap = {
key.LEFT: 'left',
key.A: 'left',
key.NUM_4: 'left',
key.RIGHT: 'right',
key.D: 'right',
key.NUM_6: 'right',
key.UP: 'up',
key.W: 'up',
key.NUM_8: 'up',
key.DOWN: 'down',
key.S: 'down',
key.NUM_2: 'down',
key.Z: 'rotate_z_neg',
key.NUM_1: 'rotate_z_neg',
key.C: 'rotate_z_pos',
key.NUM_3: 'rotate_z_pos',
key.Q: 'spin_left',
key.NUM_7: 'spin_left',
key.E: 'spin_right',
key.NUM_9: 'spin_right',
key.X: 'reset_camera',
key.NUM_5: 'reset_camera',
key.NUM_ADD: 'zoom_in',
key.PAGEUP: 'zoom_in',
key.R: 'zoom_in',
key.NUM_SUBTRACT: 'zoom_out',
key.PAGEDOWN: 'zoom_out',
key.F: 'zoom_out',
key.RSHIFT: 'modify_sensitivity',
key.LSHIFT: 'modify_sensitivity',
key.F1: 'rot_preset_xy',
key.F2: 'rot_preset_xz',
key.F3: 'rot_preset_yz',
key.F4: 'rot_preset_perspective',
key.F5: 'toggle_axes',
key.F6: 'toggle_axe_colors',
key.F8: 'save_image'
}
def __init__(self, window, *, invert_mouse_zoom=False, **kwargs):
self.invert_mouse_zoom = invert_mouse_zoom
self.window = window
self.camera = window.camera
self.action = {
# Rotation around the view Y (up) vector
'left': False,
'right': False,
# Rotation around the view X vector
'up': False,
'down': False,
# Rotation around the view Z vector
'spin_left': False,
'spin_right': False,
# Rotation around the model Z vector
'rotate_z_neg': False,
'rotate_z_pos': False,
# Reset to the default rotation
'reset_camera': False,
# Performs camera z-translation
'zoom_in': False,
'zoom_out': False,
# Use alternative sensitivity (speed)
'modify_sensitivity': False,
# Rotation presets
'rot_preset_xy': False,
'rot_preset_xz': False,
'rot_preset_yz': False,
'rot_preset_perspective': False,
# axes
'toggle_axes': False,
'toggle_axe_colors': False,
# screenshot
'save_image': False
}
def update(self, dt):
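        # Called once per frame; rotations and zooms are scaled by dt and the
        # current key sensitivity so camera motion is frame-rate independent.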
z = 0
if self.action['zoom_out']:
z -= 1
if self.action['zoom_in']:
z += 1
if z != 0:
self.camera.zoom_relative(z/10.0, self.get_key_sensitivity()/10.0)
dx, dy, dz = 0, 0, 0
if self.action['left']:
dx -= 1
if self.action['right']:
dx += 1
if self.action['up']:
dy -= 1
if self.action['down']:
dy += 1
if self.action['spin_left']:
dz += 1
if self.action['spin_right']:
dz -= 1
if not self.is_2D():
if dx != 0:
self.camera.euler_rotate(dx*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[1]))
if dy != 0:
self.camera.euler_rotate(dy*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[0]))
if dz != 0:
self.camera.euler_rotate(dz*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[2]))
else:
self.camera.mouse_translate(0, 0, dx*dt*self.get_key_sensitivity(),
-dy*dt*self.get_key_sensitivity())
rz = 0
if self.action['rotate_z_neg'] and not self.is_2D():
rz -= 1
if self.action['rotate_z_pos'] and not self.is_2D():
rz += 1
if rz != 0:
self.camera.euler_rotate(rz*dt*self.get_key_sensitivity(),
*(get_basis_vectors()[2]))
if self.action['reset_camera']:
self.camera.reset()
if self.action['rot_preset_xy']:
self.camera.set_rot_preset('xy')
if self.action['rot_preset_xz']:
self.camera.set_rot_preset('xz')
if self.action['rot_preset_yz']:
self.camera.set_rot_preset('yz')
if self.action['rot_preset_perspective']:
self.camera.set_rot_preset('perspective')
if self.action['toggle_axes']:
self.action['toggle_axes'] = False
self.camera.axes.toggle_visible()
if self.action['toggle_axe_colors']:
self.action['toggle_axe_colors'] = False
self.camera.axes.toggle_colors()
if self.action['save_image']:
self.action['save_image'] = False
self.window.plot.saveimage()
return True
def get_mouse_sensitivity(self):
if self.action['modify_sensitivity']:
return self.modified_mouse_sensitivity
else:
return self.normal_mouse_sensitivity
def get_key_sensitivity(self):
if self.action['modify_sensitivity']:
return self.modified_key_sensitivity
else:
return self.normal_key_sensitivity
def on_key_press(self, symbol, modifiers):
if symbol in self.keymap:
self.action[self.keymap[symbol]] = True
def on_key_release(self, symbol, modifiers):
if symbol in self.keymap:
self.action[self.keymap[symbol]] = False
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if buttons & LEFT:
if self.is_2D():
self.camera.mouse_translate(x, y, dx, dy)
else:
self.camera.spherical_rotate((x - dx, y - dy), (x, y),
self.get_mouse_sensitivity())
if buttons & MIDDLE:
self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy,
self.get_mouse_sensitivity()/20.0)
if buttons & RIGHT:
self.camera.mouse_translate(x, y, dx, dy)
def on_mouse_scroll(self, x, y, dx, dy):
self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy,
self.get_mouse_sensitivity())
def is_2D(self):
functions = self.window.plot._functions
for i in functions:
if len(functions[i].i_vars) > 1 or len(functions[i].d_vars) > 2:
return False
return True
|
51c4051ae35a55b8cd4317cf63fa250f906e408f396972aeedaf3dc46a1ce114 | import pyglet.gl as pgl
from sympy.core import S
from sympy.plotting.pygletplot.plot_mode_base import PlotModeBase
class PlotCurve(PlotModeBase):
style_override = 'wireframe'
def _on_calculate_verts(self):
self.t_interval = self.intervals[0]
self.t_set = list(self.t_interval.frange())
self.bounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
evaluate = self._get_evaluator()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = float(self.t_interval.v_len)
self.verts = list()
b = self.bounds
for t in self.t_set:
try:
_e = evaluate(t) # calculate vertex
except (NameError, ZeroDivisionError):
_e = None
if _e is not None: # update bounding box
for axis in range(3):
b[axis][0] = min([b[axis][0], _e[axis]])
b[axis][1] = max([b[axis][1], _e[axis]])
self.verts.append(_e)
self._calculating_verts_pos += 1.0
for axis in range(3):
b[axis][2] = b[axis][1] - b[axis][0]
if b[axis][2] == 0.0:
b[axis][2] = 1.0
self.push_wireframe(self.draw_verts(False))
def _on_calculate_cverts(self):
if not self.verts or not self.color:
return
def set_work_len(n):
self._calculating_cverts_len = float(n)
def inc_work_pos():
self._calculating_cverts_pos += 1.0
set_work_len(1)
self._calculating_cverts_pos = 0
self.cverts = self.color.apply_to_curve(self.verts,
self.t_set,
set_len=set_work_len,
inc_pos=inc_work_pos)
self.push_wireframe(self.draw_verts(True))
def calculate_one_cvert(self, t):
vert = self.verts[t]
return self.color(vert[0], vert[1], vert[2],
self.t_set[t], None)
def draw_verts(self, use_cverts):
def f():
pgl.glBegin(pgl.GL_LINE_STRIP)
for t in range(len(self.t_set)):
p = self.verts[t]
if p is None:
pgl.glEnd()
pgl.glBegin(pgl.GL_LINE_STRIP)
continue
if use_cverts:
c = self.cverts[t]
if c is None:
c = (0, 0, 0)
pgl.glColor3f(*c)
else:
pgl.glColor3f(*self.default_wireframe_color)
pgl.glVertex3f(*p)
pgl.glEnd()
return f
|
bdcd9f223554247cb015003250989c642179c93080efad08b310369fdc8a9bac | import pyglet.gl as pgl
from sympy.core import S
from sympy.plotting.pygletplot.plot_mode_base import PlotModeBase
class PlotSurface(PlotModeBase):
default_rot_preset = 'perspective'
def _on_calculate_verts(self):
self.u_interval = self.intervals[0]
self.u_set = list(self.u_interval.frange())
self.v_interval = self.intervals[1]
self.v_set = list(self.v_interval.frange())
self.bounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
evaluate = self._get_evaluator()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = float(
self.u_interval.v_len*self.v_interval.v_len)
verts = list()
b = self.bounds
for u in self.u_set:
column = list()
for v in self.v_set:
try:
_e = evaluate(u, v) # calculate vertex
except ZeroDivisionError:
_e = None
if _e is not None: # update bounding box
for axis in range(3):
b[axis][0] = min([b[axis][0], _e[axis]])
b[axis][1] = max([b[axis][1], _e[axis]])
column.append(_e)
self._calculating_verts_pos += 1.0
verts.append(column)
for axis in range(3):
b[axis][2] = b[axis][1] - b[axis][0]
if b[axis][2] == 0.0:
b[axis][2] = 1.0
self.verts = verts
self.push_wireframe(self.draw_verts(False, False))
self.push_solid(self.draw_verts(False, True))
def _on_calculate_cverts(self):
if not self.verts or not self.color:
return
def set_work_len(n):
self._calculating_cverts_len = float(n)
def inc_work_pos():
self._calculating_cverts_pos += 1.0
set_work_len(1)
self._calculating_cverts_pos = 0
self.cverts = self.color.apply_to_surface(self.verts,
self.u_set,
self.v_set,
set_len=set_work_len,
inc_pos=inc_work_pos)
self.push_solid(self.draw_verts(True, True))
def calculate_one_cvert(self, u, v):
vert = self.verts[u][v]
return self.color(vert[0], vert[1], vert[2],
self.u_set[u], self.v_set[v])
def draw_verts(self, use_cverts, use_solid_color):
def f():
for u in range(1, len(self.u_set)):
pgl.glBegin(pgl.GL_QUAD_STRIP)
for v in range(len(self.v_set)):
pa = self.verts[u - 1][v]
pb = self.verts[u][v]
if pa is None or pb is None:
pgl.glEnd()
pgl.glBegin(pgl.GL_QUAD_STRIP)
continue
if use_cverts:
ca = self.cverts[u - 1][v]
cb = self.cverts[u][v]
if ca is None:
ca = (0, 0, 0)
if cb is None:
cb = (0, 0, 0)
else:
if use_solid_color:
ca = cb = self.default_solid_color
else:
ca = cb = self.default_wireframe_color
pgl.glColor3f(*ca)
pgl.glVertex3f(*pa)
pgl.glColor3f(*cb)
pgl.glVertex3f(*pb)
pgl.glEnd()
return f
|
2f96c102d973aa8f649495d53bd1dfcec2ffe7053fe6b46cadf5c8df234e8673 | from sympy import Symbol, sympify
from sympy.core.numbers import Integer
class PlotInterval:
"""
"""
_v, _v_min, _v_max, _v_steps = None, None, None, None
def require_all_args(f):
def check(self, *args, **kwargs):
for g in [self._v, self._v_min, self._v_max, self._v_steps]:
if g is None:
raise ValueError("PlotInterval is incomplete.")
return f(self, *args, **kwargs)
return check
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], PlotInterval):
self.fill_from(args[0])
return
elif isinstance(args[0], str):
try:
args = eval(args[0])
except TypeError:
s_eval_error = "Could not interpret string %s."
raise ValueError(s_eval_error % (args[0]))
elif isinstance(args[0], (tuple, list)):
args = args[0]
else:
raise ValueError("Not an interval.")
if not isinstance(args, (tuple, list)) or len(args) > 4:
f_error = "PlotInterval must be a tuple or list of length 4 or less."
raise ValueError(f_error)
args = list(args)
if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)):
self.v = args.pop(0)
if len(args) in [2, 3]:
self.v_min = args.pop(0)
self.v_max = args.pop(0)
if len(args) == 1:
self.v_steps = args.pop(0)
elif len(args) == 1:
self.v_steps = args.pop(0)
def get_v(self):
return self._v
def set_v(self, v):
if v is None:
self._v = None
return
if not isinstance(v, Symbol):
raise ValueError("v must be a sympy Symbol.")
self._v = v
def get_v_min(self):
return self._v_min
def set_v_min(self, v_min):
if v_min is None:
self._v_min = None
return
try:
self._v_min = sympify(v_min)
float(self._v_min.evalf())
except TypeError:
raise ValueError("v_min could not be interpreted as a number.")
def get_v_max(self):
return self._v_max
def set_v_max(self, v_max):
if v_max is None:
self._v_max = None
return
try:
self._v_max = sympify(v_max)
float(self._v_max.evalf())
except TypeError:
raise ValueError("v_max could not be interpreted as a number.")
def get_v_steps(self):
return self._v_steps
def set_v_steps(self, v_steps):
if v_steps is None:
self._v_steps = None
return
if isinstance(v_steps, int):
v_steps = Integer(v_steps)
elif not isinstance(v_steps, Integer):
raise ValueError("v_steps must be an int or sympy Integer.")
if v_steps <= Integer(0):
raise ValueError("v_steps must be positive.")
self._v_steps = v_steps
@require_all_args
def get_v_len(self):
return self.v_steps + 1
v = property(get_v, set_v)
v_min = property(get_v_min, set_v_min)
v_max = property(get_v_max, set_v_max)
v_steps = property(get_v_steps, set_v_steps)
v_len = property(get_v_len)
def fill_from(self, b):
if b.v is not None:
self.v = b.v
if b.v_min is not None:
self.v_min = b.v_min
if b.v_max is not None:
self.v_max = b.v_max
if b.v_steps is not None:
self.v_steps = b.v_steps
@staticmethod
def try_parse(*args):
"""
Returns a PlotInterval if args can be interpreted
as such, otherwise None.
"""
if len(args) == 1 and isinstance(args[0], PlotInterval):
return args[0]
try:
return PlotInterval(*args)
except ValueError:
return None
def _str_base(self):
return ",".join([str(self.v), str(self.v_min),
str(self.v_max), str(self.v_steps)])
def __repr__(self):
"""
A string representing the interval in class constructor form.
"""
return "PlotInterval(%s)" % (self._str_base())
def __str__(self):
"""
A string representing the interval in list form.
"""
return "[%s]" % (self._str_base())
@require_all_args
def assert_complete(self):
pass
@require_all_args
def vrange(self):
"""
Yields v_steps+1 sympy numbers ranging from
v_min to v_max.
"""
d = (self.v_max - self.v_min) / self.v_steps
for i in range(self.v_steps + 1):
a = self.v_min + (d * Integer(i))
yield a
@require_all_args
def vrange2(self):
"""
Yields v_steps pairs of sympy numbers ranging from
(v_min, v_min + step) to (v_max - step, v_max).
"""
d = (self.v_max - self.v_min) / self.v_steps
a = self.v_min + (d * Integer(0))
for i in range(self.v_steps):
b = self.v_min + (d * Integer(i + 1))
yield a, b
a = b
def frange(self):
for i in self.vrange():
yield float(i.evalf())
|
9192949a13fcfa23fc2e2456c020cd0b80b76f5249d9232ef0cf20154d4b4bdd | from sympy import lambdify
from sympy.core.numbers import pi
from sympy.functions import sin, cos
from sympy.plotting.pygletplot.plot_curve import PlotCurve
from sympy.plotting.pygletplot.plot_surface import PlotSurface
from math import sin as p_sin
from math import cos as p_cos
def float_vec3(f):
def inner(*args):
v = f(*args)
return float(v[0]), float(v[1]), float(v[2])
return inner
class Cartesian2D(PlotCurve):
i_vars, d_vars = 'x', 'y'
intervals = [[-5, 5, 100]]
aliases = ['cartesian']
is_default = True
def _get_sympy_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
@float_vec3
def e(_x):
return (_x, fy.subs(x, _x), 0.0)
return e
def _get_lambda_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
return lambdify([x], [x, fy, 0.0])
class Cartesian3D(PlotSurface):
i_vars, d_vars = 'xy', 'z'
intervals = [[-1, 1, 40], [-1, 1, 40]]
aliases = ['cartesian', 'monge']
is_default = True
def _get_sympy_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
@float_vec3
def e(_x, _y):
return (_x, _y, fz.subs(x, _x).subs(y, _y))
return e
def _get_lambda_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
return lambdify([x, y], [x, y, fz])
class ParametricCurve2D(PlotCurve):
i_vars, d_vars = 't', 'xy'
intervals = [[0, 2*pi, 100]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
@float_vec3
def e(_t):
return (fx.subs(t, _t), fy.subs(t, _t), 0.0)
return e
def _get_lambda_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
return lambdify([t], [fx, fy, 0.0])
class ParametricCurve3D(PlotCurve):
i_vars, d_vars = 't', 'xyz'
intervals = [[0, 2*pi, 100]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy, fz = self.d_vars
t = self.t_interval.v
@float_vec3
def e(_t):
return (fx.subs(t, _t), fy.subs(t, _t), fz.subs(t, _t))
return e
def _get_lambda_evaluator(self):
fx, fy, fz = self.d_vars
t = self.t_interval.v
return lambdify([t], [fx, fy, fz])
class ParametricSurface(PlotSurface):
i_vars, d_vars = 'uv', 'xyz'
intervals = [[-1, 1, 40], [-1, 1, 40]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy, fz = self.d_vars
u = self.u_interval.v
v = self.v_interval.v
@float_vec3
def e(_u, _v):
return (fx.subs(u, _u).subs(v, _v),
fy.subs(u, _u).subs(v, _v),
fz.subs(u, _u).subs(v, _v))
return e
def _get_lambda_evaluator(self):
fx, fy, fz = self.d_vars
u = self.u_interval.v
v = self.v_interval.v
return lambdify([u, v], [fx, fy, fz])
class Polar(PlotCurve):
i_vars, d_vars = 't', 'r'
intervals = [[0, 2*pi, 100]]
aliases = ['polar']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.t_interval.v
def e(_t):
_r = float(fr.subs(t, _t))
return (_r*p_cos(_t), _r*p_sin(_t), 0.0)
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.t_interval.v
fx, fy = fr*cos(t), fr*sin(t)
return lambdify([t], [fx, fy, 0.0])
class Cylindrical(PlotSurface):
i_vars, d_vars = 'th', 'r'
intervals = [[0, 2*pi, 40], [-1, 1, 20]]
aliases = ['cylindrical', 'polar']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
def e(_t, _h):
_r = float(fr.subs(t, _t).subs(h, _h))
return (_r*p_cos(_t), _r*p_sin(_t), _h)
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
fx, fy = fr*cos(t), fr*sin(t)
return lambdify([t, h], [fx, fy, h])
class Spherical(PlotSurface):
i_vars, d_vars = 'tp', 'r'
intervals = [[0, 2*pi, 40], [0, pi, 20]]
aliases = ['spherical']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
p = self.v_interval.v
def e(_t, _p):
_r = float(fr.subs(t, _t).subs(p, _p))
return (_r*p_cos(_t)*p_sin(_p),
_r*p_sin(_t)*p_sin(_p),
_r*p_cos(_p))
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
p = self.v_interval.v
fx = fr * cos(t) * sin(p)
fy = fr * sin(t) * sin(p)
fz = fr * cos(p)
return lambdify([t, p], [fx, fy, fz])
Cartesian2D._register()
Cartesian3D._register()
ParametricCurve2D._register()
ParametricCurve3D._register()
ParametricSurface._register()
Polar._register()
Cylindrical._register()
Spherical._register()
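# The _register() calls above make each mode reachable through its aliases.
# As a rough sketch (assuming the option-string interface parsed by
# parse_option_string in util.py), a polar curve could be requested with:
# >>> from sympy import Symbol, sin
# >>> from sympy.plotting.pygletplot import PygletPlot
# >>> t = Symbol('t')
# >>> PygletPlot(1 - sin(t), 'mode=polar')  # doctest: +SKIP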
|
82a277fc468671851ed12f01172e97421ec914acb3bdd7269eebbddd829fbddd | try:
from ctypes import c_float, c_int, c_double
except ImportError:
pass
import pyglet.gl as pgl
from sympy.core import S
def get_model_matrix(array_type=c_float, glGetMethod=pgl.glGetFloatv):
"""
Returns the current modelview matrix.
"""
m = (array_type*16)()
glGetMethod(pgl.GL_MODELVIEW_MATRIX, m)
return m
def get_projection_matrix(array_type=c_float, glGetMethod=pgl.glGetFloatv):
"""
Returns the current projection matrix.
"""
m = (array_type*16)()
glGetMethod(pgl.GL_PROJECTION_MATRIX, m)
return m
def get_viewport():
"""
Returns the current viewport.
"""
m = (c_int*4)()
pgl.glGetIntegerv(pgl.GL_VIEWPORT, m)
return m
def get_direction_vectors():
m = get_model_matrix()
return ((m[0], m[4], m[8]),
(m[1], m[5], m[9]),
(m[2], m[6], m[10]))
def get_view_direction_vectors():
m = get_model_matrix()
return ((m[0], m[1], m[2]),
(m[4], m[5], m[6]),
(m[8], m[9], m[10]))
def get_basis_vectors():
return ((1, 0, 0), (0, 1, 0), (0, 0, 1))
def screen_to_model(x, y, z):
m = get_model_matrix(c_double, pgl.glGetDoublev)
p = get_projection_matrix(c_double, pgl.glGetDoublev)
w = get_viewport()
mx, my, mz = c_double(), c_double(), c_double()
pgl.gluUnProject(x, y, z, m, p, w, mx, my, mz)
return float(mx.value), float(my.value), float(mz.value)
def model_to_screen(x, y, z):
m = get_model_matrix(c_double, pgl.glGetDoublev)
p = get_projection_matrix(c_double, pgl.glGetDoublev)
w = get_viewport()
mx, my, mz = c_double(), c_double(), c_double()
pgl.gluProject(x, y, z, m, p, w, mx, my, mz)
return float(mx.value), float(my.value), float(mz.value)
def vec_subs(a, b):
return tuple(a[i] - b[i] for i in range(len(a)))
def billboard_matrix():
"""
Removes rotational components of
current matrix so that primitives
are always drawn facing the viewer.
|1|0|0|x|
|0|1|0|x|
|0|0|1|x| (x means left unchanged)
|x|x|x|x|
"""
m = get_model_matrix()
# XXX: for i in range(11): m[i] = i ?
m[0] = 1
m[1] = 0
m[2] = 0
m[4] = 0
m[5] = 1
m[6] = 0
m[8] = 0
m[9] = 0
m[10] = 1
pgl.glLoadMatrixf(m)
def create_bounds():
return [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
def update_bounds(b, v):
if v is None:
return
for axis in range(3):
b[axis][0] = min([b[axis][0], v[axis]])
b[axis][1] = max([b[axis][1], v[axis]])
def interpolate(a_min, a_max, a_ratio):
return a_min + a_ratio * (a_max - a_min)
def rinterpolate(a_min, a_max, a_value):
a_range = a_max - a_min
if a_max == a_min:
a_range = 1.0
return (a_value - a_min) / float(a_range)
def interpolate_color(color1, color2, ratio):
return tuple(interpolate(color1[i], color2[i], ratio) for i in range(3))
def scale_value(v, v_min, v_len):
return (v - v_min) / v_len
def scale_value_list(flist):
v_min, v_max = min(flist), max(flist)
v_len = v_max - v_min
return list(scale_value(f, v_min, v_len) for f in flist)
def strided_range(r_min, r_max, stride, max_steps=50):
o_min, o_max = r_min, r_max
if abs(r_min - r_max) < 0.001:
return []
try:
range(int(r_min - r_max))
except (TypeError, OverflowError):
return []
if r_min > r_max:
raise ValueError("r_min can not be greater than r_max")
r_min_s = (r_min % stride)
r_max_s = stride - (r_max % stride)
if abs(r_max_s - stride) < 0.001:
r_max_s = 0.0
r_min -= r_min_s
r_max += r_max_s
r_steps = int((r_max - r_min)/stride)
if max_steps and r_steps > max_steps:
return strided_range(o_min, o_max, stride*2)
return [r_min] + list(r_min + e*stride for e in range(1, r_steps + 1)) + [r_max]
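# For example (a sketch of the behaviour above; the original endpoints are
# re-appended, so the last value can repeat):
# >>> strided_range(0, 1, 0.25)
# [0.0, 0.25, 0.5, 0.75, 1.0, 1.0]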
def parse_option_string(s):
if not isinstance(s, str):
return None
options = {}
for token in s.split(';'):
pieces = token.split('=')
if len(pieces) == 1:
option, value = pieces[0], ""
elif len(pieces) == 2:
option, value = pieces
else:
raise ValueError("Plot option string '%s' is malformed." % (s))
options[option.strip()] = value.strip()
return options
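# For example (a sketch):
# >>> parse_option_string('mode=polar; color=zfade')
# {'mode': 'polar', 'color': 'zfade'}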
def dot_product(v1, v2):
return sum(v1[i]*v2[i] for i in range(3))
def vec_sub(v1, v2):
return tuple(v1[i] - v2[i] for i in range(3))
def vec_mag(v):
return sum(v[i]**2 for i in range(3))**(0.5)
|
a86b7370aba9e08b750aa6768f103b167a2b0ae6b84bd23e07fec8dd21c18dc4 | class PlotObject:
"""
Base class for objects which can be displayed in
a Plot.
"""
visible = True
def _draw(self):
if self.visible:
self.draw()
def draw(self):
"""
OpenGL rendering code for the plot object.
Override in base class.
"""
pass
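# A minimal sketch of a custom plot object (hypothetical class; anything
# appended to a plot's _pobjects list gets its _draw() called every frame):
#
# import pyglet.gl as pgl
#
# class CrossHair(PlotObject):
#     def draw(self):
#         pgl.glBegin(pgl.GL_LINES)
#         pgl.glVertex3f(-1.0, 0.0, 0.0)
#         pgl.glVertex3f(1.0, 0.0, 0.0)
#         pgl.glEnd()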
|
93eaa5fef658f6081ed502b16a00b9beb8216b67e4316924857c10164b888666 | from sympy.core.compatibility import clock
import pyglet.gl as pgl
from sympy.plotting.pygletplot.managed_window import ManagedWindow
from sympy.plotting.pygletplot.plot_camera import PlotCamera
from sympy.plotting.pygletplot.plot_controller import PlotController
class PlotWindow(ManagedWindow):
def __init__(self, plot, antialiasing=True, ortho=False,
invert_mouse_zoom=False, linewidth=1.5, caption="SymPy Plot",
**kwargs):
"""
Named Arguments
===============
antialiasing = True
True OR False
ortho = False
True OR False
invert_mouse_zoom = False
True OR False
"""
self.plot = plot
self.camera = None
self._calculating = False
self.antialiasing = antialiasing
self.ortho = ortho
self.invert_mouse_zoom = invert_mouse_zoom
self.linewidth = linewidth
self.title = caption
self.last_caption_update = 0
self.caption_update_interval = 0.2
self.drawing_first_object = True
super().__init__(**kwargs)
def setup(self):
self.camera = PlotCamera(self, ortho=self.ortho)
self.controller = PlotController(self,
invert_mouse_zoom=self.invert_mouse_zoom)
self.push_handlers(self.controller)
pgl.glClearColor(1.0, 1.0, 1.0, 0.0)
pgl.glClearDepth(1.0)
pgl.glDepthFunc(pgl.GL_LESS)
pgl.glEnable(pgl.GL_DEPTH_TEST)
pgl.glEnable(pgl.GL_LINE_SMOOTH)
pgl.glShadeModel(pgl.GL_SMOOTH)
pgl.glLineWidth(self.linewidth)
pgl.glEnable(pgl.GL_BLEND)
pgl.glBlendFunc(pgl.GL_SRC_ALPHA, pgl.GL_ONE_MINUS_SRC_ALPHA)
if self.antialiasing:
pgl.glHint(pgl.GL_LINE_SMOOTH_HINT, pgl.GL_NICEST)
pgl.glHint(pgl.GL_POLYGON_SMOOTH_HINT, pgl.GL_NICEST)
self.camera.setup_projection()
def on_resize(self, w, h):
super().on_resize(w, h)
if self.camera is not None:
self.camera.setup_projection()
def update(self, dt):
self.controller.update(dt)
def draw(self):
self.plot._render_lock.acquire()
self.camera.apply_transformation()
calc_verts_pos, calc_verts_len = 0, 0
calc_cverts_pos, calc_cverts_len = 0, 0
should_update_caption = (clock() - self.last_caption_update >
self.caption_update_interval)
if len(self.plot._functions.values()) == 0:
self.drawing_first_object = True
# dict.iteritems is gone in Python 3, so iterate over values() directly
iterfunctions = iter(self.plot._functions.values())
for r in iterfunctions:
if self.drawing_first_object:
self.camera.set_rot_preset(r.default_rot_preset)
self.drawing_first_object = False
pgl.glPushMatrix()
r._draw()
pgl.glPopMatrix()
# might as well do this while we are
# iterating and have the lock rather
# than locking and iterating twice
# per frame:
if should_update_caption:
try:
if r.calculating_verts:
calc_verts_pos += r.calculating_verts_pos
calc_verts_len += r.calculating_verts_len
if r.calculating_cverts:
calc_cverts_pos += r.calculating_cverts_pos
calc_cverts_len += r.calculating_cverts_len
except ValueError:
pass
for r in self.plot._pobjects:
pgl.glPushMatrix()
r._draw()
pgl.glPopMatrix()
if should_update_caption:
self.update_caption(calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len)
self.last_caption_update = clock()
if self.plot._screenshot:
self.plot._screenshot._execute_saving()
self.plot._render_lock.release()
def update_caption(self, calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len):
caption = self.title
if calc_verts_len or calc_cverts_len:
caption += " (calculating"
if calc_verts_len > 0:
p = (calc_verts_pos / calc_verts_len) * 100
caption += " vertices %i%%" % (p)
if calc_cverts_len > 0:
p = (calc_cverts_pos / calc_cverts_len) * 100
caption += " colors %i%%" % (p)
caption += ")"
if self.caption != caption:
self.set_caption(caption)
|
f7ed9393049c81aa56221fabd01e7f6070be039fd07a97d353c0ebeaf453e294 | import pyglet.gl as pgl
from pyglet import font
from sympy.core import S
from sympy.core.compatibility import is_sequence
from sympy.plotting.pygletplot.plot_object import PlotObject
from sympy.plotting.pygletplot.util import billboard_matrix, dot_product, \
get_direction_vectors, strided_range, vec_mag, vec_sub
class PlotAxes(PlotObject):
def __init__(self, *args,
style='', none=None, frame=None, box=None, ordinate=None,
stride=0.25,
visible='', overlay='', colored='', label_axes='', label_ticks='',
tick_length=0.1,
font_face='Arial', font_size=28,
**kwargs):
# initialize style parameter
style = style.lower()
# allow alias kwargs to override style kwarg
if none is not None:
style = 'none'
if frame is not None:
style = 'frame'
if box is not None:
style = 'box'
if ordinate is not None:
style = 'ordinate'
if style in ['', 'ordinate']:
self._render_object = PlotAxesOrdinate(self)
elif style in ['frame', 'box']:
self._render_object = PlotAxesFrame(self)
elif style in ['none']:
self._render_object = None
else:
raise ValueError(("Unrecognized axes style %s.") % (style))
# initialize stride parameter
try:
stride = eval(stride)
except TypeError:
pass
if is_sequence(stride):
if len(stride) != 3:
raise ValueError("length should be equal to 3")
self._stride = stride
else:
self._stride = [stride, stride, stride]
self._tick_length = float(tick_length)
# setup bounding box and ticks
self._origin = [0, 0, 0]
self.reset_bounding_box()
def flexible_boolean(input, default):
if input in [True, False]:
return input
if input in ['f', 'F', 'false', 'False']:
return False
if input in ['t', 'T', 'true', 'True']:
return True
return default
# initialize remaining parameters
self.visible = flexible_boolean(visible, True)
self._overlay = flexible_boolean(overlay, True)
self._colored = flexible_boolean(colored, False)
self._label_axes = flexible_boolean(label_axes, False)
self._label_ticks = flexible_boolean(label_ticks, True)
# setup label font
self.font_face = font_face
self.font_size = font_size
# this is also used to reinit the
# font on window close/reopen
self.reset_resources()
def reset_resources(self):
self.label_font = None
def reset_bounding_box(self):
self._bounding_box = [[None, None], [None, None], [None, None]]
self._axis_ticks = [[], [], []]
def draw(self):
if self._render_object:
pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT | pgl.GL_DEPTH_BUFFER_BIT)
if self._overlay:
pgl.glDisable(pgl.GL_DEPTH_TEST)
self._render_object.draw()
pgl.glPopAttrib()
def adjust_bounds(self, child_bounds):
b = self._bounding_box
c = child_bounds
for i in [0, 1, 2]:
if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity:
continue
b[i][0] = c[i][0] if b[i][0] is None else min([b[i][0], c[i][0]])
b[i][1] = c[i][1] if b[i][1] is None else max([b[i][1], c[i][1]])
self._bounding_box = b
self._recalculate_axis_ticks(i)
def _recalculate_axis_ticks(self, axis):
b = self._bounding_box
if b[axis][0] is None or b[axis][1] is None:
self._axis_ticks[axis] = []
else:
self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1],
self._stride[axis])
def toggle_visible(self):
self.visible = not self.visible
def toggle_colors(self):
self._colored = not self._colored
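# Example construction (a sketch; the keyword names are the ones accepted
# by __init__ above, everything else is left at its default):
#
# axes = PlotAxes(style='ordinate', stride=0.5,
#                 label_axes=True, colored=True)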
class PlotAxesBase(PlotObject):
def __init__(self, parent_axes):
self._p = parent_axes
def draw(self):
color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored]
self.draw_background(color)
self.draw_axis(2, color[2])
self.draw_axis(1, color[1])
self.draw_axis(0, color[0])
def draw_background(self, color):
pass # optional
def draw_axis(self, axis, color):
raise NotImplementedError()
def draw_text(self, text, position, color, scale=1.0):
if len(color) == 3:
color = (color[0], color[1], color[2], 1.0)
if self._p.label_font is None:
self._p.label_font = font.load(self._p.font_face,
self._p.font_size,
bold=True, italic=False)
label = font.Text(self._p.label_font, text,
color=color,
valign=font.Text.BASELINE,
halign=font.Text.CENTER)
pgl.glPushMatrix()
pgl.glTranslatef(*position)
billboard_matrix()
scale_factor = 0.005 * scale
pgl.glScalef(scale_factor, scale_factor, scale_factor)
pgl.glColor4f(0, 0, 0, 0)
label.draw()
pgl.glPopMatrix()
def draw_line(self, v, color):
o = self._p._origin
pgl.glBegin(pgl.GL_LINES)
pgl.glColor3f(*color)
pgl.glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2])
pgl.glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2])
pgl.glEnd()
class PlotAxesOrdinate(PlotAxesBase):
def __init__(self, parent_axes):
super().__init__(parent_axes)
def draw_axis(self, axis, color):
ticks = self._p._axis_ticks[axis]
radius = self._p._tick_length / 2.0
if len(ticks) < 2:
return
# calculate the vector for this axis
axis_lines = [[0, 0, 0], [0, 0, 0]]
axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
axis_vector = vec_sub(axis_lines[1], axis_lines[0])
# calculate angle to the z direction vector
pos_z = get_direction_vectors()[2]
d = abs(dot_product(axis_vector, pos_z))
d = d / vec_mag(axis_vector)
# don't draw labels if we're looking down the axis
labels_visible = abs(d - 1.0) > 0.02
# draw the ticks and labels
for tick in ticks:
self.draw_tick_line(axis, color, radius, tick, labels_visible)
# draw the axis line and labels
self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
axis_line = [[0, 0, 0], [0, 0, 0]]
axis_line[0][axis], axis_line[1][axis] = a_min, a_max
self.draw_line(axis_line, color)
if labels_visible:
self.draw_axis_line_labels(axis, color, axis_line)
def draw_axis_line_labels(self, axis, color, axis_line):
if not self._p._label_axes:
return
axis_labels = [axis_line[0][::], axis_line[1][::]]
axis_labels[0][axis] -= 0.3
axis_labels[1][axis] += 0.3
a_str = ['X', 'Y', 'Z'][axis]
self.draw_text("-" + a_str, axis_labels[0], color)
self.draw_text("+" + a_str, axis_labels[1], color)
def draw_tick_line(self, axis, color, radius, tick, labels_visible):
tick_axis = {0: 1, 1: 0, 2: 1}[axis]
tick_line = [[0, 0, 0], [0, 0, 0]]
tick_line[0][axis] = tick_line[1][axis] = tick
tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
self.draw_line(tick_line, color)
if labels_visible:
self.draw_tick_line_label(axis, color, radius, tick)
def draw_tick_line_label(self, axis, color, radius, tick):
if not self._p._label_axes:
return
tick_label_vector = [0, 0, 0]
tick_label_vector[axis] = tick
tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1, 1, 1][
axis] * radius * 3.5
self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
class PlotAxesFrame(PlotAxesBase):
def __init__(self, parent_axes):
super().__init__(parent_axes)
def draw_background(self, color):
pass
def draw_axis(self, axis, color):
raise NotImplementedError()
|
6b4768d0dbbcf04f2984e79a3fc60bb97da79264e8dcdd326f22f1e708c2c2f2 | """
Interval Arithmetic for plotting.
This module does not implement interval arithmetic accurately and
hence cannot be used for purposes other than plotting. If you want
to use interval arithmetic, use mpmath's interval arithmetic.
The module implements interval arithmetic using numpy and
Python floats. Rounding up and down is not handled,
so this is not an accurate implementation of interval
arithmetic.
The module uses numpy for speed, which cannot be achieved with mpmath.
"""
# Q: Why use numpy? Why not simply use mpmath's interval arithmetic?
# A: mpmath's interval arithmetic simulates a floating point unit
# and hence is slow, while numpy evaluations are orders of magnitude
# faster.
# Q: Why create a separate class for intervals? Why not use sympy's
# Interval Sets?
# A: The functionality required for plotting is quite different
# from what Interval Sets implement.
# Q: Why is rounding up and down according to IEEE754 not handled?
# A: It is not possible to do it in either numpy or plain Python. An
# external library would have to be used, which defeats the whole purpose,
# i.e. speed. Also, rounding is handled for very few functions in those
# libraries.
# Q: Will my plots be affected?
# A: It will not affect most of the plots. The interval arithmetic
# based module suffers from the same problems as floating point
# arithmetic.
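# A few concrete examples of the semantics implemented below (a sketch;
# the import path assumes the usual sympy.plotting.intervalmath package):
# >>> from sympy.plotting.intervalmath import interval
# >>> interval(1, 2) + interval(2, 3)
# interval(3.000000, 5.000000)
# >>> interval(1, 2) < 3          # True throughout both operands
# intervalMembership(True, True)
# >>> (interval(1, 2, is_valid=False) + 1).is_valid
# False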
from sympy.core.logic import fuzzy_and
from sympy.simplify.simplify import nsimplify
from .interval_membership import intervalMembership
class interval:
""" Represents an interval containing floating points as start and
end of the interval
The is_valid variable tracks whether the interval obtained as the
result of the function is in the domain and is continuous.
- True: Represents the interval result of a function is continuous and
in the domain of the function.
- False: The interval argument of the function was not in the domain of
the function, hence the is_valid of the result interval is False
- None: The function was not continuous over the interval or
the function's argument interval is partly in the domain of the
function
A comparison between an interval and a real number, or a
comparison between two intervals, may return an ``intervalMembership``
object holding two three-valued logic values.
"""
def __init__(self, *args, is_valid=True, **kwargs):
self.is_valid = is_valid
if len(args) == 1:
if isinstance(args[0], interval):
self.start, self.end = args[0].start, args[0].end
else:
self.start = float(args[0])
self.end = float(args[0])
elif len(args) == 2:
if args[0] < args[1]:
self.start = float(args[0])
self.end = float(args[1])
else:
self.start = float(args[1])
self.end = float(args[0])
else:
raise ValueError("interval takes a maximum of two float values "
"as arguments")
@property
def mid(self):
return (self.start + self.end) / 2.0
@property
def width(self):
return self.end - self.start
def __repr__(self):
return "interval(%f, %f)" % (self.start, self.end)
def __str__(self):
return "[%f, %f]" % (self.start, self.end)
def __lt__(self, other):
if isinstance(other, (int, float)):
if self.end < other:
return intervalMembership(True, self.is_valid)
elif self.start > other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.end < other.start:
return intervalMembership(True, valid)
if self.start > other.end:
return intervalMembership(False, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, (int, float)):
if self.start > other:
return intervalMembership(True, self.is_valid)
elif self.end < other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
return other.__lt__(self)
else:
return NotImplemented
def __eq__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return intervalMembership(True, self.is_valid)
if other in self:
return intervalMembership(None, self.is_valid)
else:
return intervalMembership(False, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.start == other.start and self.end == other.end:
return intervalMembership(True, valid)
elif self.__lt__(other)[0] is not None:
return intervalMembership(False, valid)
else:
return intervalMembership(None, valid)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return intervalMembership(False, self.is_valid)
if other in self:
return intervalMembership(None, self.is_valid)
else:
return intervalMembership(True, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.start == other.start and self.end == other.end:
return intervalMembership(False, valid)
if not self.__lt__(other)[0] is None:
return intervalMembership(True, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, (int, float)):
if self.end <= other:
return intervalMembership(True, self.is_valid)
if self.start > other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.end <= other.start:
return intervalMembership(True, valid)
if self.start > other.end:
return intervalMembership(False, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, (int, float)):
if self.start >= other:
return intervalMembership(True, self.is_valid)
elif self.end < other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
return other.__le__(self)
def __add__(self, other):
if isinstance(other, (int, float)):
if self.is_valid:
return interval(self.start + other, self.end + other)
else:
start = self.start + other
end = self.end + other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start + other.start
end = self.end + other.end
valid = fuzzy_and([self.is_valid, other.is_valid])
return interval(start, end, is_valid=valid)
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, (int, float)):
start = self.start - other
end = self.end - other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start - other.end
end = self.end - other.start
valid = fuzzy_and([self.is_valid, other.is_valid])
return interval(start, end, is_valid=valid)
else:
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (int, float)):
start = other - self.end
end = other - self.start
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
return other.__sub__(self)
else:
return NotImplemented
def __neg__(self):
if self.is_valid:
return interval(-self.end, -self.start)
else:
return interval(-self.end, -self.start, is_valid=self.is_valid)
def __mul__(self, other):
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif self.is_valid is None or other.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
inters = []
inters.append(self.start * other.start)
inters.append(self.end * other.start)
inters.append(self.start * other.end)
inters.append(self.end * other.end)
start = min(inters)
end = max(inters)
return interval(start, end)
elif isinstance(other, (int, float)):
return interval(self.start*other, self.end*other, is_valid=self.is_valid)
else:
return NotImplemented
__rmul__ = __mul__
def __contains__(self, other):
if isinstance(other, (int, float)):
return self.start <= other and self.end >= other
else:
return self.start <= other.start and other.end <= self.end
def __rtruediv__(self, other):
if isinstance(other, (int, float)):
other = interval(other)
return other.__truediv__(self)
elif isinstance(other, interval):
return other.__truediv__(self)
else:
return NotImplemented
def __truediv__(self, other):
# Both None and False are handled
if not self.is_valid:
# Don't divide as the value is not valid
return interval(-float('inf'), float('inf'), is_valid=self.is_valid)
if isinstance(other, (int, float)):
if other == 0:
# Divide by zero encountered. valid nowhere
return interval(-float('inf'), float('inf'), is_valid=False)
else:
return interval(self.start / other, self.end / other)
elif isinstance(other, interval):
if other.is_valid is False or self.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif other.is_valid is None or self.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
# denominator contains both signs, i.e. being divided by zero
# return the whole real line with is_valid = None
if 0 in other:
return interval(-float('inf'), float('inf'), is_valid=None)
# denominator negative
this = self
if other.end < 0:
this = -this
other = -other
# denominator positive
inters = []
inters.append(this.start / other.start)
inters.append(this.end / other.start)
inters.append(this.start / other.end)
inters.append(this.end / other.end)
start = max(inters)
end = min(inters)
return interval(start, end)
else:
return NotImplemented
def __pow__(self, other):
# Implements only power to an integer.
from .lib_interval import exp, log
if not self.is_valid:
return self
if isinstance(other, interval):
return exp(other * log(self))
elif isinstance(other, (float, int)):
if other < 0:
return 1 / self.__pow__(abs(other))
else:
if int(other) == other:
return _pow_int(self, other)
else:
return _pow_float(self, other)
else:
return NotImplemented
def __rpow__(self, other):
if isinstance(other, (float, int)):
if not self.is_valid:
#Don't do anything
return self
elif other < 0:
if self.width > 0:
return interval(-float('inf'), float('inf'), is_valid=False)
else:
power_rational = nsimplify(self.start)
num, denom = power_rational.as_numer_denom()
if denom % 2 == 0:
return interval(-float('inf'), float('inf'),
is_valid=False)
else:
start = -abs(other)**self.start
end = start
return interval(start, end)
else:
return interval(other**self.start, other**self.end)
elif isinstance(other, interval):
return other.__pow__(self)
else:
return NotImplemented
def __hash__(self):
return hash((self.is_valid, self.start, self.end))
def _pow_float(inter, power):
"""Evaluates an interval raised to a floating point."""
power_rational = nsimplify(power)
num, denom = power_rational.as_numer_denom()
if num % 2 == 0:
start = abs(inter.start)**power
end = abs(inter.end)**power
if start < 0:
ret = interval(0, max(start, end))
else:
ret = interval(start, end)
return ret
elif denom % 2 == 0:
if inter.end < 0:
return interval(-float('inf'), float('inf'), is_valid=False)
elif inter.start < 0:
return interval(0, inter.end**power, is_valid=None)
else:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0:
start = -abs(inter.start)**power
else:
start = inter.start**power
if inter.end < 0:
end = -abs(inter.end)**power
else:
end = inter.end**power
return interval(start, end, is_valid=inter.is_valid)
def _pow_int(inter, power):
"""Evaluates an interval raised to an integer power"""
power = int(power)
if power & 1:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0 and inter.end > 0:
start = 0
end = max(inter.start**power, inter.end**power)
return interval(start, end)
else:
return interval(inter.start**power, inter.end**power)
|
2f43ab96a7bd0f0dcf6c61ce3f63fc53c9040e920a49c3f8fa2fd7e72d61a468 | from sympy.plotting.intervalmath import interval
from sympy.external import import_module
from sympy.core.compatibility import reduce
""" The module contains implemented functions for interval arithmetic."""
def Abs(x):
if isinstance(x, (int, float)):
return interval(abs(x))
elif isinstance(x, interval):
if x.start < 0 and x.end > 0:
return interval(0, max(abs(x.start), abs(x.end)), is_valid=x.is_valid)
else:
return interval(abs(x.start), abs(x.end))
else:
raise NotImplementedError
#Monotonic
def exp(x):
"""evaluates the exponential of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.exp(x), np.exp(x))
elif isinstance(x, interval):
return interval(np.exp(x.start), np.exp(x.end), is_valid=x.is_valid)
else:
raise NotImplementedError
#Monotonic
def log(x):
"""evaluates the natural logarithm of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
if x <= 0:
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.log(x))
elif isinstance(x, interval):
if not x.is_valid:
return interval(-np.inf, np.inf, is_valid=x.is_valid)
elif x.end <= 0:
return interval(-np.inf, np.inf, is_valid=False)
elif x.start <= 0:
return interval(-np.inf, np.inf, is_valid=None)
return interval(np.log(x.start), np.log(x.end))
else:
raise NotImplementedError
#Monotonic
def log10(x):
"""evaluates the logarithm to the base 10 of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
if x <= 0:
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.log10(x))
elif isinstance(x, interval):
if not x.is_valid:
return interval(-np.inf, np.inf, is_valid=x.is_valid)
elif x.end <= 0:
return interval(-np.inf, np.inf, is_valid=False)
elif x.start <= 0:
return interval(-np.inf, np.inf, is_valid=None)
return interval(np.log10(x.start), np.log10(x.end))
else:
raise NotImplementedError
#Monotonic
def atan(x):
"""evaluates the tan inverse of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.arctan(x))
elif isinstance(x, interval):
start = np.arctan(x.start)
end = np.arctan(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
raise NotImplementedError
#periodic
def sin(x):
"""evaluates the sine of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.sin(x))
elif isinstance(x, interval):
if not x.is_valid:
return interval(-1, 1, is_valid=x.is_valid)
na, __ = divmod(x.start, np.pi / 2.0)
nb, __ = divmod(x.end, np.pi / 2.0)
start = min(np.sin(x.start), np.sin(x.end))
end = max(np.sin(x.start), np.sin(x.end))
if nb - na > 4:
return interval(-1, 1, is_valid=x.is_valid)
elif na == nb:
return interval(start, end, is_valid=x.is_valid)
else:
if (na - 1) // 4 != (nb - 1) // 4:
#sin has max
end = 1
if (na - 3) // 4 != (nb - 3) // 4:
#sin has min
start = -1
return interval(start, end)
else:
raise NotImplementedError
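# For example (a sketch): over [0, pi] the quadrant bookkeeping above finds
# the maximum of sin at pi/2, which the endpoint values alone would miss:
# >>> from math import pi
# >>> sin(interval(0, pi))
# interval(0.000000, 1.000000)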
#periodic
def cos(x):
"""Evaluates the cos of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.cos(x))
elif isinstance(x, interval):
if not (np.isfinite(x.start) and np.isfinite(x.end)):
return interval(-1, 1, is_valid=x.is_valid)
na, __ = divmod(x.start, np.pi / 2.0)
nb, __ = divmod(x.end, np.pi / 2.0)
start = min(np.cos(x.start), np.cos(x.end))
end = max(np.cos(x.start), np.cos(x.end))
if nb - na > 4:
#differ more than 2*pi
return interval(-1, 1, is_valid=x.is_valid)
elif na == nb:
#in the same quadrant
return interval(start, end, is_valid=x.is_valid)
else:
if (na) // 4 != (nb) // 4:
#cos has max
end = 1
if (na - 2) // 4 != (nb - 2) // 4:
#cos has min
start = -1
return interval(start, end, is_valid=x.is_valid)
else:
raise NotImplementedError
def tan(x):
"""Evaluates the tan of an interval"""
return sin(x) / cos(x)
#Monotonic
def sqrt(x):
"""Evaluates the square root of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
if x > 0:
return interval(np.sqrt(x))
else:
return interval(-np.inf, np.inf, is_valid=False)
elif isinstance(x, interval):
#Outside the domain
if x.end < 0:
return interval(-np.inf, np.inf, is_valid=False)
#Partially outside the domain
elif x.start < 0:
return interval(-np.inf, np.inf, is_valid=None)
else:
return interval(np.sqrt(x.start), np.sqrt(x.end),
is_valid=x.is_valid)
else:
raise NotImplementedError
def imin(*args):
"""Evaluates the minimum of a list of intervals"""
np = import_module('numpy')
if not all(isinstance(arg, (int, float, interval)) for arg in args):
return NotImplementedError
else:
new_args = [a for a in args if isinstance(a, (int, float))
or a.is_valid]
if len(new_args) == 0:
if all(a.is_valid is False for a in args):
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(-np.inf, np.inf, is_valid=None)
start_array = [a if isinstance(a, (int, float)) else a.start
for a in new_args]
end_array = [a if isinstance(a, (int, float)) else a.end
for a in new_args]
return interval(min(start_array), min(end_array))
def imax(*args):
"""Evaluates the maximum of a list of intervals"""
np = import_module('numpy')
if not all(isinstance(arg, (int, float, interval)) for arg in args):
return NotImplementedError
else:
new_args = [a for a in args if isinstance(a, (int, float))
or a.is_valid]
if len(new_args) == 0:
if all(a.is_valid is False for a in args):
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(-np.inf, np.inf, is_valid=None)
start_array = [a if isinstance(a, (int, float)) else a.start
for a in new_args]
end_array = [a if isinstance(a, (int, float)) else a.end
for a in new_args]
return interval(max(start_array), max(end_array))
#Monotonic
def sinh(x):
"""Evaluates the hyperbolic sine of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.sinh(x), np.sinh(x))
elif isinstance(x, interval):
return interval(np.sinh(x.start), np.sinh(x.end), is_valid=x.is_valid)
else:
raise NotImplementedError
def cosh(x):
"""Evaluates the hyperbolic cos of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.cosh(x), np.cosh(x))
elif isinstance(x, interval):
#both signs
if x.start < 0 and x.end > 0:
end = max(np.cosh(x.start), np.cosh(x.end))
return interval(1, end, is_valid=x.is_valid)
else:
#Monotonic
start = np.cosh(x.start)
end = np.cosh(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
raise NotImplementedError
#Monotonic
def tanh(x):
"""Evaluates the hyperbolic tan of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.tanh(x), np.tanh(x))
elif isinstance(x, interval):
return interval(np.tanh(x.start), np.tanh(x.end), is_valid=x.is_valid)
else:
raise NotImplementedError
def asin(x):
"""Evaluates the inverse sine of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
#Outside the domain
if abs(x) > 1:
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.arcsin(x), np.arcsin(x))
elif isinstance(x, interval):
#Outside the domain
if x.is_valid is False or x.start > 1 or x.end < -1:
return interval(-np.inf, np.inf, is_valid=False)
#Partially outside the domain
elif x.start < -1 or x.end > 1:
return interval(-np.inf, np.inf, is_valid=None)
else:
start = np.arcsin(x.start)
end = np.arcsin(x.end)
return interval(start, end, is_valid=x.is_valid)
def acos(x):
"""Evaluates the inverse cos of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
if abs(x) > 1:
#Outside the domain
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.arccos(x), np.arccos(x))
elif isinstance(x, interval):
#Outside the domain
if x.is_valid is False or x.start > 1 or x.end < -1:
return interval(-np.inf, np.inf, is_valid=False)
#Partially outside the domain
elif x.start < -1 or x.end > 1:
return interval(-np.inf, np.inf, is_valid=None)
else:
start = np.arccos(x.start)
end = np.arccos(x.end)
return interval(start, end, is_valid=x.is_valid)
def ceil(x):
"""Evaluates the ceiling of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.ceil(x))
elif isinstance(x, interval):
if x.is_valid is False:
return interval(-np.inf, np.inf, is_valid=False)
else:
start = np.ceil(x.start)
end = np.ceil(x.end)
#Continuous over the interval
if start == end:
return interval(start, end, is_valid=x.is_valid)
else:
#Not continuous over the interval
return interval(start, end, is_valid=None)
else:
return NotImplementedError
def floor(x):
"""Evaluates the floor of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.floor(x))
elif isinstance(x, interval):
if x.is_valid is False:
return interval(-np.inf, np.inf, is_valid=False)
else:
start = np.floor(x.start)
end = np.floor(x.end)
#continuous over the argument
if start == end:
return interval(start, end, is_valid=x.is_valid)
else:
#not continuous over the interval
return interval(start, end, is_valid=None)
else:
return NotImplementedError
def acosh(x):
"""Evaluates the inverse hyperbolic cosine of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
#Outside the domain
if x < 1:
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.arccosh(x))
elif isinstance(x, interval):
#Outside the domain
if x.end < 1:
return interval(-np.inf, np.inf, is_valid=False)
#Partly outside the domain
elif x.start < 1:
return interval(-np.inf, np.inf, is_valid=None)
else:
start = np.arccosh(x.start)
end = np.arccosh(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
return NotImplementedError
#Monotonic
def asinh(x):
"""Evaluates the inverse hyperbolic sine of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.arcsinh(x))
elif isinstance(x, interval):
start = np.arcsinh(x.start)
end = np.arcsinh(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
return NotImplementedError
def atanh(x):
"""Evaluates the inverse hyperbolic tangent of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
#Outside the domain
if abs(x) >= 1:
return interval(-np.inf, np.inf, is_valid=False)
else:
return interval(np.arctanh(x))
elif isinstance(x, interval):
#outside the domain
if x.is_valid is False or x.start >= 1 or x.end <= -1:
return interval(-np.inf, np.inf, is_valid=False)
#partly outside the domain
elif x.start <= -1 or x.end >= 1:
return interval(-np.inf, np.inf, is_valid=None)
else:
start = np.arctanh(x.start)
end = np.arctanh(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
return NotImplementedError
#Three valued logic for interval plotting.
def And(*args):
"""Defines the three valued ``And`` behaviour for a 2-tuple of
three valued logic values"""
def reduce_and(cmp_intervala, cmp_intervalb):
if cmp_intervala[0] is False or cmp_intervalb[0] is False:
first = False
elif cmp_intervala[0] is None or cmp_intervalb[0] is None:
first = None
else:
first = True
if cmp_intervala[1] is False or cmp_intervalb[1] is False:
second = False
elif cmp_intervala[1] is None or cmp_intervalb[1] is None:
second = None
else:
second = True
return (first, second)
return reduce(reduce_and, args)
def Or(*args):
"""Defines the three valued ``Or`` behaviour for a 2-tuple of
three valued logic values"""
def reduce_or(cmp_intervala, cmp_intervalb):
if cmp_intervala[0] is True or cmp_intervalb[0] is True:
first = True
elif cmp_intervala[0] is None or cmp_intervalb[0] is None:
first = None
else:
first = False
if cmp_intervala[1] is True or cmp_intervalb[1] is True:
second = True
elif cmp_intervala[1] is None or cmp_intervalb[1] is None:
second = None
else:
second = False
return (first, second)
return reduce(reduce_or, args)
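# For example (a sketch of the reductions above):
# >>> And((True, True), (None, True))
# (None, True)
# >>> Or((False, True), (None, True))
# (None, True)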
|
d55a4ed53ed48ef3f170cc920b91a1665f3e54cfc3c488ab4960b76469b12006 | from sympy.core.logic import fuzzy_and, fuzzy_or, fuzzy_not, fuzzy_xor
class intervalMembership:
"""Represents a boolean expression returned by the comparison of
the interval object.
Parameters
==========
(a, b) : (bool, bool)
The first value determines the comparison as follows:
- True: If the comparison is True throughout the intervals.
- False: If the comparison is False throughout the intervals.
- None: If the comparison is True for some part of the intervals.
The second value is determined as follows:
- True: If both the intervals in comparison are valid.
- False: If at least one of the intervals is invalid.
- None: Otherwise.
"""
def __init__(self, a, b):
self._wrapped = (a, b)
def __getitem__(self, i):
try:
return self._wrapped[i]
except IndexError:
raise IndexError(
"{} must be a valid indexing for the 2-tuple."
.format(i))
def __len__(self):
return 2
def __iter__(self):
return iter(self._wrapped)
def __str__(self):
return "intervalMembership({}, {})".format(*self)
__repr__ = __str__
def __and__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_and([a1, a2]), fuzzy_and([b1, b2]))
def __or__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_or([a1, a2]), fuzzy_and([b1, b2]))
def __invert__(self):
a, b = self
return intervalMembership(fuzzy_not(a), b)
def __xor__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_xor([a1, a2]), fuzzy_and([b1, b2]))
def __eq__(self, other):
return self._wrapped == other
def __ne__(self, other):
return self._wrapped != other
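# For example (a sketch):
# >>> intervalMembership(True, True) & intervalMembership(None, True)
# intervalMembership(None, True)
# >>> ~intervalMembership(True, None)
# intervalMembership(False, None)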
|
f2ca1194310f21081cb5f2f55caf0de6f08618e65bcc061f0a0d7988beed17ba | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A tool to generate AUTHORS. We started tracking authors before moving
to git, so we have to do some manual rearrangement of the git history
authors in order to get the order in AUTHORS. bin/mailmap_update.py
should be run before committing the results.
"""
from __future__ import unicode_literals
from __future__ import print_function
import codecs
import sys
import os
if sys.version_info < (3, 6):
sys.exit("This script requires Python 3.6 or newer")
from subprocess import run, PIPE
from distutils.version import LooseVersion
from collections import OrderedDict
def red(text):
return "\033[31m%s\033[0m" % text
def yellow(text):
return "\033[33m%s\033[0m" % text
def green(text):
return "\033[32m%s\033[0m" % text
# put sympy on the path
mailmap_update_path = os.path.abspath(__file__)
mailmap_update_dir = os.path.dirname(mailmap_update_path)
sympy_top = os.path.split(mailmap_update_dir)[0]
sympy_dir = os.path.join(sympy_top, 'sympy')
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
from sympy.utilities.misc import filldedent
# check git version
minimal = '1.8.4.2'
git_ver = run(['git', '--version'], stdout=PIPE, encoding='utf-8').stdout[12:]
if LooseVersion(git_ver) < LooseVersion(minimal):
print(yellow("Please use a git version >= %s" % minimal))
def author_name(line):
assert line.count("<") == line.count(">") == 1
assert line.endswith(">")
return line.split("<", 1)[0].strip()
def move(l, i1, i2, who):
x = l.pop(i1)
# this will fail if the .mailmap is not right
assert who == author_name(x), \
'%s was not found at line %i' % (who, i1)
l.insert(i2, x)
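# Illustrative sketch (hypothetical entry): author_name('Jane Doe <[email protected]>')
# returns 'Jane Doe', and move(people, 5, 2, 'Jane Doe') pops index 5 (asserting
# that entry is Jane Doe) and reinserts it at index 2.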
# find who git knows about
git_command = ["git", "log", "--topo-order", "--reverse", "--format=%aN <%aE>"]
git_people = run(git_command, stdout=PIPE, encoding='utf-8').stdout.strip().split("\n")
# remove duplicates, keeping the original order
git_people = list(OrderedDict.fromkeys(git_people))
# Do the few changes necessary in order to reproduce AUTHORS:
try:
move(git_people, 2, 0, 'Ondřej Čertík')
move(git_people, 42, 1, 'Fabian Pedregosa')
move(git_people, 22, 2, 'Jurjen N.E. Bos')
git_people.insert(4, "*Marc-Etienne M.Leveille <[email protected]>")
move(git_people, 10, 5, 'Brian Jorgensen')
git_people.insert(11, "*Ulrich Hecht <[email protected]>")
# this will fail if the .mailmap is not right
assert 'Kirill Smelkov' == author_name(git_people.pop(12)
), 'Kirill Smelkov was not found at line 12'
move(git_people, 12, 32, 'Sebastian Krämer')
move(git_people, 227, 35, 'Case Van Horsen')
git_people.insert(43, "*Dan <[email protected]>")
move(git_people, 57, 59, 'Aaron Meurer')
move(git_people, 58, 57, 'Andrew Docherty')
move(git_people, 67, 66, 'Chris Smith')
move(git_people, 79, 76, 'Kevin Goodsell')
git_people.insert(84, "*Chu-Ching Huang <[email protected]>")
move(git_people, 93, 92, 'James Pearson')
# this will fail if the .mailmap is not right
assert 'Sergey B Kirpichev' == author_name(git_people.pop(226)
), 'Sergey B Kirpichev was not found at line 226.'
index = git_people.index(
"azure-pipelines[bot] " +
"<azure-pipelines[bot]@users.noreply.github.com>")
git_people.pop(index)
index = git_people.index(
"whitesource-bolt-for-github[bot] " +
"<whitesource-bolt-for-github[bot]@users.noreply.github.com>")
git_people.pop(index)
except AssertionError as msg:
print(red(msg))
sys.exit(1)
# define new lines for the file
header = filldedent("""
All people who contributed to SymPy by sending at least a patch or
more (in the order of the date of their first contribution), except
those who explicitly didn't want to be mentioned. People with a * next
to their names are not found in the metadata of the git history. This
file is generated automatically by running `./bin/authors_update.py`.
""").lstrip()
fmt = """There are a total of {authors_count} authors."""
header_extra = fmt.format(authors_count=len(git_people))
lines = header.splitlines()
lines.append('')
lines.append(header_extra)
lines.append('')
lines.extend(git_people)
# compare to old lines and stop if no changes were made
old_lines = codecs.open(os.path.realpath(os.path.join(
__file__, os.path.pardir, os.path.pardir, "AUTHORS")),
"r", "utf-8").read().splitlines()
if old_lines == lines:
print(green('No changes made to AUTHORS.'))
sys.exit(0)
# check for new additions
new_authors = []
for i in sorted(set(lines) - set(old_lines)):
try:
author_name(i)
new_authors.append(i)
except AssertionError:
continue
# write the new file
with codecs.open(os.path.realpath(os.path.join(
__file__, os.path.pardir, os.path.pardir, "AUTHORS")),
"w", "utf-8") as fd:
fd.write('\n'.join(lines))
fd.write('\n')
# warn about additions
if new_authors:
print(yellow(filldedent("""
The following authors were added to AUTHORS.
If mailmap_update.py has already been run and
each author appears as desired and is not a
duplicate of some other author, then the
changes can be committed. Otherwise, see
.mailmap for instructions on how to change
an author's entry.""")))
print()
for i in sorted(new_authors, key=lambda x: x.lower()):
print('\t%s' % i)
else:
print(yellow("The AUTHORS file was updated."))
print(red("Changes were made in the authors file"))
sys.exit(1)
|
ecd3541b88a7bddd06c3b0c479a3b7f428b5b5947b628c907e3e008db57fdd45 | #!/usr/bin/env python
"""
Test that only executable files have an executable bit set
"""
from __future__ import print_function
import os
import sys
from get_sympy import path_hack
base_dir = path_hack()
def test_executable(path):
if not os.path.isdir(path):
if os.access(path, os.X_OK):
with open(path, 'r') as f:
if f.readline()[:2] != "#!":
exn_msg = "File at " + path + " either should not be executable or should have a shebang line"
raise OSError(exn_msg)
else:
for file in os.listdir(path):
if file in ('.git', 'venv_main'):
continue
test_executable(os.path.join(path, file))
test_executable(base_dir)
|
864d616b14830b1155725198c85f9d149cf668909931d533b9f3e9e198e25b2d | class TestsFailedError(Exception):
pass
print('Testing optional dependencies')
import sympy
test_list = [
# numpy
'*numpy*',
'sympy/core/',
'sympy/matrices/',
'sympy/physics/quantum/',
'sympy/utilities/tests/test_lambdify.py',
# scipy
'*scipy*',
# llvmlite
'*llvm*',
# theano
'*theano*',
# gmpy
'polys',
# autowrap
'*autowrap*',
# ipython
'*ipython*',
# antlr, lfortran, clang
'sympy/parsing/',
# matchpy
'*rubi*',
# codegen
'sympy/codegen/',
'sympy/utilities/tests/test_codegen',
'sympy/utilities/_compilation/tests/test_compilation',
# cloudpickle
'pickling',
# pycosat
'sympy/logic',
'sympy/assumptions',
#stats
'sympy/stats',
]
blacklist = [
'sympy/physics/quantum/tests/test_circuitplot.py',
]
doctest_list = [
# numpy
'sympy/matrices/',
'sympy/utilities/lambdify.py',
# scipy
'*scipy*',
# llvmlite
'*llvm*',
# theano
'*theano*',
# gmpy
'polys',
# autowrap
'*autowrap*',
# ipython
'*ipython*',
# antlr, lfortran, clang
'sympy/parsing/',
# matchpy
'*rubi*',
# codegen
'sympy/codegen/',
# pycosat
'sympy/logic',
'sympy/assumptions',
#stats
'sympy/stats',
]
if not (sympy.test(*test_list, blacklist=blacklist) and sympy.doctest(*doctest_list)):
raise TestsFailedError('Tests failed')
print('Testing MATPLOTLIB')
# Set matplotlib so that it works correctly in headless Travis. We have to do
# this here because it doesn't work after the sympy plotting module is
# imported.
import matplotlib
matplotlib.use("Agg")
import sympy
# Unfortunately, we have to use subprocess=False so that the above will be
# applied, so no hash randomization here.
if not (sympy.test('sympy/plotting', 'sympy/physics/quantum/tests/test_circuitplot.py',
subprocess=False) and sympy.doctest('sympy/plotting', subprocess=False)):
raise TestsFailedError('Tests failed')
print('Testing SYMENGINE')
import sympy
if not sympy.test('sympy/physics/mechanics'):
raise TestsFailedError('Tests failed')
if not sympy.test('sympy/liealgebras'):
raise TestsFailedError('Tests failed')
|
acb4a84d6b44b95700e5a5eafec914d31541e8fe4934052068d8679d160a7f0c | #!/usr/bin/env python
"""
Test that
from sympy import *
doesn't import anything other than SymPy, its hard dependencies (mpmath), and
hard optional dependencies (gmpy2). Importing unnecessary libraries
can accidentally add hard dependencies to SymPy in the worst case, or at best
slow down the SymPy import time when they are installed.
Note, for this test to be effective, every external library that could
potentially be imported by SymPy must be installed.
TODO: Monkeypatch the importer to detect non-standard library imports even
when they aren't installed.
Based on code from
https://stackoverflow.com/questions/22195382/how-to-check-if-a-module-library-package-is-part-of-the-python-standard-library.
"""
# These libraries will always be imported with SymPy
hard_dependencies = ['mpmath']
# These libraries are optional, but are always imported at SymPy import time
# when they are installed. External libraries should only be added to this
# list if they are required for core SymPy functionality.
hard_optional_dependencies = ['gmpy', 'gmpy2', 'pycosat']
import sys
import os
def is_stdlib(p):
return ((p.startswith(sys.prefix)
or p.startswith(sys.base_prefix))
and 'site-packages' not in p)
stdlib = {p for p in sys.path if is_stdlib(p)}
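# Illustrative sketch (hypothetical paths): '/usr/lib/python3.8' would be kept
# as stdlib here, while '/usr/lib/python3.8/site-packages' would be excluded.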
existing_modules = list(sys.modules.keys())
# hook in-tree SymPy into Python path, if possible
this_path = os.path.abspath(__file__)
this_dir = os.path.dirname(this_path)
sympy_top = os.path.split(this_dir)[0]
sympy_dir = os.path.join(sympy_top, 'sympy')
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
def test_external_imports():
exec("from sympy import *", {})
bad = []
for mod in sys.modules:
if '.' in mod and mod.split('.')[0] in sys.modules:
# Only worry about the top-level modules
continue
if mod in existing_modules:
continue
if any(mod == i or mod.startswith(i + '.') for i in ['sympy'] +
hard_dependencies + hard_optional_dependencies):
continue
if mod in sys.builtin_module_names:
continue
fname = getattr(sys.modules[mod], "__file__", None)
if fname is None:
bad.append(mod)
continue
if fname.endswith(('__init__.py', '__init__.pyc', '__init__.pyo')):
fname = os.path.dirname(fname)
if os.path.dirname(fname) in stdlib:
continue
bad.append(mod)
if bad:
raise RuntimeError("""Unexpected external modules found when running 'from sympy import *':
""" + '\n '.join(bad))
print("No unexpected external modules were imported with 'from sympy import *'!")
if __name__ == '__main__':
test_external_imports()
|
cb482834e0f2d52e694191d64b07cebf482d6c5022ea33a9ceaedcd3da52e45d | #!/usr/bin/env python
from subprocess import check_call
def run(*cmdline, cwd=None, env=None):
"""
    Run a command in a subprocess, raising CalledProcessError if it fails.
"""
return check_call(cmdline, encoding='utf-8', cwd=cwd, env=env)
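# Illustrative usage (hypothetical command): run('make', 'html', cwd='doc')
# raises CalledProcessError if the command exits with a non-zero status.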
|
4e1aba5e85a405de6e2755d23d184f886f66a8d4f0879eb57887ea9d33d9027b | #!/usr/bin/env python
import os
import json
import sys
import stat
import glob
from getpass import getpass
from subprocess import check_output
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from contextlib import contextmanager
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2
def main(version, push=None):
"""
WARNING: If push is given as --push then this will push the release to
github.
"""
push = push == '--push'
_GitHub_release(version, push)
def error(msg):
raise ValueError(msg)
def blue(text):
return "\033[34m%s\033[0m" % text
def red(text):
return "\033[31m%s\033[0m" % text
def green(text):
return "\033[32m%s\033[0m" % text
def _GitHub_release(version, push, username=None, user='sympy', token=None,
token_file_path="~/.sympy/release-token", repo='sympy', draft=False):
"""
Upload the release files to GitHub.
The tag must be pushed up first. You can test on another repo by changing
user and repo.
"""
if not requests:
error("requests and requests-oauthlib must be installed to upload to GitHub")
release_text = GitHub_release_text(version)
short_version = get_sympy_short_version(version)
tag = 'sympy-' + version
prerelease = short_version != version
urls = URLs(user=user, repo=repo)
if not username:
username = input("GitHub username: ")
token = load_token_file(token_file_path)
if not token:
username, password, token = GitHub_authenticate(urls, username, token)
# If the tag in question is not pushed up yet, then GitHub will just
# create it off of master automatically, which is not what we want. We
# could make it create it off the release branch, but even then, we would
# not be sure that the correct commit is tagged. So we require that the
# tag exist first.
if not check_tag_exists(version):
sys.exit(red("The tag for this version has not been pushed yet. Cannot upload the release."))
# See https://developer.github.com/v3/repos/releases/#create-a-release
# First, create the release
post = {}
post['tag_name'] = tag
post['name'] = "SymPy " + version
post['body'] = release_text
post['draft'] = draft
post['prerelease'] = prerelease
print("Creating release for tag", tag, end=' ')
if push:
result = query_GitHub(urls.releases_url, username, password=None,
token=token, data=json.dumps(post)).json()
release_id = result['id']
else:
print(green("Not pushing!"))
print(green("Done"))
# Then, upload all the files to it.
for key in descriptions:
tarball = get_tarball_name(key, version)
params = {}
params['name'] = tarball
if tarball.endswith('gz'):
headers = {'Content-Type':'application/gzip'}
elif tarball.endswith('pdf'):
headers = {'Content-Type':'application/pdf'}
elif tarball.endswith('zip'):
headers = {'Content-Type':'application/zip'}
else:
headers = {'Content-Type':'application/octet-stream'}
print("Uploading", tarball, end=' ')
sys.stdout.flush()
with open(os.path.join('release/release-' + version, tarball), 'rb') as f:
if push:
result = query_GitHub(urls.release_uploads_url % release_id, username,
password=None, token=token, data=f, params=params,
headers=headers).json()
else:
print(green("Not uploading!"))
print(green("Done"))
# TODO: download the files and check that they have the right sha256 sum
def GitHub_release_text(version):
"""
Generate text to put in the GitHub release Markdown box
"""
shortversion = get_sympy_short_version(version)
htmltable = table(version)
out = """\
See https://github.com/sympy/sympy/wiki/release-notes-for-{shortversion} for the release notes.
{htmltable}
**Note**: Do not download the **Source code (zip)** or the **Source code (tar.gz)**
files below.
"""
out = out.format(shortversion=shortversion, htmltable=htmltable)
print(blue("Here are the release notes to copy into the GitHub release "
"Markdown form:"))
print()
print(out)
return out
def get_sympy_short_version(version):
"""
Get the short version of SymPy being released, not including any rc tags
(like 0.7.3)
"""
parts = version.split('.')
# Remove rc tags
# Handle both 1.0.rc1 and 1.1rc1
if not parts[-1].isdigit():
if parts[-1][0].isdigit():
parts[-1] = parts[-1][0]
else:
parts.pop(-1)
return '.'.join(parts)
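# Illustrative sketch (version strings are examples only):
#   get_sympy_short_version('1.8rc1')  -> '1.8'
#   get_sympy_short_version('1.0.rc1') -> '1.0'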
class URLs(object):
"""
    This class contains the URLs and templates which are used in requests to
    the GitHub API.
"""
def __init__(self, user="sympy", repo="sympy",
api_url="https://api.github.com",
authorize_url="https://api.github.com/authorizations",
uploads_url='https://uploads.github.com',
main_url='https://github.com'):
"""Generates all URLs and templates"""
self.user = user
self.repo = repo
self.api_url = api_url
self.authorize_url = authorize_url
self.uploads_url = uploads_url
self.main_url = main_url
self.pull_list_url = api_url + "/repos" + "/" + user + "/" + repo + "/pulls"
self.issue_list_url = api_url + "/repos/" + user + "/" + repo + "/issues"
self.releases_url = api_url + "/repos/" + user + "/" + repo + "/releases"
self.single_issue_template = self.issue_list_url + "/%d"
self.single_pull_template = self.pull_list_url + "/%d"
self.user_info_template = api_url + "/users/%s"
self.user_repos_template = api_url + "/users/%s/repos"
self.issue_comment_template = (api_url + "/repos" + "/" + user + "/" + repo + "/issues/%d" +
"/comments")
self.release_uploads_url = (uploads_url + "/repos/" + user + "/" +
repo + "/releases/%d" + "/assets")
self.release_download_url = (main_url + "/" + user + "/" + repo +
"/releases/download/%s/%s")
def load_token_file(path="~/.sympy/release-token"):
print("> Using token file %s" % path)
path = os.path.expanduser(path)
path = os.path.abspath(path)
if os.path.isfile(path):
try:
with open(path) as f:
token = f.readline()
except IOError:
print("> Unable to read token file")
return
else:
print("> Token file does not exist")
return
return token.strip()
def GitHub_authenticate(urls, username, token=None):
_login_message = """\
Enter your GitHub username & password or press ^C to quit. The password
will be kept as a Python variable only while this script is running; it is
used over https to authenticate with GitHub and is not saved anywhere else:\
"""
if username:
print("> Authenticating as %s" % username)
else:
print(_login_message)
username = input("Username: ")
authenticated = False
if token:
print("> Authenticating using token")
try:
GitHub_check_authentication(urls, username, None, token)
except AuthenticationFailed:
print("> Authentication failed")
else:
print("> OK")
password = None
authenticated = True
while not authenticated:
password = getpass("Password: ")
try:
print("> Checking username and password ...")
GitHub_check_authentication(urls, username, password, None)
except AuthenticationFailed:
print("> Authentication failed")
else:
print("> OK.")
authenticated = True
if password:
generate = input("> Generate API token? [Y/n] ")
if generate.lower() in ["y", "ye", "yes", ""]:
name = input("> Name of token on GitHub? [SymPy Release] ")
if name == "":
name = "SymPy Release"
token = generate_token(urls, username, password, name=name)
print("Your token is", token)
print("Use this token from now on as GitHub_release:token=" + token +
",username=" + username)
print(red("DO NOT share this token with anyone"))
save = input("Do you want to save this token to a file [yes]? ")
if save.lower().strip() in ['y', 'yes', 'ye', '']:
save_token_file(token)
return username, password, token
def run(*cmdline, cwd=None):
"""
Run command in subprocess and get lines of output
"""
return check_output(cmdline, encoding='utf-8', cwd=cwd).splitlines()
def check_tag_exists(version):
"""
Check if the tag for this release has been uploaded yet.
"""
tag = 'sympy-' + version
all_tag_lines = run('git', 'ls-remote', '--tags', 'origin')
return any(tag in tag_line for tag_line in all_tag_lines)
def generate_token(urls, username, password, OTP=None, name="SymPy Release"):
enc_data = json.dumps(
{
"scopes": ["public_repo"],
"note": name
}
)
url = urls.authorize_url
rep = query_GitHub(url, username=username, password=password,
data=enc_data).json()
return rep["token"]
def GitHub_check_authentication(urls, username, password, token):
"""
    Checks that the given username and password (or token) are valid.
"""
query_GitHub(urls.api_url, username, password, token)
class AuthenticationFailed(Exception):
pass
def query_GitHub(url, username=None, password=None, token=None, data=None,
OTP=None, headers=None, params=None, files=None):
"""
Query GitHub API.
In case of a multipage result, DOES NOT query the next page.
"""
headers = headers or {}
if OTP:
headers['X-GitHub-OTP'] = OTP
if token:
auth = OAuth2(client_id=username, token=dict(access_token=token,
token_type='bearer'))
else:
auth = HTTPBasicAuth(username, password)
if data:
r = requests.post(url, auth=auth, data=data, headers=headers,
params=params, files=files)
else:
r = requests.get(url, auth=auth, headers=headers, params=params, stream=True)
if r.status_code == 401:
two_factor = r.headers.get('X-GitHub-OTP')
if two_factor:
print("A two-factor authentication code is required:", two_factor.split(';')[1].strip())
OTP = input("Authentication code: ")
return query_GitHub(url, username=username, password=password,
token=token, data=data, OTP=OTP)
raise AuthenticationFailed("invalid username or password")
r.raise_for_status()
return r
def save_token_file(token):
token_file = input("> Enter token file location [~/.sympy/release-token] ")
token_file = token_file or "~/.sympy/release-token"
token_file_expand = os.path.expanduser(token_file)
token_file_expand = os.path.abspath(token_file_expand)
token_folder, _ = os.path.split(token_file_expand)
try:
if not os.path.isdir(token_folder):
os.mkdir(token_folder, 0o700)
with open(token_file_expand, 'w') as f:
f.write(token + '\n')
os.chmod(token_file_expand, stat.S_IREAD | stat.S_IWRITE)
except OSError as e:
print("> Unable to create folder for token file: ", e)
return
except IOError as e:
print("> Unable to save token file: ", e)
return
return token_file
def table(version):
"""
Make an html table of the downloads.
This is for pasting into the GitHub releases page. See GitHub_release().
"""
tarball_formatter_dict = dict(_tarball_format(version))
shortversion = get_sympy_short_version(version)
tarball_formatter_dict['version'] = shortversion
sha256s = [i.split('\t') for i in _sha256(version, print_=False, local=True).split('\n')]
sha256s_dict = {name: sha256 for sha256, name in sha256s}
sizes = [i.split('\t') for i in _size(version, print_=False).split('\n')]
sizes_dict = {name: size for size, name in sizes}
table = []
# https://docs.python.org/2/library/contextlib.html#contextlib.contextmanager. Not
# recommended as a real way to generate html, but it works better than
# anything else I've tried.
@contextmanager
def tag(name):
table.append("<%s>" % name)
yield
table.append("</%s>" % name)
@contextmanager
def a_href(link):
table.append("<a href=\"%s\">" % link)
yield
table.append("</a>")
with tag('table'):
with tag('tr'):
for headname in ["Filename", "Description", "size", "sha256"]:
with tag("th"):
table.append(headname)
for key in descriptions:
name = get_tarball_name(key, version)
with tag('tr'):
with tag('td'):
with a_href('https://github.com/sympy/sympy/releases/download/sympy-%s/%s' % (version, name)):
with tag('b'):
table.append(name)
with tag('td'):
table.append(descriptions[key].format(**tarball_formatter_dict))
with tag('td'):
table.append(sizes_dict[name])
with tag('td'):
table.append(sha256s_dict[name])
out = ' '.join(table)
return out
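# Illustrative sketch of the tag()/a_href() helpers defined inside table():
# "with tag('td'): table.append('name')" appends '<td>', 'name', '</td>', so
# the final ' '.join(table) produces '<td> name </td>'.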
descriptions = OrderedDict([
('source', "The SymPy source installer.",),
('wheel', "A wheel of the package.",),
('html', '''Html documentation. This is the same as
the <a href="https://docs.sympy.org/latest/index.html">online documentation</a>.''',),
('pdf', '''Pdf version of the <a href="https://docs.sympy.org/latest/index.html"> html documentation</a>.''',),
])
def _size(version, print_=True):
"""
Print the sizes of the release files. Run locally.
"""
out = run(*(['du', '-h'] + release_files(version)))
out = [i.split() for i in out]
out = '\n'.join(["%s\t%s" % (i, os.path.split(j)[1]) for i, j in out])
if print_:
print(out)
return out
def _sha256(version, print_=True, local=False):
if local:
out = run(*(['shasum', '-a', '256'] + release_files(version)))
else:
raise ValueError('Should not get here...')
out = run(*(['shasum', '-a', '256', '/root/release/*']))
# Remove the release/ part for printing. Useful for copy-pasting into the
# release notes.
out = [i.split() for i in out]
out = '\n'.join(["%s\t%s" % (i, os.path.split(j)[1]) for i, j in out])
if print_:
print(out)
return out
def get_tarball_name(file, version):
"""
Get the name of a tarball
file should be one of
source-orig: The original name of the source tarball
source-orig-notar: The name of the untarred directory
source: The source tarball (after renaming)
wheel: The wheel
html: The name of the html zip
html-nozip: The name of the html, without ".zip"
pdf-orig: The original name of the pdf file
pdf: The name of the pdf file (after renaming)
"""
doctypename = defaultdict(str, {'html': 'zip', 'pdf': 'pdf'})
if file in {'source-orig', 'source'}:
name = 'sympy-{version}.tar.gz'
elif file == 'source-orig-notar':
name = "sympy-{version}"
elif file in {'html', 'pdf', 'html-nozip'}:
name = "sympy-docs-{type}-{version}"
if file == 'html-nozip':
# zip files keep the name of the original zipped directory. See
# https://github.com/sympy/sympy/issues/7087.
file = 'html'
else:
name += ".{extension}"
elif file == 'pdf-orig':
name = "sympy-{version}.pdf"
elif file == 'wheel':
name = 'sympy-{version}-py3-none-any.whl'
else:
raise ValueError(file + " is not a recognized argument")
ret = name.format(version=version, type=file,
extension=doctypename[file])
return ret
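# Illustrative sketch (the version shown is hypothetical):
#   get_tarball_name('wheel', '1.8') -> 'sympy-1.8-py3-none-any.whl'
#   get_tarball_name('html', '1.8')  -> 'sympy-docs-html-1.8.zip'
#   get_tarball_name('pdf', '1.8')   -> 'sympy-docs-pdf-1.8.pdf'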
def release_files(version):
"""
Returns the list of local release files
"""
paths = glob.glob('release/release-' + version + '/*')
if not paths:
raise ValueError("No release files found")
return paths
tarball_name_types = {
'source-orig',
'source-orig-notar',
'source',
'wheel',
'html',
'html-nozip',
'pdf-orig',
'pdf',
}
# Have to make this lazy so that version can be defined.
class _tarball_format(Mapping):
def __init__(self, version):
self.version = version
def __getitem__(self, name):
return get_tarball_name(name, self.version)
def __iter__(self):
return iter(tarball_name_types)
def __len__(self):
return len(tarball_name_types)
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
|
fafa1158ab001865cc2e6106a659f59a1a133343e2bd8e08b563e33234a87840 | #!/usr/bin/env python3
import os
from pathlib import Path
from subprocess import check_output
import unicodedata
def main(version, outdir):
"""
Print authors text to put at the bottom of the release notes
"""
outdir = Path(outdir)
authors, authorcount, newauthorcount = get_authors(version)
authors_text = f"""## Authors
The following people contributed at least one patch to this release (names are
given in alphabetical order by last name). A total of {authorcount} people
contributed to this release. People with a * by their names contributed a
patch for the first time for this release; {newauthorcount} people contributed
for the first time for this release.
Thanks to everyone who contributed to this release!
"""
authors_lines = []
for name in authors:
authors_lines.append("- " + name)
authors_text += '\n'.join(authors_lines)
# Output to file and to screen
with open(outdir / 'authors.txt', 'w') as authorsfile:
authorsfile.write(authors_text)
print()
print(blue("Here are the authors to put at the bottom of the release notes."))
print()
print(authors_text)
def blue(text):
return "\033[34m%s\033[0m" % text
def get_authors(version):
"""
Get the list of authors since the previous release
Returns the list in alphabetical order by last name. Authors who
contributed for the first time for this release will have a star appended
to the end of their names.
Note: it's a good idea to use ./bin/mailmap_update.py (from the base sympy
directory) to make AUTHORS and .mailmap up-to-date first before using
this. fab vagrant release does this automatically.
"""
def lastnamekey(name):
"""
Sort key to sort by last name
Note, we decided to sort based on the last name, because that way is
fair. We used to sort by commit count or line number count, but that
bumps up people who made lots of maintenance changes like updating
mpmath or moving some files around.
"""
# Note, this will do the wrong thing for people who have multi-word
# last names, but there are also people with middle initials. I don't
# know of a perfect way to handle everyone. Feel free to fix up the
# list by hand.
text = name.strip().split()[-1].lower()
# Convert things like Čertík to Certik
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
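    # Illustrative sketch: lastnamekey('Ondřej Čertík') normalizes to b'certik',
    # so this name is sorted under C by last name.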
old_release_tag = get_previous_version_tag(version)
out = check_output(['git', '--no-pager', 'log', old_release_tag + '..', '--format=%aN'])
releaseauthors = set(out.decode('utf-8').strip().split('\n'))
out = check_output(['git', '--no-pager', 'log', old_release_tag, '--format=%aN'])
priorauthors = set(out.decode('utf-8').strip().split('\n'))
releaseauthors = {name.strip() for name in releaseauthors if name.strip()}
priorauthors = {name.strip() for name in priorauthors if name.strip()}
newauthors = releaseauthors - priorauthors
starred_newauthors = {name + "*" for name in newauthors}
authors = releaseauthors - newauthors | starred_newauthors
return (sorted(authors, key=lastnamekey), len(releaseauthors), len(newauthors))
def get_previous_version_tag(version):
"""
Get the version of the previous release
"""
# We try, probably too hard, to portably get the number of the previous
# release of SymPy. Our strategy is to look at the git tags. The
# following assumptions are made about the git tags:
# - The only tags are for releases
# - The tags are given the consistent naming:
# sympy-major.minor.micro[.rcnumber]
# (e.g., sympy-0.7.2 or sympy-0.7.2.rc1)
# In particular, it goes back in the tag history and finds the most recent
# tag that doesn't contain the current short version number as a substring.
shortversion = get_sympy_short_version(version)
curcommit = "HEAD"
while True:
cmdline = f'git describe --abbrev=0 --tags {curcommit}'
print(cmdline)
curtag = check_output(cmdline.split()).decode('utf-8').strip()
if shortversion in curtag:
# If the tagged commit is a merge commit, we cannot be sure
# that it will go back in the right direction. This almost
# never happens, so just error
cmdline = f'git rev-list --parents -n 1 {curtag}'
print(cmdline)
parents = check_output(cmdline).decode('utf-8').strip().split()
# rev-list prints the current commit and then all its parents
# If the tagged commit *is* a merge commit, just comment this
# out, and manually make sure `get_previous_version_tag` is correct
# assert len(parents) == 2, curtag
curcommit = curtag + "^" # The parent of the tagged commit
else:
print(blue("Using {tag} as the tag for the previous "
"release.".format(tag=curtag)))
return curtag
    sys.exit("Could not find the tag for the previous release.")
def get_sympy_short_version(version):
"""
Get the short version of SymPy being released, not including any rc tags
(like 0.7.3)
"""
parts = version.split('.')
# Remove rc tags
# Handle both 1.0.rc1 and 1.1rc1
if not parts[-1].isdigit():
if parts[-1][0].isdigit():
parts[-1] = parts[-1][0]
else:
parts.pop(-1)
return '.'.join(parts)
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))
|
67eefbcf90d5c670dc3ccb6a6b3903272bb355f721251a1f230634307538d6c7 | #!/usr/bin/env python3
import os
from pathlib import Path
from subprocess import check_output
def main(version, outdir):
outdir = Path(outdir)
build_files = [
outdir / f'sympy-{version}.tar.gz',
outdir / f'sympy-{version}-py3-none-any.whl',
outdir / f'sympy-docs-html-{version}.zip',
outdir / f'sympy-docs-pdf-{version}.pdf',
]
out = check_output(['shasum', '-a', '256'] + build_files)
out = out.decode('ascii')
# Remove the release/ part for printing. Useful for copy-pasting into the
# release notes.
out = [i.split() for i in out.strip().split('\n')]
out = '\n'.join(["%s\t%s" % (i, os.path.split(j)[1]) for i, j in out])
# Output to file and to screen
with open(outdir / 'sha256.txt', 'w') as shafile:
shafile.write(out)
print(out)
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))
|
441f489f234f099f7fc1b656c236ba524d62397a0f1bcae0b463d0757757952e | #!/usr/bin/env python3
from pathlib import Path
from tempfile import TemporaryDirectory
from subprocess import check_call
PY_VERSIONS = '3.6', '3.7', '3.8', '3.9'
def main(version, outdir):
for pyversion in PY_VERSIONS:
test_sdist(pyversion, version, outdir)
test_wheel(pyversion, version, outdir)
def run(cmd, cwd=None):
if isinstance(cmd, str):
cmd = cmd.split()
return check_call(cmd, cwd=cwd)
def green(text):
return "\033[32m%s\033[0m" % text
def test_sdist(pyversion, version, outdir, wheel=False):
print(green('-' * 80))
if not wheel:
print(green(' Testing Python %s (sdist)' % pyversion))
else:
print(green(' Testing Python %s (wheel)' % pyversion))
print(green('-' * 80))
python_exe = f'python{pyversion}'
wheelname = f'sympy-{version}-py3-none-any.whl'
tarname = f'sympy-{version}.tar.gz'
tardir = f'sympy-{version}'
with TemporaryDirectory() as tempdir:
outdir = Path(outdir)
tempdir = Path(tempdir)
venv = tempdir / f'venv{pyversion}'
pip = venv / "bin" / "pip"
python = venv / "bin" / "python"
run(f'{python_exe} -m venv {venv}')
run(f'{pip} install -U -q pip')
if not wheel:
run(f'{pip} install -q mpmath')
run(f'cp {outdir/tarname} {tempdir/tarname}')
run(f'tar -xzf {tempdir/tarname} -C {tempdir}')
run(f'{python} setup.py -q install', cwd=tempdir/tardir)
else:
run(f'{pip} install -q {outdir/wheelname}')
isympy = venv / "bin" / "isympy"
run([python, '-c', "import sympy; print(sympy.__version__); print('sympy installed successfully')"])
run(f'{python} -m isympy --version')
run(f'{isympy} --version')
def test_wheel(*args):
return test_sdist(*args, wheel=True)
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))
|
2cafdbefc9607abb992d8148687657da496c349b6d313bf0884cd4292556b7e0 | #!/usr/bin/env python3
import os
from os.path import dirname, join, basename, normpath
from os import chdir
import shutil
from helpers import run
ROOTDIR = dirname(dirname(__file__))
DOCSDIR = join(ROOTDIR, 'doc')
def main(version, outputdir):
os.makedirs(outputdir, exist_ok=True)
build_html(DOCSDIR, outputdir, version)
build_latex(DOCSDIR, outputdir, version)
def build_html(docsdir, outputdir, version):
run('make', 'clean', cwd=docsdir)
run('make', 'html', cwd=docsdir)
builddir = join(docsdir, '_build')
docsname = 'sympy-docs-html-%s' % (version,)
zipname = docsname + '.zip'
cwd = os.getcwd()
try:
chdir(builddir)
shutil.move('html', docsname)
run('zip', '-9lr', zipname, docsname)
finally:
chdir(cwd)
shutil.move(join(builddir, zipname), join(outputdir, zipname))
def build_latex(docsdir, outputdir, version):
run('make', 'clean', cwd=docsdir)
run('make', 'latex', cwd=docsdir)
latexdir = join(docsdir, '_build', 'latex')
env = os.environ.copy()
env['LATEXMKOPTS'] = '-xelatex -silent'
run('make', 'clean', cwd=latexdir, env=env)
run('make', 'all', cwd=latexdir, env=env)
srcfilename = 'sympy-%s.pdf' % (version,)
dstfilename = 'sympy-docs-pdf-%s.pdf' % (version,)
src = join('doc', '_build', 'latex', srcfilename)
dst = join(outputdir, dstfilename)
shutil.copyfile(src, dst)
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
|
a79c73f478cec0680590def7485a90b117ff50b44d852bbd20ba281fb7f457ab | #!/usr/bin/env python3
from os.path import join, basename, normpath
from subprocess import check_call
def main(version, outdir):
check_version(version, outdir)
run_stage(['bin/mailmap_update.py'])
run_stage(['bin/authors_update.py'])
run_stage(['mkdir', '-p', outdir])
build_release_files('bdist_wheel', 'sympy-%s-py3-none-any.whl', outdir, version)
build_release_files('sdist', 'sympy-%s.tar.gz', outdir, version)
run_stage(['release/compare_tar_against_git.py', join(outdir, 'sympy-%s.tar.gz' % (version,)), '.'])
run_stage(['release/test_install.py', version, outdir])
run_stage(['release/build_docs.py', version, outdir])
run_stage(['release/sha256.py', version, outdir])
run_stage(['release/authors.py', version, outdir])
def green(text):
return "\033[32m%s\033[0m" % text
def red(text):
return "\033[31m%s\033[0m" % text
def print_header(color, *msgs):
newlines = '\n'
vline = '-' * 80
print(color(newlines + vline))
for msg in msgs:
print(color(msg))
print(color(vline + newlines))
def run_stage(cmd):
cmdline = ' $ %s' % (' '.join(cmd),)
print_header(green, 'running:', cmdline)
try:
check_call(cmd)
except Exception as e:
print_header(red, 'failed:', cmdline)
raise e from None
else:
print_header(green, 'completed:', cmdline)
def build_release_files(cmd, fname, outdir, version):
fname = fname % (version,)
run_stage(['python', 'setup.py', '-q', cmd])
src = join('dist', fname)
dst = join(outdir, fname)
run_stage(['mv', src, dst])
def check_version(version, outdir):
from sympy.release import __version__ as checked_out_version
if version != checked_out_version:
msg = "version %s does not match checkout %s"
raise AssertionError(msg % (version, checked_out_version))
if basename(normpath(outdir)) != 'release-%s' % (version,):
msg = "version %s does not match output directory %s"
raise AssertionError(msg % (version, outdir))
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
|
097fae90e229d2654205b8dcba5c89cc96b9942de6bfa3857ea8cc66689c8f12 | #!/usr/bin/env python3
from subprocess import check_output
import sys
import os.path
def main(tarname, gitroot):
"""Run this as ./compare_tar_against_git.py TARFILE GITROOT
Args
====
TARFILE: Path to the built sdist (sympy-xx.tar.gz)
    GITROOT: Path to the root of the git repository (the directory containing .git)
"""
compare_tar_against_git(tarname, gitroot)
## TARBALL WHITELISTS
# If a file does not end up in the tarball that should, add it to setup.py if
# it is Python, or MANIFEST.in if it is not. (There is a command at the top
# of setup.py to gather all the things that should be there).
# TODO: Also check that this whitelist isn't growing out of date from files
# removed from git.
# Files that are in git that should not be in the tarball
git_whitelist = {
# Git specific dotfiles
'.gitattributes',
'.gitignore',
'.mailmap',
# Travis and CI
'.travis.yml',
'.github/workflows/runtests.yml',
'.ci/durations.json',
'.ci/generate_durations_log.sh',
'.ci/parse_durations_log.py',
'.ci/blacklisted.json',
'.ci/README.rst',
'.github/FUNDING.yml',
'.editorconfig',
'.coveragerc',
'CODEOWNERS',
'asv.conf.travis.json',
'coveragerc_travis',
'codecov.yml',
'pytest.ini',
'MANIFEST.in',
'banner.svg',
# Code of conduct
'CODE_OF_CONDUCT.md',
# Pull request template
'PULL_REQUEST_TEMPLATE.md',
# Contributing guide
'CONTRIBUTING.md',
# Nothing from bin/ should be shipped unless we intend to install it. Most
# of this stuff is for development anyway. To run the tests from the
# tarball, use setup.py test, or import sympy and run sympy.test() or
# sympy.doctest().
'bin/adapt_paths.py',
'bin/ask_update.py',
'bin/authors_update.py',
'bin/build_doc.sh',
'bin/coverage_doctest.py',
'bin/coverage_report.py',
'bin/deploy_doc.sh',
'bin/diagnose_imports',
'bin/doctest',
'bin/generate_module_list.py',
'bin/generate_test_list.py',
'bin/get_sympy.py',
'bin/mailmap_update.py',
'bin/py.bench',
'bin/strip_whitespace',
'bin/sympy_time.py',
'bin/sympy_time_cache.py',
'bin/test',
'bin/test_external_imports.py',
'bin/test_executable.py',
'bin/test_import',
'bin/test_import.py',
'bin/test_isolated',
'bin/test_py2_import.py',
'bin/test_setup.py',
'bin/test_submodule_imports.py',
'bin/test_travis.sh',
'bin/test_optional_dependencies.py',
'bin/test_sphinx.sh',
# The notebooks are not ready for shipping yet. They need to be cleaned
# up, and preferably doctested. See also
# https://github.com/sympy/sympy/issues/6039.
'examples/advanced/identitysearch_example.ipynb',
'examples/beginner/plot_advanced.ipynb',
'examples/beginner/plot_colors.ipynb',
'examples/beginner/plot_discont.ipynb',
'examples/beginner/plot_gallery.ipynb',
'examples/beginner/plot_intro.ipynb',
'examples/intermediate/limit_examples_advanced.ipynb',
'examples/intermediate/schwarzschild.ipynb',
'examples/notebooks/density.ipynb',
'examples/notebooks/fidelity.ipynb',
'examples/notebooks/fresnel_integrals.ipynb',
'examples/notebooks/qubits.ipynb',
'examples/notebooks/sho1d_example.ipynb',
'examples/notebooks/spin.ipynb',
'examples/notebooks/trace.ipynb',
'examples/notebooks/Bezout_Dixon_resultant.ipynb',
'examples/notebooks/IntegrationOverPolytopes.ipynb',
'examples/notebooks/Macaulay_resultant.ipynb',
'examples/notebooks/Sylvester_resultant.ipynb',
'examples/notebooks/README.txt',
# This stuff :)
'release/.gitignore',
'release/README.md',
'release/Vagrantfile',
'release/fabfile.py',
'release/Dockerfile',
'release/Dockerfile-base',
'release/release.sh',
'release/rever.xsh',
'release/pull_and_run_rever.sh',
'release/compare_tar_against_git.py',
'release/update_docs.py',
'release/aptinstall.sh',
'release/build_docs.py',
'release/github_release.py',
'release/helpers.py',
'release/releasecheck.py',
'release/requirements.txt',
'release/update_requirements.sh',
'release/test_install.py',
'release/sha256.py',
'release/authors.py',
# This is just a distribute version of setup.py. Used mainly for setup.py
# develop, which we don't care about in the release tarball
'setupegg.py',
# pytest stuff
'conftest.py',
# Encrypted deploy key for deploying dev docs to GitHub
'github_deploy_key.enc',
}
# Files that should be in the tarball but are not in git (e.g. generated files)
tarball_whitelist = {
# Generated by setup.py. Contains metadata for PyPI.
"PKG-INFO",
# Generated by setuptools. More metadata.
'setup.cfg',
'sympy.egg-info/PKG-INFO',
'sympy.egg-info/SOURCES.txt',
'sympy.egg-info/dependency_links.txt',
'sympy.egg-info/requires.txt',
'sympy.egg-info/top_level.txt',
'sympy.egg-info/not-zip-safe',
'sympy.egg-info/entry_points.txt',
# Not sure where this is generated from...
'doc/commit_hash.txt',
}
def blue(text):
return "\033[34m%s\033[0m" % text
def red(text):
return "\033[31m%s\033[0m" % text
def run(*cmdline, cwd=None):
"""
Run command in subprocess and get lines of output
"""
return check_output(cmdline, encoding='utf-8', cwd=cwd).splitlines()
def full_path_split(path):
"""
Function to do a full split on a path.
"""
# Based on https://stackoverflow.com/a/13505966/161801
rest, tail = os.path.split(path)
if not rest or rest == os.path.sep:
return (tail,)
return full_path_split(rest) + (tail,)
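# Illustrative sketch: full_path_split('sympy-1.8/sympy/__init__.py') returns
# ('sympy-1.8', 'sympy', '__init__.py'); dropping the first component lets
# tarball paths be compared against git ls-files paths.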
def compare_tar_against_git(tarname, gitroot):
"""
Compare the contents of the tarball against git ls-files
See the bottom of the file for the whitelists.
"""
git_lsfiles = set(i.strip() for i in run('git', 'ls-files', cwd=gitroot))
tar_output_orig = set(run('tar', 'tf', tarname))
tar_output = set()
for file in tar_output_orig:
# The tar files are like sympy-0.7.3/sympy/__init__.py, and the git
# files are like sympy/__init__.py.
split_path = full_path_split(file)
if split_path[-1]:
# Exclude directories, as git ls-files does not include them
tar_output.add(os.path.join(*split_path[1:]))
# print tar_output
# print git_lsfiles
fail = False
print()
print(blue("Files in the tarball from git that should not be there:"))
print()
for line in sorted(tar_output.intersection(git_whitelist)):
fail = True
print(line)
print()
print(blue("Files in git but not in the tarball:"))
print()
for line in sorted(git_lsfiles - tar_output - git_whitelist):
fail = True
print(line)
print()
print(blue("Files in the tarball but not in git:"))
print()
for line in sorted(tar_output - git_lsfiles - tarball_whitelist):
fail = True
print(line)
print()
if fail:
sys.exit(red("Non-whitelisted files found or not found in the tarball"))
if __name__ == "__main__":
main(*sys.argv[1:])
|
f8cf1fb5af3e9585a1bb289dc2eead6e301aba7d1aaa3cb3c5024d8060a42bce | """
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as simple
as possible in order to be comprehensible and easily extensible. SymPy is
written entirely in Python. It depends on mpmath, and other external libraries
may optionally be used for things like plotting support.
See the webpage for more information and documentation:
https://sympy.org
"""
import sys
if sys.version_info < (3, 6):
raise ImportError("Python version 3.6 or above is required for SymPy.")
del sys
try:
import mpmath
except ImportError:
raise ImportError("SymPy now depends on mpmath as an external library. "
"See https://docs.sympy.org/latest/install.html#mpmath for more information.")
del mpmath
from sympy.release import __version__
if 'dev' in __version__:
def enable_warnings():
import warnings
warnings.filterwarnings('default', '.*', DeprecationWarning, module='sympy.*')
del warnings
enable_warnings()
del enable_warnings
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug() # type: bool
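# Illustrative usage (shell command is an example, not part of this module):
# running `SYMPY_DEBUG=True python -c "import sympy"` sets SYMPY_DEBUG to True.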
from .core import (sympify, SympifyError, cacheit, Basic, Atom,
preorder_traversal, S, Expr, AtomicExpr, UnevaluatedExpr, Symbol,
Wild, Dummy, symbols, var, Number, Float, Rational, Integer,
NumberSymbol, RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo,
AlgebraicNumber, comp, mod_inverse, Pow, integer_nthroot, integer_log,
Mul, prod, Add, Mod, Rel, Eq, Ne, Lt, Le, Gt, Ge, Equality,
GreaterThan, LessThan, Unequality, StrictGreaterThan, StrictLessThan,
vectorize, Lambda, WildFunction, Derivative, diff, FunctionClass,
Function, Subs, expand, PoleError, count_ops, expand_mul, expand_log,
expand_func, expand_trig, expand_complex, expand_multinomial, nfloat,
expand_power_base, expand_power_exp, arity, PrecisionExhausted, N,
evalf, Tuple, Dict, gcd_terms, factor_terms, factor_nc, evaluate,
Catalan, EulerGamma, GoldenRatio, TribonacciConstant)
from .logic import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor,
Implies, Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map,
true, false, satisfiable)
from .assumptions import (AppliedPredicate, Predicate, AssumptionsContext,
assuming, Q, ask, register_handler, remove_handler, refine)
from .polys import (Poly, PurePoly, poly_from_expr, parallel_poly_from_expr,
degree, total_degree, degree_list, LC, LM, LT, pdiv, prem, pquo,
pexquo, div, rem, quo, exquo, half_gcdex, gcdex, invert,
subresultants, resultant, discriminant, cofactors, gcd_list, gcd,
lcm_list, lcm, terms_gcd, trunc, monic, content, primitive, compose,
decompose, sturm, gff_list, gff, sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor, intervals, refine_root, count_roots, real_roots,
nroots, ground_roots, nth_power_roots_poly, cancel, reduced, groebner,
is_zero_dimensional, GroebnerBasis, poly, symmetrize, horner,
interpolate, rational_interpolate, viete, together,
BasePolynomialError, ExactQuotientFailed, PolynomialDivisionFailed,
OperationNotSupported, HeuristicGCDFailed, HomomorphismFailed,
IsomorphismFailed, ExtraneousFactors, EvaluationFailed,
RefinementFailed, CoercionFailed, NotInvertible, NotReversible,
NotAlgebraic, DomainError, PolynomialError, UnificationFailed,
GeneratorsError, GeneratorsNeeded, ComputationFailed,
UnivariatePolynomialError, MultivariatePolynomialError,
PolificationFailed, OptionError, FlagError, minpoly,
minimal_polynomial, primitive_element, field_isomorphism,
to_number_field, isolate, itermonomials, Monomial, lex, grlex,
grevlex, ilex, igrlex, igrevlex, CRootOf, rootof, RootOf,
ComplexRootOf, RootSum, roots, Domain, FiniteField, IntegerRing,
RationalField, RealField, ComplexField, PythonFiniteField,
GMPYFiniteField, PythonIntegerRing, GMPYIntegerRing, PythonRational,
GMPYRationalField, AlgebraicField, PolynomialRing, FractionField,
ExpressionDomain, FF_python, FF_gmpy, ZZ_python, ZZ_gmpy, QQ_python,
QQ_gmpy, GF, FF, ZZ, QQ, ZZ_I, QQ_I, RR, CC, EX, construct_domain,
swinnerton_dyer_poly, cyclotomic_poly, symmetric_poly, random_poly,
interpolating_poly, jacobi_poly, chebyshevt_poly, chebyshevu_poly,
hermite_poly, legendre_poly, laguerre_poly, apart, apart_list,
assemble_partfrac_list, Options, ring, xring, vring, sring, field,
xfield, vfield, sfield)
from .series import (Order, O, limit, Limit, gruntz, series, approximants,
residue, EmptySequence, SeqPer, SeqFormula, sequence, SeqAdd, SeqMul,
fourier_series, fps, difference_delta, limit_seq)
from .functions import (factorial, factorial2, rf, ff, binomial,
RisingFactorial, FallingFactorial, subfactorial, carmichael,
fibonacci, lucas, tribonacci, harmonic, bernoulli, bell, euler,
catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root,
cbrt, re, im, sign, Abs, conjugate, arg, polar_lift,
periodic_argument, unbranched_argument, principal_branch, transpose,
adjoint, polarify, unpolarify, sin, cos, tan, sec, csc, cot, sinc,
asin, acos, atan, asec, acsc, acot, atan2, exp_polar, exp, ln, log,
LambertW, sinh, cosh, tanh, coth, sech, csch, asinh, acosh, atanh,
acoth, asech, acsch, floor, ceiling, frac, Piecewise, piecewise_fold,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv, Ei, expint, E1, li,
Li, Si, Ci, Shi, Chi, fresnels, fresnelc, gamma, lowergamma,
uppergamma, polygamma, loggamma, digamma, trigamma, multigamma,
dirichlet_eta, zeta, lerchphi, polylog, stieltjes, Eijk, LeviCivita,
KroneckerDelta, SingularityFunction, DiracDelta, Heaviside,
bspline_basis, bspline_basis_set, interpolating_spline, besselj,
bessely, besseli, besselk, hankel1, hankel2, jn, yn, jn_zeros, hn1,
hn2, airyai, airybi, airyaiprime, airybiprime, marcumq, hyper,
meijerg, appellf1, legendre, assoc_legendre, hermite, chebyshevt,
chebyshevu, chebyshevu_root, chebyshevt_root, laguerre,
assoc_laguerre, gegenbauer, jacobi, jacobi_normalized, Ynm, Ynm_c,
Znm, elliptic_k, elliptic_f, elliptic_e, elliptic_pi, beta, mathieus,
mathieuc, mathieusprime, mathieucprime)
from .ntheory import (nextprime, prevprime, prime, primepi, primerange,
randprime, Sieve, sieve, primorial, cycle_length, composite,
compositepi, isprime, divisors, proper_divisors, factorint,
multiplicity, perfect_power, pollard_pm1, pollard_rho, primefactors,
totient, trailing, divisor_count, proper_divisor_count, divisor_sigma,
factorrat, reduced_totient, primenu, primeomega,
mersenne_prime_exponent, is_perfect, is_mersenne_prime, is_abundant,
is_deficient, is_amicable, abundance, npartitions, is_primitive_root,
is_quad_residue, legendre_symbol, jacobi_symbol, n_order, sqrt_mod,
quadratic_residues, primitive_root, nthroot_mod, is_nthpow_residue,
sqrt_mod_iter, mobius, discrete_log, quadratic_congruence,
binomial_coefficients, binomial_coefficients_list,
multinomial_coefficients, continued_fraction_periodic,
continued_fraction_iterator, continued_fraction_reduce,
continued_fraction_convergents, continued_fraction, egyptian_fraction)
from .concrete import product, Product, summation, Sum
from .discrete import (fft, ifft, ntt, intt, fwht, ifwht, mobius_transform,
inverse_mobius_transform, convolution, covering_product,
intersecting_product)
from .simplify import (simplify, hypersimp, hypersimilar, logcombine,
separatevars, posify, besselsimp, kroneckersimp, signsimp, bottom_up,
nsimplify, FU, fu, sqrtdenest, cse, use, epath, EPath, hyperexpand,
collect, rcollect, radsimp, collect_const, fraction, numer, denom,
trigsimp, exptrigsimp, powsimp, powdenest, combsimp, gammasimp,
ratsimp, ratsimpmodprime)
from .sets import (Set, Interval, Union, EmptySet, FiniteSet, ProductSet,
Intersection, DisjointUnion, imageset, Complement, SymmetricDifference, ImageSet,
Range, ComplexRegion, Reals, Contains, ConditionSet, Ordinal,
OmegaPower, ord0, PowerSet, Naturals, Naturals0, UniversalSet,
Integers, Rationals)
from .solvers import (solve, solve_linear_system, solve_linear_system_LU,
solve_undetermined_coeffs, nsolve, solve_linear, checksol, det_quick,
inv_quick, check_assumptions, failing_assumptions, diophantine,
rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper, checkodesol,
classify_ode, dsolve, homogeneous_order, solve_poly_system,
solve_triangulated, pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol, ode_order, reduce_inequalities,
reduce_abs_inequality, reduce_abs_inequalities, solve_poly_inequality,
solve_rational_inequalities, solve_univariate_inequality, decompogen,
solveset, linsolve, linear_eq_to_matrix, nonlinsolve, substitution,
Complexes)
from .matrices import (ShapeError, NonSquareMatrixError, GramSchmidt,
casoratian, diag, eye, hessian, jordan_cell, list2numpy, matrix2numpy,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,
rot_axis3, symarray, wronskian, zeros, MutableDenseMatrix,
DeferredVector, MatrixBase, Matrix, MutableMatrix,
MutableSparseMatrix, banded, ImmutableDenseMatrix,
ImmutableSparseMatrix, ImmutableMatrix, SparseMatrix, MatrixSlice,
BlockDiagMatrix, BlockMatrix, FunctionMatrix, Identity, Inverse,
MatAdd, MatMul, MatPow, MatrixExpr, MatrixSymbol, Trace, Transpose,
ZeroMatrix, OneMatrix, blockcut, block_collapse, matrix_symbols,
Adjoint, hadamard_product, HadamardProduct, HadamardPower,
Determinant, det, diagonalize_vector, DiagMatrix, DiagonalMatrix,
DiagonalOf, trace, DotProduct, kronecker_product, KroneckerProduct,
PermutationMatrix, MatrixPermute, Permanent, per)
from .geometry import (Point, Point2D, Point3D, Line, Ray, Segment, Line2D,
Segment2D, Ray2D, Line3D, Segment3D, Ray3D, Plane, Ellipse, Circle,
Polygon, RegularPolygon, Triangle, rad, deg, are_similar, centroid,
convex_hull, idiff, intersection, closest_points, farthest_points,
GeometryError, Curve, Parabola)
from .utilities import (flatten, group, take, subsets, variations,
numbered_symbols, cartes, capture, dict_merge, postorder_traversal,
interactive_traversal, prefixes, postfixes, sift, topological_sort,
unflatten, has_dups, has_variety, reshape, default_sort_key, ordered,
rotations, filldedent, lambdify, source, threaded, xthreaded, public,
memoize_property, timed)
from .integrals import (integrate, Integral, line_integrate, mellin_transform,
inverse_mellin_transform, MellinTransform, InverseMellinTransform,
laplace_transform, inverse_laplace_transform, LaplaceTransform,
InverseLaplaceTransform, fourier_transform, inverse_fourier_transform,
FourierTransform, InverseFourierTransform, sine_transform,
inverse_sine_transform, SineTransform, InverseSineTransform,
cosine_transform, inverse_cosine_transform, CosineTransform,
InverseCosineTransform, hankel_transform, inverse_hankel_transform,
HankelTransform, InverseHankelTransform, singularityintegrate)
from .tensor import (IndexedBase, Idx, Indexed, get_contraction_structure,
get_indices, MutableDenseNDimArray, ImmutableDenseNDimArray,
MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray,
tensorproduct, tensorcontraction, tensordiagonal, derive_by_array,
permutedims, Array, DenseNDimArray, SparseNDimArray)
from .parsing import parse_expr
from .calculus import (euler_equations, singularities, is_increasing,
is_strictly_increasing, is_decreasing, is_strictly_decreasing,
is_monotonic, finite_diff_weights, apply_finite_diff, as_finite_diff,
differentiate_finite, periodicity, not_empty_in, AccumBounds,
is_convex, stationary_points, minimum, maximum)
from .algebras import Quaternion
from .printing import (pager_print, pretty, pretty_print, pprint,
pprint_use_unicode, pprint_try_use_unicode, latex, print_latex,
multiline_latex, mathml, print_mathml, python, print_python, pycode,
ccode, print_ccode, glsl_code, print_glsl, cxxcode, fcode,
print_fcode, rcode, print_rcode, jscode, print_jscode, julia_code,
mathematica_code, octave_code, rust_code, print_gtk, preview, srepr,
print_tree, StrPrinter, sstr, sstrrepr, TableForm, dotprint,
maple_code, print_maple_code)
from .testing import test, doctest
# This module causes conflicts with other modules:
# from .stats import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit, plot_parametric
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
from .deprecated import C, ClassRegistry, class_registry
__all__ = [
# sympy.core
'sympify', 'SympifyError', 'cacheit', 'Basic', 'Atom',
'preorder_traversal', 'S', 'Expr', 'AtomicExpr', 'UnevaluatedExpr',
'Symbol', 'Wild', 'Dummy', 'symbols', 'var', 'Number', 'Float',
'Rational', 'Integer', 'NumberSymbol', 'RealNumber', 'igcd', 'ilcm',
'seterr', 'E', 'I', 'nan', 'oo', 'pi', 'zoo', 'AlgebraicNumber', 'comp',
'mod_inverse', 'Pow', 'integer_nthroot', 'integer_log', 'Mul', 'prod',
'Add', 'Mod', 'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge', 'Equality',
'GreaterThan', 'LessThan', 'Unequality', 'StrictGreaterThan',
'StrictLessThan', 'vectorize', 'Lambda', 'WildFunction', 'Derivative',
'diff', 'FunctionClass', 'Function', 'Subs', 'expand', 'PoleError',
'count_ops', 'expand_mul', 'expand_log', 'expand_func', 'expand_trig',
'expand_complex', 'expand_multinomial', 'nfloat', 'expand_power_base',
'expand_power_exp', 'arity', 'PrecisionExhausted', 'N', 'evalf', 'Tuple',
'Dict', 'gcd_terms', 'factor_terms', 'factor_nc', 'evaluate', 'Catalan',
'EulerGamma', 'GoldenRatio', 'TribonacciConstant',
# sympy.logic
'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor',
'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic',
'bool_map', 'true', 'false', 'satisfiable',
# sympy.assumptions
'AppliedPredicate', 'Predicate', 'AssumptionsContext', 'assuming', 'Q',
'ask', 'register_handler', 'remove_handler', 'refine',
# sympy.polys
'Poly', 'PurePoly', 'poly_from_expr', 'parallel_poly_from_expr', 'degree',
'total_degree', 'degree_list', 'LC', 'LM', 'LT', 'pdiv', 'prem', 'pquo',
'pexquo', 'div', 'rem', 'quo', 'exquo', 'half_gcdex', 'gcdex', 'invert',
'subresultants', 'resultant', 'discriminant', 'cofactors', 'gcd_list',
'gcd', 'lcm_list', 'lcm', 'terms_gcd', 'trunc', 'monic', 'content',
'primitive', 'compose', 'decompose', 'sturm', 'gff_list', 'gff',
'sqf_norm', 'sqf_part', 'sqf_list', 'sqf', 'factor_list', 'factor',
'intervals', 'refine_root', 'count_roots', 'real_roots', 'nroots',
'ground_roots', 'nth_power_roots_poly', 'cancel', 'reduced', 'groebner',
'is_zero_dimensional', 'GroebnerBasis', 'poly', 'symmetrize', 'horner',
'interpolate', 'rational_interpolate', 'viete', 'together',
'BasePolynomialError', 'ExactQuotientFailed', 'PolynomialDivisionFailed',
'OperationNotSupported', 'HeuristicGCDFailed', 'HomomorphismFailed',
'IsomorphismFailed', 'ExtraneousFactors', 'EvaluationFailed',
'RefinementFailed', 'CoercionFailed', 'NotInvertible', 'NotReversible',
'NotAlgebraic', 'DomainError', 'PolynomialError', 'UnificationFailed',
'GeneratorsError', 'GeneratorsNeeded', 'ComputationFailed',
'UnivariatePolynomialError', 'MultivariatePolynomialError',
'PolificationFailed', 'OptionError', 'FlagError', 'minpoly',
'minimal_polynomial', 'primitive_element', 'field_isomorphism',
'to_number_field', 'isolate', 'itermonomials', 'Monomial', 'lex', 'grlex',
'grevlex', 'ilex', 'igrlex', 'igrevlex', 'CRootOf', 'rootof', 'RootOf',
'ComplexRootOf', 'RootSum', 'roots', 'Domain', 'FiniteField',
'IntegerRing', 'RationalField', 'RealField', 'ComplexField',
'PythonFiniteField', 'GMPYFiniteField', 'PythonIntegerRing',
'GMPYIntegerRing', 'PythonRational', 'GMPYRationalField',
'AlgebraicField', 'PolynomialRing', 'FractionField', 'ExpressionDomain',
'FF_python', 'FF_gmpy', 'ZZ_python', 'ZZ_gmpy', 'QQ_python', 'QQ_gmpy',
'GF', 'FF', 'ZZ', 'QQ', 'ZZ_I', 'QQ_I', 'RR', 'CC', 'EX', 'construct_domain',
'swinnerton_dyer_poly', 'cyclotomic_poly', 'symmetric_poly',
'random_poly', 'interpolating_poly', 'jacobi_poly', 'chebyshevt_poly',
'chebyshevu_poly', 'hermite_poly', 'legendre_poly', 'laguerre_poly',
'apart', 'apart_list', 'assemble_partfrac_list', 'Options', 'ring',
'xring', 'vring', 'sring', 'field', 'xfield', 'vfield', 'sfield',
# sympy.series
'Order', 'O', 'limit', 'Limit', 'gruntz', 'series', 'approximants',
'residue', 'EmptySequence', 'SeqPer', 'SeqFormula', 'sequence', 'SeqAdd',
'SeqMul', 'fourier_series', 'fps', 'difference_delta', 'limit_seq',
# sympy.functions
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial', 'carmichael', 'fibonacci', 'lucas',
'tribonacci', 'harmonic', 'bernoulli', 'bell', 'euler', 'catalan',
'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root',
'cbrt', 're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify', 'sin', 'cos', 'tan',
'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan', 'asec', 'acsc',
'acot', 'atan2', 'exp_polar', 'exp', 'ln', 'log', 'LambertW', 'sinh',
'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh',
'acoth', 'asech', 'acsch', 'floor', 'ceiling', 'frac', 'Piecewise',
'piecewise_fold', 'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv',
'erf2inv', 'Ei', 'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi',
'fresnels', 'fresnelc', 'gamma', 'lowergamma', 'uppergamma', 'polygamma',
'loggamma', 'digamma', 'trigamma', 'multigamma', 'dirichlet_eta', 'zeta',
'lerchphi', 'polylog', 'stieltjes', 'Eijk', 'LeviCivita',
'KroneckerDelta', 'SingularityFunction', 'DiracDelta', 'Heaviside',
'bspline_basis', 'bspline_basis_set', 'interpolating_spline', 'besselj',
'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn', 'yn',
'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime',
'airybiprime', 'marcumq', 'hyper', 'meijerg', 'appellf1', 'legendre',
'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu',
'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre',
'gegenbauer', 'jacobi', 'jacobi_normalized', 'Ynm', 'Ynm_c', 'Znm',
'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi', 'beta',
'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime',
# sympy.ntheory
'nextprime', 'prevprime', 'prime', 'primepi', 'primerange', 'randprime',
'Sieve', 'sieve', 'primorial', 'cycle_length', 'composite', 'compositepi',
'isprime', 'divisors', 'proper_divisors', 'factorint', 'multiplicity',
'perfect_power', 'pollard_pm1', 'pollard_rho', 'primefactors', 'totient',
'trailing', 'divisor_count', 'proper_divisor_count', 'divisor_sigma',
'factorrat', 'reduced_totient', 'primenu', 'primeomega',
'mersenne_prime_exponent', 'is_perfect', 'is_mersenne_prime',
'is_abundant', 'is_deficient', 'is_amicable', 'abundance', 'npartitions',
'is_primitive_root', 'is_quad_residue', 'legendre_symbol',
'jacobi_symbol', 'n_order', 'sqrt_mod', 'quadratic_residues',
'primitive_root', 'nthroot_mod', 'is_nthpow_residue', 'sqrt_mod_iter',
'mobius', 'discrete_log', 'quadratic_congruence', 'binomial_coefficients',
'binomial_coefficients_list', 'multinomial_coefficients',
'continued_fraction_periodic', 'continued_fraction_iterator',
'continued_fraction_reduce', 'continued_fraction_convergents',
'continued_fraction', 'egyptian_fraction',
# sympy.concrete
'product', 'Product', 'summation', 'Sum',
# sympy.discrete
'fft', 'ifft', 'ntt', 'intt', 'fwht', 'ifwht', 'mobius_transform',
'inverse_mobius_transform', 'convolution', 'covering_product',
'intersecting_product',
# sympy.simplify
'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars',
'posify', 'besselsimp', 'kroneckersimp', 'signsimp', 'bottom_up',
'nsimplify', 'FU', 'fu', 'sqrtdenest', 'cse', 'use', 'epath', 'EPath',
'hyperexpand', 'collect', 'rcollect', 'radsimp', 'collect_const',
'fraction', 'numer', 'denom', 'trigsimp', 'exptrigsimp', 'powsimp',
'powdenest', 'combsimp', 'gammasimp', 'ratsimp', 'ratsimpmodprime',
# sympy.sets
'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet',
'Intersection', 'imageset', 'DisjointUnion', 'Complement', 'SymmetricDifference',
'ImageSet', 'Range', 'ComplexRegion', 'Reals', 'Contains', 'ConditionSet',
    'Ordinal', 'OmegaPower', 'ord0', 'PowerSet', 'Naturals',
'Naturals0', 'UniversalSet', 'Integers', 'Rationals',
# sympy.solvers
'solve', 'solve_linear_system', 'solve_linear_system_LU',
'solve_undetermined_coeffs', 'nsolve', 'solve_linear', 'checksol',
'det_quick', 'inv_quick', 'check_assumptions', 'failing_assumptions',
'diophantine', 'rsolve', 'rsolve_poly', 'rsolve_ratio', 'rsolve_hyper',
'checkodesol', 'classify_ode', 'dsolve', 'homogeneous_order',
'solve_poly_system', 'solve_triangulated', 'pde_separate',
'pde_separate_add', 'pde_separate_mul', 'pdsolve', 'classify_pde',
'checkpdesol', 'ode_order', 'reduce_inequalities',
'reduce_abs_inequality', 'reduce_abs_inequalities',
'solve_poly_inequality', 'solve_rational_inequalities',
'solve_univariate_inequality', 'decompogen', 'solveset', 'linsolve',
'linear_eq_to_matrix', 'nonlinsolve', 'substitution', 'Complexes',
# sympy.matrices
'ShapeError', 'NonSquareMatrixError', 'GramSchmidt', 'casoratian', 'diag',
'eye', 'hessian', 'jordan_cell', 'list2numpy', 'matrix2numpy',
'matrix_multiply_elementwise', 'ones', 'randMatrix', 'rot_axis1',
'rot_axis2', 'rot_axis3', 'symarray', 'wronskian', 'zeros',
'MutableDenseMatrix', 'DeferredVector', 'MatrixBase', 'Matrix',
'MutableMatrix', 'MutableSparseMatrix', 'banded', 'ImmutableDenseMatrix',
'ImmutableSparseMatrix', 'ImmutableMatrix', 'SparseMatrix', 'MatrixSlice',
'BlockDiagMatrix', 'BlockMatrix', 'FunctionMatrix', 'Identity', 'Inverse',
'MatAdd', 'MatMul', 'MatPow', 'MatrixExpr', 'MatrixSymbol', 'Trace',
'Transpose', 'ZeroMatrix', 'OneMatrix', 'blockcut', 'block_collapse',
'matrix_symbols', 'Adjoint', 'hadamard_product', 'HadamardProduct',
'HadamardPower', 'Determinant', 'det', 'diagonalize_vector', 'DiagMatrix',
'DiagonalMatrix', 'DiagonalOf', 'trace', 'DotProduct',
'kronecker_product', 'KroneckerProduct', 'PermutationMatrix',
'MatrixPermute', 'Permanent', 'per',
# sympy.geometry
'Point', 'Point2D', 'Point3D', 'Line', 'Ray', 'Segment', 'Line2D',
'Segment2D', 'Ray2D', 'Line3D', 'Segment3D', 'Ray3D', 'Plane', 'Ellipse',
'Circle', 'Polygon', 'RegularPolygon', 'Triangle', 'rad', 'deg',
'are_similar', 'centroid', 'convex_hull', 'idiff', 'intersection',
'closest_points', 'farthest_points', 'GeometryError', 'Curve', 'Parabola',
# sympy.utilities
'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols',
'cartes', 'capture', 'dict_merge', 'postorder_traversal',
'interactive_traversal', 'prefixes', 'postfixes', 'sift',
'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape',
'default_sort_key', 'ordered', 'rotations', 'filldedent', 'lambdify',
'source', 'threaded', 'xthreaded', 'public', 'memoize_property', 'test',
'doctest', 'timed',
# sympy.integrals
'integrate', 'Integral', 'line_integrate', 'mellin_transform',
'inverse_mellin_transform', 'MellinTransform', 'InverseMellinTransform',
'laplace_transform', 'inverse_laplace_transform', 'LaplaceTransform',
'InverseLaplaceTransform', 'fourier_transform',
'inverse_fourier_transform', 'FourierTransform',
'InverseFourierTransform', 'sine_transform', 'inverse_sine_transform',
'SineTransform', 'InverseSineTransform', 'cosine_transform',
'inverse_cosine_transform', 'CosineTransform', 'InverseCosineTransform',
'hankel_transform', 'inverse_hankel_transform', 'HankelTransform',
'InverseHankelTransform', 'singularityintegrate',
# sympy.tensor
'IndexedBase', 'Idx', 'Indexed', 'get_contraction_structure',
'get_indices', 'MutableDenseNDimArray', 'ImmutableDenseNDimArray',
'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray',
'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array',
'permutedims', 'Array', 'DenseNDimArray', 'SparseNDimArray',
# sympy.parsing
'parse_expr',
# sympy.calculus
'euler_equations', 'singularities', 'is_increasing',
'is_strictly_increasing', 'is_decreasing', 'is_strictly_decreasing',
'is_monotonic', 'finite_diff_weights', 'apply_finite_diff',
'as_finite_diff', 'differentiate_finite', 'periodicity', 'not_empty_in',
'AccumBounds', 'is_convex', 'stationary_points', 'minimum', 'maximum',
# sympy.algebras
'Quaternion',
# sympy.printing
'pager_print', 'pretty', 'pretty_print', 'pprint', 'pprint_use_unicode',
'pprint_try_use_unicode', 'latex', 'print_latex', 'multiline_latex',
'mathml', 'print_mathml', 'python', 'print_python', 'pycode', 'ccode',
'print_ccode', 'glsl_code', 'print_glsl', 'cxxcode', 'fcode',
'print_fcode', 'rcode', 'print_rcode', 'jscode', 'print_jscode',
'julia_code', 'mathematica_code', 'octave_code', 'rust_code', 'print_gtk',
'preview', 'srepr', 'print_tree', 'StrPrinter', 'sstr', 'sstrrepr',
'TableForm', 'dotprint', 'maple_code', 'print_maple_code',
# sympy.plotting
'plot', 'textplot', 'plot_backends', 'plot_implicit', 'plot_parametric',
# sympy.interactive
'init_session', 'init_printing',
# sympy.testing
'test', 'doctest',
# sympy.deprecated:
'C', 'ClassRegistry', 'class_registry',
]
#===========================================================================#
# #
# XXX: The names below were importable before sympy 1.6 using #
# #
# from sympy import * #
# #
# This happened implicitly because there was no __all__ defined in this #
# __init__.py file. Not every package is imported. The list matches what #
# would have been imported before. It is possible that these packages will #
# not be imported by a star-import from sympy in future. #
# #
#===========================================================================#
__all__.extend([
'algebras',
'assumptions',
'calculus',
'concrete',
'deprecated',
'discrete',
'external',
'functions',
'geometry',
'interactive',
'multipledispatch',
'ntheory',
'parsing',
'plotting',
'polys',
'printing',
'release',
'strategies',
'tensor',
'utilities',
])
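# A quick sanity check of the list above (illustrative; assumes a regular
# SymPy installation): both public API names and the subpackage names appear
# in ``__all__`` and are therefore bound by ``from sympy import *``.
#
#     >>> import sympy
#     >>> 'Symbol' in sympy.__all__ and 'ntheory' in sympy.__all__
#     True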
|
efcaaa9b88bd2674dad4f87634df43820d7ba1b6be3e86a952f421b145b7e549 | """
This module exports all latin and greek letters as Symbols, so you can
conveniently do
>>> from sympy.abc import x, y
instead of the slightly more clunky-looking
>>> from sympy import symbols
>>> x, y = symbols('x y')
Caveats
=======
1. As of the time of writing this, the names ``C``, ``O``, ``S``, ``I``, ``N``,
``E``, and ``Q`` collide with names defined in SymPy. If you import them
from both ``sympy.abc`` and ``sympy``, the second import will "win".
This is an issue only for * imports, which should only be used for short-lived
code such as interactive sessions and throwaway scripts that do not survive
until the next SymPy upgrade, where ``sympy`` may contain a different set of
names.
2. This module does not define symbol names on demand, i.e.
``from sympy.abc import foo`` will be reported as an error because
``sympy.abc`` does not contain the name ``foo``. To get a symbol named ``foo``,
you still need to use ``Symbol('foo')`` or ``symbols('foo')``.
You can freely mix usage of ``sympy.abc`` and ``Symbol``/``symbols``, though
sticking with one and only one way to get the symbols does tend to make the code
more readable.
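For example, a name that is not predefined here is created in the usual way:
>>> from sympy import Symbol
>>> foo = Symbol('foo')
>>> foo.name
'foo'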
The module also defines some special names to help detect which names clash
with the default SymPy namespace.
``_clash1`` defines all the single letter variables that clash with
SymPy objects; ``_clash2`` defines the multi-letter clashing symbols;
and ``_clash`` is the union of both. These can be passed for ``locals``
during sympification if one desires Symbols rather than the non-Symbol
objects for those names.
Examples
========
>>> from sympy import S
>>> from sympy.abc import _clash1, _clash2, _clash
>>> S("Q & C", locals=_clash1)
C & Q
>>> S('pi(x)', locals=_clash2)
pi(x)
>>> S('pi(C, Q)', locals=_clash)
pi(C, Q)
"""
from typing import Any, Dict
import string
from .core import Symbol, symbols
from .core.alphabets import greeks
##### Symbol definitions #####
# Implementation note: The easiest way to avoid typos in the symbols()
# parameter is to copy it from the left-hand side of the assignment.
a, b, c, d, e, f, g, h, i, j = symbols('a, b, c, d, e, f, g, h, i, j')
k, l, m, n, o, p, q, r, s, t = symbols('k, l, m, n, o, p, q, r, s, t')
u, v, w, x, y, z = symbols('u, v, w, x, y, z')
A, B, C, D, E, F, G, H, I, J = symbols('A, B, C, D, E, F, G, H, I, J')
K, L, M, N, O, P, Q, R, S, T = symbols('K, L, M, N, O, P, Q, R, S, T')
U, V, W, X, Y, Z = symbols('U, V, W, X, Y, Z')
alpha, beta, gamma, delta = symbols('alpha, beta, gamma, delta')
epsilon, zeta, eta, theta = symbols('epsilon, zeta, eta, theta')
iota, kappa, lamda, mu = symbols('iota, kappa, lamda, mu')
nu, xi, omicron, pi = symbols('nu, xi, omicron, pi')
rho, sigma, tau, upsilon = symbols('rho, sigma, tau, upsilon')
phi, chi, psi, omega = symbols('phi, chi, psi, omega')
##### Clashing-symbols diagnostics #####
# We want to know which names in SymPy collide with those in here.
# This is mostly for diagnosing SymPy's namespace during SymPy development.
_latin = list(string.ascii_letters)
# O, S, I, N, E and Q should not be imported as they clash; gamma, pi and zeta clash, too
_greek = list(greeks) # make a copy, so we can mutate it
# Note: We import lamda since lambda is a reserved keyword in Python
_greek.remove("lambda")
_greek.append("lamda")
ns = {} # type: Dict[str, Any]
exec('from sympy import *', ns)
_clash1 = {}
_clash2 = {}
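# Walk everything that ``from sympy import *`` exports and record, as plain
# Symbols, the names that shadow a Latin letter (_clash1) or a Greek letter
# name (_clash2).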
while ns:
_k, _ = ns.popitem()
if _k in _greek:
_clash2[_k] = Symbol(_k)
_greek.remove(_k)
elif _k in _latin:
_clash1[_k] = Symbol(_k)
_latin.remove(_k)
_clash = {}
_clash.update(_clash1)
_clash.update(_clash2)
del _latin, _greek, Symbol, _k
|
0d5894384723fba57ca68df70fa6373741e3ba2ff6f2895e0708775e706b904e | #
# SymPy documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 22 19:34:32 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import inspect
import os
import subprocess
from datetime import datetime
import sympy
# If your extensions are in another directory, add it here.
sys.path = ['ext'] + sys.path
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.linkcode', 'sphinx_math_dollar',
'sphinx.ext.mathjax', 'numpydoc', 'sympylive',
'sphinx.ext.graphviz', 'matplotlib.sphinxext.plot_directive']
# Use this to use pngmath instead
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ]
# Enable warnings for all bad cross references. These are turned into errors
# with the -W flag in the Makefile.
nitpicky = True
# To stop docstrings inheritance.
autodoc_inherit_docstrings = False
# MathJax file, which is free to use. See https://www.mathjax.org/#gettingstarted
# As explained at that link, using latest.js fetches the latest MathJax
# version even though the URL says 2.7.5.
mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS_HTML-full'
# See https://www.sympy.org/sphinx-math-dollar/
mathjax_config = {
'tex2jax': {
'inlineMath': [ ["\\(","\\)"] ],
'displayMath': [["\\[","\\]"] ],
},
}
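# Note: with sphinx_math_dollar enabled (see the link above), ``$...$`` can be
# used for inline math in the documentation sources, while MathJax itself is
# configured here to recognise only the \(...\) and \[...\] delimiters.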
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
suppress_warnings = ['ref.citation', 'ref.footnote']
# General substitutions.
project = 'SymPy'
copyright = '{} SymPy Development Team'.format(datetime.utcnow().year)
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = sympy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Don't show the source code hyperlinks when using matplotlib plot directive.
plot_html_show_source_link = False
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_theme = 'classic'
html_logo = '_static/sympylogo.png'
html_favicon = '../_build/logo/sympy-notailtext-favicon.ico'
# See http://www.sphinx-doc.org/en/master/theming.html#builtin-themes
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
html_domain_indices = ['py-modindex']
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymPydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual], toctree_only).
# toctree_only is set to True so that the start file document itself is not included in the
# output, only the documents referenced by it via TOC trees. The extra stuff in the master
# document is intended to show up in the HTML, but doesn't really belong in the LaTeX output.
latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation',
'SymPy Development Team', 'manual', True)]
# Additional stuff for the LaTeX preamble.
# Tweaked to work with XeTeX.
latex_elements = {
'babel': '',
'fontenc': r'''
% Define version of \LaTeX that is usable in math mode
\let\OldLaTeX\LaTeX
\renewcommand{\LaTeX}{\text{\OldLaTeX}}
\usepackage{bm}
\usepackage{amssymb}
\usepackage{fontspec}
\usepackage[english]{babel}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
'fontpkg': '',
'inputenc': '',
'utf8extra': '',
'preamble': r'''
'''
}
# SymPy logo on title page
html_logo = '_static/sympylogo.png'
latex_logo = '_static/sympylogo_big.png'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Show page numbers next to internal references
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
default_role = 'math'
pngmath_divpng_args = ['-gamma 1.5', '-D 110']
# Note, this is ignored by the mathjax extension
# Any \newcommand should be defined in the file
pngmath_latex_preamble = '\\usepackage{amsmath}\n' \
'\\usepackage{bm}\n' \
'\\usepackage{amsfonts}\n' \
'\\usepackage{amssymb}\n' \
'\\setlength{\\parindent}{0pt}\n'
texinfo_documents = [
(master_doc, 'sympy', 'SymPy Documentation', 'SymPy Development Team',
'SymPy', 'Computer algebra system (CAS) in Python', 'Programming', 1),
]
# Use svg for graphviz
graphviz_output_format = 'svg'
# Required for the linkcode extension.
# Get commit hash from the external file.
commit_hash_filepath = '../commit_hash.txt'
commit_hash = None
if os.path.isfile(commit_hash_filepath):
with open(commit_hash_filepath) as f:
commit_hash = f.readline()
# Otherwise, fall back to asking git for the commit hash.
if not commit_hash:
try:
commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
commit_hash = commit_hash.decode('ascii')
commit_hash = commit_hash.rstrip()
except:
import warnings
        warnings.warn(
            "Failed to get the git commit hash because the command "
            "'git rev-parse HEAD' did not work. The commit hash will be "
            "assumed to be SymPy master, so the linked line numbers may be "
            "misleading or nonexistent if the docs are not built from that "
            "branch. Check your installation of 'git' if you want to "
            "resolve this warning.")
commit_hash = 'master'
fork = 'sympy'
blobpath = \
"https://github.com/{}/sympy/blob/{}/sympy/".format(fork, commit_hash)
def linkcode_resolve(domain, info):
"""Determine the URL corresponding to Python object."""
if domain != 'py':
return
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(sympy.__file__))
return blobpath + fn + linespec
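# Illustrative example (hypothetical values): for sympy.core.symbol.Symbol the
# function above returns a URL of the form
#     https://github.com/sympy/sympy/blob/<commit_hash>/sympy/core/symbol.py#L<start>-L<end>
# which sphinx.ext.linkcode uses as the target of the "[source]" links.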
|
29940a5ec4bd87b770a7e0d608d4de2abfa660e2a92244176eabedef583246aa | """
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaNoncentral
BetaPrime
BoundedPareto
Cauchy
Chi
ChiNoncentral
ChiSquared
Dagum
Erlang
ExGaussian
Exponential
ExponentialPower
FDistribution
FisherZ
Frechet
Gamma
GammaInverse
Gumbel
Gompertz
Kumaraswamy
Laplace
Levy
Logistic
LogLogistic
LogitNormal
LogNormal
Lomax
Maxwell
Moyal
Nakagami
Normal
Pareto
PowerFunction
QuadraticU
RaisedCosine
Rayleigh
Reciprocal
ShiftedGompertz
StudentT
Trapezoidal
Triangular
Uniform
UniformSum
VonMises
Wald
Weibull
WignerSemicircle
"""
from sympy import beta as beta_fn
from sympy import cos, sin, tan, atan, exp, besseli, besselj, besselk
from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma, sign,
Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs,
Lambda, Basic, lowergamma, erf, erfc, erfi, erfinv, I, asin,
hyper, uppergamma, sinh, Ne, expint, Rational, integrate)
from sympy.matrices import MatrixBase, MatrixExpr
from sympy.stats.crv import SingleContinuousPSpace, SingleContinuousDistribution
from sympy.stats.rv import _value_check, is_random
oo = S.Infinity
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaNoncentral',
'BetaPrime',
'BoundedPareto',
'Cauchy',
'Chi',
'ChiNoncentral',
'ChiSquared',
'Dagum',
'Erlang',
'ExGaussian',
'Exponential',
'ExponentialPower',
'FDistribution',
'FisherZ',
'Frechet',
'Gamma',
'GammaInverse',
'Gompertz',
'Gumbel',
'Kumaraswamy',
'Laplace',
'Levy',
'Logistic',
'LogLogistic',
'LogitNormal',
'LogNormal',
'Lomax',
'Maxwell',
'Moyal',
'Nakagami',
'Normal',
'GaussianInverse',
'Pareto',
'PowerFunction',
'QuadraticU',
'RaisedCosine',
'Rayleigh',
'Reciprocal',
'StudentT',
'ShiftedGompertz',
'Trapezoidal',
'Triangular',
'Uniform',
'UniformSum',
'VonMises',
'Wald',
'Weibull',
'WignerSemicircle',
]
@is_random.register(MatrixBase)
def _(x):
    return any(is_random(i) for i in x)
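# Illustrative use (assumes sympy.stats.Normal is available): a matrix counts
# as random as soon as any entry is a random expression, e.g.
#     >>> from sympy import Matrix
#     >>> from sympy.stats import Normal
#     >>> is_random(Matrix([[Normal('X', 0, 1), 2]]))
#     True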
def rv(symbol, cls, args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleContinuousPSpace(symbol, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(symbol, CompoundDistribution(dist))
return pspace.value
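# Sketch (hypothetical names, mirroring the wrappers defined below): each
# public distribution constructor in this module is a thin wrapper that hands
# a distribution class and its parameters to ``rv``:
#
#     def MyDist(name, a, b):
#         """Docstring with the density, parameters, examples and references."""
#         return rv(name, MyDistDistribution, (a, b))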
class ContinuousDistributionHandmade(SingleContinuousDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=Interval(-oo, oo)):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = integrate(pdf(x), (x, set))
_value_check(Eq(val, 1) != S.false, "The pdf on the given set is incorrect.")
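        # For instance (illustrative): ``check(Lambda(x, S.One), Interval(0, 1))``
        # passes because the constant density integrates to 1 on the unit
        # interval, whereas a density that provably integrates to something
        # else makes ``_value_check`` raise.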
def ContinuousRV(symbol, density, set=Interval(-oo, oo), **kwargs):
"""
Create a Continuous Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
    set : set/Interval
        Represents the region where the pdf is valid; by default it is the
        whole real line.
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Returns
=======
RandomSymbol
Many common continuous random variable types are already implemented.
This function should be necessary only very rarely.
Examples
========
>>> from sympy import Symbol, sqrt, exp, pi
>>> from sympy.stats import ContinuousRV, P, E
>>> x = Symbol("x")
>>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
>>> X = ContinuousRV(x, pdf)
>>> E(X)
0
>>> P(X>0)
1/2
"""
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
    # ContinuousRV defaults `check` to False, while `rv` defaults it to True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, ContinuousDistributionHandmade, (pdf, set), **kwargs)
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
def pdf(self, x):
a, b = self.a, self.b
return 1/(pi*sqrt((x - a)*(b - x)))
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < a),
(2*asin(sqrt((x - a)/(b - a)))/pi, x <= b),
(S.One, True))
def Arcsin(name, a=0, b=1):
r"""
Create a Continuous Random Variable with an arcsin distribution.
The density of the arcsin distribution is given by
.. math::
f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}
with :math:`x \in (a,b)`. It must hold that :math:`-\infty < a < b < \infty`.
Parameters
==========
a : Real number, the left interval boundary
b : Real number, the right interval boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Arcsin, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = Arcsin("x", a, b)
>>> density(X)(z)
1/(pi*sqrt((-a + z)*(b - z)))
>>> cdf(X)(z)
Piecewise((0, a > z),
(2*asin(sqrt((-a + z)/(-a + b)))/pi, b >= z),
(1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Arcsine_distribution
"""
return rv(name, ArcsinDistribution, (a, b))
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'sigma')
@staticmethod
def check(alpha, beta, sigma):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
_value_check(sigma > 0, "Scale parameter Sigma must be positive.")
@property
def set(self):
return Interval(self.sigma, oo)
def pdf(self, x):
alpha, beta, sigma = self.alpha, self.beta, self.sigma
return (exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2)
*(alpha/x + 2*beta*log(x/sigma)/x))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of the '
'Benini distribution does not exist.')
def Benini(name, alpha, beta, sigma):
r"""
Create a Continuous Random Variable with a Benini distribution.
The density of the Benini distribution is given by
.. math::
f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
-\beta\log^2\left[{\frac{x}{\sigma}}\right]}
\left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)
This is a heavy-tailed distribution and is also known as the log-Rayleigh
distribution.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
sigma : Real number, `\sigma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Benini, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Benini("x", alpha, beta, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ / z \\ / z \ 2/ z \
| 2*beta*log|-----|| - alpha*log|-----| - beta*log |-----|
|alpha \sigma/| \sigma/ \sigma/
|----- + -----------------|*e
\ z z /
>>> cdf(X)(z)
Piecewise((1 - exp(-alpha*log(z/sigma) - beta*log(z/sigma)**2), sigma <= z),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Benini_distribution
.. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html
"""
return rv(name, BeniniDistribution, (alpha, beta, sigma))
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1) * (1 - x)**(beta - 1) / beta_fn(alpha, beta)
def _characteristic_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), I*t)
def _moment_generating_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), t)
def Beta(name, alpha, beta):
r"""
Create a Continuous Random Variable with a Beta distribution.
The density of the Beta distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Beta, density, E, variance
>>> from sympy import Symbol, simplify, pprint, factor
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Beta("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 beta - 1
z *(1 - z)
--------------------------
B(alpha, beta)
>>> simplify(E(X))
alpha/(alpha + beta)
>>> factor(simplify(variance(X)))
alpha*beta/((alpha + beta)**2*(alpha + beta + 1))
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_distribution
.. [2] http://mathworld.wolfram.com/BetaDistribution.html
"""
return rv(name, BetaDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Noncentral Beta distribution ------------------------------------------------------------
class BetaNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'lamda')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta, lamda):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
        _value_check(lamda >= 0, "Noncentrality parameter Lambda must be non-negative.")
def pdf(self, x):
alpha, beta, lamda = self.alpha, self.beta, self.lamda
k = Dummy("k")
return Sum(exp(-lamda / 2) * (lamda / 2)**k * x**(alpha + k - 1) *(
1 - x)**(beta - 1) / (factorial(k) * beta_fn(alpha + k, beta)), (k, 0, oo))
def BetaNoncentral(name, alpha, beta, lamda):
r"""
Create a Continuous Random Variable with a Type I Noncentral Beta distribution.
The density of the Noncentral Beta distribution is given by
.. math::
f(x) := \sum_{k=0}^\infty e^{-\lambda/2}\frac{(\lambda/2)^k}{k!}
\frac{x^{\alpha+k-1}(1-x)^{\beta-1}}{\mathrm{B}(\alpha+k,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
lamda: Real number, `\lambda >= 0`, noncentrality parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaNoncentral, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> lamda = Symbol("lamda", nonnegative=True)
>>> z = Symbol("z")
>>> X = BetaNoncentral("x", alpha, beta, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
oo
_____
\ `
\ -lamda
\ k -------
\ k + alpha - 1 /lamda\ beta - 1 2
) z *|-----| *(1 - z) *e
/ \ 2 /
/ ------------------------------------------------
/ B(k + alpha, beta)*k!
/____,
k = 0
    Compute the cdf for specific values of x, alpha, beta and lamda as follows:
>>> cdf(BetaNoncentral("x", 1, 1, 1), evaluate=False)(2).doit()
2*exp(1/2)
The argument evaluate=False prevents an attempt at evaluation
of the sum for general x, before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_beta_distribution
.. [2] https://reference.wolfram.com/language/ref/NoncentralBetaDistribution.html
"""
return rv(name, BetaNoncentralDistribution, (alpha, beta, lamda))
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimeDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
set = Interval(0, oo)
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta)
def BetaPrime(name, alpha, beta):
r"""
Create a continuous random variable with a Beta prime distribution.
The density of the Beta prime distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}
with :math:`x > 0`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaPrime, density
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = BetaPrime("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 -alpha - beta
z *(z + 1)
-------------------------------
B(alpha, beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_prime_distribution
.. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
"""
return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Bounded Pareto Distribution --------------------------------------------------
class BoundedParetoDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'left', 'right')
@property
def set(self):
        return Interval(self.left, self.right)
@staticmethod
def check(alpha, left, right):
        _value_check(alpha.is_positive, "Shape must be positive.")
        _value_check(left.is_positive, "Left value should be positive.")
        _value_check(right > left, "Right should be greater than left.")
def pdf(self, x):
alpha, left, right = self.alpha, self.left, self.right
num = alpha * (left**alpha) * x**(- alpha -1)
den = 1 - (left/right)**alpha
return num/den
def BoundedPareto(name, alpha, left, right):
r"""
Create a continuous random variable with a Bounded Pareto distribution.
The density of the Bounded Pareto distribution is given by
.. math::
f(x) := \frac{\alpha L^{\alpha}x^{-\alpha-1}}{1-(\frac{L}{H})^{\alpha}}
Parameters
==========
alpha : Real Number, `alpha > 0`
Shape parameter
left : Real Number, `left > 0`
Location parameter
right : Real Number, `right > left`
Location parameter
Examples
========
>>> from sympy.stats import BoundedPareto, density, cdf, E
>>> from sympy import symbols
>>> L, H = symbols('L, H', positive=True)
>>> X = BoundedPareto('X', 2, L, H)
>>> x = symbols('x')
>>> density(X)(x)
2*L**2/(x**3*(1 - L**2/H**2))
>>> cdf(X)(x)
Piecewise((-H**2*L**2/(x**2*(H**2 - L**2)) + H**2/(H**2 - L**2), L <= x), (0, True))
>>> E(X).simplify()
2*H*L/(H + L)
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution#Bounded_Pareto_distribution
"""
    return rv(name, BoundedParetoDistribution, (alpha, left, right))
# ------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyDistribution(SingleContinuousDistribution):
_argnames = ('x0', 'gamma')
@staticmethod
def check(x0, gamma):
_value_check(gamma > 0, "Scale parameter Gamma must be positive.")
_value_check(x0.is_real, "Location parameter must be real.")
def pdf(self, x):
return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2))
def _cdf(self, x):
x0, gamma = self.x0, self.gamma
return (1/pi)*atan((x - x0)/gamma) + S.Half
def _characteristic_function(self, t):
return exp(self.x0 * I * t - self.gamma * Abs(t))
def _moment_generating_function(self, t):
raise NotImplementedError("The moment generating function for the "
"Cauchy distribution does not exist.")
def _quantile(self, p):
return self.x0 + self.gamma*tan(pi*(p - S.Half))
def Cauchy(name, x0, gamma):
r"""
Create a continuous random variable with a Cauchy distribution.
The density of the Cauchy distribution is given by
.. math::
f(x) := \frac{1}{\pi \gamma [1 + {(\frac{x-x_0}{\gamma})}^2]}
Parameters
==========
x0 : Real number, the location
gamma : Real number, `\gamma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Cauchy, density
>>> from sympy import Symbol
>>> x0 = Symbol("x0")
>>> gamma = Symbol("gamma", positive=True)
>>> z = Symbol("z")
>>> X = Cauchy("x", x0, gamma)
>>> density(X)(z)
1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Cauchy_distribution
.. [2] http://mathworld.wolfram.com/CauchyDistribution.html
"""
return rv(name, CauchyDistribution, (x0, gamma))
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
return 2**(1 - self.k/2)*x**(self.k - 1)*exp(-x**2/2)/gamma(self.k/2)
def _characteristic_function(self, t):
k = self.k
part_1 = hyper((k/2,), (S.Half,), -t**2/2)
part_2 = I*t*sqrt(2)*gamma((k+1)/2)/gamma(k/2)
part_3 = hyper(((k+1)/2,), (Rational(3, 2),), -t**2/2)
return part_1 + part_2*part_3
def _moment_generating_function(self, t):
k = self.k
part_1 = hyper((k / 2,), (S.Half,), t ** 2 / 2)
part_2 = t * sqrt(2) * gamma((k + 1) / 2) / gamma(k / 2)
part_3 = hyper(((k + 1) / 2,), (S(3) / 2,), t ** 2 / 2)
return part_1 + part_2 * part_3
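    # Note: the moment generating function above is the characteristic
    # function evaluated at -I*t (M(t) = phi(-I*t)), hence the sign flip in
    # the hypergeometric arguments and the missing factor of I.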
def Chi(name, k):
r"""
Create a continuous random variable with a Chi distribution.
The density of the Chi distribution is given by
.. math::
f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer, The number of degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Chi, density, E
>>> from sympy import Symbol, simplify
>>> k = Symbol("k", integer=True)
>>> z = Symbol("z")
>>> X = Chi("x", k)
>>> density(X)(z)
2**(1 - k/2)*z**(k - 1)*exp(-z**2/2)/gamma(k/2)
>>> simplify(E(X))
sqrt(2)*gamma(k/2 + 1/2)/gamma(k/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_distribution
.. [2] http://mathworld.wolfram.com/ChiDistribution.html
"""
return rv(name, ChiDistribution, (k,))
#-------------------------------------------------------------------------------
# Non-central Chi distribution -------------------------------------------------
class ChiNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('k', 'l')
@staticmethod
def check(k, l):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
_value_check(l > 0, "Shift parameter Lambda must be positive.")
set = Interval(0, oo)
def pdf(self, x):
k, l = self.k, self.l
return exp(-(x**2+l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2-1, l*x)
def ChiNoncentral(name, k, l):
r"""
Create a continuous random variable with a non-central Chi distribution.
The density of the non-central Chi distribution is given by
.. math::
f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda}
{(\lambda x)^{k/2}} I_{k/2-1}(\lambda x)
with `x \geq 0`. Here, `I_\nu (x)` is the
:ref:`modified Bessel function of the first kind <besseli>`.
Parameters
==========
k : A positive Integer, `k > 0`, the number of degrees of freedom
lambda : Real number, `\lambda > 0`, Shift parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiNoncentral, density
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True)
>>> l = Symbol("l")
>>> z = Symbol("z")
>>> X = ChiNoncentral("x", k, l)
>>> density(X)(z)
l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z)
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_chi_distribution
"""
return rv(name, ChiNoncentralDistribution, (k, l))
#-------------------------------------------------------------------------------
# Chi squared distribution -----------------------------------------------------
class ChiSquaredDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
k = self.k
return 1/(2**(k/2)*gamma(k/2))*x**(k/2 - 1)*exp(-x/2)
def _cdf(self, x):
k = self.k
return Piecewise(
(S.One/gamma(k/2)*lowergamma(k/2, x/2), x >= 0),
(0, True)
)
def _characteristic_function(self, t):
return (1 - 2*I*t)**(-self.k/2)
def _moment_generating_function(self, t):
return (1 - 2*t)**(-self.k/2)
def ChiSquared(name, k):
r"""
Create a continuous random variable with a Chi-squared distribution.
The density of the Chi-squared distribution is given by
.. math::
f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)}
x^{\frac{k}{2}-1} e^{-\frac{x}{2}}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer, The number of degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiSquared, density, E, variance, moment
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True, positive=True)
>>> z = Symbol("z")
>>> X = ChiSquared("x", k)
>>> density(X)(z)
2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2)
>>> E(X)
k
>>> variance(X)
2*k
>>> moment(X, 3)
k**3 + 6*k**2 + 8*k
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_squared_distribution
.. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html
"""
return rv(name, ChiSquaredDistribution, (k, ))
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumDistribution(SingleContinuousDistribution):
_argnames = ('p', 'a', 'b')
set = Interval(0, oo)
@staticmethod
def check(p, a, b):
_value_check(p > 0, "Shape parameter p must be positive.")
_value_check(a > 0, "Shape parameter a must be positive.")
_value_check(b > 0, "Scale parameter b must be positive.")
def pdf(self, x):
p, a, b = self.p, self.a, self.b
return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1)))
def _cdf(self, x):
p, a, b = self.p, self.a, self.b
return Piecewise(((S.One + (S(x)/b)**-a)**-p, x>=0),
(S.Zero, True))
def Dagum(name, p, a, b):
r"""
Create a continuous random variable with a Dagum distribution.
The density of the Dagum distribution is given by
.. math::
f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}}
{\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right)
with :math:`x > 0`.
Parameters
==========
p : Real number, `p > 0`, a shape
a : Real number, `a > 0`, a shape
b : Real number, `b > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Dagum, density, cdf
>>> from sympy import Symbol
>>> p = Symbol("p", positive=True)
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Dagum("x", p, a, b)
>>> density(X)(z)
a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z
>>> cdf(X)(z)
Piecewise(((1 + (z/b)**(-a))**(-p), z >= 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Dagum_distribution
"""
return rv(name, DagumDistribution, (p, a, b))
#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
def Erlang(name, k, l):
r"""
Create a continuous random variable with an Erlang distribution.
The density of the Erlang distribution is given by
.. math::
f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!}
    with :math:`x \in [0, \infty)`.
Parameters
==========
k : Positive integer
l : Real number, `\lambda > 0`, the rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Erlang, density, cdf, E, variance
>>> from sympy import Symbol, simplify, pprint
>>> k = Symbol("k", integer=True, positive=True)
>>> l = Symbol("l", positive=True)
>>> z = Symbol("z")
>>> X = Erlang("x", k, l)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k k - 1 -l*z
l *z *e
---------------
Gamma(k)
>>> C = cdf(X)(z)
>>> pprint(C, use_unicode=False)
/lowergamma(k, l*z)
|------------------ for z > 0
< Gamma(k)
|
\ 0 otherwise
>>> E(X)
k/l
>>> simplify(variance(X))
k/l**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Erlang_distribution
.. [2] http://mathworld.wolfram.com/ErlangDistribution.html
"""
return rv(name, GammaDistribution, (k, S.One/l))
# -------------------------------------------------------------------------------
# ExGaussian distribution -----------------------------------------------------
class ExGaussianDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std', 'rate')
set = Interval(-oo, oo)
@staticmethod
def check(mean, std, rate):
_value_check(
std > 0, "Standard deviation of ExGaussian must be positive.")
_value_check(rate > 0, "Rate of ExGaussian must be positive.")
def pdf(self, x):
mean, std, rate = self.mean, self.std, self.rate
term1 = rate/2
term2 = exp(rate * (2 * mean + rate * std**2 - 2*x)/2)
term3 = erfc((mean + rate*std**2 - x)/(sqrt(2)*std))
return term1*term2*term3
def _cdf(self, x):
from sympy.stats import cdf
mean, std, rate = self.mean, self.std, self.rate
u = rate*(x - mean)
v = rate*std
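        # The return value below follows the standard EMG CDF identity
        # F(x) = Phi(u; 0, v) - exp(-u + v**2/2 + log(Phi(u; v**2, v)))
        # with u = rate*(x - mean) and v = rate*std; see reference [1] in the
        # ExGaussian docstring below.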
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
return GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
def _characteristic_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - I*t/rate)**(-1)
term2 = exp(I*mean*t - std**2*t**2/2)
return term1 * term2
def _moment_generating_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - t/rate)**(-1)
term2 = exp(mean*t + std**2*t**2/2)
return term1*term2
def ExGaussian(name, mean, std, rate):
r"""
Create a continuous random variable with an Exponentially modified
Gaussian (EMG) distribution.
The density of the exponentially modified Gaussian distribution is given by
.. math::
f(x) := \frac{\lambda}{2}e^{\frac{\lambda}{2}(2\mu+\lambda\sigma^2-2x)}
\text{erfc}(\frac{\mu + \lambda\sigma^2 - x}{\sqrt{2}\sigma})
    with `x \in (-\infty, \infty)`. Note that the expected value is `\mu + 1/\lambda`.
Parameters
==========
    mean : A Real number, the mean of the Gaussian component
    std : A positive Real number, :math:`\sigma > 0`, the standard deviation
        of the Gaussian component
    rate : A positive Real number, :math:`\lambda > 0`, the rate of the
        Exponential component
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExGaussian, density, cdf, E
>>> from sympy.stats import variance, skewness
>>> from sympy import Symbol, pprint, simplify
>>> mean = Symbol("mu")
>>> std = Symbol("sigma", positive=True)
>>> rate = Symbol("lamda", positive=True)
>>> z = Symbol("z")
>>> X = ExGaussian("x", mean, std, rate)
>>> pprint(density(X)(z), use_unicode=False)
/ 2 \
lamda*\lamda*sigma + 2*mu - 2*z/
--------------------------------- / ___ / 2 \\
2 |\/ 2 *\lamda*sigma + mu - z/|
lamda*e *erfc|-----------------------------|
\ 2*sigma /
----------------------------------------------------------------------------
2
>>> cdf(X)(z)
-(erf(sqrt(2)*(-lamda**2*sigma**2 + lamda*(-mu + z))/(2*lamda*sigma))/2 + 1/2)*exp(lamda**2*sigma**2/2 - lamda*(-mu + z)) + erf(sqrt(2)*(-mu + z)/(2*sigma))/2 + 1/2
>>> E(X)
(lamda*mu + 1)/lamda
>>> simplify(variance(X))
sigma**2 + lamda**(-2)
>>> simplify(skewness(X))
2/(lamda**2*sigma**2 + 1)**(3/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
"""
return rv(name, ExGaussianDistribution, (mean, std, rate))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialDistribution(SingleContinuousDistribution):
_argnames = ('rate',)
set = Interval(0, oo)
@staticmethod
def check(rate):
_value_check(rate > 0, "Rate must be positive.")
def pdf(self, x):
return self.rate * exp(-self.rate*x)
def _cdf(self, x):
return Piecewise(
(S.One - exp(-self.rate*x), x >= 0),
(0, True),
)
def _characteristic_function(self, t):
rate = self.rate
return rate / (rate - I*t)
def _moment_generating_function(self, t):
rate = self.rate
return rate / (rate - t)
def _quantile(self, p):
return -log(1-p)/self.rate
def Exponential(name, rate):
r"""
Create a continuous random variable with an Exponential distribution.
The density of the exponential distribution is given by
.. math::
f(x) := \lambda \exp(-\lambda x)
with `x > 0`. Note that the expected value is `1/\lambda`.
Parameters
==========
rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Exponential, density, cdf, E
>>> from sympy.stats import variance, std, skewness, quantile
>>> from sympy import Symbol
>>> l = Symbol("lambda", positive=True)
>>> z = Symbol("z")
>>> p = Symbol("p")
>>> X = Exponential("x", l)
>>> density(X)(z)
lambda*exp(-lambda*z)
>>> cdf(X)(z)
Piecewise((1 - exp(-lambda*z), z >= 0), (0, True))
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> E(X)
1/lambda
>>> variance(X)
lambda**(-2)
>>> skewness(X)
2
>>> X = Exponential('x', 10)
>>> density(X)(z)
10*exp(-10*z)
>>> E(X)
1/10
>>> std(X)
1/10
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponential_distribution
.. [2] http://mathworld.wolfram.com/ExponentialDistribution.html
"""
return rv(name, ExponentialDistribution, (rate, ))
# -------------------------------------------------------------------------------
# Exponential Power distribution -----------------------------------------------------
class ExponentialPowerDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'alpha', 'beta')
set = Interval(-oo, oo)
@staticmethod
def check(mu, alpha, beta):
_value_check(alpha > 0, "Scale parameter alpha must be positive.")
_value_check(beta > 0, "Shape parameter beta must be positive.")
def pdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = beta*exp(-(Abs(x - mu)/alpha)**beta)
den = 2*alpha*gamma(1/beta)
return num/den
def _cdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = lowergamma(1/beta, (Abs(x - mu) / alpha)**beta)
den = 2*gamma(1/beta)
return sign(x - mu)*num/den + S.Half
def ExponentialPower(name, mu, alpha, beta):
r"""
Create a Continuous Random Variable with Exponential Power distribution.
    This distribution is also known as the Generalized Normal
    distribution, version 1.
The density of the Exponential Power distribution is given by
.. math::
f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})}
e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}}
    with :math:`x \in (-\infty, \infty)`.
Parameters
==========
    mu : Real number, the location
    alpha : Real number, `\alpha > 0`, a scale
    beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExponentialPower, density, cdf
>>> from sympy import Symbol, pprint
>>> z = Symbol("z")
>>> mu = Symbol("mu")
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> X = ExponentialPower("x", mu, alpha, beta)
>>> pprint(density(X)(z), use_unicode=False)
beta
/|mu - z|\
-|--------|
\ alpha /
beta*e
---------------------
/ 1 \
2*alpha*Gamma|----|
\beta/
>>> cdf(X)(z)
1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta))
References
==========
.. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html
.. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
"""
return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
class FDistributionDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(0, oo)
@staticmethod
def check(d1, d2):
        _value_check((d1 > 0, d1.is_integer),
                     "Degrees of freedom d1 must be a positive integer.")
        _value_check((d2 > 0, d2.is_integer),
                     "Degrees of freedom d2 must be a positive integer.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2))
/ (x * beta_fn(d1/2, d2/2)))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'F-distribution does not exist.')
def FDistribution(name, d1, d2):
r"""
    Create a continuous random variable with an F distribution.
The density of the F distribution is given by
.. math::
f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
{(d_1 x + d_2)^{d_1 + d_2}}}}
{x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}
with :math:`x > 0`.
Parameters
==========
d1 : `d_1 > 0`, where d_1 is the degrees of freedom (n_1 - 1)
d2 : `d_2 > 0`, where d_2 is the degrees of freedom (n_2 - 1)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FDistribution, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FDistribution("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d2
-- ______________________________
2 / d1 -d1 - d2
d2 *\/ (d1*z) *(d1*z + d2)
--------------------------------------
/d1 d2\
z*B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/F-distribution
.. [2] http://mathworld.wolfram.com/F-Distribution.html
"""
return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
class FisherZDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(-oo, oo)
@staticmethod
def check(d1, d2):
_value_check(d1 > 0, "Degree of freedom d1 must be positive.")
_value_check(d2 > 0, "Degree of freedom d2 must be positive.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2) *
exp(d1*x) / (d1*exp(2*x)+d2)**((d1+d2)/2))
def FisherZ(name, d1, d2):
r"""
    Create a Continuous Random Variable with a Fisher's Z distribution.
The density of the Fisher's Z distribution is given by
.. math::
f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)}
                \frac{e^{d_1 x}}{\left(d_1 e^{2x} + d_2\right)^{\left(d_1 + d_2\right)/2}}
.. TODO - What is the difference between these degrees of freedom?
Parameters
==========
d1 : `d_1 > 0`, degree of freedom
d2 : `d_2 > 0`, degree of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FisherZ, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FisherZ("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d1 d2
d1 d2 - -- - --
-- -- 2 2
2 2 / 2*z \ d1*z
2*d1 *d2 *\d1*e + d2/ *e
-----------------------------------------
/d1 d2\
B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution
.. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
"""
return rv(name, FisherZDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Frechet distribution ---------------------------------------------------------
class FrechetDistribution(SingleContinuousDistribution):
_argnames = ('a', 's', 'm')
set = Interval(0, oo)
@staticmethod
def check(a, s, m):
_value_check(a > 0, "Shape parameter alpha must be positive.")
_value_check(s > 0, "Scale parameter s must be positive.")
def __new__(cls, a, s=1, m=0):
a, s, m = list(map(sympify, (a, s, m)))
return Basic.__new__(cls, a, s, m)
def pdf(self, x):
a, s, m = self.a, self.s, self.m
return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a))
def _cdf(self, x):
a, s, m = self.a, self.s, self.m
return Piecewise((exp(-((x-m)/s)**(-a)), x >= m),
(S.Zero, True))
def Frechet(name, a, s=1, m=0):
r"""
Create a continuous random variable with a Frechet distribution.
The density of the Frechet distribution is given by
.. math::
f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha}
e^{-(\frac{x-m}{s})^{-\alpha}}
with :math:`x \geq m`.
Parameters
==========
a : Real number, :math:`a \in \left(0, \infty\right)` the shape
s : Real number, :math:`s \in \left(0, \infty\right)` the scale
m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Frechet, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", positive=True)
>>> s = Symbol("s", positive=True)
>>> m = Symbol("m", real=True)
>>> z = Symbol("z")
>>> X = Frechet("x", a, s, m)
>>> density(X)(z)
a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s
>>> cdf(X)(z)
Piecewise((exp(-((-m + z)/s)**(-a)), m <= z), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution
"""
return rv(name, FrechetDistribution, (a, s, m))
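# Illustrative sketch (not part of the library API): on the branch z >= m the Frechet
# cdf shown above is exp(-((z - m)/s)**(-a)), and differentiating it should reproduce
# the density. Hypothetical helper, not called at import time.
def _frechet_pdf_is_cdf_derivative_example():
    from sympy import Symbol, diff, exp, simplify
    from sympy.stats import Frechet, density
    a = Symbol("a", positive=True)
    s = Symbol("s", positive=True)
    m = Symbol("m", real=True)
    z = Symbol("z")
    X = Frechet("x", a, s, m)
    F = exp(-((z - m)/s)**(-a))  # the z >= m branch of cdf(X)(z)
    # Expected to simplify to zero, i.e. density(X) equals d/dz cdf(X) for z > m.
    return simplify(diff(F, z) - density(X)(z))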
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaDistribution(SingleContinuousDistribution):
_argnames = ('k', 'theta')
set = Interval(0, oo)
@staticmethod
def check(k, theta):
_value_check(k > 0, "k must be positive")
_value_check(theta > 0, "Theta must be positive")
def pdf(self, x):
k, theta = self.k, self.theta
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def _cdf(self, x):
k, theta = self.k, self.theta
return Piecewise(
(lowergamma(k, S(x)/theta)/gamma(k), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
return (1 - self.theta*I*t)**(-self.k)
def _moment_generating_function(self, t):
return (1- self.theta*t)**(-self.k)
def Gamma(name, k, theta):
r"""
Create a continuous random variable with a Gamma distribution.
The density of the Gamma distribution is given by
.. math::
f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
    with :math:`x \in [0, \infty)`.
Parameters
==========
k : Real number, `k > 0`, a shape
theta : Real number, `\theta > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gamma, density, cdf, E, variance
>>> from sympy import Symbol, pprint, simplify
>>> k = Symbol("k", positive=True)
>>> theta = Symbol("theta", positive=True)
>>> z = Symbol("z")
>>> X = Gamma("x", k, theta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-z
-----
-k k - 1 theta
theta *z *e
---------------------
Gamma(k)
>>> C = cdf(X, meijerg=True)(z)
>>> pprint(C, use_unicode=False)
/ / z \
|k*lowergamma|k, -----|
| \ theta/
<---------------------- for z >= 0
| Gamma(k + 1)
|
\ 0 otherwise
>>> E(X)
k*theta
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
k*theta
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_distribution
.. [2] http://mathworld.wolfram.com/GammaDistribution.html
"""
return rv(name, GammaDistribution, (k, theta))
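# Illustrative sketch (not part of the library API): with shape k = 1 and scale
# theta = 1/rate the Gamma density should coincide with the Exponential density,
# both being rate*exp(-rate*z). Hypothetical helper, not called at import time.
def _gamma_shape_one_is_exponential_example():
    from sympy import Symbol, simplify
    from sympy.stats import Exponential, Gamma, density
    rate = Symbol("lambda", positive=True)
    z = Symbol("z")
    X = Gamma("x", 1, 1/rate)
    Y = Exponential("y", rate)
    # The difference is expected to simplify to zero.
    return simplify(density(X)(z) - density(Y)(z))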
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
class GammaInverseDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, oo)
@staticmethod
def check(a, b):
_value_check(a > 0, "alpha must be positive")
_value_check(b > 0, "beta must be positive")
def pdf(self, x):
a, b = self.a, self.b
return b**a/gamma(a) * x**(-a-1) * exp(-b/x)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise((uppergamma(a,b/x)/gamma(a), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
a, b = self.a, self.b
return 2 * (-I*b*t)**(a/2) * besselk(a, sqrt(-4*I*b*t)) / gamma(a)
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'gamma inverse distribution does not exist.')
def GammaInverse(name, a, b):
r"""
Create a continuous random variable with an inverse Gamma distribution.
The density of the inverse Gamma distribution is given by
.. math::
f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1}
\exp\left(\frac{-\beta}{x}\right)
with :math:`x > 0`.
Parameters
==========
a : Real number, `a > 0` a shape
b : Real number, `b > 0` a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GammaInverse, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = GammaInverse("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-b
---
a -a - 1 z
b *z *e
---------------
Gamma(a)
>>> cdf(X)(z)
Piecewise((uppergamma(a, b/z)/gamma(a), z > 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse-gamma_distribution
"""
return rv(name, GammaInverseDistribution, (a, b))
#-------------------------------------------------------------------------------
# Gumbel distribution (Maximum and Minimum) --------------------------------------------------------
class GumbelDistribution(SingleContinuousDistribution):
_argnames = ('beta', 'mu', 'minimum')
set = Interval(-oo, oo)
@staticmethod
def check(beta, mu, minimum):
_value_check(beta > 0, "Scale parameter beta must be positive.")
def pdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
f_max = (1/beta)*exp(-z - exp(-z))
f_min = (1/beta)*exp(z - exp(z))
return Piecewise((f_min, self.minimum), (f_max, not self.minimum))
def _cdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
F_max = exp(-exp(-z))
F_min = 1 - exp(-exp(z))
return Piecewise((F_min, self.minimum), (F_max, not self.minimum))
def _characteristic_function(self, t):
cf_max = gamma(1 - I*self.beta*t) * exp(I*self.mu*t)
cf_min = gamma(1 + I*self.beta*t) * exp(I*self.mu*t)
return Piecewise((cf_min, self.minimum), (cf_max, not self.minimum))
def _moment_generating_function(self, t):
mgf_max = gamma(1 - self.beta*t) * exp(self.mu*t)
mgf_min = gamma(1 + self.beta*t) * exp(self.mu*t)
return Piecewise((mgf_min, self.minimum), (mgf_max, not self.minimum))
def Gumbel(name, beta, mu, minimum=False):
r"""
Create a Continuous Random Variable with Gumbel distribution.
The density of the Gumbel distribution is given by
For Maximum
.. math::
f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta}
- \exp \left( -\dfrac{x - \mu}{\beta} \right) \right)
    with :math:`x \in (-\infty, \infty)`.
For Minimum
.. math::
f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}} + \frac{- \mu + x}{\beta}}}{\beta}
    with :math:`x \in (-\infty, \infty)`.
Parameters
==========
mu : Real number, 'mu' is a location
beta : Real number, 'beta > 0' is a scale
    minimum : Boolean, by default False; set to True for the minimum (left-skewed) variant of the distribution
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gumbel, density, cdf
>>> from sympy import Symbol
>>> x = Symbol("x")
>>> mu = Symbol("mu")
>>> beta = Symbol("beta", positive=True)
>>> X = Gumbel("x", beta, mu)
>>> density(X)(x)
exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta
>>> cdf(X)(x)
exp(-exp(-(-mu + x)/beta))
References
==========
.. [1] http://mathworld.wolfram.com/GumbelDistribution.html
.. [2] https://en.wikipedia.org/wiki/Gumbel_distribution
.. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html
.. [4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html
"""
return rv(name, GumbelDistribution, (beta, mu, minimum))
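# Illustrative sketch (not part of the library API): the docstring above only shows the
# default (maximum) case; the hypothetical helper below also builds the minimum variant
# so the two density and cdf branches can be inspected side by side. No outputs are
# asserted here and the helper is not called at import time.
def _gumbel_minimum_variant_example():
    from sympy import Symbol
    from sympy.stats import Gumbel, cdf, density
    x = Symbol("x")
    mu = Symbol("mu")
    beta = Symbol("beta", positive=True)
    X_max = Gumbel("x_max", beta, mu)                # default: minimum=False
    X_min = Gumbel("x_min", beta, mu, minimum=True)  # mirrored, left-skewed variant
    return density(X_max)(x), density(X_min)(x), cdf(X_min)(x)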
#-------------------------------------------------------------------------------
# Gompertz distribution --------------------------------------------------------
class GompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
eta, b = self.eta, self.b
return b*eta*exp(b*x)*exp(eta)*exp(-eta*exp(b*x))
def _cdf(self, x):
eta, b = self.eta, self.b
return 1 - exp(eta)*exp(-eta*exp(b*x))
def _moment_generating_function(self, t):
eta, b = self.eta, self.b
return eta * exp(eta) * expint(t/b, eta)
def Gompertz(name, b, eta):
r"""
Create a Continuous Random Variable with Gompertz distribution.
The density of the Gompertz distribution is given by
.. math::
f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right)
    with :math:`x \in [0, \infty)`.
Parameters
==========
b: Real number, 'b > 0' a scale
eta: Real number, 'eta > 0' a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> z = Symbol("z")
>>> X = Gompertz("x", b, eta)
>>> density(X)(z)
b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z))
References
==========
.. [1] https://en.wikipedia.org/wiki/Gompertz_distribution
"""
return rv(name, GompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# Kumaraswamy distribution -----------------------------------------------------
class KumaraswamyDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, oo)
@staticmethod
def check(a, b):
_value_check(a > 0, "a must be positive")
_value_check(b > 0, "b must be positive")
def pdf(self, x):
a, b = self.a, self.b
return a * b * x**(a-1) * (1-x**a)**(b-1)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < S.Zero),
(1 - (1 - x**a)**b, x <= S.One),
(S.One, True))
def Kumaraswamy(name, a, b):
r"""
Create a Continuous Random Variable with a Kumaraswamy distribution.
The density of the Kumaraswamy distribution is given by
.. math::
f(x) := a b x^{a-1} (1-x^a)^{b-1}
with :math:`x \in [0,1]`.
Parameters
==========
a : Real number, `a > 0` a shape
b : Real number, `b > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Kumaraswamy, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Kumaraswamy("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
b - 1
a - 1 / a\
a*b*z *\1 - z /
>>> cdf(X)(z)
Piecewise((0, z < 0), (1 - (1 - z**a)**b, z <= 1), (1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Kumaraswamy_distribution
"""
return rv(name, KumaraswamyDistribution, (a, b))
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplaceDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'b')
set = Interval(-oo, oo)
@staticmethod
def check(mu, b):
_value_check(b > 0, "Scale parameter b must be positive.")
_value_check(mu.is_real, "Location parameter mu should be real")
def pdf(self, x):
mu, b = self.mu, self.b
return 1/(2*b)*exp(-Abs(x - mu)/b)
def _cdf(self, x):
mu, b = self.mu, self.b
return Piecewise(
(S.Half*exp((x - mu)/b), x < mu),
(S.One - S.Half*exp(-(x - mu)/b), x >= mu)
)
def _characteristic_function(self, t):
return exp(self.mu*I*t) / (1 + self.b**2*t**2)
def _moment_generating_function(self, t):
return exp(self.mu*t) / (1 - self.b**2*t**2)
def Laplace(name, mu, b):
r"""
Create a continuous random variable with a Laplace distribution.
The density of the Laplace distribution is given by
.. math::
f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)
Parameters
==========
mu : Real number or a list/matrix, the location (mean) or the
location vector
b : Real number or a positive definite matrix, representing a scale
or the covariance matrix.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Laplace, density, cdf
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Laplace("x", mu, b)
>>> density(X)(z)
exp(-Abs(mu - z)/b)/(2*b)
>>> cdf(X)(z)
Piecewise((exp((-mu + z)/b)/2, mu > z), (1 - exp((mu - z)/b)/2, True))
>>> L = Laplace('L', [1, 2], [[1, 0], [0, 1]])
>>> pprint(density(L)(1, 2), use_unicode=False)
5 / ____\
e *besselk\0, \/ 35 /
---------------------
pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Laplace_distribution
.. [2] http://mathworld.wolfram.com/LaplaceDistribution.html
"""
if isinstance(mu, (list, MatrixBase)) and\
isinstance(b, (list, MatrixBase)):
from sympy.stats.joint_rv_types import MultivariateLaplace
return MultivariateLaplace(name, mu, b)
return rv(name, LaplaceDistribution, (mu, b))
#-------------------------------------------------------------------------------
# Levy distribution ---------------------------------------------------------
class LevyDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'c')
@property
def set(self):
return Interval(self.mu, oo)
@staticmethod
def check(mu, c):
_value_check(c > 0, "c (scale parameter) must be positive")
_value_check(mu.is_real, "mu (location paramater) must be real")
def pdf(self, x):
mu, c = self.mu, self.c
return sqrt(c/(2*pi))*exp(-c/(2*(x - mu)))/((x - mu)**(S.One + S.Half))
def _cdf(self, x):
mu, c = self.mu, self.c
return erfc(sqrt(c/(2*(x - mu))))
def _characteristic_function(self, t):
mu, c = self.mu, self.c
return exp(I * mu * t - sqrt(-2 * I * c * t))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of Levy distribution does not exist.')
def Levy(name, mu, c):
r"""
Create a continuous random variable with a Levy distribution.
The density of the Levy distribution is given by
.. math::
        f(x) := \sqrt{\frac{c}{2 \pi}} \frac{\exp\left(-\frac{c}{2 (x - \mu)}\right)}{(x - \mu)^{3/2}}
Parameters
==========
mu : Real number, the location parameter
c : Real number, `c > 0`, a scale parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Levy, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> c = Symbol("c", positive=True)
>>> z = Symbol("z")
>>> X = Levy("x", mu, c)
>>> density(X)(z)
sqrt(2)*sqrt(c)*exp(-c/(-2*mu + 2*z))/(2*sqrt(pi)*(-mu + z)**(3/2))
>>> cdf(X)(z)
erfc(sqrt(c)*sqrt(1/(-2*mu + 2*z)))
References
==========
.. [1] https://en.wikipedia.org/wiki/L%C3%A9vy_distribution
.. [2] http://mathworld.wolfram.com/LevyDistribution.html
"""
return rv(name, LevyDistribution, (mu, c))
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
set = Interval(-oo, oo)
@staticmethod
def check(mu, s):
_value_check(s > 0, "Scale parameter s must be positive.")
def pdf(self, x):
mu, s = self.mu, self.s
return exp(-(x - mu)/s)/(s*(1 + exp(-(x - mu)/s))**2)
def _cdf(self, x):
mu, s = self.mu, self.s
return S.One/(1 + exp(-(x - mu)/s))
def _characteristic_function(self, t):
return Piecewise((exp(I*t*self.mu) * pi*self.s*t / sinh(pi*self.s*t), Ne(t, 0)), (S.One, True))
def _moment_generating_function(self, t):
return exp(self.mu*t) * beta_fn(1 - self.s*t, 1 + self.s*t)
def _quantile(self, p):
return self.mu - self.s*log(-S.One + S.One/p)
def Logistic(name, mu, s):
r"""
Create a continuous random variable with a logistic distribution.
The density of the logistic distribution is given by
.. math::
f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}
Parameters
==========
mu : Real number, the location (mean)
s : Real number, `s > 0` a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Logistic, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = Logistic("x", mu, s)
>>> density(X)(z)
exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)
>>> cdf(X)(z)
1/(exp((mu - z)/s) + 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Logistic_distribution
.. [2] http://mathworld.wolfram.com/LogisticDistribution.html
"""
return rv(name, LogisticDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Log-logistic distribution --------------------------------------------------------
class LogLogisticDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Scale parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
a, b = self.alpha, self.beta
return ((b/a)*(x/a)**(b - 1))/(1 + (x/a)**b)**2
def _cdf(self, x):
a, b = self.alpha, self.beta
return 1/(1 + (x/a)**(-b))
def _quantile(self, p):
a, b = self.alpha, self.beta
return a*((p/(1 - p))**(1/b))
def expectation(self, expr, var, **kwargs):
a, b = self.args
return Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
def LogLogistic(name, alpha, beta):
r"""
Create a continuous random variable with a log-logistic distribution.
The distribution is unimodal when `beta > 1`.
The density of the log-logistic distribution is given by
.. math::
f(x) := \frac{(\frac{\beta}{\alpha})(\frac{x}{\alpha})^{\beta - 1}}
{(1 + (\frac{x}{\alpha})^{\beta})^2}
Parameters
==========
alpha : Real number, `\alpha > 0`, scale parameter and median of distribution
beta : Real number, `\beta > 0` a shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogLogistic, density, cdf, quantile
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", real=True, positive=True)
>>> beta = Symbol("beta", real=True, positive=True)
>>> p = Symbol("p")
>>> z = Symbol("z", positive=True)
>>> X = LogLogistic("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
beta - 1
/ z \
beta*|-----|
\alpha/
------------------------
2
/ beta \
|/ z \ |
alpha*||-----| + 1|
\\alpha/ /
>>> cdf(X)(z)
1/(1 + (z/alpha)**(-beta))
>>> quantile(X)(p)
alpha*(p/(1 - p))**(1/beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Log-logistic_distribution
"""
return rv(name, LogLogisticDistribution, (alpha, beta))
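# Illustrative sketch (not part of the library API): the docstring above calls alpha the
# median of the distribution; evaluating the quantile at probability 1/2 should return
# alpha. Hypothetical helper, not called at import time.
def _loglogistic_median_is_alpha_example():
    from sympy import Rational, Symbol
    from sympy.stats import LogLogistic, quantile
    alpha = Symbol("alpha", positive=True)
    beta = Symbol("beta", positive=True)
    X = LogLogistic("x", alpha, beta)
    # quantile(X)(1/2) = alpha*((1/2)/(1 - 1/2))**(1/beta), which reduces to alpha.
    return quantile(X)(Rational(1, 2))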
#-------------------------------------------------------------------------------
# Logit-Normal distribution -----------------------------------------------------
class LogitNormalDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
set = Interval.open(0, 1)
@staticmethod
def check(mu, s):
_value_check((s ** 2).is_real is not False and s ** 2 > 0, "Squared scale parameter s must be positive.")
_value_check(mu.is_real is not False, "Location parameter must be real")
def _logit(self, x):
return log(x / (1 - x))
def pdf(self, x):
mu, s = self.mu, self.s
return exp(-(self._logit(x) - mu)**2/(2*s**2))*(S.One/sqrt(2*pi*(s**2)))*(1/(x*(1 - x)))
def _cdf(self, x):
mu, s = self.mu, self.s
return (S.One/2)*(1 + erf((self._logit(x) - mu)/(sqrt(2*s**2))))
def LogitNormal(name, mu, s):
r"""
Create a continuous random variable with a Logit-Normal distribution.
    The density of the logit-normal distribution is given by
.. math::
        f(x) := \frac{1}{s \sqrt{2 \pi}} \frac{1}{x(1 - x)}
                e^{- \frac{(\operatorname{logit}(x) - \mu)^2}{2 s^2}}
    where :math:`\operatorname{logit}(x) = \log\left(\frac{x}{1 - x}\right)`.
Parameters
==========
mu : Real number, the location (mean)
s : Real number, `s > 0` a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogitNormal, density, cdf
>>> from sympy import Symbol,pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = LogitNormal("x",mu,s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
/ / z \\
-|-mu + log|-----||
\ \1 - z//
---------------------
2
___ 2*s
\/ 2 *e
----------------------------
____
2*\/ pi *s*z*(1 - z)
>>> density(X)(z)
sqrt(2)*exp(-(-mu + log(z/(1 - z)))**2/(2*s**2))/(2*sqrt(pi)*s*z*(1 - z))
>>> cdf(X)(z)
erf(sqrt(2)*(-mu + log(z/(1 - z)))/(2*s))/2 + 1/2
References
==========
.. [1] https://en.wikipedia.org/wiki/Logit-normal_distribution
"""
return rv(name, LogitNormalDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
set = Interval(0, oo)
@staticmethod
def check(mean, std):
_value_check(std > 0, "Parameter std must be positive.")
def pdf(self, x):
mean, std = self.mean, self.std
return exp(-(log(x) - mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std)
def _cdf(self, x):
mean, std = self.mean, self.std
return Piecewise(
(S.Half + S.Half*erf((log(x) - mean)/sqrt(2)/std), x > 0),
(S.Zero, True)
)
def _moment_generating_function(self, t):
raise NotImplementedError('Moment generating function of the log-normal distribution is not defined.')
def LogNormal(name, mean, std):
r"""
Create a continuous random variable with a log-normal distribution.
The density of the log-normal distribution is given by
.. math::
f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}
with :math:`x \geq 0`.
Parameters
==========
mu : Real number, the log-scale
sigma : Real number, :math:`\sigma^2 > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogNormal, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = LogNormal("x", mu, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-(-mu + log(z))
-----------------
2
___ 2*sigma
\/ 2 *e
------------------------
____
2*\/ pi *sigma*z
>>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z)
References
==========
.. [1] https://en.wikipedia.org/wiki/Lognormal
.. [2] http://mathworld.wolfram.com/LogNormalDistribution.html
"""
return rv(name, LogNormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Lomax Distribution -----------------------------------------------------------
class LomaxDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'lamda',)
set = Interval(0, oo)
@staticmethod
def check(alpha, lamda):
_value_check(alpha.is_real, "Shape parameter should be real.")
_value_check(lamda.is_real, "Scale parameter should be real.")
_value_check(alpha.is_positive, "Shape parameter should be positive.")
_value_check(lamda.is_positive, "Scale parameter should be positive.")
def pdf(self, x):
        lamda, alpha = self.lamda, self.alpha
        return (alpha/lamda) * (S.One + x/lamda)**(-alpha-1)
def Lomax(name, alpha, lamda):
r"""
Create a continuous random variable with a Lomax distribution.
The density of the Lomax distribution is given by
.. math::
f(x) := \frac{\alpha}{\lambda}\left[1+\frac{x}{\lambda}\right]^{-(\alpha+1)}
Parameters
==========
alpha : Real Number, `alpha > 0`
Shape parameter
lamda : Real Number, `lamda > 0`
Scale parameter
Examples
========
>>> from sympy.stats import Lomax, density, cdf, E
>>> from sympy import symbols
>>> a, l = symbols('a, l', positive=True)
>>> X = Lomax('X', a, l)
>>> x = symbols('x')
>>> density(X)(x)
a*(1 + x/l)**(-a - 1)/l
>>> cdf(X)(x)
Piecewise((1 - (1 + x/l)**(-a), x >= 0), (0, True))
>>> a = 2
>>> X = Lomax('X', a, l)
>>> E(X)
l
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Lomax_distribution
"""
return rv(name, LomaxDistribution, (alpha, lamda))
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellDistribution(SingleContinuousDistribution):
_argnames = ('a',)
set = Interval(0, oo)
@staticmethod
def check(a):
_value_check(a > 0, "Parameter a must be positive.")
def pdf(self, x):
a = self.a
return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3
def _cdf(self, x):
a = self.a
return erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
def Maxwell(name, a):
r"""
Create a continuous random variable with a Maxwell distribution.
The density of the Maxwell distribution is given by
.. math::
f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}
with :math:`x \geq 0`.
.. TODO - what does the parameter mean?
Parameters
==========
a : Real number, `a > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Maxwell, density, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", positive=True)
>>> z = Symbol("z")
>>> X = Maxwell("x", a)
>>> density(X)(z)
sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
>>> E(X)
2*sqrt(2)*a/sqrt(pi)
>>> simplify(variance(X))
a**2*(-8 + 3*pi)/pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Maxwell_distribution
.. [2] http://mathworld.wolfram.com/MaxwellDistribution.html
"""
return rv(name, MaxwellDistribution, (a, ))
#-------------------------------------------------------------------------------
# Moyal Distribution -----------------------------------------------------------
class MoyalDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'sigma')
@staticmethod
def check(mu, sigma):
_value_check(mu.is_real, "Location parameter must be real.")
        _value_check(sigma.is_real and sigma > 0,
                     "Scale parameter must be real and positive.")
def pdf(self, x):
mu, sigma = self.mu, self.sigma
num = exp(-(exp(-(x - mu)/sigma) + (x - mu)/(sigma))/2)
den = (sqrt(2*pi) * sigma)
return num/den
def _characteristic_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(I*t*mu)
term2 = (2**(-I*sigma*t) * gamma(Rational(1, 2) - I*t*sigma))
return (term1 * term2)/sqrt(pi)
def _moment_generating_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(t*mu)
term2 = (2**(-1*sigma*t) * gamma(Rational(1, 2) - t*sigma))
return (term1 * term2)/sqrt(pi)
def Moyal(name, mu, sigma):
r"""
Create a continuous random variable with a Moyal distribution.
The density of the Moyal distribution is given by
.. math::
        f(x) := \frac{\exp\left(-\frac{1}{2}\left(e^{-\frac{x-\mu}{\sigma}} + \frac{x-\mu}{\sigma}\right)\right)}{\sqrt{2\pi}\sigma}
with :math:`x \in \mathbb{R}`.
Parameters
==========
mu : Real number
Location parameter
sigma : Real positive number
Scale parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Moyal, density, cdf
>>> from sympy import Symbol, simplify
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True, real=True)
>>> z = Symbol("z")
>>> X = Moyal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-exp((mu - z)/sigma)/2 - (-mu + z)/(2*sigma))/(2*sqrt(pi)*sigma)
>>> simplify(cdf(X)(z))
1 - erf(sqrt(2)*exp((mu - z)/(2*sigma))/2)
References
==========
.. [1] https://reference.wolfram.com/language/ref/MoyalDistribution.html
.. [2] http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
"""
return rv(name, MoyalDistribution, (mu, sigma))
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'omega')
set = Interval(0, oo)
@staticmethod
def check(mu, omega):
        _value_check(mu >= S.Half, "Shape parameter mu must be greater than or equal to 1/2.")
_value_check(omega > 0, "Spread parameter omega must be positive.")
def pdf(self, x):
mu, omega = self.mu, self.omega
return 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu - 1)*exp(-mu/omega*x**2)
def _cdf(self, x):
mu, omega = self.mu, self.omega
return Piecewise(
(lowergamma(mu, (mu/omega)*x**2)/gamma(mu), x > 0),
(S.Zero, True))
def Nakagami(name, mu, omega):
r"""
Create a continuous random variable with a Nakagami distribution.
The density of the Nakagami distribution is given by
.. math::
f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
\exp\left(-\frac{\mu}{\omega}x^2 \right)
with :math:`x > 0`.
Parameters
==========
mu : Real number, `\mu \geq \frac{1}{2}` a shape
omega : Real number, `\omega > 0`, the spread
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Nakagami, density, E, variance, cdf
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu", positive=True)
>>> omega = Symbol("omega", positive=True)
>>> z = Symbol("z")
>>> X = Nakagami("x", mu, omega)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-mu*z
-------
mu -mu 2*mu - 1 omega
2*mu *omega *z *e
----------------------------------
Gamma(mu)
>>> simplify(E(X))
sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1)
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
omega*Gamma (mu + 1/2)
omega - -----------------------
Gamma(mu)*Gamma(mu + 1)
>>> cdf(X)(z)
Piecewise((lowergamma(mu, mu*z**2/omega)/gamma(mu), z > 0),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Nakagami_distribution
"""
return rv(name, NakagamiDistribution, (mu, omega))
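# Illustrative sketch (not part of the library API): with shape mu = 1 the Nakagami
# density should match a Rayleigh density with sigma = sqrt(omega/2), both being
# 2*z*exp(-z**2/omega)/omega. Hypothetical helper, not called at import time.
def _nakagami_shape_one_is_rayleigh_example():
    from sympy import Symbol, simplify, sqrt
    from sympy.stats import Nakagami, Rayleigh, density
    omega = Symbol("omega", positive=True)
    z = Symbol("z")
    X = Nakagami("x", 1, omega)
    Y = Rayleigh("y", sqrt(omega/2))
    # The difference is expected to simplify to zero.
    return simplify(density(X)(z) - density(Y)(z))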
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
@staticmethod
def check(mean, std):
_value_check(std > 0, "Standard deviation must be positive")
def pdf(self, x):
return exp(-(x - self.mean)**2 / (2*self.std**2)) / (sqrt(2*pi)*self.std)
def _cdf(self, x):
mean, std = self.mean, self.std
return erf(sqrt(2)*(-mean + x)/(2*std))/2 + S.Half
def _characteristic_function(self, t):
mean, std = self.mean, self.std
return exp(I*mean*t - std**2*t**2/2)
def _moment_generating_function(self, t):
mean, std = self.mean, self.std
return exp(mean*t + std**2*t**2/2)
def _quantile(self, p):
mean, std = self.mean, self.std
return mean + std*sqrt(2)*erfinv(2*p - 1)
def Normal(name, mean, std):
r"""
Create a continuous random variable with a Normal distribution.
The density of the Normal distribution is given by
.. math::
f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }
Parameters
==========
mu : Real number or a list representing the mean or the mean vector
sigma : Real number or a positive definite square matrix,
:math:`\sigma^2 > 0` the variance
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Normal, density, E, std, cdf, skewness, quantile, marginal_distribution
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu")
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> y = Symbol("y")
>>> p = Symbol("p")
>>> X = Normal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma)
>>> C = simplify(cdf(X))(z) # it needs a little more help...
>>> pprint(C, use_unicode=False)
/ ___ \
|\/ 2 *(-mu + z)|
erf|---------------|
\ 2*sigma / 1
-------------------- + -
2 2
>>> quantile(X)(p)
mu + sqrt(2)*sigma*erfinv(2*p - 1)
>>> simplify(skewness(X))
0
>>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-z**2/2)/(2*sqrt(pi))
>>> E(2*X + 1)
1
>>> simplify(std(2*X + 1))
2
>>> m = Normal('X', [1, 2], [[2, 1], [1, 2]])
>>> pprint(density(m)(y, z), use_unicode=False)
/1 y\ /2*y z\ / z\ / y 2*z \
|- - -|*|--- - -| + |1 - -|*|- - + --- - 1|
___ \2 2/ \ 3 3/ \ 2/ \ 3 3 /
\/ 3 *e
--------------------------------------------------
6*pi
>>> marginal_distribution(m, m[0])(1)
1/(2*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_distribution
.. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
"""
if isinstance(mean, (list, MatrixBase, MatrixExpr)) and\
isinstance(std, (list, MatrixBase, MatrixExpr)):
from sympy.stats.joint_rv_types import MultivariateNormal
return MultivariateNormal(name, mean, std)
return rv(name, NormalDistribution, (mean, std))
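# Illustrative sketch (not part of the library API): for a Normal variable the mean is
# also the median, so the cdf evaluated at mu collapses to erf(0)/2 + 1/2 = 1/2.
# Hypothetical helper, not called at import time.
def _normal_mean_is_median_example():
    from sympy import Symbol
    from sympy.stats import Normal, cdf
    mu = Symbol("mu")
    sigma = Symbol("sigma", positive=True)
    X = Normal("x", mu, sigma)
    return cdf(X)(mu)  # expected to evaluate to 1/2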
#-------------------------------------------------------------------------------
# Inverse Gaussian distribution ----------------------------------------------------------
class GaussianInverseDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'shape')
@property
def set(self):
return Interval(0, oo)
@staticmethod
def check(mean, shape):
_value_check(shape > 0, "Shape parameter must be positive")
_value_check(mean > 0, "Mean must be positive")
def pdf(self, x):
mu, s = self.mean, self.shape
return exp(-s*(x - mu)**2 / (2*x*mu**2)) * sqrt(s/(2*pi*x**3))
def _cdf(self, x):
from sympy.stats import cdf
mu, s = self.mean, self.shape
stdNormalcdf = cdf(Normal('x', 0, 1))
first_term = stdNormalcdf(sqrt(s/x) * ((x/mu) - S.One))
second_term = exp(2*s/mu) * stdNormalcdf(-sqrt(s/x)*(x/mu + S.One))
return first_term + second_term
def _characteristic_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*I*t)/s)))
def _moment_generating_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*t)/s)))
def GaussianInverse(name, mean, shape):
r"""
Create a continuous random variable with an Inverse Gaussian distribution.
    The Inverse Gaussian distribution is also known as the Wald distribution.
The density of the Inverse Gaussian distribution is given by
.. math::
f(x) := \sqrt{\frac{\lambda}{2\pi x^3}} e^{-\frac{\lambda(x-\mu)^2}{2x\mu^2}}
Parameters
==========
mu : Positive number representing the mean
lambda : Positive number representing the shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GaussianInverse, density, E, std, skewness
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", positive=True)
>>> lamda = Symbol("lambda", positive=True)
>>> z = Symbol("z", positive=True)
>>> X = GaussianInverse("x", mu, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-lambda*(-mu + z)
-------------------
2
___ ________ 2*mu *z
\/ 2 *\/ lambda *e
-------------------------------------
____ 3/2
2*\/ pi *z
>>> E(X)
mu
>>> std(X).expand()
mu**(3/2)/sqrt(lambda)
>>> skewness(X).expand()
3*sqrt(mu)/sqrt(lambda)
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
.. [2] http://mathworld.wolfram.com/InverseGaussianDistribution.html
"""
return rv(name, GaussianInverseDistribution, (mean, shape))
Wald = GaussianInverse
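# Illustrative sketch (not part of the library API): Wald is simply an alias bound to
# GaussianInverse, so it accepts the same (mean, shape) arguments. As in the docstring
# above, the expectation is the mean parameter. Hypothetical helper, not called at
# import time.
def _wald_alias_example():
    from sympy import Symbol
    from sympy.stats import E, Wald
    mu = Symbol("mu", positive=True)
    lamda = Symbol("lambda", positive=True)
    W = Wald("w", mu, lamda)
    return E(W)  # expected to give mu, matching the GaussianInverse example above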
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoDistribution(SingleContinuousDistribution):
_argnames = ('xm', 'alpha')
@property
def set(self):
return Interval(self.xm, oo)
@staticmethod
def check(xm, alpha):
_value_check(xm > 0, "Xm must be positive")
_value_check(alpha > 0, "Alpha must be positive")
def pdf(self, x):
xm, alpha = self.xm, self.alpha
return alpha * xm**alpha / x**(alpha + 1)
def _cdf(self, x):
xm, alpha = self.xm, self.alpha
return Piecewise(
(S.One - xm**alpha/x**alpha, x>=xm),
(0, True),
)
def _moment_generating_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-xm*t)**alpha * uppergamma(-alpha, -xm*t)
def _characteristic_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-I * xm * t) ** alpha * uppergamma(-alpha, -I * xm * t)
def Pareto(name, xm, alpha):
r"""
Create a continuous random variable with the Pareto distribution.
The density of the Pareto distribution is given by
.. math::
f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}}
    with :math:`x \in [x_m, \infty)`.
Parameters
==========
xm : Real number, `x_m > 0`, a scale
alpha : Real number, `\alpha > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Pareto, density
>>> from sympy import Symbol
>>> xm = Symbol("xm", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Pareto("x", xm, beta)
>>> density(X)(z)
beta*xm**beta*z**(-beta - 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution
.. [2] http://mathworld.wolfram.com/ParetoDistribution.html
"""
return rv(name, ParetoDistribution, (xm, alpha))
#-------------------------------------------------------------------------------
# PowerFunction distribution ---------------------------------------------------
class PowerFunctionDistribution(SingleContinuousDistribution):
    _argnames = ('alpha', 'a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(alpha, a, b):
_value_check(a.is_real, "Continuous Boundary parameter should be real.")
_value_check(b.is_real, "Continuous Boundary parameter should be real.")
        _value_check(a < b, "'a', the left boundary, must be smaller than 'b', the right boundary.")
_value_check(alpha.is_positive, "Continuous Shape parameter should be positive.")
def pdf(self, x):
alpha, a, b = self.alpha, self.a, self.b
num = alpha*(x - a)**(alpha - 1)
den = (b - a)**alpha
return num/den
def PowerFunction(name, alpha, a, b):
r"""
    Create a continuous random variable with a Power Function distribution.
The density of PowerFunction distribution is given by
.. math::
f(x) := \frac{{\alpha}(x - a)^{\alpha - 1}}{(b - a)^{\alpha}}
with :math:`x \in [a,b]`.
Parameters
==========
    alpha : Positive number, `0 < alpha`, the shape parameter
a : Real number, :math:`-\infty < a` the left boundary
b : Real number, :math:`a < b < \infty` the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import PowerFunction, density, cdf, E, variance
>>> from sympy import Symbol
>>> alpha = Symbol("alpha", positive=True)
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = PowerFunction("X", 2, a, b)
>>> density(X)(z)
(-2*a + 2*z)/(-a + b)**2
>>> cdf(X)(z)
Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) +
z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))
>>> alpha = 2
>>> a = 0
>>> b = 1
>>> Y = PowerFunction("Y", alpha, a, b)
>>> E(Y)
2/3
>>> variance(Y)
1/18
References
==========
.. [1] http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html
"""
return rv(name, PowerFunctionDistribution, (alpha, a, b))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
class QuadraticUDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
_value_check(b > a, "Parameter b must be in range (%s, oo)."%(a))
def pdf(self, x):
a, b = self.a, self.b
alpha = 12 / (b-a)**3
beta = (a+b) / 2
return Piecewise(
(alpha * (x-beta)**2, And(a<=x, x<=b)),
(S.Zero, True))
def _moment_generating_function(self, t):
a, b = self.a, self.b
return -3 * (exp(a*t) * (4 + (a**2 + 2*a*(-2 + b) + b**2) * t) \
- exp(b*t) * (4 + (-4*b + (a + b)**2) * t)) / ((a-b)**3 * t**2)
def _characteristic_function(self, t):
a, b = self.a, self.b
return -3*I*(exp(I*a*t*exp(I*b*t)) * (4*I - (-4*b + (a+b)**2)*t)) \
/ ((a-b)**3 * t**2)
def QuadraticU(name, a, b):
r"""
Create a Continuous Random Variable with a U-quadratic distribution.
The density of the U-quadratic distribution is given by
.. math::
f(x) := \alpha (x-\beta)^2
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import QuadraticU, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = QuadraticU("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ 2
| / a b \
|12*|- - - - + z|
| \ 2 2 /
<----------------- for And(b >= z, a <= z)
| 3
| (-a + b)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/U-quadratic_distribution
"""
return rv(name, QuadraticUDistribution, (a, b))
#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
class RaisedCosineDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
@property
def set(self):
return Interval(self.mu - self.s, self.mu + self.s)
@staticmethod
def check(mu, s):
_value_check(s > 0, "s must be positive")
def pdf(self, x):
mu, s = self.mu, self.s
return Piecewise(
((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)),
(S.Zero, True))
def _characteristic_function(self, t):
mu, s = self.mu, self.s
return Piecewise((exp(-I*pi*mu/s)/2, Eq(t, -pi/s)),
(exp(I*pi*mu/s)/2, Eq(t, pi/s)),
(pi**2*sin(s*t)*exp(I*mu*t) / (s*t*(pi**2 - s**2*t**2)), True))
def _moment_generating_function(self, t):
mu, s = self.mu, self.s
return pi**2 * sinh(s*t) * exp(mu*t) / (s*t*(pi**2 + s**2*t**2))
def RaisedCosine(name, mu, s):
r"""
Create a Continuous Random Variable with a raised cosine distribution.
The density of the raised cosine distribution is given by
.. math::
f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)
with :math:`x \in [\mu-s,\mu+s]`.
Parameters
==========
mu : Real number
s : Real number, `s > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import RaisedCosine, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = RaisedCosine("x", mu, s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ /pi*(-mu + z)\
|cos|------------| + 1
| \ s /
<--------------------- for And(z >= mu - s, z <= mu + s)
| 2*s
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Raised_cosine_distribution
"""
return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighDistribution(SingleContinuousDistribution):
_argnames = ('sigma',)
set = Interval(0, oo)
@staticmethod
def check(sigma):
_value_check(sigma > 0, "Scale parameter sigma must be positive.")
def pdf(self, x):
sigma = self.sigma
return x/sigma**2*exp(-x**2/(2*sigma**2))
def _cdf(self, x):
sigma = self.sigma
return 1 - exp(-(x**2/(2*sigma**2)))
def _characteristic_function(self, t):
sigma = self.sigma
return 1 - sigma*t*exp(-sigma**2*t**2/2) * sqrt(pi/2) * (erfi(sigma*t/sqrt(2)) - I)
def _moment_generating_function(self, t):
sigma = self.sigma
return 1 + sigma*t*exp(sigma**2*t**2/2) * sqrt(pi/2) * (erf(sigma*t/sqrt(2)) + 1)
def Rayleigh(name, sigma):
r"""
Create a continuous random variable with a Rayleigh distribution.
The density of the Rayleigh distribution is given by
.. math ::
f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}
with :math:`x > 0`.
Parameters
==========
sigma : Real number, `\sigma > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Rayleigh, density, E, variance
>>> from sympy import Symbol
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Rayleigh("x", sigma)
>>> density(X)(z)
z*exp(-z**2/(2*sigma**2))/sigma**2
>>> E(X)
sqrt(2)*sqrt(pi)*sigma/2
>>> variance(X)
-pi*sigma**2/2 + 2*sigma**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Rayleigh_distribution
.. [2] http://mathworld.wolfram.com/RayleighDistribution.html
"""
return rv(name, RayleighDistribution, (sigma, ))
#-------------------------------------------------------------------------------
# Reciprocal distribution --------------------------------------------------------
class ReciprocalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
_value_check(a > 0, "Parameter > 0. a = %s"%a)
_value_check((a < b),
"Parameter b must be in range (%s, +oo]. b = %s"%(a, b))
def pdf(self, x):
a, b = self.a, self.b
return 1/(x*(log(b) - log(a)))
def Reciprocal(name, a, b):
r"""Creates a continuous random variable with a reciprocal distribution.
Parameters
==========
a : Real number, :math:`0 < a`
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Reciprocal, density, cdf
>>> from sympy import symbols
>>> a, b, x = symbols('a, b, x', positive=True)
>>> R = Reciprocal('R', a, b)
>>> density(R)(x)
1/(x*(-log(a) + log(b)))
>>> cdf(R)(x)
Piecewise((log(a)/(log(a) - log(b)) - log(x)/(log(a) - log(b)), a <= x), (0, True))
    References
    ==========
.. [1] https://en.wikipedia.org/wiki/Reciprocal_distribution
"""
return rv(name, ReciprocalDistribution, (a, b))
#-------------------------------------------------------------------------------
# Shifted Gompertz distribution ------------------------------------------------
class ShiftedGompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
b, eta = self.b, self.eta
return b*exp(-b*x)*exp(-eta*exp(-b*x))*(1+eta*(1-exp(-b*x)))
def ShiftedGompertz(name, b, eta):
r"""
Create a continuous random variable with a Shifted Gompertz distribution.
The density of the Shifted Gompertz distribution is given by
.. math::
        f(x) := b e^{-b x} e^{-\eta \exp(-b x)} \left[1 + \eta\left(1 - e^{-b x}\right) \right]
    with :math:`x \in [0, \infty)`.
Parameters
==========
b: Real number, 'b > 0' a scale
eta: Real number, 'eta > 0' a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ShiftedGompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> x = Symbol("x")
>>> X = ShiftedGompertz("x", b, eta)
>>> density(X)(x)
b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
References
==========
.. [1] https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution
"""
return rv(name, ShiftedGompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTDistribution(SingleContinuousDistribution):
_argnames = ('nu',)
set = Interval(-oo, oo)
@staticmethod
def check(nu):
_value_check(nu > 0, "Degrees of freedom nu must be positive.")
def pdf(self, x):
nu = self.nu
return 1/(sqrt(nu)*beta_fn(S.Half, nu/2))*(1 + x**2/nu)**(-(nu + 1)/2)
def _cdf(self, x):
nu = self.nu
return S.Half + x*gamma((nu+1)/2)*hyper((S.Half, (nu+1)/2),
(Rational(3, 2),), -x**2/nu)/(sqrt(pi*nu)*gamma(nu/2))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the Student-T distribution is undefined.')
def StudentT(name, nu):
r"""
Create a continuous random variable with a student's t distribution.
The density of the student's t distribution is given by
.. math::
f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
{\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
\left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
Parameters
==========
nu : Real number, `\nu > 0`, the degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import StudentT, density, cdf
>>> from sympy import Symbol, pprint
>>> nu = Symbol("nu", positive=True)
>>> z = Symbol("z")
>>> X = StudentT("x", nu)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
nu 1
- -- - -
2 2
/ 2\
| z |
|1 + --|
\ nu/
-----------------
____ / nu\
\/ nu *B|1/2, --|
\ 2 /
>>> cdf(X)(z)
1/2 + z*gamma(nu/2 + 1/2)*hyper((1/2, nu/2 + 1/2), (3/2,),
-z**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Student_t-distribution
.. [2] http://mathworld.wolfram.com/Studentst-Distribution.html
"""
return rv(name, StudentTDistribution, (nu, ))
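# Illustrative sketch (not part of the library API): with one degree of freedom the
# Student t density should coincide with the standard Cauchy density 1/(pi*(1 + z**2)).
# Rewriting the beta function in terms of gamma lets the comparison simplify.
# Hypothetical helper, not called at import time.
def _studentt_one_df_is_cauchy_example():
    from sympy import Symbol, gamma, simplify
    from sympy.stats import Cauchy, StudentT, density
    z = Symbol("z")
    T = StudentT("t", 1)
    C = Cauchy("c", 0, 1)
    diff_expr = (density(T)(z) - density(C)(z)).rewrite(gamma)
    return simplify(diff_expr)  # expected to simplify to zero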
#-------------------------------------------------------------------------------
# Trapezoidal distribution ------------------------------------------------------
class TrapezoidalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c', 'd')
@property
def set(self):
return Interval(self.a, self.d)
@staticmethod
def check(a, b, c, d):
_value_check(a < d, "Lower bound parameter a < %s. a = %s"%(d, a))
_value_check((a <= b, b < c),
"Level start parameter b must be in range [%s, %s). b = %s"%(a, c, b))
_value_check((b < c, c <= d),
"Level end parameter c must be in range (%s, %s]. c = %s"%(b, d, c))
_value_check(d >= c, "Upper bound parameter d > %s. d = %s"%(c, d))
def pdf(self, x):
a, b, c, d = self.a, self.b, self.c, self.d
return Piecewise(
(2*(x-a) / ((b-a)*(d+c-a-b)), And(a <= x, x < b)),
(2 / (d+c-a-b), And(b <= x, x < c)),
(2*(d-x) / ((d-c)*(d+c-a-b)), And(c <= x, x <= d)),
(S.Zero, True))
def Trapezoidal(name, a, b, c, d):
r"""
Create a continuous random variable with a trapezoidal distribution.
The density of the trapezoidal distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(d+c-a-b)} & \mathrm{for\ } a \le x < b, \\
\frac{2}{d+c-a-b} & \mathrm{for\ } b \le x < c, \\
\frac{2(d-x)}{(d-c)(d+c-a-b)} & \mathrm{for\ } c \le x < d, \\
0 & \mathrm{for\ } d < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a < d`
    b : Real number, :math:`a \le b < c`
    c : Real number, :math:`b < c \le d`
d : Real number
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Trapezoidal, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> d = Symbol("d")
>>> z = Symbol("z")
>>> X = Trapezoidal("x", a,b,c,d)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|------------------------- for And(a <= z, b > z)
|(-a + b)*(-a - b + c + d)
|
| 2
| -------------- for And(b <= z, c > z)
< -a - b + c + d
|
| 2*d - 2*z
|------------------------- for And(d >= z, c <= z)
|(-c + d)*(-a - b + c + d)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Trapezoidal_distribution
"""
return rv(name, TrapezoidalDistribution, (a, b, c, d))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b, c):
_value_check(b > a, "Parameter b > %s. b = %s"%(a, b))
_value_check((a <= c, c <= b),
"Parameter c must be in range [%s, %s]. c = %s"%(a, b, c))
def pdf(self, x):
a, b, c = self.a, self.b, self.c
return Piecewise(
(2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)),
(2/(b - a), Eq(x, c)),
(2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)),
(S.Zero, True))
def _characteristic_function(self, t):
a, b, c = self.a, self.b, self.c
return -2 *((b-c) * exp(I*a*t) - (b-a) * exp(I*c*t) + (c-a) * exp(I*b*t)) / ((b-a)*(c-a)*(b-c)*t**2)
def _moment_generating_function(self, t):
a, b, c = self.a, self.b, self.c
return 2 * ((b - c) * exp(a * t) - (b - a) * exp(c * t) + (c - a) * exp(b * t)) / (
(b - a) * (c - a) * (b - c) * t ** 2)
def Triangular(name, a, b, c):
r"""
Create a continuous random variable with a triangular distribution.
The density of the triangular distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
\frac{2}{b-a} & \mathrm{for\ } x = c, \\
\frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
0 & \mathrm{for\ } b < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a \in \left(-\infty, \infty\right)`
b : Real number, :math:`a < b`
c : Real number, :math:`a \leq c \leq b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Triangular, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> z = Symbol("z")
>>> X = Triangular("x", a,b,c)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|----------------- for And(a <= z, c > z)
|(-a + b)*(-a + c)
|
| 2
| ------ for c = z
< -a + b
|
| 2*b - 2*z
|---------------- for And(b >= z, c < z)
|(-a + b)*(b - c)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_distribution
.. [2] http://mathworld.wolfram.com/TriangularDistribution.html
"""
return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformDistribution(SingleContinuousDistribution):
_argnames = ('left', 'right')
@property
def set(self):
return Interval(self.left, self.right)
@staticmethod
def check(left, right):
_value_check(left < right, "Lower limit should be less than Upper limit.")
def pdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.One/(right - left), And(left <= x, x <= right)),
(S.Zero, True)
)
def _cdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.Zero, x < left),
((x - left)/(right - left), x <= right),
(S.One, True)
)
def _characteristic_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(I*t*right) - exp(I*t*left)) / (I*t*(right - left)), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(t*right) - exp(t*left)) / (t * (right - left)), Ne(t, 0)),
(S.One, True))
def expectation(self, expr, var, **kwargs):
from sympy import Max, Min
kwargs['evaluate'] = True
result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs)
result = result.subs({Max(self.left, self.right): self.right,
Min(self.left, self.right): self.left})
return result
def Uniform(name, left, right):
r"""
Create a continuous random variable with a uniform distribution.
The density of the uniform distribution is given by
.. math::
f(x) := \begin{cases}
\frac{1}{b - a} & \text{for } x \in [a,b] \\
0 & \text{otherwise}
\end{cases}
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number, :math:`-\infty < a` the left boundary
b : Real number, :math:`a < b < \infty` the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Uniform, density, cdf, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", negative=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Uniform("x", a, b)
>>> density(X)(z)
Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))
>>> cdf(X)(z)
Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))
>>> E(X)
a/2 + b/2
>>> simplify(variance(X))
a**2/12 - a*b/6 + b**2/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
.. [2] http://mathworld.wolfram.com/UniformDistribution.html
"""
return rv(name, UniformDistribution, (left, right))
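# Illustrative sketch (not part of the library API): for a uniform variable the midpoint
# of the interval is the median, so the cdf there should simplify to one half. The
# assumptions a < 0 < b let the Piecewise conditions resolve. Hypothetical helper, not
# called at import time.
def _uniform_midpoint_is_median_example():
    from sympy import Symbol, simplify
    from sympy.stats import Uniform, cdf
    a = Symbol("a", negative=True)
    b = Symbol("b", positive=True)
    X = Uniform("x", a, b)
    return simplify(cdf(X)((a + b)/2))  # expected to simplify to 1/2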
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumDistribution(SingleContinuousDistribution):
_argnames = ('n',)
@property
def set(self):
return Interval(0, self.n)
@staticmethod
def check(n):
_value_check((n > 0, n.is_integer),
"Parameter n must be positive integer.")
def pdf(self, x):
n = self.n
k = Dummy("k")
return 1/factorial(
n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x)))
def _cdf(self, x):
n = self.n
k = Dummy("k")
return Piecewise((S.Zero, x < 0),
(1/factorial(n)*Sum((-1)**k*binomial(n, k)*(x - k)**(n),
(k, 0, floor(x))), x <= n),
(S.One, True))
def _characteristic_function(self, t):
return ((exp(I*t) - 1) / (I*t))**self.n
def _moment_generating_function(self, t):
return ((exp(t) - 1) / t)**self.n
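    # An Irwin-Hall variable is the sum of n independent U(0, 1) variables, so its
    # characteristic function and MGF above are simply the n-th powers of the
    # U(0, 1) ones, e.g. phi(t) = ((exp(I*t) - 1)/(I*t))**n.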
def UniformSum(name, n):
r"""
Create a continuous random variable with an Irwin-Hall distribution.
The probability distribution function depends on a single parameter
`n` which is an integer.
The density of the Irwin-Hall distribution is given by
.. math ::
f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\left\lfloor x\right\rfloor}(-1)^k
\binom{n}{k}(x-k)^{n-1}
Parameters
==========
n : A positive Integer, `n > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import UniformSum, density, cdf
>>> from sympy import Symbol, pprint
>>> n = Symbol("n", integer=True)
>>> z = Symbol("z")
>>> X = UniformSum("x", n)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
floor(z)
___
\ `
\ k n - 1 /n\
) (-1) *(-k + z) *| |
/ \k/
/__,
k = 0
--------------------------------
(n - 1)!
>>> cdf(X)(z)
Piecewise((0, z < 0), (Sum((-1)**_k*(-_k + z)**n*binomial(n, _k),
(_k, 0, floor(z)))/factorial(n), n >= z), (1, True))
    Compute the cdf with specific 'x' and 'n' values as follows:
>>> cdf(UniformSum("x", 5), evaluate=False)(2).doit()
9/40
The argument evaluate=False prevents an attempt at evaluation
of the sum for general n, before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_sum_distribution
.. [2] http://mathworld.wolfram.com/UniformSumDistribution.html
"""
return rv(name, UniformSumDistribution, (n, ))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
class VonMisesDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'k')
set = Interval(0, 2*pi)
@staticmethod
def check(mu, k):
_value_check(k > 0, "k must be positive")
def pdf(self, x):
mu, k = self.mu, self.k
return exp(k*cos(x-mu)) / (2*pi*besseli(0, k))
def VonMises(name, mu, k):
r"""
Create a Continuous Random Variable with a von Mises distribution.
The density of the von Mises distribution is given by
.. math::
f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}
with :math:`x \in [0,2\pi]`.
Parameters
==========
mu : Real number, measure of location
k : Real number, measure of concentration
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import VonMises, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = VonMises("x", mu, k)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k*cos(mu - z)
e
------------------
2*pi*besseli(0, k)
References
==========
.. [1] https://en.wikipedia.org/wiki/Von_Mises_distribution
.. [2] http://mathworld.wolfram.com/vonMisesDistribution.html
"""
return rv(name, VonMisesDistribution, (mu, k))
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Alpha must be positive")
_value_check(beta > 0, "Beta must be positive")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha
def Weibull(name, alpha, beta):
r"""
Create a continuous random variable with a Weibull distribution.
The density of the Weibull distribution is given by
.. math::
f(x) := \begin{cases}
\frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
e^{-(x/\lambda)^{k}} & x\geq0\\
0 & x<0
\end{cases}
Parameters
==========
lambda : Real number, :math:`\lambda > 0` a scale
k : Real number, `k > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Weibull, density, E, variance
>>> from sympy import Symbol, simplify
>>> l = Symbol("lambda", positive=True)
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = Weibull("x", l, k)
>>> density(X)(z)
k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda
>>> simplify(E(X))
lambda*gamma(1 + 1/k)
>>> simplify(variance(X))
lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k))
References
==========
.. [1] https://en.wikipedia.org/wiki/Weibull_distribution
.. [2] http://mathworld.wolfram.com/WeibullDistribution.html
"""
return rv(name, WeibullDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicircleDistribution(SingleContinuousDistribution):
_argnames = ('R',)
@property
def set(self):
return Interval(-self.R, self.R)
@staticmethod
def check(R):
_value_check(R > 0, "Radius R must be positive.")
def pdf(self, x):
R = self.R
return 2/(pi*R**2)*sqrt(R**2 - x**2)
def _characteristic_function(self, t):
return Piecewise((2 * besselj(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
return Piecewise((2 * besseli(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
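    # For the semicircle law on [-R, R] the characteristic function is
    # 2*J_1(R*t)/(R*t) and the MGF is 2*I_1(R*t)/(R*t), matching the besselj and
    # besseli expressions above; the Ne(t, 0) branch supplies the value 1 at t = 0.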
def WignerSemicircle(name, R):
r"""
Create a continuous random variable with a Wigner semicircle distribution.
The density of the Wigner semicircle distribution is given by
.. math::
f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}
with :math:`x \in [-R,R]`.
Parameters
==========
R : Real number, `R > 0`, the radius
Returns
=======
    RandomSymbol
Examples
========
>>> from sympy.stats import WignerSemicircle, density, E
>>> from sympy import Symbol
>>> R = Symbol("R", positive=True)
>>> z = Symbol("z")
>>> X = WignerSemicircle("x", R)
>>> density(X)(z)
2*sqrt(R**2 - z**2)/(pi*R**2)
>>> E(X)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
.. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
"""
return rv(name, WignerSemicircleDistribution, (R,))
"""
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
BetaBinomial
Hypergeometric
Rademacher
IdealSoliton
RobustSoliton
"""
from sympy import (S, sympify, Rational, binomial, cacheit, Integer,
Dummy, Eq, Intersection, Interval, log, Range,
Symbol, Lambda, Piecewise, Or, Gt, Lt, Ge, Le, Contains)
from sympy import beta as beta_fn
from sympy.stats.frv import (SingleFiniteDistribution,
SingleFinitePSpace)
from sympy.stats.rv import _value_check, Density, is_random
__all__ = ['FiniteRV',
'DiscreteUniform',
'Die',
'Bernoulli',
'Coin',
'Binomial',
'BetaBinomial',
'Hypergeometric',
'Rademacher',
'IdealSoliton',
'RobustSoliton',
]
def rv(name, cls, *args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleFinitePSpace(name, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(name, CompoundDistribution(dist))
return pspace.value
class FiniteDistributionHandmade(SingleFiniteDistribution):
@property
def dict(self):
return self.args[0]
def pmf(self, x):
x = Symbol('x')
return Lambda(x, Piecewise(*(
[(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)])))
@property
def set(self):
return set(self.dict.keys())
@staticmethod
def check(density):
for p in density.values():
_value_check((p >= 0, p <= 1),
"Probability at a point must be between 0 and 1.")
val = sum(density.values())
_value_check(Eq(val, 1) != S.false, "Total Probability must be 1.")
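        # ``Eq(val, 1) != S.false`` intentionally accepts symbolic densities:
        # if the probabilities contain free symbols the equality cannot be
        # decided, Eq stays unevaluated, and the check passes rather than
        # rejecting a potentially valid density.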
def FiniteRV(name, density, **kwargs):
r"""
Create a Finite Random Variable given a dict representing the density.
Parameters
==========
name : Symbol
Represents name of the random variable.
density: A dict
        Dictionary containing the pdf of the finite distribution
check : bool
If True, it will check whether the given density
        sums to 1 over the given set. If False, it
will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import FiniteRV, P, E
>>> density = {0: .1, 1: .2, 2: .3, 3: .4}
>>> X = FiniteRV('X', density)
>>> E(X)
2.00000000000000
>>> P(X >= 2)
0.700000000000000
Returns
=======
RandomSymbol
"""
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(name, FiniteDistributionHandmade, density, **kwargs)
class DiscreteUniformDistribution(SingleFiniteDistribution):
@staticmethod
def check(*args):
# not using _value_check since there is a
# suggestion for the user
if len(set(args)) != len(args):
from sympy.utilities.iterables import multiset
from sympy.utilities.misc import filldedent
weights = multiset(args)
n = Integer(len(args))
for k in weights:
weights[k] /= n
raise ValueError(filldedent("""
Repeated args detected but set expected. For a
distribution having different weights for each
item use the following:""") + (
'\nS("FiniteRV(%s, %s)")' % ("'X'", weights)))
@property
def p(self):
return Rational(1, len(self.args))
@property # type: ignore
@cacheit
def dict(self):
return {k: self.p for k in self.set}
@property
def set(self):
return set(self.args)
def pmf(self, x):
if x in self.args:
return self.p
else:
return S.Zero
def DiscreteUniform(name, items):
r"""
Create a Finite Random Variable representing a uniform distribution over
the input set.
Parameters
==========
items: list/tuple
Items over which Uniform distribution is to be made
Examples
========
>>> from sympy.stats import DiscreteUniform, density
>>> from sympy import symbols
>>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
>>> density(X).dict
{a: 1/3, b: 1/3, c: 1/3}
>>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range
>>> density(Y).dict
{0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Discrete_uniform_distribution
.. [2] http://mathworld.wolfram.com/DiscreteUniformDistribution.html
"""
return rv(name, DiscreteUniformDistribution, *items)
class DieDistribution(SingleFiniteDistribution):
_argnames = ('sides',)
@staticmethod
def check(sides):
_value_check((sides.is_positive, sides.is_integer),
"number of sides must be a positive integer.")
@property
def is_symbolic(self):
return not self.sides.is_number
@property
def high(self):
return self.sides
@property
def low(self):
return S.One
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.sides))
return set(map(Integer, list(range(1, self.sides + 1))))
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol' or "
                        "'RandomSymbol', not %s" % (type(x)))
cond = Ge(x, 1) & Le(x, self.sides) & Contains(x, S.Integers)
return Piecewise((S.One/self.sides, cond), (S.Zero, True))
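    # The Contains(x, S.Integers) factor keeps the pmf correct for symbolic
    # arguments; for a concrete non-integer x the condition is False and the
    # Piecewise falls through to 0 rather than returning 1/sides.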
def Die(name, sides=6):
r"""
Create a Finite Random Variable representing a fair die.
Parameters
==========
sides: Integer
Represents the number of sides of the Die, by default is 6
Examples
========
>>> from sympy.stats import Die, density
>>> from sympy import Symbol
>>> D6 = Die('D6', 6) # Six sided Die
>>> density(D6).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> D4 = Die('D4', 4) # Four sided Die
>>> density(D4).dict
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
>>> n = Symbol('n', positive=True, integer=True)
>>> Dn = Die('Dn', n) # n sided Die
>>> density(Dn).dict
Density(DieDistribution(n))
>>> density(Dn).dict.subs(n, 4).doit()
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
Returns
=======
RandomSymbol
"""
return rv(name, DieDistribution, sides)
class BernoulliDistribution(SingleFiniteDistribution):
_argnames = ('p', 'succ', 'fail')
@staticmethod
def check(p, succ, fail):
_value_check((p >= 0, p <= 1),
"p should be in range [0, 1].")
@property
def set(self):
return {self.succ, self.fail}
def pmf(self, x):
if isinstance(self.succ, Symbol) and isinstance(self.fail, Symbol):
return Piecewise((self.p, x == self.succ),
(1 - self.p, x == self.fail),
(S.Zero, True))
return Piecewise((self.p, Eq(x, self.succ)),
(1 - self.p, Eq(x, self.fail)),
(S.Zero, True))
def Bernoulli(name, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a Bernoulli process.
Parameters
==========
p : Rational number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success
fail : Integer/symbol/string
Represents event of failure
Examples
========
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X).dict
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X).dict
{Heads: 1/2, Tails: 1/2}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution
.. [2] http://mathworld.wolfram.com/BernoulliDistribution.html
"""
return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
r"""
Create a Finite Random Variable representing a Coin toss.
Parameters
==========
    p : Rational Number between 0 and 1
Represents probability of getting "Heads", by default is Half
Examples
========
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C).dict
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2).dict
{H: 3/5, T: 2/5}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Binomial
References
==========
.. [1] https://en.wikipedia.org/wiki/Coin_flipping
"""
return rv(name, BernoulliDistribution, p, 'H', 'T')
class BinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'p', 'succ', 'fail')
@staticmethod
def check(n, p, succ, fail):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer.")
_value_check((p <= 1, p >= 0),
"p should be in range [0, 1].")
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(self.dict.keys())
def pmf(self, x):
n, p = self.n, self.p
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol' or "
                        "'RandomSymbol', not %s" % (type(x)))
cond = Ge(x, 0) & Le(x, n) & Contains(x, S.Integers)
return Piecewise((binomial(n, x) * p**x * (1 - p)**(n - x), cond), (S.Zero, True))
@property # type: ignore
@cacheit
def dict(self):
if self.is_symbolic:
return Density(self)
return {k*self.succ + (self.n-k)*self.fail: self.pmf(k)
for k in range(0, self.n + 1)}
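    # The keys of ``dict`` are built as k*succ + (n - k)*fail so that custom
    # success/failure outcomes (e.g. Binomial('X', 3, p, succ=2, fail=-1)) map
    # each count k of successes to the corresponding outcome value.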
def Binomial(name, n, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
p : Rational Number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success, by default is 1
fail : Integer/symbol/string
Represents event of failure, by default is 0
Examples
========
>>> from sympy.stats import Binomial, density
>>> from sympy import S, Symbol
>>> X = Binomial('X', 4, S.Half) # Four "coin flips"
>>> density(X).dict
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
>>> n = Symbol('n', positive=True, integer=True)
>>> p = Symbol('p', positive=True)
>>> X = Binomial('X', n, S.Half) # n "coin flips"
>>> density(X).dict
Density(BinomialDistribution(n, 1/2, 1, 0))
>>> density(X).dict.subs(n, 4).doit()
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Binomial_distribution
.. [2] http://mathworld.wolfram.com/BinomialDistribution.html
"""
return rv(name, BinomialDistribution, n, p, succ, fail)
#-------------------------------------------------------------------------------
# Beta-binomial distribution ----------------------------------------------------------
class BetaBinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'alpha', 'beta')
@staticmethod
def check(n, alpha, beta):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer. n = %s." % str(n))
_value_check((alpha > 0),
"'alpha' must be: alpha > 0 . alpha = %s" % str(alpha))
_value_check((beta > 0),
"'beta' must be: beta > 0 . beta = %s" % str(beta))
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(map(Integer, list(range(0, self.n + 1))))
def pmf(self, k):
n, a, b = self.n, self.alpha, self.beta
return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b)
def BetaBinomial(name, n, alpha, beta):
r"""
Create a Finite Random Variable representing a Beta-binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
alpha : Real positive number
beta : Real positive number
Examples
========
>>> from sympy.stats import BetaBinomial, density
>>> X = BetaBinomial('X', 2, 1, 1)
>>> density(X).dict
{0: 1/3, 1: 2*beta(2, 2), 2: 1/3}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
.. [2] http://mathworld.wolfram.com/BetaBinomialDistribution.html
"""
return rv(name, BetaBinomialDistribution, n, alpha, beta)
class HypergeometricDistribution(SingleFiniteDistribution):
_argnames = ('N', 'm', 'n')
    @staticmethod
    def check(N, m, n):
        _value_check((N.is_integer, N.is_nonnegative),
                     "'N' must be nonnegative integer. N = %s." % str(N))
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer. n = %s." % str(n))
        _value_check((m.is_integer, m.is_nonnegative),
                     "'m' must be nonnegative integer. m = %s." % str(m))
@property
def is_symbolic(self):
return any(not x.is_number for x in (self.N, self.m, self.n))
@property
def high(self):
return Piecewise((self.n, Lt(self.n, self.m) != False), (self.m, True))
@property
def low(self):
return Piecewise((0, Gt(0, self.n + self.m - self.N) != False), (self.n + self.m - self.N, True))
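    # high/low encode the usual hypergeometric support bounds
    # max(0, n + m - N) <= k <= min(n, m); the ``!= False`` comparisons keep a
    # usable Piecewise branch when N, m, n are symbolic and the inequality
    # cannot be decided either way.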
@property
def set(self):
N, m, n = self.N, self.m, self.n
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(self.low, self.high))
return {i for i in range(max(0, n + m - N), min(n, m) + 1)}
def pmf(self, k):
N, m, n = self.N, self.m, self.n
return S(binomial(m, k) * binomial(N - m, n - k))/binomial(N, n)
def Hypergeometric(name, N, m, n):
r"""
Create a Finite Random Variable representing a hypergeometric distribution.
Parameters
==========
N : Positive Integer
Represents finite population of size N.
m : Positive Integer
        Represents the number of items in the population having the required feature.
    n : Positive Integer
        Represents the number of draws.
Examples
========
>>> from sympy.stats import Hypergeometric, density
>>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
>>> density(X).dict
{0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Hypergeometric_distribution
.. [2] http://mathworld.wolfram.com/HypergeometricDistribution.html
"""
return rv(name, HypergeometricDistribution, N, m, n)
class RademacherDistribution(SingleFiniteDistribution):
@property
def set(self):
return {-1, 1}
@property
def pmf(self):
k = Dummy('k')
return Lambda(k, Piecewise((S.Half, Or(Eq(k, -1), Eq(k, 1))), (S.Zero, True)))
def Rademacher(name):
r"""
Create a Finite Random Variable representing a Rademacher distribution.
Examples
========
>>> from sympy.stats import Rademacher, density
>>> X = Rademacher('X')
>>> density(X).dict
{-1: 1/2, 1: 1/2}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Bernoulli
References
==========
.. [1] https://en.wikipedia.org/wiki/Rademacher_distribution
"""
return rv(name, RademacherDistribution)
class IdealSolitonDistribution(SingleFiniteDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k.is_integer and k.is_positive,
"'k' must be a positive integer.")
@property
def low(self):
return S.One
@property
def high(self):
return self.k
@property
def set(self):
return set(list(Range(1, self.k+1)))
@property
@cacheit
def dict(self):
if self.k.is_Symbol:
return Density(self)
d = {1: Rational(1, self.k)}
d.update(dict((i, Rational(1, i*(i - 1))) for i in range(2, self.k + 1)))
return d
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol' or "
                        "'RandomSymbol', not %s" % (type(x)))
cond1 = Eq(x, 1) & x.is_integer
cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
return Piecewise((1/self.k, cond1), (1/(x*(x - 1)), cond2), (S.Zero, True))
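    # pmf mirrors ``dict``: rho(1) = 1/k and rho(i) = 1/(i*(i - 1)) for
    # 2 <= i <= k, written as a Piecewise so that a symbolic degree x can be
    # handled; cond1 is checked first, so x = 1 takes the 1/k branch.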
def IdealSoliton(name, k):
r"""
Create a Finite Random Variable of Ideal Soliton Distribution
Parameters
==========
k : Positive Integer
Represents the number of input symbols in an LT (Luby Transform) code.
Examples
========
>>> from sympy.stats import IdealSoliton, density, P, E
>>> sol = IdealSoliton('sol', 5)
>>> density(sol).dict
{1: 1/5, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20}
>>> density(sol).set
{1, 2, 3, 4, 5}
>>> from sympy import Symbol
>>> k = Symbol('k', positive=True, integer=True)
>>> sol = IdealSoliton('sol', k)
>>> density(sol).dict
Density(IdealSolitonDistribution(k))
>>> density(sol).dict.subs(k, 10).doit()
{1: 1/10, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20, 6: 1/30, 7: 1/42, 8: 1/56, 9: 1/72, 10: 1/90}
>>> E(sol.subs(k, 10))
7381/2520
>>> P(sol.subs(k, 4) > 2)
1/4
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Ideal_distribution
.. [2] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf
"""
return rv(name, IdealSolitonDistribution, k)
class RobustSolitonDistribution(SingleFiniteDistribution):
_argnames= ('k', 'delta', 'c')
@staticmethod
def check(k, delta, c):
_value_check(k.is_integer and k.is_positive,
"'k' must be a positive integer")
_value_check(Gt(delta, 0) and Le(delta, 1),
"'delta' must be a real number in the interval (0,1)")
_value_check(c.is_positive,
"'c' must be a positive real number.")
@property
def R(self):
return self.c * log(self.k/self.delta) * self.k**0.5
@property
def Z(self):
z = 0
for i in Range(1, round(self.k/self.R)):
z += (1/i)
z += log(self.R/self.delta)
return 1 + z * self.R/self.k
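    # R and Z follow the robust soliton construction in the LT-codes paper cited
    # in RobustSoliton below: R = c*log(k/delta)*sqrt(k) locates the extra spike
    # of the tau component, and Z (the sum of rho(i) + tau(i) over all degrees,
    # computed here in closed form) is the normalising constant used in pmf().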
@property
def low(self):
return S.One
@property
def high(self):
return self.k
@property
def set(self):
return set(list(Range(1, self.k+1)))
@property
def is_symbolic(self):
return not all([self.k.is_number, self.c.is_number, self.delta.is_number])
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol' or "
                        "'RandomSymbol', not %s" % (type(x)))
cond1 = Eq(x, 1) & x.is_integer
cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
rho = Piecewise((Rational(1, self.k), cond1), (Rational(1, x*(x-1)), cond2), (S.Zero, True))
cond1 = Ge(x, 1) & Le(x, round(self.k/self.R)-1)
cond2 = Eq(x, round(self.k/self.R))
tau = Piecewise((self.R/(self.k * x), cond1), (self.R * log(self.R/self.delta)/self.k, cond2), (S.Zero, True))
return (rho + tau)/self.Z
def RobustSoliton(name, k, delta, c):
r'''
Create a Finite Random Variable of Robust Soliton Distribution
Parameters
==========
k : Positive Integer
Represents the number of input symbols in an LT (Luby Transform) code.
delta : Positive Rational Number
Represents the failure probability. Must be in the interval (0,1).
c : Positive Rational Number
        Constant of proportionality. Values close to 1 are recommended.
Examples
========
>>> from sympy.stats import RobustSoliton, density, P, E
>>> robSol = RobustSoliton('robSol', 5, 0.5, 0.01)
>>> density(robSol).dict
{1: 0.204253668152708, 2: 0.490631107897393, 3: 0.165210624506162, 4: 0.0834387731899302, 5: 0.0505633404760675}
>>> density(robSol).set
{1, 2, 3, 4, 5}
>>> from sympy import Symbol
>>> k = Symbol('k', positive=True, integer=True)
>>> c = Symbol('c', positive=True)
>>> robSol = RobustSoliton('robSol', k, 0.5, c)
>>> density(robSol).dict
Density(RobustSolitonDistribution(k, 0.5, c))
>>> density(robSol).dict.subs(k, 10).subs(c, 0.03).doit()
{1: 0.116641095387194, 2: 0.467045731687165, 3: 0.159984123349381, 4: 0.0821431680681869, 5: 0.0505765646770100,
6: 0.0345781523420719, 7: 0.0253132820710503, 8: 0.0194459129233227, 9: 0.0154831166726115, 10: 0.0126733075238887}
>>> E(robSol.subs(k, 10).subs(c, 0.05))
2.91358846104106
>>> P(robSol.subs(k, 4).subs(c, 0.1) > 2)
0.243650614389834
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Robust_distribution
.. [2] http://www.inference.org.uk/mackay/itprnn/ps/588.596.pdf
.. [3] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf
'''
return rv(name, RobustSolitonDistribution, k, delta, c)
import random
import itertools
from typing import Sequence as tSequence, Union as tUnion, List as tList, Tuple as tTuple
from sympy import (Matrix, MatrixSymbol, S, Indexed, Basic, Tuple, Range,
Set, And, Eq, FiniteSet, ImmutableMatrix, Integer, igcd,
Lambda, Mul, Dummy, IndexedBase, Add, Interval, oo,
linsolve, eye, Or, Not, Intersection, factorial, Contains,
Union, Expr, Function, exp, cacheit, sqrt, pi, gamma,
Ge, Piecewise, Symbol, NonSquareMatrixError, EmptySet,
ceiling, MatrixBase, ConditionSet, ones, zeros, Identity,
Rational, Lt, Gt, Le, Ne, BlockMatrix, Sum)
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import strongly_connected_components
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import (RandomIndexedSymbol, random_symbols, RandomSymbol,
_symbol_converter, _value_check, pspace, given,
dependent, is_random, sample_iter)
from sympy.stats.stochastic_process import StochasticPSpace
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.stats.frv_types import Bernoulli, BernoulliDistribution, FiniteRV
from sympy.stats.drv_types import Poisson, PoissonDistribution
from sympy.stats.crv_types import Normal, NormalDistribution, Gamma, GammaDistribution
from sympy.core.sympify import _sympify, sympify
__all__ = [
'StochasticProcess',
'DiscreteTimeStochasticProcess',
'DiscreteMarkovChain',
'TransitionMatrixOf',
'StochasticStateSpaceOf',
'GeneratorMatrixOf',
'ContinuousMarkovChain',
'BernoulliProcess',
'PoissonProcess',
'WienerProcess',
'GammaProcess'
]
@is_random.register(Indexed)
def _(x):
return is_random(x.base)
@is_random.register(RandomIndexedSymbol) # type: ignore
def _(x):
return True
def _set_converter(itr):
"""
Helper function for converting list/tuple/set to Set.
    If the parameter is already an instance of Set then
    no conversion is performed.
Returns
=======
Set
The argument converted to Set.
Raises
======
TypeError
        If the argument is not an instance of list/tuple/set or Set.
"""
if isinstance(itr, (list, tuple, set)):
itr = FiniteSet(*itr)
if not isinstance(itr, Set):
raise TypeError("%s is not an instance of list/tuple/set."%(itr))
return itr
def _state_converter(itr: tSequence) -> tUnion[Tuple, Range]:
"""
Helper function for converting list/tuple/set/Range/Tuple/FiniteSet
to tuple/Range.
"""
if isinstance(itr, (Tuple, set, FiniteSet)):
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, (list, tuple)):
# check if states are unique
if len(set(itr)) != len(itr):
raise ValueError('The state space must have unique elements.')
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, Range):
# the only ordered set in sympy I know of
# try to convert to tuple
try:
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
except ValueError:
pass
else:
raise TypeError("%s is not an instance of list/tuple/set/Range/Tuple/FiniteSet." % (itr))
return itr
def _sym_sympify(arg):
"""
Converts an arbitrary expression to a type that can be used inside SymPy.
    Since strings are generally unwise to use in expressions,
    it returns the Symbol of the argument if a string argument is passed.
    Parameters
    ==========
    arg: The parameter to be converted for use in SymPy.
Returns
=======
The converted parameter.
"""
if isinstance(arg, str):
return Symbol(arg)
else:
return _sympify(arg)
def _matrix_checks(matrix):
if not isinstance(matrix, (Matrix, MatrixSymbol, ImmutableMatrix)):
raise TypeError("Transition probabilities either should "
"be a Matrix or a MatrixSymbol.")
if matrix.shape[0] != matrix.shape[1]:
raise NonSquareMatrixError("%s is not a square matrix"%(matrix))
if isinstance(matrix, Matrix):
matrix = ImmutableMatrix(matrix.tolist())
return matrix
class StochasticProcess(Basic):
"""
Base class for all the stochastic processes whether
discrete or continuous.
Parameters
==========
sym: Symbol or str
state_space: Set
The state space of the stochastic process, by default S.Reals.
For discrete sets it is zero indexed.
See Also
========
DiscreteTimeStochasticProcess
"""
index_set = S.Reals
def __new__(cls, sym, state_space=S.Reals, **kwargs):
sym = _symbol_converter(sym)
state_space = _set_converter(state_space)
return Basic.__new__(cls, sym, state_space)
@property
def symbol(self):
return self.args[0]
@property
def state_space(self) -> tUnion[FiniteSet, Range]:
if not isinstance(self.args[1], (FiniteSet, Range)):
return FiniteSet(*self.args[1])
return self.args[1]
@property
def distribution(self):
return None
def __call__(self, time):
"""
Overridden in ContinuousTimeStochasticProcess.
"""
raise NotImplementedError("Use [] for indexing discrete time stochastic process.")
def __getitem__(self, time):
"""
Overridden in DiscreteTimeStochasticProcess.
"""
raise NotImplementedError("Use () for indexing continuous time stochastic process.")
def probability(self, condition):
raise NotImplementedError()
def joint_distribution(self, *args):
"""
Computes the joint distribution of the random indexed variables.
Parameters
==========
args: iterable
The finite list of random indexed variables/the key of a stochastic
process whose joint distribution has to be computed.
Returns
=======
JointDistribution
The joint distribution of the list of random indexed variables.
An unevaluated object is returned if it is not possible to
compute the joint distribution.
Raises
======
        ValueError: When the arguments passed are not of type RandomIndexedSymbol
or Number.
"""
args = list(args)
for i, arg in enumerate(args):
if S(arg).is_Number:
if self.index_set.is_subset(S.Integers):
args[i] = self.__getitem__(arg)
else:
args[i] = self.__call__(arg)
elif not isinstance(arg, RandomIndexedSymbol):
raise ValueError("Expected a RandomIndexedSymbol or "
"key not %s"%(type(arg)))
if args[0].pspace.distribution == None: # checks if there is any distribution available
return JointDistribution(*args)
pdf = Lambda(tuple(args),
expr=Mul.fromiter(arg.pspace.process.density(arg) for arg in args))
return JointDistributionHandmade(pdf)
def expectation(self, condition, given_condition):
raise NotImplementedError("Abstract method for expectation queries.")
def sample(self):
raise NotImplementedError("Abstract method for sampling queries.")
class DiscreteTimeStochasticProcess(StochasticProcess):
"""
Base class for all discrete stochastic processes.
"""
def __getitem__(self, time):
"""
For indexing discrete time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
time = sympify(time)
if not time.is_symbol and time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
idx_obj = Indexed(self.symbol, time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution)
return RandomIndexedSymbol(idx_obj, pspace_obj)
class ContinuousTimeStochasticProcess(StochasticProcess):
"""
Base class for all continuous time stochastic process.
"""
def __call__(self, time):
"""
For indexing continuous time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
time = sympify(time)
if not time.is_symbol and time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
func_obj = Function(self.symbol)(time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution)
return RandomIndexedSymbol(func_obj, pspace_obj)
class TransitionMatrixOf(Boolean):
"""
Assumes that the matrix is the transition matrix
of the process.
"""
def __new__(cls, process, matrix):
if not isinstance(process, DiscreteMarkovChain):
            raise ValueError("Currently only DiscreteMarkovChain "
                            "supports TransitionMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
process = property(lambda self: self.args[0])
matrix = property(lambda self: self.args[1])
class GeneratorMatrixOf(TransitionMatrixOf):
"""
Assumes that the matrix is the generator matrix
of the process.
"""
def __new__(cls, process, matrix):
if not isinstance(process, ContinuousMarkovChain):
            raise ValueError("Currently only ContinuousMarkovChain "
                            "supports GeneratorMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
class StochasticStateSpaceOf(Boolean):
def __new__(cls, process, state_space):
if not isinstance(process, (DiscreteMarkovChain, ContinuousMarkovChain)):
raise ValueError("Currently only DiscreteMarkovChain and ContinuousMarkovChain "
"support StochasticStateSpaceOf.")
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
state_index = Range(ss_size)
return Basic.__new__(cls, process, state_index)
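    # Only the *size* of the supplied state space is retained: the states are
    # replaced by the index Range(ss_size), matching the convention elsewhere in
    # this module that probability queries are answered for zero-indexed states.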
process = property(lambda self: self.args[0])
state_index = property(lambda self: self.args[1])
class MarkovProcess(StochasticProcess):
"""
Contains methods that handle queries
common to Markov processes.
"""
@property
def number_of_states(self) -> tUnion[Integer, Symbol]:
"""
The number of states in the Markov Chain.
"""
return _sympify(self.args[2].shape[0])
@property
def _state_index(self) -> Range:
"""
Returns state index as Range.
"""
return self.args[1]
@classmethod
def _sanity_checks(cls, state_space, trans_probs):
# Try to never have None as state_space or trans_probs.
# This helps a lot if we get it done at the start.
if (state_space is None) and (trans_probs is None):
_n = Dummy('n', integer=True, nonnegative=True)
state_space = _state_converter(Range(_n))
trans_probs = _matrix_checks(MatrixSymbol('_T', _n, _n))
elif state_space is None:
trans_probs = _matrix_checks(trans_probs)
state_space = _state_converter(Range(trans_probs.shape[0]))
elif trans_probs is None:
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
_n = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
_n = len(state_space)
trans_probs = MatrixSymbol('_T', _n, _n)
else:
state_space = _state_converter(state_space)
trans_probs = _matrix_checks(trans_probs)
# Range object doesn't want to give a symbolic size
# so we do it ourselves.
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
if ss_size != trans_probs.shape[0]:
raise ValueError('The size of the state space and the number of '
'rows of the transition matrix must be the same.')
return state_space, trans_probs
def _extract_information(self, given_condition):
"""
        Helper function to extract information such as the
        transition matrix/generator matrix, state space, etc.
"""
if isinstance(self, DiscreteMarkovChain):
trans_probs = self.transition_probabilities
state_index = self._state_index
elif isinstance(self, ContinuousMarkovChain):
trans_probs = self.generator_matrix
state_index = self._state_index
if isinstance(given_condition, And):
gcs = given_condition.args
given_condition = S.true
for gc in gcs:
if isinstance(gc, TransitionMatrixOf):
trans_probs = gc.matrix
if isinstance(gc, StochasticStateSpaceOf):
state_index = gc.state_index
if isinstance(gc, Relational):
given_condition = given_condition & gc
if isinstance(given_condition, TransitionMatrixOf):
trans_probs = given_condition.matrix
given_condition = S.true
if isinstance(given_condition, StochasticStateSpaceOf):
state_index = given_condition.state_index
given_condition = S.true
return trans_probs, state_index, given_condition
def _check_trans_probs(self, trans_probs, row_sum=1):
"""
Helper function for checking the validity of transition
probabilities.
"""
if not isinstance(trans_probs, MatrixSymbol):
rows = trans_probs.tolist()
for row in rows:
if (sum(row) - row_sum) != 0:
raise ValueError("Values in a row must sum to %s. "
"If you are using Float or floats then please use Rational."%(row_sum))
def _work_out_state_index(self, state_index, given_condition, trans_probs):
"""
Helper function to extract state space if there
is a random symbol in the given condition.
"""
# if given condition is None, then there is no need to work out
# state_space from random variables
if given_condition != None:
rand_var = list(given_condition.atoms(RandomSymbol) -
given_condition.atoms(RandomIndexedSymbol))
if len(rand_var) == 1:
state_index = rand_var[0].pspace.set
# `not None` is `True`. So the old test fails for symbolic sizes.
# Need to build the statement differently.
sym_cond = not isinstance(self.number_of_states, (int, Integer))
cond1 = not sym_cond and len(state_index) != trans_probs.shape[0]
if cond1:
raise ValueError("state space is not compatible with the transition probabilities.")
if not isinstance(trans_probs.shape[0], Symbol):
state_index = FiniteSet(*[i for i in range(trans_probs.shape[0])])
return state_index
@cacheit
def _preprocess(self, given_condition, evaluate):
"""
Helper function for pre-processing the information.
"""
is_insufficient = False
if not evaluate: # avoid pre-processing if the result is not to be evaluated
return (True, None, None, None)
# extracting transition matrix and state space
trans_probs, state_index, given_condition = self._extract_information(given_condition)
# given_condition does not have sufficient information
# for computations
if trans_probs == None or \
given_condition == None:
is_insufficient = True
else:
# checking transition probabilities
if isinstance(self, DiscreteMarkovChain):
self._check_trans_probs(trans_probs, row_sum=1)
elif isinstance(self, ContinuousMarkovChain):
self._check_trans_probs(trans_probs, row_sum=0)
# working out state space
state_index = self._work_out_state_index(state_index, given_condition, trans_probs)
return is_insufficient, trans_probs, state_index, given_condition
def replace_with_index(self, condition):
if isinstance(condition, Relational):
lhs, rhs = condition.lhs, condition.rhs
if not isinstance(lhs, RandomIndexedSymbol):
lhs, rhs = rhs, lhs
condition = type(condition)(self.index_of.get(lhs, lhs),
self.index_of.get(rhs, rhs))
return condition
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Handles probability queries for Markov process.
Parameters
==========
condition: Relational
given_condition: Relational/And
Returns
=======
Probability
If the information is not sufficient.
Expr
In all other cases.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, new_given_condition = \
self._preprocess(given_condition, evaluate)
rv = list(condition.atoms(RandomIndexedSymbol))
symbolic = False
for sym in rv:
if sym.key.is_symbol:
symbolic = True
break
if check:
return Probability(condition, new_given_condition)
if isinstance(self, ContinuousMarkovChain):
trans_probs = self.transition_probabilities(mat)
elif isinstance(self, DiscreteMarkovChain):
trans_probs = mat
condition = self.replace_with_index(condition)
given_condition = self.replace_with_index(given_condition)
new_given_condition = self.replace_with_index(new_given_condition)
if isinstance(condition, Relational):
if isinstance(new_given_condition, And):
gcs = new_given_condition.args
else:
gcs = (new_given_condition, )
min_key_rv = list(new_given_condition.atoms(RandomIndexedSymbol))
if len(min_key_rv):
min_key_rv = min_key_rv[0]
for r in rv:
if min_key_rv.key.is_symbol or r.key.is_symbol:
continue
if min_key_rv.key > r.key:
return Probability(condition)
else:
min_key_rv = None
return Probability(condition)
if symbolic:
return self._symbolic_probability(condition, new_given_condition, rv, min_key_rv)
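            # Two RandomIndexedSymbols in ``condition`` are handled via the law of
            # total probability: condition on the value of the earlier-indexed
            # symbol (justified by the Markov property) and sum the resulting
            # conditional probabilities over all states.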
if len(rv) > 1:
rv[0] = condition.lhs
rv[1] = condition.rhs
if rv[0].key < rv[1].key:
rv[0], rv[1] = rv[1], rv[0]
if isinstance(condition, Gt):
condition = Lt(condition.lhs, condition.rhs)
elif isinstance(condition, Lt):
condition = Gt(condition.lhs, condition.rhs)
elif isinstance(condition, Ge):
condition = Le(condition.lhs, condition.rhs)
elif isinstance(condition, Le):
condition = Ge(condition.lhs, condition.rhs)
s = Rational(0, 1)
n = len(self.state_space)
if isinstance(condition, Eq) or isinstance(condition, Ne):
for i in range(0, n):
s += self.probability(Eq(rv[0], i), Eq(rv[1], i)) * self.probability(Eq(rv[1], i), new_given_condition)
return s if isinstance(condition, Eq) else 1 - s
else:
upper = 0
greater = False
if isinstance(condition, Ge) or isinstance(condition, Lt):
upper = 1
if isinstance(condition, Gt) or isinstance(condition, Ge):
greater = True
for i in range(0, n):
if i <= n//2:
for j in range(0, i + upper):
s += self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
else:
s += self.probability(Eq(rv[0], i), new_given_condition)
for j in range(i + upper, n):
s -= self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
return s if greater else 1 - s
rv = rv[0]
states = condition.as_set()
prob, gstate = dict(), None
for gc in gcs:
if gc.has(min_key_rv):
if gc.has(Probability):
p, gp = (gc.rhs, gc.lhs) if isinstance(gc.lhs, Probability) \
else (gc.lhs, gc.rhs)
gr = gp.args[0]
gset = Intersection(gr.as_set(), state_index)
gstate = list(gset)[0]
prob[gset] = p
else:
_, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \
else (gc.rhs.key, gc.lhs)
if any((k not in self.index_set) for k in (rv.key, min_key_rv.key)):
                raise IndexError("The timestamps of the process are not in its index set.")
states = Intersection(states, state_index) if not isinstance(self.number_of_states, Symbol) else states
for state in Union(states, FiniteSet(gstate)):
if not isinstance(state, (int, Integer)) or Ge(state, mat.shape[0]) is True:
raise IndexError("No information is available for (%s, %s) in "
"transition probabilities of shape, (%s, %s). "
"State space is zero indexed."
%(gstate, state, mat.shape[0], mat.shape[1]))
if prob:
gstates = Union(*prob.keys())
if len(gstates) == 1:
gstate = list(gstates)[0]
gprob = list(prob.values())[0]
prob[gstates] = gprob
elif len(gstates) == len(state_index) - 1:
gstate = list(state_index - gstates)[0]
gprob = S.One - sum(prob.values())
prob[state_index - gstates] = gprob
else:
raise ValueError("Conflicting information.")
else:
gprob = S.One
if min_key_rv == rv:
return sum([prob[FiniteSet(state)] for state in states])
if isinstance(self, ContinuousMarkovChain):
return gprob * sum([trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state))
for state in states])
if isinstance(self, DiscreteMarkovChain):
return gprob * sum([(trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state))
for state in states])
if isinstance(condition, Not):
expr = condition.args[0]
return S.One - self.probability(expr, given_condition, evaluate, **kwargs)
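        # Conjunctions are factorised with the chain rule plus the Markov property:
        # P(X_t1 in A1, ..., X_tn in An | given) =
        #     P(X_t1 in A1 | given) * prod_i P(X_ti in Ai | X_t(i-1) in A(i-1)),
        # which is why the conditions below are processed in increasing key order.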
if isinstance(condition, And):
compute_later, state2cond, conds = [], dict(), condition.args
for expr in conds:
if isinstance(expr, Relational):
ris = list(expr.atoms(RandomIndexedSymbol))[0]
if state2cond.get(ris, None) is None:
state2cond[ris] = S.true
state2cond[ris] &= expr
else:
compute_later.append(expr)
ris = []
for ri in state2cond:
ris.append(ri)
cset = Intersection(state2cond[ri].as_set(), state_index)
if len(cset) == 0:
return S.Zero
state2cond[ri] = cset.as_relational(ri)
sorted_ris = sorted(ris, key=lambda ri: ri.key)
prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs)
for i in range(1, len(sorted_ris)):
ri, prev_ri = sorted_ris[i], sorted_ris[i-1]
if not isinstance(state2cond[ri], Eq):
raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
prod *= self.probability(state2cond[ri], state2cond[prev_ri]
& mat_of
& StochasticStateSpaceOf(self, state_index),
evaluate, **kwargs)
for expr in compute_later:
prod *= self.probability(expr, given_condition, evaluate, **kwargs)
return prod
if isinstance(condition, Or):
return sum([self.probability(expr, given_condition, evaluate, **kwargs)
for expr in condition.args])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(condition, given_condition))
def _symbolic_probability(self, condition, new_given_condition, rv, min_key_rv):
#Function to calculate probability for queries with symbols
if isinstance(condition, Relational):
curr_state = new_given_condition.rhs if isinstance(new_given_condition.lhs, RandomIndexedSymbol) \
else new_given_condition.lhs
next_state = condition.rhs if isinstance(condition.lhs, RandomIndexedSymbol) \
else condition.lhs
if isinstance(condition, Eq) or isinstance(condition, Ne):
if isinstance(self, DiscreteMarkovChain):
P = self.transition_probabilities**(rv[0].key - min_key_rv.key)
else:
P = exp(self.generator_matrix*(rv[0].key - min_key_rv.key))
prob = P[curr_state, next_state] if isinstance(condition, Eq) else 1 - P[curr_state, next_state]
return Piecewise((prob, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
upper = 1
greater = False
if isinstance(condition, Ge) or isinstance(condition, Lt):
upper = 0
if isinstance(condition, Gt) or isinstance(condition, Ge):
greater = True
k = Dummy('k')
condition = Eq(condition.lhs, k) if isinstance(condition.lhs, RandomIndexedSymbol)\
else Eq(condition.rhs, k)
total = Sum(self.probability(condition, new_given_condition), (k, next_state + upper, self.state_space._sup))
return Piecewise((total, rv[0].key > min_key_rv.key), (Probability(condition), True)) if greater\
else Piecewise((1 - total, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
return Probability(condition, new_given_condition)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Handles expectation queries for markov process.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
            Expression whose expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation
Unevaluated object if computations cannot be done due to
insufficient information.
Expr
In all other cases when the computations are successful.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, condition = \
self._preprocess(condition, evaluate)
if check:
return Expectation(expr, condition)
rvs = random_symbols(expr)
if isinstance(expr, Expr) and isinstance(condition, Eq) \
and len(rvs) == 1:
# handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>))
condition=self.replace_with_index(condition)
state_index=self.replace_with_index(state_index)
rv = list(rvs)[0]
lhsg, rhsg = condition.lhs, condition.rhs
if not isinstance(lhsg, RandomIndexedSymbol):
lhsg, rhsg = (rhsg, lhsg)
if rhsg not in state_index:
raise ValueError("%s state is not in the state space."%(rhsg))
if rv.key < lhsg.key:
                raise ValueError("Incorrect given condition: expectation "
                    "time %s < given time %s"%(rv.key, lhsg.key))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
cond = condition & mat_of & \
StochasticStateSpaceOf(self, state_index)
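            # E[f(X_n) | X_m = s0] = sum over s of P(X_n = s | X_m = s0) * f(s),
            # with s running over the index-based state space worked out above.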
func = lambda s: self.probability(Eq(rv, s), cond) * expr.subs(rv, self._state_index[s])
return sum([func(s) for s in state_index])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(expr, condition))
class DiscreteMarkovChain(DiscreteTimeStochasticProcess, MarkovProcess):
"""
Represents a finite discrete time-homogeneous Markov chain.
This type of Markov Chain can be uniquely characterised by
its (ordered) state space and its one-step transition probability
matrix.
Parameters
==========
sym:
The name given to the Markov Chain
state_space:
Optional, by default, Range(n)
trans_probs:
Optional, by default, MatrixSymbol('_T', n, n)
Examples
========
>>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf, P, E
>>> from sympy import Matrix, MatrixSymbol, Eq, symbols
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> YS = DiscreteMarkovChain("Y")
>>> Y.state_space
FiniteSet(0, 1, 2)
>>> Y.transition_probabilities
Matrix([
[0.5, 0.2, 0.3],
[0.2, 0.5, 0.3],
[0.2, 0.3, 0.5]])
>>> TS = MatrixSymbol('T', 3, 3)
>>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS))
T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2]
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
Probabilities will be calculated based on indexes rather
than state names. For example, with the Sunny-Cloudy-Rainy
model with string state names:
>>> from sympy.core.symbol import Str
>>> Y = DiscreteMarkovChain("Y", [Str('Sunny'), Str('Cloudy'), Str('Rainy')], T)
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
This gives the same answer as the ``[0, 1, 2]`` state space.
Currently, there is no support for state names within probability
and expectation statements. Here is a work-around using ``Str``:
>>> P(Eq(Str('Rainy'), Y[3]), Eq(Y[1], Str('Cloudy'))).round(2)
0.36
Symbol state names can also be used:
>>> sunny, cloudy, rainy = symbols('Sunny, Cloudy, Rainy')
>>> Y = DiscreteMarkovChain("Y", [sunny, cloudy, rainy], T)
>>> P(Eq(Y[3], rainy), Eq(Y[1], cloudy)).round(2)
0.36
Expectations will be calculated as follows:
>>> E(Y[3], Eq(Y[1], cloudy))
0.38*Cloudy + 0.36*Rainy + 0.26*Sunny
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
transition matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> P(Eq(Y[3], Y[1]), Eq(Y[0], 0)).round(3)
0.409
>>> P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2)
0.36
>>> P(Le(Y[15], Y[10]), Eq(Y[8], 2)).round(7)
0.6963328
Symbolic probability queries are also supported
>>> from sympy import symbols, Matrix, Rational, Eq, Gt
>>> from sympy.stats import P, DiscreteMarkovChain
>>> a, b, c, d = symbols('a b c d')
>>> T = Matrix([[Rational(1, 10), Rational(4, 10), Rational(5, 10)], [Rational(3, 10), Rational(4, 10), Rational(3, 10)], [Rational(7, 10), Rational(2, 10), Rational(1, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> query = P(Eq(Y[a], b), Eq(Y[c], d))
>>> query.subs({a:10 ,b:2, c:5, d:1}).round(4)
0.3096
>>> P(Eq(Y[10], 2), Eq(Y[5], 1)).evalf().round(4)
0.3096
>>> query_gt = P(Gt(Y[a], b), Eq(Y[c], d))
>>> query_gt.subs({a:21, b:0, c:5, d:0}).evalf().round(5)
0.64705
>>> P(Gt(Y[21], 0), Eq(Y[5], 0)).round(5)
0.64705
    There is limited support for arbitrarily sized state spaces:
>>> n = symbols('n', nonnegative=True, integer=True)
>>> T = MatrixSymbol('T', n, n)
>>> Y = DiscreteMarkovChain("Y", trans_probs=T)
>>> Y.state_space
Range(0, n, 1)
>>> query = P(Eq(Y[a], b), Eq(Y[c], d))
>>> query.subs({a:10, b:2, c:5, d:1})
(T**5)[1, 2]
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Discrete-time_Markov_chain
.. [2] https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, state_space=None, trans_probs=None):
# type: (Basic, tUnion[str, Symbol], tSequence, tUnion[MatrixBase, MatrixSymbol]) -> DiscreteMarkovChain
sym = _symbol_converter(sym)
state_space, trans_probs = MarkovProcess._sanity_checks(state_space, trans_probs)
obj = Basic.__new__(cls, sym, state_space, trans_probs)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj._state_index):
indices[state] = index
obj.index_of = indices
return obj
@property
def transition_probabilities(self) -> tUnion[MatrixBase, MatrixSymbol]:
"""
Transition probabilities of discrete Markov chain,
either an instance of Matrix or MatrixSymbol.
"""
return self.args[2]
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
"""
Returns the list of communication classes that partition
the states of the markov chain.
A communication class is defined to be a set of states
such that every state in that set is reachable from
every other state in that set. Due to its properties
this forms a class in the mathematical sense.
        Recurrent communication classes are also known as
        recurrence classes.
Returns
=======
classes
The ``classes`` are a list of tuples. Each
tuple represents a single communication class
with its properties. The first element in the
tuple is the list of states in the class, the
second element is whether the class is recurrent
and the third element is the period of the
communication class.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0, 1, 0],
... [1, 0, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
>>> classes = X.communication_classes()
>>> for states, is_recurrent, period in classes:
... states, is_recurrent, period
([1, 2], True, 2)
([3], False, 1)
From this we can see that states ``1`` and ``2``
communicate, are recurrent and have a period
of 2. We can also see state ``3`` is transient
with a period of 1.
Notes
=====
The algorithm used is of order ``O(n**2)`` where
``n`` is the number of states in the markov chain.
It uses Tarjan's algorithm to find the classes
themselves and then it uses a breadth-first search
algorithm to find each class's periodicity.
Most of the algorithm's components approach ``O(n)``
as the matrix becomes more and more sparse.
References
==========
.. [1] http://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
.. [2] http://cecas.clemson.edu/~shierd/Shier/markov.pdf
.. [3] https://ujcontent.uj.ac.za/vital/access/services/Download/uj:7506/CONTENT1
.. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
"""
n = self.number_of_states
T = self.transition_probabilities
if isinstance(T, MatrixSymbol):
raise NotImplementedError("Cannot perform the operation with a symbolic matrix.")
# begin Tarjan's algorithm
V = Range(n)
# don't use state names. Rather use state
# indexes since we use them for matrix
# indexing here and later onward
E = [(i, j) for i in V for j in V if T[i, j] != 0]
classes = strongly_connected_components((V, E))
# end Tarjan's algorithm
recurrence = []
periods = []
for class_ in classes:
# begin recurrent check (similar to self._check_trans_probs())
submatrix = T[class_, class_] # get the submatrix with those states
is_recurrent = S.true
rows = submatrix.tolist()
for row in rows:
if (sum(row) - 1) != 0:
is_recurrent = S.false
break
recurrence.append(is_recurrent)
# end recurrent check
# begin breadth-first search
non_tree_edge_values = set()
visited = {class_[0]}
newly_visited = {class_[0]}
level = {class_[0]: 0}
current_level = 0
done = False # imitate a do-while loop
while not done: # runs at most len(class_) times
done = len(visited) == len(class_)
current_level += 1
# this loop and the while loop above run a combined len(class_) number of times.
# so this triple nested loop runs through each of the n states once.
for i in newly_visited:
# the loop below runs len(class_) number of times
# complexity is roughly O(n * avg(len(class_)))
newly_visited = {j for j in class_ if T[i, j] != 0}
new_tree_edges = newly_visited.difference(visited)
for j in new_tree_edges:
level[j] = current_level
new_non_tree_edges = newly_visited.intersection(visited)
new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges}
non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values)
visited = visited.union(new_tree_edges)
# igcd needs at least 2 arguments
positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0}
if len(positive_ntev) == 0:
periods.append(len(class_))
elif len(positive_ntev) == 1:
periods.append(positive_ntev.pop())
else:
periods.append(igcd(*positive_ntev))
# end breadth-first search
# convert back to the user's state names
classes = [[self._state_index[i] for i in class_] for class_ in classes]
return sympify(list(zip(classes, recurrence, periods)))
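    # Hedged usage sketch (kept as a comment so the module is unchanged): the
    # Notes above describe finding the classes with Tarjan's algorithm and each
    # class's period as a gcd of cycle lengths collected by breadth-first search.
    # For a two-state chain that simply alternates, the whole chain is a single
    # recurrent class with period 2:
    #
    #     from sympy import Matrix
    #     from sympy.stats import DiscreteMarkovChain
    #     T = Matrix([[0, 1],
    #                 [1, 0]])
    #     X = DiscreteMarkovChain('X', trans_probs=T)
    #     X.communication_classes()   # expected: [([0, 1], True, 2)]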
def fundamental_matrix(self):
"""
Each entry of the fundamental matrix can be interpreted as
the expected number of times the chain is in state j
if it started in state i.
References
==========
.. [1] https://lips.cs.princeton.edu/the-fundamental-matrix-of-a-finite-markov-chain/
"""
_, _, _, Q = self.decompose()
if Q.shape[0] > 0: # if non-ergodic
I = eye(Q.shape[0])
if (I - Q).det() == 0:
raise ValueError("The fundamental matrix doesn't exist.")
return (I - Q).inv().as_immutable()
else: # if ergodic
P = self.transition_probabilities
I = eye(P.shape[0])
w = self.fixed_row_vector()
W = Matrix([list(w) for i in range(0, P.shape[0])])
if (I - P + W).det() == 0:
raise ValueError("The fundamental matrix doesn't exist.")
return (I - P + W).inv().as_immutable()
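    # Hedged usage sketch (comment only): for a chain with transient states the
    # method returns N = (I - Q)**-1, where Q is the transient-to-transient block
    # produced by ``decompose``.  For a chain with one absorbing state 0 and one
    # transient state 1 that stays put with probability 1/2:
    #
    #     from sympy import Matrix, S
    #     from sympy.stats import DiscreteMarkovChain
    #     T = Matrix([[1, 0],
    #                 [S(1)/2, S(1)/2]])
    #     X = DiscreteMarkovChain('X', trans_probs=T)
    #     X.fundamental_matrix()   # expected: Matrix([[2]]), i.e. two expected
    #                              # visits to state 1 when starting there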
def absorbing_probabilities(self):
"""
Computes the absorbing probabilities, i.e.,
the ij-th entry of the matrix denotes the
probability of Markov chain being absorbed
in state j starting from state i.
"""
_, _, R, _ = self.decompose()
N = self.fundamental_matrix()
if R is None or N is None:
return None
return N*R
def absorbing_probabilites(self):
SymPyDeprecationWarning(
feature="absorbing_probabilites",
useinstead="absorbing_probabilities",
issue=20042,
deprecated_since_version="1.7"
).warn()
return self.absorbing_probabilities()
def is_regular(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, periods = list(zip(*tuples))
return And(len(classes) == 1, periods[0] == 1)
def is_ergodic(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, _ = list(zip(*tuples))
return S(len(classes) == 1)
def is_absorbing_state(self, state):
trans_probs = self.transition_probabilities
if isinstance(trans_probs, ImmutableMatrix) and \
state < trans_probs.shape[0]:
return S(trans_probs[state, state]) is S.One
def is_absorbing_chain(self):
states, A, B, C = self.decompose()
r = A.shape[0]
return And(r > 0, A == Identity(r).as_explicit())
def stationary_distribution(self, condition_set=False) -> tUnion[ImmutableMatrix, ConditionSet, Lambda]:
"""
The stationary distribution is any row vector, p, that solves p = pP,
sums to 1 and has only nonnegative entries.
That means in matrix form: :math:`(P-I)^T p^T = 0` and
:math:`(1, ..., 1) p^T = 1`
where ``P`` is the one-step transition matrix.
All time-homogeneous Markov Chains with a finite state space
have at least one stationary distribution. In addition, if
a finite time-homogeneous Markov Chain is irreducible, the
stationary distribution is unique.
Parameters
==========
condition_set : bool
If the chain has a symbolic size or transition matrix,
it will return a ``Lambda`` if ``False`` and return a
``ConditionSet`` if ``True``.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
An irreducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13, 5/13, 0]])
A reducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [0, 0, 1]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13 - 8*tau0/13, 5/13 - 5*tau0/13, tau0]])
>>> Y = DiscreteMarkovChain('Y')
>>> Y.stationary_distribution()
Lambda((wm, _T), Eq(wm*_T, wm))
>>> Y.stationary_distribution(condition_set=True)
ConditionSet(wm, Eq(wm*_T, wm))
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_2_6_stationary_and_limiting_distributions.php
.. [2] https://galton.uchicago.edu/~yibi/teaching/stat317/2014/Lectures/Lecture4_6up.pdf
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.limiting_distribution
"""
trans_probs = self.transition_probabilities
n = self.number_of_states
if n == 0:
return ImmutableMatrix(Matrix([[]]))
# symbolic matrix version
if isinstance(trans_probs, MatrixSymbol):
wm = MatrixSymbol('wm', 1, n)
if condition_set:
return ConditionSet(wm, Eq(wm * trans_probs, wm))
else:
return Lambda((wm, trans_probs), Eq(wm * trans_probs, wm))
# numeric matrix version
a = Matrix(trans_probs - Identity(n)).T
a[0, 0:n] = ones(1, n)
b = zeros(n, 1)
b[0, 0] = 1
soln = list(linsolve((a, b)))[0]
return ImmutableMatrix([[sol for sol in soln]])
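    # Hedged usage sketch (comment only): the numeric branch above solves the
    # linear system :math:`(P-I)^T p^T = 0` together with the normalisation
    # sum(p) = 1 via ``linsolve``.  For a two-state chain:
    #
    #     from sympy import Matrix, S
    #     from sympy.stats import DiscreteMarkovChain
    #     T = Matrix([[S(1)/2, S(1)/2],
    #                 [S(1)/4, S(3)/4]])
    #     X = DiscreteMarkovChain('X', trans_probs=T)
    #     X.stationary_distribution()   # expected: Matrix([[1/3, 2/3]])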
def fixed_row_vector(self):
"""
A wrapper for ``stationary_distribution()``.
"""
return self.stationary_distribution()
@property
def limiting_distribution(self):
"""
The fixed row vector is the limiting
distribution of a discrete Markov chain.
"""
return self.fixed_row_vector()
def decompose(self) -> tTuple[tList[Basic], ImmutableMatrix, ImmutableMatrix, ImmutableMatrix]:
"""
Decomposes the transition matrix into submatrices with
special properties.
The transition matrix can be decomposed into 4 submatrices:
- A - the submatrix from recurrent states to recurrent states.
- B - the submatrix from transient to recurrent states.
- C - the submatrix from transient to transient states.
- O - the submatrix of zeros for recurrent to transient states.
Returns
=======
states, A, B, C
``states`` - a list of state names with the first being
the recurrent states and the last being
the transient states in the order
of the row names of A and then the row names of C.
``A`` - the submatrix from recurrent states to recurrent states.
``B`` - the submatrix from transient to recurrent states.
``C`` - the submatrix from transient to transient states.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
One can decompose this chain for example:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, A, B, C = X.decompose()
>>> states
[2, 0, 1, 3, 4]
>>> A # recurrent to recurrent
Matrix([[1]])
>>> B # transient to recurrent
Matrix([
[ 0],
[2/5],
[1/2],
[ 0]])
>>> C # transient to transient
Matrix([
[1/2, 1/2, 0, 0],
[2/5, 1/5, 0, 0],
[ 0, 0, 1/2, 0],
[1/2, 0, 0, 1/2]])
This means that state 2 is the only absorbing state
(since A is a 1x1 matrix). B is a 4x1 matrix since
the 4 remaining transient states can only transition into
recurrent state 2. And C is the 4x4 matrix that shows how the
transient states 0, 1, 3, 4 all interact.
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.communication_classes
sympy.stats.stochastic_process_types.DiscreteMarkovChain.canonical_form
References
==========
.. [1] https://en.wikipedia.org/wiki/Absorbing_Markov_chain
.. [2] http://people.brandeis.edu/~igusa/Math56aS08/Math56a_S08_notes015.pdf
"""
trans_probs = self.transition_probabilities
classes = self.communication_classes()
r_states = []
t_states = []
for states, recurrent, period in classes:
if recurrent:
r_states += states
else:
t_states += states
states = r_states + t_states
indexes = [self.index_of[state] for state in states]
A = Matrix(len(r_states), len(r_states),
lambda i, j: trans_probs[indexes[i], indexes[j]])
B = Matrix(len(t_states), len(r_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[j]])
C = Matrix(len(t_states), len(t_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[len(r_states) + j]])
return states, A.as_immutable(), B.as_immutable(), C.as_immutable()
def canonical_form(self) -> tTuple[tList[Basic], ImmutableMatrix]:
"""
Reorders the one-step transition matrix
so that recurrent states appear first and transient
states appear last. Other representations instead place
transient states first and recurrent states last.
Returns
=======
states, P_new
``states`` is the list that describes the order of the
new states in the matrix
so that the ith element in ``states`` is the state of the
ith row of ``P_new``.
``P_new`` is the new transition matrix in canonical form.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
You can convert your chain into canonical form:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', list(range(1, 6)), trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
The new states are [3, 1, 2, 4, 5] and you can
create a new chain with this and its canonical
form will remain the same (since it is already
in canonical form).
>>> X = DiscreteMarkovChain('X', states, new_matrix)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
This is not limited to absorbing chains:
>>> T = Matrix([[0, 5, 5, 0, 0],
... [0, 0, 0, 10, 0],
... [5, 0, 5, 0, 0],
... [0, 10, 0, 0, 0],
... [0, 3, 0, 3, 4]])/10
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[1, 3, 0, 2, 4]
>>> new_matrix
Matrix([
[ 0, 1, 0, 0, 0],
[ 1, 0, 0, 0, 0],
[ 1/2, 0, 0, 1/2, 0],
[ 0, 0, 1/2, 1/2, 0],
[3/10, 3/10, 0, 0, 2/5]])
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.communication_classes
sympy.stats.stochastic_process_types.DiscreteMarkovChain.decompose
References
==========
.. [1] https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470316887.app1
.. [2] http://www.columbia.edu/~ww2040/6711F12/lect1023big.pdf
"""
states, A, B, C = self.decompose()
O = zeros(A.shape[0], C.shape[1])
return states, BlockMatrix([[A, O], [B, C]]).as_explicit()
def sample(self):
"""
Returns
=======
sample: iterator object
iterator object yielding the sampled states of the chain
"""
if not isinstance(self.transition_probabilities, (Matrix, ImmutableMatrix)):
raise ValueError("Transition Matrix must be provided for sampling")
Tlist = self.transition_probabilities.tolist()
samps = [random.choice(list(self.state_space))]
yield samps[0]
time = 1
densities = {}
for state in self.state_space:
states = list(self.state_space)
densities[state] = {states[i]: Tlist[state][i]
for i in range(len(states))}
while time < S.Infinity:
samps.append(next(sample_iter(FiniteRV("_", densities[samps[time - 1]]))))
yield samps[time]
time += 1
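# Hedged usage sketch (comment only): ``sample`` returns a generator, so a
# finite prefix of the path has to be pulled from it explicitly.  It assumes a
# numeric transition matrix and, in recent SymPy versions, an external sampling
# backend such as scipy being available.
#
#     from sympy import Matrix, S
#     from sympy.stats import DiscreteMarkovChain
#     T = Matrix([[S(1)/2, S(1)/2],
#                 [S(1)/4, S(3)/4]])
#     X = DiscreteMarkovChain('X', trans_probs=T)
#     it = X.sample()
#     path = [next(it) for _ in range(5)]   # e.g. [0, 1, 1, 0, 1] (random)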
class ContinuousMarkovChain(ContinuousTimeStochasticProcess, MarkovProcess):
"""
Represents a continuous-time Markov chain.
Parameters
==========
sym: Symbol/str
state_space: Set
Optional, by default, S.Reals
gen_mat: Matrix/ImmutableMatrix/MatrixSymbol
Optional, by default, None
Examples
========
>>> from sympy.stats import ContinuousMarkovChain, P
>>> from sympy import Matrix, S, Eq, Gt
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.limiting_distribution()
Matrix([[1/2, 1/2]])
>>> C.state_space
FiniteSet(0, 1)
>>> C.generator_matrix
Matrix([
[-1, 1],
[ 1, -1]])
Probability queries are supported
>>> P(Eq(C(1.96), 0), Eq(C(0.78), 1)).round(5)
0.45279
>>> P(Gt(C(1.7), 0), Eq(C(0.82), 1)).round(5)
0.58602
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the entries of the
generator matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> P(Eq(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.37933
>>> P(Gt(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.34211
>>> P(Le(C(1.57), C(3.14)), Eq(C(1.22), 1)).round(4)
0.7143
Symbolic probability queries are also supported
>>> from sympy import S, symbols, Matrix, Rational, Eq, Gt
>>> from sympy.stats import P, ContinuousMarkovChain
>>> a,b,c,d = symbols('a b c d')
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> query = P(Eq(C(a), b), Eq(C(c), d))
>>> query.subs({a:3.65 ,b:2, c:1.78, d:1}).evalf().round(10)
0.4002723175
>>> P(Eq(C(3.65), 2), Eq(C(1.78), 1)).round(10)
0.4002723175
>>> query_gt = P(Gt(C(a), b), Eq(C(c), d))
>>> query_gt.subs({a:43.2 ,b:0, c:3.29, d:2}).evalf().round(10)
0.6832579186
>>> P(Gt(C(43.2), 0), Eq(C(3.29), 2)).round(10)
0.6832579186
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Continuous-time_Markov_chain
.. [2] http://u.math.biu.ac.il/~amirgi/CTMCnotes.pdf
"""
index_set = S.Reals
def __new__(cls, sym, state_space=None, gen_mat=None):
sym = _symbol_converter(sym)
state_space, gen_mat = MarkovProcess._sanity_checks(state_space, gen_mat)
obj = Basic.__new__(cls, sym, state_space, gen_mat)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj.state_space):
indices[state] = index
obj.index_of = indices
return obj
@property
def generator_matrix(self):
return self.args[2]
@cacheit
def transition_probabilities(self, gen_mat=None):
t = Dummy('t')
if isinstance(gen_mat, (Matrix, ImmutableMatrix)) and \
gen_mat.is_diagonalizable():
# for faster computation use diagonalized generator matrix
Q, D = gen_mat.diagonalize()
return Lambda(t, Q*exp(t*D)*Q.inv())
if gen_mat is not None:
return Lambda(t, exp(t*gen_mat))
def limiting_distribution(self):
gen_mat = self.generator_matrix
if gen_mat is None:
return None
if isinstance(gen_mat, MatrixSymbol):
wm = MatrixSymbol('wm', 1, gen_mat.shape[0])
return Lambda((wm, gen_mat), Eq(wm*gen_mat, wm))
w = IndexedBase('w')
wi = [w[i] for i in range(gen_mat.shape[0])]
wm = Matrix([wi])
eqs = (wm*gen_mat).tolist()[0]
eqs.append(sum(wi) - 1)
soln = list(linsolve(eqs, wi))[0]
return ImmutableMatrix([[sol for sol in soln]])
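# Hedged usage sketch (comment only): the transition matrix of a continuous
# chain is the matrix exponential P(t) = exp(t*G) of its generator matrix, and
# ``transition_probabilities`` returns it as a Lambda in t (the generator
# matrix has to be passed in explicitly):
#
#     from sympy import Matrix, S
#     from sympy.stats import ContinuousMarkovChain
#     G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
#     C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
#     Pt = C.transition_probabilities(C.generator_matrix)
#     Pt(1)                       # a concrete 2x2 matrix involving exp(-2)
#     C.limiting_distribution()   # Matrix([[1/2, 1/2]]), as in the docstring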
class BernoulliProcess(DiscreteTimeStochasticProcess):
"""
The Bernoulli process consists of repeated
independent Bernoulli trials with the same parameter `p`.
It's assumed that the probability `p` applies to every
trial and that the outcomes of each trial
are independent of all the rest. Therefore the Bernoulli process
is a discrete-state and discrete-time stochastic process.
Parameters
==========
sym: Symbol/str
success: Integer/str
The event which is considered to be success, 1 by default.
failure: Integer/str
The event which is considered to be failure, 0 by default.
p: Real Number between 0 and 1
Represents the probability of getting success.
Examples
========
>>> from sympy.stats import BernoulliProcess, P, E
>>> from sympy import Eq, Gt
>>> B = BernoulliProcess("B", p=0.7, success=1, failure=0)
>>> B.state_space
FiniteSet(0, 1)
>>> (B.p).round(2)
0.70
>>> B.success
1
>>> B.failure
0
>>> X = B[1] + B[2] + B[3]
>>> P(Eq(X, 0)).round(2)
0.03
>>> P(Eq(X, 2)).round(2)
0.44
>>> P(Eq(X, 4)).round(2)
0
>>> P(Gt(X, 1)).round(2)
0.78
>>> P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2)
0.04
>>> B.joint_distribution(B[1], B[2])
JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)),
(0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)),
(0, True))))
>>> E(2*B[1] + B[2]).round(2)
2.10
>>> P(B[1] < 1).round(2)
0.30
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_process
.. [2] https://mathcs.clarku.edu/~djoyce/ma217/bernoulli.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, p, success=1, failure=0):
_value_check(p >= 0 and p <= 1, 'Value of p must be between 0 and 1.')
sym = _symbol_converter(sym)
p = _sympify(p)
success = _sym_sympify(success)
failure = _sym_sympify(failure)
return Basic.__new__(cls, sym, p, success, failure)
@property
def symbol(self):
return self.args[0]
@property
def p(self):
return self.args[1]
@property
def success(self):
return self.args[2]
@property
def failure(self):
return self.args[3]
@property
def state_space(self):
return _set_converter([self.success, self.failure])
@property
def distribution(self):
return BernoulliDistribution(self.p)
def simple_rv(self, rv):
return Bernoulli(rv.name, p=self.p,
succ=self.success, fail=self.failure)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Expression for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability.
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational/And
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
return _SubstituteRV._probability(condition, given_condition, evaluate, **kwargs)
def density(self, x):
return Piecewise((self.p, Eq(x, self.success)),
(1 - self.p, Eq(x, self.failure)),
(S.Zero, True))
class _SubstituteRV:
"""
Internal class to handle the queries of expectation and probability
by substitution.
"""
@staticmethod
def _rvindexed_subs(expr, condition=None):
"""
Substitutes the RandomIndexedSymbol with a RandomSymbol that has the
same name, distribution and probability as the RandomIndexedSymbol.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Expression in which the substitution has to be made. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
"""
rvs_expr = random_symbols(expr)
if len(rvs_expr) != 0:
swapdict_expr = {}
for rv in rvs_expr:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv) # substitute with equivalent simple rv
swapdict_expr[rv] = newrv
expr = expr.subs(swapdict_expr)
rvs_cond = random_symbols(condition)
if len(rvs_cond)!=0:
swapdict_cond = {}
for rv in rvs_cond:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv)
swapdict_cond[rv] = newrv
condition = condition.subs(swapdict_cond)
return expr, condition
@classmethod
def _expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Internal method for computing expectation of indexed RV.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Expression for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
new_expr, new_condition = self._rvindexed_subs(expr, condition)
if not is_random(new_expr):
return new_expr
new_pspace = pspace(new_expr)
if new_condition is not None:
new_expr = given(new_expr, new_condition)
if new_expr.is_Add: # As E is Linear
return Add(*[new_pspace.compute_expectation(
expr=arg, evaluate=evaluate, **kwargs)
for arg in new_expr.args])
return new_pspace.compute_expectation(
new_expr, evaluate=evaluate, **kwargs)
@classmethod
def _probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Internal method for computing probability of indexed RV
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational/And
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
new_condition, new_givencondition = self._rvindexed_subs(condition, given_condition)
if isinstance(new_givencondition, RandomSymbol):
condrv = random_symbols(new_condition)
if len(condrv) == 1 and condrv[0] == new_givencondition:
return BernoulliDistribution(self._probability(new_condition), 0, 1)
if any([dependent(rv, new_givencondition) for rv in condrv]):
return Probability(new_condition, new_givencondition)
else:
return self._probability(new_condition)
if new_givencondition is not None and \
not isinstance(new_givencondition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_givencondition))
if new_givencondition == False or new_condition == False:
return S.Zero
if new_condition == True:
return S.One
if not isinstance(new_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_condition))
if new_givencondition is not None: # If there is a condition
# Recompute on new conditional expr
return self._probability(given(new_condition, new_givencondition, **kwargs), **kwargs)
result = pspace(new_condition).probability(new_condition, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
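# Hedged sketch (comment only) of the substitution that _SubstituteRV performs:
# every RandomIndexedSymbol is swapped for an ordinary random symbol created by
# the owning process's ``simple_rv``, after which the standard P/E machinery
# applies.
#
#     from sympy import S, Eq
#     from sympy.stats import BernoulliProcess, P
#     B = BernoulliProcess('B', p=S(7)/10)
#     rv = B[1]                                   # RandomIndexedSymbol
#     simple = rv.pspace.process.simple_rv(rv)    # plain Bernoulli random symbol
#     P(Eq(B[1], 1))                              # computed via the swap; 7/10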
def get_timerv_swaps(expr, condition):
"""
Finds the appropriate interval for each time stamp in expr by parsing
the given condition, and returns the intervals for each time stamp together
with a dictionary that maps each variable time-stamped RandomIndexedSymbol
to the corresponding RandomIndexedSymbol with a fixed time stamp.
Parameters
==========
expr: SymPy Expression
Expression containing Random Indexed Symbols with variable time stamps
condition: Relational/Boolean Expression
Expression containing time bounds of variable time stamps in expr
Examples
========
>>> from sympy.stats.stochastic_process_types import get_timerv_swaps, PoissonProcess
>>> from sympy import symbols, Contains, Interval
>>> x, t, d = symbols('x t d', positive=True)
>>> X = PoissonProcess("X", 3)
>>> get_timerv_swaps(x*X(t), Contains(t, Interval.Lopen(0, 1)))
([Interval.Lopen(0, 1)], {X(t): X(1)})
>>> get_timerv_swaps((X(t)**2 + X(d)**2), Contains(t, Interval.Lopen(0, 1))
... & Contains(d, Interval.Ropen(1, 4))) # doctest: +SKIP
([Interval.Ropen(1, 4), Interval.Lopen(0, 1)], {X(d): X(3), X(t): X(1)})
Returns
=======
intervals: list
List of Intervals/FiniteSet on which each time stamp is defined
rv_swap: dict
Dictionary mapping variable time Random Indexed Symbol to constant time
Random Indexed Variable
"""
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
expr_syms = list(expr.atoms(RandomIndexedSymbol))
if isinstance(condition, (And, Or)):
given_cond_args = condition.args
else: # single condition
given_cond_args = (condition, )
rv_swap = {}
intervals = []
for expr_sym in expr_syms:
for arg in given_cond_args:
if arg.has(expr_sym.key) and isinstance(expr_sym.key, Symbol):
intv = _set_converter(arg.args[1])
diff_key = intv._sup - intv._inf
if diff_key == oo:
raise ValueError("%s should have finite bounds" % str(expr_sym.name))
elif diff_key == S.Zero: # has singleton set
diff_key = intv._sup
rv_swap[expr_sym] = expr_sym.subs({expr_sym.key: diff_key})
intervals.append(intv)
return intervals, rv_swap
class CountingProcess(ContinuousTimeStochasticProcess):
"""
This class handles the common methods of the Counting Processes
such as Poisson, Wiener and Gamma Processes
"""
index_set = _set_converter(Interval(0, oo))
@property
def symbol(self):
return self.args[0]
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Expression for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Boolean
The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Expectation of the given expr
"""
if condition is not None:
intervals, rv_swap = get_timerv_swaps(expr, condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if expr.is_Add:
return Add.fromiter(self.expectation(arg, condition)
for arg in expr.args)
expr = expr.subs(rv_swap)
else:
return Expectation(expr, condition)
return _SubstituteRV._expectation(expr, evaluate=evaluate, **kwargs)
def _solve_argwith_tworvs(self, arg):
if arg.args[0].key >= arg.args[1].key or isinstance(arg, Eq):
diff_key = abs(arg.args[0].key - arg.args[1].key)
rv = arg.args[0]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
else:
diff_key = arg.args[1].key - arg.args[0].key
rv = arg.args[1]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
return arg
def _solve_numerical(self, condition, given_condition=None):
if isinstance(condition, And):
args_list = list(condition.args)
else:
args_list = [condition]
if given_condition is not None:
if isinstance(given_condition, And):
args_list.extend(list(given_condition.args))
else:
args_list.extend([given_condition])
# sort the args based on timestamp to get the independent increments in
# each segment using all the condition args as well as given_condition args
args_list = sorted(args_list, key=lambda x: x.args[0].key)
result = []
cond_args = list(condition.args) if isinstance(condition, And) else [condition]
if args_list[0] in cond_args and not (is_random(args_list[0].args[0])
and is_random(args_list[0].args[1])):
result.append(_SubstituteRV._probability(args_list[0]))
if is_random(args_list[0].args[0]) and is_random(args_list[0].args[1]):
arg = self._solve_argwith_tworvs(args_list[0])
result.append(_SubstituteRV._probability(arg))
for i in range(len(args_list) - 1):
curr, nex = args_list[i], args_list[i + 1]
diff_key = nex.args[0].key - curr.args[0].key
working_set = curr.args[0].pspace.process.state_space
if curr.args[1] > nex.args[1]: #impossible condition so return 0
result.append(0)
break
if isinstance(curr, Eq):
working_set = Intersection(working_set, Interval.Lopen(curr.args[1], oo))
else:
working_set = Intersection(working_set, curr.as_set())
if isinstance(nex, Eq):
working_set = Intersection(working_set, Interval(-oo, nex.args[1]))
else:
working_set = Intersection(working_set, nex.as_set())
if working_set == EmptySet:
rv = Eq(curr.args[0].pspace.process(diff_key), 0)
result.append(_SubstituteRV._probability(rv))
else:
if working_set.is_finite_set:
if isinstance(curr, Eq) and isinstance(nex, Eq):
rv = Eq(curr.args[0].pspace.process(diff_key), len(working_set))
result.append(_SubstituteRV._probability(rv))
elif isinstance(curr, Eq) ^ isinstance(nex, Eq):
result.append(Add.fromiter(_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x))
for x in range(len(working_set))))
else:
n = len(working_set)
result.append(Add.fromiter((n - x)*_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x)) for x in range(n)))
else:
result.append(_SubstituteRV._probability(
curr.args[0].pspace.process(diff_key) <= working_set._sup - working_set._inf))
return Mul.fromiter(result)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational, Boolean
The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Probability of the condition
"""
check_numeric = True
if isinstance(condition, (And, Or)):
cond_args = condition.args
else:
cond_args = (condition, )
# check that condition args are numeric or not
if not all(arg.args[0].key.is_number for arg in cond_args):
check_numeric = False
if given_condition is not None:
check_given_numeric = True
if isinstance(given_condition, (And, Or)):
given_cond_args = given_condition.args
else:
given_cond_args = (given_condition, )
# check that given condition args are numeric or not
if given_condition.has(Contains):
check_given_numeric = False
# Handle numerical queries
if check_numeric and check_given_numeric:
res = []
if isinstance(condition, Or):
res.append(Add.fromiter(self._solve_numerical(arg, given_condition)
for arg in condition.args))
if isinstance(given_condition, Or):
res.append(Add.fromiter(self._solve_numerical(condition, arg)
for arg in given_condition.args))
if res:
return Add.fromiter(res)
return self._solve_numerical(condition, given_condition)
# no purely numeric query, so fall back to `Contains` handling; first check
# that every given condition is expressed in terms of `Contains`
if not all(arg.has(Contains) for arg in given_cond_args):
raise ValueError("If given condition is passed with `Contains`, then "
"please pass the evaluated condition with its corresponding information "
"in terms of intervals of each time stamp to be passed in given condition.")
intervals, rv_swap = get_timerv_swaps(condition, given_condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if isinstance(condition, And):
return Mul.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
elif isinstance(condition, Or):
return Add.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
condition = condition.subs(rv_swap)
else:
return Probability(condition, given_condition)
if check_numeric:
return self._solve_numerical(condition)
return _SubstituteRV._probability(condition, evaluate=evaluate, **kwargs)
class PoissonProcess(CountingProcess):
"""
The Poisson process is a counting process. It is usually used in scenarios
where we are counting the occurrences of certain events that appear
to happen at a certain rate, but completely at random.
Parameters
==========
sym: Symbol/str
lamda: Positive number
Rate of the process, ``lamda > 0``
Examples
========
>>> from sympy.stats import PoissonProcess, P, E
>>> from sympy import symbols, Eq, Ne, Contains, Interval
>>> X = PoissonProcess("X", lamda=3)
>>> X.state_space
Naturals0
>>> X.lamda
3
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 4)
(9*t1**3/2 + 9*t1**2/2 + 3*t1 + 1)*exp(-3*t1)
>>> P(Eq(X(t1), 2) | Ne(X(t1), 4), Contains(t1, Interval.Ropen(2, 4)))
1 - 36*exp(-6)
>>> P(Eq(X(t1), 2) & Eq(X(t2), 3), Contains(t1, Interval.Lopen(0, 2))
... & Contains(t2, Interval.Lopen(2, 4)))
648*exp(-12)
>>> E(X(t1))
3*t1
>>> E(X(t1)**2 + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
18
>>> P(X(3) < 1, Eq(X(1), 0))
exp(-6)
>>> P(Eq(X(4), 3), Eq(X(2), 3))
exp(-6)
>>> P(X(2) <= 3, X(1) > 1)
5*exp(-3)
Merging two Poisson Processes
>>> Y = PoissonProcess("Y", lamda=4)
>>> Z = X + Y
>>> Z.lamda
7
Splitting a Poisson Process into two independent Poisson Processes
>>> N, M = Z.split(l1=2, l2=5)
>>> N.lamda, M.lamda
(2, 5)
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_0_0_intro.php
.. [2] https://en.wikipedia.org/wiki/Poisson_point_process
"""
def __new__(cls, sym, lamda):
_value_check(lamda > 0, 'lamda should be a positive number.')
sym = _symbol_converter(sym)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda)
@property
def lamda(self):
return self.args[1]
@property
def state_space(self):
return S.Naturals0
def distribution(self, rv):
return PoissonDistribution(self.lamda*rv.key)
def density(self, x):
return (self.lamda*x.key)**x / factorial(x) * exp(-(self.lamda*x.key))
def simple_rv(self, rv):
return Poisson(rv.name, lamda=self.lamda*rv.key)
def __add__(self, other):
if not isinstance(other, PoissonProcess):
raise ValueError("Only instances of Poisson Process can be merged")
return PoissonProcess(Dummy(self.symbol.name + other.symbol.name),
self.lamda + other.lamda)
def split(self, l1, l2):
if _sympify(l1 + l2) != self.lamda:
raise ValueError("Sum of l1 and l2 should be %s" % str(self.lamda))
return PoissonProcess(Dummy("l1"), l1), PoissonProcess(Dummy("l2"), l2)
class WienerProcess(CountingProcess):
"""
The Wiener process is a real valued continuous-time stochastic process.
In physics it is used to study Brownian motion and is therefore also
known as Brownian motion.
Parameters
==========
sym: Symbol/str
Examples
========
>>> from sympy.stats import WienerProcess, P, E
>>> from sympy import symbols, Contains, Interval
>>> X = WienerProcess("X")
>>> X.state_space
Reals
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 7).simplify()
erf(7*sqrt(2)/(2*sqrt(t1)))/2 + 1/2
>>> P((X(t1) > 2) | (X(t1) < 4), Contains(t1, Interval.Ropen(2, 4))).simplify()
-erf(1)/2 + erf(2)/2 + 1
>>> E(X(t1))
0
>>> E(X(t1) + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
0
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_4_0_brownian_motion_wiener_process.php
.. [2] https://en.wikipedia.org/wiki/Wiener_process
"""
def __new__(cls, sym):
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym)
@property
def state_space(self):
return S.Reals
def distribution(self, rv):
return NormalDistribution(0, sqrt(rv.key))
def density(self, x):
return exp(-x**2/(2*x.key)) / (sqrt(2*pi)*sqrt(x.key))
def simple_rv(self, rv):
return Normal(rv.name, 0, sqrt(rv.key))
class GammaProcess(CountingProcess):
"""
A Gamma process is a random process with independent gamma distributed
increments. It is a pure-jump increasing Levy process.
Parameters
==========
sym: Symbol/str
lamda: Positive number
Jump size of the process, ``lamda > 0``
gamma: Positive number
Rate of jump arrivals, ``gamma > 0``
Examples
========
>>> from sympy.stats import GammaProcess, E, P, variance
>>> from sympy import symbols, Contains, Interval, Not
>>> t, d, x, l, g = symbols('t d x l g', positive=True)
>>> X = GammaProcess("X", l, g)
>>> E(X(t))
g*t/l
>>> variance(X(t)).simplify()
g*t/l**2
>>> X = GammaProcess('X', 1, 2)
>>> P(X(t) < 1).simplify()
lowergamma(2*t, 1)/gamma(2*t)
>>> P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
... Contains(d, Interval.Lopen(7, 8))).simplify()
-4*exp(-3) + 472*exp(-8)/3 + 1
>>> E(X(2) + x*E(X(5)))
10*x + 4
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_process
"""
def __new__(cls, sym, lamda, gamma):
_value_check(lamda > 0, 'lamda should be a positive number')
_value_check(gamma > 0, 'gamma should be a positive number')
sym = _symbol_converter(sym)
gamma = _sympify(gamma)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda, gamma)
@property
def lamda(self):
return self.args[1]
@property
def gamma(self):
return self.args[2]
@property
def state_space(self):
return _set_converter(Interval(0, oo))
def distribution(self, rv):
return GammaDistribution(self.gamma*rv.key, 1/self.lamda)
def density(self, x):
k = self.gamma*x.key
theta = 1/self.lamda
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def simple_rv(self, rv):
return Gamma(rv.name, self.gamma*rv.key, 1/self.lamda)
|
502e68cca20d23031d7ec21ff56ad0756d184911298049c34749c96915827f85 | """
SymPy statistics module
Introduces a random variable type into the SymPy language.
Random variables may be declared using prebuilt functions such as
Normal, Exponential, Coin, Die, etc... or built with functions like FiniteRV.
Queries on random expressions can be made using the functions
========================= =============================
Expression                Meaning
------------------------- -----------------------------
``P(condition)``          Probability
``E(expression)``         Expected value
``H(expression)``         Entropy
``variance(expression)``  Variance
``density(expression)``   Probability Density Function
``sample(expression)``    Produce a realization
``where(condition)``      Where the condition is true
========================= =============================
Examples
========
>>> from sympy.stats import P, E, variance, Die, Normal
>>> from sympy import Eq, simplify
>>> X, Y = Die('X', 6), Die('Y', 6) # Define two six sided dice
>>> Z = Normal('Z', 0, 1) # Declare a Normal random variable with mean 0, std 1
>>> P(X>3) # Probability X is greater than 3
1/2
>>> E(X+Y) # Expectation of the sum of two dice
7
>>> variance(X+Y) # Variance of the sum of two dice
35/6
>>> simplify(P(Z>1)) # Probability of Z being greater than 1
1/2 - erf(sqrt(2)/2)/2
One could also create custom distributions and define custom random variables
as follows:
1. If you want to create a Continuous Random Variable:
>>> from sympy.stats import ContinuousRV, P, E
>>> from sympy import exp, Symbol, Interval, oo
>>> x = Symbol('x')
>>> pdf = exp(-x) # pdf of the Continuous Distribution
>>> Z = ContinuousRV(x, pdf, set=Interval(0, oo))
>>> E(Z)
1
>>> P(Z > 5)
exp(-5)
1.1 To create an instance of Continuous Distribution:
>>> from sympy.stats import ContinuousDistributionHandmade
>>> from sympy import Lambda
>>> dist = ContinuousDistributionHandmade(Lambda(x, pdf), set=Interval(0, oo))
>>> dist.pdf(x)
exp(-x)
2. If you want to create a Discrete Random Variable:
>>> from sympy.stats import DiscreteRV, P, E
>>> from sympy import Symbol, S
>>> p = S(1)/2
>>> x = Symbol('x', integer=True, positive=True)
>>> pdf = p*(1 - p)**(x - 1)
>>> D = DiscreteRV(x, pdf, set=S.Naturals)
>>> E(D)
2
>>> P(D > 3)
1/8
2.1 To create an instance of Discrete Distribution:
>>> from sympy.stats import DiscreteDistributionHandmade
>>> from sympy import Lambda
>>> dist = DiscreteDistributionHandmade(Lambda(x, pdf), set=S.Naturals)
>>> dist.pdf(x)
2**(1 - x)/2
3. If you want to create a Finite Random Variable:
>>> from sympy.stats import FiniteRV, P, E
>>> from sympy import Rational
>>> pmf = {1: Rational(1, 3), 2: Rational(1, 6), 3: Rational(1, 4), 4: Rational(1, 4)}
>>> X = FiniteRV('X', pmf)
>>> E(X)
29/12
>>> P(X > 3)
1/4
3.1 To create an instance of Finite Distribution:
>>> from sympy.stats import FiniteDistributionHandmade
>>> dist = FiniteDistributionHandmade(pmf)
>>> dist.pmf(x)
Lambda(x, Piecewise((1/3, Eq(x, 1)), (1/6, Eq(x, 2)), (1/4, Eq(x, 3) | Eq(x, 4)), (0, True)))
"""
__all__ = [
'P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf','median',
'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'independent',
'random_symbols', 'correlation', 'factorial_moment', 'moment', 'cmoment',
'sampling_density', 'moment_generating_function', 'smoment', 'quantile',
'coskewness', 'sample_stochastic_process',
'FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin', 'Binomial',
'BetaBinomial', 'Hypergeometric', 'Rademacher', 'IdealSoliton', 'RobustSoliton',
'FiniteDistributionHandmade',
'ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaNoncentral', 'BetaPrime',
'BoundedPareto', 'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Erlang',
'ExGaussian', 'Exponential', 'ExponentialPower', 'FDistribution',
'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Gompertz', 'Gumbel',
'Kumaraswamy', 'Laplace', 'Levy', 'Logistic', 'LogLogistic', 'LogitNormal', 'LogNormal', 'Lomax',
'Moyal', 'Maxwell', 'Nakagami', 'Normal', 'GaussianInverse', 'Pareto', 'PowerFunction',
'QuadraticU', 'RaisedCosine', 'Rayleigh','Reciprocal', 'StudentT', 'ShiftedGompertz',
'Trapezoidal', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Wald',
'Weibull', 'WignerSemicircle', 'ContinuousDistributionHandmade',
'Geometric','Hermite', 'Logarithmic', 'NegativeBinomial', 'Poisson', 'Skellam',
'YuleSimon', 'Zeta', 'DiscreteRV', 'DiscreteDistributionHandmade',
'JointRV', 'Dirichlet', 'GeneralizedMultivariateLogGamma',
'GeneralizedMultivariateLogGammaOmega', 'Multinomial', 'MultivariateBeta',
'MultivariateEwens', 'MultivariateT', 'NegativeMultinomial',
'NormalGamma', 'MultivariateNormal', 'MultivariateLaplace', 'marginal_distribution',
'StochasticProcess', 'DiscreteTimeStochasticProcess',
'DiscreteMarkovChain', 'TransitionMatrixOf', 'StochasticStateSpaceOf',
'GeneratorMatrixOf', 'ContinuousMarkovChain', 'BernoulliProcess',
'PoissonProcess', 'WienerProcess', 'GammaProcess',
'CircularEnsemble', 'CircularUnitaryEnsemble',
'CircularOrthogonalEnsemble', 'CircularSymplecticEnsemble',
'GaussianEnsemble', 'GaussianUnitaryEnsemble',
'GaussianOrthogonalEnsemble', 'GaussianSymplecticEnsemble',
'joint_eigen_distribution', 'JointEigenDistribution',
'level_spacing_distribution',
'MatrixGamma', 'Wishart', 'MatrixNormal',
'Probability', 'Expectation', 'Variance', 'Covariance', 'Moment',
'CentralMoment',
'ExpectationMatrix', 'VarianceMatrix', 'CrossCovarianceMatrix'
]
from .rv_interface import (P, E, H, density, where, given, sample, cdf, median,
characteristic_function, pspace, sample_iter, variance, std, skewness,
kurtosis, covariance, dependent, entropy, independent, random_symbols,
correlation, factorial_moment, moment, cmoment, sampling_density,
moment_generating_function, smoment, quantile, coskewness,
sample_stochastic_process)
from .frv_types import (FiniteRV, DiscreteUniform, Die, Bernoulli, Coin,
Binomial, BetaBinomial, Hypergeometric, Rademacher,
FiniteDistributionHandmade, IdealSoliton, RobustSoliton)
from .crv_types import (ContinuousRV, Arcsin, Benini, Beta, BetaNoncentral,
BetaPrime, BoundedPareto, Cauchy, Chi, ChiNoncentral, ChiSquared, Dagum, Erlang,
ExGaussian, Exponential, ExponentialPower, FDistribution, FisherZ,
Frechet, Gamma, GammaInverse, Gompertz, Gumbel, Kumaraswamy, Laplace,
Levy, Logistic, LogLogistic, LogitNormal, LogNormal, Lomax, Maxwell, Moyal, Nakagami, Normal,
GaussianInverse, Pareto, QuadraticU, RaisedCosine, Rayleigh, Reciprocal, StudentT,
PowerFunction, ShiftedGompertz, Trapezoidal, Triangular, Uniform, UniformSum,
VonMises, Wald, Weibull, WignerSemicircle, ContinuousDistributionHandmade)
from .drv_types import (Geometric, Hermite, Logarithmic, NegativeBinomial, Poisson,
Skellam, YuleSimon, Zeta, DiscreteRV, DiscreteDistributionHandmade)
from .joint_rv_types import (JointRV, Dirichlet,
GeneralizedMultivariateLogGamma, GeneralizedMultivariateLogGammaOmega,
Multinomial, MultivariateBeta, MultivariateEwens, MultivariateT,
NegativeMultinomial, NormalGamma, MultivariateNormal, MultivariateLaplace,
marginal_distribution)
from .stochastic_process_types import (StochasticProcess,
DiscreteTimeStochasticProcess, DiscreteMarkovChain,
TransitionMatrixOf, StochasticStateSpaceOf, GeneratorMatrixOf,
ContinuousMarkovChain, BernoulliProcess, PoissonProcess, WienerProcess,
GammaProcess)
from .random_matrix_models import (CircularEnsemble, CircularUnitaryEnsemble,
CircularOrthogonalEnsemble, CircularSymplecticEnsemble,
GaussianEnsemble, GaussianUnitaryEnsemble, GaussianOrthogonalEnsemble,
GaussianSymplecticEnsemble, joint_eigen_distribution,
JointEigenDistribution, level_spacing_distribution)
from .matrix_distributions import MatrixGamma, Wishart, MatrixNormal
from .symbolic_probability import (Probability, Expectation, Variance,
Covariance, Moment, CentralMoment)
from .symbolic_multivariate_probability import (ExpectationMatrix, VarianceMatrix,
CrossCovarianceMatrix)
|
ca13e19986380434e1ced312faf456953732e42781b11503dfa26c310aaef387 | from sympy.sets import FiniteSet
from sympy import (sqrt, log, exp, FallingFactorial, Rational, Eq, Dummy,
piecewise_fold, solveset, Integral)
from .rv import (probability, expectation, density, where, given, pspace, cdf, PSpace,
characteristic_function, sample, sample_iter, random_symbols, independent, dependent,
sampling_density, moment_generating_function, quantile, is_random,
sample_stochastic_process)
__all__ = ['P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf',
'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'median',
'independent', 'random_symbols', 'correlation', 'factorial_moment',
'moment', 'cmoment', 'sampling_density', 'moment_generating_function',
'smoment', 'quantile', 'sample_stochastic_process']
def moment(X, n, c=0, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth moment of a random expression about c.
.. math::
moment(X, n, c) = E((X-c)^{n})
Default value of c is 0.
Examples
========
>>> from sympy.stats import Die, moment, E
>>> X = Die('X', 6)
>>> moment(X, 1, 6)
-5/2
>>> moment(X, 2)
91/6
>>> moment(X, 1) == E(X)
True
"""
from sympy.stats.symbolic_probability import Moment
if evaluate:
return Moment(X, n, c, condition).doit()
return Moment(X, n, c, condition).rewrite(Integral)
def variance(X, condition=None, **kwargs):
"""
Variance of a random expression
.. math::
variance(X) = E((X-E(X))^{2})
Examples
========
>>> from sympy.stats import Die, Bernoulli, variance
>>> from sympy import simplify, Symbol
>>> X = Die('X', 6)
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> variance(2*X)
35/3
>>> simplify(variance(B))
p*(1 - p)
"""
if is_random(X) and pspace(X) == PSpace():
from sympy.stats.symbolic_probability import Variance
return Variance(X, condition)
return cmoment(X, 2, condition, **kwargs)
def standard_deviation(X, condition=None, **kwargs):
r"""
Standard Deviation of a random expression
.. math::
std(X) = \sqrt{E((X-E(X))^{2})}
Examples
========
>>> from sympy.stats import Bernoulli, std
>>> from sympy import Symbol, simplify
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> simplify(std(B))
sqrt(p*(1 - p))
"""
return sqrt(variance(X, condition, **kwargs))
std = standard_deviation
def entropy(expr, condition=None, **kwargs):
"""
Calculates the entropy of a probability distribution
Parameters
==========
expression : the random expression whose entropy is to be calculated
condition : optional, to specify conditions on random expression
b: base of the logarithm, optional
By default, it is taken as Euler's number
Returns
=======
result : Entropy of the expression, a constant
Examples
========
>>> from sympy.stats import Normal, Die, entropy
>>> X = Normal('X', 0, 1)
>>> entropy(X)
log(2)/2 + 1/2 + log(pi)/2
>>> D = Die('D', 4)
>>> entropy(D)
log(4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory)
.. [2] https://www.crmarsh.com/static/pdf/Charles_Marsh_Continuous_Entropy.pdf
.. [3] http://www.math.uconn.edu/~kconrad/blurbs/analysis/entropypost.pdf
"""
pdf = density(expr, condition, **kwargs)
base = kwargs.get('b', exp(1))
if isinstance(pdf, dict):
return sum([-prob*log(prob, base) for prob in pdf.values()])
return expectation(-log(pdf(expr), base))
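# Hedged usage sketch (comment only): the optional keyword ``b`` selects the
# logarithm base, so the entropy of a fair four-sided die measured in bits is
# log(4)/log(2), i.e. 2 bits:
#
#     from sympy.stats import Die, entropy
#     D = Die('D', 4)
#     entropy(D)          # log(4), in nats (natural logarithm)
#     entropy(D, b=2)     # expected: log(4)/log(2), i.e. 2 bits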
def covariance(X, Y, condition=None, **kwargs):
"""
Covariance of two random expressions
A measure of how much the two variables rise and fall together
.. math::
covariance(X,Y) = E((X-E(X)) (Y-E(Y)))
Examples
========
>>> from sympy.stats import Exponential, covariance
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> covariance(X, X)
lambda**(-2)
>>> covariance(X, Y)
0
>>> covariance(X, Y + rate*X)
1/lambda
"""
if (is_random(X) and pspace(X) == PSpace()) or (is_random(Y) and pspace(Y) == PSpace()):
from sympy.stats.symbolic_probability import Covariance
return Covariance(X, Y, condition)
return expectation(
(X - expectation(X, condition, **kwargs)) *
(Y - expectation(Y, condition, **kwargs)),
condition, **kwargs)
def correlation(X, Y, condition=None, **kwargs):
r"""
Correlation of two random expressions, also known as correlation
coefficient or Pearson's correlation
The normalized measure of how much the two variables rise
and fall together
.. math::
correlation(X,Y) = E((X-E(X))(Y-E(Y))) / (\sigma_x \sigma_y)
Examples
========
>>> from sympy.stats import Exponential, correlation
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> correlation(X, X)
1
>>> correlation(X, Y)
0
>>> correlation(X, Y + rate*X)
1/sqrt(1 + lambda**(-2))
"""
return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs)
* std(Y, condition, **kwargs))
def cmoment(X, n, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth central moment of a random expression about its mean.
.. math::
cmoment(X, n) = E((X - E(X))^{n})
Examples
========
>>> from sympy.stats import Die, cmoment, variance
>>> X = Die('X', 6)
>>> cmoment(X, 3)
0
>>> cmoment(X, 2)
35/12
>>> cmoment(X, 2) == variance(X)
True
"""
from sympy.stats.symbolic_probability import CentralMoment
if evaluate:
return CentralMoment(X, n, condition).doit()
return CentralMoment(X, n, condition).rewrite(Integral)
def smoment(X, n, condition=None, **kwargs):
r"""
Return the nth Standardized moment of a random expression.
.. math::
smoment(X, n) = E(((X - \mu)/\sigma_X)^{n})
Examples
========
>>> from sympy.stats import skewness, Exponential, smoment
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> smoment(Y, 4)
9
>>> smoment(Y, 4) == smoment(3*Y, 4)
True
>>> smoment(Y, 3) == skewness(Y)
True
"""
sigma = std(X, condition, **kwargs)
return (1/sigma)**n*cmoment(X, n, condition, **kwargs)
def skewness(X, condition=None, **kwargs):
r"""
Measure of the asymmetry of the probability distribution.
Positive skew indicates that the distribution has a longer tail to the
right of the mean.
.. math::
skewness(X) = E(((X - E(X))/\sigma_X)^{3})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. skewness(X, X>0) is skewness of X given X > 0
Examples
========
>>> from sympy.stats import skewness, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> skewness(X)
0
>>> skewness(X, X > 0) # find skewness given X > 0
(-sqrt(2)/sqrt(pi) + 4*sqrt(2)/pi**(3/2))/(1 - 2/pi)**(3/2)
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> skewness(Y)
2
"""
return smoment(X, 3, condition=condition, **kwargs)
def kurtosis(X, condition=None, **kwargs):
r"""
Characterizes the tails/outliers of a probability distribution.
Kurtosis of any univariate normal distribution is 3. Kurtosis less than
3 means that the distribution produces fewer and less extreme outliers
than the normal distribution.
.. math::
kurtosis(X) = E(((X - E(X))/\sigma_X)^{4})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. kurtosis(X, X>0) is kurtosis of X given X > 0
Examples
========
>>> from sympy.stats import kurtosis, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> kurtosis(X)
3
>>> kurtosis(X, X > 0) # find kurtosis given X > 0
(-4/pi - 12/pi**2 + 3)/(1 - 2/pi)**2
>>> rate = Symbol('lamda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> kurtosis(Y)
9
References
==========
.. [1] https://en.wikipedia.org/wiki/Kurtosis
.. [2] http://mathworld.wolfram.com/Kurtosis.html
"""
return smoment(X, 4, condition=condition, **kwargs)
def factorial_moment(X, n, condition=None, **kwargs):
"""
The factorial moment is a mathematical quantity defined as the expectation
or average of the falling factorial of a random variable.
.. math::
factorial-moment(X, n) = E(X(X - 1)(X - 2)...(X - n + 1))
Parameters
==========
n: A natural number, n-th factorial moment.
condition : Expr containing RandomSymbols
A conditional expression.
Examples
========
>>> from sympy.stats import factorial_moment, Poisson, Binomial
>>> from sympy import Symbol, S
>>> lamda = Symbol('lamda')
>>> X = Poisson('X', lamda)
>>> factorial_moment(X, 2)
lamda**2
>>> Y = Binomial('Y', 2, S.Half)
>>> factorial_moment(Y, 2)
1/2
>>> factorial_moment(Y, 2, Y > 1) # find factorial moment for Y > 1
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Factorial_moment
.. [2] http://mathworld.wolfram.com/FactorialMoment.html
"""
return expectation(FallingFactorial(X, n), condition=condition, **kwargs)
def median(X, evaluate=True, **kwargs):
r"""
Calculates the median of the probability distribution.
Mathematically, the median of a probability distribution is defined as
all those values of `m` for which the following condition is satisfied
.. math::
P(X\leq m) \geq \frac{1}{2} \text{ and } P(X\geq m)\geq \frac{1}{2}
Parameters
==========
X: The random expression whose median is to be calculated.
Returns
=======
The FiniteSet or an Interval which contains the median of the
random expression.
Examples
========
>>> from sympy.stats import Normal, Die, median
>>> N = Normal('N', 3, 1)
>>> median(N)
FiniteSet(3)
>>> D = Die('D')
>>> median(D)
FiniteSet(3, 4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Median#Probability_distributions
"""
from sympy.stats.crv import ContinuousPSpace
from sympy.stats.drv import DiscretePSpace
from sympy.stats.frv import FinitePSpace
if isinstance(pspace(X), FinitePSpace):
cdf = pspace(X).compute_cdf(X)
result = []
for key, value in cdf.items():
if value >= Rational(1, 2) and (1 - value) + \
pspace(X).probability(Eq(X, key)) >= Rational(1, 2):
result.append(key)
return FiniteSet(*result)
if isinstance(pspace(X), ContinuousPSpace) or isinstance(pspace(X), DiscretePSpace):
cdf = pspace(X).compute_cdf(X)
x = Dummy('x')
result = solveset(piecewise_fold(cdf(x) - Rational(1, 2)), x, pspace(X).set)
return result
raise NotImplementedError("The median of %s is not implemented." % str(pspace(X)))
def coskewness(X, Y, Z, condition=None, **kwargs):
r"""
Calculates the co-skewness of three random variables.
Mathematically Coskewness is defined as
.. math::
coskewness(X,Y,Z)=\frac{E[(X-E[X]) * (Y-E[Y]) * (Z-E[Z])]} {\sigma_{X}\sigma_{Y}\sigma_{Z}}
Parameters
==========
X : RandomSymbol
Random Variable used to calculate coskewness
Y : RandomSymbol
Random Variable used to calculate coskewness
Z : RandomSymbol
Random Variable used to calculate coskewness
condition : Expr containing RandomSymbols
A conditional expression
Examples
========
>>> from sympy.stats import coskewness, Exponential, skewness
>>> from sympy import symbols
>>> p = symbols('p', positive=True)
>>> X = Exponential('X', p)
>>> Y = Exponential('Y', 2*p)
>>> coskewness(X, Y, Y)
0
>>> coskewness(X, Y + X, Y + 2*X)
16*sqrt(85)/85
>>> coskewness(X + 2*Y, Y + X, Y + 2*X, X > 3)
9*sqrt(170)/85
>>> coskewness(Y, Y, Y) == skewness(Y)
True
>>> coskewness(X, Y + p*X, Y + 2*p*X)
4/(sqrt(1 + 1/(4*p**2))*sqrt(4 + 1/(4*p**2)))
Returns
=======
coskewness : The coskewness of the three random variables
References
==========
.. [1] https://en.wikipedia.org/wiki/Coskewness
"""
num = expectation((X - expectation(X, condition, **kwargs)) \
* (Y - expectation(Y, condition, **kwargs)) \
* (Z - expectation(Z, condition, **kwargs)), condition, **kwargs)
den = std(X, condition, **kwargs) * std(Y, condition, **kwargs) \
* std(Z, condition, **kwargs)
return num/den
P = probability
E = expectation
H = entropy
|
47825f21a1b1593c0fa4084d1dcf8f0eefa2d3bcbace107b94df2295a79ba00a | """
Main Random Variables Module
Defines abstract random variable type.
Contains interfaces for probability space object (PSpace) as well as standard
operators, P, E, sample, density, where, quantile
See Also
========
sympy.stats.crv
sympy.stats.frv
sympy.stats.rv_interface
"""
from functools import singledispatch
from typing import Tuple as tTuple
from sympy import (Basic, S, Expr, Symbol, Tuple, And, Add, Eq, lambdify, Or,
Equality, Lambda, sympify, Dummy, Ne, KroneckerDelta,
DiracDelta, Mul, Indexed, MatrixSymbol, Function)
from sympy.core.relational import Relational
from sympy.core.sympify import _sympify
from sympy.sets.sets import FiniteSet, ProductSet, Intersection
from sympy.solvers.solveset import solveset
from sympy.external import import_module
from sympy.utilities.misc import filldedent
import warnings
x = Symbol('x')
@singledispatch
def is_random(x):
return False
@is_random.register(Basic)
def _(x):
atoms = x.free_symbols
return any([is_random(i) for i in atoms])
class RandomDomain(Basic):
"""
Represents a set of variables and the values which they can take
See Also
========
sympy.stats.crv.ContinuousDomain
sympy.stats.frv.FiniteDomain
"""
is_ProductDomain = False
is_Finite = False
is_Continuous = False
is_Discrete = False
def __new__(cls, symbols, *args):
symbols = FiniteSet(*symbols)
return Basic.__new__(cls, symbols, *args)
@property
def symbols(self):
return self.args[0]
@property
def set(self):
return self.args[1]
def __contains__(self, other):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SingleDomain(RandomDomain):
"""
A single variable and its domain
See Also
========
sympy.stats.crv.SingleContinuousDomain
sympy.stats.frv.SingleFiniteDomain
"""
def __new__(cls, symbol, set):
assert symbol.is_Symbol
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
def __contains__(self, other):
if len(other) != 1:
return False
sym, val = tuple(other)[0]
return self.symbol == sym and val in self.set
class MatrixDomain(RandomDomain):
"""
A Random Matrix variable and its domain
"""
def __new__(cls, symbol, set):
symbol, set = _symbol_converter(symbol), _sympify(set)
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
class ConditionalDomain(RandomDomain):
"""
A RandomDomain with an attached condition
See Also
========
sympy.stats.crv.ConditionalContinuousDomain
sympy.stats.frv.ConditionalFiniteDomain
"""
def __new__(cls, fulldomain, condition):
condition = condition.xreplace({rs: rs.symbol
for rs in random_symbols(condition)})
return Basic.__new__(cls, fulldomain, condition)
@property
def symbols(self):
return self.fulldomain.symbols
@property
def fulldomain(self):
return self.args[0]
@property
def condition(self):
return self.args[1]
@property
def set(self):
raise NotImplementedError("Set of Conditional Domain not Implemented")
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
class PSpace(Basic):
"""
A Probability Space
    Probability spaces encode processes that take on different values
    probabilistically. They underlie Random Symbols, which occur in SymPy
    expressions and contain the mechanics to evaluate statistical statements.
See Also
========
sympy.stats.crv.ContinuousPSpace
sympy.stats.frv.FinitePSpace
"""
is_Finite = None # type: bool
is_Continuous = None # type: bool
is_Discrete = None # type: bool
is_real = None # type: bool
@property
def domain(self):
return self.args[0]
@property
def density(self):
return self.args[1]
@property
def values(self):
return frozenset(RandomSymbol(sym, self) for sym in self.symbols)
@property
def symbols(self):
return self.domain.symbols
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self):
raise NotImplementedError()
def probability(self, condition):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SinglePSpace(PSpace):
"""
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
class RandomSymbol(Expr):
"""
Random Symbols represent ProbabilitySpaces in SymPy Expressions
In principle they can take on any value that their symbol can take on
within the associated PSpace with probability determined by the PSpace
Density.
Random Symbols contain pspace and symbol properties.
The pspace property points to the represented Probability Space
The symbol is a standard SymPy Symbol that is used in that probability space
for example in defining a density.
You can form normal SymPy expressions using RandomSymbols and operate on
those expressions with the Functions
E - Expectation of a random expression
P - Probability of a condition
density - Probability Density of an expression
given - A new random expression (with new random symbols) given a condition
An object of the RandomSymbol type should almost never be created by the
user. They tend to be created instead by the PSpace class's value method.
Traditionally a user doesn't even do this but instead calls one of the
convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc....
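    Examples
    ========
    A minimal illustration (the RandomSymbol here is created indirectly
    through the ``Normal`` convenience function):
    >>> from sympy.stats import Normal, E, P
    >>> X = Normal('X', 2, 3)
    >>> E(X)
    2
    >>> P(X > 2)
    1/2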
"""
def __new__(cls, symbol, pspace=None):
from sympy.stats.joint_rv import JointRandomSymbol
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
symbol = _symbol_converter(symbol)
if not isinstance(pspace, PSpace):
raise TypeError("pspace variable should be of type PSpace")
if cls == JointRandomSymbol and isinstance(pspace, SinglePSpace):
cls = RandomSymbol
return Basic.__new__(cls, symbol, pspace)
is_finite = True
is_symbol = True
is_Atom = True
_diff_wrt = True
pspace = property(lambda self: self.args[1])
symbol = property(lambda self: self.args[0])
name = property(lambda self: self.symbol.name)
def _eval_is_positive(self):
return self.symbol.is_positive
def _eval_is_integer(self):
return self.symbol.is_integer
def _eval_is_real(self):
return self.symbol.is_real or self.pspace.is_real
@property
def is_commutative(self):
return self.symbol.is_commutative
@property
def free_symbols(self):
return {self}
class RandomIndexedSymbol(RandomSymbol):
def __new__(cls, idx_obj, pspace=None):
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
if not isinstance(idx_obj, (Indexed, Function)):
raise TypeError("An Function or Indexed object is expected not %s"%(idx_obj))
return Basic.__new__(cls, idx_obj, pspace)
symbol = property(lambda self: self.args[0])
name = property(lambda self: str(self.args[0]))
@property
def key(self):
if isinstance(self.symbol, Indexed):
return self.symbol.args[1]
elif isinstance(self.symbol, Function):
return self.symbol.args[0]
@property
def free_symbols(self):
if self.key.free_symbols:
free_syms = self.key.free_symbols
free_syms.add(self)
return free_syms
return {self}
class RandomMatrixSymbol(RandomSymbol, MatrixSymbol): # type: ignore
def __new__(cls, symbol, n, m, pspace=None):
n, m = _sympify(n), _sympify(m)
symbol = _symbol_converter(symbol)
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
return Basic.__new__(cls, symbol, n, m, pspace)
symbol = property(lambda self: self.args[0])
pspace = property(lambda self: self.args[3])
class ProductPSpace(PSpace):
"""
Abstract class for representing probability spaces with multiple random
variables.
See Also
========
sympy.stats.rv.IndependentProductPSpace
sympy.stats.joint_rv.JointPSpace
"""
pass
class IndependentProductPSpace(ProductPSpace):
"""
A probability space resulting from the merger of two independent probability
spaces.
    Often created using the function ``pspace``.
"""
def __new__(cls, *spaces):
rs_space_dict = {}
for space in spaces:
for value in space.values:
rs_space_dict[value] = space
symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()])
# Overlapping symbols
from sympy.stats.joint_rv import MarginalDistribution
from sympy.stats.compound_rv import CompoundDistribution
if len(symbols) < sum(len(space.symbols) for space in spaces if not
isinstance(space.distribution, (
CompoundDistribution, MarginalDistribution))):
raise ValueError("Overlapping Random Variables")
if all(space.is_Finite for space in spaces):
from sympy.stats.frv import ProductFinitePSpace
cls = ProductFinitePSpace
obj = Basic.__new__(cls, *FiniteSet(*spaces))
return obj
@property
def pdf(self):
p = Mul(*[space.pdf for space in self.spaces])
return p.subs({rv: rv.symbol for rv in self.values})
@property
def rs_space_dict(self):
d = {}
for space in self.spaces:
for value in space.values:
d[value] = space
return d
@property
def symbols(self):
return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()])
@property
def spaces(self):
return FiniteSet(*self.args)
@property
def values(self):
return sumsets(space.values for space in self.spaces)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
rvs = rvs or self.values
rvs = frozenset(rvs)
for space in self.spaces:
expr = space.compute_expectation(expr, rvs & space.values, evaluate=False, **kwargs)
if evaluate and hasattr(expr, 'doit'):
return expr.doit(**kwargs)
return expr
@property
def domain(self):
return ProductDomain(*[space.domain for space in self.spaces])
@property
def density(self):
raise NotImplementedError("Density not available for ProductSpaces")
def sample(self, size=(), library='scipy'):
return {k: v for space in self.spaces
for k, v in space.sample(size=size, library=library).items()}
def probability(self, condition, **kwargs):
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
elif isinstance(condition, And): # they are independent
return Mul(*[self.probability(arg) for arg in condition.args])
elif isinstance(condition, Or): # they are independent
return Add(*[self.probability(arg) for arg in condition.args])
expr = condition.lhs - condition.rhs
rvs = random_symbols(expr)
dens = self.compute_density(expr)
if any([pspace(rv).is_Continuous for rv in rvs]):
from sympy.stats.crv import SingleContinuousPSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.integrate(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
dens = ContinuousDistributionHandmade(dens)
z = Dummy('z', real=True)
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
else:
from sympy.stats.drv import SingleDiscretePSpace
from sympy.stats.drv_types import DiscreteDistributionHandmade
dens = DiscreteDistributionHandmade(dens)
z = Dummy('z', integer=True)
space = SingleDiscretePSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
return result if not cond_inv else S.One - result
def compute_density(self, expr, **kwargs):
rvs = random_symbols(expr)
if any(pspace(rv).is_Continuous for rv in rvs):
z = Dummy('z', real=True)
expr = self.compute_expectation(DiracDelta(expr - z),
**kwargs)
else:
z = Dummy('z', integer=True)
expr = self.compute_expectation(KroneckerDelta(expr, z),
**kwargs)
return Lambda(z, expr)
def compute_cdf(self, expr, **kwargs):
raise ValueError("CDF not well defined on multivariate expressions")
def conditional_space(self, condition, normalize=True, **kwargs):
rvs = random_symbols(condition)
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
if any([pspace(rv).is_Continuous for rv in rvs]):
from sympy.stats.crv import (ConditionalContinuousDomain,
ContinuousPSpace)
space = ContinuousPSpace
domain = ConditionalContinuousDomain(self.domain, condition)
elif any([pspace(rv).is_Discrete for rv in rvs]):
from sympy.stats.drv import (ConditionalDiscreteDomain,
DiscretePSpace)
space = DiscretePSpace
domain = ConditionalDiscreteDomain(self.domain, condition)
elif all([pspace(rv).is_Finite for rv in rvs]):
from sympy.stats.frv import FinitePSpace
return FinitePSpace.conditional_space(self, condition)
if normalize:
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting symbols from set to tuple. The order matters to
# Lambda though so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return space(domain, density)
class ProductDomain(RandomDomain):
"""
A domain resulting from the merger of two independent domains
See Also
========
sympy.stats.crv.ProductContinuousDomain
sympy.stats.frv.ProductFiniteDomain
"""
is_ProductDomain = True
def __new__(cls, *domains):
# Flatten any product of products
domains2 = []
for domain in domains:
if not domain.is_ProductDomain:
domains2.append(domain)
else:
domains2.extend(domain.domains)
domains2 = FiniteSet(*domains2)
if all(domain.is_Finite for domain in domains2):
from sympy.stats.frv import ProductFiniteDomain
cls = ProductFiniteDomain
if all(domain.is_Continuous for domain in domains2):
from sympy.stats.crv import ProductContinuousDomain
cls = ProductContinuousDomain
if all(domain.is_Discrete for domain in domains2):
from sympy.stats.drv import ProductDiscreteDomain
cls = ProductDiscreteDomain
return Basic.__new__(cls, *domains2)
@property
def sym_domain_dict(self):
return {symbol: domain for domain in self.domains
for symbol in domain.symbols}
@property
def symbols(self):
return FiniteSet(*[sym for domain in self.domains
for sym in domain.symbols])
@property
def domains(self):
return self.args
@property
def set(self):
return ProductSet(*(domain.set for domain in self.domains))
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset([item for item in other
if sympify(domain.symbols.contains(item[0]))
is S.true])
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
def random_symbols(expr):
"""
Returns all RandomSymbols within a SymPy Expression.
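    The returned list is sorted by the name of the underlying symbol.
    Examples
    ========
    >>> from sympy.stats import Normal
    >>> from sympy.stats.rv import random_symbols
    >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    >>> random_symbols(X + 2*Y + 1)
    [X, Y]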
"""
atoms = getattr(expr, 'atoms', None)
if atoms is not None:
comp = lambda rv: rv.symbol.name
l = list(atoms(RandomSymbol))
return sorted(l, key=comp)
else:
return []
def pspace(expr):
"""
Returns the underlying Probability Space of a random expression.
For internal use.
Examples
========
>>> from sympy.stats import pspace, Normal
>>> X = Normal('X', 0, 1)
>>> pspace(2*X + 1) == X.pspace
True
"""
expr = sympify(expr)
if isinstance(expr, RandomSymbol) and expr.pspace is not None:
return expr.pspace
if expr.has(RandomMatrixSymbol):
rm = list(expr.atoms(RandomMatrixSymbol))[0]
return rm.pspace
rvs = random_symbols(expr)
if not rvs:
raise ValueError("Expression containing Random Variable expected, not %s" % (expr))
# If only one space present
if all(rv.pspace == rvs[0].pspace for rv in rvs):
return rvs[0].pspace
from sympy.stats.compound_rv import CompoundPSpace
for rv in rvs:
if isinstance(rv.pspace, CompoundPSpace):
return rv.pspace
# Otherwise make a product space
return IndependentProductPSpace(*[rv.pspace for rv in rvs])
def sumsets(sets):
"""
Union of sets
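    Returns a single frozenset containing the union of the given sets.
    Examples
    ========
    >>> from sympy.stats.rv import sumsets
    >>> sumsets([{1, 2}, {2, 3}])
    frozenset({1, 2, 3})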
"""
return frozenset().union(*sets)
def rs_swap(a, b):
"""
Build a dictionary to swap RandomSymbols based on their underlying symbol.
i.e.
if ``X = ('x', pspace1)``
and ``Y = ('x', pspace2)``
then ``X`` and ``Y`` match and the key, value pair
``{X:Y}`` will appear in the result
Inputs: collections a and b of random variables which share common symbols
Output: dict mapping RVs in a to RVs in b
"""
d = {}
for rsa in a:
d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0]
return d
def given(expr, condition=None, **kwargs):
r""" Conditional Random Expression
From a random expression and a condition on that expression creates a new
probability space from the condition and returns the same expression on that
conditional probability space.
Examples
========
>>> from sympy.stats import given, density, Die
>>> X = Die('X', 6)
>>> Y = given(X, X > 3)
>>> density(Y).dict
{4: 1/3, 5: 1/3, 6: 1/3}
Following convention, if the condition is a random symbol then that symbol
is considered fixed.
>>> from sympy.stats import Normal
>>> from sympy import pprint
>>> from sympy.abc import z
>>> X = Normal('X', 0, 1)
>>> Y = Normal('Y', 0, 1)
>>> pprint(density(X + Y, Y)(z), use_unicode=False)
2
-(-Y + z)
-----------
___ 2
\/ 2 *e
------------------
____
2*\/ pi
"""
if not is_random(condition) or pspace_independent(expr, condition):
return expr
if isinstance(condition, RandomSymbol):
condition = Eq(condition, condition.symbol)
condsymbols = random_symbols(condition)
if (isinstance(condition, Equality) and len(condsymbols) == 1 and
not isinstance(pspace(expr).domain, ConditionalDomain)):
rv = tuple(condsymbols)[0]
results = solveset(condition, rv)
if isinstance(results, Intersection) and S.Reals in results.args:
results = list(results.args[1])
sums = 0
for res in results:
temp = expr.subs(rv, res)
if temp == True:
return True
if temp != False:
# XXX: This seems nonsensical but preserves existing behaviour
# after the change that Relational is no longer a subclass of
# Expr. Here expr is sometimes Relational and sometimes Expr
# but we are trying to add them with +=. This needs to be
# fixed somehow.
if sums == 0 and isinstance(expr, Relational):
sums = expr.subs(rv, res)
else:
sums += expr.subs(rv, res)
if sums == 0:
return False
return sums
# Get full probability space of both the expression and the condition
fullspace = pspace(Tuple(expr, condition))
# Build new space given the condition
space = fullspace.conditional_space(condition, **kwargs)
# Dictionary to swap out RandomSymbols in expr with new RandomSymbols
# That point to the new conditional space
swapdict = rs_swap(fullspace.values, space.values)
# Swap random variables in the expression
expr = expr.xreplace(swapdict)
return expr
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
    condition : Expr containing RandomSymbols
        A conditional expression. E(X, X>0) is the expectation of X given X > 0
numsamples : int
Enables sampling and approximates the expectation with this many samples
    evalf : Bool (defaults to True)
        If sampling, return a number rather than a complicated expression
    evaluate : Bool (defaults to True)
        If False, return the unevaluated expectation (e.g. an unevaluated
        integral for continuous systems)
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not is_random(expr): # expr isn't random?
return expr
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Expectation
if evaluate:
return Expectation(expr, condition).doit(**kwargs)
return Expectation(expr, condition)
def probability(condition, given_condition=None, numsamples=None,
evaluate=True, **kwargs):
"""
Probability that a condition is true, optionally given a second condition
Parameters
==========
condition : Combination of Relationals containing RandomSymbols
The condition of which you want to compute the probability
given_condition : Combination of Relationals containing RandomSymbols
        A conditional expression. P(X > 1, X > 0) is the probability of X > 1
        given X > 0
numsamples : int
Enables sampling and approximates the probability with this many samples
    evaluate : Bool (defaults to True)
        If False, return an unevaluated Probability object (for continuous
        systems this avoids evaluating the underlying integral)
Examples
========
>>> from sympy.stats import P, Die
>>> from sympy import Eq
>>> X, Y = Die('X', 6), Die('Y', 6)
>>> P(X > 3)
1/2
>>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2
1/4
>>> P(X > Y)
5/12
"""
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Probability
if evaluate:
return Probability(condition, given_condition).doit(**kwargs)
### TODO: Remove the user warnings in the future releases
message = ("Since version 1.7, using `evaluate=False` returns `Probability` "
"object. If you want unevaluated Integral/Sum use "
"`P(condition, given_condition, evaluate=False).rewrite(Integral)`")
warnings.warn(filldedent(message))
return Probability(condition, given_condition)
class Density(Basic):
expr = property(lambda self: self.args[0])
@property
def condition(self):
if len(self.args) > 1:
return self.args[1]
else:
return None
def doit(self, evaluate=True, **kwargs):
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.stats.joint_rv import JointPSpace
from sympy.stats.matrix_distributions import MatrixPSpace
from sympy.stats.compound_rv import CompoundPSpace
from sympy.stats.frv import SingleFiniteDistribution
expr, condition = self.expr, self.condition
if isinstance(expr, SingleFiniteDistribution):
return expr.dict
if condition is not None:
# Recompute on new conditional expr
expr = given(expr, condition, **kwargs)
if not random_symbols(expr):
return Lambda(x, DiracDelta(x - expr))
if isinstance(expr, RandomSymbol):
if isinstance(expr.pspace, (SinglePSpace, JointPSpace, MatrixPSpace)) and \
hasattr(expr.pspace, 'distribution'):
return expr.pspace.distribution
elif isinstance(expr.pspace, RandomMatrixPSpace):
return expr.pspace.model
if isinstance(pspace(expr), CompoundPSpace):
kwargs['compound_evaluate'] = evaluate
result = pspace(expr).compute_density(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs):
"""
Probability density of a random expression, optionally given a second
condition.
This density will take on different forms for different types of
probability spaces. Discrete variables produce Dicts. Continuous
variables produce Lambdas.
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the density value
condition : Relational containing RandomSymbols
A conditional expression. density(X > 1, X > 0) is density of X > 1
given X > 0
numsamples : int
Enables sampling and approximates the density with this many samples
Examples
========
>>> from sympy.stats import density, Die, Normal
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> D = Die('D', 6)
>>> X = Normal(x, 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> density(2*D).dict
{2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6}
>>> density(X)(x)
sqrt(2)*exp(-x**2/2)/(2*sqrt(pi))
"""
if numsamples:
return sampling_density(expr, condition, numsamples=numsamples,
**kwargs)
return Density(expr, condition).doit(evaluate=evaluate, **kwargs)
def cdf(expr, condition=None, evaluate=True, **kwargs):
"""
    Cumulative Distribution Function of a random expression,
    optionally given a second condition.
This density will take on different forms for different types of
probability spaces.
Discrete variables produce Dicts.
Continuous variables produce Lambdas.
Examples
========
>>> from sympy.stats import density, Die, Normal, cdf
>>> D = Die('D', 6)
>>> X = Normal('X', 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> cdf(D)
{1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1}
>>> cdf(3*D, D > 2)
{9: 1/4, 12: 1/2, 15: 3/4, 18: 1}
>>> cdf(X)
Lambda(_z, erf(sqrt(2)*_z/2)/2 + 1/2)
"""
if condition is not None: # If there is a condition
# Recompute on new conditional expr
return cdf(given(expr, condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
result = pspace(expr).compute_cdf(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def characteristic_function(expr, condition=None, evaluate=True, **kwargs):
"""
Characteristic function of a random expression, optionally given a second condition
Returns a Lambda
Examples
========
>>> from sympy.stats import Normal, DiscreteUniform, Poisson, characteristic_function
>>> X = Normal('X', 0, 1)
>>> characteristic_function(X)
Lambda(_t, exp(-_t**2/2))
>>> Y = DiscreteUniform('Y', [1, 2, 7])
>>> characteristic_function(Y)
Lambda(_t, exp(7*_t*I)/3 + exp(2*_t*I)/3 + exp(_t*I)/3)
>>> Z = Poisson('Z', 2)
>>> characteristic_function(Z)
Lambda(_t, exp(2*exp(_t*I) - 2))
"""
if condition is not None:
return characteristic_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_characteristic_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def moment_generating_function(expr, condition=None, evaluate=True, **kwargs):
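    """
    Moment generating function of a random expression, optionally given a
    second condition.
    Returns a Lambda.
    Examples
    ========
    >>> from sympy.stats import Normal, moment_generating_function
    >>> X = Normal('X', 0, 1)
    >>> moment_generating_function(X)
    Lambda(_t, exp(_t**2/2))
    """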
if condition is not None:
return moment_generating_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_moment_generating_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def where(condition, given_condition=None, **kwargs):
"""
Returns the domain where a condition is True.
Examples
========
>>> from sympy.stats import where, Die, Normal
>>> from sympy import And
>>> D1, D2 = Die('a', 6), Die('b', 6)
>>> a, b = D1.symbol, D2.symbol
>>> X = Normal('x', 0, 1)
>>> where(X**2<1)
Domain: (-1 < x) & (x < 1)
>>> where(X**2<1).set
Interval.open(-1, 1)
>>> where(And(D1<=D2 , D2<3))
Domain: (Eq(a, 1) & Eq(b, 1)) | (Eq(a, 1) & Eq(b, 2)) | (Eq(a, 2) & Eq(b, 2))
"""
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return where(given(condition, given_condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
return pspace(condition).where(condition, **kwargs)
def sample(expr, condition=None, size=(), library='scipy', numsamples=1,
**kwargs):
"""
A realization of the random expression
Parameters
==========
expr : Expression of random variables
Expression from which sample is extracted
condition : Expr containing RandomSymbols
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
library : str
- 'scipy' : Sample using scipy
- 'numpy' : Sample using numpy
- 'pymc3' : Sample using PyMC3
Choose any of the available options to sample from as string,
by default is 'scipy'
numsamples : int
Number of samples, each with size as ``size``
Examples
========
>>> from sympy.stats import Die, sample, Normal, Geometric
>>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) # Finite Random Variable
>>> die_roll = sample(X + Y + Z) # doctest: +SKIP
>>> next(die_roll) # doctest: +SKIP
6
>>> N = Normal('N', 3, 4) # Continuous Random Variable
>>> samp = next(sample(N)) # doctest: +SKIP
>>> samp in N.pspace.domain.set # doctest: +SKIP
True
>>> samp = next(sample(N, N>0)) # doctest: +SKIP
>>> samp > 0 # doctest: +SKIP
True
>>> samp_list = next(sample(N, size=4)) # doctest: +SKIP
>>> [sam in N.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True, True]
>>> G = Geometric('G', 0.5) # Discrete Random Variable
>>> samp_list = next(sample(G, size=3)) # doctest: +SKIP
>>> samp_list # doctest: +SKIP
array([10, 4, 1])
>>> [sam in G.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True]
>>> MN = Normal("MN", [3, 4], [[2, 1], [1, 2]]) # Joint Random Variable
>>> samp_list = next(sample(MN, size=4)) # doctest: +SKIP
>>> samp_list # doctest: +SKIP
array([[4.22564264, 3.23364418],
[3.41002011, 4.60090908],
[3.76151866, 4.77617143],
[4.71440865, 2.65714157]])
>>> [tuple(sam) in MN.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True, True]
Returns
=======
sample: iterator object
iterator object containing the sample/samples of given expr
"""
### TODO: Remove the user warnings in the future releases
message = ("The return type of sample has been changed to return an "
"iterator object since version 1.7. For more information see "
"https://github.com/sympy/sympy/issues/19061")
warnings.warn(filldedent(message))
return sample_iter(expr, condition, size=size, library=library,
numsamples=numsamples)
def quantile(expr, evaluate=True, **kwargs):
r"""
Return the :math:`p^{th}` order quantile of a probability distribution.
    The quantile is defined as the smallest value of the random variable at
    which the cumulative distribution function equals or exceeds the given
    probability.
    .. math::
        Q(p) = \inf\{x \in (-\infty, \infty) : p \le F(x)\}
Examples
========
>>> from sympy.stats import quantile, Die, Exponential
>>> from sympy import Symbol, pprint
>>> p = Symbol("p")
>>> l = Symbol("lambda", positive=True)
>>> X = Exponential("x", l)
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> D = Die("d", 6)
>>> pprint(quantile(D)(p), use_unicode=False)
/nan for Or(p > 1, p < 0)
|
| 1 for p <= 1/6
|
| 2 for p <= 1/3
|
< 3 for p <= 1/2
|
| 4 for p <= 2/3
|
| 5 for p <= 5/6
|
\ 6 for p <= 1
"""
result = pspace(expr).compute_quantile(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def sample_iter(expr, condition=None, size=(), library='scipy',
numsamples=S.Infinity, **kwargs):
"""
Returns an iterator of realizations from the expression given a condition
Parameters
==========
expr: Expr
Random expression to be realized
condition: Expr, optional
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
numsamples: integer, optional
Length of the iterator (defaults to infinity)
Examples
========
>>> from sympy.stats import Normal, sample_iter
>>> X = Normal('X', 0, 1)
>>> expr = X*X + 3
>>> iterator = sample_iter(expr, numsamples=3) # doctest: +SKIP
>>> list(iterator) # doctest: +SKIP
[12, 4, 7]
Returns
=======
sample_iter: iterator object
iterator object containing the sample/samples of given expr
See Also
========
sample
sampling_P
sampling_E
"""
from sympy.stats.joint_rv import JointRandomSymbol
if not import_module(library):
raise ValueError("Failed to import %s" % library)
if condition is not None:
ps = pspace(Tuple(expr, condition))
else:
ps = pspace(expr)
rvs = list(ps.values)
if isinstance(expr, JointRandomSymbol):
expr = expr.subs({expr: RandomSymbol(expr.symbol, expr.pspace)})
else:
sub = {}
for arg in expr.args:
if isinstance(arg, JointRandomSymbol):
sub[arg] = RandomSymbol(arg.symbol, arg.pspace)
expr = expr.subs(sub)
if library == 'pymc3':
# Currently unable to lambdify in pymc3
# TODO : Remove 'pymc3' when lambdify accepts 'pymc3' as module
fn = lambdify(rvs, expr, **kwargs)
else:
fn = lambdify(rvs, expr, modules=library, **kwargs)
if condition is not None:
given_fn = lambdify(rvs, condition, **kwargs)
def return_generator():
count = 0
while count < numsamples:
d = ps.sample(size=size, library=library) # a dictionary that maps RVs to values
args = [d[rv] for rv in rvs]
if condition is not None: # Check that these values satisfy the condition
gd = given_fn(*args)
if gd != True and gd != False:
raise ValueError(
"Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
continue
yield fn(*args)
count += 1
return return_generator()
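# The two wrappers below simply delegate to sample_iter with the same
# arguments (presumably kept for backwards compatibility).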
def sample_iter_lambdify(expr, condition=None, size=(), numsamples=S.Infinity,
**kwargs):
return sample_iter(expr, condition=condition, size=size, numsamples=numsamples,
**kwargs)
def sample_iter_subs(expr, condition=None, size=(), numsamples=S.Infinity,
**kwargs):
return sample_iter(expr, condition=condition, size=size, numsamples=numsamples,
**kwargs)
def sampling_P(condition, given_condition=None, library='scipy', numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of P
See Also
========
P
sampling_E
sampling_density
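    Examples
    ========
    Sampled estimates vary from run to run, so the output below is only
    indicative:
    >>> from sympy.stats import Die, sampling_P
    >>> X = Die('X', 6)
    >>> sampling_P(X > 3, numsamples=100) # doctest: +SKIP
    0.510000000000000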
"""
count_true = 0
count_false = 0
samples = sample_iter(condition, given_condition, library=library,
numsamples=numsamples, **kwargs)
for sample in samples:
if sample:
count_true += 1
else:
count_false += 1
result = S(count_true) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_E(expr, given_condition=None, library='scipy', numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of E
See Also
========
P
sampling_P
sampling_density
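    Examples
    ========
    Sampled estimates vary from run to run, so the output below is only
    indicative:
    >>> from sympy.stats import Die, sampling_E
    >>> X = Die('X', 6)
    >>> sampling_E(X, numsamples=10) # doctest: +SKIP
    3.50000000000000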
"""
samples = list(sample_iter(expr, given_condition, library=library,
numsamples=numsamples, **kwargs))
result = Add(*[samp for samp in samples]) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_density(expr, given_condition=None, library='scipy',
numsamples=1, **kwargs):
"""
Sampling version of density
See Also
========
density
sampling_P
sampling_E
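    Examples
    ========
    The counts below depend on the drawn samples and are only indicative:
    >>> from sympy.stats import Die, sampling_density
    >>> X = Die('X', 6)
    >>> sampling_density(X, numsamples=10) # doctest: +SKIP
    {1: 2, 2: 2, 3: 1, 4: 2, 5: 2, 6: 1}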
"""
results = {}
for result in sample_iter(expr, given_condition, library=library,
numsamples=numsamples, **kwargs):
results[result] = results.get(result, 0) + 1
return results
def dependent(a, b):
"""
Dependence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, dependent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> dependent(X, Y)
False
>>> dependent(2*X + Y, -Y)
True
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> dependent(X, Y)
True
See Also
========
independent
"""
if pspace_independent(a, b):
return False
z = Symbol('z', real=True)
# Dependent if density is unchanged when one is given information about
# the other
return (density(a, Eq(b, z)) != density(a) or
density(b, Eq(a, z)) != density(b))
def independent(a, b):
"""
Independence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, independent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> independent(X, Y)
True
>>> independent(2*X + Y, -Y)
False
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> independent(X, Y)
False
See Also
========
dependent
"""
return not dependent(a, b)
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
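    Examples
    ========
    Two unrelated normals live in disjoint probability spaces, while
    expressions that share a random symbol do not:
    >>> from sympy.stats import Normal
    >>> from sympy.stats.rv import pspace_independent
    >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    >>> pspace_independent(X, Y)
    True
    >>> pspace_independent(X + Y, Y)
    False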
"""
a_symbols = set(pspace(b).symbols)
b_symbols = set(pspace(a).symbols)
if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0:
return False
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None
def rv_subs(expr, symbols=None):
"""
Given a random expression replace all random variables with their symbols.
If symbols keyword is given restrict the swap to only the symbols listed.
"""
if symbols is None:
symbols = random_symbols(expr)
if not symbols:
return expr
swapdict = {rv: rv.symbol for rv in symbols}
return expr.subs(swapdict)
class NamedArgsMixin:
_argnames = () # type: tTuple[str, ...]
def __getattr__(self, attr):
try:
return self.args[self._argnames.index(attr)]
except ValueError:
raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
def _value_check(condition, message):
"""
    Raise a ValueError with message if condition is False; otherwise return
    True if all conditions were True, else False.
Examples
========
>>> from sympy.stats.rv import _value_check
>>> from sympy.abc import a, b, c
>>> from sympy import And, Dummy
>>> _value_check(2 < 3, '')
True
Here, the condition is not False, but it doesn't evaluate to True
so False is returned (but no error is raised). So checking if the
return value is True or False will tell you if all conditions were
evaluated.
>>> _value_check(a < b, '')
False
In this case the condition is False so an error is raised:
>>> r = Dummy(real=True)
>>> _value_check(r < r - 1, 'condition is not true')
Traceback (most recent call last):
...
ValueError: condition is not true
    Multiple conditions, none of which may be False, can be checked by
    passing them as an iterable:
>>> _value_check((a < 0, b < 0, c < 0), '')
False
The iterable can be a generator, too:
>>> _value_check((i < 0 for i in (a, b, c)), '')
False
The following are equivalent to the above but do not pass
an iterable:
>>> all(_value_check(i < 0, '') for i in (a, b, c))
False
>>> _value_check(And(a < 0, b < 0, c < 0), '')
False
"""
from sympy.core.compatibility import iterable
from sympy.core.logic import fuzzy_and
if not iterable(condition):
condition = [condition]
truth = fuzzy_and(condition)
if truth == False:
raise ValueError(message)
return truth == True
def _symbol_converter(sym):
"""
    Casts the parameter to a Symbol if it is a string;
    otherwise it is returned unchanged.
Parameters
==========
sym
The parameter to be converted.
Returns
=======
Symbol
the parameter converted to Symbol.
Raises
======
    TypeError
        If the parameter is neither a string nor a Symbol.
Examples
========
>>> from sympy import Symbol
>>> from sympy.stats.rv import _symbol_converter
>>> s = _symbol_converter('s')
>>> isinstance(s, Symbol)
True
>>> _symbol_converter(1)
Traceback (most recent call last):
...
TypeError: 1 is neither a Symbol nor a string
>>> r = Symbol('r')
>>> isinstance(r, Symbol)
True
"""
if isinstance(sym, str):
sym = Symbol(sym)
if not isinstance(sym, Symbol):
raise TypeError("%s is neither a Symbol nor a string"%(sym))
return sym
def sample_stochastic_process(process):
"""
    This function is used to sample from a stochastic process.
Parameters
==========
process: StochasticProcess
Process used to extract the samples. It must be an instance of
StochasticProcess
Examples
========
>>> from sympy.stats import sample_stochastic_process, DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> next(sample_stochastic_process(Y)) in Y.state_space # doctest: +SKIP
True
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
0
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
2
Returns
=======
sample: iterator object
iterator object containing the sample of given process
"""
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise ValueError("Process must be an instance of Stochastic Process")
return process.sample()
from sympy import (Basic, sympify, symbols, Dummy, Lambda, summation,
Piecewise, S, cacheit, Sum, exp, I, Ne, Eq, poly,
series, factorial, And, lambdify, floor)
from sympy.polys.polyerrors import PolynomialError
from sympy.stats.crv import reduce_rational_inequalities_wrap
from sympy.stats.rv import (NamedArgsMixin, SinglePSpace, SingleDomain,
random_symbols, PSpace, ConditionalDomain, RandomDomain,
ProductDomain)
from sympy.stats.symbolic_probability import Probability
from sympy.sets.fancysets import Range, FiniteSet
from sympy.sets.sets import Union
from sympy.sets.contains import Contains
from sympy.utilities import filldedent
from sympy.core.sympify import _sympify
from sympy.external import import_module
class DiscreteDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SampleDiscreteScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
from scipy import stats as scipy_stats
scipy_rv_map = {
'GeometricDistribution': lambda dist, size: scipy_stats.geom.rvs(p=float(dist.p),
size=size),
'LogarithmicDistribution': lambda dist, size: scipy_stats.logser.rvs(p=float(dist.p),
size=size),
'NegativeBinomialDistribution': lambda dist, size: scipy_stats.nbinom.rvs(n=float(dist.r),
p=float(dist.p), size=size),
'PoissonDistribution': lambda dist, size: scipy_stats.poisson.rvs(mu=float(dist.lamda),
size=size),
'SkellamDistribution': lambda dist, size: scipy_stats.skellam.rvs(mu1=float(dist.mu1),
mu2=float(dist.mu2), size=size),
'YuleSimonDistribution': lambda dist, size: scipy_stats.yulesimon.rvs(alpha=float(dist.rho),
size=size),
'ZetaDistribution': lambda dist, size: scipy_stats.zipf.rvs(a=float(dist.s),
size=size)
}
dist_list = scipy_rv_map.keys()
if dist.__class__.__name__ == 'DiscreteDistributionHandmade':
from scipy.stats import rv_discrete
z = Dummy('z')
handmade_pmf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pmf(rv_discrete):
def _pmf(self, x):
return handmade_pmf(x)
scipy_rv = scipy_pmf(a=float(dist.set._inf), b=float(dist.set._sup),
name='scipy_pmf')
return scipy_rv.rvs(size=size)
if dist.__class__.__name__ not in dist_list:
return None
return scipy_rv_map[dist.__class__.__name__](dist, size)
class SampleDiscreteNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'GeometricDistribution': lambda dist, size: numpy.random.geometric(p=float(dist.p),
size=size),
'PoissonDistribution': lambda dist, size: numpy.random.poisson(lam=float(dist.lamda),
size=size),
'ZetaDistribution': lambda dist, size: numpy.random.zipf(a=float(dist.s),
size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleDiscretePymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'GeometricDistribution': lambda dist: pymc3.Geometric('X', p=float(dist.p)),
'PoissonDistribution': lambda dist: pymc3.Poisson('X', mu=float(dist.lamda)),
'NegativeBinomialDistribution': lambda dist: pymc3.NegativeBinomial('X',
mu=float((dist.p*dist.r)/(1-dist.p)), alpha=float(dist.r))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_drv = {
'scipy': SampleDiscreteScipy,
'pymc3': SampleDiscretePymc,
'numpy': SampleDiscreteNumpy
}
class SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin):
""" Discrete distribution of a single variable
Serves as superclass for PoissonDistribution etc....
Provides methods for pdf, cdf, and sampling
See Also:
        sympy.stats.drv_types.*
"""
set = S.Integers
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self, size=(), library='scipy'):
""" A random realization from the distribution"""
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_drv[library](self, size)
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x = symbols('x', integer=True, cls=Dummy)
z = symbols('z', real=True, cls=Dummy)
left_bound = self.set.inf
        # CDF is the sum of the PDF from the left bound up to floor(z)
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, floor(z)), **kwargs)
        # Ensure that the CDF left of left_bound is zero
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
cf = summation(exp(I*t*x)*pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if not kwargs:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
t = Dummy('t', real=True)
x = Dummy('x', integer=True)
pdf = self.pdf(x)
mgf = summation(exp(t*x)*pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x = Dummy('x', integer=True)
p = Dummy('p', real=True)
left_bound = self.set.inf
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, x), **kwargs)
set = ((x, p <= cdf), )
return Lambda(p, Piecewise(*set))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
# TODO: support discrete sets with non integer stepsizes
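        # For polynomial expressions the moments can be read off the Taylor
        # expansion of the moment generating function, since
        # E[x**k] = k! * [t**k] mgf(t); E[p(x)] then follows by linearity.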
if evaluate:
try:
p = poly(expr, var)
t = Dummy('t', real=True)
mgf = self.moment_generating_function(t)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
result = 0
for k in range(deg+1):
result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)
return result
except PolynomialError:
return summation(expr * self.pdf(var),
(var, self.set.inf, self.set.sup), **kwargs)
else:
return Sum(expr * self.pdf(var),
(var, self.set.inf, self.set.sup), **kwargs)
def __call__(self, *args):
return self.pdf(*args)
class DiscreteDomain(RandomDomain):
"""
A domain with discrete support with step size one.
Represented using symbols and Range.
"""
is_Discrete = True
class SingleDiscreteDomain(DiscreteDomain, SingleDomain):
def as_boolean(self):
return Contains(self.symbol, self.set)
class ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain):
"""
Domain with discrete support of step size one, that is restricted by
some condition.
"""
@property
def set(self):
rv = self.symbols
if len(self.symbols) > 1:
raise NotImplementedError(filldedent('''
Multivariate conditional domains are not yet implemented.'''))
rv = list(rv)[0]
return reduce_rational_inequalities_wrap(self.condition,
rv).intersect(self.fulldomain.set)
class DiscretePSpace(PSpace):
is_real = True
is_Discrete = True
@property
def pdf(self):
return self.density(*self.symbols)
def where(self, condition):
rvs = random_symbols(condition)
assert all(r.symbol in self.symbols for r in rvs)
if len(rvs) > 1:
raise NotImplementedError(filldedent('''Multivariate discrete
random variables are not yet supported.'''))
conditional_domain = reduce_rational_inequalities_wrap(condition,
rvs[0])
conditional_domain = conditional_domain.intersect(self.domain.set)
return SingleDiscreteDomain(rvs[0].symbol, conditional_domain)
def probability(self, condition):
complement = isinstance(condition, Ne)
if complement:
condition = Eq(condition.args[0], condition.args[1])
try:
_domain = self.where(condition).set
if condition == False or _domain is S.EmptySet:
return S.Zero
if condition == True or _domain == self.domain.set:
return S.One
prob = self.eval_prob(_domain)
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
dens = density(expr)
if not isinstance(dens, DiscreteDistribution):
from sympy.stats.drv_types import DiscreteDistributionHandmade
dens = DiscreteDistributionHandmade(dens)
z = Dummy('z', real=True)
space = SingleDiscretePSpace(z, dens)
prob = space.probability(condition.__class__(space.value, 0))
if prob is None:
prob = Probability(condition)
return prob if not complement else S.One - prob
def eval_prob(self, _domain):
sym = list(self.symbols)[0]
if isinstance(_domain, Range):
n = symbols('n', integer=True)
inf, sup, step = (r for r in _domain.args)
summand = ((self.pdf).replace(
sym, n*step))
rv = summation(summand,
(n, inf/step, (sup)/step - 1)).doit()
return rv
elif isinstance(_domain, FiniteSet):
pdf = Lambda(sym, self.pdf)
rv = sum(pdf(x) for x in _domain)
return rv
elif isinstance(_domain, Union):
rv = sum(self.eval_prob(x) for x in _domain.args)
return rv
def conditional_space(self, condition):
# XXX: Converting from set to tuple. The order matters to Lambda
        # though so we shouldn't be starting with a set here...
density = Lambda(tuple(self.symbols), self.pdf/self.probability(condition))
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
domain = ConditionalDiscreteDomain(self.domain, condition)
return DiscretePSpace(domain, density)
class ProductDiscreteDomain(ProductDomain, DiscreteDomain):
def as_boolean(self):
        return And(*[domain.as_boolean() for domain in self.domains])
class SingleDiscretePSpace(DiscretePSpace, SinglePSpace):
""" Discrete probability space over a single univariate variable """
is_real = True
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleDiscreteDomain(self.symbol, self.set)
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample(size, library=library)}
def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate,
**kwargs)
except NotImplementedError:
return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup),
**kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
x = Dummy("x", real=True)
return Lambda(x, self.distribution.cdf(x, **kwargs))
else:
raise NotImplementedError()
def compute_density(self, expr, **kwargs):
if expr == self.value:
return self.distribution
raise NotImplementedError()
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
raise NotImplementedError()
"""
Continuous Random Variables Module
See Also
========
sympy.stats.crv_types
sympy.stats.rv
sympy.stats.frv
"""
from sympy import (Interval, Intersection, symbols, sympify, Dummy, nan,
Integral, And, Or, Piecewise, cacheit, integrate, oo, Lambda,
Basic, S, exp, I, FiniteSet, Ne, Eq, Union, poly, series, factorial,
lambdify)
from sympy.core.function import PoleError
from sympy.functions.special.delta_functions import DiracDelta
from sympy.polys.polyerrors import PolynomialError
from sympy.solvers.solveset import solveset
from sympy.solvers.inequalities import reduce_rational_inequalities
from sympy.core.sympify import _sympify
from sympy.external import import_module
from sympy.stats.rv import (RandomDomain, SingleDomain, ConditionalDomain, is_random,
ProductDomain, PSpace, SinglePSpace, random_symbols, NamedArgsMixin)
class ContinuousDomain(RandomDomain):
"""
A domain with continuous support
Represented using symbols and Intervals.
"""
is_Continuous = True
def as_boolean(self):
raise NotImplementedError("Not Implemented for generic Domains")
class SingleContinuousDomain(ContinuousDomain, SingleDomain):
"""
A univariate domain with continuous support
Represented using a single symbol and interval.
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
if not variables:
return expr
if frozenset(variables) != frozenset(self.symbols):
raise ValueError("Values should be equal")
# assumes only intervals
return Integral(expr, (self.symbol, self.set), **kwargs)
def as_boolean(self):
return self.set.as_relational(self.symbol)
class ProductContinuousDomain(ProductDomain, ContinuousDomain):
"""
A collection of independent domains with continuous support
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
for domain in self.domains:
domain_vars = frozenset(variables) & frozenset(domain.symbols)
if domain_vars:
expr = domain.compute_expectation(expr, domain_vars, **kwargs)
return expr
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
class ConditionalContinuousDomain(ContinuousDomain, ConditionalDomain):
"""
A domain with continuous support that has been further restricted by a
condition such as x > 3
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
if not variables:
return expr
# Extract the full integral
fullintgrl = self.fulldomain.compute_expectation(expr, variables)
# separate into integrand and limits
integrand, limits = fullintgrl.function, list(fullintgrl.limits)
conditions = [self.condition]
while conditions:
cond = conditions.pop()
if cond.is_Boolean:
if isinstance(cond, And):
conditions.extend(cond.args)
elif isinstance(cond, Or):
raise NotImplementedError("Or not implemented here")
elif cond.is_Relational:
if cond.is_Equality:
# Add the appropriate Delta to the integrand
integrand *= DiracDelta(cond.lhs - cond.rhs)
else:
symbols = cond.free_symbols & set(self.symbols)
if len(symbols) != 1: # Can't handle x > y
raise NotImplementedError(
"Multivariate Inequalities not yet implemented")
# Can handle x > 0
symbol = symbols.pop()
# Find the limit with x, such as (x, -oo, oo)
for i, limit in enumerate(limits):
if limit[0] == symbol:
# Make condition into an Interval like [0, oo]
cintvl = reduce_rational_inequalities_wrap(
cond, symbol)
# Make limit into an Interval like [-oo, oo]
lintvl = Interval(limit[1], limit[2])
# Intersect them to get [0, oo]
intvl = cintvl.intersect(lintvl)
# Put back into limits list
limits[i] = (symbol, intvl.left, intvl.right)
else:
raise TypeError(
"Condition %s is not a relational or Boolean" % cond)
return Integral(integrand, *limits, **kwargs)
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
@property
def set(self):
if len(self.symbols) == 1:
return (self.fulldomain.set & reduce_rational_inequalities_wrap(
self.condition, tuple(self.symbols)[0]))
else:
raise NotImplementedError(
"Set of Conditional Domain not Implemented")
class ContinuousDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SampleContinuousScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
import scipy.stats
        # scipy can handle custom distributions directly, so a map is not
        # strictly required; we still use one where a named scipy RV exists.
        # TODO: do the same for drv.py and frv.py if necessary.
        # TODO: add more distributions here as they become available.
        # The links below point to the relevant scipy parametrizations.
scipy_rv_map = {
'BetaDistribution': lambda dist, size: scipy.stats.beta.rvs(
a=float(dist.alpha), b=float(dist.beta), size=size),
# same parametrisation
'CauchyDistribution': lambda dist, size: scipy.stats.cauchy.rvs(
loc=float(dist.x0), scale=float(dist.gamma), size=size),
# same parametrisation
'ChiSquaredDistribution': lambda dist, size: scipy.stats.chi2.rvs(
df=float(dist.k), size=size),
# same parametrisation
'ExponentialDistribution': lambda dist, size: scipy.stats.expon.rvs(
scale=1 / float(dist.rate), size=size),
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html#scipy.stats.expon
'GammaDistribution': lambda dist, size: scipy.stats.gamma.rvs(
a=float(dist.k), scale=float(dist.theta), size=size),
# https://stackoverflow.com/questions/42150965/how-to-plot-gamma-distribution-with-alpha-and-beta-parameters-in-python
'LogNormalDistribution': lambda dist, size: scipy.stats.lognorm.rvs(
scale=float(exp(dist.mean)), s=float(dist.std), size=size),
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.lognorm.html
'NormalDistribution': lambda dist, size: scipy.stats.norm.rvs(
loc=float(dist.mean), scale=float(dist.std), size=size),
# same parametrisation
'ParetoDistribution': lambda dist, size: scipy.stats.pareto.rvs(
b=float(dist.alpha), scale=float(dist.xm), size=size),
# https://stackoverflow.com/questions/42260519/defining-pareto-distribution-in-python-scipy
'StudentTDistribution': lambda dist, size: scipy.stats.t.rvs(
df=float(dist.nu), size=size),
# same parametrisation
'UniformDistribution': lambda dist, size: scipy.stats.uniform.rvs(
loc=float(dist.left), scale=float(dist.right - dist.left), size=size)
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.uniform.html
}
# if we don't need to make a handmade pdf, we won't
if dist.__class__.__name__ in scipy_rv_map:
return scipy_rv_map[dist.__class__.__name__](dist, size)
z = Dummy('z')
handmade_pdf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pdf(scipy.stats.rv_continuous):
def _pdf(self, x):
return handmade_pdf(x)
scipy_rv = scipy_pdf(a=float(dist.set._inf),
b=float(dist.set._sup), name='scipy_pdf')
return scipy_rv.rvs(size=size)
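# Illustrative sketch (for exposition only, not used by this module) of the
# handmade-pdf fallback above; ``my_pdf`` and the density 2*x on [0, 1] are
# made-up names, while scipy.stats.rv_continuous and its _pdf hook are the
# documented SciPy extension points that _sample_scipy relies on:
#     >>> import scipy.stats
#     >>> class my_pdf(scipy.stats.rv_continuous):
#     ...     def _pdf(self, x):
#     ...         return 2*x
#     >>> my_pdf(a=0.0, b=1.0, name='my_pdf').rvs(size=3)  # doctest: +SKIP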
class SampleContinuousNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'BetaDistribution': lambda dist, size: numpy.random.beta(a=float(dist.alpha),
b=float(dist.beta), size=size),
'ChiSquaredDistribution': lambda dist, size: numpy.random.chisquare(
df=float(dist.k), size=size),
'ExponentialDistribution': lambda dist, size: numpy.random.exponential(
1/float(dist.rate), size=size),
'GammaDistribution': lambda dist, size: numpy.random.gamma(float(dist.k),
float(dist.theta), size=size),
'LogNormalDistribution': lambda dist, size: numpy.random.lognormal(
float(dist.mean), float(dist.std), size=size),
'NormalDistribution': lambda dist, size: numpy.random.normal(
float(dist.mean), float(dist.std), size=size),
'ParetoDistribution': lambda dist, size: (numpy.random.pareto(
a=float(dist.alpha), size=size) + 1) * float(dist.xm),
'UniformDistribution': lambda dist, size: numpy.random.uniform(
low=float(dist.left), high=float(dist.right), size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleContinuousPymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'BetaDistribution': lambda dist:
pymc3.Beta('X', alpha=float(dist.alpha), beta=float(dist.beta)),
'CauchyDistribution': lambda dist:
pymc3.Cauchy('X', alpha=float(dist.x0), beta=float(dist.gamma)),
'ChiSquaredDistribution': lambda dist:
pymc3.ChiSquared('X', nu=float(dist.k)),
'ExponentialDistribution': lambda dist:
pymc3.Exponential('X', lam=float(dist.rate)),
'GammaDistribution': lambda dist:
pymc3.Gamma('X', alpha=float(dist.k), beta=1/float(dist.theta)),
'LogNormalDistribution': lambda dist:
pymc3.Lognormal('X', mu=float(dist.mean), sigma=float(dist.std)),
'NormalDistribution': lambda dist:
pymc3.Normal('X', float(dist.mean), float(dist.std)),
'GaussianInverseDistribution': lambda dist:
pymc3.Wald('X', mu=float(dist.mean), lam=float(dist.shape)),
'ParetoDistribution': lambda dist:
pymc3.Pareto('X', alpha=float(dist.alpha), m=float(dist.xm)),
'UniformDistribution': lambda dist:
pymc3.Uniform('X', lower=float(dist.left), upper=float(dist.right))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_crv = {
'scipy': SampleContinuousScipy,
'pymc3': SampleContinuousPymc,
'numpy': SampleContinuousNumpy
}
class SingleContinuousDistribution(ContinuousDistribution, NamedArgsMixin):
""" Continuous distribution of a single variable
    Serves as superclass for NormalDistribution, ExponentialDistribution,
    UniformDistribution, etc. Each specific distribution is represented by
    the parameters of its class, e.g. NormalDistribution is represented by
    a mean and a standard deviation.
Provides methods for pdf, cdf, and sampling
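    Examples
    ========
    A brief illustrative sketch (for illustration only) using the public
    interface in sympy.stats, which is built on subclasses of this class;
    the printed form is indicative:
    >>> from sympy.stats import Normal, density
    >>> from sympy.abc import x
    >>> X = Normal('X', 0, 1)
    >>> density(X)(x)  # doctest: +SKIP
    sqrt(2)*exp(-x**2/2)/(2*sqrt(pi))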
See Also
========
sympy.stats.crv_types.*
"""
set = Interval(-oo, oo)
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self, size=(), library='scipy'):
""" A random realization from the distribution """
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_crv[library](self, size)
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x, z = symbols('x, z', real=True, cls=Dummy)
left_bound = self.set.start
# CDF is integral of PDF from left bound to z
pdf = self.pdf(x)
cdf = integrate(pdf.doit(), (x, left_bound, z), **kwargs)
        # Ensure that the CDF is zero to the left of left_bound
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if len(kwargs) == 0:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
cf = integrate(exp(I*t*x)*pdf, (x, self.set))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if len(kwargs) == 0:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
""" Compute the moment generating function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
mgf = integrate(exp(t * x) * pdf, (x, self.set))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
""" Moment generating function """
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
if evaluate:
try:
p = poly(expr, var)
t = Dummy('t', real=True)
mgf = self._moment_generating_function(t)
if mgf is None:
return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
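                # E[var**k] is the k-th derivative of the MGF at t = 0, i.e.
                # factorial(k) times the coefficient of t**k in its Taylor
                # expansion; the polynomial's expectation is assembled from
                # these coefficients below.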
result = 0
for k in range(deg+1):
result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)
return result
except PolynomialError:
return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
else:
return Integral(expr * self.pdf(var), (var, self.set), **kwargs)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x, p = symbols('x, p', real=True, cls=Dummy)
left_bound = self.set.start
pdf = self.pdf(x)
cdf = integrate(pdf, (x, left_bound, x), **kwargs)
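        # the quantile function is the inverse CDF: solve cdf(x) = p for x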
quantile = solveset(cdf - p, x, self.set)
return Lambda(p, Piecewise((quantile, (p >= 0) & (p <= 1) ), (nan, True)))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if len(kwargs) == 0:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
class ContinuousPSpace(PSpace):
""" Continuous Probability Space
Represents the likelihood of an event space defined over a continuum.
Represented with a ContinuousDomain and a PDF (Lambda-Like)
"""
is_Continuous = True
is_real = True
@property
def pdf(self):
return self.density(*self.domain.symbols)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
if rvs is None:
rvs = self.values
else:
rvs = frozenset(rvs)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
domain_symbols = frozenset(rv.symbol for rv in rvs)
return self.domain.compute_expectation(self.pdf * expr,
domain_symbols, **kwargs)
def compute_density(self, expr, **kwargs):
# Common case Density(X) where X in self.values
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.compute_expectation(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
z = Dummy('z', real=True)
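        # in general, the density of expr at z is the expectation of
        # DiracDelta(expr - z), i.e. the joint pdf integrated over the
        # level set expr == z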
return Lambda(z, self.compute_expectation(DiracDelta(expr - z), **kwargs))
@cacheit
def compute_cdf(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"CDF not well defined on multivariate expressions")
d = self.compute_density(expr, **kwargs)
x, z = symbols('x, z', real=True, cls=Dummy)
left_bound = self.domain.set.start
# CDF is integral of PDF from left bound to z
cdf = integrate(d(x), (x, left_bound, z), **kwargs)
        # Ensure that the CDF is zero to the left of left_bound
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
@cacheit
def compute_characteristic_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Characteristic function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
cf = integrate(exp(I*t*x)*d(x), (x, -oo, oo), **kwargs)
return Lambda(t, cf)
@cacheit
def compute_moment_generating_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Moment generating function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
mgf = integrate(exp(t * x) * d(x), (x, -oo, oo), **kwargs)
return Lambda(t, mgf)
@cacheit
def compute_quantile(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"Quantile not well defined on multivariate expressions")
d = self.compute_cdf(expr, **kwargs)
x = Dummy('x', real=True)
p = Dummy('p', positive=True)
quantile = solveset(d(x) - p, x, self.set)
return Lambda(p, quantile)
def probability(self, condition, **kwargs):
z = Dummy('z', real=True)
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
# Univariate case can be handled by where
try:
domain = self.where(condition)
rv = [rv for rv in self.values if rv.symbol == domain.symbol][0]
# Integrate out all other random variables
pdf = self.compute_density(rv, **kwargs)
# return S.Zero if `domain` is empty set
if domain.set is S.EmptySet or isinstance(domain.set, FiniteSet):
return S.Zero if not cond_inv else S.One
if isinstance(domain.set, Union):
return sum(
Integral(pdf(z), (z, subset), **kwargs) for subset in
domain.set.args if isinstance(subset, Interval))
# Integrate out the last variable over the special domain
return Integral(pdf(z), (z, domain.set), **kwargs)
        # Other cases can be turned into a univariate case
        # by computing the density of the expression
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
if not is_random(expr):
dens = self.density
comp = condition.rhs
else:
dens = density(expr, **kwargs)
comp = 0
if not isinstance(dens, ContinuousDistribution):
from sympy.stats.crv_types import ContinuousDistributionHandmade
dens = ContinuousDistributionHandmade(dens, set=self.domain.set)
# Turn problem into univariate case
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, comp))
return result if not cond_inv else S.One - result
def where(self, condition):
rvs = frozenset(random_symbols(condition))
if not (len(rvs) == 1 and rvs.issubset(self.values)):
raise NotImplementedError(
"Multiple continuous random variables not supported")
rv = tuple(rvs)[0]
interval = reduce_rational_inequalities_wrap(condition, rv)
interval = interval.intersect(self.domain.set)
return SingleContinuousDomain(rv.symbol, interval)
def conditional_space(self, condition, normalize=True, **kwargs):
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
domain = ConditionalContinuousDomain(self.domain, condition)
if normalize:
# create a clone of the variable to
# make sure that variables in nested integrals are different
# from the variables outside the integral
# this makes sure that they are evaluated separately
# and in the correct order
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting set to tuple. The order matters to Lambda though
# so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return ContinuousPSpace(domain, density)
class SingleContinuousPSpace(ContinuousPSpace, SinglePSpace):
"""
A continuous probability space over a single univariate variable
These consist of a Symbol and a SingleContinuousDistribution
This class is normally accessed through the various random variable
functions, Normal, Exponential, Uniform, etc....
"""
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleContinuousDomain(sympify(self.symbol), self.set)
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample(size, library=library)}
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs)
except PoleError:
return Integral(expr * self.pdf, (x, self.set), **kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
z = Dummy("z", real=True)
return Lambda(z, self.distribution.cdf(z, **kwargs))
else:
return ContinuousPSpace.compute_cdf(self, expr, **kwargs)
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
return ContinuousPSpace.compute_characteristic_function(self, expr, **kwargs)
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
return ContinuousPSpace.compute_moment_generating_function(self, expr, **kwargs)
def compute_density(self, expr, **kwargs):
# https://en.wikipedia.org/wiki/Random_variable#Functions_of_random_variables
if expr == self.value:
return self.density
y = Dummy('y', real=True)
gs = solveset(expr - y, self.value, S.Reals)
if isinstance(gs, Intersection) and S.Reals in gs.args:
gs = list(gs.args[1])
if not gs:
raise ValueError("Can not solve %s for %s"%(expr, self.value))
fx = self.compute_density(self.value)
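        # change of variables: with the branches g of the inverse of expr,
        # f_Y(y) = sum over g of f_X(g(y)) * |dg/dy|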
fy = sum(fx(g) * abs(g.diff(y)) for g in gs)
return Lambda(y, fy)
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
return ContinuousPSpace.compute_quantile(self, expr, **kwargs)
def _reduce_inequalities(conditions, var, **kwargs):
try:
return reduce_rational_inequalities(conditions, var, **kwargs)
except PolynomialError:
raise ValueError("Reduction of condition failed %s\n" % conditions[0])
def reduce_rational_inequalities_wrap(condition, var):
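    # Convert a relational or Boolean condition in ``var`` into a set:
    # a single relational becomes an interval, Or becomes a union of
    # intervals and And becomes their intersection.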
if condition.is_Relational:
return _reduce_inequalities([[condition]], var, relational=False)
if isinstance(condition, Or):
return Union(*[_reduce_inequalities([[arg]], var, relational=False)
for arg in condition.args])
if isinstance(condition, And):
intervals = [_reduce_inequalities([[arg]], var, relational=False)
for arg in condition.args]
I = intervals[0]
for i in intervals:
I = I.intersect(i)
return I
|
be507ffc1a7ae14a69fe5cdd52f58c21d9020b8464fb4738d3026478e28f70b3 | from functools import reduce
from sympy.core.compatibility import as_int
from sympy.core.mul import prod
from sympy.core.numbers import igcdex, igcd
from sympy.ntheory.primetest import isprime
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2
def symmetric_residue(a, m):
"""Return the residual mod m such that it is within half of the modulus.
>>> from sympy.ntheory.modular import symmetric_residue
>>> symmetric_residue(1, 6)
1
>>> symmetric_residue(4, 6)
-2
"""
if a <= m // 2:
return a
return a - m
def crt(m, v, symmetric=False, check=True):
r"""Chinese Remainder Theorem.
The moduli in m are assumed to be pairwise coprime. The output
is then an integer f, such that f = v_i mod m_i for each pair out
of v and m. If ``symmetric`` is False a positive integer will be
returned, else \|f\| will be less than or equal to the LCM of the
moduli, and thus f may be negative.
    If the moduli are not co-prime the correct result will still be
    returned: whenever the check of the quick result fails, the slower
    solve_congruence routine is used instead. That result will be None
    if there is no solution.
The keyword ``check`` can be set to False if it is known that the moduli
are coprime.
Examples
========
As an example consider a set of residues ``U = [49, 76, 65]``
and a set of moduli ``M = [99, 97, 95]``. Then we have::
>>> from sympy.ntheory.modular import crt
>>> crt([99, 97, 95], [49, 76, 65])
(639985, 912285)
This is the correct result because::
>>> [639985 % m for m in [99, 97, 95]]
[49, 76, 65]
If the moduli are not co-prime, you may receive an incorrect result
if you use ``check=False``:
>>> crt([12, 6, 17], [3, 4, 2], check=False)
(954, 1224)
>>> [954 % m for m in [12, 6, 17]]
[6, 0, 2]
>>> crt([12, 6, 17], [3, 4, 2]) is None
True
>>> crt([3, 6], [2, 5])
(5, 6)
    Note that the order of gf_crt's arguments is reversed relative to crt,
and that solve_congruence takes residue, modulus pairs.
Programmer's note: rather than checking that all pairs of moduli share
no GCD (an O(n**2) test) and rather than factoring all moduli and seeing
that there is no factor in common, a check that the result gives the
indicated residuals is performed -- an O(n) operation.
See Also
========
solve_congruence
sympy.polys.galoistools.gf_crt : low level crt routine used by this routine
"""
if check:
m = list(map(as_int, m))
v = list(map(as_int, v))
result = gf_crt(v, m, ZZ)
mm = prod(m)
if check:
if not all(v % m == result % m for v, m in zip(v, m)):
result = solve_congruence(*list(zip(v, m)),
check=False, symmetric=symmetric)
if result is None:
return result
result, mm = result
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
def crt1(m):
"""First part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1
>>> crt1([18, 42, 6])
(4536, [252, 108, 756], [0, 2, 0])
"""
return gf_crt1(m, ZZ)
def crt2(m, v, mm, e, s, symmetric=False):
"""Second part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1, crt2
>>> mm, e, s = crt1([18, 42, 6])
>>> crt2([18, 42, 6], [0, 0, 0], mm, e, s)
(0, 4536)
"""
result = gf_crt2(v, m, mm, e, s, ZZ)
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
def solve_congruence(*remainder_modulus_pairs, **hint):
"""Compute the integer ``n`` that has the residual ``ai`` when it is
divided by ``mi`` where the ``ai`` and ``mi`` are given as pairs to
this function: ((a1, m1), (a2, m2), ...). If there is no solution,
return None. Otherwise return ``n`` and its modulus.
The ``mi`` values need not be co-prime. If it is known that the moduli are
not co-prime then the hint ``check`` can be set to False (default=True) and
the check for a quicker solution via crt() (valid when the moduli are
co-prime) will be skipped.
If the hint ``symmetric`` is True (default is False), the value of ``n``
will be within 1/2 of the modulus, possibly negative.
Examples
========
>>> from sympy.ntheory.modular import solve_congruence
What number is 2 mod 3, 3 mod 5 and 2 mod 7?
>>> solve_congruence((2, 3), (3, 5), (2, 7))
(23, 105)
>>> [23 % m for m in [3, 5, 7]]
[2, 3, 2]
    If you prefer to work with all the remainders in one list and
all moduli in another, send the arguments like this:
>>> solve_congruence(*zip((2, 3, 2), (3, 5, 7)))
(23, 105)
The moduli need not be co-prime; in this case there may or
may not be a solution:
>>> solve_congruence((2, 3), (4, 6)) is None
True
>>> solve_congruence((2, 3), (5, 6))
(5, 6)
The symmetric flag will make the result be within 1/2 of the modulus:
>>> solve_congruence((2, 3), (5, 6), symmetric=True)
(-1, 6)
See Also
========
crt : high level routine implementing the Chinese Remainder Theorem
"""
def combine(c1, c2):
"""Return the tuple (a, m) which satisfies the requirement
        that n = a + i*m satisfies n = a1 + j*m1 and n = a2 + k*m2.
References
==========
- https://en.wikipedia.org/wiki/Method_of_successive_substitution
"""
a1, m1 = c1
a2, m2 = c2
a, b, c = m1, a2 - a1, m2
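        # n = a1 + m1*i must also satisfy n = a2 (mod m2), i.e.
        # m1*i = a2 - a1 (mod m2); this is the congruence a*i = b (mod c)
        # solved below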
g = reduce(igcd, [a, b, c])
a, b, c = [i//g for i in [a, b, c]]
if a != 1:
inv_a, _, g = igcdex(a, c)
if g != 1:
return None
b *= inv_a
a, m = a1 + m1*b, m1*c
return a, m
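    # e.g. combining (2, 3) with (3, 5) by successive substitution gives
    # (8, 15): 2 + 3*i = 3 (mod 5) forces i = 2 (mod 5), so n = 8 (mod 15)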
rm = remainder_modulus_pairs
symmetric = hint.get('symmetric', False)
if hint.get('check', True):
rm = [(as_int(r), as_int(m)) for r, m in rm]
        # ignore redundant pairs, but return None for contradictory ones; also
# make sure that a unique set of bases is sent to gf_crt if
# they are all prime.
#
# The routine will work out less-trivial violations and
        # return None, e.g. for the pairs (1, 3) and (14, 42) there
        # is no answer because 14 mod 42 implies 14 mod 3 (since 3
        # divides 42), i.e. 2 mod 3, which is inconsistent with
        # 1 mod 3. But to
# preprocess the input beyond checking of another pair with 42
# or 3 as the modulus (for this example) is not necessary.
uniq = {}
for r, m in rm:
r %= m
if m in uniq:
if r != uniq[m]:
return None
continue
uniq[m] = r
rm = [(r, m) for m, r in uniq.items()]
del uniq
# if the moduli are co-prime, the crt will be significantly faster;
# checking all pairs for being co-prime gets to be slow but a prime
# test is a good trade-off
if all(isprime(m) for r, m in rm):
r, m = list(zip(*rm))
return crt(m, r, symmetric=symmetric, check=False)
rv = (0, 1)
for rmi in rm:
rv = combine(rv, rmi)
if rv is None:
break
n, m = rv
n = n % m
else:
if symmetric:
return symmetric_residue(n, m), m
return n, m
|
c21d7b4463fce6c88e7478228735a006a9bb74bbfcb33a33c5fdb451018f9393 | import random
from collections import defaultdict
from collections.abc import Iterable
from functools import reduce
from sympy.core.parameters import global_parameters
from sympy.core.basic import Atom
from sympy.core.expr import Expr
from sympy.core.compatibility import \
is_sequence, as_int
from sympy.core.numbers import Integer
from sympy.core.sympify import _sympify
from sympy.matrices import zeros
from sympy.polys.polytools import lcm
from sympy.utilities.iterables import (flatten, has_variety, minlex,
has_dups, runs)
from mpmath.libmp.libintmath import ifac
from sympy.multipledispatch import dispatch
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
def _af_rmuln(*abc):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmul
"""
a = abc
m = len(a)
if m == 3:
p0, p1, p2 = a
return [p0[p1[i]] for i in p2]
if m == 4:
p0, p1, p2, p3 = a
return [p0[p1[p2[i]]] for i in p3]
if m == 5:
p0, p1, p2, p3, p4 = a
return [p0[p1[p2[p3[i]]]] for i in p4]
if m == 6:
p0, p1, p2, p3, p4, p5 = a
return [p0[p1[p2[p3[p4[i]]]]] for i in p5]
if m == 7:
p0, p1, p2, p3, p4, p5, p6 = a
return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6]
if m == 8:
p0, p1, p2, p3, p4, p5, p6, p7 = a
return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7]
if m == 1:
return a[0][:]
if m == 2:
a, b = a
return [a[i] for i in b]
if m == 0:
raise ValueError("String must not be empty")
p0 = _af_rmuln(*a[:m//2])
p1 = _af_rmuln(*a[m//2:])
return [p0[i] for i in p1]
def _af_parity(pi):
"""
Computes the parity of a permutation in array form.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that x > y but p[x] < p[y].
Examples
========
>>> from sympy.combinatorics.permutations import _af_parity
>>> _af_parity([0, 1, 2, 3])
0
>>> _af_parity([3, 2, 0, 1])
1
See Also
========
Permutation
"""
n = len(pi)
a = [0] * n
c = 0
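    # walk each cycle once to count the number of cycles, c; a cycle of
    # length L is a product of L - 1 transpositions, so the parity of the
    # permutation is (n - c) % 2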
for j in range(n):
if a[j] == 0:
c += 1
a[j] = 1
i = j
while pi[i] != j:
i = pi[i]
a[i] = 1
return (n - c) % 2
def _af_invert(a):
"""
Finds the inverse, ~A, of a permutation, A, given in array form.
Examples
========
>>> from sympy.combinatorics.permutations import _af_invert, _af_rmul
>>> A = [1, 2, 0, 3]
>>> _af_invert(A)
[2, 0, 1, 3]
>>> _af_rmul(_, A)
[0, 1, 2, 3]
See Also
========
Permutation, __invert__
"""
inv_form = [0] * len(a)
for i, ai in enumerate(a):
inv_form[ai] = i
return inv_form
def _af_pow(a, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation, _af_pow
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> _af_pow(p._array_form, 4)
[0, 1, 2, 3]
"""
if n == 0:
return list(range(len(a)))
if n < 0:
return _af_pow(_af_invert(a), -n)
if n == 1:
return a[:]
elif n == 2:
b = [a[i] for i in a]
elif n == 3:
b = [a[a[i]] for i in a]
elif n == 4:
b = [a[a[a[i]]] for i in a]
else:
# use binary multiplication
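        # square-and-multiply on array forms: fold the set bits of n into
        # the accumulator b while repeatedly composing a with itself (a**2
        # or a**4) and reducing n accordingly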
b = list(range(len(a)))
while 1:
if n & 1:
b = [b[i] for i in a]
n -= 1
if not n:
break
if n % 4 == 0:
a = [a[a[a[i]]] for i in a]
n = n // 4
elif n % 2 == 0:
a = [a[i] for i in a]
n = n // 2
return b
def _af_commutes_with(a, b):
"""
Checks if the two permutations with array forms
given by ``a`` and ``b`` commute.
Examples
========
>>> from sympy.combinatorics.permutations import _af_commutes_with
>>> _af_commutes_with([1, 2, 0], [0, 2, 1])
False
See Also
========
Permutation, commutes_with
"""
return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1))
class Cycle(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
Explanation
===========
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
>>> from sympy.combinatorics.permutations import Perm, Cycle
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
(1 3 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
(1 3 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
(1 5 4 10)
>>> Cycle(1, 2)(4)(5)(10)
(1 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
    cycle, the underlying dictionary items are still available with
    methods such as items():
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
return as_int(arg)
def __iter__(self):
yield from self.list()
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle as C
>>> C(1, 2)(2, 3)
(1 3 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = C(1, 2)
>>> a(2, 3)
(1 3 2)
>>> a(2, 3)(4, 5)
(1 3 2)(4 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle
>>> p = Cycle(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Cycle(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
if size is not None:
big = max([i for i in self.keys() if self[i] != i] + [0])
size = max(size, big + 1)
else:
size = self.size
return [self[i] for i in range(size)]
def __repr__(self):
"""We want it to print as a Cycle, not as a dict.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> print(_)
(1 2)
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
"""
if not self:
return 'Cycle()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
return 'Cycle%s' % s
def __str__(self):
"""We want it to be printed in a Cycle notation with no
comma in-between.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> Cycle(1, 2, 4)(5, 6)
(1 2 4)(5 6)
"""
if not self:
return '()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
s = s.replace(',', '')
return s
def __init__(self, *args):
"""Load up a Cycle instance with the values for the cycle.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle
>>> Cycle(1, 2, 6)
(1 2 6)
"""
if not args:
return
if len(args) == 1:
if isinstance(args[0], Permutation):
for c in args[0].cyclic_form:
self.update(self(*c))
return
elif isinstance(args[0], Cycle):
for k, v in args[0].items():
self[k] = v
return
args = [as_int(a) for a in args]
if any(i < 0 for i in args):
raise ValueError('negative integers are not allowed in a cycle.')
if has_dups(args):
raise ValueError('All elements must be unique in a cycle.')
for i in range(-len(args), 0):
self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
def copy(self):
return Cycle(self)
class Permutation(Atom):
"""
    A permutation, alternatively known as an 'arrangement number' or 'ordering',
is an arrangement of the elements of an ordered list into a one-to-one
mapping with itself. The permutation of a given arrangement is given by
indicating the positions of the elements after re-arrangement [2]_. For
example, if one started with elements [x, y, a, b] (in that order) and
they were reordered as [x, y, b, a] then the permutation would be
[0, 1, 3, 2]. Notice that (in SymPy) the first element is always referred
to as 0 and the permutation uses the indices of the elements in the
original ordering, not the elements (a, b, etc...) themselves.
>>> from sympy.combinatorics import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations Notation
=====================
Permutations are commonly represented in disjoint cycle or array forms.
Array Notation and 2-line Form
------------------------------------
In the 2-line form, the elements and their final positions are shown
as a matrix with 2 rows:
[0 1 2 ... n-1]
[p(0) p(1) p(2) ... p(n-1)]
Since the first line is always range(n), where n is the size of p,
it is sufficient to represent the permutation by the second line,
referred to as the "array form" of the permutation. This is entered
in brackets as the argument to the Permutation class:
>>> p = Permutation([0, 2, 1]); p
Permutation([0, 2, 1])
Given i in range(p.size), the permutation maps i to i^p
>>> [i^p for i in range(p.size)]
[0, 2, 1]
The composite of two permutations p*q means first apply p, then q, so
i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules:
>>> q = Permutation([2, 1, 0])
>>> [i^p^q for i in range(3)]
[2, 0, 1]
>>> [i^(p*q) for i in range(3)]
[2, 0, 1]
One can use also the notation p(i) = i^p, but then the composition
rule is (p*q)(i) = q(p(i)), not p(q(i)):
>>> [(p*q)(i) for i in range(p.size)]
[2, 0, 1]
>>> [q(p(i)) for i in range(p.size)]
[2, 0, 1]
>>> [p(q(i)) for i in range(p.size)]
[1, 2, 0]
Disjoint Cycle Notation
-----------------------
In disjoint cycle notation, only the elements that have shifted are
indicated. In the above case, the 2 and 1 switched places. This can
be entered in two ways:
>>> Permutation(1, 2) == Permutation([[1, 2]]) == p
True
Only the relative ordering of elements in a cycle matter:
>>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2)
True
The disjoint cycle notation is convenient when representing
permutations that have several cycles in them:
>>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]])
True
It also provides some economy in entry when computing products of
permutations that are written in disjoint cycle notation:
>>> Permutation(1, 2)(1, 3)(2, 3)
Permutation([0, 3, 2, 1])
>>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]])
True
Caution: when the cycles have common elements
between them then the order in which the
permutations are applied matters. The
convention is that the permutations are
applied from *right to left*. In the following, the
transposition of elements 2 and 3 is followed
by the transposition of elements 1 and 2:
>>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)])
True
>>> Permutation(1, 2)(2, 3).list()
[0, 3, 1, 2]
If the first and second elements had been
swapped first, followed by the swapping of the second
and third, the result would have been [0, 2, 3, 1].
If, for some reason, you want to apply the cycles
in the order they are entered, you can simply reverse
the order of cycles:
>>> Permutation([(1, 2), (2, 3)][::-1]).list()
[0, 2, 3, 1]
Entering a singleton in a permutation is a way to indicate the size of the
permutation. The ``size`` keyword can also be used.
Array-form entry:
>>> Permutation([[1, 2], [9]])
Permutation([0, 2, 1], size=10)
>>> Permutation([[1, 2]], size=10)
Permutation([0, 2, 1], size=10)
Cyclic-form entry:
>>> Permutation(1, 2, size=10)
Permutation([0, 2, 1], size=10)
>>> Permutation(9)(1, 2)
Permutation([0, 2, 1], size=10)
Caution: no singleton containing an element larger than the largest
in any previous cycle can be entered. This is an important difference
in how Permutation and Cycle handle the __call__ syntax. A singleton
argument at the start of a Permutation performs instantiation of the
Permutation and is permitted:
>>> Permutation(5)
Permutation([], size=6)
A singleton entered after instantiation is a call to the permutation
-- a function call -- and if the argument is out of range it will
trigger an error. For this reason, it is better to start the cycle
with the singleton:
The following fails because there is no element 3:
>>> Permutation(1, 2)(3)
Traceback (most recent call last):
...
IndexError: list index out of range
This is ok: only the call to an out of range singleton is prohibited;
otherwise the permutation autosizes:
>>> Permutation(3)(1, 2)
Permutation([0, 2, 1, 3])
>>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2)
True
Equality testing
----------------
The array forms must be the same in order for permutations to be equal:
>>> Permutation([1, 0, 2, 3]) == Permutation([1, 0])
False
Identity Permutation
--------------------
The identity permutation is a permutation in which no element is out of
place. It can be entered in a variety of ways. All the following create
an identity permutation of size 4:
>>> I = Permutation([0, 1, 2, 3])
>>> all(p == I for p in [
... Permutation(3),
... Permutation(range(4)),
... Permutation([], size=4),
... Permutation(size=4)])
True
Watch out for entering the range *inside* a set of brackets (which is
cycle notation):
>>> I == Permutation([range(4)])
False
Permutation Printing
====================
There are a few things to note about how Permutations are printed.
1) If you prefer one form (array or cycle) over another, you can set
``init_printing`` with the ``perm_cyclic`` flag.
>>> from sympy import init_printing
>>> p = Permutation(1, 2)(4, 5)(3, 4)
>>> p
Permutation([0, 2, 1, 4, 5, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(1 2)(3 4 5)
    2) Regardless of the setting, the array form or the cyclic form can be
    obtained as a list and either of those can be copied and supplied as
the argument to Permutation:
>>> p.array_form
[0, 2, 1, 4, 5, 3]
>>> p.cyclic_form
[[1, 2], [3, 4, 5]]
>>> Permutation(_) == p
True
3) Printing is economical in that as little as possible is printed while
retaining all information about the size of the permutation:
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation([1, 0, 2, 3])
Permutation([1, 0, 2, 3])
>>> Permutation([1, 0, 2, 3], size=20)
Permutation([1, 0], size=20)
>>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20)
Permutation([1, 0, 2, 4, 3], size=20)
>>> p = Permutation([1, 0, 2, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(3)(0 1)
>>> init_printing(perm_cyclic=False, pretty_print=False)
The 2 was not printed but it is still there as can be seen with the
array_form and size methods:
>>> p.array_form
[1, 0, 2, 3]
>>> p.size
4
Short introduction to other methods
===================================
The permutation can act as a bijective function, telling what element is
located at a given position
>>> q = Permutation([5, 2, 3, 4, 1, 0])
>>> q.array_form[1] # the hard way
2
>>> q(1) # the easy way
2
>>> {i: q(i) for i in range(q.size)} # showing the bijection
{0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0}
The full cyclic form (including singletons) can be obtained:
>>> p.full_cyclic_form
[[0, 1], [2], [3]]
Any permutation can be factored into transpositions of pairs of elements:
>>> Permutation([[1, 2], [3, 4, 5]]).transpositions()
[(1, 2), (3, 5), (3, 4)]
>>> Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form
[[1, 2], [3, 4, 5]]
The number of permutations on a set of n elements is given by n! and is
called the cardinality.
>>> p.size
4
>>> p.cardinality
24
A given permutation has a rank among all the possible permutations of the
same elements, but what that rank is depends on how the permutations are
enumerated. (There are a number of different methods of doing so.) The
lexicographic rank is given by the rank method and this rank is used to
increment a permutation with addition/subtraction:
>>> p.rank()
6
>>> p + 1
Permutation([1, 0, 3, 2])
>>> p.next_lex()
Permutation([1, 0, 3, 2])
>>> _.rank()
7
>>> p.unrank_lex(p.size, rank=7)
Permutation([1, 0, 3, 2])
The product of two permutations p and q is defined as their composition as
functions, (p*q)(i) = q(p(i)) [6]_.
>>> p = Permutation([1, 0, 2, 3])
>>> q = Permutation([2, 3, 1, 0])
>>> list(q*p)
[2, 3, 0, 1]
>>> list(p*q)
[3, 2, 1, 0]
>>> [q(p(i)) for i in range(p.size)]
[3, 2, 1, 0]
The permutation can be 'applied' to any list-like object, not only
Permutations:
>>> p(['zero', 'one', 'four', 'two'])
['one', 'zero', 'four', 'two']
>>> p('zo42')
['o', 'z', '4', '2']
If you have a list of arbitrary elements, the corresponding permutation
can be found with the from_sequence method:
>>> Permutation.from_sequence('SymPy')
Permutation([1, 3, 2, 0, 4])
Checking if a Permutation is contained in a Group
=================================================
Generally if you have a group of permutations G on n symbols, and
you're checking if a permutation on less than n symbols is part
of that group, the check will fail.
    Here is an example with n=5, where we check whether the cycle
    (1,2,3) is in G:
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> from sympy.combinatorics import Cycle, Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup(Cycle(2, 3)(4, 5), Cycle(1, 2, 3, 4, 5))
>>> p1 = Permutation(Cycle(2, 5, 3))
>>> p2 = Permutation(Cycle(1, 2, 3))
>>> a1 = Permutation(Cycle(1, 2, 3).list(6))
>>> a2 = Permutation(Cycle(1, 2, 3)(5))
>>> a3 = Permutation(Cycle(1, 2, 3),size=6)
>>> for p in [p1,p2,a1,a2,a3]: p, G.contains(p)
((2 5 3), True)
((1 2 3), False)
((5)(1 2 3), True)
((5)(1 2 3), True)
((5)(1 2 3), True)
The check for p2 above will fail.
Checking if p1 is in G works because SymPy knows
G is a group on 5 symbols, and p1 is also on 5 symbols
(its largest element is 5).
For ``a1``, the ``.list(6)`` call will extend the permutation to 5
symbols, so the test will work as well. In the case of ``a2`` the
permutation is being extended to 5 symbols by using a singleton,
and in the case of ``a3`` it's extended through the constructor
argument ``size=6``.
There is another way to do this, which is to tell the ``contains``
method that the number of symbols the group is on doesn't need to
match perfectly the number of symbols for the permutation:
>>> G.contains(p2,strict=False)
True
    This is done via the ``strict`` argument to the ``contains`` method,
and SymPy will try to extend the permutation on its own and then
perform the containment check.
See Also
========
Cycle
References
==========
.. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics
Combinatorics and Graph Theory with Mathematica. Reading, MA:
Addison-Wesley, pp. 3-16, 1990.
.. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial
Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011.
.. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking
permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001),
281-284. DOI=10.1016/S0020-0190(01)00141-7
.. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms'
CRC Press, 1999
.. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O.
Concrete Mathematics: A Foundation for Computer Science, 2nd ed.
Reading, MA: Addison-Wesley, 1994.
.. [6] https://en.wikipedia.org/wiki/Permutation#Product_and_inverse
.. [7] https://en.wikipedia.org/wiki/Lehmer_code
"""
is_Permutation = True
_array_form = None
_cyclic_form = None
_cycle_structure = None
_size = None
_rank = None
def __new__(cls, *args, size=None, **kwargs):
"""
Constructor for the Permutation object from a list or a
list of lists in which all elements of the permutation may
appear only once.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations entered in array-form are left unaltered:
>>> Permutation([0, 2, 1])
Permutation([0, 2, 1])
Permutations entered in cyclic form are converted to array form;
singletons need not be entered, but can be entered to indicate the
largest element:
>>> Permutation([[4, 5, 6], [0, 1]])
Permutation([1, 0, 2, 3, 5, 6, 4])
>>> Permutation([[4, 5, 6], [0, 1], [19]])
Permutation([1, 0, 2, 3, 5, 6, 4], size=20)
All manipulation of permutations assumes that the smallest element
is 0 (in keeping with 0-based indexing in Python) so if the 0 is
missing when entering a permutation in array form, an error will be
raised:
>>> Permutation([2, 1])
Traceback (most recent call last):
...
ValueError: Integers 0 through 2 must be present.
If a permutation is entered in cyclic form, it can be entered without
singletons and the ``size`` specified so those values can be filled
in, otherwise the array form will only extend to the maximum value
in the cycles:
>>> Permutation([[1, 4], [3, 5, 2]], size=10)
Permutation([0, 4, 3, 5, 1, 2], size=10)
>>> _.array_form
[0, 4, 3, 5, 1, 2, 6, 7, 8, 9]
"""
if size is not None:
size = int(size)
#a) ()
#b) (1) = identity
#c) (1, 2) = cycle
#d) ([1, 2, 3]) = array form
#e) ([[1, 2]]) = cyclic form
#f) (Cycle) = conversion to permutation
#g) (Permutation) = adjust size or return copy
ok = True
if not args: # a
return cls._af_new(list(range(size or 0)))
elif len(args) > 1: # c
return cls._af_new(Cycle(*args).list(size))
if len(args) == 1:
a = args[0]
if isinstance(a, cls): # g
if size is None or size == a.size:
return a
return cls(a.array_form, size=size)
if isinstance(a, Cycle): # f
return cls._af_new(a.list(size))
if not is_sequence(a): # b
if size is not None and a + 1 > size:
raise ValueError('size is too small when max is %s' % a)
return cls._af_new(list(range(a + 1)))
if has_variety(is_sequence(ai) for ai in a):
ok = False
else:
ok = False
if not ok:
raise ValueError("Permutation argument must be a list of ints, "
"a list of lists, Permutation or Cycle.")
# safe to assume args are valid; this also makes a copy
# of the args
args = list(args[0])
is_cycle = args and is_sequence(args[0])
if is_cycle: # e
args = [[int(i) for i in c] for c in args]
else: # d
args = [int(i) for i in args]
# if there are n elements present, 0, 1, ..., n-1 should be present
# unless a cycle notation has been provided. A 0 will be added
# for convenience in case one wants to enter permutations where
# counting starts from 1.
temp = flatten(args)
if has_dups(temp) and not is_cycle:
raise ValueError('there were repeated elements.')
temp = set(temp)
if not is_cycle:
if any(i not in temp for i in range(len(temp))):
raise ValueError('Integers 0 through %s must be present.' %
max(temp))
if size is not None and temp and max(temp) + 1 > size:
raise ValueError('max element should not exceed %s' % (size - 1))
if is_cycle:
# it's not necessarily canonical so we won't store
# it -- use the array form instead
c = Cycle()
for ci in args:
c = c(*ci)
aform = c.list()
else:
aform = list(args)
if size and size > len(aform):
# don't allow for truncation of permutation which
# might split a cycle and lead to an invalid aform
# but do allow the permutation size to be increased
aform.extend(list(range(len(aform), size)))
return cls._af_new(aform)
@classmethod
def _af_new(cls, perm):
"""A method to produce a Permutation object from a list;
the list is bound to the _array_form attribute, so it must
not be modified; this method is meant for internal use only;
the list ``a`` is supposed to be generated as a temporary value
in a method, so p = Perm._af_new(a) is the only object
        to hold a reference to ``a``.
Examples
========
>>> from sympy.combinatorics.permutations import Perm
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = [2, 1, 3, 0]
>>> p = Perm._af_new(a)
>>> p
Permutation([2, 1, 3, 0])
"""
p = super().__new__(cls)
p._array_form = perm
p._size = len(perm)
return p
def _hashable_content(self):
# the array_form (a list) is the Permutation arg, so we need to
# return a tuple, instead
return tuple(self.array_form)
@property
def array_form(self):
"""
Return a copy of the attribute _array_form
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> Permutation([[2, 0, 3, 1]]).array_form
[3, 2, 0, 1]
>>> Permutation([2, 0, 3, 1]).array_form
[2, 0, 3, 1]
>>> Permutation([[1, 2], [4, 5]]).array_form
[0, 2, 1, 3, 5, 4]
"""
return self._array_form[:]
def list(self, size=None):
"""Return the permutation as an explicit list, possibly
trimming unmoved elements if size is less than the maximum
element in the permutation; if this is desired, setting
``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Permutation(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
>>> Permutation(3).list(-1)
[]
"""
if not self and size is None:
            raise ValueError('must give size for empty Permutation')
rv = self.array_form
if size is not None:
if size > self.size:
rv.extend(list(range(self.size, size)))
else:
# find first value from rhs where rv[i] != i
i = self.size - 1
while rv:
if rv[-1] != i:
break
rv.pop()
i -= 1
return rv
@property
def cyclic_form(self):
"""
This is used to convert to the cyclic notation
from the canonical notation. Singletons are omitted.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2])
>>> p.cyclic_form
[[1, 3, 2]]
>>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form
[[0, 1], [3, 4]]
See Also
========
array_form, full_cyclic_form
"""
if self._cyclic_form is not None:
return list(self._cyclic_form)
array_form = self.array_form
unchecked = [True] * len(array_form)
cyclic_form = []
for i in range(len(array_form)):
if unchecked[i]:
cycle = []
cycle.append(i)
unchecked[i] = False
j = i
while unchecked[array_form[j]]:
j = array_form[j]
cycle.append(j)
unchecked[j] = False
if len(cycle) > 1:
cyclic_form.append(cycle)
assert cycle == list(minlex(cycle))
cyclic_form.sort()
self._cyclic_form = cyclic_form[:]
return cyclic_form
@property
def full_cyclic_form(self):
"""Return permutation in cyclic form including singletons.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> Permutation([0, 2, 1]).full_cyclic_form
[[0], [1, 2]]
"""
need = set(range(self.size)) - set(flatten(self.cyclic_form))
rv = self.cyclic_form
rv.extend([[i] for i in need])
rv.sort()
return rv
@property
def size(self):
"""
Returns the number of elements in the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([[3, 2], [0, 1]]).size
4
See Also
========
cardinality, length, order, rank
"""
return self._size
def support(self):
"""Return the elements in permutation, P, for which P[i] != i.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[3, 2], [0, 1], [4]])
>>> p.array_form
[1, 0, 3, 2, 4]
>>> p.support()
[0, 1, 2, 3]
"""
a = self.array_form
return [i for i, e in enumerate(a) if a[i] != i]
def __add__(self, other):
"""Return permutation that is other higher in rank than self.
The rank is the lexicographical rank, with the identity permutation
having rank of 0.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> I = Permutation([0, 1, 2, 3])
>>> a = Permutation([2, 1, 3, 0])
>>> I + a.rank() == a
True
See Also
========
__sub__, inversion_vector
"""
rank = (self.rank() + other) % self.cardinality
rv = self.unrank_lex(self.size, rank)
rv._rank = rank
return rv
def __sub__(self, other):
"""Return the permutation that is other lower in rank than self.
See Also
========
__add__
"""
return self.__add__(-other)
@staticmethod
def rmul(*args):
"""
Return product of Permutations [a, b, c, ...] as the Permutation whose
ith value is a(b(c(i))).
a, b, c, ... can be Permutation objects or tuples.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(Permutation.rmul(a, b))
[1, 2, 0]
>>> [a(b(i)) for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
Notes
=====
All items in the sequence will be parsed by Permutation as
necessary as long as the first item is a Permutation:
>>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b)
True
The reverse order of arguments will raise a TypeError.
"""
rv = args[0]
for i in range(1, len(args)):
rv = args[i]*rv
return rv
@classmethod
def rmul_with_af(cls, *args):
"""
same as rmul, but the elements of args are Permutation objects
which have _array_form
"""
a = [x._array_form for x in args]
rv = cls._af_new(_af_rmuln(*a))
return rv
def mul_inv(self, other):
"""
other*~self, self and other have _array_form
"""
a = _af_invert(self._array_form)
b = other._array_form
return self._af_new(_af_rmul(a, b))
def __rmul__(self, other):
"""This is needed to coerce other to Permutation in rmul."""
cls = type(self)
return cls(other)*self
def __mul__(self, other):
"""
Return the product a*b as a Permutation; the ith value is b(a(i)).
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
This handles operands in reverse order compared to _af_rmul and rmul:
>>> al = list(a); bl = list(b)
>>> _af_rmul(al, bl)
[1, 2, 0]
>>> [al[bl[i]] for i in range(3)]
[1, 2, 0]
It is acceptable for the arrays to have different lengths; the shorter
one will be padded to match the longer one:
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> b*Permutation([1, 0])
Permutation([1, 2, 0])
>>> Permutation([1, 0])*b
Permutation([2, 0, 1])
It is also acceptable to allow coercion to handle conversion of a
single list to the left of a Permutation:
>>> [0, 1]*a # no change: 2-element identity
Permutation([1, 0, 2])
>>> [[0, 1]]*a # exchange first two elements
Permutation([0, 1, 2])
You cannot use more than 1 cycle notation in a product of cycles
since coercion can only handle one argument to the left. To handle
multiple cycles it is convenient to use Cycle instead of Permutation:
>>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP
>>> from sympy.combinatorics.permutations import Cycle
>>> Cycle(1, 2)(2, 3)
(1 3 2)
"""
from sympy.combinatorics.perm_groups import PermutationGroup, Coset
if isinstance(other, PermutationGroup):
return Coset(self, other, dir='-')
a = self.array_form
# __rmul__ makes sure the other is a Permutation
b = other.array_form
if not b:
perm = a
else:
b.extend(list(range(len(b), len(a))))
perm = [b[i] for i in a] + b[len(a):]
return self._af_new(perm)
def commutes_with(self, other):
"""
Checks if the elements are commuting.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1, 4, 3, 0, 2, 5])
>>> b = Permutation([0, 1, 2, 3, 4, 5])
>>> a.commutes_with(b)
True
>>> b = Permutation([2, 3, 5, 4, 1, 0])
>>> a.commutes_with(b)
False
"""
a = self.array_form
b = other.array_form
return _af_commutes_with(a, b)
def __pow__(self, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> p**4
Permutation([0, 1, 2, 3])
"""
if isinstance(n, Permutation):
raise NotImplementedError(
'p**p is not defined; do you mean p^p (conjugate)?')
n = int(n)
return self._af_new(_af_pow(self.array_form, n))
def __rxor__(self, i):
"""Return self(i) when ``i`` is an int.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(1, 2, 9)
>>> 2^p == p(2) == 9
True
"""
if int(i) == i:
return self(i)
else:
raise NotImplementedError(
"i^p = p(i) when i is an integer, not %s." % i)
def __xor__(self, h):
"""Return the conjugate permutation ``~h*self*h` `.
Explanation
===========
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and
``b = ~h*a*h`` and both have the same cycle structure.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation(1, 2, 9)
>>> q = Permutation(6, 9, 8)
>>> p*q != q*p
True
Calculate and check properties of the conjugate:
>>> c = p^q
>>> c == ~q*p*q and p == q*c*~q
True
The expression q^p^r is equivalent to q^(p*r):
>>> r = Permutation(9)(4, 6, 8)
>>> q^p^r == q^(p*r)
True
If the term to the left of the conjugate operator, i, is an integer
then this is interpreted as selecting the ith element from the
permutation to the right:
>>> all(i^p == p(i) for i in range(p.size))
True
        Note that the * operator has higher precedence than the ^ operator:
>>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4)
True
Notes
=====
In Python the precedence rule is p^q^r = (p^q)^r which differs
in general from p^(q^r)
>>> q^p^r
(9)(1 4 8)
>>> q^(p^r)
(9)(1 8 6)
For a given r and p, both of the following are conjugates of p:
~r*p*r and r*p*~r. But these are not necessarily the same:
>>> ~r*p*r == r*p*~r
True
>>> p = Permutation(1, 2, 9)(5, 6)
>>> ~r*p*r == r*p*~r
False
The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent
to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to
this method:
>>> p^~r == r*p*~r
True
"""
if self.size != h.size:
raise ValueError("The permutations must be of equal size.")
a = [None]*self.size
h = h._array_form
p = self._array_form
for i in range(self.size):
a[h[i]] = h[p[i]]
return self._af_new(a)
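    # A small sketch (comment only) of the property stated above: conjugate
    # permutations share the same cycle structure.
    #
    #     from sympy.combinatorics import Permutation
    #     p, q = Permutation(1, 2, 9), Permutation(6, 9, 8)
    #     assert (p^q).cycle_structure == p.cycle_structure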
def transpositions(self):
"""
Return the permutation decomposed into a list of transpositions.
Explanation
===========
It is always possible to express a permutation as the product of
transpositions, see [1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]])
>>> t = p.transpositions()
>>> t
[(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)]
>>> print(''.join(str(c) for c in t))
(0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2)
>>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties
"""
a = self.cyclic_form
res = []
for x in a:
nx = len(x)
if nx == 2:
res.append(tuple(x))
elif nx > 2:
first = x[0]
for y in x[nx - 1:0:-1]:
res.append((first, y))
return res
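    # A small sketch (comment only) relating the decomposition above to
    # parity: any decomposition into transpositions has the same parity as
    # the permutation itself.
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]])
    #     assert len(p.transpositions()) % 2 == p.parity()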
@classmethod
def from_sequence(self, i, key=None):
"""Return the permutation needed to obtain ``i`` from the sorted
elements of ``i``. If custom sorting is desired, a key can be given.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.from_sequence('SymPy')
(4)(0 1 3)
>>> _(sorted("SymPy"))
['S', 'y', 'm', 'P', 'y']
>>> Permutation.from_sequence('SymPy', key=lambda x: x.lower())
(4)(0 2)(1 3)
"""
ic = list(zip(i, list(range(len(i)))))
if key:
ic.sort(key=lambda x: key(x[0]))
else:
ic.sort()
return ~Permutation([i[1] for i in ic])
def __invert__(self):
"""
Return the inverse of the permutation.
A permutation multiplied by its inverse is the identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([[2, 0], [3, 1]])
>>> ~p
Permutation([2, 3, 0, 1])
>>> _ == p**-1
True
>>> p*~p == ~p*p == Permutation([0, 1, 2, 3])
True
"""
return self._af_new(_af_invert(self._array_form))
def __iter__(self):
"""Yield elements from array form.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> list(Permutation(range(3)))
[0, 1, 2]
"""
yield from self.array_form
def __repr__(self):
from sympy.printing.repr import srepr
return srepr(self)
def __call__(self, *i):
"""
Allows applying a permutation instance as a bijective function.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> [p(i) for i in range(4)]
[2, 3, 0, 1]
If an array is given then the permutation selects the items
from the array (i.e. the permutation is applied to the array):
>>> from sympy.abc import x
>>> p([x, 1, 0, x**2])
[0, x**2, x, 1]
"""
# list indices can be Integer or int; leave this
# as it is (don't test or convert it) because this
# gets called a lot and should be fast
if len(i) == 1:
i = i[0]
if not isinstance(i, Iterable):
i = as_int(i)
if i < 0 or i > self.size:
raise TypeError(
"{} should be an integer between 0 and {}"
.format(i, self.size-1))
return self._array_form[i]
# P([a, b, c])
if len(i) != self.size:
raise TypeError(
"{} should have the length {}.".format(i, self.size))
return [i[j] for j in self._array_form]
# P(1, 2, 3)
return self*Permutation(Cycle(*i), size=self.size)
def atoms(self):
"""
Returns all the elements of a permutation
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2, 3, 4, 5]).atoms()
{0, 1, 2, 3, 4, 5}
>>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms()
{0, 1, 2, 3, 4, 5}
"""
return set(self.array_form)
def apply(self, i):
r"""Apply the permutation to an expression.
Parameters
==========
i : Expr
It should be an integer between $0$ and $n-1$ where $n$
is the size of the permutation.
If it is a symbol or a symbolic expression that can
have integer values, an ``AppliedPermutation`` object
will be returned which can represent an unevaluated
function.
Notes
=====
Any permutation can be defined as a bijective function
$\sigma : \{ 0, 1, ..., n-1 \} \rightarrow \{ 0, 1, ..., n-1 \}$
where $n$ denotes the size of the permutation.
        The definition could be extended to any set of distinct elements,
        so that a permutation could even be applied to real numbers and the
        like; however, this is not implemented, for computational reasons
        and for consistency with the group theory module.
        This function is similar to the ``__call__`` magic, however,
        ``__call__`` already has other uses, such as permuting an array
        or attaching new cycles, which would not always be mathematically
        consistent.
        This method also guarantees that the return value is a SymPy
        integer, which makes it safe to use with assumptions.
"""
i = _sympify(i)
if i.is_integer is False:
raise NotImplementedError("{} should be an integer.".format(i))
n = self.size
if (i < 0) == True or (i >= n) == True:
raise NotImplementedError(
"{} should be an integer between 0 and {}".format(i, n-1))
if i.is_Integer:
return Integer(self._array_form[i])
return AppliedPermutation(self, i)
def next_lex(self):
"""
Returns the next permutation in lexicographical order.
If self is the last permutation in lexicographical order
it returns None.
See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 3, 1, 0])
>>> p = Permutation([2, 3, 1, 0]); p.rank()
17
>>> p = p.next_lex(); p.rank()
18
See Also
========
rank, unrank_lex
"""
perm = self.array_form[:]
n = len(perm)
i = n - 2
while perm[i + 1] < perm[i]:
i -= 1
if i == -1:
return None
else:
j = n - 1
while perm[j] < perm[i]:
j -= 1
perm[j], perm[i] = perm[i], perm[j]
i += 1
j = n - 1
while i < j:
perm[j], perm[i] = perm[i], perm[j]
i += 1
j -= 1
return self._af_new(perm)
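    # A small sketch (comment only): starting from the identity and calling
    # next_lex repeatedly walks through all permutations of size 3 in rank
    # order, ending with None.
    #
    #     from sympy.combinatorics import Permutation
    #     p, ranks = Permutation([0, 1, 2]), []
    #     while p is not None:
    #         ranks.append(p.rank())
    #         p = p.next_lex()
    #     assert ranks == list(range(6))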
@classmethod
def unrank_nonlex(self, n, r):
"""
This is a linear time unranking algorithm that does not
respect lexicographic order [3].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_nonlex(4, 5)
Permutation([2, 0, 3, 1])
>>> Permutation.unrank_nonlex(4, -1)
Permutation([0, 1, 2, 3])
See Also
========
next_nonlex, rank_nonlex
"""
def _unrank1(n, r, a):
if n > 0:
a[n - 1], a[r % n] = a[r % n], a[n - 1]
_unrank1(n - 1, r//n, a)
id_perm = list(range(n))
n = int(n)
r = r % ifac(n)
_unrank1(n, r, id_perm)
return self._af_new(id_perm)
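    # A small sketch (comment only) of the round trip with rank_nonlex,
    # assuming the two methods remain inverses of each other:
    #
    #     from sympy.combinatorics import Permutation
    #     assert all(Permutation.unrank_nonlex(4, r).rank_nonlex() == r
    #                for r in range(24))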
def rank_nonlex(self, inv_perm=None):
"""
This is a linear time ranking algorithm that does not
enforce lexicographic order [3].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_nonlex()
23
See Also
========
next_nonlex, unrank_nonlex
"""
def _rank1(n, perm, inv_perm):
if n == 1:
return 0
s = perm[n - 1]
t = inv_perm[n - 1]
perm[n - 1], perm[t] = perm[t], s
inv_perm[n - 1], inv_perm[s] = inv_perm[s], t
return s + n*_rank1(n - 1, perm, inv_perm)
if inv_perm is None:
inv_perm = (~self).array_form
if not inv_perm:
return 0
perm = self.array_form[:]
r = _rank1(len(perm), perm, inv_perm)
return r
def next_nonlex(self):
"""
Returns the next permutation in nonlex order [3].
If self is the last permutation in this order it returns None.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex()
5
>>> p = p.next_nonlex(); p
Permutation([3, 0, 1, 2])
>>> p.rank_nonlex()
6
See Also
========
rank_nonlex, unrank_nonlex
"""
r = self.rank_nonlex()
if r == ifac(self.size) - 1:
return None
return self.unrank_nonlex(self.size, r + 1)
def rank(self):
"""
Returns the lexicographic rank of the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank()
0
>>> p = Permutation([3, 2, 1, 0])
>>> p.rank()
23
See Also
========
next_lex, unrank_lex, cardinality, length, order, size
"""
        if self._rank is not None:
return self._rank
rank = 0
rho = self.array_form[:]
n = self.size - 1
size = n + 1
psize = int(ifac(n))
for j in range(size - 1):
rank += rho[j]*psize
for i in range(j + 1, size):
if rho[i] > rho[j]:
rho[i] -= 1
psize //= n
n -= 1
self._rank = rank
return rank
@property
def cardinality(self):
"""
Returns the number of all possible permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.cardinality
24
See Also
========
length, order, rank, size
"""
return int(ifac(self.size))
def parity(self):
"""
Computes the parity of a permutation.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.parity()
0
>>> p = Permutation([3, 2, 0, 1])
>>> p.parity()
1
See Also
========
_af_parity
"""
if self._cyclic_form is not None:
return (self.size - self.cycles) % 2
return _af_parity(self.array_form)
@property
def is_even(self):
"""
Checks if a permutation is even.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_even
True
>>> p = Permutation([3, 2, 1, 0])
>>> p.is_even
True
See Also
========
is_odd
"""
return not self.is_odd
@property
def is_odd(self):
"""
Checks if a permutation is odd.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_odd
False
>>> p = Permutation([3, 2, 0, 1])
>>> p.is_odd
True
See Also
========
is_even
"""
return bool(self.parity() % 2)
@property
def is_Singleton(self):
"""
Checks to see if the permutation contains only one number and is
thus the only possible permutation of this set of numbers
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0]).is_Singleton
True
>>> Permutation([0, 1]).is_Singleton
False
See Also
========
is_Empty
"""
return self.size == 1
@property
def is_Empty(self):
"""
Checks to see if the permutation is a set with zero elements
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([]).is_Empty
True
>>> Permutation([0]).is_Empty
False
See Also
========
is_Singleton
"""
return self.size == 0
@property
def is_identity(self):
return self.is_Identity
@property
def is_Identity(self):
"""
Returns True if the Permutation is an identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([])
>>> p.is_Identity
True
>>> p = Permutation([[0], [1], [2]])
>>> p.is_Identity
True
>>> p = Permutation([0, 1, 2])
>>> p.is_Identity
True
>>> p = Permutation([0, 2, 1])
>>> p.is_Identity
False
See Also
========
order
"""
af = self.array_form
return not af or all(i == af[i] for i in range(self.size))
def ascents(self):
"""
        Returns the positions of ascents in a permutation, i.e., the
        locations where p[i] < p[i+1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.ascents()
[1, 2]
See Also
========
descents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]]
return pos
def descents(self):
"""
        Returns the positions of descents in a permutation, i.e., the
        locations where p[i] > p[i+1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.descents()
[0, 3]
See Also
========
ascents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]]
return pos
def max(self):
"""
The maximum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([1, 0, 2, 3, 4])
>>> p.max()
1
See Also
========
min, descents, ascents, inversions
"""
max = 0
a = self.array_form
for i in range(len(a)):
if a[i] != i and a[i] > max:
max = a[i]
return max
def min(self):
"""
The minimum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 4, 3, 2])
>>> p.min()
2
See Also
========
max, descents, ascents, inversions
"""
a = self.array_form
min = len(a)
for i in range(len(a)):
if a[i] != i and a[i] < min:
min = a[i]
return min
def inversions(self):
"""
Computes the number of inversions of a permutation.
Explanation
===========
An inversion is where i > j but p[i] < p[j].
For small length of p, it iterates over all i and j
values and calculates the number of inversions.
For large length of p, it uses a variation of merge
sort to calculate the number of inversions.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3, 4, 5])
>>> p.inversions()
0
>>> Permutation([3, 2, 1, 0]).inversions()
6
See Also
========
descents, ascents, min, max
References
==========
.. [1] http://www.cp.eng.chula.ac.th/~piak/teaching/algo/algo2008/count-inv.htm
"""
inversions = 0
a = self.array_form
n = len(a)
if n < 130:
for i in range(n - 1):
b = a[i]
for c in a[i + 1:]:
if b > c:
inversions += 1
else:
k = 1
right = 0
arr = a[:]
temp = a[:]
while k < n:
i = 0
while i + k < n:
right = i + k * 2 - 1
if right >= n:
right = n - 1
inversions += _merge(arr, temp, i, i + k, right)
i = i + k * 2
k = k * 2
return inversions
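    # A small sketch (comment only) tying the count above to signature():
    # the signature is (-1) raised to the number of inversions.
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([3, 2, 1, 0])
    #     assert p.signature() == (-1)**p.inversions() == 1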
def commutator(self, x):
"""Return the commutator of ``self`` and ``x``: ``~x*~self*x*self``
If f and g are part of a group, G, then the commutator of f and g
is the group identity iff f and g commute, i.e. fg == gf.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([0, 2, 3, 1])
>>> x = Permutation([2, 0, 3, 1])
>>> c = p.commutator(x); c
Permutation([2, 1, 3, 0])
>>> c == ~x*~p*x*p
True
>>> I = Permutation(3)
>>> p = [I + i for i in range(6)]
>>> for i in range(len(p)):
... for j in range(len(p)):
... c = p[i].commutator(p[j])
... if p[i]*p[j] == p[j]*p[i]:
... assert c == I
... else:
... assert c != I
...
References
==========
        .. [1] https://en.wikipedia.org/wiki/Commutator
"""
a = self.array_form
b = x.array_form
n = len(a)
if len(b) != n:
raise ValueError("The permutations must be of equal size.")
inva = [None]*n
for i in range(n):
inva[a[i]] = i
invb = [None]*n
for i in range(n):
invb[b[i]] = i
return self._af_new([a[b[inva[i]]] for i in invb])
def signature(self):
"""
Gives the signature of the permutation needed to place the
elements of the permutation in canonical order.
The signature is calculated as (-1)^<number of inversions>
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2])
>>> p.inversions()
0
>>> p.signature()
1
>>> q = Permutation([0,2,1])
>>> q.inversions()
1
>>> q.signature()
-1
See Also
========
inversions
"""
if self.is_even:
return 1
return -1
def order(self):
"""
Computes the order of a permutation.
When the permutation is raised to the power of its
order it equals the identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 1, 5, 2, 4, 0])
>>> p.order()
4
>>> (p**(p.order()))
Permutation([], size=6)
See Also
========
identity, cardinality, length, rank, size
"""
return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1)
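    # A small sketch (comment only): raising a permutation to its order
    # gives the identity, and no smaller positive power of this example does.
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([3, 1, 5, 2, 4, 0])
    #     assert (p**p.order()).is_Identity
    #     assert not (p**(p.order() - 1)).is_Identity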
def length(self):
"""
Returns the number of integers moved by a permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 3, 2, 1]).length()
2
>>> Permutation([[0, 1], [2, 3]]).length()
4
See Also
========
min, max, support, cardinality, order, rank, size
"""
return len(self.support())
@property
def cycle_structure(self):
"""Return the cycle structure of the permutation as a dictionary
indicating the multiplicity of each cycle length.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation(3).cycle_structure
{1: 4}
>>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure
{2: 2, 3: 1}
"""
if self._cycle_structure:
rv = self._cycle_structure
else:
rv = defaultdict(int)
singletons = self.size
for c in self.cyclic_form:
rv[len(c)] += 1
singletons -= len(c)
if singletons:
rv[1] = singletons
self._cycle_structure = rv
return dict(rv) # make a copy
@property
def cycles(self):
"""
Returns the number of cycles contained in the permutation
(including singletons).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2]).cycles
3
>>> Permutation([0, 1, 2]).full_cyclic_form
[[0], [1], [2]]
>>> Permutation(0, 1)(2, 3).cycles
2
See Also
========
sympy.functions.combinatorial.numbers.stirling
"""
return len(self.full_cyclic_form)
def index(self):
"""
Returns the index of a permutation.
The index of a permutation is the sum of all subscripts j such
that p[j] is greater than p[j+1].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([3, 0, 2, 1, 4])
>>> p.index()
2
"""
a = self.array_form
return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]])
def runs(self):
"""
Returns the runs of a permutation.
An ascending sequence in a permutation is called a run [5].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8])
>>> p.runs()
[[2, 5, 7], [3, 6], [0, 1, 4, 8]]
>>> q = Permutation([1,3,2,0])
>>> q.runs()
[[1, 3], [2], [0]]
"""
return runs(self.array_form)
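    # A small sketch (comment only): the runs partition the array form, so
    # concatenating them in order reproduces it.
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8])
    #     assert sum(p.runs(), []) == p.array_form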
def inversion_vector(self):
"""Return the inversion vector of the permutation.
The inversion vector consists of elements whose value
indicates the number of elements in the permutation
        that are smaller than it and lie to its right.
The inversion vector is the same as the Lehmer encoding of a
permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2])
>>> p.inversion_vector()
[4, 7, 0, 5, 0, 2, 1, 1]
>>> p = Permutation([3, 2, 1, 0])
>>> p.inversion_vector()
[3, 2, 1]
The inversion vector increases lexicographically with the rank
        of the permutation, the i-th element cycling through 0..i.
>>> p = Permutation(2)
>>> while p:
... print('%s %s %s' % (p, p.inversion_vector(), p.rank()))
... p = p.next_lex()
(2) [0, 0] 0
(1 2) [0, 1] 1
(2)(0 1) [1, 0] 2
(0 1 2) [1, 1] 3
(0 2 1) [2, 0] 4
(0 2) [2, 1] 5
See Also
========
from_inversion_vector
"""
self_array_form = self.array_form
n = len(self_array_form)
inversion_vector = [0] * (n - 1)
for i in range(n - 1):
val = 0
for j in range(i + 1, n):
if self_array_form[j] < self_array_form[i]:
val += 1
inversion_vector[i] = val
return inversion_vector
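    # A small sketch (comment only) of the Lehmer-code round trip with
    # from_inversion_vector:
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([3, 2, 1, 0])
    #     assert Permutation.from_inversion_vector(p.inversion_vector()) == p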
def rank_trotterjohnson(self):
"""
Returns the Trotter Johnson rank, which we get from the minimal
change algorithm. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_trotterjohnson()
0
>>> p = Permutation([0, 2, 1, 3])
>>> p.rank_trotterjohnson()
7
See Also
========
unrank_trotterjohnson, next_trotterjohnson
"""
if self.array_form == [] or self.is_Identity:
return 0
if self.array_form == [1, 0]:
return 1
perm = self.array_form
n = self.size
rank = 0
for j in range(1, n):
k = 1
i = 0
while perm[i] != j:
if perm[i] < j:
k += 1
i += 1
j1 = j + 1
if rank % 2 == 0:
rank = j1*rank + j1 - k
else:
rank = j1*rank + k - 1
return rank
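    # A small sketch (comment only) of the round trip with
    # unrank_trotterjohnson, reusing the value from the doctest above:
    #
    #     from sympy.combinatorics import Permutation
    #     p = Permutation([0, 2, 1, 3])
    #     assert Permutation.unrank_trotterjohnson(4, p.rank_trotterjohnson()) == p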
@classmethod
def unrank_trotterjohnson(cls, size, rank):
"""
Trotter Johnson permutation unranking. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_trotterjohnson(5, 10)
Permutation([0, 3, 1, 2, 4])
See Also
========
rank_trotterjohnson, next_trotterjohnson
"""
perm = [0]*size
r2 = 0
n = ifac(size)
pj = 1
for j in range(2, size + 1):
pj *= j
r1 = (rank * pj) // n
k = r1 - j*r2
if r2 % 2 == 0:
for i in range(j - 1, j - k - 1, -1):
perm[i] = perm[i - 1]
perm[j - k - 1] = j - 1
else:
for i in range(j - 1, k, -1):
perm[i] = perm[i - 1]
perm[k] = j - 1
r2 = r1
return cls._af_new(perm)
def next_trotterjohnson(self):
"""
Returns the next permutation in Trotter-Johnson order.
If self is the last permutation it returns None.
See [4] section 2.4. If it is desired to generate all such
permutations, they can be generated in order more quickly
with the ``generate_bell`` function.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 0, 2, 1])
>>> p.rank_trotterjohnson()
4
>>> p = p.next_trotterjohnson(); p
Permutation([0, 3, 2, 1])
>>> p.rank_trotterjohnson()
5
See Also
========
rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell
"""
pi = self.array_form[:]
n = len(pi)
st = 0
rho = pi[:]
done = False
m = n-1
while m > 0 and not done:
d = rho.index(m)
for i in range(d, m):
rho[i] = rho[i + 1]
par = _af_parity(rho[:m])
if par == 1:
if d == m:
m -= 1
else:
pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d]
done = True
else:
if d == 0:
m -= 1
st += 1
else:
pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d]
done = True
if m == 0:
return None
return self._af_new(pi)
def get_precedence_matrix(self):
"""
Gets the precedence matrix. This is used for computing the
distance between two permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation.josephus(3, 6, 1)
>>> p
Permutation([2, 5, 3, 1, 4, 0])
>>> p.get_precedence_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0]])
See Also
========
get_precedence_distance, get_adjacency_matrix, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(m.rows):
for j in range(i + 1, m.cols):
m[perm[i], perm[j]] = 1
return m
def get_precedence_distance(self, other):
"""
Computes the precedence distance between two permutations.
Explanation
===========
Suppose p and p' represent n jobs. The precedence metric
counts the number of times a job j is preceded by job i
in both p and p'. This metric is commutative.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 0, 4, 3, 1])
>>> q = Permutation([3, 1, 2, 4, 0])
>>> p.get_precedence_distance(q)
7
>>> q.get_precedence_distance(p)
7
See Also
========
get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance
"""
if self.size != other.size:
raise ValueError("The permutations must be of equal size.")
self_prec_mat = self.get_precedence_matrix()
other_prec_mat = other.get_precedence_matrix()
n_prec = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_prec_mat[i, j] * other_prec_mat[i, j] == 1:
n_prec += 1
d = self.size * (self.size - 1)//2 - n_prec
return d
def get_adjacency_matrix(self):
"""
Computes the adjacency matrix of a permutation.
Explanation
===========
If job i is adjacent to job j in a permutation p
then we set m[i, j] = 1 where m is the adjacency
matrix of p.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation.josephus(3, 6, 1)
>>> p.get_adjacency_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
>>> q = Permutation([0, 1, 2, 3])
>>> q.get_adjacency_matrix()
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]])
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(self.size - 1):
m[perm[i], perm[i + 1]] = 1
return m
def get_adjacency_distance(self, other):
"""
Computes the adjacency distance between two permutations.
Explanation
===========
This metric counts the number of times a pair i,j of jobs is
adjacent in both p and p'. If n_adj is this quantity then
the adjacency distance is n - n_adj - 1 [1]
[1] Reeves, Colin R. Landscapes, Operators and Heuristic search, Annals
of Operational Research, 86, pp 473-490. (1999)
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> p.get_adjacency_distance(q)
3
>>> r = Permutation([0, 2, 1, 4, 3])
>>> p.get_adjacency_distance(r)
4
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_matrix
"""
if self.size != other.size:
raise ValueError("The permutations must be of the same size.")
self_adj_mat = self.get_adjacency_matrix()
other_adj_mat = other.get_adjacency_matrix()
n_adj = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_adj_mat[i, j] * other_adj_mat[i, j] == 1:
n_adj += 1
d = self.size - n_adj - 1
return d
def get_positional_distance(self, other):
"""
Computes the positional distance between two permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> r = Permutation([3, 1, 4, 0, 2])
>>> p.get_positional_distance(q)
12
>>> p.get_positional_distance(r)
12
See Also
========
get_precedence_distance, get_adjacency_distance
"""
a = self.array_form
b = other.array_form
if len(a) != len(b):
raise ValueError("The permutations must be of the same size.")
return sum([abs(a[i] - b[i]) for i in range(len(a))])
@classmethod
def josephus(cls, m, n, s=1):
"""Return as a permutation the shuffling of range(n) using the Josephus
scheme in which every m-th item is selected until all have been chosen.
The returned permutation has elements listed by the order in which they
were selected.
The parameter ``s`` stops the selection process when there are ``s``
items remaining and these are selected by continuing the selection,
counting by 1 rather than by ``m``.
Consider selecting every 3rd item from 6 until only 2 remain::
choices chosen
======== ======
012345
01 345 2
01 34 25
01 4 253
0 4 2531
0 25314
253140
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.josephus(3, 6, 2).array_form
[2, 5, 3, 1, 4, 0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Flavius_Josephus
.. [2] https://en.wikipedia.org/wiki/Josephus_problem
.. [3] http://www.wou.edu/~burtonl/josephus.html
"""
from collections import deque
m -= 1
Q = deque(list(range(n)))
perm = []
while len(Q) > max(s, 1):
for dp in range(m):
Q.append(Q.popleft())
perm.append(Q.popleft())
perm.extend(list(Q))
return cls(perm)
@classmethod
def from_inversion_vector(cls, inversion):
"""
Calculates the permutation from the inversion vector.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.from_inversion_vector([3, 2, 1, 0, 0])
Permutation([3, 2, 1, 0, 4, 5])
"""
size = len(inversion)
N = list(range(size + 1))
perm = []
try:
for k in range(size):
val = N[inversion[k]]
perm.append(val)
N.remove(val)
except IndexError:
raise ValueError("The inversion vector is not valid.")
perm.extend(N)
return cls._af_new(perm)
@classmethod
def random(cls, n):
"""
Generates a random permutation of length ``n``.
Uses the underlying Python pseudo-random number generator.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
True
"""
perm_array = list(range(n))
random.shuffle(perm_array)
return cls._af_new(perm_array)
@classmethod
def unrank_lex(cls, size, rank):
"""
Lexicographic permutation unranking.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = Permutation.unrank_lex(5, 10)
>>> a.rank()
10
>>> a
Permutation([0, 2, 4, 1, 3])
See Also
========
rank, next_lex
"""
perm_array = [0] * size
psize = 1
for i in range(size):
new_psize = psize*(i + 1)
d = (rank % new_psize) // psize
rank -= d*psize
perm_array[size - i - 1] = d
for j in range(size - i, size):
if perm_array[j] > d - 1:
perm_array[j] += 1
psize = new_psize
return cls._af_new(perm_array)
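    # A small sketch (comment only): unrank_lex and rank are inverses over
    # all 4! ranks.
    #
    #     from sympy.combinatorics import Permutation
    #     assert all(Permutation.unrank_lex(4, r).rank() == r for r in range(24))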
def resize(self, n):
"""Resize the permutation to the new size ``n``.
Parameters
==========
n : int
The new size of the permutation.
Raises
======
ValueError
If the permutation cannot be resized to the given size.
This may only happen when resized to a smaller size than
the original.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
Increasing the size of a permutation:
>>> p = Permutation(0, 1, 2)
>>> p = p.resize(5)
>>> p
(4)(0 1 2)
Decreasing the size of the permutation:
>>> p = p.resize(4)
>>> p
(3)(0 1 2)
If resizing to the specific size breaks the cycles:
>>> p.resize(2)
Traceback (most recent call last):
...
ValueError: The permutation can not be resized to 2 because the
cycle (0, 1, 2) may break.
"""
aform = self.array_form
l = len(aform)
if n > l:
aform += list(range(l, n))
return Permutation._af_new(aform)
elif n < l:
cyclic_form = self.full_cyclic_form
new_cyclic_form = []
for cycle in cyclic_form:
cycle_min = min(cycle)
cycle_max = max(cycle)
if cycle_min <= n-1:
if cycle_max > n-1:
raise ValueError(
"The permutation can not be resized to {} "
"because the cycle {} may break."
.format(n, tuple(cycle)))
new_cyclic_form.append(cycle)
return Permutation(new_cyclic_form)
return self
# XXX Deprecated flag
print_cyclic = None
def _merge(arr, temp, left, mid, right):
"""
Merges two sorted arrays and calculates the inversion count.
Helper function for calculating inversions. This method is
for internal use only.
"""
i = k = left
j = mid
inv_count = 0
while i < mid and j <= right:
if arr[i] < arr[j]:
temp[k] = arr[i]
k += 1
i += 1
else:
temp[k] = arr[j]
k += 1
j += 1
            inv_count += (mid - i)
while i < mid:
temp[k] = arr[i]
k += 1
i += 1
if j <= right:
k += right - j + 1
j += right - j + 1
arr[left:k + 1] = temp[left:k + 1]
else:
arr[left:right + 1] = temp[left:right + 1]
return inv_count
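# A small sketch (kept as a comment so nothing runs at import time) of what
# _merge reports: the number of cross inversions between two already-sorted
# halves, merging them back into ``arr`` in place.
#
#     arr = [2, 3, 1, 4]               # halves [2, 3] and [1, 4] are sorted
#     temp = arr[:]
#     assert _merge(arr, temp, 0, 2, 3) == 2   # inversions (2, 1) and (3, 1)
#     assert arr == [1, 2, 3, 4]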
Perm = Permutation
_af_new = Perm._af_new
class AppliedPermutation(Expr):
"""A permutation applied to a symbolic variable.
Parameters
==========
perm : Permutation
x : Expr
Examples
========
>>> from sympy import Symbol
>>> from sympy.combinatorics import Permutation
Creating a symbolic permutation function application:
>>> x = Symbol('x')
>>> p = Permutation(0, 1, 2)
>>> p.apply(x)
AppliedPermutation((0 1 2), x)
>>> _.subs(x, 1)
2
"""
def __new__(cls, perm, x, evaluate=None):
if evaluate is None:
evaluate = global_parameters.evaluate
perm = _sympify(perm)
x = _sympify(x)
if not isinstance(perm, Permutation):
raise ValueError("{} must be a Permutation instance."
.format(perm))
if evaluate:
if x.is_Integer:
return perm.apply(x)
obj = super().__new__(cls, perm, x)
return obj
@dispatch(Permutation, Permutation)
def _eval_is_eq(lhs, rhs):
if lhs._size != rhs._size:
return None
return lhs._array_form == rhs._array_form
from sympy.combinatorics import Permutation as Perm
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core import Basic, Tuple
from sympy.core.compatibility import as_int
from sympy.sets import FiniteSet
from sympy.utilities.iterables import (minlex, unflatten, flatten, default_sort_key)
rmul = Perm.rmul
class Polyhedron(Basic):
"""
Represents the polyhedral symmetry group (PSG).
Explanation
===========
The PSG is one of the symmetry groups of the Platonic solids.
There are three polyhedral groups: the tetrahedral group
of order 12, the octahedral group of order 24, and the
icosahedral group of order 60.
All doctests have been given in the docstring of the
constructor of the object.
References
==========
.. [1] http://mathworld.wolfram.com/PolyhedralGroup.html
"""
_edges = None
def __new__(cls, corners, faces=[], pgroup=[]):
"""
The constructor of the Polyhedron group object.
Explanation
===========
It takes up to three parameters: the corners, faces, and
allowed transformations.
The corners/vertices are entered as a list of arbitrary
expressions that are used to identify each vertex.
The faces are entered as a list of tuples of indices; a tuple
of indices identifies the vertices which define the face. They
should be entered in a cw or ccw order; they will be standardized
        by reversal and rotation to give the lowest lexical ordering.
If no faces are given then no edges will be computed.
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> Polyhedron(list('abc'), [(1, 2, 0)]).faces
FiniteSet((0, 1, 2))
>>> Polyhedron(list('abc'), [(1, 0, 2)]).faces
FiniteSet((0, 1, 2))
        The allowed transformations are entered as allowable permutations
        of the vertices for the polyhedron. Instances of Permutation
        (as with faces) should refer to the supplied vertices by index.
        These permutations are stored as a PermutationGroup.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> from sympy.abc import w, x, y, z
>>> init_printing(pretty_print=False, perm_cyclic=False)
Here we construct the Polyhedron object for a tetrahedron.
>>> corners = [w, x, y, z]
>>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]
Next, allowed transformations of the polyhedron must be given. This
is given as permutations of vertices.
Although the vertices of a tetrahedron can be numbered in 24 (4!)
different ways, there are only 12 different orientations for a
physical tetrahedron. The following permutations, applied once or
twice, will generate all 12 of the orientations. (The identity
permutation, Permutation(range(4)), is not included since it does
not change the orientation of the vertices.)
>>> pgroup = [Permutation([[0, 1, 2], [3]]), \
Permutation([[0, 1, 3], [2]]), \
Permutation([[0, 2, 3], [1]]), \
Permutation([[1, 2, 3], [0]]), \
Permutation([[0, 1], [2, 3]]), \
Permutation([[0, 2], [1, 3]]), \
Permutation([[0, 3], [1, 2]])]
The Polyhedron is now constructed and demonstrated:
>>> tetra = Polyhedron(corners, faces, pgroup)
>>> tetra.size
4
>>> tetra.edges
FiniteSet((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3))
>>> tetra.corners
(w, x, y, z)
It can be rotated with an arbitrary permutation of vertices, e.g.
the following permutation is not in the pgroup:
>>> tetra.rotate(Permutation([0, 1, 3, 2]))
>>> tetra.corners
(w, x, z, y)
An allowed permutation of the vertices can be constructed by
repeatedly applying permutations from the pgroup to the vertices.
Here is a demonstration that applying p and p**2 for every p in
pgroup generates all the orientations of a tetrahedron and no others:
>>> all = ( (w, x, y, z), \
(x, y, w, z), \
(y, w, x, z), \
(w, z, x, y), \
(z, w, y, x), \
(w, y, z, x), \
(y, z, w, x), \
(x, z, y, w), \
(z, y, x, w), \
(y, x, z, w), \
(x, w, z, y), \
(z, x, w, y) )
>>> got = []
>>> for p in (pgroup + [p**2 for p in pgroup]):
... h = Polyhedron(corners)
... h.rotate(p)
... got.append(h.corners)
...
>>> set(got) == set(all)
True
The make_perm method of a PermutationGroup will randomly pick
permutations, multiply them together, and return the permutation that
can be applied to the polyhedron to give the orientation produced
by those individual permutations.
Here, 3 permutations are used:
>>> tetra.pgroup.make_perm(3) # doctest: +SKIP
Permutation([0, 3, 1, 2])
To select the permutations that should be used, supply a list
of indices to the permutations in pgroup in the order they should
be applied:
>>> use = [0, 0, 2]
>>> p002 = tetra.pgroup.make_perm(3, use)
>>> p002
Permutation([1, 0, 3, 2])
Apply them one at a time:
>>> tetra.reset()
>>> for i in use:
... tetra.rotate(pgroup[i])
...
>>> tetra.vertices
(x, w, z, y)
>>> sequentially = tetra.vertices
Apply the composite permutation:
>>> tetra.reset()
>>> tetra.rotate(p002)
>>> tetra.corners
(x, w, z, y)
>>> tetra.corners in all and tetra.corners == sequentially
True
Notes
=====
Defining permutation groups
---------------------------
        It is not necessary to enter any permutations, nor is it necessary to
enter a complete set of transformations. In fact, for a polyhedron,
all configurations can be constructed from just two permutations.
For example, the orientations of a tetrahedron can be generated from
an axis passing through a vertex and face and another axis passing
through a different vertex or from an axis passing through the
midpoints of two edges opposite of each other.
For simplicity of presentation, consider a square --
not a cube -- with vertices 1, 2, 3, and 4:
1-----2 We could think of axes of rotation being:
| | 1) through the face
| | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4
3-----4 3) lines 1-4 or 2-3
To determine how to write the permutations, imagine 4 cameras,
one at each corner, labeled A-D:
A B A B
1-----2 1-----3 vertex index:
| | | | 1 0
| | | | 2 1
3-----4 2-----4 3 2
C D C D 4 3
original after rotation
along 1-4
A diagonal and a face axis will be chosen for the "permutation group"
from which any orientation can be constructed.
>>> pgroup = []
Imagine a clockwise rotation when viewing 1-4 from camera A. The new
orientation is (in camera-order): 1, 3, 2, 4 so the permutation is
given using the *indices* of the vertices as:
>>> pgroup.append(Permutation((0, 2, 1, 3)))
Now imagine rotating clockwise when looking down an axis entering the
center of the square as viewed. The new camera-order would be
3, 1, 4, 2 so the permutation is (using indices):
>>> pgroup.append(Permutation((2, 0, 3, 1)))
The square can now be constructed:
** use real-world labels for the vertices, entering them in
camera order
** for the faces we use zero-based indices of the vertices
in *edge-order* as the face is traversed; neither the
direction nor the starting point matter -- the faces are
only used to define edges (if so desired).
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)
To rotate the square with a single permutation we can do:
>>> square.rotate(square.pgroup[0])
>>> square.corners
(1, 3, 2, 4)
To use more than one permutation (or to use one permutation more
than once) it is more convenient to use the make_perm method:
>>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations
>>> square.reset() # return to initial orientation
>>> square.rotate(p011)
>>> square.corners
(4, 2, 3, 1)
Thinking outside the box
------------------------
Although the Polyhedron object has a direct physical meaning, it
actually has broader application. In the most general sense it is
just a decorated PermutationGroup, allowing one to connect the
permutations to something physical. For example, a Rubik's cube is
not a proper polyhedron, but the Polyhedron class can be used to
represent it in a way that helps to visualize the Rubik's cube.
>>> from sympy.utilities.iterables import flatten, unflatten
>>> from sympy import symbols
>>> from sympy.combinatorics import RubikGroup
>>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])
>>> def show():
... pairs = unflatten(r2.corners, 2)
... print(pairs[::2])
... print(pairs[1::2])
...
>>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))
>>> show()
[(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]
[(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]
>>> r2.rotate(0) # cw rotation of F
>>> show()
[(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]
[(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]
Predefined Polyhedra
====================
For convenience, the vertices and faces are defined for the following
standard solids along with a permutation group for transformations.
When the polyhedron is oriented as indicated below, the vertices in
a given horizontal plane are numbered in ccw direction, starting from
the vertex that will give the lowest indices in a given face. (In the
net of the vertices, indices preceded by "-" indicate replication of
the lhs index in the net.)
tetrahedron, tetrahedron_faces
------------------------------
4 vertices (vertex up) net:
0 0-0
1 2 3-1
4 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3)
cube, cube_faces
----------------
8 vertices (face up) net:
0 1 2 3-0
4 5 6 7-4
6 faces:
(0, 1, 2, 3)
(0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4)
(4, 5, 6, 7)
octahedron, octahedron_faces
----------------------------
6 vertices (vertex up) net:
0 0 0-0
1 2 3 4-1
5 5 5-5
8 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4)
(1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5)
dodecahedron, dodecahedron_faces
--------------------------------
20 vertices (vertex up) net:
0 1 2 3 4 -0
5 6 7 8 9 -5
14 10 11 12 13-14
15 16 17 18 19-15
12 faces:
(0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6)
(2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5)
(5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12)
(8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19)
icosahedron, icosahedron_faces
------------------------------
12 vertices (face up) net:
0 0 0 0 -0
1 2 3 4 5 -1
6 7 8 9 10 -6
11 11 11 11 -11
20 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4)
(0, 4, 5) (0, 1, 5) (1, 2, 6)
(2, 3, 7) (3, 4, 8) (4, 5, 9)
(1, 5, 10) (2, 6, 7) (3, 7, 8)
(4, 8, 9) (5, 9, 10) (1, 6, 10)
(6, 7, 11) (7, 8, 11) (8, 9, 11)
(9, 10, 11) (6, 10, 11)
>>> from sympy.combinatorics.polyhedron import cube
>>> cube.edges
FiniteSet((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7))
If you want to use letters or other names for the corners you
can still use the pre-calculated faces:
>>> corners = list('abcdefgh')
>>> Polyhedron(corners, cube.faces).corners
(a, b, c, d, e, f, g, h)
References
==========
.. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf
"""
faces = [minlex(f, directed=False, key=default_sort_key) for f in faces]
corners, faces, pgroup = args = \
[Tuple(*a) for a in (corners, faces, pgroup)]
obj = Basic.__new__(cls, *args)
obj._corners = tuple(corners) # in order given
obj._faces = FiniteSet(*faces)
if pgroup and pgroup[0].size != len(corners):
raise ValueError("Permutation size unequal to number of corners.")
# use the identity permutation if none are given
obj._pgroup = PermutationGroup(
pgroup or [Perm(range(len(corners)))] )
return obj
@property
def corners(self):
"""
Get the corners of the Polyhedron.
The method ``vertices`` is an alias for ``corners``.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c, d
>>> p = Polyhedron(list('abcd'))
>>> p.corners == p.vertices == (a, b, c, d)
True
See Also
========
array_form, cyclic_form
"""
return self._corners
vertices = corners
@property
def array_form(self):
"""Return the indices of the corners.
The indices are given relative to the original position of corners.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron
>>> tetrahedron = tetrahedron.copy()
>>> tetrahedron.array_form
[0, 1, 2, 3]
>>> tetrahedron.rotate(0)
>>> tetrahedron.array_form
[0, 2, 3, 1]
>>> tetrahedron.pgroup[0].array_form
[0, 2, 3, 1]
See Also
========
corners, cyclic_form
"""
corners = list(self.args[0])
return [corners.index(c) for c in self.corners]
@property
def cyclic_form(self):
"""Return the indices of the corners in cyclic notation.
The indices are given relative to the original position of corners.
See Also
========
corners, array_form
"""
return Perm._af_new(self.array_form).cyclic_form
@property
def size(self):
"""
Get the number of corners of the Polyhedron.
"""
return len(self._corners)
@property
def faces(self):
"""
Get the faces of the Polyhedron.
"""
return self._faces
@property
def pgroup(self):
"""
Get the permutations of the Polyhedron.
"""
return self._pgroup
@property
def edges(self):
"""
Given the faces of the polyhedra we can get the edges.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c
>>> corners = (a, b, c)
>>> faces = [(0, 1, 2)]
>>> Polyhedron(corners, faces).edges
FiniteSet((0, 1), (0, 2), (1, 2))
"""
if self._edges is None:
output = set()
for face in self.faces:
for i in range(len(face)):
edge = tuple(sorted([face[i], face[i - 1]]))
output.add(edge)
self._edges = FiniteSet(*output)
return self._edges
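    # A small sketch (comment only): each face contributes the edges between
    # consecutive vertices, including the pair that closes the face. The
    # square example here is illustrative and not part of the doctests.
    #
    #     from sympy import FiniteSet
    #     from sympy.combinatorics import Polyhedron
    #     square = Polyhedron(range(4), [(0, 1, 2, 3)])
    #     assert square.edges == FiniteSet((0, 1), (0, 3), (1, 2), (2, 3))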
def rotate(self, perm):
"""
Apply a permutation to the polyhedron *in place*. The permutation
may be given as a Permutation instance or an integer indicating
which permutation from pgroup of the Polyhedron should be
applied.
This is an operation that is analogous to rotation about
an axis by a fixed increment.
Notes
=====
When a Permutation is applied, no check is done to see if that
is a valid permutation for the Polyhedron. For example, a cube
could be given a permutation which effectively swaps only 2
vertices. A valid permutation (that rotates the object in a
physical way) will be obtained if one only uses
permutations from the ``pgroup`` of the Polyhedron. On the other
hand, allowing arbitrary rotations (applications of permutations)
gives a way to follow named elements rather than indices since
Polyhedron allows vertices to be named while Permutation works
only with indices.
Examples
========
>>> from sympy.combinatorics import Polyhedron, Permutation
>>> from sympy.combinatorics.polyhedron import cube
>>> cube = cube.copy()
>>> cube.corners
(0, 1, 2, 3, 4, 5, 6, 7)
>>> cube.rotate(0)
>>> cube.corners
(1, 2, 3, 0, 5, 6, 7, 4)
A non-physical "rotation" that is not prohibited by this method:
>>> cube.reset()
>>> cube.rotate(Permutation([[1, 2]], size=8))
>>> cube.corners
(0, 2, 1, 3, 4, 5, 6, 7)
Polyhedron can be used to follow elements of set that are
identified by letters instead of integers:
>>> shadow = h5 = Polyhedron(list('abcde'))
>>> p = Permutation([3, 0, 1, 2, 4])
>>> h5.rotate(p)
>>> h5.corners
(d, a, b, c, e)
>>> _ == shadow.corners
True
>>> copy = h5.copy()
>>> h5.rotate(p)
>>> h5.corners == copy.corners
False
"""
if not isinstance(perm, Perm):
perm = self.pgroup[perm]
# and we know it's valid
else:
if perm.size != self.size:
raise ValueError('Polyhedron and Permutation sizes differ.')
a = perm.array_form
corners = [self.corners[a[i]] for i in range(len(self.corners))]
self._corners = tuple(corners)
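    # A small sketch (comment only): rotating by a permutation and then by
    # its inverse restores the original corner order.
    #
    #     from sympy.combinatorics import Permutation, Polyhedron
    #     h = Polyhedron(list('abcde'))
    #     start = h.corners
    #     p = Permutation([3, 0, 1, 2, 4])
    #     h.rotate(p); h.rotate(~p)
    #     assert h.corners == start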
def reset(self):
"""Return corners to their original positions.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron as T
>>> T = T.copy()
>>> T.corners
(0, 1, 2, 3)
>>> T.rotate(0)
>>> T.corners
(0, 2, 3, 1)
>>> T.reset()
>>> T.corners
(0, 1, 2, 3)
"""
self._corners = self.args[0]
def _pgroup_calcs():
"""Return the permutation groups for each of the polyhedra and the face
definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,
icosahedron_faces
Explanation
===========
(This author didn't find and didn't know of a better way to do it though
there likely is such a way.)
Although only 2 permutations are needed for a polyhedron in order to
generate all the possible orientations, a group of permutations is
provided instead. A set of permutations is called a "group" if::
a*b = c (for any pair of permutations in the group, a and b, their
product, c, is in the group)
a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)
        there is an identity permutation, I, such that I*a = a*I = a for all elements
in the group
a*b = I (the inverse of each permutation is also in the group)
None of the polyhedron groups defined follow these definitions of a group.
Instead, they are selected to contain those permutations whose powers
alone will construct all orientations of the polyhedron, i.e. for
permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,
``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of
permutation ``i``) generate all permutations of the polyhedron instead of
mixed products like ``a*b``, ``a*b**2``, etc....
Note that for a polyhedron with n vertices, the valid permutations of the
    vertices exclude those that do not maintain its faces, e.g. the
    permutation BCDA of a square's four corners, ABCD, is a valid
    permutation while CBDA is not (because this would twist the square).
Examples
========
    The is_group checks for: closure, the presence of the Identity permutation,
    and the presence of the inverse for each of the elements in the group. Since
    each ``pgroup`` is stored as a PermutationGroup (the closure of the supplied
    permutations), the check passes even though the supplied permutations alone
    do not form a group:
>>> from sympy.combinatorics.polyhedron import (
... tetrahedron, cube, octahedron, dodecahedron, icosahedron)
...
>>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)
>>> [h.pgroup.is_group for h in polyhedra]
...
[True, True, True, True, True]
Although tests in polyhedron's test suite check that powers of the
permutations in the groups generate all permutations of the vertices
of the polyhedron, here we also demonstrate the powers of the given
permutations create a complete group for the tetrahedron:
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> for h in polyhedra[:1]:
... G = h.pgroup
... perms = set()
... for g in G:
... for e in range(g.order()):
... p = tuple((g**e).array_form)
... perms.add(p)
...
... perms = [Permutation(p) for p in perms]
... assert PermutationGroup(perms).is_group
In addition to doing the above, the tests in the suite confirm that the
faces are all present after the application of each permutation.
References
==========
.. [1] http://dogschool.tripod.com/trianglegroup.html
"""
def _pgroup_of_double(polyh, ordered_faces, pgroup):
n = len(ordered_faces[0])
        # the vertices of the double which sits inside a given polyhedron
# can be found by tracking the faces of the outer polyhedron.
# A map between face and the vertex of the double is made so that
# after rotation the position of the vertices can be located
fmap = dict(zip(ordered_faces,
range(len(ordered_faces))))
flat_faces = flatten(ordered_faces)
new_pgroup = []
for i, p in enumerate(pgroup):
h = polyh.copy()
h.rotate(p)
c = h.corners
# reorder corners in the order they should appear when
# enumerating the faces
reorder = unflatten([c[j] for j in flat_faces], n)
# make them canonical
reorder = [tuple(map(as_int,
minlex(f, directed=False)))
for f in reorder]
# map face to vertex: the resulting list of vertices are the
# permutation that we seek for the double
new_pgroup.append(Perm([fmap[f] for f in reorder]))
return new_pgroup
tetrahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3
(1, 2, 3), # bottom
]
# cw from top
#
_t_pgroup = [
Perm([[1, 2, 3], [0]]), # cw from top
Perm([[0, 1, 2], [3]]), # cw from front face
Perm([[0, 3, 2], [1]]), # cw from back right face
Perm([[0, 3, 1], [2]]), # cw from back left face
Perm([[0, 1], [2, 3]]), # through front left edge
Perm([[0, 2], [1, 3]]), # through front right edge
Perm([[0, 3], [1, 2]]), # through back edge
]
tetrahedron = Polyhedron(
range(4),
tetrahedron_faces,
_t_pgroup)
cube_faces = [
(0, 1, 2, 3), # upper
(0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4
(4, 5, 6, 7), # lower
]
# U, D, F, B, L, R = up, down, front, back, left, right
_c_pgroup = [Perm(p) for p in
[
[1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U
[4, 0, 3, 7, 5, 1, 2, 6], # cw from F face
[4, 5, 1, 0, 7, 6, 2, 3], # cw from R face
[1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge
[6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge
[6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge
[3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge
[4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge
[6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge
[0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex
[5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex
[5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex
[7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL
]]
cube = Polyhedron(
range(8),
cube_faces,
_c_pgroup)
octahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4
(1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4
]
octahedron = Polyhedron(
range(6),
octahedron_faces,
_pgroup_of_double(cube, cube_faces, _c_pgroup))
dodecahedron_faces = [
(0, 1, 2, 3, 4), # top
(0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5
(3, 4, 9, 13, 8), (0, 4, 9, 14, 5),
(5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,
12), # lower 5
(8, 12, 18, 19, 13), (9, 13, 19, 15, 14),
(15, 16, 17, 18, 19) # bottom
]
def _string_to_perm(s):
rv = [Perm(range(20))]
p = None
for si in s:
if si not in '01':
count = int(si) - 1
else:
count = 1
if si == '0':
p = _f0
elif si == '1':
p = _f1
rv.extend([p]*count)
return Perm.rmul(*rv)
# top face cw
_f0 = Perm([
1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,
12, 13, 14, 10, 16, 17, 18, 19, 15])
# front face cw
_f1 = Perm([
5, 0, 4, 9, 14, 10, 1, 3, 13, 15,
6, 2, 8, 19, 16, 17, 11, 7, 12, 18])
# the strings below, like 0104 are shorthand for F0*F1*F0**4 and are
# the remaining 4 face rotations, 15 edge permutations, and the
# 10 vertex rotations.
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in '''
0104 140 014 0410
010 1403 03104 04103 102
120 1304 01303 021302 03130
0412041 041204103 04120410 041204104 041204102
10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()]
dodecahedron = Polyhedron(
range(20),
dodecahedron_faces,
_dodeca_pgroup)
icosahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5),
(1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9),
(3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6),
(6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)]
icosahedron = Polyhedron(
range(12),
icosahedron_faces,
_pgroup_of_double(
dodecahedron, dodecahedron_faces, _dodeca_pgroup))
return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces)
# -----------------------------------------------------------------------
# Standard Polyhedron groups
#
# These are generated using _pgroup_calcs() above. However to save
# import time we encode them explicitly here.
# -----------------------------------------------------------------------
tetrahedron = Polyhedron(
Tuple(0, 1, 2, 3),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 1, 3),
Tuple(1, 2, 3)),
Tuple(
Perm(1, 2, 3),
Perm(3)(0, 1, 2),
Perm(0, 3, 2),
Perm(0, 3, 1),
Perm(0, 1)(2, 3),
Perm(0, 2)(1, 3),
Perm(0, 3)(1, 2)
))
cube = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7),
Tuple(
Tuple(0, 1, 2, 3),
Tuple(0, 1, 5, 4),
Tuple(1, 2, 6, 5),
Tuple(2, 3, 7, 6),
Tuple(0, 3, 7, 4),
Tuple(4, 5, 6, 7)),
Tuple(
Perm(0, 1, 2, 3)(4, 5, 6, 7),
Perm(0, 4, 5, 1)(2, 3, 7, 6),
Perm(0, 4, 7, 3)(1, 5, 6, 2),
Perm(0, 1)(2, 4)(3, 5)(6, 7),
Perm(0, 6)(1, 2)(3, 5)(4, 7),
Perm(0, 6)(1, 7)(2, 3)(4, 5),
Perm(0, 3)(1, 7)(2, 4)(5, 6),
Perm(0, 4)(1, 7)(2, 6)(3, 5),
Perm(0, 6)(1, 5)(2, 4)(3, 7),
Perm(1, 3, 4)(2, 7, 5),
Perm(7)(0, 5, 2)(3, 4, 6),
Perm(0, 5, 7)(1, 6, 3),
Perm(0, 7, 2)(1, 4, 6)))
octahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 1, 4),
Tuple(1, 2, 5),
Tuple(2, 3, 5),
Tuple(3, 4, 5),
Tuple(1, 4, 5)),
Tuple(
Perm(5)(1, 2, 3, 4),
Perm(0, 4, 5, 2),
Perm(0, 1, 5, 3),
Perm(0, 1)(2, 4)(3, 5),
Perm(0, 2)(1, 3)(4, 5),
Perm(0, 3)(1, 5)(2, 4),
Perm(0, 4)(1, 3)(2, 5),
Perm(0, 5)(1, 4)(2, 3),
Perm(0, 5)(1, 2)(3, 4),
Perm(0, 4, 1)(2, 3, 5),
Perm(0, 1, 2)(3, 4, 5),
Perm(0, 2, 3)(1, 5, 4),
Perm(0, 4, 3)(1, 5, 2)))
dodecahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
Tuple(
Tuple(0, 1, 2, 3, 4),
Tuple(0, 1, 6, 10, 5),
Tuple(1, 2, 7, 11, 6),
Tuple(2, 3, 8, 12, 7),
Tuple(3, 4, 9, 13, 8),
Tuple(0, 4, 9, 14, 5),
Tuple(5, 10, 16, 15, 14),
Tuple(6, 10, 16, 17, 11),
Tuple(7, 11, 17, 18, 12),
Tuple(8, 12, 18, 19, 13),
Tuple(9, 13, 19, 15, 14),
Tuple(15, 16, 17, 18, 19)),
Tuple(
Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19),
Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12),
Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13),
Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14),
Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10),
Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11),
Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19),
Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16),
Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17),
Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18),
Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18),
Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19),
Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15),
Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14),
Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17),
Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11),
Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13),
Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10),
Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12),
Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14),
Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17),
Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18),
Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19),
Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15),
Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16),
Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18),
Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19),
Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9),
Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16),
Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17)))
icosahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 4, 5),
Tuple(0, 1, 5),
Tuple(1, 6, 7),
Tuple(1, 2, 7),
Tuple(2, 7, 8),
Tuple(2, 3, 8),
Tuple(3, 8, 9),
Tuple(3, 4, 9),
Tuple(4, 9, 10),
Tuple(4, 5, 10),
Tuple(5, 6, 10),
Tuple(1, 5, 6),
Tuple(6, 7, 11),
Tuple(7, 8, 11),
Tuple(8, 9, 11),
Tuple(9, 10, 11),
Tuple(6, 10, 11)),
Tuple(
Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10),
Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8),
Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9),
Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5),
Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6),
Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7),
Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11),
Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11),
Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10),
Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11),
Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11),
Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10),
Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10),
Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7),
Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8),
Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7),
Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9),
Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6),
Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8),
Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10),
Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11),
Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11),
Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10),
Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11),
Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11),
Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8),
Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9),
Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10),
Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4),
Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5)))
tetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces)
cube_faces = list(tuple(arg) for arg in cube.faces)
octahedron_faces = list(tuple(arg) for arg in octahedron.faces)
dodecahedron_faces = list(tuple(arg) for arg in dodecahedron.faces)
icosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)
|
783f3175a1a2507825fc79ff0bc857692cb863b2a1b4a2fa3b8d1861808d3852 | from sympy.core import Basic, Dict, sympify
from sympy.core.compatibility import as_int, default_sort_key
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
Creating Partition from Python lists:
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a
Partition(FiniteSet(1, 2), FiniteSet(3))
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
Creating Partition from Python sets:
>>> Partition({1, 2, 3}, {4, 5})
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
Creating Partition from SymPy finite sets:
>>> from sympy.sets.sets import FiniteSet
>>> a = FiniteSet(1, 2, 3)
>>> b = FiniteSet(4, 5)
>>> Partition(a, b)
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
"""
args = []
dups = False
for arg in partition:
if isinstance(arg, list):
as_set = set(arg)
if len(as_set) < len(arg):
dups = True
break # error below
arg = as_set
args.append(_sympify(arg))
if not all(isinstance(part, FiniteSet) for part in args):
raise ValueError(
"Each argument to Partition should be " \
"a list, set, or a FiniteSet")
# sort so we have a canonical reference for RGS
U = Union(*args)
if dups or len(U) < sum(len(arg) for arg in args):
raise ValueError("Partition contained duplicate elements.")
obj = FiniteSet.__new__(cls, *args)
obj.members = tuple(U)
obj.size = len(U)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy.utilities.iterables import default_sort_key
>>> from sympy.combinatorics.partitions import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[Partition(FiniteSet(1, 2)), Partition(FiniteSet(1), FiniteSet(2)), Partition(FiniteSet(1, x)), Partition(FiniteSet(3, 4)), Partition(FiniteSet(0, 1, 2, 3))]
"""
if order is None:
members = self.members
else:
members = tuple(sorted(self.members,
key=lambda w: default_sort_key(w, order)))
return tuple(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted([sorted(p, key=default_sort_key)
for p in self.args])
return self._partition
def __add__(self, other):
"""
        Return the partition whose rank is ``other`` greater than the current
        rank (mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) %
RGS_enum(self.size),
self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
        Return the partition whose rank is ``other`` less than the current
        rank (mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
"""
Returns the "restricted growth string" of the partition.
Explanation
===========
The RGS is returned as a list of indices, L, where L[i] indicates
the block in which element i appears. For example, in a partition
of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
[1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.members
(1, 2, 3, 4, 5)
>>> a.RGS
(0, 0, 1, 2, 2)
>>> a + 1
Partition(FiniteSet(1, 2), FiniteSet(3), FiniteSet(4), FiniteSet(5))
>>> _.RGS
(0, 0, 1, 2, 3)
"""
rgs = {}
partition = self.partition
for i, part in enumerate(partition):
for j in part:
rgs[j] = i
return tuple([rgs[i] for i in sorted(
[i for p in partition for i in p], key=default_sort_key)])
@classmethod
def from_rgs(self, rgs, elements):
"""
Creates a set partition from a restricted growth string.
Explanation
===========
The indices given in rgs are assumed to be the index
of the element as given in elements *as provided* (the
elements are not sorted by this routine). Block numbering
starts from 0. If any block was not referenced in ``rgs``
an error will be raised.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
Partition(FiniteSet(c), FiniteSet(a, d), FiniteSet(b, e))
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
Partition(FiniteSet(e), FiniteSet(a, c), FiniteSet(b, d))
>>> a = Partition([1, 4], [2], [3, 5])
>>> Partition.from_rgs(a.RGS, a.members)
Partition(FiniteSet(1, 4), FiniteSet(2), FiniteSet(3, 5))
"""
if len(rgs) != len(elements):
raise ValueError('mismatch in rgs and element lengths')
max_elem = max(rgs) + 1
partition = [[] for i in range(max_elem)]
j = 0
for i in rgs:
partition[i].append(elements[j])
j += 1
if not all(p for p in partition):
raise ValueError('some blocks of the partition were empty.')
return Partition(*partition)
class IntegerPartition(Basic):
"""
This class represents an integer partition.
Explanation
===========
In number theory and combinatorics, a partition of a positive integer,
``n``, also called an integer partition, is a way of writing ``n`` as a
list of positive integers that sum to n. Two partitions that differ only
in the order of summands are considered to be the same partition; if order
matters then the partitions are referred to as compositions. For example,
4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
[2, 1, 1].
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
References
==========
https://en.wikipedia.org/wiki/Partition_%28number_theory%29
"""
_dict = None
_keys = None
def __new__(cls, partition, integer=None):
"""
Generates a new IntegerPartition object from a list or dictionary.
        Explanation
        ===========
The partition can be given as a list of positive integers or a
dictionary of (integer, multiplicity) items. If the partition is
preceded by an integer an error will be raised if the partition
does not sum to that given integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([5, 4, 3, 1, 1])
>>> a
IntegerPartition(14, (5, 4, 3, 1, 1))
>>> print(a)
[5, 4, 3, 1, 1]
>>> IntegerPartition({1:3, 2:1})
IntegerPartition(5, (2, 1, 1, 1))
        If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:
>>> IntegerPartition(10, [5, 4, 3, 1])
Traceback (most recent call last):
...
        ValueError: Partition did not add to 10
"""
if integer is not None:
integer, partition = partition, integer
if isinstance(partition, (dict, Dict)):
_ = []
for k, v in sorted(list(partition.items()), reverse=True):
if not v:
continue
k, v = as_int(k), as_int(v)
_.extend([k]*v)
partition = tuple(_)
else:
partition = tuple(sorted(map(as_int, partition), reverse=True))
sum_ok = False
if integer is None:
integer = sum(partition)
sum_ok = True
else:
integer = as_int(integer)
if not sum_ok and sum(partition) != integer:
raise ValueError("Partition did not add to %s" % integer)
if any(i < 1 for i in partition):
            raise ValueError("All integer summands must be positive")
obj = Basic.__new__(cls, integer, partition)
obj.partition = list(partition)
obj.integer = integer
return obj
def prev_lex(self):
"""Return the previous partition of the integer, n, in lexical order,
wrapping around to [1, ..., 1] if the partition is [n].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([4])
>>> print(p.prev_lex())
[3, 1]
>>> p.partition > p.prev_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
keys = self._keys
if keys == [1]:
return IntegerPartition({self.integer: 1})
if keys[-1] != 1:
d[keys[-1]] -= 1
if keys[-1] == 2:
d[1] = 2
else:
d[keys[-1] - 1] = d[1] = 1
else:
d[keys[-2]] -= 1
left = d[1] + keys[-2]
new = keys[-2]
d[1] = 0
while left:
new -= 1
if left - new >= 0:
d[new] += left//new
left -= d[new]*new
return IntegerPartition(self.integer, d)
def next_lex(self):
"""Return the next partition of the integer, n, in lexical order,
wrapping around to [n] if the partition is [1, ..., 1].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([3, 1])
>>> print(p.next_lex())
[4]
>>> p.partition < p.next_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
key = self._keys
a = key[-1]
if a == self.integer:
d.clear()
d[1] = self.integer
elif a == 1:
if d[a] > 1:
d[a + 1] += 1
d[a] -= 2
else:
b = key[-2]
d[b + 1] += 1
d[1] = (d[b] - 1)*b
d[b] = 0
else:
if d[a] > 1:
if len(key) == 1:
d.clear()
d[a + 1] = 1
d[1] = self.integer - a - 1
else:
a1 = a + 1
d[a1] += 1
d[1] = d[a]*a - a1
d[a] = 0
else:
b = key[-2]
b1 = b + 1
d[b1] += 1
need = d[b]*b + d[a]*a - b1
d[a] = d[b] = 0
d[1] = need
return IntegerPartition(self.integer, d)
def as_dict(self):
"""Return the partition as a dictionary whose keys are the
partition integers and the values are the multiplicity of that
integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
{1: 3, 2: 1, 3: 4}
"""
if self._dict is None:
groups = group(self.partition, multiple=False)
self._keys = [g[0] for g in groups]
self._dict = dict(groups)
return self._dict
@property
def conjugate(self):
"""
Computes the conjugate partition of itself.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([6, 3, 3, 2, 1])
>>> a.conjugate
[5, 4, 3, 1, 1, 1]
"""
j = 1
temp_arr = list(self.partition) + [0]
k = temp_arr[0]
b = [0]*k
while k > 0:
while k > temp_arr[j]:
b[k - 1] = j
k -= 1
j += 1
return b
def __lt__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([3, 1])
>>> a < a
False
>>> b = a.next_lex()
>>> a < b
True
>>> a == b
False
"""
return list(reversed(self.partition)) < list(reversed(other.partition))
    def __le__(self, other):
        """Return True if self is less than or equal to other when the
        partition is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([4])
>>> a <= a
True
"""
return list(reversed(self.partition)) <= list(reversed(other.partition))
def as_ferrers(self, char='#'):
"""
        Return the Ferrers diagram of the partition as a string.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> print(IntegerPartition([1, 1, 5]).as_ferrers())
#####
#
#
"""
return "\n".join([char*i for i in self.partition])
def __str__(self):
return str(list(self.partition))
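# A small verification sketch (not part of the original module): conjugation of
# an integer partition is an involution, so conjugating the conjugate should
# give back the original list of parts.
def _conjugate_involution_check():
    a = IntegerPartition([6, 3, 3, 2, 1])
    return IntegerPartition(a.conjugate).conjugate == a.partition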
def random_integer_partition(n, seed=None):
"""
Generates a random integer partition summing to ``n`` as a list
of reverse-sorted integers.
Examples
========
>>> from sympy.combinatorics.partitions import random_integer_partition
For the following, a seed is given so a known value can be shown; in
practice, the seed would not be given.
>>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
[85, 12, 2, 1]
>>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
[5, 3, 1, 1]
>>> random_integer_partition(1)
[1]
"""
from sympy.testing.randtest import _randint
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
randint = _randint(seed)
partition = []
while (n > 0):
k = randint(1, n)
mult = randint(1, n//k)
partition.append((k, mult))
n -= k*mult
partition.sort(reverse=True)
partition = flatten([[k]*m for k, m in partition])
return partition
def RGS_generalized(m):
"""
    Computes the m + 1 generalized unrestricted growth strings
    and returns them as rows of a matrix.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_generalized
>>> RGS_generalized(6)
Matrix([
[ 1, 1, 1, 1, 1, 1, 1],
[ 1, 2, 3, 4, 5, 6, 0],
[ 2, 5, 10, 17, 26, 0, 0],
[ 5, 15, 37, 77, 0, 0, 0],
[ 15, 52, 151, 0, 0, 0, 0],
[ 52, 203, 0, 0, 0, 0, 0],
[203, 0, 0, 0, 0, 0, 0]])
"""
d = zeros(m + 1)
for i in range(0, m + 1):
d[0, i] = 1
for i in range(1, m + 1):
for j in range(m):
if j <= m - i:
d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]
else:
d[i, j] = 0
return d
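# Observation sketch (not part of the original module): the first column of
# ``RGS_generalized(m)`` is the sequence of Bell numbers B_0, ..., B_m, which
# is why ``RGS_enum`` below can simply return ``bell(m)``.
def _rgs_first_column_is_bell(m=6):
    D = RGS_generalized(m)
    return all(D[i, 0] == bell(i) for i in range(m + 1))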
def RGS_enum(m):
"""
RGS_enum computes the total number of restricted growth strings
possible for a superset of size m.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_enum
>>> from sympy.combinatorics.partitions import Partition
>>> RGS_enum(4)
15
>>> RGS_enum(5)
52
>>> RGS_enum(6)
203
We can check that the enumeration is correct by actually generating
the partitions. Here, the 15 partitions of 4 items are generated:
>>> a = Partition(list(range(4)))
>>> s = set()
>>> for i in range(20):
... s.add(a)
... a += 1
...
>>> assert len(s) == 15
"""
if (m < 1):
return 0
elif (m == 1):
return 1
else:
return bell(m)
def RGS_unrank(rank, m):
"""
Gives the unranked restricted growth string for a given
superset size.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_unrank
>>> RGS_unrank(14, 4)
[0, 1, 2, 3]
>>> RGS_unrank(0, 4)
[0, 0, 0, 0]
"""
if m < 1:
raise ValueError("The superset size must be >= 1")
if rank < 0 or RGS_enum(m) <= rank:
raise ValueError("Invalid arguments")
L = [1] * (m + 1)
j = 1
D = RGS_generalized(m)
for i in range(2, m + 1):
v = D[m - i, j]
cr = j*v
if cr <= rank:
L[i] = j + 1
rank -= cr
j += 1
else:
L[i] = int(rank / v + 1)
rank %= v
return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
"""
Computes the rank of a restricted growth string.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
>>> RGS_rank([0, 1, 2, 1, 3])
42
>>> RGS_rank(RGS_unrank(4, 7))
4
"""
rgs_size = len(rgs)
rank = 0
D = RGS_generalized(rgs_size)
for i in range(1, rgs_size):
n = len(rgs[(i + 1):])
m = max(rgs[0:i])
rank += D[n, m + 1] * rgs[i]
return rank
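# A consistency sketch (not part of the original module): for a small superset
# size, unranking every possible rank with ``RGS_unrank`` and ranking it again
# with ``RGS_rank`` should give back the original rank.
def _rgs_roundtrip_check(m=4):
    return all(RGS_rank(RGS_unrank(r, m)) == r for r in range(RGS_enum(m)))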
|
365656a7a8a5d2c9e6e07a96cdd87e0c29c486663fff9d1871d03178ac4a454e | from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.exponential import exp, log
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(ExprWithIntLimits):
r"""
Represents unevaluated products.
Explanation
===========
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
factorial(m)**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*factorial(n)**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import combsimp, pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 - pi/2, n)*RisingFactorial(1 + pi/2, n)/(2*factorial(n)**2)
>>> limit(Pe, n, oo).gammasimp()
sin(pi**2/2)
>>> Pe.rewrite(gamma)
(-1)**n*pi**2*gamma(pi/2)*gamma(n + 1 + pi/2)/(2*gamma(1 + pi/2)*gamma(-n + pi/2)*gamma(n + 1)**2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
    valid for seemingly nonsensical values of the limits. The Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> combsimp(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] https://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] https://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ('is_commutative',)
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args, **kwargs):
from sympy.concrete.summations import Sum
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
if self.has_empty_sequence:
return False
z = self.term.is_zero
if z is True:
return True
if self.has_finite_limits:
# A Product is zero only if its term is zero assuming finite limits.
return z
def _eval_is_extended_real(self):
if self.has_empty_sequence:
return True
return self.function.is_extended_real
def _eval_is_positive(self):
if self.has_empty_sequence:
return True
if self.function.is_positive and self.has_finite_limits:
return True
def _eval_is_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_nonnegative and self.has_finite_limits:
return True
def _eval_is_extended_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_extended_nonnegative:
return True
def _eval_is_extended_nonpositive(self):
if self.has_empty_sequence:
return True
def _eval_is_finite(self):
if self.has_finite_limits and self.function.is_finite:
return True
def doit(self, **hints):
# first make sure any definite limits have product
# variables with matching assumptions
reps = {}
for xab in self.limits:
# Must be imported here to avoid circular imports
from .summations import _dummy_with_inherited_properties_concrete
d = _dummy_with_inherited_properties_concrete(xab)
if d:
reps[xab[0]] = d
if reps:
undo = {v: k for k, v in reps.items()}
did = self.xreplace(reps).doit(**hints)
if type(did) is tuple: # when separate=True
did = tuple([i.xreplace(undo) for i in did])
else:
did = did.xreplace(undo)
return did
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_integer and dif.is_negative:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta, RisingFactorial
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
definite = dif.is_Integer
if definite and (dif < 100):
return self._eval_product_direct(term, limits)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
factored = factor_terms(term, fraction=True)
if factored.is_Mul:
return self._eval_product(factored, (k, a, n))
elif term.is_Mul:
# Factor in part without the summation variable and part with
without_k, with_k = term.as_coeff_mul(k)
if len(with_k) >= 2:
# More than one term including k, so still a multiplication
exclude, include = [], []
for t in with_k:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return without_k**(n - a + 1)*A * B
else:
# Just a single term
p = self._eval_product(with_k[0], (k, a, n))
if p is None:
p = self.func(with_k[0], (k, a, n)).doit()
return without_k**(n - a + 1)*p
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
if definite:
return self._eval_product_direct(term, limits)
def _eval_simplify(self, **kwargs):
from sympy.simplify.simplify import product_simplify
rv = product_simplify(self)
return rv.doit() if kwargs['doit'] else rv
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_product_direct(self, term, limits):
(k, a, n) = limits
return Mul(*[term.subs(k, a + i) for i in range(n - a + 1)])
def is_convergent(self):
r"""
See docs of :obj:`.Sum.is_convergent()` for explanation of convergence
in SymPy.
Explanation
===========
The infinite product:
.. math::
\prod_{1 \leq i < \infty} f(i)
is defined by the sequence of partial products:
.. math::
\prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)
as n increases without bound. The product converges to a non-zero
value if and only if the sum:
.. math::
\sum_{1 \leq i < \infty} \log{f(n)}
converges.
Examples
========
>>> from sympy import Product, Symbol, cos, pi, exp, oo
>>> n = Symbol('n', integer=True)
>>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
False
>>> Product(1/n**2, (n, 1, oo)).is_convergent()
False
>>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
True
>>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinite_product
"""
from sympy.concrete.summations import Sum
sequence_term = self.function
log_sum = log(sequence_term)
lim = self.limits
try:
is_conv = Sum(log_sum, *lim).is_convergent()
except NotImplementedError:
if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
return S.true
raise NotImplementedError("The algorithm to find the product convergence of %s "
"is not yet implemented" % (sequence_term))
return is_conv
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Explanation
===========
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import gamma, Product, simplify, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), b > -1), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), a > 0), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
reorder_limit,
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
Explanation
===========
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
b
_____
product(f(n), (i, a, b)) = | | f(n)
| |
i = a
If it cannot compute the product, it returns an unevaluated Product object.
    Repeated products can be computed by introducing additional limit
    tuples, as in the last example below.
Examples
========
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
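# A small numeric sketch (not part of the original module): under the Karr
# convention described in the ``Product`` docstring, a product over (i, a, b)
# times the product over the reversed range (i, b + 1, a - 1) equals 1.
def _karr_product_check():
    from sympy import symbols
    i = symbols('i', integer=True)
    P1 = Product(i + 2, (i, 3, 7)).doit()   # 5*6*7*8*9
    P2 = Product(i + 2, (i, 8, 2)).doit()   # reciprocal of the above
    return P1*P2 == 1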
|
9e61e99fd8a0418741dc0bf7d514f82e081b2c103f0c37e6008b8ef02750b7f2 | from sympy.concrete.expr_with_limits import ExprWithLimits
from sympy.core.singleton import S
from sympy.core.relational import Eq
class ReorderError(NotImplementedError):
"""
Exception raised when trying to reorder dependent limits.
"""
def __init__(self, expr, msg):
super().__init__(
"%s could not be reordered: %s." % (expr, msg))
class ExprWithIntLimits(ExprWithLimits):
"""
Superclass for Product and Sum.
See Also
========
sympy.concrete.expr_with_limits.ExprWithLimits
sympy.concrete.products.Product
sympy.concrete.summations.Sum
"""
def change_index(self, var, trafo, newvar=None):
r"""
Change index of a Sum or Product.
Perform a linear transformation `x \mapsto a x + b` on the index variable
`x`. For `a` the only values allowed are `\pm 1`. A new variable to be used
after the change of index can also be specified.
Explanation
===========
``change_index(expr, var, trafo, newvar=None)`` where ``var`` specifies the
index variable `x` to transform. The transformation ``trafo`` must be linear
and given in terms of ``var``. If the optional argument ``newvar`` is
provided then ``var`` gets replaced by ``newvar`` in the final expression.
Examples
========
>>> from sympy import Sum, Product, simplify
>>> from sympy.abc import x, y, a, b, c, d, u, v, i, j, k, l
>>> S = Sum(x, (x, a, b))
>>> S.doit()
-a**2/2 + a/2 + b**2/2 + b/2
>>> Sn = S.change_index(x, x + 1, y)
>>> Sn
Sum(y - 1, (y, a + 1, b + 1))
>>> Sn.doit()
-a**2/2 + a/2 + b**2/2 + b/2
>>> Sn = S.change_index(x, -x, y)
>>> Sn
Sum(-y, (y, -b, -a))
>>> Sn.doit()
-a**2/2 + a/2 + b**2/2 + b/2
>>> Sn = S.change_index(x, x+u)
>>> Sn
Sum(-u + x, (x, a + u, b + u))
>>> Sn.doit()
-a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
>>> simplify(Sn.doit())
-a**2/2 + a/2 + b**2/2 + b/2
>>> Sn = S.change_index(x, -x - u, y)
>>> Sn
Sum(-u - y, (y, -b - u, -a - u))
>>> Sn.doit()
-a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
>>> simplify(Sn.doit())
-a**2/2 + a/2 + b**2/2 + b/2
>>> P = Product(i*j**2, (i, a, b), (j, c, d))
>>> P
Product(i*j**2, (i, a, b), (j, c, d))
>>> P2 = P.change_index(i, i+3, k)
>>> P2
Product(j**2*(k - 3), (k, a + 3, b + 3), (j, c, d))
>>> P3 = P2.change_index(j, -j, l)
>>> P3
Product(l**2*(k - 3), (k, a + 3, b + 3), (l, -d, -c))
When dealing with symbols only, we can make a
general linear transformation:
>>> Sn = S.change_index(x, u*x+v, y)
>>> Sn
Sum((-v + y)/u, (y, b*u + v, a*u + v))
>>> Sn.doit()
-v*(a*u - b*u + 1)/u + (a**2*u**2/2 + a*u*v + a*u/2 - b**2*u**2/2 - b*u*v + b*u/2 + v)/u
>>> simplify(Sn.doit())
a**2*u/2 + a/2 - b**2*u/2 + b/2
However, the last result can be inconsistent with usual
summation where the index increment is always 1. This is
obvious as we get back the original value only for ``u``
equal +1 or -1.
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
reorder_limit,
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder,
sympy.concrete.summations.Sum.reverse_order,
sympy.concrete.products.Product.reverse_order
"""
if newvar is None:
newvar = var
limits = []
for limit in self.limits:
if limit[0] == var:
p = trafo.as_poly(var)
if p.degree() != 1:
raise ValueError("Index transformation is not linear")
alpha = p.coeff_monomial(var)
beta = p.coeff_monomial(S.One)
if alpha.is_number:
if alpha == S.One:
limits.append((newvar, alpha*limit[1] + beta, alpha*limit[2] + beta))
elif alpha == S.NegativeOne:
limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
else:
raise ValueError("Linear transformation results in non-linear summation stepsize")
else:
# Note that the case of alpha being symbolic can give issues if alpha < 0.
limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
else:
limits.append(limit)
function = self.function.subs(var, (var - beta)/alpha)
function = function.subs(var, newvar)
return self.func(function, *limits)
def index(expr, x):
"""
Return the index of a dummy variable in the list of limits.
Explanation
===========
``index(expr, x)`` returns the index of the dummy variable ``x`` in the
limits of ``expr``. Note that we start counting with 0 at the inner-most
limits tuple.
Examples
========
>>> from sympy.abc import x, y, a, b, c, d
>>> from sympy import Sum, Product
>>> Sum(x*y, (x, a, b), (y, c, d)).index(x)
0
>>> Sum(x*y, (x, a, b), (y, c, d)).index(y)
1
>>> Product(x*y, (x, a, b), (y, c, d)).index(x)
0
>>> Product(x*y, (x, a, b), (y, c, d)).index(y)
1
See Also
========
reorder_limit, reorder, sympy.concrete.summations.Sum.reverse_order,
sympy.concrete.products.Product.reverse_order
"""
variables = [limit[0] for limit in expr.limits]
if variables.count(x) != 1:
raise ValueError(expr, "Number of instances of variable not equal to one")
else:
return variables.index(x)
def reorder(expr, *arg):
"""
        Reorder limits in an expression containing a Sum or a Product.
Explanation
===========
``expr.reorder(*arg)`` reorders the limits in the expression ``expr``
according to the list of tuples given by ``arg``. These tuples can
contain numerical indices or index variable names or involve both.
Examples
========
>>> from sympy import Sum, Product
>>> from sympy.abc import x, y, z, a, b, c, d, e, f
>>> Sum(x*y, (x, a, b), (y, c, d)).reorder((x, y))
Sum(x*y, (y, c, d), (x, a, b))
>>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder((x, y), (x, z), (y, z))
Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))
>>> P = Product(x*y*z, (x, a, b), (y, c, d), (z, e, f))
>>> P.reorder((x, y), (x, z), (y, z))
Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))
We can also select the index variables by counting them, starting
with the inner-most one:
>>> Sum(x**2, (x, a, b), (x, c, d)).reorder((0, 1))
Sum(x**2, (x, c, d), (x, a, b))
And of course we can mix both schemes:
>>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, x))
Sum(x*y, (y, c, d), (x, a, b))
>>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, 0))
Sum(x*y, (y, c, d), (x, a, b))
See Also
========
reorder_limit, index, sympy.concrete.summations.Sum.reverse_order,
sympy.concrete.products.Product.reverse_order
"""
new_expr = expr
for r in arg:
if len(r) != 2:
raise ValueError(r, "Invalid number of arguments")
index1 = r[0]
index2 = r[1]
if not isinstance(r[0], int):
index1 = expr.index(r[0])
if not isinstance(r[1], int):
index2 = expr.index(r[1])
new_expr = new_expr.reorder_limit(index1, index2)
return new_expr
def reorder_limit(expr, x, y):
"""
Interchange two limit tuples of a Sum or Product expression.
Explanation
===========
``expr.reorder_limit(x, y)`` interchanges two limit tuples. The
arguments ``x`` and ``y`` are integers corresponding to the index
variables of the two limits which are to be interchanged. The
expression ``expr`` has to be either a Sum or a Product.
Examples
========
>>> from sympy.abc import x, y, z, a, b, c, d, e, f
>>> from sympy import Sum, Product
>>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))
>>> Sum(x**2, (x, a, b), (x, c, d)).reorder_limit(1, 0)
Sum(x**2, (x, c, d), (x, a, b))
>>> Product(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))
See Also
========
index, reorder, sympy.concrete.summations.Sum.reverse_order,
sympy.concrete.products.Product.reverse_order
"""
var = {limit[0] for limit in expr.limits}
limit_x = expr.limits[x]
limit_y = expr.limits[y]
if (len(set(limit_x[1].free_symbols).intersection(var)) == 0 and
len(set(limit_x[2].free_symbols).intersection(var)) == 0 and
len(set(limit_y[1].free_symbols).intersection(var)) == 0 and
len(set(limit_y[2].free_symbols).intersection(var)) == 0):
limits = []
for i, limit in enumerate(expr.limits):
if i == x:
limits.append(limit_y)
elif i == y:
limits.append(limit_x)
else:
limits.append(limit)
return type(expr)(expr.function, *limits)
else:
raise ReorderError(expr, "could not interchange the two limits specified")
@property
def has_empty_sequence(self):
"""
Returns True if the Sum or Product is computed for an empty sequence.
Examples
========
>>> from sympy import Sum, Product, Symbol
>>> m = Symbol('m')
>>> Sum(m, (m, 1, 0)).has_empty_sequence
True
>>> Sum(m, (m, 1, 1)).has_empty_sequence
False
>>> M = Symbol('M', integer=True, positive=True)
>>> Product(m, (m, 1, M)).has_empty_sequence
False
>>> Product(m, (m, 2, M)).has_empty_sequence
>>> Product(m, (m, M + 1, M)).has_empty_sequence
True
>>> N = Symbol('N', integer=True, positive=True)
>>> Sum(m, (m, N, M)).has_empty_sequence
>>> N = Symbol('N', integer=True, negative=True)
>>> Sum(m, (m, N, M)).has_empty_sequence
False
See Also
========
has_reversed_limits
has_finite_limits
"""
ret_None = False
for lim in self.limits:
dif = lim[1] - lim[2]
eq = Eq(dif, 1)
if eq == True:
return True
elif eq == False:
continue
else:
ret_None = True
if ret_None:
return None
return False
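# A small sketch (not part of the original module): interchanging the same two
# limit tuples twice with ``reorder_limit`` returns the original expression.
def _reorder_involution_check():
    from sympy import Sum, symbols
    x, y, a, b, c, d = symbols('x y a b c d')
    s = Sum(x*y, (x, a, b), (y, c, d))
    return s.reorder_limit(0, 1).reorder_limit(0, 1) == s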
|
ad2eb8fa80bb8dc6e3deae495b0ddd83ff8c68ec03169ecd3da62eef68a7356e | """
This module implements sums and products containing the Kronecker Delta function.
References
==========
.. [1] http://mathworld.wolfram.com/KroneckerDelta.html
"""
from sympy.core import Add, Mul, S, Dummy
from sympy.core.cache import cacheit
from sympy.core.compatibility import default_sort_key
from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold
from sympy.sets import Interval
@cacheit
def _expand_delta(expr, index):
"""
Expand the first Add containing a simple KroneckerDelta.
"""
if not expr.is_Mul:
return expr
delta = None
func = Add
terms = [S.One]
for h in expr.args:
if delta is None and h.is_Add and _has_simple_delta(h, index):
delta = True
func = h.func
terms = [terms[0]*t for t in h.args]
else:
terms = [t*h for t in terms]
return func(*terms)
@cacheit
def _extract_delta(expr, index):
"""
Extract a simple KroneckerDelta from the expression.
Explanation
===========
Returns the tuple ``(delta, newexpr)`` where:
- ``delta`` is a simple KroneckerDelta expression if one was found,
or ``None`` if no simple KroneckerDelta expression was found.
- ``newexpr`` is a Mul containing the remaining terms; ``expr`` is
returned unchanged if no simple KroneckerDelta expression was found.
Examples
========
>>> from sympy import KroneckerDelta
>>> from sympy.concrete.delta import _extract_delta
>>> from sympy.abc import x, y, i, j, k
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), i)
(KroneckerDelta(i, j), 4*x*y)
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), k)
(None, 4*x*y*KroneckerDelta(i, j))
See Also
========
sympy.functions.special.tensor_functions.KroneckerDelta
deltaproduct
deltasummation
"""
if not _has_simple_delta(expr, index):
return (None, expr)
if isinstance(expr, KroneckerDelta):
return (expr, S.One)
if not expr.is_Mul:
raise ValueError("Incorrect expr")
delta = None
terms = []
for arg in expr.args:
if delta is None and _is_simple_delta(arg, index):
delta = arg
else:
terms.append(arg)
return (delta, expr.func(*terms))
@cacheit
def _has_simple_delta(expr, index):
"""
Returns True if ``expr`` is an expression that contains a KroneckerDelta
that is simple in the index ``index``, meaning that this KroneckerDelta
is nonzero for a single value of the index ``index``.
"""
if expr.has(KroneckerDelta):
if _is_simple_delta(expr, index):
return True
if expr.is_Add or expr.is_Mul:
for arg in expr.args:
if _has_simple_delta(arg, index):
return True
return False
@cacheit
def _is_simple_delta(delta, index):
"""
Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single
value of the index ``index``.
"""
if isinstance(delta, KroneckerDelta) and delta.has(index):
p = (delta.args[0] - delta.args[1]).as_poly(index)
if p:
return p.degree() == 1
return False
@cacheit
def _remove_multiple_delta(expr):
"""
Evaluate products of KroneckerDelta's.
"""
from sympy.solvers import solve
if expr.is_Add:
return expr.func(*list(map(_remove_multiple_delta, expr.args)))
if not expr.is_Mul:
return expr
eqs = []
newargs = []
for arg in expr.args:
if isinstance(arg, KroneckerDelta):
eqs.append(arg.args[0] - arg.args[1])
else:
newargs.append(arg)
if not eqs:
return expr
solns = solve(eqs, dict=True)
if len(solns) == 0:
return S.Zero
elif len(solns) == 1:
for key in solns[0].keys():
newargs.append(KroneckerDelta(key, solns[0][key]))
expr2 = expr.func(*newargs)
if expr != expr2:
return _remove_multiple_delta(expr2)
return expr
@cacheit
def _simplify_delta(expr):
"""
Rewrite a KroneckerDelta's indices in its simplest form.
"""
from sympy.solvers import solve
if isinstance(expr, KroneckerDelta):
try:
slns = solve(expr.args[0] - expr.args[1], dict=True)
if slns and len(slns) == 1:
return Mul(*[KroneckerDelta(*(key, value))
for key, value in slns[0].items()])
except NotImplementedError:
pass
return expr
@cacheit
def deltaproduct(f, limit):
"""
Handle products containing a KroneckerDelta.
See Also
========
deltasummation
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.products.product
"""
from sympy.concrete.products import product
if ((limit[2] - limit[1]) < 0) == True:
return S.One
if not f.has(KroneckerDelta):
return product(f, limit)
if f.is_Add:
# Identify the term in the Add that has a simple KroneckerDelta
delta = None
terms = []
for arg in sorted(f.args, key=default_sort_key):
if delta is None and _has_simple_delta(arg, limit[0]):
delta = arg
else:
terms.append(arg)
newexpr = f.func(*terms)
k = Dummy("kprime", integer=True)
if isinstance(limit[1], int) and isinstance(limit[2], int):
result = deltaproduct(newexpr, limit) + sum([
deltaproduct(newexpr, (limit[0], limit[1], ik - 1)) *
delta.subs(limit[0], ik) *
deltaproduct(newexpr, (limit[0], ik + 1, limit[2])) for ik in range(int(limit[1]), int(limit[2] + 1))]
)
else:
result = deltaproduct(newexpr, limit) + deltasummation(
deltaproduct(newexpr, (limit[0], limit[1], k - 1)) *
delta.subs(limit[0], k) *
deltaproduct(newexpr, (limit[0], k + 1, limit[2])),
(k, limit[1], limit[2]),
no_piecewise=_has_simple_delta(newexpr, limit[0])
)
return _remove_multiple_delta(result)
delta, _ = _extract_delta(f, limit[0])
if not delta:
g = _expand_delta(f, limit[0])
if f != g:
from sympy import factor
try:
return factor(deltaproduct(g, limit))
except AssertionError:
return deltaproduct(g, limit)
return product(f, limit)
return _remove_multiple_delta(f.subs(limit[0], limit[1])*KroneckerDelta(limit[2], limit[1])) + \
S.One*_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1))
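# A usage sketch for ``deltaproduct`` (not part of the original module): a
# product of KroneckerDelta(i, k) over k = 1..3 would require i to equal 1, 2
# and 3 simultaneously, so it must vanish.
def _deltaproduct_demo():
    from sympy import symbols
    i, k = symbols('i k', integer=True)
    return deltaproduct(KroneckerDelta(i, k), (k, 1, 3)) == 0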
@cacheit
def deltasummation(f, limit, no_piecewise=False):
"""
Handle summations containing a KroneckerDelta.
Explanation
===========
The idea for summation is the following:
- If we are dealing with a KroneckerDelta expression, i.e. KroneckerDelta(g(x), j),
we try to simplify it.
If we could simplify it, then we sum the resulting expression.
We already know we can sum a simplified expression, because only
simple KroneckerDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the summation,
taking care if we are dealing with a Derivative or with a proper
KroneckerDelta.
2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we can do
nothing at all.
- If the expr is a multiplication expr having a KroneckerDelta term:
First we expand it.
If the expansion did work, then we try to sum the expansion.
If not, we try to extract a simple KroneckerDelta term, then we have two
cases:
1) We have a simple KroneckerDelta term, so we return the summation.
2) We didn't have a simple term, but we do have an expression with
simplified KroneckerDelta terms, so we sum this expression.
Examples
========
>>> from sympy import oo, symbols
>>> from sympy.abc import k
>>> i, j = symbols('i, j', integer=True, finite=True)
>>> from sympy.concrete.delta import deltasummation
>>> from sympy import KroneckerDelta
>>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo))
1
>>> deltasummation(KroneckerDelta(i, k), (k, 0, oo))
Piecewise((1, i >= 0), (0, True))
>>> deltasummation(KroneckerDelta(i, k), (k, 1, 3))
Piecewise((1, (i >= 1) & (i <= 3)), (0, True))
>>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k), (k, -oo, oo))
j*KroneckerDelta(i, j)
>>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo))
i
>>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo))
j
See Also
========
deltaproduct
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.sums.summation
"""
from sympy.concrete.summations import summation
from sympy.solvers import solve
if ((limit[2] - limit[1]) < 0) == True:
return S.Zero
if not f.has(KroneckerDelta):
return summation(f, limit)
x = limit[0]
g = _expand_delta(f, x)
if g.is_Add:
return piecewise_fold(
g.func(*[deltasummation(h, limit, no_piecewise) for h in g.args]))
# try to extract a simple KroneckerDelta term
delta, expr = _extract_delta(g, x)
if (delta is not None) and (delta.delta_range is not None):
dinf, dsup = delta.delta_range
if (limit[1] - dinf <= 0) == True and (limit[2] - dsup >= 0) == True:
no_piecewise = True
if not delta:
return summation(f, limit)
solns = solve(delta.args[0] - delta.args[1], x)
if len(solns) == 0:
return S.Zero
elif len(solns) != 1:
from sympy.concrete.summations import Sum
return Sum(f, limit)
value = solns[0]
if no_piecewise:
return expr.subs(x, value)
return Piecewise(
(expr.subs(x, value), Interval(*limit[1:3]).as_relational(value)),
(S.Zero, True)
)
|
8cd8f03be569e69927be0799922d2f746caf5cd266cdbcbe61fefc3b16dc4f38 | """Gosper's algorithm for hypergeometric summation. """
from sympy.core import S, Dummy, symbols
from sympy.core.compatibility import is_sequence
from sympy.polys import Poly, parallel_poly_from_expr, factor
from sympy.solvers import solve
from sympy.simplify import hypersimp
def gosper_normal(f, g, n, polys=True):
r"""
Compute the Gosper's normal form of ``f`` and ``g``.
Explanation
===========
Given relatively prime univariate polynomials ``f`` and ``g``,
rewrite their quotient to a normal form defined as follows:
.. math::
\frac{f(n)}{g(n)} = Z \cdot \frac{A(n) C(n+1)}{B(n) C(n)}
where ``Z`` is an arbitrary constant and ``A``, ``B``, ``C`` are
monic polynomials in ``n`` with the following properties:
1. `\gcd(A(n), B(n+h)) = 1 \forall h \in \mathbb{N}`
2. `\gcd(B(n), C(n+1)) = 1`
3. `\gcd(A(n), C(n)) = 1`
    This normal form, also called a rational factorization, is a
    crucial step in Gosper's algorithm and in solving difference
    equations. It can also be used to decide whether two hypergeometric
    terms are similar or not.
This procedure will return a tuple containing elements of this
factorization in the form ``(Z*A, B, C)``.
Examples
========
>>> from sympy.concrete.gosper import gosper_normal
>>> from sympy.abc import n
>>> gosper_normal(4*n+5, 2*(4*n+1)*(2*n+3), n, polys=False)
(1/4, n + 3/2, n + 1/4)
"""
(p, q), opt = parallel_poly_from_expr(
(f, g), n, field=True, extension=True)
a, A = p.LC(), p.monic()
b, B = q.LC(), q.monic()
C, Z = A.one, a/b
h = Dummy('h')
D = Poly(n + h, n, h, domain=opt.domain)
R = A.resultant(B.compose(D))
roots = set(R.ground_roots().keys())
for r in set(roots):
if not r.is_Integer or r < 0:
roots.remove(r)
for i in sorted(roots):
d = A.gcd(B.shift(+i))
A = A.quo(d)
B = B.quo(d.shift(-i))
for j in range(1, i + 1):
C *= d.shift(-j)
A = A.mul_ground(Z)
if not polys:
A = A.as_expr()
B = B.as_expr()
C = C.as_expr()
return A, B, C
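# A verification sketch (not part of the original module): check the defining
# identity f/g == Z*A(n)*C(n + 1)/(B(n)*C(n)) on the docstring example, where
# the first value returned by ``gosper_normal`` is Z*A.
def _gosper_normal_check():
    from sympy import simplify
    from sympy.abc import n
    f, g = 4*n + 5, 2*(4*n + 1)*(2*n + 3)
    ZA, B, C = gosper_normal(f, g, n, polys=False)
    return simplify(f/g - ZA*C.subs(n, n + 1)/(B*C)) == 0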
def gosper_term(f, n):
r"""
Compute Gosper's hypergeometric term for ``f``.
Explanation
===========
Suppose ``f`` is a hypergeometric term such that:
.. math::
s_n = \sum_{k=0}^{n-1} f_k
and `f_k` doesn't depend on `n`. Returns a hypergeometric
term `g_n` such that `g_{n+1} - g_n = f_n`.
Examples
========
>>> from sympy.concrete.gosper import gosper_term
>>> from sympy.functions import factorial
>>> from sympy.abc import n
>>> gosper_term((4*n + 1)*factorial(n)/factorial(2*n + 1), n)
(-n - 1/2)/(n + 1/4)
"""
r = hypersimp(f, n)
if r is None:
return None # 'f' is *not* a hypergeometric term
p, q = r.as_numer_denom()
A, B, C = gosper_normal(p, q, n)
B = B.shift(-1)
N = S(A.degree())
M = S(B.degree())
K = S(C.degree())
if (N != M) or (A.LC() != B.LC()):
D = {K - max(N, M)}
elif not N:
D = {K - N + 1, S.Zero}
else:
D = {K - N + 1, (B.nth(N - 1) - A.nth(N - 1))/A.LC()}
for d in set(D):
if not d.is_Integer or d < 0:
D.remove(d)
if not D:
return None # 'f(n)' is *not* Gosper-summable
d = max(D)
coeffs = symbols('c:%s' % (d + 1), cls=Dummy)
domain = A.get_domain().inject(*coeffs)
x = Poly(coeffs, n, domain=domain)
H = A*x.shift(1) - B*x - C
solution = solve(H.coeffs(), coeffs)
if solution is None:
return None # 'f(n)' is *not* Gosper-summable
x = x.as_expr().subs(solution)
for coeff in coeffs:
if coeff not in solution:
x = x.subs(coeff, 0)
if x.is_zero:
return None # 'f(n)' is *not* Gosper-summable
else:
return B.as_expr()*x/C.as_expr()
def gosper_sum(f, k):
r"""
Gosper's hypergeometric summation algorithm.
Explanation
===========
Given a hypergeometric term ``f`` such that:
.. math ::
s_n = \sum_{k=0}^{n-1} f_k
    and `f_k` doesn't depend on `n`, returns `g_{n} - g_0` where
    `g_{n+1} - g_n = f_n`, or ``None`` if `s_n` cannot be expressed
in closed form as a sum of hypergeometric terms.
Examples
========
>>> from sympy.concrete.gosper import gosper_sum
>>> from sympy.functions import factorial
>>> from sympy.abc import n, k
>>> f = (4*k + 1)*factorial(k)/factorial(2*k + 1)
>>> gosper_sum(f, (k, 0, n))
(-factorial(n) + 2*factorial(2*n + 1))/factorial(2*n + 1)
>>> _.subs(n, 2) == sum(f.subs(k, i) for i in [0, 1, 2])
True
>>> gosper_sum(f, (k, 3, n))
(-60*factorial(n) + factorial(2*n + 1))/(60*factorial(2*n + 1))
>>> _.subs(n, 5) == sum(f.subs(k, i) for i in [3, 4, 5])
True
References
==========
.. [1] Marko Petkovsek, Herbert S. Wilf, Doron Zeilberger, A = B,
AK Peters, Ltd., Wellesley, MA, USA, 1997, pp. 73--100
"""
indefinite = False
if is_sequence(k):
k, a, b = k
else:
indefinite = True
g = gosper_term(f, k)
if g is None:
return None
if indefinite:
result = f*g
else:
result = (f*(g + 1)).subs(k, b) - (f*g).subs(k, a)
if result is S.NaN:
try:
result = (f*(g + 1)).limit(k, b) - (f*g).limit(k, a)
except NotImplementedError:
result = None
return factor(result)
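# A short demonstration sketch (not part of the original module): Gosper's
# algorithm reproduces the classical identity sum_{k=0}^{n} k*k! = (n + 1)! - 1;
# here the closed form is checked numerically at n = 5.
def _gosper_sum_demo(N=5):
    from sympy import factorial, symbols
    n, k = symbols('n k', integer=True, nonnegative=True)
    closed_form = gosper_sum(k*factorial(k), (k, 0, n))
    return closed_form.subs(n, N) == sum(i*factorial(i) for i in range(N + 1))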
|