hash (string, 64 chars) | content (string, 0–1.51M chars)
---|---|
c3acd61cff7c611530f93e6913e9d075d9def10314ab96d2fc2a25d37b3b53c7 | from sympy.core.mul import Mul
from sympy.core.numbers import (I, Integer, Rational)
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.gate import H, XGate
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.spin import Jx, Jy, Jz, Jplus, Jminus, J2, JzKet
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.state import Ket
from sympy.physics.quantum.density import Density
from sympy.physics.quantum.qubit import Qubit, QubitBra
from sympy.physics.quantum.boson import BosonOp, BosonFockKet, BosonFockBra
j, jp, m, mp = symbols("j j' m m'")
z = JzKet(1, 0)
po = JzKet(1, 1)
mo = JzKet(1, -1)
A = Operator('A')
class Foo(Operator):
def _apply_operator_JzKet(self, ket, **options):
return ket
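# Basic application of Jz, Jx and the ladder operators to spin-1 JzKet states,
# including sums and products of operators.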
def test_basic():
assert qapply(Jz*po) == hbar*po
assert qapply(Jx*z) == hbar*po/sqrt(2) + hbar*mo/sqrt(2)
assert qapply((Jplus + Jminus)*z/sqrt(2)) == hbar*po + hbar*mo
assert qapply(Jz*(po + mo)) == hbar*po - hbar*mo
assert qapply(Jz*po + Jz*mo) == hbar*po - hbar*mo
assert qapply(Jminus*Jminus*po) == 2*hbar**2*mo
assert qapply(Jplus**2*mo) == 2*hbar**2*po
assert qapply(Jplus**2*Jminus**2*po) == 4*hbar**4*po
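# Same checks as test_basic, but with the scalar expectation-value factor
# z.dual*A*z carried through qapply unchanged.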
def test_extra():
extra = z.dual*A*z
assert qapply(Jz*po*extra) == hbar*po*extra
assert qapply(Jx*z*extra) == (hbar*po/sqrt(2) + hbar*mo/sqrt(2))*extra
assert qapply(
(Jplus + Jminus)*z/sqrt(2)*extra) == hbar*po*extra + hbar*mo*extra
assert qapply(Jz*(po + mo)*extra) == hbar*po*extra - hbar*mo*extra
assert qapply(Jz*po*extra + Jz*mo*extra) == hbar*po*extra - hbar*mo*extra
assert qapply(Jminus*Jminus*po*extra) == 2*hbar**2*mo*extra
assert qapply(Jplus**2*mo*extra) == 2*hbar**2*po*extra
assert qapply(Jplus**2*Jminus**2*po*extra) == 4*hbar**4*po*extra
def test_innerproduct():
assert qapply(po.dual*Jz*po, ip_doit=False) == hbar*(po.dual*po)
assert qapply(po.dual*Jz*po) == hbar
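# qapply should return zero unchanged, whether given a Python int or Integer(0).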
def test_zero():
assert qapply(0) == 0
assert qapply(Integer(0)) == 0
def test_commutator():
assert qapply(Commutator(Jx, Jy)*Jz*po) == I*hbar**3*po
assert qapply(Commutator(J2, Jz)*Jz*po) == 0
assert qapply(Commutator(Jz, Foo('F'))*po) == 0
assert qapply(Commutator(Foo('F'), Jz)*po) == 0
def test_anticommutator():
assert qapply(AntiCommutator(Jz, Foo('F'))*po) == 2*hbar*po
assert qapply(AntiCommutator(Foo('F'), Jz)*po) == 2*hbar*po
def test_outerproduct():
e = Jz*(mo*po.dual)*Jz*po
assert qapply(e) == -hbar**2*mo
assert qapply(e, ip_doit=False) == -hbar**2*(po.dual*po)*mo
assert qapply(e).doit() == -hbar**2*mo
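# Boson annihilation/creation operators acting mode-wise on two-mode Fock
# states via TensorProduct, including dagger=True handling of bras.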
def test_tensorproduct():
a = BosonOp("a")
b = BosonOp("b")
ket1 = TensorProduct(BosonFockKet(1), BosonFockKet(2))
ket2 = TensorProduct(BosonFockKet(0), BosonFockKet(0))
ket3 = TensorProduct(BosonFockKet(0), BosonFockKet(2))
bra1 = TensorProduct(BosonFockBra(0), BosonFockBra(0))
bra2 = TensorProduct(BosonFockBra(1), BosonFockBra(2))
assert qapply(TensorProduct(a, b ** 2) * ket1) == sqrt(2) * ket2
assert qapply(TensorProduct(a, Dagger(b) * b) * ket1) == 2 * ket3
assert qapply(bra1 * TensorProduct(a, b * b),
dagger=True) == sqrt(2) * bra2
assert qapply(bra2 * ket1).doit() == TensorProduct(1, 1)
assert qapply(TensorProduct(a, b * b) * ket1) == sqrt(2) * ket2
assert qapply(Dagger(TensorProduct(a, b * b) * ket1),
dagger=True) == sqrt(2) * Dagger(ket2)
def test_dagger():
lhs = Dagger(Qubit(0))*Dagger(H(0))
rhs = Dagger(Qubit(1))/sqrt(2) + Dagger(Qubit(0))/sqrt(2)
assert qapply(lhs, dagger=True) == rhs
def test_issue_6073():
x, y = symbols('x y', commutative=False)
A = Ket(x, y)
B = Operator('B')
assert qapply(A) == A
assert qapply(A.dual*B) == A.dual*B
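# qapply should be applied to each state stored in a Density while the
# probabilities are left untouched.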
def test_density():
d = Density([Jz*mo, 0.5], [Jz*po, 0.5])
assert qapply(d) == Density([-hbar*mo, 0.5], [hbar*po, 0.5])
def test_issue3044():
expr1 = TensorProduct(Jz*JzKet(S(2),S.NegativeOne)/sqrt(2), Jz*JzKet(S.Half,S.Half))
result = Mul(S.NegativeOne, Rational(1, 4), 2**S.Half, hbar**2)
result *= TensorProduct(JzKet(2,-1), JzKet(S.Half,S.Half))
assert qapply(expr1) == result
# Issue 24158: Tests whether qapply incorrectly evaluates some ket*op as op*ket
def test_issue24158_ket_times_op():
P = BosonFockKet(0) * BosonOp("a") # undefined term
# Does lhs._apply_operator_BosonOp(rhs) still evaluate ket*op as op*ket?
assert qapply(P) == P # qapply(P) -> BosonOp("a")*BosonFockKet(0) = 0 before fix
P = Qubit(1) * XGate(0) # undefined term
# Does rhs._apply_operator_Qubit(lhs) still evaluate ket*op as op*ket?
assert qapply(P) == P # qapply(P) -> Qubit(0) before fix
P1 = Mul(QubitBra(0), Mul(QubitBra(0), Qubit(0)), XGate(0)) # legal expr <0| * (<0|*|0>) * X
assert qapply(P1) == QubitBra(0) * XGate(0) # qapply(P1) -> 0 before fix
P1 = qapply(P1, dagger = True) # unsatisfactorily -> <0|*X(0), expect <1| since dagger=True
assert qapply(P1, dagger = True) == QubitBra(1) # qapply(P1, dagger=True) -> 0 before fix
P2 = QubitBra(0) * QubitBra(0) * Qubit(0) * XGate(0) # 'forgot' to set brackets
P2 = qapply(P2, dagger = True) # unsatisfactorily -> <0|*X(0), expect <1| since dagger=True
assert qapply(P2, dagger = True) == QubitBra(1) # qapply(P2, dagger=True) -> 0 before fix
|
5277208d015e257dc139d1f2f1461b6ff2f08adebb09f36ec0b6856119b22942 | from sympy.core.numbers import I
from sympy.core.symbol import symbols
from sympy.core.expr import unchanged
from sympy.matrices import Matrix, SparseMatrix
from sympy.physics.quantum.commutator import Commutator as Comm
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.tensorproduct import TensorProduct as TP
from sympy.physics.quantum.tensorproduct import tensor_product_simp
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.qubit import Qubit, QubitBra
from sympy.physics.quantum.operator import OuterProduct
from sympy.physics.quantum.density import Density
from sympy.physics.quantum.trace import Tr
A, B, C, D = symbols('A,B,C,D', commutative=False)
x = symbols('x')
mat1 = Matrix([[1, 2*I], [1 + I, 3]])
mat2 = Matrix([[2*I, 3], [4*I, 2]])
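# TensorProduct should accept SparseMatrix arguments; `unchanged` asserts the
# expression is left unevaluated.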
def test_sparse_matrices():
spm = SparseMatrix.diag(1, 0)
assert unchanged(TensorProduct, spm, spm)
def test_tensor_product_dagger():
assert Dagger(TensorProduct(I*A, B)) == \
-I*TensorProduct(Dagger(A), Dagger(B))
assert Dagger(TensorProduct(mat1, mat2)) == \
TensorProduct(Dagger(mat1), Dagger(mat2))
def test_tensor_product_abstract():
assert TP(x*A, 2*B) == x*2*TP(A, B)
assert TP(A, B) != TP(B, A)
assert TP(A, B).is_commutative is False
assert isinstance(TP(A, B), TP)
assert TP(A, B).subs(A, C) == TP(C, B)
def test_tensor_product_expand():
assert TP(A + B, B + C).expand(tensorproduct=True) == \
TP(A, B) + TP(A, C) + TP(B, B) + TP(B, C)
#Tests for fix of issue #24142
assert TP(A-B, B-A).expand(tensorproduct=True) == \
TP(A, B) - TP(A, A) - TP(B, B) + TP(B, A)
assert TP(2*A + B, A + B).expand(tensorproduct=True) == \
2 * TP(A, A) + 2 * TP(A, B) + TP(B, A) + TP(B, B)
assert TP(2 * A * B + A, A + B).expand(tensorproduct=True) == \
2 * TP(A*B, A) + 2 * TP(A*B, B) + TP(A, A) + TP(A, B)
def test_tensor_product_commutator():
assert TP(Comm(A, B), C).doit().expand(tensorproduct=True) == \
TP(A*B, C) - TP(B*A, C)
assert Comm(TP(A, B), TP(B, C)).doit() == \
TP(A, B)*TP(B, C) - TP(B, C)*TP(A, B)
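# tensor_product_simp collapses products and powers of TensorProducts into a
# single TensorProduct of the combined factors.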
def test_tensor_product_simp():
assert tensor_product_simp(TP(A, B)*TP(B, C)) == TP(A*B, B*C)
# tests for Pow-expressions
assert tensor_product_simp(TP(A, B)**x) == TP(A**x, B**x)
assert tensor_product_simp(x*TP(A, B)**2) == x*TP(A**2,B**2)
assert tensor_product_simp(x*(TP(A, B)**2)*TP(C,D)) == x*TP(A**2*C,B**2*D)
assert tensor_product_simp(TP(A,B)-TP(C,D)**x) == TP(A,B)-TP(C**x,D**x)
def test_issue_5923():
# most of the issue regarding sympification of args has been handled
# and is tested internally by the use of args_cnc through the quantum
# module, but the following is a test from the issue that used to raise.
assert TensorProduct(1, Qubit('1')*Qubit('1').dual) == \
TensorProduct(1, OuterProduct(Qubit(1), QubitBra(1)))
def test_eval_trace():
# This test includes tests with dependencies between TensorProducts
# and density operators. Since the test is mainly about the behavior of
# TensorProducts, it remains here.
A, B, C, D, E, F = symbols('A B C D E F', commutative=False)
# Density with simple tensor products as args
t = TensorProduct(A, B)
d = Density([t, 1.0])
tr = Tr(d)
assert tr.doit() == 1.0*Tr(A*Dagger(A))*Tr(B*Dagger(B))
## partial trace with simple tensor products as args
t = TensorProduct(A, B, C)
d = Density([t, 1.0])
tr = Tr(d, [1])
assert tr.doit() == 1.0*A*Dagger(A)*Tr(B*Dagger(B))*C*Dagger(C)
tr = Tr(d, [0, 2])
assert tr.doit() == 1.0*Tr(A*Dagger(A))*B*Dagger(B)*Tr(C*Dagger(C))
# Density with multiple TensorProducts as states
t2 = TensorProduct(A, B)
t3 = TensorProduct(C, D)
d = Density([t2, 0.5], [t3, 0.5])
t = Tr(d)
assert t.doit() == (0.5*Tr(A*Dagger(A))*Tr(B*Dagger(B)) +
0.5*Tr(C*Dagger(C))*Tr(D*Dagger(D)))
t = Tr(d, [0])
assert t.doit() == (0.5*Tr(A*Dagger(A))*B*Dagger(B) +
0.5*Tr(C*Dagger(C))*D*Dagger(D))
#Density with mixed states
d = Density([t2 + t3, 1.0])
t = Tr(d)
assert t.doit() == ( 1.0*Tr(A*Dagger(A))*Tr(B*Dagger(B)) +
1.0*Tr(A*Dagger(C))*Tr(B*Dagger(D)) +
1.0*Tr(C*Dagger(A))*Tr(D*Dagger(B)) +
1.0*Tr(C*Dagger(C))*Tr(D*Dagger(D)))
t = Tr(d, [1] )
assert t.doit() == ( 1.0*A*Dagger(A)*Tr(B*Dagger(B)) +
1.0*A*Dagger(C)*Tr(B*Dagger(D)) +
1.0*C*Dagger(A)*Tr(D*Dagger(B)) +
1.0*C*Dagger(C)*Tr(D*Dagger(D)))
|
7e8add501482edc96f293b8a0ad6fe2a1ace194a27305d70fbdd46fa96b0cca9 | from sympy.core.function import expand_mul
from sympy.core.numbers import pi
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.core.backend import Matrix, _simplify_matrix, eye, zeros
from sympy.core.symbol import symbols
from sympy.physics.mechanics import (dynamicsymbols, Body, JointsMethod,
PinJoint, PrismaticJoint, CylindricalJoint,
PlanarJoint, SphericalJoint, WeldJoint)
from sympy.physics.mechanics.joint import Joint
from sympy.physics.vector import Vector, ReferenceFrame, Point
from sympy.testing.pytest import raises, warns_deprecated_sympy
Vector.simp = True
t = dynamicsymbols._t # type: ignore
def _generate_body(interframe=False):
N = ReferenceFrame('N')
A = ReferenceFrame('A')
P = Body('P', frame=N)
C = Body('C', frame=A)
if interframe:
Pint, Cint = ReferenceFrame('P_int'), ReferenceFrame('C_int')
Pint.orient_axis(N, N.x, pi)
Cint.orient_axis(A, A.y, -pi / 2)
return N, A, P, C, Pint, Cint
return N, A, P, C
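# Joint is an abstract base class, so instantiating it directly must raise.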
def test_Joint():
parent = Body('parent')
child = Body('child')
raises(TypeError, lambda: Joint('J', parent, child))
def test_coordinate_generation():
q, u, qj, uj = dynamicsymbols('q u q_J u_J')
q0j, q1j, q2j, q3j, u0j, u1j, u2j, u3j = dynamicsymbols('q0:4_J u0:4_J')
q0, q1, q2, q3, u0, u1, u2, u3 = dynamicsymbols('q0:4 u0:4')
_, _, P, C = _generate_body()
# Using PinJoint to access Joint's coordinate generation method
J = PinJoint('J', P, C)
# Test single given
assert J._fill_coordinate_list(q, 1) == Matrix([q])
assert J._fill_coordinate_list([u], 1) == Matrix([u])
assert J._fill_coordinate_list([u], 1, offset=2) == Matrix([u])
# Test None
assert J._fill_coordinate_list(None, 1) == Matrix([qj])
assert J._fill_coordinate_list([None], 1) == Matrix([qj])
assert J._fill_coordinate_list([q0, None, None], 3) == Matrix(
[q0, q1j, q2j])
# Test autofill
assert J._fill_coordinate_list(None, 3) == Matrix([q0j, q1j, q2j])
assert J._fill_coordinate_list([], 3) == Matrix([q0j, q1j, q2j])
# Test offset
assert J._fill_coordinate_list([], 3, offset=1) == Matrix([q1j, q2j, q3j])
assert J._fill_coordinate_list([q1, None, q3], 3, offset=1) == Matrix(
[q1, q2j, q3])
assert J._fill_coordinate_list(None, 2, offset=2) == Matrix([q2j, q3j])
# Test label
assert J._fill_coordinate_list(None, 1, 'u') == Matrix([uj])
assert J._fill_coordinate_list([], 3, 'u') == Matrix([u0j, u1j, u2j])
# Test single numbering
assert J._fill_coordinate_list(None, 1, number_single=True) == Matrix([q0j])
assert J._fill_coordinate_list([], 1, 'u', 2, True) == Matrix([u2j])
assert J._fill_coordinate_list([], 3, 'q') == Matrix([q0j, q1j, q2j])
# Test invalid number of coordinates supplied
raises(ValueError, lambda: J._fill_coordinate_list([q0, q1], 1))
raises(ValueError, lambda: J._fill_coordinate_list([u0, u1, None], 2, 'u'))
raises(ValueError, lambda: J._fill_coordinate_list([q0, q1], 3))
# Test incorrect coordinate type
raises(TypeError, lambda: J._fill_coordinate_list([q0, symbols('q1')], 2))
raises(TypeError, lambda: J._fill_coordinate_list([q0 + q1, q1], 2))
# Test if derivative as generalized speed is allowed
_, _, P, C = _generate_body()
PinJoint('J', P, C, q1, q1.diff(t))
# Test duplicate coordinates
_, _, P, C = _generate_body()
raises(ValueError, lambda: SphericalJoint('J', P, C, [q1j, None, None]))
raises(ValueError, lambda: SphericalJoint('J', P, C, speeds=[u0, u0, u1]))
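# PinJoint: default attributes, then custom attachment points, interframes and
# joint axis.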
def test_pin_joint():
P = Body('P')
C = Body('C')
l, m = symbols('l m')
q, u = dynamicsymbols('q_J, u_J')
Pj = PinJoint('J', P, C)
assert Pj.name == 'J'
assert Pj.parent == P
assert Pj.child == C
assert Pj.coordinates == Matrix([q])
assert Pj.speeds == Matrix([u])
assert Pj.kdes == Matrix([u - q.diff(t)])
assert Pj.joint_axis == P.frame.x
assert Pj.child_point.pos_from(C.masscenter) == Vector(0)
assert Pj.parent_point.pos_from(P.masscenter) == Vector(0)
assert Pj.parent_point.pos_from(Pj._child_point) == Vector(0)
assert C.masscenter.pos_from(P.masscenter) == Vector(0)
assert Pj.parent_interframe == P.frame
assert Pj.child_interframe == C.frame
assert Pj.__str__() == 'PinJoint: J parent: P child: C'
P1 = Body('P1')
C1 = Body('C1')
Pint = ReferenceFrame('P_int')
Pint.orient_axis(P1.frame, P1.y, pi / 2)
J1 = PinJoint('J1', P1, C1, parent_point=l*P1.frame.x,
child_point=m*C1.frame.y, joint_axis=P1.frame.z,
parent_interframe=Pint)
assert J1._joint_axis == P1.frame.z
assert J1._child_point.pos_from(C1.masscenter) == m * C1.frame.y
assert J1._parent_point.pos_from(P1.masscenter) == l * P1.frame.x
assert J1._parent_point.pos_from(J1._child_point) == Vector(0)
assert (P1.masscenter.pos_from(C1.masscenter) ==
-l*P1.frame.x + m*C1.frame.y)
assert J1.parent_interframe == Pint
assert J1.child_interframe == C1.frame
q, u = dynamicsymbols('q, u')
N, A, P, C, Pint, Cint = _generate_body(True)
parent_point = P.masscenter.locatenew('parent_point', N.x + N.y)
child_point = C.masscenter.locatenew('child_point', C.y + C.z)
J = PinJoint('J', P, C, q, u, parent_point=parent_point,
child_point=child_point, parent_interframe=Pint,
child_interframe=Cint, joint_axis=N.z)
assert J.joint_axis == N.z
assert J.parent_point.vel(N) == 0
assert J.parent_point == parent_point
assert J.child_point == child_point
assert J.child_point.pos_from(P.masscenter) == N.x + N.y
assert J.parent_point.pos_from(C.masscenter) == C.y + C.z
assert C.masscenter.pos_from(P.masscenter) == N.x + N.y - C.y - C.z
assert C.masscenter.vel(N).express(N) == (u * sin(q) - u * cos(q)) * N.x + (
-u * sin(q) - u * cos(q)) * N.y
assert J.parent_interframe == Pint
assert J.child_interframe == Cint
def test_pin_joint_double_pendulum():
q1, q2 = dynamicsymbols('q1 q2')
u1, u2 = dynamicsymbols('u1 u2')
m, l = symbols('m l')
N = ReferenceFrame('N')
A = ReferenceFrame('A')
B = ReferenceFrame('B')
C = Body('C', frame=N) # ceiling
PartP = Body('P', frame=A, mass=m)
PartR = Body('R', frame=B, mass=m)
J1 = PinJoint('J1', C, PartP, speeds=u1, coordinates=q1,
child_point=-l*A.x, joint_axis=C.frame.z)
J2 = PinJoint('J2', PartP, PartR, speeds=u2, coordinates=q2,
child_point=-l*B.x, joint_axis=PartP.frame.z)
# Check orientation
assert N.dcm(A) == Matrix([[cos(q1), -sin(q1), 0],
[sin(q1), cos(q1), 0], [0, 0, 1]])
assert A.dcm(B) == Matrix([[cos(q2), -sin(q2), 0],
[sin(q2), cos(q2), 0], [0, 0, 1]])
assert _simplify_matrix(N.dcm(B)) == Matrix([[cos(q1 + q2), -sin(q1 + q2), 0],
[sin(q1 + q2), cos(q1 + q2), 0],
[0, 0, 1]])
# Check Angular Velocity
assert A.ang_vel_in(N) == u1 * N.z
assert B.ang_vel_in(A) == u2 * A.z
assert B.ang_vel_in(N) == u1 * N.z + u2 * A.z
# Check kde
assert J1.kdes == Matrix([u1 - q1.diff(t)])
assert J2.kdes == Matrix([u2 - q2.diff(t)])
# Check Linear Velocity
assert PartP.masscenter.vel(N) == l*u1*A.y
assert PartR.masscenter.vel(A) == l*u2*B.y
assert PartR.masscenter.vel(N) == l*u1*A.y + l*(u1 + u2)*B.y
def test_pin_joint_chaos_pendulum():
mA, mB, lA, lB, h = symbols('mA, mB, lA, lB, h')
theta, phi, omega, alpha = dynamicsymbols('theta phi omega alpha')
N = ReferenceFrame('N')
A = ReferenceFrame('A')
B = ReferenceFrame('B')
lA = (lB - h / 2) / 2
lC = (lB/2 + h/4)
rod = Body('rod', frame=A, mass=mA)
plate = Body('plate', mass=mB, frame=B)
C = Body('C', frame=N)
J1 = PinJoint('J1', C, rod, coordinates=theta, speeds=omega,
child_point=lA*A.z, joint_axis=N.y)
J2 = PinJoint('J2', rod, plate, coordinates=phi, speeds=alpha,
parent_point=lC*A.z, joint_axis=A.z)
# Check orientation
assert A.dcm(N) == Matrix([[cos(theta), 0, -sin(theta)],
[0, 1, 0],
[sin(theta), 0, cos(theta)]])
assert A.dcm(B) == Matrix([[cos(phi), -sin(phi), 0],
[sin(phi), cos(phi), 0],
[0, 0, 1]])
assert B.dcm(N) == Matrix([
[cos(phi)*cos(theta), sin(phi), -sin(theta)*cos(phi)],
[-sin(phi)*cos(theta), cos(phi), sin(phi)*sin(theta)],
[sin(theta), 0, cos(theta)]])
# Check Angular Velocity
assert A.ang_vel_in(N) == omega*N.y
assert A.ang_vel_in(B) == -alpha*A.z
assert N.ang_vel_in(B) == -omega*N.y - alpha*A.z
# Check kde
assert J1.kdes == Matrix([omega - theta.diff(t)])
assert J2.kdes == Matrix([alpha - phi.diff(t)])
# Check pos of masscenters
assert C.masscenter.pos_from(rod.masscenter) == lA*A.z
assert rod.masscenter.pos_from(plate.masscenter) == - lC * A.z
# Check Linear Velocities
assert rod.masscenter.vel(N) == (h/4 - lB/2)*omega*A.x
assert plate.masscenter.vel(N) == ((h/4 - lB/2)*omega +
(h/4 + lB/2)*omega)*A.x
def test_pin_joint_interframe():
q, u = dynamicsymbols('q, u')
# Check not connected
N, A, P, C = _generate_body()
Pint, Cint = ReferenceFrame('Pint'), ReferenceFrame('Cint')
raises(ValueError, lambda: PinJoint('J', P, C, parent_interframe=Pint))
raises(ValueError, lambda: PinJoint('J', P, C, child_interframe=Cint))
# Check not fixed interframe
Pint.orient_axis(N, N.z, q)
Cint.orient_axis(A, A.z, q)
raises(ValueError, lambda: PinJoint('J', P, C, parent_interframe=Pint))
raises(ValueError, lambda: PinJoint('J', P, C, child_interframe=Cint))
# Check only parent_interframe
N, A, P, C = _generate_body()
Pint = ReferenceFrame('Pint')
Pint.orient_body_fixed(N, (pi / 4, pi, pi / 3), 'xyz')
PinJoint('J', P, C, q, u, parent_point=N.x, child_point=-C.y,
parent_interframe=Pint, joint_axis=Pint.x)
assert _simplify_matrix(N.dcm(A)) - Matrix([
[-1 / 2, sqrt(3) * cos(q) / 2, -sqrt(3) * sin(q) / 2],
[sqrt(6) / 4, sqrt(2) * (2 * sin(q) + cos(q)) / 4,
sqrt(2) * (-sin(q) + 2 * cos(q)) / 4],
[sqrt(6) / 4, sqrt(2) * (-2 * sin(q) + cos(q)) / 4,
-sqrt(2) * (sin(q) + 2 * cos(q)) / 4]]) == zeros(3)
assert A.ang_vel_in(N) == u * Pint.x
assert C.masscenter.pos_from(P.masscenter) == N.x + A.y
assert C.masscenter.vel(N) == u * A.z
assert P.masscenter.vel(Pint) == Vector(0)
assert C.masscenter.vel(Pint) == u * A.z
# Check only child_interframe
N, A, P, C = _generate_body()
Cint = ReferenceFrame('Cint')
Cint.orient_body_fixed(A, (2 * pi / 3, -pi, pi / 2), 'xyz')
PinJoint('J', P, C, q, u, parent_point=-N.z, child_point=C.x,
child_interframe=Cint, joint_axis=P.x + P.z)
assert _simplify_matrix(N.dcm(A)) == Matrix([
[-sqrt(2) * sin(q) / 2,
-sqrt(3) * (cos(q) - 1) / 4 - cos(q) / 4 - S(1) / 4,
sqrt(3) * (cos(q) + 1) / 4 - cos(q) / 4 + S(1) / 4],
[cos(q), (sqrt(2) + sqrt(6)) * -sin(q) / 4,
(-sqrt(2) + sqrt(6)) * sin(q) / 4],
[sqrt(2) * sin(q) / 2,
sqrt(3) * (cos(q) + 1) / 4 + cos(q) / 4 - S(1) / 4,
sqrt(3) * (1 - cos(q)) / 4 + cos(q) / 4 + S(1) / 4]])
assert A.ang_vel_in(N) == sqrt(2) * u / 2 * N.x + sqrt(2) * u / 2 * N.z
assert C.masscenter.pos_from(P.masscenter) == - N.z - A.x
assert C.masscenter.vel(N).simplify() == (
-sqrt(6) - sqrt(2)) * u / 4 * A.y + (
-sqrt(2) + sqrt(6)) * u / 4 * A.z
assert C.masscenter.vel(Cint) == Vector(0)
# Check combination
N, A, P, C = _generate_body()
Pint, Cint = ReferenceFrame('Pint'), ReferenceFrame('Cint')
Pint.orient_body_fixed(N, (-pi / 2, pi, pi / 2), 'xyz')
Cint.orient_body_fixed(A, (2 * pi / 3, -pi, pi / 2), 'xyz')
PinJoint('J', P, C, q, u, parent_point=N.x - N.y, child_point=-C.z,
parent_interframe=Pint, child_interframe=Cint,
joint_axis=Pint.x + Pint.z)
assert _simplify_matrix(N.dcm(A)) == Matrix([
[cos(q), (sqrt(2) + sqrt(6)) * -sin(q) / 4,
(-sqrt(2) + sqrt(6)) * sin(q) / 4],
[-sqrt(2) * sin(q) / 2,
-sqrt(3) * (cos(q) + 1) / 4 - cos(q) / 4 + S(1) / 4,
sqrt(3) * (cos(q) - 1) / 4 - cos(q) / 4 - S(1) / 4],
[sqrt(2) * sin(q) / 2,
sqrt(3) * (cos(q) - 1) / 4 + cos(q) / 4 + S(1) / 4,
-sqrt(3) * (cos(q) + 1) / 4 + cos(q) / 4 - S(1) / 4]])
assert A.ang_vel_in(N) == sqrt(2) * u / 2 * Pint.x + sqrt(
2) * u / 2 * Pint.z
assert C.masscenter.pos_from(P.masscenter) == N.x - N.y + A.z
N_v_C = (-sqrt(2) + sqrt(6)) * u / 4 * A.x
assert C.masscenter.vel(N).simplify() == N_v_C
assert C.masscenter.vel(Pint).simplify() == N_v_C
assert C.masscenter.vel(Cint) == Vector(0)
def test_pin_joint_joint_axis():
q, u = dynamicsymbols('q, u')
# Check parent as reference
N, A, P, C, Pint, Cint = _generate_body(True)
pin = PinJoint('J', P, C, q, u, parent_interframe=Pint,
child_interframe=Cint, joint_axis=P.y)
assert pin.joint_axis == P.y
assert N.dcm(A) == Matrix([[sin(q), 0, cos(q)], [0, -1, 0],
[cos(q), 0, -sin(q)]])
# Check parent_interframe as reference
N, A, P, C, Pint, Cint = _generate_body(True)
pin = PinJoint('J', P, C, q, u, parent_interframe=Pint,
child_interframe=Cint, joint_axis=Pint.y)
assert pin.joint_axis == Pint.y
assert N.dcm(A) == Matrix([[-sin(q), 0, cos(q)], [0, -1, 0],
[cos(q), 0, sin(q)]])
# Check combination of joint_axis with interframes supplied as vectors (2x)
N, A, P, C = _generate_body()
pin = PinJoint('J', P, C, q, u, parent_interframe=N.z,
child_interframe=-C.z, joint_axis=N.z)
assert pin.joint_axis == N.z
assert N.dcm(A) == Matrix([[-cos(q), -sin(q), 0], [-sin(q), cos(q), 0],
[0, 0, -1]])
N, A, P, C = _generate_body()
pin = PinJoint('J', P, C, q, u, parent_interframe=N.z,
child_interframe=-C.z, joint_axis=N.x)
assert pin.joint_axis == N.x
assert N.dcm(A) == Matrix([[-1, 0, 0], [0, cos(q), sin(q)],
[0, sin(q), -cos(q)]])
# Check time varying axis
N, A, P, C, Pint, Cint = _generate_body(True)
raises(ValueError, lambda: PinJoint('J', P, C,
joint_axis=cos(q) * N.x + sin(q) * N.y))
# Check joint_axis provided in child frame
raises(ValueError, lambda: PinJoint('J', P, C, joint_axis=C.x))
# Check some invalid combinations
raises(ValueError, lambda: PinJoint('J', P, C, joint_axis=P.x + C.y))
raises(ValueError, lambda: PinJoint(
'J', P, C, parent_interframe=Pint, child_interframe=Cint,
joint_axis=Pint.x + C.y))
raises(ValueError, lambda: PinJoint(
'J', P, C, parent_interframe=Pint, child_interframe=Cint,
joint_axis=P.x + Cint.y))
# Check valid special combination
N, A, P, C, Pint, Cint = _generate_body(True)
PinJoint('J', P, C, parent_interframe=Pint, child_interframe=Cint,
joint_axis=Pint.x + P.y)
# Check invalid zero vector
raises(Exception, lambda: PinJoint(
'J', P, C, parent_interframe=Pint, child_interframe=Cint,
joint_axis=Vector(0)))
raises(Exception, lambda: PinJoint(
'J', P, C, parent_interframe=Pint, child_interframe=Cint,
joint_axis=P.y + Pint.y))
def test_pin_joint_arbitrary_axis():
q, u = dynamicsymbols('q_J, u_J')
# When the bodies are attached through masscenters but axes are opposite.
N, A, P, C = _generate_body()
PinJoint('J', P, C, child_interframe=-A.x)
assert (-A.x).angle_between(N.x) == 0
assert -A.x.express(N) == N.x
assert A.dcm(N) == Matrix([[-1, 0, 0],
[0, -cos(q), -sin(q)],
[0, -sin(q), cos(q)]])
assert A.ang_vel_in(N) == u*N.x
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
assert C.masscenter.pos_from(P.masscenter) == 0
assert C.masscenter.pos_from(P.masscenter).express(N).simplify() == 0
assert C.masscenter.vel(N) == 0
# When axes are different and parent joint is at masscenter but child joint
# is at a unit vector from child masscenter.
N, A, P, C = _generate_body()
PinJoint('J', P, C, child_interframe=A.y, child_point=A.x)
assert A.y.angle_between(N.x) == 0 # Axes are aligned
assert A.y.express(N) == N.x
assert A.dcm(N) == Matrix([[0, -cos(q), -sin(q)],
[1, 0, 0],
[0, -sin(q), cos(q)]])
assert A.ang_vel_in(N) == u*N.x
assert A.ang_vel_in(N).express(A) == u * A.y
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
assert A.ang_vel_in(N).cross(A.y) == 0
assert C.masscenter.vel(N) == u*A.z
assert C.masscenter.pos_from(P.masscenter) == -A.x
assert (C.masscenter.pos_from(P.masscenter).express(N).simplify() ==
cos(q)*N.y + sin(q)*N.z)
assert C.masscenter.vel(N).angle_between(A.x) == pi/2
# Similar to previous case but wrt parent body
N, A, P, C = _generate_body()
PinJoint('J', P, C, parent_interframe=N.y, parent_point=N.x)
assert N.y.angle_between(A.x) == 0 # Axes are aligned
assert N.y.express(A) == A.x
assert A.dcm(N) == Matrix([[0, 1, 0],
[-cos(q), 0, sin(q)],
[sin(q), 0, cos(q)]])
assert A.ang_vel_in(N) == u*N.y
assert A.ang_vel_in(N).express(A) == u*A.x
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
angle = A.ang_vel_in(N).angle_between(A.x)
assert angle.xreplace({u: 1}) == 0
assert C.masscenter.vel(N) == 0
assert C.masscenter.pos_from(P.masscenter) == N.x
# Both joint positions are defined but with different axes
N, A, P, C = _generate_body()
PinJoint('J', P, C, parent_point=N.x, child_point=A.x,
child_interframe=A.x + A.y)
assert expand_mul(N.x.angle_between(A.x + A.y)) == 0 # Axes are aligned
assert (A.x + A.y).express(N).simplify() == sqrt(2)*N.x
assert _simplify_matrix(A.dcm(N)) == Matrix([
[sqrt(2)/2, -sqrt(2)*cos(q)/2, -sqrt(2)*sin(q)/2],
[sqrt(2)/2, sqrt(2)*cos(q)/2, sqrt(2)*sin(q)/2],
[0, -sin(q), cos(q)]])
assert A.ang_vel_in(N) == u*N.x
assert (A.ang_vel_in(N).express(A).simplify() ==
(u*A.x + u*A.y)/sqrt(2))
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
angle = A.ang_vel_in(N).angle_between(A.x + A.y)
assert angle.xreplace({u: 1}) == 0
assert C.masscenter.vel(N).simplify() == (u * A.z)/sqrt(2)
assert C.masscenter.pos_from(P.masscenter) == N.x - A.x
assert (C.masscenter.pos_from(P.masscenter).express(N).simplify() ==
(1 - sqrt(2)/2)*N.x + sqrt(2)*cos(q)/2*N.y +
sqrt(2)*sin(q)/2*N.z)
assert (C.masscenter.vel(N).express(N).simplify() ==
-sqrt(2)*u*sin(q)/2*N.y + sqrt(2)*u*cos(q)/2*N.z)
assert C.masscenter.vel(N).angle_between(A.x) == pi/2
N, A, P, C = _generate_body()
PinJoint('J', P, C, parent_point=N.x, child_point=A.x,
child_interframe=A.x + A.y - A.z)
assert expand_mul(N.x.angle_between(A.x + A.y - A.z)) == 0 # Axes aligned
assert (A.x + A.y - A.z).express(N).simplify() == sqrt(3)*N.x
assert _simplify_matrix(A.dcm(N)) == Matrix([
[sqrt(3)/3, -sqrt(6)*sin(q + pi/4)/3,
sqrt(6)*cos(q + pi/4)/3],
[sqrt(3)/3, sqrt(6)*cos(q + pi/12)/3,
sqrt(6)*sin(q + pi/12)/3],
[-sqrt(3)/3, sqrt(6)*cos(q + 5*pi/12)/3,
sqrt(6)*sin(q + 5*pi/12)/3]])
assert A.ang_vel_in(N) == u*N.x
assert A.ang_vel_in(N).express(A).simplify() == (u*A.x + u*A.y -
u*A.z)/sqrt(3)
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
angle = A.ang_vel_in(N).angle_between(A.x + A.y-A.z)
assert angle.xreplace({u: 1}) == 0
assert C.masscenter.vel(N).simplify() == (u*A.y + u*A.z)/sqrt(3)
assert C.masscenter.pos_from(P.masscenter) == N.x - A.x
assert (C.masscenter.pos_from(P.masscenter).express(N).simplify() ==
(1 - sqrt(3)/3)*N.x + sqrt(6)*sin(q + pi/4)/3*N.y -
sqrt(6)*cos(q + pi/4)/3*N.z)
assert (C.masscenter.vel(N).express(N).simplify() ==
sqrt(6)*u*cos(q + pi/4)/3*N.y +
sqrt(6)*u*sin(q + pi/4)/3*N.z)
assert C.masscenter.vel(N).angle_between(A.x) == pi/2
N, A, P, C = _generate_body()
m, n = symbols('m n')
PinJoint('J', P, C, parent_point=m * N.x, child_point=n * A.x,
child_interframe=A.x + A.y - A.z,
parent_interframe=N.x - N.y + N.z)
angle = (N.x - N.y + N.z).angle_between(A.x + A.y - A.z)
assert expand_mul(angle) == 0 # Axes are aligned
assert ((A.x-A.y+A.z).express(N).simplify() ==
(-4*cos(q)/3 - S(1)/3)*N.x + (S(1)/3 - 4*sin(q + pi/6)/3)*N.y +
(4*cos(q + pi/3)/3 - S(1)/3)*N.z)
assert _simplify_matrix(A.dcm(N)) == Matrix([
[S(1)/3 - 2*cos(q)/3, -2*sin(q + pi/6)/3 - S(1)/3,
2*cos(q + pi/3)/3 + S(1)/3],
[2*cos(q + pi/3)/3 + S(1)/3, 2*cos(q)/3 - S(1)/3,
2*sin(q + pi/6)/3 + S(1)/3],
[-2*sin(q + pi/6)/3 - S(1)/3, 2*cos(q + pi/3)/3 + S(1)/3,
2*cos(q)/3 - S(1)/3]])
assert A.ang_vel_in(N) == (u*N.x - u*N.y + u*N.z)/sqrt(3)
assert A.ang_vel_in(N).express(A).simplify() == (u*A.x + u*A.y -
u*A.z)/sqrt(3)
assert A.ang_vel_in(N).magnitude() == sqrt(u**2)
angle = A.ang_vel_in(N).angle_between(A.x+A.y-A.z)
assert angle.xreplace({u: 1}) == 0
assert (C.masscenter.vel(N).simplify() ==
sqrt(3)*n*u/3*A.y + sqrt(3)*n*u/3*A.z)
assert C.masscenter.pos_from(P.masscenter) == m*N.x - n*A.x
assert (C.masscenter.pos_from(P.masscenter).express(N).simplify() ==
(m + n*(2*cos(q) - 1)/3)*N.x + n*(2*sin(q + pi/6) +
1)/3*N.y - n*(2*cos(q + pi/3) + 1)/3*N.z)
assert (C.masscenter.vel(N).express(N).simplify() ==
- 2*n*u*sin(q)/3*N.x + 2*n*u*cos(q + pi/6)/3*N.y +
2*n*u*sin(q + pi/3)/3*N.z)
assert C.masscenter.vel(N).dot(N.x - N.y + N.z).simplify() == 0
def test_create_aligned_frame_pi():
N, A, P, C = _generate_body()
f = Joint._create_aligned_interframe(P, -P.x, P.x)
assert f.z == P.z
f = Joint._create_aligned_interframe(P, -P.y, P.y)
assert f.x == P.x
f = Joint._create_aligned_interframe(P, -P.z, P.z)
assert f.y == P.y
f = Joint._create_aligned_interframe(P, -P.x - P.y, P.x + P.y)
assert f.z == P.z
f = Joint._create_aligned_interframe(P, -P.y - P.z, P.y + P.z)
assert f.x == P.x
f = Joint._create_aligned_interframe(P, -P.x - P.z, P.x + P.z)
assert f.y == P.y
f = Joint._create_aligned_interframe(P, -P.x - P.y - P.z, P.x + P.y + P.z)
assert f.y - f.z == P.y - P.z
def test_pin_joint_axis():
q, u = dynamicsymbols('q u')
# Test default joint axis
N, A, P, C, Pint, Cint = _generate_body(True)
J = PinJoint('J', P, C, q, u, parent_interframe=Pint, child_interframe=Cint)
assert J.joint_axis == Pint.x
# Test for the same joint axis expressed in different frames
N_R_A = Matrix([[0, sin(q), cos(q)],
[0, -cos(q), sin(q)],
[1, 0, 0]])
N, A, P, C, Pint, Cint = _generate_body(True)
PinJoint('J', P, C, q, u, parent_interframe=Pint, child_interframe=Cint,
joint_axis=N.z)
assert N.dcm(A) == N_R_A
N, A, P, C, Pint, Cint = _generate_body(True)
PinJoint('J', P, C, q, u, parent_interframe=Pint, child_interframe=Cint,
joint_axis=-Pint.z)
assert N.dcm(A) == N_R_A
# Test time varying joint axis
N, A, P, C, Pint, Cint = _generate_body(True)
raises(ValueError, lambda: PinJoint('J', P, C, joint_axis=q * N.z))
def test_locate_joint_pos():
# Test Vector and default
N, A, P, C = _generate_body()
joint = PinJoint('J', P, C, parent_point=N.y + N.z)
assert joint.parent_point.name == 'J_P_joint'
assert joint.parent_point.pos_from(P.masscenter) == N.y + N.z
assert joint.child_point == C.masscenter
# Test Point objects
N, A, P, C = _generate_body()
parent_point = P.masscenter.locatenew('p', N.y + N.z)
joint = PinJoint('J', P, C, parent_point=parent_point,
child_point=C.masscenter)
assert joint.parent_point == parent_point
assert joint.child_point == C.masscenter
# Check invalid type
N, A, P, C = _generate_body()
raises(TypeError,
lambda: PinJoint('J', P, C, parent_point=N.x.to_matrix(N)))
# Test time varying positions
q = dynamicsymbols('q')
N, A, P, C = _generate_body()
raises(ValueError, lambda: PinJoint('J', P, C, parent_point=q * N.x))
N, A, P, C = _generate_body()
child_point = C.masscenter.locatenew('p', q * A.y)
raises(ValueError, lambda: PinJoint('J', P, C, child_point=child_point))
# Test undefined position
child_point = Point('p')
raises(ValueError, lambda: PinJoint('J', P, C, child_point=child_point))
def test_locate_joint_frame():
# Test rotated frame and default
N, A, P, C = _generate_body()
parent_interframe = ReferenceFrame('int_frame')
parent_interframe.orient_axis(N, N.z, 1)
joint = PinJoint('J', P, C, parent_interframe=parent_interframe)
assert joint.parent_interframe == parent_interframe
assert joint.parent_interframe.ang_vel_in(N) == 0
assert joint.child_interframe == A
# Test time varying orientations
q = dynamicsymbols('q')
N, A, P, C = _generate_body()
parent_interframe = ReferenceFrame('int_frame')
parent_interframe.orient_axis(N, N.z, q)
raises(ValueError,
lambda: PinJoint('J', P, C, parent_interframe=parent_interframe))
# Test undefined frame
N, A, P, C = _generate_body()
child_interframe = ReferenceFrame('int_frame')
child_interframe.orient_axis(N, N.z, 1) # Defined with respect to parent
raises(ValueError,
lambda: PinJoint('J', P, C, child_interframe=child_interframe))
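# PrismaticJoint: pure translation along the joint axis, with no relative
# rotation between parent and child.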
def test_sliding_joint():
_, _, P, C = _generate_body()
q, u = dynamicsymbols('q_S, u_S')
S = PrismaticJoint('S', P, C)
assert S.name == 'S'
assert S.parent == P
assert S.child == C
assert S.coordinates == Matrix([q])
assert S.speeds == Matrix([u])
assert S.kdes == Matrix([u - q.diff(t)])
assert S.joint_axis == P.frame.x
assert S.child_point.pos_from(C.masscenter) == Vector(0)
assert S.parent_point.pos_from(P.masscenter) == Vector(0)
assert S.parent_point.pos_from(S.child_point) == - q * P.frame.x
assert P.masscenter.pos_from(C.masscenter) == - q * P.frame.x
assert C.masscenter.vel(P.frame) == u * P.frame.x
assert P.ang_vel_in(C) == 0
assert C.ang_vel_in(P) == 0
assert S.__str__() == 'PrismaticJoint: S parent: P child: C'
N, A, P, C = _generate_body()
l, m = symbols('l m')
Pint = ReferenceFrame('P_int')
Pint.orient_axis(P.frame, P.y, pi / 2)
S = PrismaticJoint('S', P, C, parent_point=l * P.frame.x,
child_point=m * C.frame.y, joint_axis=P.frame.z,
parent_interframe=Pint)
assert S.joint_axis == P.frame.z
assert S.child_point.pos_from(C.masscenter) == m * C.frame.y
assert S.parent_point.pos_from(P.masscenter) == l * P.frame.x
assert S.parent_point.pos_from(S.child_point) == - q * P.frame.z
assert P.masscenter.pos_from(C.masscenter) == - l*N.x - q*N.z + m*A.y
assert C.masscenter.vel(P.frame) == u * P.frame.z
assert P.masscenter.vel(Pint) == Vector(0)
assert C.ang_vel_in(P) == 0
assert P.ang_vel_in(C) == 0
_, _, P, C = _generate_body()
Pint = ReferenceFrame('P_int')
Pint.orient_axis(P.frame, P.y, pi / 2)
S = PrismaticJoint('S', P, C, parent_point=l * P.frame.z,
child_point=m * C.frame.x, joint_axis=P.frame.z,
parent_interframe=Pint)
assert S.joint_axis == P.frame.z
assert S.child_point.pos_from(C.masscenter) == m * C.frame.x
assert S.parent_point.pos_from(P.masscenter) == l * P.frame.z
assert S.parent_point.pos_from(S.child_point) == - q * P.frame.z
assert P.masscenter.pos_from(C.masscenter) == (-l - q)*P.frame.z + m*C.frame.x
assert C.masscenter.vel(P.frame) == u * P.frame.z
assert C.ang_vel_in(P) == 0
assert P.ang_vel_in(C) == 0
def test_sliding_joint_arbitrary_axis():
q, u = dynamicsymbols('q_S, u_S')
N, A, P, C = _generate_body()
PrismaticJoint('S', P, C, child_interframe=-A.x)
assert (-A.x).angle_between(N.x) == 0
assert -A.x.express(N) == N.x
assert A.dcm(N) == Matrix([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
assert C.masscenter.pos_from(P.masscenter) == q * N.x
assert C.masscenter.pos_from(P.masscenter).express(A).simplify() == -q * A.x
assert C.masscenter.vel(N) == u * N.x
assert C.masscenter.vel(N).express(A) == -u * A.x
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
#When axes are different and parent joint is at masscenter but child joint is at a unit vector from
#child masscenter.
N, A, P, C = _generate_body()
PrismaticJoint('S', P, C, child_interframe=A.y, child_point=A.x)
assert A.y.angle_between(N.x) == 0 # Axes are aligned
assert A.y.express(N) == N.x
assert A.dcm(N) == Matrix([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
assert C.masscenter.vel(N) == u * N.x
assert C.masscenter.vel(N).express(A) == u * A.y
assert C.masscenter.pos_from(P.masscenter) == q*N.x - A.x
assert C.masscenter.pos_from(P.masscenter).express(N).simplify() == q*N.x + N.y
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
#Similar to previous case but wrt parent body
N, A, P, C = _generate_body()
PrismaticJoint('S', P, C, parent_interframe=N.y, parent_point=N.x)
assert N.y.angle_between(A.x) == 0 # Axes are aligned
assert N.y.express(A) == A.x
assert A.dcm(N) == Matrix([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
assert C.masscenter.vel(N) == u * N.y
assert C.masscenter.vel(N).express(A) == u * A.x
assert C.masscenter.pos_from(P.masscenter) == N.x + q*N.y
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
# Both joint positions are defined but with different axes
N, A, P, C = _generate_body()
PrismaticJoint('S', P, C, parent_point=N.x, child_point=A.x,
child_interframe=A.x + A.y)
assert N.x.angle_between(A.x + A.y) == 0 # Axes are aligned
assert (A.x + A.y).express(N) == sqrt(2)*N.x
assert A.dcm(N) == Matrix([[sqrt(2)/2, -sqrt(2)/2, 0], [sqrt(2)/2, sqrt(2)/2, 0], [0, 0, 1]])
assert C.masscenter.pos_from(P.masscenter) == (q + 1)*N.x - A.x
assert C.masscenter.pos_from(P.masscenter).express(N) == (q - sqrt(2)/2 + 1)*N.x + sqrt(2)/2*N.y
assert C.masscenter.vel(N).express(A) == u * (A.x + A.y)/sqrt(2)
assert C.masscenter.vel(N) == u*N.x
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
N, A, P, C = _generate_body()
PrismaticJoint('S', P, C, parent_point=N.x, child_point=A.x,
child_interframe=A.x + A.y - A.z)
assert N.x.angle_between(A.x + A.y - A.z) == 0 # Axes are aligned
assert (A.x + A.y - A.z).express(N) == sqrt(3)*N.x
assert _simplify_matrix(A.dcm(N)) == Matrix([[sqrt(3)/3, -sqrt(3)/3, sqrt(3)/3],
[sqrt(3)/3, sqrt(3)/6 + S(1)/2, S(1)/2 - sqrt(3)/6],
[-sqrt(3)/3, S(1)/2 - sqrt(3)/6, sqrt(3)/6 + S(1)/2]])
assert C.masscenter.pos_from(P.masscenter) == (q + 1)*N.x - A.x
assert C.masscenter.pos_from(P.masscenter).express(N) == \
(q - sqrt(3)/3 + 1)*N.x + sqrt(3)/3*N.y - sqrt(3)/3*N.z
assert C.masscenter.vel(N) == u*N.x
assert C.masscenter.vel(N).express(A) == sqrt(3)*u/3*A.x + sqrt(3)*u/3*A.y - sqrt(3)*u/3*A.z
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
N, A, P, C = _generate_body()
m, n = symbols('m n')
PrismaticJoint('S', P, C, parent_point=m*N.x, child_point=n*A.x,
child_interframe=A.x + A.y - A.z,
parent_interframe=N.x - N.y + N.z)
# 0 angle means that the axes are aligned
assert (N.x-N.y+N.z).angle_between(A.x+A.y-A.z).simplify() == 0
assert (A.x+A.y-A.z).express(N) == N.x - N.y + N.z
assert _simplify_matrix(A.dcm(N)) == Matrix([[-S(1)/3, -S(2)/3, S(2)/3],
[S(2)/3, S(1)/3, S(2)/3],
[-S(2)/3, S(2)/3, S(1)/3]])
assert C.masscenter.pos_from(P.masscenter) == \
(m + sqrt(3)*q/3)*N.x - sqrt(3)*q/3*N.y + sqrt(3)*q/3*N.z - n*A.x
assert C.masscenter.pos_from(P.masscenter).express(N) == \
(m + n/3 + sqrt(3)*q/3)*N.x + (2*n/3 - sqrt(3)*q/3)*N.y + (-2*n/3 + sqrt(3)*q/3)*N.z
assert C.masscenter.vel(N) == sqrt(3)*u/3*N.x - sqrt(3)*u/3*N.y + sqrt(3)*u/3*N.z
assert C.masscenter.vel(N).express(A) == sqrt(3)*u/3*A.x + sqrt(3)*u/3*A.y - sqrt(3)*u/3*A.z
assert A.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == 0
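# CylindricalJoint: rotation about and translation along the same joint axis.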
def test_cylindrical_joint():
N, A, P, C = _generate_body()
q0_def, q1_def, u0_def, u1_def = dynamicsymbols('q0:2_J, u0:2_J')
Cj = CylindricalJoint('J', P, C)
assert Cj.name == 'J'
assert Cj.parent == P
assert Cj.child == C
assert Cj.coordinates == Matrix([q0_def, q1_def])
assert Cj.speeds == Matrix([u0_def, u1_def])
assert Cj.rotation_coordinate == q0_def
assert Cj.translation_coordinate == q1_def
assert Cj.rotation_speed == u0_def
assert Cj.translation_speed == u1_def
assert Cj.kdes == Matrix([u0_def - q0_def.diff(t), u1_def - q1_def.diff(t)])
assert Cj.joint_axis == N.x
assert Cj.child_point.pos_from(C.masscenter) == Vector(0)
assert Cj.parent_point.pos_from(P.masscenter) == Vector(0)
assert Cj.parent_point.pos_from(Cj._child_point) == -q1_def * N.x
assert C.masscenter.pos_from(P.masscenter) == q1_def * N.x
assert Cj.child_point.vel(N) == u1_def * N.x
assert A.ang_vel_in(N) == u0_def * N.x
assert Cj.parent_interframe == N
assert Cj.child_interframe == A
assert Cj.__str__() == 'CylindricalJoint: J parent: P child: C'
q0, q1, u0, u1 = dynamicsymbols('q0:2, u0:2')
l, m = symbols('l, m')
N, A, P, C, Pint, Cint = _generate_body(True)
Cj = CylindricalJoint('J', P, C, rotation_coordinate=q0, rotation_speed=u0,
translation_speed=u1, parent_point=m * N.x,
child_point=l * A.y, parent_interframe=Pint,
child_interframe=Cint, joint_axis=2 * N.z)
assert Cj.coordinates == Matrix([q0, q1_def])
assert Cj.speeds == Matrix([u0, u1])
assert Cj.rotation_coordinate == q0
assert Cj.translation_coordinate == q1_def
assert Cj.rotation_speed == u0
assert Cj.translation_speed == u1
assert Cj.kdes == Matrix([u0 - q0.diff(t), u1 - q1_def.diff(t)])
assert Cj.joint_axis == 2 * N.z
assert Cj.child_point.pos_from(C.masscenter) == l * A.y
assert Cj.parent_point.pos_from(P.masscenter) == m * N.x
assert Cj.parent_point.pos_from(Cj._child_point) == -q1_def * N.z
assert C.masscenter.pos_from(
P.masscenter) == m * N.x + q1_def * N.z - l * A.y
assert C.masscenter.vel(N) == u1 * N.z - u0 * l * A.z
assert A.ang_vel_in(N) == u0 * N.z
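# PlanarJoint: one rotation about the rotation axis plus two translations along
# the planar vectors.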
def test_planar_joint():
N, A, P, C = _generate_body()
q0_def, q1_def, q2_def = dynamicsymbols('q0:3_J')
u0_def, u1_def, u2_def = dynamicsymbols('u0:3_J')
Cj = PlanarJoint('J', P, C)
assert Cj.name == 'J'
assert Cj.parent == P
assert Cj.child == C
assert Cj.coordinates == Matrix([q0_def, q1_def, q2_def])
assert Cj.speeds == Matrix([u0_def, u1_def, u2_def])
assert Cj.rotation_coordinate == q0_def
assert Cj.planar_coordinates == Matrix([q1_def, q2_def])
assert Cj.rotation_speed == u0_def
assert Cj.planar_speeds == Matrix([u1_def, u2_def])
assert Cj.kdes == Matrix([u0_def - q0_def.diff(t), u1_def - q1_def.diff(t),
u2_def - q2_def.diff(t)])
assert Cj.rotation_axis == N.x
assert Cj.planar_vectors == [N.y, N.z]
assert Cj.child_point.pos_from(C.masscenter) == Vector(0)
assert Cj.parent_point.pos_from(P.masscenter) == Vector(0)
r_P_C = q1_def * N.y + q2_def * N.z
assert Cj.parent_point.pos_from(Cj.child_point) == -r_P_C
assert C.masscenter.pos_from(P.masscenter) == r_P_C
assert Cj.child_point.vel(N) == u1_def * N.y + u2_def * N.z
assert A.ang_vel_in(N) == u0_def * N.x
assert Cj.parent_interframe == N
assert Cj.child_interframe == A
assert Cj.__str__() == 'PlanarJoint: J parent: P child: C'
q0, q1, q2, u0, u1, u2 = dynamicsymbols('q0:3, u0:3')
l, m = symbols('l, m')
N, A, P, C, Pint, Cint = _generate_body(True)
Cj = PlanarJoint('J', P, C, rotation_coordinate=q0,
planar_coordinates=[q1, q2], planar_speeds=[u1, u2],
parent_point=m * N.x, child_point=l * A.y,
parent_interframe=Pint, child_interframe=Cint)
assert Cj.coordinates == Matrix([q0, q1, q2])
assert Cj.speeds == Matrix([u0_def, u1, u2])
assert Cj.rotation_coordinate == q0
assert Cj.planar_coordinates == Matrix([q1, q2])
assert Cj.rotation_speed == u0_def
assert Cj.planar_speeds == Matrix([u1, u2])
assert Cj.kdes == Matrix([u0_def - q0.diff(t), u1 - q1.diff(t),
u2 - q2.diff(t)])
assert Cj.rotation_axis == Pint.x
assert Cj.planar_vectors == [Pint.y, Pint.z]
assert Cj.child_point.pos_from(C.masscenter) == l * A.y
assert Cj.parent_point.pos_from(P.masscenter) == m * N.x
assert Cj.parent_point.pos_from(Cj.child_point) == q1 * N.y + q2 * N.z
assert C.masscenter.pos_from(
P.masscenter) == m * N.x - q1 * N.y - q2 * N.z - l * A.y
assert C.masscenter.vel(N) == -u1 * N.y - u2 * N.z + u0_def * l * A.x
assert A.ang_vel_in(N) == u0_def * N.x
def test_planar_joint_advanced():
# Tests whether someone is able to just specify two normals, which will form
# the rotation axis seen from the parent and child body.
# This specific example is a block on a slope inclined at 30 degrees, so in
# the zero configuration the frames of the parent and child are actually
# aligned.
q0, q1, q2, u0, u1, u2 = dynamicsymbols('q0:3, u0:3')
l1, l2 = symbols('l1:3')
N, A, P, C = _generate_body()
J = PlanarJoint('J', P, C, q0, [q1, q2], u0, [u1, u2],
parent_point=l1 * N.z,
child_point=-l2 * C.z,
parent_interframe=N.z + N.y / sqrt(3),
child_interframe=A.z + A.y / sqrt(3))
assert J.rotation_axis.express(N) == (N.z + N.y / sqrt(3)).normalize()
assert J.rotation_axis.express(A) == (A.z + A.y / sqrt(3)).normalize()
assert J.rotation_axis.angle_between(N.z) == pi / 6
assert N.dcm(A).xreplace({q0: 0, q1: 0, q2: 0}) == eye(3)
N_R_A = Matrix([
[cos(q0), -sqrt(3) * sin(q0) / 2, sin(q0) / 2],
[sqrt(3) * sin(q0) / 2, 3 * cos(q0) / 4 + 1 / 4,
sqrt(3) * (1 - cos(q0)) / 4],
[-sin(q0) / 2, sqrt(3) * (1 - cos(q0)) / 4, cos(q0) / 4 + 3 / 4]])
# Checking N.dcm(A) == N_R_A directly did not work, so compare the difference
assert _simplify_matrix(N.dcm(A) - N_R_A) == zeros(3)
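# SphericalJoint: three rotational degrees of freedom, using a body-fixed 123
# rotation order by default.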
def test_spherical_joint():
N, A, P, C = _generate_body()
q0, q1, q2, u0, u1, u2 = dynamicsymbols('q0:3_S, u0:3_S')
S = SphericalJoint('S', P, C)
assert S.name == 'S'
assert S.parent == P
assert S.child == C
assert S.coordinates == Matrix([q0, q1, q2])
assert S.speeds == Matrix([u0, u1, u2])
assert S.kdes == Matrix([u0 - q0.diff(t), u1 - q1.diff(t), u2 - q2.diff(t)])
assert S.child_point.pos_from(C.masscenter) == Vector(0)
assert S.parent_point.pos_from(P.masscenter) == Vector(0)
assert S.parent_point.pos_from(S.child_point) == Vector(0)
assert P.masscenter.pos_from(C.masscenter) == Vector(0)
assert C.masscenter.vel(N) == Vector(0)
assert P.ang_vel_in(C) == (-u0 * cos(q1) * cos(q2) - u1 * sin(q2)) * A.x + (
u0 * sin(q2) * cos(q1) - u1 * cos(q2)) * A.y + (
-u0 * sin(q1) - u2) * A.z
assert C.ang_vel_in(P) == (u0 * cos(q1) * cos(q2) + u1 * sin(q2)) * A.x + (
-u0 * sin(q2) * cos(q1) + u1 * cos(q2)) * A.y + (
u0 * sin(q1) + u2) * A.z
assert S.__str__() == 'SphericalJoint: S parent: P child: C'
assert S._rot_type == 'BODY'
assert S._rot_order == 123
assert S._amounts is None
def test_spherical_joint_speeds_as_derivative_terms():
# This test checks whether the system remains valid if the user chooses to
# pass the derivative of the generalized coordinates as generalized speeds
q0, q1, q2 = dynamicsymbols('q0:3')
u0, u1, u2 = dynamicsymbols('q0:3', 1)
N, A, P, C = _generate_body()
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2])
assert S.coordinates == Matrix([q0, q1, q2])
assert S.speeds == Matrix([u0, u1, u2])
assert S.kdes == Matrix([0, 0, 0])
assert P.ang_vel_in(C) == (-u0 * cos(q1) * cos(q2) - u1 * sin(q2)) * A.x + (
u0 * sin(q2) * cos(q1) - u1 * cos(q2)) * A.y + (
-u0 * sin(q1) - u2) * A.z
def test_spherical_joint_coords():
q0s, q1s, q2s, u0s, u1s, u2s = dynamicsymbols('q0:3_S, u0:3_S')
q0, q1, q2, q3, u0, u1, u2, u4 = dynamicsymbols('q0:4, u0:4')
# Test coordinates as list
N, A, P, C = _generate_body()
S = SphericalJoint('S', P, C, [q0, q1, q2], [u0, u1, u2])
assert S.coordinates == Matrix([q0, q1, q2])
assert S.speeds == Matrix([u0, u1, u2])
# Test coordinates as Matrix
N, A, P, C = _generate_body()
S = SphericalJoint('S', P, C, Matrix([q0, q1, q2]),
Matrix([u0, u1, u2]))
assert S.coordinates == Matrix([q0, q1, q2])
assert S.speeds == Matrix([u0, u1, u2])
# Test too few generalized coordinates
N, A, P, C = _generate_body()
raises(ValueError,
lambda: SphericalJoint('S', P, C, Matrix([q0, q1]), Matrix([u0])))
# Test too many generalized coordinates
raises(ValueError, lambda: SphericalJoint(
'S', P, C, Matrix([q0, q1, q2, q3]), Matrix([u0, u1, u2])))
raises(ValueError, lambda: SphericalJoint(
'S', P, C, Matrix([q0, q1, q2]), Matrix([u0, u1, u2, u4])))
def test_spherical_joint_orient_body():
q0, q1, q2, u0, u1, u2 = dynamicsymbols('q0:3, u0:3')
N_R_A = Matrix([
[-sin(q1), -sin(q2) * cos(q1), cos(q1) * cos(q2)],
[-sin(q0) * cos(q1), sin(q0) * sin(q1) * sin(q2) - cos(q0) * cos(q2),
-sin(q0) * sin(q1) * cos(q2) - sin(q2) * cos(q0)],
[cos(q0) * cos(q1), -sin(q0) * cos(q2) - sin(q1) * sin(q2) * cos(q0),
-sin(q0) * sin(q2) + sin(q1) * cos(q0) * cos(q2)]])
N_w_A = Matrix([[-u0 * sin(q1) - u2],
[-u0 * sin(q2) * cos(q1) + u1 * cos(q2)],
[u0 * cos(q1) * cos(q2) + u1 * sin(q2)]])
N_v_Co = Matrix([
[-sqrt(2) * (u0 * cos(q2 + pi / 4) * cos(q1) + u1 * sin(q2 + pi / 4))],
[-u0 * sin(q1) - u2], [-u0 * sin(q1) - u2]])
# Test default rot_type='BODY', rot_order=123
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.y, child_point=-A.y + A.z,
parent_interframe=Pint, child_interframe=Cint,
rot_type='body', rot_order=123)
assert S._rot_type.upper() == 'BODY'
assert S._rot_order == 123
assert _simplify_matrix(N.dcm(A) - N_R_A) == zeros(3)
assert A.ang_vel_in(N).to_matrix(A) == N_w_A
assert C.masscenter.vel(N).to_matrix(A) == N_v_Co
# Test change of amounts
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.y, child_point=-A.y + A.z,
parent_interframe=Pint, child_interframe=Cint,
rot_type='BODY', amounts=(q1, q0, q2), rot_order=123)
switch_order = lambda expr: expr.xreplace(
{q0: q1, q1: q0, q2: q2, u0: u1, u1: u0, u2: u2})
assert S._rot_type.upper() == 'BODY'
assert S._rot_order == 123
assert _simplify_matrix(N.dcm(A) - switch_order(N_R_A)) == zeros(3)
assert A.ang_vel_in(N).to_matrix(A) == switch_order(N_w_A)
assert C.masscenter.vel(N).to_matrix(A) == switch_order(N_v_Co)
# Test different rot_order
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.y, child_point=-A.y + A.z,
parent_interframe=Pint, child_interframe=Cint,
rot_type='BodY', rot_order='yxz')
assert S._rot_type.upper() == 'BODY'
assert S._rot_order == 'yxz'
assert _simplify_matrix(N.dcm(A) - Matrix([
[-sin(q0) * cos(q1), sin(q0) * sin(q1) * cos(q2) - sin(q2) * cos(q0),
sin(q0) * sin(q1) * sin(q2) + cos(q0) * cos(q2)],
[-sin(q1), -cos(q1) * cos(q2), -sin(q2) * cos(q1)],
[cos(q0) * cos(q1), -sin(q0) * sin(q2) - sin(q1) * cos(q0) * cos(q2),
sin(q0) * cos(q2) - sin(q1) * sin(q2) * cos(q0)]])) == zeros(3)
assert A.ang_vel_in(N).to_matrix(A) == Matrix([
[u0 * sin(q1) - u2], [u0 * cos(q1) * cos(q2) - u1 * sin(q2)],
[u0 * sin(q2) * cos(q1) + u1 * cos(q2)]])
assert C.masscenter.vel(N).to_matrix(A) == Matrix([
[-sqrt(2) * (u0 * sin(q2 + pi / 4) * cos(q1) + u1 * cos(q2 + pi / 4))],
[u0 * sin(q1) - u2], [u0 * sin(q1) - u2]])
def test_spherical_joint_orient_space():
q0, q1, q2, u0, u1, u2 = dynamicsymbols('q0:3, u0:3')
N_R_A = Matrix([
[-sin(q0) * sin(q2) - sin(q1) * cos(q0) * cos(q2),
sin(q0) * sin(q1) * cos(q2) - sin(q2) * cos(q0), cos(q1) * cos(q2)],
[-sin(q0) * cos(q2) + sin(q1) * sin(q2) * cos(q0),
-sin(q0) * sin(q1) * sin(q2) - cos(q0) * cos(q2), -sin(q2) * cos(q1)],
[cos(q0) * cos(q1), -sin(q0) * cos(q1), sin(q1)]])
N_w_A = Matrix([
[u1 * sin(q0) - u2 * cos(q0) * cos(q1)],
[u1 * cos(q0) + u2 * sin(q0) * cos(q1)], [u0 - u2 * sin(q1)]])
N_v_Co = Matrix([
[u0 - u2 * sin(q1)], [u0 - u2 * sin(q1)],
[sqrt(2) * (-u1 * sin(q0 + pi / 4) + u2 * cos(q0 + pi / 4) * cos(q1))]])
# Test rot_type='SPACE' with the default rot_order=123
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.z, child_point=-A.x + A.y,
parent_interframe=Pint, child_interframe=Cint,
rot_type='space', rot_order=123)
assert S._rot_type.upper() == 'SPACE'
assert S._rot_order == 123
assert _simplify_matrix(N.dcm(A) - N_R_A) == zeros(3)
assert _simplify_matrix(A.ang_vel_in(N).to_matrix(A)) == N_w_A
assert _simplify_matrix(C.masscenter.vel(N).to_matrix(A)) == N_v_Co
# Test change of amounts
switch_order = lambda expr: expr.xreplace(
{q0: q1, q1: q0, q2: q2, u0: u1, u1: u0, u2: u2})
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.z, child_point=-A.x + A.y,
parent_interframe=Pint, child_interframe=Cint,
rot_type='SPACE', amounts=(q1, q0, q2), rot_order=123)
assert S._rot_type.upper() == 'SPACE'
assert S._rot_order == 123
assert _simplify_matrix(N.dcm(A) - switch_order(N_R_A)) == zeros(3)
assert _simplify_matrix(A.ang_vel_in(N).to_matrix(A)) == switch_order(N_w_A)
assert _simplify_matrix(C.masscenter.vel(N).to_matrix(A)) == switch_order(N_v_Co)
# Test different rot_order
N, A, P, C, Pint, Cint = _generate_body(True)
S = SphericalJoint('S', P, C, coordinates=[q0, q1, q2], speeds=[u0, u1, u2],
parent_point=N.x + N.z, child_point=-A.x + A.y,
parent_interframe=Pint, child_interframe=Cint,
rot_type='SPaCe', rot_order='zxy')
assert S._rot_type.upper() == 'SPACE'
assert S._rot_order == 'zxy'
assert _simplify_matrix(N.dcm(A) - Matrix([
[-sin(q2) * cos(q1), -sin(q0) * cos(q2) + sin(q1) * sin(q2) * cos(q0),
sin(q0) * sin(q1) * sin(q2) + cos(q0) * cos(q2)],
[-sin(q1), -cos(q0) * cos(q1), -sin(q0) * cos(q1)],
[cos(q1) * cos(q2), -sin(q0) * sin(q2) - sin(q1) * cos(q0) * cos(q2),
-sin(q0) * sin(q1) * cos(q2) + sin(q2) * cos(q0)]]))
assert _simplify_matrix(A.ang_vel_in(N).to_matrix(A) - Matrix([
[-u0 + u2 * sin(q1)], [-u1 * sin(q0) + u2 * cos(q0) * cos(q1)],
[u1 * cos(q0) + u2 * sin(q0) * cos(q1)]])) == zeros(3, 1)
assert _simplify_matrix(C.masscenter.vel(N).to_matrix(A) - Matrix([
[u1 * cos(q0) + u2 * sin(q0) * cos(q1)],
[u1 * cos(q0) + u2 * sin(q0) * cos(q1)],
[u0 + u1 * sin(q0) - u2 * sin(q1) -
u2 * cos(q0) * cos(q1)]])) == zeros(3, 1)
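# WeldJoint: rigidly fixes the child to the parent, so there are no generalized
# coordinates, speeds or relative motion.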
def test_weld_joint():
_, _, P, C = _generate_body()
W = WeldJoint('W', P, C)
assert W.name == 'W'
assert W.parent == P
assert W.child == C
assert W.coordinates == Matrix()
assert W.speeds == Matrix()
assert W.kdes == Matrix(1, 0, []).T
assert P.dcm(C) == eye(3)
assert W.child_point.pos_from(C.masscenter) == Vector(0)
assert W.parent_point.pos_from(P.masscenter) == Vector(0)
assert W.parent_point.pos_from(W.child_point) == Vector(0)
assert P.masscenter.pos_from(C.masscenter) == Vector(0)
assert C.masscenter.vel(P.frame) == Vector(0)
assert P.ang_vel_in(C) == 0
assert C.ang_vel_in(P) == 0
assert W.__str__() == 'WeldJoint: W parent: P child: C'
N, A, P, C = _generate_body()
l, m = symbols('l m')
Pint = ReferenceFrame('P_int')
Pint.orient_axis(P.frame, P.y, pi / 2)
W = WeldJoint('W', P, C, parent_point=l * P.frame.x,
child_point=m * C.frame.y, parent_interframe=Pint)
assert W.child_point.pos_from(C.masscenter) == m * C.frame.y
assert W.parent_point.pos_from(P.masscenter) == l * P.frame.x
assert W.parent_point.pos_from(W.child_point) == Vector(0)
assert P.masscenter.pos_from(C.masscenter) == - l * N.x + m * A.y
assert C.masscenter.vel(P.frame) == Vector(0)
assert P.masscenter.vel(Pint) == Vector(0)
assert C.ang_vel_in(P) == 0
assert P.ang_vel_in(C) == 0
assert P.x == A.z
JointsMethod(P, W) # Tests #10770
def test_deprecated_parent_child_axis():
q, u = dynamicsymbols('q_J, u_J')
N, A, P, C = _generate_body()
with warns_deprecated_sympy():
PinJoint('J', P, C, child_axis=-A.x)
assert (-A.x).angle_between(N.x) == 0
assert -A.x.express(N) == N.x
assert A.dcm(N) == Matrix([[-1, 0, 0],
[0, -cos(q), -sin(q)],
[0, -sin(q), cos(q)]])
assert A.ang_vel_in(N) == u * N.x
assert A.ang_vel_in(N).magnitude() == sqrt(u ** 2)
N, A, P, C = _generate_body()
with warns_deprecated_sympy():
PrismaticJoint('J', P, C, parent_axis=P.x + P.y)
assert (A.x).angle_between(N.x + N.y) == 0
assert A.x.express(N) == (N.x + N.y) / sqrt(2)
assert A.dcm(N) == Matrix([[sqrt(2) / 2, sqrt(2) / 2, 0],
[-sqrt(2) / 2, sqrt(2) / 2, 0], [0, 0, 1]])
assert A.ang_vel_in(N) == Vector(0)
def test_deprecated_joint_pos():
N, A, P, C = _generate_body()
with warns_deprecated_sympy():
pin = PinJoint('J', P, C, parent_joint_pos=N.x + N.y,
child_joint_pos=C.y - C.z)
assert pin.parent_point.pos_from(P.masscenter) == N.x + N.y
assert pin.child_point.pos_from(C.masscenter) == C.y - C.z
N, A, P, C = _generate_body()
with warns_deprecated_sympy():
slider = PrismaticJoint('J', P, C, parent_joint_pos=N.z + N.y,
child_joint_pos=C.y - C.x)
assert slider.parent_point.pos_from(P.masscenter) == N.z + N.y
assert slider.child_point.pos_from(C.masscenter) == C.y - C.x
|
8ae1e89ae94117c31a7cb7a8aca19a2c750417c0af1615dcc44b4dd219356a43 | from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, LagrangesMethod, Particle,
inertia, Lagrangian)
from sympy.core.function import (Derivative, Function)
from sympy.core.numbers import pi
from sympy.core.symbol import symbols
from sympy.functions.elementary.trigonometric import (cos, sin, tan)
from sympy.matrices.dense import Matrix
from sympy.simplify.simplify import simplify
from sympy.testing.pytest import raises
def test_invalid_coordinates():
# Simple pendulum, but use symbol instead of dynamicsymbol
l, m, g = symbols('l m g')
q = symbols('q') # Generalized coordinate
N, O = ReferenceFrame('N'), Point('O')
O.set_vel(N, 0)
P = Particle('P', Point('P'), m)
P.point.set_pos(O, l * (sin(q) * N.x - cos(q) * N.y))
P.potential_energy = m * g * P.point.pos_from(O).dot(N.y)
L = Lagrangian(N, P)
raises(ValueError, lambda: LagrangesMethod(L, [q], bodies=P))
def test_disc_on_an_incline_plane():
# Disc rolling on an inclined plane
# First the generalized coordinates are created. The mass center of the
# disc is located from the top vertex of the inclined plane by the generalized
# coordinate 'y'. The orientation of the disc is defined by the angle
# 'theta'. The mass of the disc is 'm' and its radius is 'R'. The length of
# the inclined path is 'l', the angle of inclination is 'alpha'. 'g' is the
# gravitational constant.
y, theta = dynamicsymbols('y theta')
yd, thetad = dynamicsymbols('y theta', 1)
m, g, R, l, alpha = symbols('m g R l alpha')
# Next, we create the inertial reference frame 'N'. A reference frame 'A'
# is attached to the inclined plane. Finally, a frame 'B' is created which is attached to the disc.
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [pi/2 - alpha, N.z])
B = A.orientnew('B', 'Axis', [-theta, A.z])
# Creating the disc 'D'; we create the point that represents the mass
# center of the disc and set its velocity. The inertia dyadic of the disc
# is created. Finally, we create the disc.
Do = Point('Do')
Do.set_vel(N, yd * A.x)
I = m * R**2/2 * B.z | B.z
D = RigidBody('D', Do, B, m, (I, Do))
# To construct the Lagrangian, 'L', of the disc, we determine its kinetic
# and potential energies, T and U, respectively. L is defined as the
# difference between T and U.
D.potential_energy = m * g * (l - y) * sin(alpha)
L = Lagrangian(N, D)
# We then create the list of generalized coordinates and constraint
# equations. The constraint arises due to the disc rolling without slip
# on the inclined path. We then invoke the 'LagrangesMethod' class and
# supply it the necessary arguments to generate the equations of motion.
# The 'rhs' method solves for the q_double_dots (i.e. the second derivatives
# with respect to time of the generalized coordinates) and the Lagrange
# multipliers.
q = [y, theta]
hol_coneqs = [y - R * theta]
m = LagrangesMethod(L, q, hol_coneqs=hol_coneqs)
m.form_lagranges_equations()
rhs = m.rhs()
rhs.simplify()
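# Hand check (a sketch): with the rolling constraint y = R*theta, the kinetic
# energy is T = m*yd**2/2 + (m*R**2/2)*thetad**2/2 = 3*m*yd**2/4 and the
# potential energy is U = m*g*(l - y)*sin(alpha), so Lagrange's equation
# gives ydd = 2*g*sin(alpha)/3, which is the entry of rhs asserted below.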
assert rhs[2] == 2*g*sin(alpha)/3
def test_simp_pen():
# This tests that the equations generated by LagrangesMethod are identical
# to those obtained by hand calculations. The system under consideration is
# the simple pendulum.
# We begin by creating the generalized coordinates as per the requirements
# of LagrangesMethod. We also create the associated symbols
# that characterize the system: 'm' is the mass of the bob and 'l' is the
# length of the massless rigid rod connecting the bob to a point O fixed in
# the inertial frame.
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u ', 1)
l, m, g = symbols('l m g')
# We then create the inertial frame and a frame attached to the massless
# string following which we define the inertial angular velocity of the
# string.
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q, N.z])
A.set_ang_vel(N, qd * N.z)
# Next, we create the point O and fix it in the inertial frame. We then
# locate the point P to which the bob is attached. Its corresponding
# velocity is then determined by the 'two point formula'.
O = Point('O')
O.set_vel(N, 0)
P = O.locatenew('P', l * A.x)
P.v2pt_theory(O, N, A)
# The 'Particle' which represents the bob is then created and its
# Lagrangian generated.
Pa = Particle('Pa', P, m)
Pa.potential_energy = - m * g * l * cos(q)
L = Lagrangian(N, Pa)
# The 'LagrangesMethod' class is invoked to obtain equations of motion.
lm = LagrangesMethod(L, [q])
lm.form_lagranges_equations()
RHS = lm.rhs()
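# Hand check (a sketch): L = m*l**2*qd**2/2 + m*g*l*cos(q), so Lagrange's
# equation gives m*l**2*qdd = -m*g*l*sin(q), i.e. qdd = -g*sin(q)/l as below.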
assert RHS[1] == -g*sin(q)/l
def test_nonminimal_pendulum():
q1, q2 = dynamicsymbols('q1:3')
q1d, q2d = dynamicsymbols('q1:3', level=1)
L, m, t = symbols('L, m, t')
g = 9.8
# Compose World Frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# Create point P, the pendulum mass
P = pN.locatenew('P1', q1*N.x + q2*N.y)
P.set_vel(N, P.pos_from(pN).dt(N))
pP = Particle('pP', P, m)
# Constraint Equations
f_c = Matrix([q1**2 + q2**2 - L**2])
# Calculate the lagrangian, and form the equations of motion
Lag = Lagrangian(N, pP)
LM = LagrangesMethod(Lag, [q1, q2], hol_coneqs=f_c,
forcelist=[(P, m*g*N.x)], frame=N)
LM.form_lagranges_equations()
# Check solution
lam1 = LM.lam_vec[0, 0]
eom_sol = Matrix([[m*Derivative(q1, t, t) - 9.8*m + 2*lam1*q1],
[m*Derivative(q2, t, t) + 2*lam1*q2]])
assert LM.eom == eom_sol
# Check multiplier solution
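# Hand check (a sketch): differentiating the constraint q1**2 + q2**2 = L**2
# twice and substituting q1dd = 9.8 - 2*lam1*q1/m and q2dd = -2*lam1*q2/m
# from the equations of motion gives
# lam1 = m*(9.8*q1 + q1d**2 + q2d**2)/(2*(q1**2 + q2**2)), i.e. lam_sol below.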
lam_sol = Matrix([(19.6*q1 + 2*q1d**2 + 2*q2d**2)/(4*q1**2/m + 4*q2**2/m)])
assert simplify(LM.solve_multipliers(sol_type='Matrix')) == simplify(lam_sol)
def test_dub_pen():
# The system considered is the double pendulum. As in the simple pendulum
# test above, we begin by creating the generalized coordinates and the
# generalized speeds and accelerations which will be used later. Following
# this we create the frames and points necessary for the kinematics. The
# procedure isn't explained in detail as it is similar to the simple
# pendulum; it is also documented on the pydy.org website.
q1, q2 = dynamicsymbols('q1 q2')
q1d, q2d = dynamicsymbols('q1 q2', 1)
q1dd, q2dd = dynamicsymbols('q1 q2', 2)
u1, u2 = dynamicsymbols('u1 u2')
u1d, u2d = dynamicsymbols('u1 u2', 1)
l, m, g = symbols('l m g')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = N.orientnew('B', 'Axis', [q2, N.z])
A.set_ang_vel(N, q1d * A.z)
B.set_ang_vel(N, q2d * A.z)
O = Point('O')
P = O.locatenew('P', l * A.x)
R = P.locatenew('R', l * B.x)
O.set_vel(N, 0)
P.v2pt_theory(O, N, A)
R.v2pt_theory(P, N, B)
ParP = Particle('ParP', P, m)
ParR = Particle('ParR', R, m)
ParP.potential_energy = - m * g * l * cos(q1)
ParR.potential_energy = - m * g * l * cos(q1) - m * g * l * cos(q2)
L = Lagrangian(N, ParP, ParR)
lm = LagrangesMethod(L, [q1, q2], bodies=[ParP, ParR])
lm.form_lagranges_equations()
assert simplify(l*m*(2*g*sin(q1) + l*sin(q1)*sin(q2)*q2dd
+ l*sin(q1)*cos(q2)*q2d**2 - l*sin(q2)*cos(q1)*q2d**2
+ l*cos(q1)*cos(q2)*q2dd + 2*l*q1dd) - lm.eom[0]) == 0
assert simplify(l*m*(g*sin(q2) + l*sin(q1)*sin(q2)*q1dd
- l*sin(q1)*cos(q2)*q1d**2 + l*sin(q2)*cos(q1)*q1d**2
+ l*cos(q1)*cos(q2)*q1dd + l*q2dd) - lm.eom[1]) == 0
assert lm.bodies == [ParP, ParR]
def test_rolling_disc():
# Rolling Disc Example
# Here the rolling disc is formed from the contact point up, removing the
# need to introduce generalized speeds. Only 3 configuration and 3
# speed variables are needed to describe this system, along with the
# disc's mass and radius, and the local gravity.
q1, q2, q3 = dynamicsymbols('q1 q2 q3')
q1d, q2d, q3d = dynamicsymbols('q1 q2 q3', 1)
r, m, g = symbols('r m g')
# The kinematics are formed by a series of simple rotations. Each simple
# rotation creates a new frame, and the next rotation is defined by the new
# frame's basis vectors. This example uses a 3-1-2 series of rotations, or
# Z, X, Y series of rotations. Angular velocity for this is defined using
# the second frame's basis (the lean frame).
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
# This is the translational kinematics. We create a point with no velocity
# in N; this is the contact point between the disc and ground. Next we form
# the position vector from the contact point to the disc's center of mass.
# Finally we form the velocity and acceleration of the disc.
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
# Forming the inertia dyadic.
I = inertia(L, m/4 * r**2, m/2 * r**2, m/4 * r**2)
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
# Finally we form the equations of motion, using the same steps we did
# before. Supply the Lagrangian and the generalized coordinates.
BodyD.potential_energy = - m * g * r * cos(q2)
Lag = Lagrangian(N, BodyD)
q = [q1, q2, q3]
q1 = Function('q1')
q2 = Function('q2')
q3 = Function('q3')
l = LagrangesMethod(Lag, q)
l.form_lagranges_equations()
RHS = l.rhs()
RHS.simplify()
t = symbols('t')
assert (l.mass_matrix[3:6] == [0, 5*m*r**2/4, 0])
assert RHS[4].simplify() == (
(-8*g*sin(q2(t)) + r*(5*sin(2*q2(t))*Derivative(q1(t), t) +
12*cos(q2(t))*Derivative(q3(t), t))*Derivative(q1(t), t))/(10*r))
assert RHS[5] == (-5*cos(q2(t))*Derivative(q1(t), t) + 6*tan(q2(t)
)*Derivative(q3(t), t) + 4*Derivative(q1(t), t)/cos(q2(t))
)*Derivative(q2(t), t)
|
86bedbcc0503232137e7006aff6fd87362ffd66756fd795d0cc274c53cdcb4bb | from sympy.core.backend import sin, cos, tan, pi, symbols, Matrix, S
from sympy.physics.mechanics import (Particle, Point, ReferenceFrame,
RigidBody)
from sympy.physics.mechanics import (angular_momentum, dynamicsymbols,
inertia, inertia_of_point_mass,
kinetic_energy, linear_momentum,
outer, potential_energy, msubs,
find_dynamicsymbols, Lagrangian)
from sympy.physics.mechanics.functions import (gravity, center_of_mass,
_validate_coordinates)
from sympy.testing.pytest import raises
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
raises(TypeError, lambda: inertia(0, 0, 0, 0))
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
raises(TypeError, lambda: linear_momentum(A, A, Pa))
raises(TypeError, lambda: linear_momentum(N, N, Pa))
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
"""A rod with length 2l, centroidal inertia I, and mass M along with a
particle of mass m fixed to the end of the rod rotate with an angular rate
of omega about point O which is fixed to the non-particle end of the rod.
The rod's reference frame is A and the inertial frame is N."""
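# Hand check (a sketch): the rod's mass center moves with speed l*omega and the
# particle with speed 2*l*omega, so the linear momentum is (M + 2*m)*l*omega
# along N.y, and the angular momentum about O is
# I*omega + M*l*(l*omega) + m*(2*l)*(2*l*omega) = (I + M*l**2 + 4*m*l**2)*omega
# about N.z, matching the expected values asserted below.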
m, M, l, I = symbols('m, M, l, I')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
a = ReferenceFrame('a')
O = Point('O')
Ac = O.locatenew('Ac', l * N.x)
P = Ac.locatenew('P', l * N.x)
O.set_vel(N, 0 * N.x)
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
A = RigidBody('A', Ac, a, M, (I * outer(N.z, N.z), Ac))
expected = 2 * m * omega * l * N.y + M * l * omega * N.y
assert linear_momentum(N, A, Pa) == expected
raises(TypeError, lambda: angular_momentum(N, N, A, Pa))
raises(TypeError, lambda: angular_momentum(O, O, A, Pa))
raises(TypeError, lambda: angular_momentum(O, N, O, Pa))
expected = (I + M * l**2 + 4 * m * l**2) * omega * N.z
assert angular_momentum(O, N, A, Pa) == expected
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
raises(TypeError, lambda: kinetic_energy(Pa, Pa, A))
raises(TypeError, lambda: kinetic_energy(N, N, A))
assert 0 == (kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)).expand()
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.potential_energy = m * g * h
A.potential_energy = M * g * H
assert potential_energy(A, Pa) == m * g * h + M * g * H
def test_Lagrangian():
M, m, g, h = symbols('M m g h')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
P = O.locatenew('P', 1 * N.x)
P.set_vel(N, 10 * N.x)
Pa = Particle('Pa', P, 1)
Ac = O.locatenew('Ac', 2 * N.y)
Ac.set_vel(N, 5 * N.y)
a = ReferenceFrame('a')
a.set_ang_vel(N, 10 * N.z)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, 20, (I, Ac))
Pa.potential_energy = m * g * h
A.potential_energy = M * g * h
raises(TypeError, lambda: Lagrangian(A, A, Pa))
raises(TypeError, lambda: Lagrangian(N, N, Pa))
def test_msubs():
a, b = symbols('a, b')
x, y, z = dynamicsymbols('x, y, z')
# Test simple substitution
expr = Matrix([[a*x + b, x*y.diff() + y],
[x.diff().diff(), z + sin(z.diff())]])
sol = Matrix([[a + b, y],
[x.diff().diff(), 1]])
sd = {x: 1, z: 1, z.diff(): 0, y.diff(): 0}
assert msubs(expr, sd) == sol
# Test smart substitution
expr = cos(x + y)*tan(x + y) + b*x.diff()
sd = {x: 0, y: pi/2, x.diff(): 1}
assert msubs(expr, sd, smart=True) == b + 1
N = ReferenceFrame('N')
v = x*N.x + y*N.y
d = x*(N.x|N.x) + y*(N.y|N.y)
v_sol = 1*N.y
d_sol = 1*(N.y|N.y)
sd = {x: 0, y: 1}
assert msubs(v, sd) == v_sol
assert msubs(d, sd) == d_sol
def test_find_dynamicsymbols():
a, b = symbols('a, b')
x, y, z = dynamicsymbols('x, y, z')
expr = Matrix([[a*x + b, x*y.diff() + y],
[x.diff().diff(), z + sin(z.diff())]])
# Test finding all dynamicsymbols
sol = {x, y.diff(), y, x.diff().diff(), z, z.diff()}
assert find_dynamicsymbols(expr) == sol
# Test finding all but those in sym_list
exclude_list = [x, y, z]
sol = {y.diff(), x.diff().diff(), z.diff()}
assert find_dynamicsymbols(expr, exclude=exclude_list) == sol
# Test finding all dynamicsymbols in a vector with a given reference frame
d, e, f = dynamicsymbols('d, e, f')
A = ReferenceFrame('A')
v = d * A.x + e * A.y + f * A.z
sol = {d, e, f}
assert find_dynamicsymbols(v, reference_frame=A) == sol
# Test if a ValueError is raised on supplying only a vector as input
raises(ValueError, lambda: find_dynamicsymbols(v))
def test_gravity():
N = ReferenceFrame('N')
m, M, g = symbols('m M g')
F1, F2 = dynamicsymbols('F1 F2')
po = Point('po')
pa = Particle('pa', po, m)
A = ReferenceFrame('A')
P = Point('P')
I = outer(A.x, A.x)
B = RigidBody('B', P, A, M, (I, P))
forceList = [(po, F1), (P, F2)]
forceList.extend(gravity(g*N.y, pa, B))
l = [(po, F1), (P, F2), (po, g*m*N.y), (P, g*M*N.y)]
for i in range(len(l)):
for j in range(len(l[i])):
assert forceList[i][j] == l[i][j]
# This function tests the center_of_mass() function
# that was added in PR #14758 to compute the center of
# mass of a system of bodies.
def test_center_of_mass():
a = ReferenceFrame('a')
m = symbols('m', real=True)
p1 = Particle('p1', Point('p1_pt'), S.One)
p2 = Particle('p2', Point('p2_pt'), S(2))
p3 = Particle('p3', Point('p3_pt'), S(3))
p4 = Particle('p4', Point('p4_pt'), m)
b_f = ReferenceFrame('b_f')
b_cm = Point('b_cm')
mb = symbols('mb')
b = RigidBody('b', b_cm, b_f, mb, (outer(b_f.x, b_f.x), b_cm))
p2.point.set_pos(p1.point, a.x)
p3.point.set_pos(p1.point, a.x + a.y)
p4.point.set_pos(p1.point, a.y)
b.masscenter.set_pos(p1.point, a.y + a.z)
point_o=Point('o')
point_o.set_pos(p1.point, center_of_mass(p1.point, p1, p2, p3, p4, b))
expr = 5/(m + mb + 6)*a.x + (m + mb + 3)/(m + mb + 6)*a.y + mb/(m + mb + 6)*a.z
assert point_o.pos_from(p1.point)-expr == 0
def test_validate_coordinates():
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1:4 u1:4')
s1, s2, s3 = symbols('s1:4')
# Test normal
_validate_coordinates([q1, q2, q3], [u1, u2, u3])
# Test not equal number of coordinates and speeds
_validate_coordinates([q1, q2])
_validate_coordinates([q1, q2], [u1])
_validate_coordinates(speeds=[u1, u2])
# Test duplicate
_validate_coordinates([q1, q2, q2], [u1, u2, u3], check_duplicates=False)
raises(ValueError, lambda: _validate_coordinates(
[q1, q2, q2], [u1, u2, u3]))
_validate_coordinates([q1, q2, q3], [u1, u2, u2], check_duplicates=False)
raises(ValueError, lambda: _validate_coordinates(
[q1, q2, q3], [u1, u2, u2], check_duplicates=True))
# Test is_dynamicsymbols
_validate_coordinates([q1 + q2, q3], is_dynamicsymbols=False)
raises(ValueError, lambda: _validate_coordinates([q1 + q2, q3]))
_validate_coordinates([s1, q1, q2], [0, u1, u2], is_dynamicsymbols=False)
raises(ValueError, lambda: _validate_coordinates(
[s1, q1, q2], [0, u1, u2], is_dynamicsymbols=True))
_validate_coordinates([s1 + s2 + s3, q1], [0, u1], is_dynamicsymbols=False)
raises(ValueError, lambda: _validate_coordinates(
[s1 + s2 + s3, q1], [0, u1], is_dynamicsymbols=True))
|
6d39328f6566c472fb64ecd4d7f9465d06c703a31b80db3b4ce0e393ef898b38 | from sympy import solve
from sympy.core.backend import (cos, expand, Matrix, sin, symbols, tan, sqrt, S,
zeros, eye)
from sympy.simplify.simplify import simplify
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, KanesMethod, inertia, Particle,
dot)
from sympy.testing.pytest import raises
from sympy.core.backend import USE_SYMENGINE
def test_invalid_coordinates():
# Simple pendulum, but use symbols instead of dynamicsymbols
l, m, g = symbols('l m g')
q, u = symbols('q u')  # Generalized coordinate and speed
kd = [q.diff(dynamicsymbols._t) - u]
N, O = ReferenceFrame('N'), Point('O')
O.set_vel(N, 0)
P = Particle('P', Point('P'), m)
P.point.set_pos(O, l * (sin(q) * N.x - cos(q) * N.y))
F = (P.point, -m * g * N.y)
raises(ValueError, lambda: KanesMethod(N, [q], [u], kd, bodies=[P],
forcelist=[F]))
def test_one_dof():
# This is for a 1 dof spring-mass-damper case.
# It is described in more detail in the KanesMethod docstring.
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, c, k = symbols('m c k')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, u * N.x)
kd = [qd - u]
FL = [(P, (-k * q - c * u) * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
KM.kanes_equations(BL, FL)
assert KM.bodies == BL
assert KM.loads == FL
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
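# Hand check (a sketch): Newton's second law gives m*udot = -k*q - c*u with
# qd = u, so udot = -(k*q + c*u)/m and the linearized state matrix is
# [[0, 1], [-k/m, -c/m]], matching the assertions below.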
assert expand(rhs[0]) == expand(-(q * k + u * c) / m)
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(2, 1)
assert (KM.linearize(A_and_B=True, )[0] == Matrix([[0, 1], [-k/m, -c/m]]))
def test_two_dof():
# This is for a 2 d.o.f., 2 particle spring-mass-damper.
# The first coordinate is the displacement of the first particle, and the
# second is the relative displacement between the first and second
# particles. Speeds are defined as the time derivatives of the particles.
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
q1d, q2d, u1d, u2d = dynamicsymbols('q1 q2 u1 u2', 1)
m, c1, c2, k1, k2 = symbols('m c1 c2 k1 k2')
N = ReferenceFrame('N')
P1 = Point('P1')
P2 = Point('P2')
P1.set_vel(N, u1 * N.x)
P2.set_vel(N, (u1 + u2) * N.x)
# Note we multiply the kinematic equation by an arbitrary factor
# to test the implicit vs explicit kinematics attribute
kd = [q1d/2 - u1/2, 2*q2d - 2*u2]
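# These are equivalent to q1d = u1 and q2d = u2; the arbitrary factors 1/2 and
# 2 show up on the diagonal of the implicit kinematic mass matrix checked below.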
# Now we create the list of forces, then assign properties to each
# particle, then create a list of all particles.
FL = [(P1, (-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2) * N.x), (P2, (-k2 *
q2 - c2 * u2) * N.x)]
pa1 = Particle('pa1', P1, m)
pa2 = Particle('pa2', P2, m)
BL = [pa1, pa2]
# Finally we create the KanesMethod object, specify the inertial frame,
# pass relevant information, and form Fr & Fr*. Then we calculate the mass
# matrix and forcing terms, and finally solve for the udots.
KM = KanesMethod(N, q_ind=[q1, q2], u_ind=[u1, u2], kd_eqs=kd)
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
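# Hand check (a sketch): for particle 1, m*u1dot = -k1*q1 - c1*u1 + k2*q2 + c2*u2.
# Particle 2 moves with absolute velocity u1 + u2, so m*(u1dot + u2dot) =
# -k2*q2 - c2*u2, giving u2dot = (k1*q1 + c1*u1 - 2*k2*q2 - 2*c2*u2)/m as below.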
assert expand(rhs[0]) == expand((-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2)/m)
assert expand(rhs[1]) == expand((k1 * q1 + c1 * u1 - 2 * k2 * q2 - 2 *
c2 * u2) / m)
# Check that the explicit form is the default and that the kinematic mass matrix is the identity
assert KM.explicit_kinematics
assert KM.mass_matrix_kin == eye(2)
# Check that for the implicit form the mass matrix is not identity
KM.explicit_kinematics = False
assert KM.mass_matrix_kin == Matrix([[S(1)/2, 0], [0, 2]])
# Check that, whether using implicit or explicit kinematics, the RHS
# equations are consistent with the matrix form
for explicit_kinematics in [False, True]:
KM.explicit_kinematics = explicit_kinematics
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(4, 1)
# Make sure an error is raised if nonlinear kinematic differential
# equations are supplied.
kd = [q1d - u1**2, sin(q2d) - cos(u2)]
raises(ValueError, lambda: KanesMethod(N, q_ind=[q1, q2],
u_ind=[u1, u2], kd_eqs=kd))
def test_pend():
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, l, g = symbols('m l g')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, -l * u * sin(q) * N.x + l * u * cos(q) * N.y)
kd = [qd - u]
FL = [(P, m * g * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
rhs.simplify()
assert expand(rhs[0]) == expand(-g / l * sin(q))
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(2, 1)
def test_rolling_disc():
# Rolling Disc Example
# Here the rolling disc is formed from the contact point up, removing the
# need to introduce generalized speeds. Only three configuration and three
# speed variables are needed to describe this system, along with the disc's
# mass and radius, and the local gravity (note that mass will drop out).
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
q1d, q2d, q3d, u1d, u2d, u3d = dynamicsymbols('q1 q2 q3 u1 u2 u3', 1)
r, m, g = symbols('r m g')
# The kinematics are formed by a series of simple rotations. Each simple
# rotation creates a new frame, and the next rotation is defined by the new
# frame's basis vectors. This example uses a 3-1-2 series of rotations, or
# Z, X, Y series of rotations. Angular velocity for this is defined using
# the second frame's basis (the lean frame).
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
w_R_N_qd = R.ang_vel_in(N)
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
# This is the translational kinematics. We create a point with no velocity
# in N; this is the contact point between the disc and ground. Next we form
# the position vector from the contact point to the disc's center of mass.
# Finally we form the velocity and acceleration of the disc.
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
# This is a simple way to form the inertia dyadic.
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
# Kinematic differential equations; how the generalized coordinate time
# derivatives relate to generalized speeds.
kd = [dot(R.ang_vel_in(N) - w_R_N_qd, uv) for uv in L]
# Creation of the force list; it is the gravitational force at the mass
# center of the disc. Then we create the disc by assigning a Point to the
# center of mass attribute, a ReferenceFrame to the frame attribute, and mass
# and inertia. Then we form the body list.
ForceList = [(Dmc, - m * g * Y.z)]
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyList = [BodyD]
# Finally we form the equations of motion, using the same steps we did
# before. Specify inertial frame, supply generalized speeds, supply
# kinematic differential equation dictionary, compute Fr from the force
# list and Fr* from the body list, compute the mass matrix and forcing
# terms, then solve for the u dots (time derivatives of the generalized
# speeds).
KM = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3], kd_eqs=kd)
KM.kanes_equations(BodyList, ForceList)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
kdd = KM.kindiffdict()
rhs = rhs.subs(kdd)
rhs.simplify()
assert rhs.expand() == Matrix([(6*u2*u3*r - u3**2*r*tan(q2) +
4*g*sin(q2))/(5*r), -2*u1*u3/3, u1*(-2*u2 + u3*tan(q2))]).expand()
assert simplify(KM.rhs() -
KM.mass_matrix_full.LUsolve(KM.forcing_full)) == zeros(6, 1)
# This code tests our output vs. benchmark values. When r=g=m=1, the
# critical speed (where all eigenvalues of the linearized equations are 0)
# is 1 / sqrt(3) for the upright case.
A = KM.linearize(A_and_B=True)[0]
A_upright = A.subs({r: 1, g: 1, m: 1}).subs({q1: 0, q2: 0, q3: 0, u1: 0, u3: 0})
import sympy
assert sympy.sympify(A_upright.subs({u2: 1 / sqrt(3)})).eigenvals() == {S.Zero: 6}
def test_aux():
# Same as above, except we have 2 auxiliary speeds for the ground contact
# point, whose velocity is known to be zero. In one case, we go through and
# then substitute the auxiliary speeds in at the end (they are zero, as well
# as their derivatives); in the other case, we use the built-in auxiliary
# speed support of KanesMethod. The equations from each should be the same.
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
q1d, q2d, q3d, u1d, u2d, u3d = dynamicsymbols('q1 q2 q3 u1 u2 u3', 1)
u4, u5, f1, f2 = dynamicsymbols('u4, u5, f1, f2')
u4d, u5d = dynamicsymbols('u4, u5', 1)
r, m, g = symbols('r m g')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
w_R_N_qd = R.ang_vel_in(N)
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
C = Point('C')
C.set_vel(N, u4 * L.x + u5 * (Y.z ^ L.x))
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
Dmc.a2pt_theory(C, N, R)
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
kd = [dot(R.ang_vel_in(N) - w_R_N_qd, uv) for uv in L]
ForceList = [(Dmc, - m * g * Y.z), (C, f1 * L.x + f2 * (Y.z ^ L.x))]
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyList = [BodyD]
KM = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3, u4, u5],
kd_eqs=kd)
(fr, frstar) = KM.kanes_equations(BodyList, ForceList)
fr = fr.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar = frstar.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
KM2 = KanesMethod(N, q_ind=[q1, q2, q3], u_ind=[u1, u2, u3], kd_eqs=kd,
u_auxiliary=[u4, u5])
(fr2, frstar2) = KM2.kanes_equations(BodyList, ForceList)
fr2 = fr2.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar2 = frstar2.subs({u4d: 0, u5d: 0}).subs({u4: 0, u5: 0})
frstar.simplify()
frstar2.simplify()
assert (fr - fr2).expand() == Matrix([0, 0, 0, 0, 0])
assert (frstar - frstar2).expand() == Matrix([0, 0, 0, 0, 0])
def test_parallel_axis():
# This is for a 2 dof inverted pendulum on a cart.
# This tests the parallel axis code in KanesMethod. The inertia of the
# pendulum is defined about the hinge, not about the center of mass.
# Defining the constants and knowns of the system
gravity = symbols('g')
k, ls = symbols('k ls')
a, mA, mC = symbols('a mA mC')
F = dynamicsymbols('F')
Ix, Iy, Iz = symbols('Ix Iy Iz')
# Declaring the Generalized coordinates and speeds
q1, q2 = dynamicsymbols('q1 q2')
q1d, q2d = dynamicsymbols('q1 q2', 1)
u1, u2 = dynamicsymbols('u1 u2')
u1d, u2d = dynamicsymbols('u1 u2', 1)
# Creating reference frames
N = ReferenceFrame('N')
A = ReferenceFrame('A')
A.orient(N, 'Axis', [-q2, N.z])
A.set_ang_vel(N, -u2 * N.z)
# Origin of Newtonian reference frame
O = Point('O')
# Creating and locating the positions of the cart, C, and the
# center of mass of the pendulum, Ao
C = O.locatenew('C', q1 * N.x)
Ao = C.locatenew('Ao', a * A.y)
# Defining velocities of the points
O.set_vel(N, 0)
C.set_vel(N, u1 * N.x)
Ao.v2pt_theory(C, N, A)
Cart = Particle('Cart', C, mC)
Pendulum = RigidBody('Pendulum', Ao, A, mA, (inertia(A, Ix, Iy, Iz), C))
# kinematical differential equations
kindiffs = [q1d - u1, q2d - u2]
bodyList = [Cart, Pendulum]
forceList = [(Ao, -N.y * gravity * mA),
(C, -N.y * gravity * mC),
(C, -N.x * k * (q1 - ls)),
(C, N.x * F)]
km = KanesMethod(N, [q1, q2], [u1, u2], kindiffs)
(fr, frstar) = km.kanes_equations(bodyList, forceList)
mm = km.mass_matrix_full
assert mm[3, 3] == Iz
def test_input_format():
# 1 dof problem from test_one_dof
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u', 1)
m, c, k = symbols('m c k')
N = ReferenceFrame('N')
P = Point('P')
P.set_vel(N, u * N.x)
kd = [qd - u]
FL = [(P, (-k * q - c * u) * N.x)]
pa = Particle('pa', P, m)
BL = [pa]
KM = KanesMethod(N, [q], [u], kd)
# test for input format kane.kanes_equations((body1, body2, particle1))
assert KM.kanes_equations(BL)[0] == Matrix([0])
# test for input format kane.kanes_equations(bodies=(body1, body 2), loads=(load1,load2))
assert KM.kanes_equations(bodies=BL, loads=None)[0] == Matrix([0])
# test for input format kane.kanes_equations(bodies=(body1, body 2), loads=None)
assert KM.kanes_equations(BL, loads=None)[0] == Matrix([0])
# test for input format kane.kanes_equations(bodies=(body1, body 2))
assert KM.kanes_equations(BL)[0] == Matrix([0])
# test for input format kane.kanes_equations(bodies=(body1, body2), loads=[])
assert KM.kanes_equations(BL, [])[0] == Matrix([0])
# test for error raised when a wrong force list (in this case a string) is provided
raises(ValueError, lambda: KM._form_fr('bad input'))
# 1 dof problem from test_one_dof with FL & BL in instance
KM = KanesMethod(N, [q], [u], kd, bodies=BL, forcelist=FL)
assert KM.kanes_equations()[0] == Matrix([-c*u - k*q])
# 2 dof problem from test_two_dof
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
q1d, q2d, u1d, u2d = dynamicsymbols('q1 q2 u1 u2', 1)
m, c1, c2, k1, k2 = symbols('m c1 c2 k1 k2')
N = ReferenceFrame('N')
P1 = Point('P1')
P2 = Point('P2')
P1.set_vel(N, u1 * N.x)
P2.set_vel(N, (u1 + u2) * N.x)
kd = [q1d - u1, q2d - u2]
FL = ((P1, (-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2) * N.x), (P2, (-k2 *
q2 - c2 * u2) * N.x))
pa1 = Particle('pa1', P1, m)
pa2 = Particle('pa2', P2, m)
BL = (pa1, pa2)
KM = KanesMethod(N, q_ind=[q1, q2], u_ind=[u1, u2], kd_eqs=kd)
# test for input format
# kane.kanes_equations((body1, body2), (load1, load2))
KM.kanes_equations(BL, FL)
MM = KM.mass_matrix
forcing = KM.forcing
rhs = MM.inv() * forcing
assert expand(rhs[0]) == expand((-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2)/m)
assert expand(rhs[1]) == expand((k1 * q1 + c1 * u1 - 2 * k2 * q2 - 2 *
c2 * u2) / m)
def test_implicit_kinematics():
# Test that implicit kinematics can handle complicated
# equations that the explicit form struggles with
# See https://github.com/sympy/sympy/issues/22626
# Inertial frame
NED = ReferenceFrame('NED')
NED_o = Point('NED_o')
NED_o.set_vel(NED, 0)
# body frame
q_att = dynamicsymbols('lambda_0:4', real=True)
B = NED.orientnew('B', 'Quaternion', q_att)
# Generalized coordinates
q_pos = dynamicsymbols('B_x:z')
B_cm = NED_o.locatenew('B_cm', q_pos[0]*B.x + q_pos[1]*B.y + q_pos[2]*B.z)
q_ind = q_att[1:] + q_pos
q_dep = [q_att[0]]
kinematic_eqs = []
# Generalized velocities
B_ang_vel = B.ang_vel_in(NED)
P, Q, R = dynamicsymbols('P Q R')
B.set_ang_vel(NED, P*B.x + Q*B.y + R*B.z)
B_ang_vel_kd = (B.ang_vel_in(NED) - B_ang_vel).simplify()
# Equating the two gives us the kinematic equation
kinematic_eqs += [
B_ang_vel_kd & B.x,
B_ang_vel_kd & B.y,
B_ang_vel_kd & B.z
]
B_cm_vel = B_cm.vel(NED)
U, V, W = dynamicsymbols('U V W')
B_cm.set_vel(NED, U*B.x + V*B.y + W*B.z)
# Compute the velocity of the point using the two methods
B_ref_vel_kd = (B_cm.vel(NED) - B_cm_vel)
# taking dot product with unit vectors to get kinematic equations
# relating body coordinates and velocities
# Note that there is a choice to dot with NED.x, NED.y, NED.z here. That
# makes the implicit form have somewhat bigger terms but is still fine;
# the explicit form still struggles either way, though.
kinematic_eqs += [
B_ref_vel_kd & B.x,
B_ref_vel_kd & B.y,
B_ref_vel_kd & B.z,
]
u_ind = [U, V, W, P, Q, R]
# constraints
q_att_vec = Matrix(q_att)
config_cons = [(q_att_vec.T*q_att_vec)[0] - 1] #unit norm
kinematic_eqs = kinematic_eqs + [(q_att_vec.T * q_att_vec.diff())[0]]
try:
KM = KanesMethod(NED, q_ind, u_ind,
q_dependent= q_dep,
kd_eqs = kinematic_eqs,
configuration_constraints = config_cons,
velocity_constraints= [],
u_dependent= [], #no dependent speeds
u_auxiliary = [], # No auxiliary speeds
explicit_kinematics = False # implicit kinematics
)
except Exception as e:
# symengine struggles with these kinematic equations
if USE_SYMENGINE and 'Matrix is rank deficient' in str(e):
return
else:
raise e
# mass and inertia dyadic relative to CM
M_B = symbols('M_B')
J_B = inertia(B, *[S(f'J_B_{ax}')*(1 if ax[0] == ax[1] else -1)
for ax in ['xx', 'yy', 'zz', 'xy', 'yz', 'xz']])
J_B = J_B.subs({S('J_B_xy'): 0, S('J_B_yz'): 0})
RB = RigidBody('RB', B_cm, B, M_B, (J_B, B_cm))
rigid_bodies = [RB]
# Forces
force_list = [
#gravity pointing down
(RB.masscenter, RB.mass*S('g')*NED.z),
#generic forces and torques in body frame(inputs)
(RB.frame, dynamicsymbols('T_z')*B.z),
(RB.masscenter, dynamicsymbols('F_z')*B.z)
]
KM.kanes_equations(rigid_bodies, force_list)
# Expecting the implicit form to require less than 5% of the flops of the explicit form
n_ops_implicit = sum(
[x.count_ops() for x in KM.forcing_full] +
[x.count_ops() for x in KM.mass_matrix_full]
)
# Save implicit kinematic matrices to use later
mass_matrix_kin_implicit = KM.mass_matrix_kin
forcing_kin_implicit = KM.forcing_kin
KM.explicit_kinematics = True
n_ops_explicit = sum(
[x.count_ops() for x in KM.forcing_full] +
[x.count_ops() for x in KM.mass_matrix_full]
)
forcing_kin_explicit = KM.forcing_kin
assert n_ops_implicit / n_ops_explicit < .05
# Ideally we would check that the implicit and explicit equations give the same result, as done in test_one_dof.
# But the whole raison d'etre of the implicit equations is to deal with problems such
# as this one, where the explicit form is too complicated to handle, especially the angular part
# (i.e. such tests would be too slow).
# Instead, we check that the kinematic equations are correct using more fundamental tests:
#
# (1) that we recover the kinematic equations we have provided
assert (mass_matrix_kin_implicit * KM.q.diff() - forcing_kin_implicit) == Matrix(kinematic_eqs)
# (2) that the quaternion rates match what 'textbook' solutions give
# Note that we just use the explicit kinematics for the linear velocities
# as they are not as complicated as the angular ones
qdot_candidate = forcing_kin_explicit
quat_dot_textbook = Matrix([
[0, -P, -Q, -R],
[P, 0, R, -Q],
[Q, -R, 0, P],
[R, Q, -P, 0],
]) * q_att_vec / 2
# Again, if we don't use this "textbook" solution
# sympy will struggle to deal with the terms related to quaternion rates
# due to the number of operations involved
qdot_candidate[-1] = quat_dot_textbook[0] # lambda_0, note the [-1] as sympy's Kane puts the dependent coordinate last
qdot_candidate[0] = quat_dot_textbook[1] # lambda_1
qdot_candidate[1] = quat_dot_textbook[2] # lambda_2
qdot_candidate[2] = quat_dot_textbook[3] # lambda_3
# sub the config constraint in the candidate solution and compare to the implicit rhs
lambda_0_sol = solve(config_cons[0], q_att_vec[0])[1]
lhs_candidate = simplify(mass_matrix_kin_implicit * qdot_candidate).subs({q_att_vec[0]: lambda_0_sol})
assert lhs_candidate == forcing_kin_implicit
|
61e6e9e8c85384810c8d28062d1bac31e95c87ab60a5bbdcbbe1281c00aea0fd | import warnings
from sympy.core.add import Add
from sympy.core.function import (Function, diff)
from sympy.core.numbers import (Number, Rational)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin
from sympy.integrals.integrals import integrate
from sympy.physics.units import (amount_of_substance, area, convert_to, find_unit,
volume, kilometer, joule, molar_gas_constant,
vacuum_permittivity, elementary_charge, volt,
ohm)
from sympy.physics.units.definitions import (amu, au, centimeter, coulomb,
day, foot, grams, hour, inch, kg, km, m, meter, millimeter,
minute, quart, s, second, speed_of_light, bit,
byte, kibibyte, mebibyte, gibibyte, tebibyte, pebibyte, exbibyte,
kilogram, gravitational_constant)
from sympy.physics.units.definitions.dimension_definitions import (
Dimension, charge, length, time, temperature, pressure,
energy, mass
)
from sympy.physics.units.prefixes import PREFIXES, kilo
from sympy.physics.units.quantities import PhysicalConstant, Quantity
from sympy.physics.units.systems import SI
from sympy.testing.pytest import XFAIL, raises, warns_deprecated_sympy
k = PREFIXES["k"]
def test_str_repr():
assert str(kg) == "kilogram"
def test_eq():
# simple test
assert 10*m == 10*m
assert 10*m != 10*s
def test_convert_to():
q = Quantity("q1")
q.set_global_relative_scale_factor(S(5000), meter)
assert q.convert_to(m) == 5000*m
assert speed_of_light.convert_to(m / s) == 299792458 * m / s
# TODO: eventually support this kind of conversion:
# assert (2*speed_of_light).convert_to(m / s) == 2 * 299792458 * m / s
assert day.convert_to(s) == 86400*s
# Wrong dimension to convert:
assert q.convert_to(s) == q
assert speed_of_light.convert_to(m) == speed_of_light
expr = joule*second
conv = convert_to(expr, joule)
assert conv == joule*second
def test_Quantity_definition():
q = Quantity("s10", abbrev="sabbr")
q.set_global_relative_scale_factor(10, second)
u = Quantity("u", abbrev="dam")
u.set_global_relative_scale_factor(10, meter)
km = Quantity("km")
km.set_global_relative_scale_factor(kilo, meter)
v = Quantity("u")
v.set_global_relative_scale_factor(5*kilo, meter)
assert q.scale_factor == 10
assert q.dimension == time
assert q.abbrev == Symbol("sabbr")
assert u.dimension == length
assert u.scale_factor == 10
assert u.abbrev == Symbol("dam")
assert km.scale_factor == 1000
assert km.func(*km.args) == km
assert km.func(*km.args).args == km.args
assert v.dimension == length
assert v.scale_factor == 5000
with warns_deprecated_sympy():
Quantity('invalid', 'dimension', 1)
with warns_deprecated_sympy():
Quantity('mismatch', dimension=length, scale_factor=kg)
def test_abbrev():
u = Quantity("u")
u.set_global_relative_scale_factor(S.One, meter)
assert u.name == Symbol("u")
assert u.abbrev == Symbol("u")
u = Quantity("u", abbrev="om")
u.set_global_relative_scale_factor(S(2), meter)
assert u.name == Symbol("u")
assert u.abbrev == Symbol("om")
assert u.scale_factor == 2
assert isinstance(u.scale_factor, Number)
u = Quantity("u", abbrev="ikm")
u.set_global_relative_scale_factor(3*kilo, meter)
assert u.abbrev == Symbol("ikm")
assert u.scale_factor == 3000
def test_print():
u = Quantity("unitname", abbrev="dam")
assert repr(u) == "unitname"
assert str(u) == "unitname"
def test_Quantity_eq():
u = Quantity("u", abbrev="dam")
v = Quantity("v1")
assert u != v
v = Quantity("v2", abbrev="ds")
assert u != v
v = Quantity("v3", abbrev="dm")
assert u != v
def test_add_sub():
u = Quantity("u")
v = Quantity("v")
w = Quantity("w")
u.set_global_relative_scale_factor(S(10), meter)
v.set_global_relative_scale_factor(S(5), meter)
w.set_global_relative_scale_factor(S(2), second)
assert isinstance(u + v, Add)
assert (u + v.convert_to(u)) == (1 + S.Half)*u
# TODO: eventually add this:
# assert (u + v).convert_to(u) == (1 + S.Half)*u
assert isinstance(u - v, Add)
assert (u - v.convert_to(u)) == S.Half*u
# TODO: eventually add this:
# assert (u - v).convert_to(u) == S.Half*u
def test_quantity_abs():
v_w1 = Quantity('v_w1')
v_w2 = Quantity('v_w2')
v_w3 = Quantity('v_w3')
v_w1.set_global_relative_scale_factor(1, meter/second)
v_w2.set_global_relative_scale_factor(1, meter/second)
v_w3.set_global_relative_scale_factor(1, meter/second)
expr = v_w3 - Abs(v_w1 - v_w2)
assert SI.get_dimensional_expr(v_w1) == (length/time).name
Dq = Dimension(SI.get_dimensional_expr(expr))
with warns_deprecated_sympy():
Dq1 = Dimension(Quantity.get_dimensional_expr(expr))
assert Dq == Dq1
assert SI.get_dimension_system().get_dimensional_dependencies(Dq) == {
length: 1,
time: -1,
}
assert meter == sqrt(meter**2)
def test_check_unit_consistency():
u = Quantity("u")
v = Quantity("v")
w = Quantity("w")
u.set_global_relative_scale_factor(S(10), meter)
v.set_global_relative_scale_factor(S(5), meter)
w.set_global_relative_scale_factor(S(2), second)
def check_unit_consistency(expr):
SI._collect_factor_and_dimension(expr)
raises(ValueError, lambda: check_unit_consistency(u + w))
raises(ValueError, lambda: check_unit_consistency(u - w))
raises(ValueError, lambda: check_unit_consistency(u + 1))
raises(ValueError, lambda: check_unit_consistency(u - 1))
raises(ValueError, lambda: check_unit_consistency(1 - exp(u / w)))
def test_mul_div():
u = Quantity("u")
v = Quantity("v")
t = Quantity("t")
ut = Quantity("ut")
v2 = Quantity("v")
u.set_global_relative_scale_factor(S(10), meter)
v.set_global_relative_scale_factor(S(5), meter)
t.set_global_relative_scale_factor(S(2), second)
ut.set_global_relative_scale_factor(S(20), meter*second)
v2.set_global_relative_scale_factor(S(5), meter/second)
assert 1 / u == u**(-1)
assert u / 1 == u
v1 = u / t
v2 = v
# Pow only supports structural equality:
assert v1 != v2
assert v1 == v2.convert_to(v1)
# TODO: decide whether to allow such expression in the future
# (requires somehow manipulating the core).
# assert u / Quantity('l2', dimension=length, scale_factor=2) == 5
assert u * 1 == u
ut1 = u * t
ut2 = ut
# Mul only supports structural equality:
assert ut1 != ut2
assert ut1 == ut2.convert_to(ut1)
# Mul only supports structural equality:
lp1 = Quantity("lp1")
lp1.set_global_relative_scale_factor(S(2), 1/meter)
assert u * lp1 != 20
assert u**0 == 1
assert u**1 == u
# TODO: Pow only supports structural equality:
u2 = Quantity("u2")
u3 = Quantity("u3")
u2.set_global_relative_scale_factor(S(100), meter**2)
u3.set_global_relative_scale_factor(Rational(1, 10), 1/meter)
assert u ** 2 != u2
assert u ** -1 != u3
assert u ** 2 == u2.convert_to(u)
assert u ** -1 == u3.convert_to(u)
def test_units():
assert convert_to((5*m/s * day) / km, 1) == 432
assert convert_to(foot / meter, meter) == Rational(3048, 10000)
# amu is a pure mass so mass/mass gives a number, not an amount (mol)
# TODO: need better simplification routine:
assert str(convert_to(grams/amu, grams).n(2)) == '6.0e+23'
# Light from the sun needs about 8.3 minutes to reach earth
t = (1*au / speed_of_light) / minute
# TODO: need a better way to simplify expressions containing units:
t = convert_to(convert_to(t, meter / minute), meter)
assert t.simplify() == Rational(49865956897, 5995849160)
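# The rational above is approximately 8.32, consistent with the ~8.3 minutes
# mentioned in the comment.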
# TODO: fix this, it should give `m` without `Abs`
assert sqrt(m**2) == m
assert (sqrt(m))**2 == m
t = Symbol('t')
assert integrate(t*m/s, (t, 1*s, 5*s)) == 12*m*s
assert (t * m/s).integrate((t, 1*s, 5*s)) == 12*m*s
def test_issue_quart():
assert convert_to(4 * quart / inch ** 3, meter) == 231
assert convert_to(4 * quart / inch ** 3, millimeter) == 231
def test_issue_5565():
assert (m < s).is_Relational
def test_find_unit():
assert find_unit('coulomb') == ['coulomb', 'coulombs', 'coulomb_constant']
assert find_unit(coulomb) == ['C', 'coulomb', 'coulombs', 'planck_charge', 'elementary_charge']
assert find_unit(charge) == ['C', 'coulomb', 'coulombs', 'planck_charge', 'elementary_charge']
assert find_unit(inch) == [
'm', 'au', 'cm', 'dm', 'ft', 'km', 'ly', 'mi', 'mm', 'nm', 'pm', 'um',
'yd', 'nmi', 'feet', 'foot', 'inch', 'mile', 'yard', 'meter', 'miles',
'yards', 'inches', 'meters', 'micron', 'microns', 'decimeter',
'kilometer', 'lightyear', 'nanometer', 'picometer', 'centimeter',
'decimeters', 'kilometers', 'lightyears', 'micrometer', 'millimeter',
'nanometers', 'picometers', 'centimeters', 'micrometers',
'millimeters', 'nautical_mile', 'planck_length', 'nautical_miles', 'astronomical_unit',
'astronomical_units']
assert find_unit(inch**-1) == ['D', 'dioptre', 'optical_power']
assert find_unit(length**-1) == ['D', 'dioptre', 'optical_power']
assert find_unit(inch ** 2) == ['ha', 'hectare', 'planck_area']
assert find_unit(inch ** 3) == [
'L', 'l', 'cL', 'cl', 'dL', 'dl', 'mL', 'ml', 'liter', 'quart', 'liters', 'quarts',
'deciliter', 'centiliter', 'deciliters', 'milliliter',
'centiliters', 'milliliters', 'planck_volume']
assert find_unit('voltage') == ['V', 'v', 'volt', 'volts', 'planck_voltage']
assert find_unit(grams) == ['g', 't', 'Da', 'kg', 'mg', 'ug', 'amu', 'mmu', 'amus',
'gram', 'mmus', 'grams', 'pound', 'tonne', 'dalton',
'pounds', 'kilogram', 'kilograms', 'microgram', 'milligram',
'metric_ton', 'micrograms', 'milligrams', 'planck_mass',
'milli_mass_unit', 'atomic_mass_unit', 'atomic_mass_constant']
def test_Quantity_derivative():
x = symbols("x")
assert diff(x*meter, x) == meter
assert diff(x**3*meter**2, x) == 3*x**2*meter**2
assert diff(meter, meter) == 1
assert diff(meter**2, meter) == 2*meter
def test_quantity_postprocessing():
q1 = Quantity('q1')
q2 = Quantity('q2')
SI.set_quantity_dimension(q1, length*pressure**2*temperature/time)
SI.set_quantity_dimension(q2, energy*pressure*temperature/(length**2*time))
assert q1 + q2
q = q1 + q2
Dq = Dimension(SI.get_dimensional_expr(q))
assert SI.get_dimension_system().get_dimensional_dependencies(Dq) == {
length: -1,
mass: 2,
temperature: 1,
time: -5,
}
def test_factor_and_dimension():
assert (3000, Dimension(1)) == SI._collect_factor_and_dimension(3000)
assert (1001, length) == SI._collect_factor_and_dimension(meter + km)
assert (2, length/time) == SI._collect_factor_and_dimension(
meter/second + 36*km/(10*hour))
x, y = symbols('x y')
assert (x + y/100, length) == SI._collect_factor_and_dimension(
x*m + y*centimeter)
cH = Quantity('cH')
SI.set_quantity_dimension(cH, amount_of_substance/volume)
pH = -log(cH)
assert (1, volume/amount_of_substance) == SI._collect_factor_and_dimension(
exp(pH))
v_w1 = Quantity('v_w1')
v_w2 = Quantity('v_w2')
v_w1.set_global_relative_scale_factor(Rational(3, 2), meter/second)
v_w2.set_global_relative_scale_factor(2, meter/second)
expr = Abs(v_w1/2 - v_w2)
assert (Rational(5, 4), length/time) == \
SI._collect_factor_and_dimension(expr)
expr = Rational(5, 2)*second/meter*v_w1 - 3000
assert (-(2996 + Rational(1, 4)), Dimension(1)) == \
SI._collect_factor_and_dimension(expr)
expr = v_w1**(v_w2/v_w1)
assert ((Rational(3, 2))**Rational(4, 3), (length/time)**Rational(4, 3)) == \
SI._collect_factor_and_dimension(expr)
with warns_deprecated_sympy():
assert (3000, Dimension(1)) == Quantity._collect_factor_and_dimension(3000)
@XFAIL
def test_factor_and_dimension_with_Abs():
with warns_deprecated_sympy():
v_w1 = Quantity('v_w1', length/time, Rational(3, 2)*meter/second)
v_w1.set_global_relative_scale_factor(Rational(3, 2), meter/second)
expr = v_w1 - Abs(v_w1)
with warns_deprecated_sympy():
assert (0, length/time) == Quantity._collect_factor_and_dimension(expr)
def test_dimensional_expr_of_derivative():
l = Quantity('l')
t = Quantity('t')
t1 = Quantity('t1')
l.set_global_relative_scale_factor(36, km)
t.set_global_relative_scale_factor(1, hour)
t1.set_global_relative_scale_factor(1, second)
x = Symbol('x')
y = Symbol('y')
f = Function('f')
dfdx = f(x, y).diff(x, y)
dl_dt = dfdx.subs({f(x, y): l, x: t, y: t1})
assert SI.get_dimensional_expr(dl_dt) ==\
SI.get_dimensional_expr(l / t / t1) ==\
Symbol("length")/Symbol("time")**2
assert SI._collect_factor_and_dimension(dl_dt) ==\
SI._collect_factor_and_dimension(l / t / t1) ==\
(10, length/time**2)
def test_get_dimensional_expr_with_function():
v_w1 = Quantity('v_w1')
v_w2 = Quantity('v_w2')
v_w1.set_global_relative_scale_factor(1, meter/second)
v_w2.set_global_relative_scale_factor(1, meter/second)
assert SI.get_dimensional_expr(sin(v_w1)) == \
sin(SI.get_dimensional_expr(v_w1))
assert SI.get_dimensional_expr(sin(v_w1/v_w2)) == 1
def test_binary_information():
assert convert_to(kibibyte, byte) == 1024*byte
assert convert_to(mebibyte, byte) == 1024**2*byte
assert convert_to(gibibyte, byte) == 1024**3*byte
assert convert_to(tebibyte, byte) == 1024**4*byte
assert convert_to(pebibyte, byte) == 1024**5*byte
assert convert_to(exbibyte, byte) == 1024**6*byte
assert kibibyte.convert_to(bit) == 8*1024*bit
assert byte.convert_to(bit) == 8*bit
a = 10*kibibyte*hour
assert convert_to(a, byte) == 10240*byte*hour
assert convert_to(a, minute) == 600*kibibyte*minute
assert convert_to(a, [byte, minute]) == 614400*byte*minute
def test_conversion_with_2_nonstandard_dimensions():
good_grade = Quantity("good_grade")
kilo_good_grade = Quantity("kilo_good_grade")
centi_good_grade = Quantity("centi_good_grade")
kilo_good_grade.set_global_relative_scale_factor(1000, good_grade)
centi_good_grade.set_global_relative_scale_factor(S.One/10**5, kilo_good_grade)
charity_points = Quantity("charity_points")
milli_charity_points = Quantity("milli_charity_points")
missions = Quantity("missions")
milli_charity_points.set_global_relative_scale_factor(S.One/1000, charity_points)
missions.set_global_relative_scale_factor(251, charity_points)
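# Hand check (a sketch): kilo_good_grade = 10**5*centi_good_grade,
# milli_charity_points = missions/(251*1000) and millimeter = centimeter/10,
# which combine to the factor asserted below.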
assert convert_to(
kilo_good_grade*milli_charity_points*millimeter,
[centi_good_grade, missions, centimeter]
) == S.One * 10**5 / (251*1000) / 10 * centi_good_grade*missions*centimeter
def test_eval_subs():
energy, mass, force = symbols('energy mass force')
expr1 = energy/mass
units = {energy: kilogram*meter**2/second**2, mass: kilogram}
assert expr1.subs(units) == meter**2/second**2
expr2 = force/mass
units = {force:gravitational_constant*kilogram**2/meter**2, mass:kilogram}
assert expr2.subs(units) == gravitational_constant*kilogram/meter**2
def test_issue_14932():
assert (log(inch) - log(2)).simplify() == log(inch/2)
assert (log(inch) - log(foot)).simplify() == -log(12)
p = symbols('p', positive=True)
assert (log(inch) - log(p)).simplify() == log(inch/p)
def test_issue_14547():
# the root issue is that an argument with dimensions should
# not raise an error when the `arg - 1` calculation is
# performed in the assumptions system
from sympy.physics.units import foot, inch
from sympy.core.relational import Eq
assert log(foot).is_zero is None
assert log(foot).is_positive is None
assert log(foot).is_nonnegative is None
assert log(foot).is_negative is None
assert log(foot).is_algebraic is None
assert log(foot).is_rational is None
# doesn't raise error
assert Eq(log(foot), log(inch)) is not None # might be False or unevaluated
x = Symbol('x')
e = foot + x
assert e.is_Add and set(e.args) == {foot, x}
e = foot + 1
assert e.is_Add and set(e.args) == {foot, 1}
def test_deprecated_quantity_methods():
step = Quantity("step")
with warns_deprecated_sympy():
step.set_dimension(length)
step.set_scale_factor(2*meter)
assert convert_to(step, centimeter) == 200*centimeter
assert convert_to(1000*step/second, kilometer/second) == 2*kilometer/second
def test_issue_22164():
warnings.simplefilter("error")
dm = Quantity("dm")
SI.set_quantity_dimension(dm, length)
SI.set_quantity_scale_factor(dm, 1)
bad_exp = Quantity("bad_exp")
SI.set_quantity_dimension(bad_exp, length)
SI.set_quantity_scale_factor(bad_exp, 1)
expr = dm ** bad_exp
# deprecation warning is not expected here
SI._collect_factor_and_dimension(expr)
def test_issue_22819():
from sympy.physics.units import tonne, gram, Da
from sympy.physics.units.systems.si import dimsys_SI
assert tonne.convert_to(gram) == 1000000*gram
assert dimsys_SI.get_dimensional_dependencies(area) == {length: 2}
assert Da.scale_factor == 1.66053906660000e-24
def test_issue_20288():
from sympy.core.numbers import E
from sympy.physics.units import energy
u = Quantity('u')
v = Quantity('v')
SI.set_quantity_dimension(u, energy)
SI.set_quantity_dimension(v, energy)
u.set_global_relative_scale_factor(1, joule)
v.set_global_relative_scale_factor(1, joule)
expr = 1 + exp(u**2/v**2)
assert SI._collect_factor_and_dimension(expr) == (1 + E, Dimension(1))
def test_issue_24062():
from sympy.core.numbers import E
from sympy.physics.units import impedance, capacitance, time, ohm, farad, second
R = Quantity('R')
C = Quantity('C')
T = Quantity('T')
SI.set_quantity_dimension(R, impedance)
SI.set_quantity_dimension(C, capacitance)
SI.set_quantity_dimension(T, time)
R.set_global_relative_scale_factor(1, ohm)
C.set_global_relative_scale_factor(1, farad)
T.set_global_relative_scale_factor(1, second)
expr = T / (R * C)
dim = SI._collect_factor_and_dimension(expr)[1]
assert SI.get_dimension_system().is_dimensionless(dim)
exp_expr = 1 + exp(expr)
assert SI._collect_factor_and_dimension(exp_expr) == (1 + E, Dimension(1))
def test_prefixed_property():
assert not meter.is_prefixed
assert not joule.is_prefixed
assert not day.is_prefixed
assert not second.is_prefixed
assert not volt.is_prefixed
assert not ohm.is_prefixed
assert centimeter.is_prefixed
assert kilometer.is_prefixed
assert kilogram.is_prefixed
assert pebibyte.is_prefixed
def test_physics_constant():
from sympy.physics.units import definitions
for name in dir(definitions):
quantity = getattr(definitions, name)
if not isinstance(quantity, Quantity):
continue
if name.endswith('_constant'):
assert isinstance(quantity, PhysicalConstant), f"{quantity} must be PhysicalConstant, but is {type(quantity)}"
assert quantity.is_physical_constant, f"{name} is not marked as physics constant when it should be"
for const in [gravitational_constant, molar_gas_constant, vacuum_permittivity, speed_of_light, elementary_charge]:
assert isinstance(const, PhysicalConstant), f"{const} must be PhysicalConstant, but is {type(const)}"
assert const.is_physical_constant, f"{const} is not marked as physics constant when it should be"
assert not meter.is_physical_constant
assert not joule.is_physical_constant
|
b24e1ab7212b0e58e86bd4386030d6a2e6c13b17e449747798a4e66003bfdaf1 | from sympy.core.numbers import pi
from sympy.core.symbol import symbols
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.matrices.dense import (eye, zeros)
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
from sympy.simplify.simplify import simplify
from sympy.physics.vector import (ReferenceFrame, Vector, CoordinateSym,
dynamicsymbols, time_derivative, express,
dot)
from sympy.physics.vector.frame import _check_frame
from sympy.physics.vector.vector import VectorTypeError
from sympy.testing.pytest import raises
import warnings
Vector.simp = True
def test_dict_list():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
C = ReferenceFrame('C')
D = ReferenceFrame('D')
E = ReferenceFrame('E')
F = ReferenceFrame('F')
B.orient_axis(A, A.x, 1.0)
C.orient_axis(B, B.x, 1.0)
D.orient_axis(C, C.x, 1.0)
assert D._dict_list(A, 0) == [D, C, B, A]
E.orient_axis(D, D.x, 1.0)
assert C._dict_list(A, 0) == [C, B, A]
assert C._dict_list(E, 0) == [C, D, E]
# only 0, 1, 2 permitted for second argument
raises(ValueError, lambda: C._dict_list(E, 5))
# no connecting path
raises(ValueError, lambda: F._dict_list(A, 0))
def test_coordinate_vars():
"""Tests the coordinate variables functionality"""
A = ReferenceFrame('A')
assert CoordinateSym('Ax', A, 0) == A[0]
assert CoordinateSym('Ax', A, 1) == A[1]
assert CoordinateSym('Ax', A, 2) == A[2]
raises(ValueError, lambda: CoordinateSym('Ax', A, 3))
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
assert isinstance(A[0], CoordinateSym) and \
isinstance(A[1], CoordinateSym) and \
isinstance(A[2], CoordinateSym)
assert A.variable_map(A) == {A[0]:A[0], A[1]:A[1], A[2]:A[2]}
assert A[0].frame == A
B = A.orientnew('B', 'Axis', [q, A.z])
assert B.variable_map(A) == {B[2]: A[2], B[1]: -A[0]*sin(q) + A[1]*cos(q),
B[0]: A[0]*cos(q) + A[1]*sin(q)}
assert A.variable_map(B) == {A[0]: B[0]*cos(q) - B[1]*sin(q),
A[1]: B[0]*sin(q) + B[1]*cos(q), A[2]: B[2]}
assert time_derivative(B[0], A) == -A[0]*sin(q)*qd + A[1]*cos(q)*qd
assert time_derivative(B[1], A) == -A[0]*cos(q)*qd - A[1]*sin(q)*qd
assert time_derivative(B[2], A) == 0
assert express(B[0], A, variables=True) == A[0]*cos(q) + A[1]*sin(q)
assert express(B[1], A, variables=True) == -A[0]*sin(q) + A[1]*cos(q)
assert express(B[2], A, variables=True) == A[2]
assert time_derivative(A[0]*A.x + A[1]*A.y + A[2]*A.z, B) == A[1]*qd*A.x - A[0]*qd*A.y
assert time_derivative(B[0]*B.x + B[1]*B.y + B[2]*B.z, A) == - B[1]*qd*B.x + B[0]*qd*B.y
assert express(B[0]*B[1]*B[2], A, variables=True) == \
A[2]*(-A[0]*sin(q) + A[1]*cos(q))*(A[0]*cos(q) + A[1]*sin(q))
assert (time_derivative(B[0]*B[1]*B[2], A) -
(A[2]*(-A[0]**2*cos(2*q) -
2*A[0]*A[1]*sin(2*q) +
A[1]**2*cos(2*q))*qd)).trigsimp() == 0
assert express(B[0]*B.x + B[1]*B.y + B[2]*B.z, A) == \
(B[0]*cos(q) - B[1]*sin(q))*A.x + (B[0]*sin(q) + \
B[1]*cos(q))*A.y + B[2]*A.z
assert express(B[0]*B.x + B[1]*B.y + B[2]*B.z, A, variables=True) == \
A[0]*A.x + A[1]*A.y + A[2]*A.z
assert express(A[0]*A.x + A[1]*A.y + A[2]*A.z, B) == \
(A[0]*cos(q) + A[1]*sin(q))*B.x + \
(-A[0]*sin(q) + A[1]*cos(q))*B.y + A[2]*B.z
assert express(A[0]*A.x + A[1]*A.y + A[2]*A.z, B, variables=True) == \
B[0]*B.x + B[1]*B.y + B[2]*B.z
N = B.orientnew('N', 'Axis', [-q, B.z])
assert N.variable_map(A) == {N[0]: A[0], N[2]: A[2], N[1]: A[1]}
C = A.orientnew('C', 'Axis', [q, A.x + A.y + A.z])
mapping = A.variable_map(C)
assert mapping[A[0]] == 2*C[0]*cos(q)/3 + C[0]/3 - 2*C[1]*sin(q + pi/6)/3 +\
C[1]/3 - 2*C[2]*cos(q + pi/3)/3 + C[2]/3
assert mapping[A[1]] == -2*C[0]*cos(q + pi/3)/3 + \
C[0]/3 + 2*C[1]*cos(q)/3 + C[1]/3 - 2*C[2]*sin(q + pi/6)/3 + C[2]/3
assert mapping[A[2]] == -2*C[0]*sin(q + pi/6)/3 + C[0]/3 - \
2*C[1]*cos(q + pi/3)/3 + C[1]/3 + 2*C[2]*cos(q)/3 + C[2]/3
def test_ang_vel():
q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4')
q1d, q2d, q3d, q4d = dynamicsymbols('q1 q2 q3 q4', 1)
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
D = N.orientnew('D', 'Axis', [q4, N.y])
u1, u2, u3 = dynamicsymbols('u1 u2 u3')
assert A.ang_vel_in(N) == (q1d)*A.z
assert B.ang_vel_in(N) == (q2d)*B.x + (q1d)*A.z
assert C.ang_vel_in(N) == (q3d)*C.y + (q2d)*B.x + (q1d)*A.z
A2 = N.orientnew('A2', 'Axis', [q4, N.y])
assert N.ang_vel_in(N) == 0
assert N.ang_vel_in(A) == -q1d*N.z
assert N.ang_vel_in(B) == -q1d*A.z - q2d*B.x
assert N.ang_vel_in(C) == -q1d*A.z - q2d*B.x - q3d*B.y
assert N.ang_vel_in(A2) == -q4d*N.y
assert A.ang_vel_in(N) == q1d*N.z
assert A.ang_vel_in(A) == 0
assert A.ang_vel_in(B) == - q2d*B.x
assert A.ang_vel_in(C) == - q2d*B.x - q3d*B.y
assert A.ang_vel_in(A2) == q1d*N.z - q4d*N.y
assert B.ang_vel_in(N) == q1d*A.z + q2d*A.x
assert B.ang_vel_in(A) == q2d*A.x
assert B.ang_vel_in(B) == 0
assert B.ang_vel_in(C) == -q3d*B.y
assert B.ang_vel_in(A2) == q1d*A.z + q2d*A.x - q4d*N.y
assert C.ang_vel_in(N) == q1d*A.z + q2d*A.x + q3d*B.y
assert C.ang_vel_in(A) == q2d*A.x + q3d*C.y
assert C.ang_vel_in(B) == q3d*B.y
assert C.ang_vel_in(C) == 0
assert C.ang_vel_in(A2) == q1d*A.z + q2d*A.x + q3d*B.y - q4d*N.y
assert A2.ang_vel_in(N) == q4d*A2.y
assert A2.ang_vel_in(A) == q4d*A2.y - q1d*N.z
assert A2.ang_vel_in(B) == q4d*N.y - q1d*A.z - q2d*A.x
assert A2.ang_vel_in(C) == q4d*N.y - q1d*A.z - q2d*A.x - q3d*B.y
assert A2.ang_vel_in(A2) == 0
C.set_ang_vel(N, u1*C.x + u2*C.y + u3*C.z)
assert C.ang_vel_in(N) == (u1)*C.x + (u2)*C.y + (u3)*C.z
assert N.ang_vel_in(C) == (-u1)*C.x + (-u2)*C.y + (-u3)*C.z
assert C.ang_vel_in(D) == (u1)*C.x + (u2)*C.y + (u3)*C.z + (-q4d)*D.y
assert D.ang_vel_in(C) == (-u1)*C.x + (-u2)*C.y + (-u3)*C.z + (q4d)*D.y
q0 = dynamicsymbols('q0')
q0d = dynamicsymbols('q0', 1)
E = N.orientnew('E', 'Quaternion', (q0, q1, q2, q3))
assert E.ang_vel_in(N) == (
2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1) * E.x +
2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2) * E.y +
2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3) * E.z)
F = N.orientnew('F', 'Body', (q1, q2, q3), 313)
assert F.ang_vel_in(N) == ((sin(q2)*sin(q3)*q1d + cos(q3)*q2d)*F.x +
(sin(q2)*cos(q3)*q1d - sin(q3)*q2d)*F.y + (cos(q2)*q1d + q3d)*F.z)
G = N.orientnew('G', 'Axis', (q1, N.x + N.y))
assert G.ang_vel_in(N) == q1d * (N.x + N.y).normalize()
assert N.ang_vel_in(G) == -q1d * (N.x + N.y).normalize()
def test_dcm():
q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
D = N.orientnew('D', 'Axis', [q4, N.y])
E = N.orientnew('E', 'Space', [q1, q2, q3], '123')
assert N.dcm(C) == Matrix([
[- sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), - sin(q1) *
cos(q2), sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)], [sin(q1) *
cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2), sin(q1) *
sin(q3) - sin(q2) * cos(q1) * cos(q3)], [- sin(q3) * cos(q2), sin(q2),
cos(q2) * cos(q3)]])
# This is a little touchy. Is it ok to use simplify in assert?
test_mat = D.dcm(C) - Matrix(
[[cos(q1) * cos(q3) * cos(q4) - sin(q3) * (- sin(q4) * cos(q2) +
sin(q1) * sin(q2) * cos(q4)), - sin(q2) * sin(q4) - sin(q1) *
cos(q2) * cos(q4), sin(q3) * cos(q1) * cos(q4) + cos(q3) * (- sin(q4) *
cos(q2) + sin(q1) * sin(q2) * cos(q4))], [sin(q1) * cos(q3) +
sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2), sin(q1) * sin(q3) -
sin(q2) * cos(q1) * cos(q3)], [sin(q4) * cos(q1) * cos(q3) -
sin(q3) * (cos(q2) * cos(q4) + sin(q1) * sin(q2) * sin(q4)), sin(q2) *
cos(q4) - sin(q1) * sin(q4) * cos(q2), sin(q3) * sin(q4) * cos(q1) +
cos(q3) * (cos(q2) * cos(q4) + sin(q1) * sin(q2) * sin(q4))]])
assert test_mat.expand() == zeros(3, 3)
assert E.dcm(N) == Matrix(
[[cos(q2)*cos(q3), sin(q3)*cos(q2), -sin(q2)],
[sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) +
cos(q1)*cos(q3), sin(q1)*cos(q2)], [sin(q1)*sin(q3) +
sin(q2)*cos(q1)*cos(q3), - sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1),
cos(q1)*cos(q2)]])
def test_w_diff_dcm1():
# Ref:
# Dynamics Theory and Applications, Kane 1985
# Sec. 2.1 ANGULAR VELOCITY
A = ReferenceFrame('A')
B = ReferenceFrame('B')
c11, c12, c13 = dynamicsymbols('C11 C12 C13')
c21, c22, c23 = dynamicsymbols('C21 C22 C23')
c31, c32, c33 = dynamicsymbols('C31 C32 C33')
c11d, c12d, c13d = dynamicsymbols('C11 C12 C13', level=1)
c21d, c22d, c23d = dynamicsymbols('C21 C22 C23', level=1)
c31d, c32d, c33d = dynamicsymbols('C31 C32 C33', level=1)
DCM = Matrix([
[c11, c12, c13],
[c21, c22, c23],
[c31, c32, c33]
])
B.orient(A, 'DCM', DCM)
b1a = (B.x).express(A)
b2a = (B.y).express(A)
b3a = (B.z).express(A)
# Equation (2.1.1)
B.set_ang_vel(A, B.x*(dot((b3a).dt(A), B.y))
+ B.y*(dot((b1a).dt(A), B.z))
+ B.z*(dot((b2a).dt(A), B.x)))
# Equation (2.1.21)
expr = ( (c12*c13d + c22*c23d + c32*c33d)*B.x
+ (c13*c11d + c23*c21d + c33*c31d)*B.y
+ (c11*c12d + c21*c22d + c31*c32d)*B.z)
assert B.ang_vel_in(A) - expr == 0
def test_w_diff_dcm2():
q1, q2, q3 = dynamicsymbols('q1:4')
N = ReferenceFrame('N')
A = N.orientnew('A', 'axis', [q1, N.x])
B = A.orientnew('B', 'axis', [q2, A.y])
C = B.orientnew('C', 'axis', [q3, B.z])
DCM = C.dcm(N).T
D = N.orientnew('D', 'DCM', DCM)
    # Frames D and C are the same ReferenceFrame,
    # since they have an equal DCM with respect to frame N.
    # Therefore, D and C should have the same angular velocity in N.
assert D.dcm(N) == C.dcm(N) == Matrix([
[cos(q2)*cos(q3), sin(q1)*sin(q2)*cos(q3) +
sin(q3)*cos(q1), sin(q1)*sin(q3) -
sin(q2)*cos(q1)*cos(q3)], [-sin(q3)*cos(q2),
-sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3),
sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
[sin(q2), -sin(q1)*cos(q2), cos(q1)*cos(q2)]])
assert (D.ang_vel_in(N) - C.ang_vel_in(N)).express(N).simplify() == 0
def test_orientnew_respects_parent_class():
class MyReferenceFrame(ReferenceFrame):
pass
B = MyReferenceFrame('B')
C = B.orientnew('C', 'Axis', [0, B.x])
assert isinstance(C, MyReferenceFrame)
def test_orientnew_respects_input_indices():
N = ReferenceFrame('N')
q1 = dynamicsymbols('q1')
A = N.orientnew('a', 'Axis', [q1, N.z])
#modify default indices:
minds = [x+'1' for x in N.indices]
B = N.orientnew('b', 'Axis', [q1, N.z], indices=minds)
assert N.indices == A.indices
assert B.indices == minds
def test_orientnew_respects_input_latexs():
N = ReferenceFrame('N')
q1 = dynamicsymbols('q1')
A = N.orientnew('a', 'Axis', [q1, N.z])
#build default and alternate latex_vecs:
def_latex_vecs = [(r"\mathbf{\hat{%s}_%s}" % (A.name.lower(),
A.indices[0])), (r"\mathbf{\hat{%s}_%s}" %
(A.name.lower(), A.indices[1])),
(r"\mathbf{\hat{%s}_%s}" % (A.name.lower(),
A.indices[2]))]
name = 'b'
indices = [x+'1' for x in N.indices]
new_latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
(name.lower(), indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
B = N.orientnew(name, 'Axis', [q1, N.z], latexs=new_latex_vecs)
assert A.latex_vecs == def_latex_vecs
assert B.latex_vecs == new_latex_vecs
assert B.indices != indices
def test_orientnew_respects_input_variables():
N = ReferenceFrame('N')
q1 = dynamicsymbols('q1')
A = N.orientnew('a', 'Axis', [q1, N.z])
#build non-standard variable names
name = 'b'
new_variables = ['notb_'+x+'1' for x in N.indices]
B = N.orientnew(name, 'Axis', [q1, N.z], variables=new_variables)
for j,var in enumerate(A.varlist):
assert var.name == A.name + '_' + A.indices[j]
for j,var in enumerate(B.varlist):
assert var.name == new_variables[j]
def test_issue_10348():
u = dynamicsymbols('u:3')
I = ReferenceFrame('I')
I.orientnew('A', 'space', u, 'XYZ')
def test_issue_11503():
A = ReferenceFrame("A")
A.orientnew("B", "Axis", [35, A.y])
C = ReferenceFrame("C")
A.orient(C, "Axis", [70, C.z])
def test_partial_velocity():
N = ReferenceFrame('N')
A = ReferenceFrame('A')
u1, u2 = dynamicsymbols('u1, u2')
A.set_ang_vel(N, u1 * A.x + u2 * N.y)
assert N.partial_velocity(A, u1) == -A.x
assert N.partial_velocity(A, u1, u2) == (-A.x, -N.y)
assert A.partial_velocity(N, u1) == A.x
assert A.partial_velocity(N, u1, u2) == (A.x, N.y)
assert N.partial_velocity(N, u1) == 0
assert A.partial_velocity(A, u1) == 0
def test_issue_11498():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
# Identity transformation
A.orient(B, 'DCM', eye(3))
assert A.dcm(B) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert B.dcm(A) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# x -> y
# y -> -z
# z -> -x
A.orient(B, 'DCM', Matrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))
assert B.dcm(A) == Matrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])
assert A.dcm(B) == Matrix([[0, 0, -1], [1, 0, 0], [0, -1, 0]])
assert B.dcm(A).T == A.dcm(B)
def test_reference_frame():
raises(TypeError, lambda: ReferenceFrame(0))
raises(TypeError, lambda: ReferenceFrame('N', 0))
raises(ValueError, lambda: ReferenceFrame('N', [0, 1]))
raises(TypeError, lambda: ReferenceFrame('N', [0, 1, 2]))
raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], 0))
raises(ValueError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], [0, 1]))
raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], [0, 1, 2]))
raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'],
['a', 'b', 'c'], 0))
raises(ValueError, lambda: ReferenceFrame('N', ['a', 'b', 'c'],
['a', 'b', 'c'], [0, 1]))
raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'],
['a', 'b', 'c'], [0, 1, 2]))
N = ReferenceFrame('N')
assert N[0] == CoordinateSym('N_x', N, 0)
assert N[1] == CoordinateSym('N_y', N, 1)
assert N[2] == CoordinateSym('N_z', N, 2)
raises(ValueError, lambda: N[3])
N = ReferenceFrame('N', ['a', 'b', 'c'])
assert N['a'] == N.x
assert N['b'] == N.y
assert N['c'] == N.z
raises(ValueError, lambda: N['d'])
assert str(N) == 'N'
A = ReferenceFrame('A')
B = ReferenceFrame('B')
q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
raises(TypeError, lambda: A.orient(B, 'DCM', 0))
raises(TypeError, lambda: B.orient(N, 'Space', [q1, q2, q3], '222'))
raises(TypeError, lambda: B.orient(N, 'Axis', [q1, N.x + 2 * N.y], '222'))
raises(TypeError, lambda: B.orient(N, 'Axis', q1))
raises(IndexError, lambda: B.orient(N, 'Axis', [q1]))
raises(TypeError, lambda: B.orient(N, 'Quaternion', [q0, q1, q2, q3], '222'))
raises(TypeError, lambda: B.orient(N, 'Quaternion', q0))
raises(TypeError, lambda: B.orient(N, 'Quaternion', [q0, q1, q2]))
raises(NotImplementedError, lambda: B.orient(N, 'Foo', [q0, q1, q2]))
raises(TypeError, lambda: B.orient(N, 'Body', [q1, q2], '232'))
raises(TypeError, lambda: B.orient(N, 'Space', [q1, q2], '232'))
N.set_ang_acc(B, 0)
assert N.ang_acc_in(B) == Vector(0)
N.set_ang_vel(B, 0)
assert N.ang_vel_in(B) == Vector(0)
def test_check_frame():
raises(VectorTypeError, lambda: _check_frame(0))
def test_dcm_diff_16824():
# NOTE : This is a regression test for the bug introduced in PR 14758,
# identified in 16824, and solved by PR 16828.
    # This is the solution to Problem 2.2 on page 264 in Kane & Levinson's
# 1985 book.
q1, q2, q3 = dynamicsymbols('q1:4')
s1 = sin(q1)
c1 = cos(q1)
s2 = sin(q2)
c2 = cos(q2)
s3 = sin(q3)
c3 = cos(q3)
dcm = Matrix([[c2*c3, s1*s2*c3 - s3*c1, c1*s2*c3 + s3*s1],
[c2*s3, s1*s2*s3 + c3*c1, c1*s2*s3 - c3*s1],
[-s2, s1*c2, c1*c2]])
A = ReferenceFrame('A')
B = ReferenceFrame('B')
B.orient(A, 'DCM', dcm)
AwB = B.ang_vel_in(A)
alpha2 = s3*c2*q1.diff() + c3*q2.diff()
beta2 = s1*c2*q3.diff() + c1*q2.diff()
assert simplify(AwB.dot(A.y) - alpha2) == 0
assert simplify(AwB.dot(B.y) - beta2) == 0
def test_orient_explicit():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
A.orient_explicit(B, eye(3))
assert A.dcm(B) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_orient_axis():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
A.orient_axis(B,-B.x, 1)
A1 = A.dcm(B)
A.orient_axis(B, B.x, -1)
A2 = A.dcm(B)
A.orient_axis(B, 1, -B.x)
A3 = A.dcm(B)
assert A1 == A2
assert A2 == A3
raises(TypeError, lambda: A.orient_axis(B, 1, 1))
def test_orient_body():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
B.orient_body_fixed(A, (1,1,0), 'XYX')
assert B.dcm(A) == Matrix([[cos(1), sin(1)**2, -sin(1)*cos(1)], [0, cos(1), sin(1)], [sin(1), -sin(1)*cos(1), cos(1)**2]])
def test_orient_body_advanced():
q1, q2, q3 = dynamicsymbols('q1:4')
c1, c2, c3 = symbols('c1:4')
u1, u2, u3 = dynamicsymbols('q1:4', 1)
# Test with everything as dynamicsymbols
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, q2, q3), 'zxy')
assert A.dcm(B) == Matrix([
[-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
[-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[-sin(q3) * cos(q2) * u1 + cos(q3) * u2],
[sin(q2) * u1 + u3],
[sin(q3) * u2 + cos(q2) * cos(q3) * u1]])
# Test with constant symbol
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, c2, q3), 131)
assert A.dcm(B) == Matrix([
[cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)],
[sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3),
-sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)],
[sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1),
-sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[cos(c2) * u1 + u3],
[-sin(c2) * cos(q3) * u1],
[sin(c2) * sin(q3) * u1]])
# Test all symbols not time dependent
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (c1, c2, c3), 123)
assert B.ang_vel_in(A) == Vector(0)
def test_orient_space_advanced():
# space fixed is in the end like body fixed only in opposite order
q1, q2, q3 = dynamicsymbols('q1:4')
c1, c2, c3 = symbols('c1:4')
u1, u2, u3 = dynamicsymbols('q1:4', 1)
# Test with everything as dynamicsymbols
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_space_fixed(A, (q3, q2, q1), 'yxz')
assert A.dcm(B) == Matrix([
[-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
[-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[-sin(q3) * cos(q2) * u1 + cos(q3) * u2],
[sin(q2) * u1 + u3],
[sin(q3) * u2 + cos(q2) * cos(q3) * u1]])
# Test with constant symbol
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_space_fixed(A, (q3, c2, q1), 131)
assert A.dcm(B) == Matrix([
[cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)],
[sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3),
-sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)],
[sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1),
-sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[cos(c2) * u1 + u3],
[-sin(c2) * cos(q3) * u1],
[sin(c2) * sin(q3) * u1]])
# Test all symbols not time dependent
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_space_fixed(A, (c1, c2, c3), 123)
assert B.ang_vel_in(A) == Vector(0)
def test_orient_body_simple_ang_vel():
"""This test ensures that the simplest form of that linear system solution
is returned, thus the == for the expression comparison."""
psi, theta, phi = dynamicsymbols('psi, theta, varphi')
t = dynamicsymbols._t
A = ReferenceFrame('A')
B = ReferenceFrame('B')
B.orient_body_fixed(A, (psi, theta, phi), 'ZXZ')
A_w_B = B.ang_vel_in(A)
assert A_w_B.args[0][1] == B
assert A_w_B.args[0][0][0] == (sin(theta)*sin(phi)*psi.diff(t) +
cos(phi)*theta.diff(t))
assert A_w_B.args[0][0][1] == (sin(theta)*cos(phi)*psi.diff(t) -
sin(phi)*theta.diff(t))
assert A_w_B.args[0][0][2] == cos(theta)*psi.diff(t) + phi.diff(t)
def test_orient_space():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
B.orient_space_fixed(A, (0,0,0), '123')
assert B.dcm(A) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_orient_quaternion():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
B.orient_quaternion(A, (0,0,0,0))
assert B.dcm(A) == Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
def test_looped_frame_warning():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
C = ReferenceFrame('C')
a, b, c = symbols('a b c')
B.orient_axis(A, A.x, a)
C.orient_axis(B, B.x, b)
with warnings.catch_warnings(record = True) as w:
warnings.simplefilter("always")
A.orient_axis(C, C.x, c)
assert issubclass(w[-1].category, UserWarning)
assert 'Loops are defined among the orientation of frames. ' + \
'This is likely not desired and may cause errors in your calculations.' in str(w[-1].message)
def test_frame_dict():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
C = ReferenceFrame('C')
a, b, c = symbols('a b c')
B.orient_axis(A, A.x, a)
assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])}
assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]])}
assert C._dcm_dict == {}
B.orient_axis(C, C.x, b)
# Previous relation is not wiped
assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])}
assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]]), \
C: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]])}
assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])}
A.orient_axis(B, B.x, c)
# Previous relation is updated
assert B._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]]),\
A: Matrix([[1, 0, 0],[0, cos(c), -sin(c)],[0, sin(c), cos(c)]])}
assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(c), sin(c)],[0, -sin(c), cos(c)]])}
assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])}
def test_dcm_cache_dict():
A = ReferenceFrame('A')
B = ReferenceFrame('B')
C = ReferenceFrame('C')
D = ReferenceFrame('D')
a, b, c = symbols('a b c')
B.orient_axis(A, A.x, a)
C.orient_axis(B, B.x, b)
D.orient_axis(C, C.x, c)
assert D._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(c), sin(c)],[0, -sin(c), cos(c)]])}
assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]]), \
D: Matrix([[1, 0, 0],[0, cos(c), -sin(c)],[0, sin(c), cos(c)]])}
assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]]), \
C: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])}
assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])}
assert D._dcm_dict == D._dcm_cache
D.dcm(A) # Check calculated dcm relation is stored in _dcm_cache and not in _dcm_dict
assert list(A._dcm_cache.keys()) == [A, B, D]
assert list(D._dcm_cache.keys()) == [C, A]
assert list(A._dcm_dict.keys()) == [B]
assert list(D._dcm_dict.keys()) == [C]
assert A._dcm_dict != A._dcm_cache
A.orient_axis(B, B.x, b) # _dcm_cache of A is wiped out and new relation is stored.
assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]])}
assert A._dcm_dict == A._dcm_cache
assert B._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]]), \
A: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])}
|
06a3093bc9d1c078cc5b0eec0f82fe457aa035773044cebd9d6bbce564c72408 | from collections import Counter
from sympy.core import Mul, sympify
from sympy.core.add import Add
from sympy.core.expr import ExprBuilder
from sympy.core.sorting import default_sort_key
from sympy.functions.elementary.exponential import log
from sympy.matrices.common import ShapeError
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices.expressions.special import ZeroMatrix, OneMatrix
from sympy.strategies import (
unpack, flatten, condition, exhaust, rm_id, sort
)
from sympy.utilities.exceptions import sympy_deprecation_warning
def hadamard_product(*matrices):
"""
Return the elementwise (aka Hadamard) product of matrices.
Examples
========
>>> from sympy import hadamard_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 3)
>>> B = MatrixSymbol('B', 2, 3)
>>> hadamard_product(A)
A
>>> hadamard_product(A, B)
HadamardProduct(A, B)
>>> hadamard_product(A, B)[0, 1]
A[0, 1]*B[0, 1]
"""
if not matrices:
raise TypeError("Empty Hadamard product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
"""
Elementwise product of matrix expressions
Examples
========
Hadamard product for matrix symbols:
>>> from sympy import hadamard_product, HadamardProduct, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> isinstance(hadamard_product(A, B), HadamardProduct)
True
Notes
=====
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the product, use the function
``hadamard_product()`` or ``HadamardProduct.doit``
"""
is_HadamardProduct = True
def __new__(cls, *args, evaluate=False, check=None):
args = list(map(sympify, args))
if len(args) == 0:
# We currently don't have a way to support one-matrices of generic dimensions:
raise ValueError("HadamardProduct needs at least one argument")
if check is not None:
sympy_deprecation_warning(
"Passing check to HadamardProduct is deprecated and the check argument will be removed in a future version.",
deprecated_since_version="1.11",
active_deprecations_target='remove-check-argument-from-matrix-operations')
if check in (True, None):
validate(*args)
else:
sympy_deprecation_warning(
"Passing check=False to HadamardProduct is deprecated and the check argument will be removed in a future version.",
deprecated_since_version="1.11",
active_deprecations_target='remove-check-argument-from-matrix-operations')
obj = super().__new__(cls, *args)
if evaluate:
obj = obj.doit(deep=False)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j, **kwargs):
return Mul(*[arg._entry(i, j, **kwargs) for arg in self.args])
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardProduct(*list(map(transpose, self.args)))
def doit(self, **hints):
expr = self.func(*[i.doit(**hints) for i in self.args])
# Check for explicit matrices:
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.immutable import ImmutableMatrix
explicit = [i for i in expr.args if isinstance(i, MatrixBase)]
if explicit:
remainder = [i for i in expr.args if i not in explicit]
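            # Fold all explicit matrices into a single ImmutableMatrix by
            # multiplying the corresponding entries, keeping the remaining
            # symbolic factors untouched.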
expl_mat = ImmutableMatrix([
Mul.fromiter(i) for i in zip(*explicit)
]).reshape(*self.shape)
expr = HadamardProduct(*([expl_mat] + remainder))
return canonicalize(expr)
def _eval_derivative(self, x):
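        # Product rule: differentiate each factor in turn while keeping the
        # Hadamard product of the remaining factors.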
terms = []
args = list(self.args)
for i in range(len(args)):
factors = args[:i] + [args[i].diff(x)] + args[i+1:]
terms.append(hadamard_product(*factors))
return Add.fromiter(terms)
def _eval_derivative_matrix_lines(self, x):
from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct
from sympy.matrices.expressions.matexpr import _make_matrix
with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
lines = []
for ind in with_x_ind:
left_args = self.args[:ind]
right_args = self.args[ind+1:]
d = self.args[ind]._eval_derivative_matrix_lines(x)
hadam = hadamard_product(*(right_args + left_args))
diagonal = [(0, 2), (3, 4)]
diagonal = [e for j, e in enumerate(diagonal) if self.shape[j] != 1]
for i in d:
l1 = i._lines[i._first_line_index]
l2 = i._lines[i._second_line_index]
subexpr = ExprBuilder(
ArrayDiagonal,
[
ExprBuilder(
ArrayTensorProduct,
[
ExprBuilder(_make_matrix, [l1]),
hadam,
ExprBuilder(_make_matrix, [l2]),
]
),
*diagonal],
)
i._first_pointer_parent = subexpr.args[0].args[0].args
i._first_pointer_index = 0
i._second_pointer_parent = subexpr.args[0].args[2].args
i._second_pointer_index = 0
i._lines = [subexpr]
lines.append(i)
return lines
def validate(*args):
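    """Check that every argument is a matrix and that all shapes match."""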
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
# TODO Implement an algorithm for rewriting a Hadamard product as a diagonal
# matrix when an identity matrix is one of the factors.
def canonicalize(x):
"""Canonicalize the Hadamard product ``x`` with mathematical properties.
Examples
========
>>> from sympy import MatrixSymbol, HadamardProduct
>>> from sympy import OneMatrix, ZeroMatrix
>>> from sympy.matrices.expressions.hadamard import canonicalize
>>> from sympy import init_printing
>>> init_printing(use_unicode=False)
>>> A = MatrixSymbol('A', 2, 2)
>>> B = MatrixSymbol('B', 2, 2)
>>> C = MatrixSymbol('C', 2, 2)
Hadamard product associativity:
>>> X = HadamardProduct(A, HadamardProduct(B, C))
>>> X
A.*(B.*C)
>>> canonicalize(X)
A.*B.*C
Hadamard product commutativity:
>>> X = HadamardProduct(A, B)
>>> Y = HadamardProduct(B, A)
>>> X
A.*B
>>> Y
B.*A
>>> canonicalize(X)
A.*B
>>> canonicalize(Y)
A.*B
Hadamard product identity:
>>> X = HadamardProduct(A, OneMatrix(2, 2))
>>> X
A.*1
>>> canonicalize(X)
A
Absorbing element of Hadamard product:
>>> X = HadamardProduct(A, ZeroMatrix(2, 2))
>>> X
A.*0
>>> canonicalize(X)
0
    Rewriting to Hadamard power:
>>> X = HadamardProduct(A, A, A)
>>> X
A.*A.*A
>>> canonicalize(X)
.3
A
Notes
=====
As the Hadamard product is associative, nested products can be flattened.
The Hadamard product is commutative so that factors can be sorted for
canonical form.
    A matrix of only ones is an identity for the Hadamard product,
    so every matrix of only ones can be removed.
    Any zero matrix will make the whole product a zero matrix.
    Duplicate factors can be collected and rewritten as a HadamardPower.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hadamard_product_(matrices)
"""
# Associativity
rule = condition(
lambda x: isinstance(x, HadamardProduct),
flatten
)
fun = exhaust(rule)
x = fun(x)
# Identity
fun = condition(
lambda x: isinstance(x, HadamardProduct),
rm_id(lambda x: isinstance(x, OneMatrix))
)
x = fun(x)
# Absorbing by Zero Matrix
def absorb(x):
if any(isinstance(c, ZeroMatrix) for c in x.args):
return ZeroMatrix(*x.shape)
else:
return x
fun = condition(
lambda x: isinstance(x, HadamardProduct),
absorb
)
x = fun(x)
# Rewriting with HadamardPower
if isinstance(x, HadamardProduct):
tally = Counter(x.args)
new_arg = []
for base, exp in tally.items():
if exp == 1:
new_arg.append(base)
else:
new_arg.append(HadamardPower(base, exp))
x = HadamardProduct(*new_arg)
# Commutativity
fun = condition(
lambda x: isinstance(x, HadamardProduct),
sort(default_sort_key)
)
x = fun(x)
# Unpacking
x = unpack(x)
return x
def hadamard_power(base, exp):
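    """Return the elementwise (Hadamard) power of ``base`` raised to ``exp``.
    An exponent of 1 returns ``base`` unchanged, a non-matrix base falls back
    to the ordinary ``**`` operator, and a matrix exponent together with a
    matrix base raises ``ValueError``.
    """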
base = sympify(base)
exp = sympify(exp)
if exp == 1:
return base
if not base.is_Matrix:
return base**exp
if exp.is_Matrix:
raise ValueError("cannot raise expression to a matrix")
return HadamardPower(base, exp)
class HadamardPower(MatrixExpr):
r"""
Elementwise power of matrix expressions
Parameters
==========
base : scalar or matrix
exp : scalar or matrix
Notes
=====
There are four definitions for the hadamard power which can be used.
Let's consider `A, B` as `(m, n)` matrices, and `a, b` as scalars.
Matrix raised to a scalar exponent:
.. math::
A^{\circ b} = \begin{bmatrix}
A_{0, 0}^b & A_{0, 1}^b & \cdots & A_{0, n-1}^b \\
A_{1, 0}^b & A_{1, 1}^b & \cdots & A_{1, n-1}^b \\
\vdots & \vdots & \ddots & \vdots \\
A_{m-1, 0}^b & A_{m-1, 1}^b & \cdots & A_{m-1, n-1}^b
\end{bmatrix}
Scalar raised to a matrix exponent:
.. math::
a^{\circ B} = \begin{bmatrix}
a^{B_{0, 0}} & a^{B_{0, 1}} & \cdots & a^{B_{0, n-1}} \\
a^{B_{1, 0}} & a^{B_{1, 1}} & \cdots & a^{B_{1, n-1}} \\
\vdots & \vdots & \ddots & \vdots \\
a^{B_{m-1, 0}} & a^{B_{m-1, 1}} & \cdots & a^{B_{m-1, n-1}}
\end{bmatrix}
Matrix raised to a matrix exponent:
.. math::
A^{\circ B} = \begin{bmatrix}
A_{0, 0}^{B_{0, 0}} & A_{0, 1}^{B_{0, 1}} &
\cdots & A_{0, n-1}^{B_{0, n-1}} \\
A_{1, 0}^{B_{1, 0}} & A_{1, 1}^{B_{1, 1}} &
\cdots & A_{1, n-1}^{B_{1, n-1}} \\
\vdots & \vdots &
\ddots & \vdots \\
A_{m-1, 0}^{B_{m-1, 0}} & A_{m-1, 1}^{B_{m-1, 1}} &
\cdots & A_{m-1, n-1}^{B_{m-1, n-1}}
\end{bmatrix}
Scalar raised to a scalar exponent:
.. math::
a^{\circ b} = a^b
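    Examples
    ========
    A small illustrative sketch (the matrix ``A`` and the symbol ``b`` below
    are arbitrary examples, not part of the class itself):
    >>> from sympy.matrices.expressions import MatrixSymbol, HadamardPower
    >>> from sympy.abc import b
    >>> A = MatrixSymbol('A', 2, 3)
    >>> HadamardPower(A, b).shape
    (2, 3)
    >>> HadamardPower(A, b)[0, 1]
    A[0, 1]**b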
"""
def __new__(cls, base, exp):
base = sympify(base)
exp = sympify(exp)
if base.is_scalar and exp.is_scalar:
return base ** exp
if base.is_Matrix and exp.is_Matrix and base.shape != exp.shape:
raise ValueError(
'The shape of the base {} and '
'the shape of the exponent {} do not match.'
.format(base.shape, exp.shape)
)
obj = super().__new__(cls, base, exp)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
@property
def shape(self):
if self.base.is_Matrix:
return self.base.shape
return self.exp.shape
def _entry(self, i, j, **kwargs):
base = self.base
exp = self.exp
if base.is_Matrix:
a = base._entry(i, j, **kwargs)
elif base.is_scalar:
a = base
else:
raise ValueError(
'The base {} must be a scalar or a matrix.'.format(base))
if exp.is_Matrix:
b = exp._entry(i, j, **kwargs)
elif exp.is_scalar:
b = exp
else:
raise ValueError(
'The exponent {} must be a scalar or a matrix.'.format(exp))
return a ** b
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardPower(transpose(self.base), self.exp)
def _eval_derivative(self, x):
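        # Logarithmic differentiation: combine exp' * log(base) with
        # exp * (log(base))' elementwise and Hadamard-multiply by self.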
dexp = self.exp.diff(x)
logbase = self.base.applyfunc(log)
dlbase = logbase.diff(x)
return hadamard_product(
dexp*logbase + self.exp*dlbase,
self
)
def _eval_derivative_matrix_lines(self, x):
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct
from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal
from sympy.matrices.expressions.matexpr import _make_matrix
lr = self.base._eval_derivative_matrix_lines(x)
for i in lr:
diagonal = [(1, 2), (3, 4)]
diagonal = [e for j, e in enumerate(diagonal) if self.base.shape[j] != 1]
l1 = i._lines[i._first_line_index]
l2 = i._lines[i._second_line_index]
subexpr = ExprBuilder(
ArrayDiagonal,
[
ExprBuilder(
ArrayTensorProduct,
[
ExprBuilder(_make_matrix, [l1]),
self.exp*hadamard_power(self.base, self.exp-1),
ExprBuilder(_make_matrix, [l2]),
]
),
*diagonal],
validator=ArrayDiagonal._validate
)
i._first_pointer_parent = subexpr.args[0].args[0].args
i._first_pointer_index = 0
i._first_line_index = 0
i._second_pointer_parent = subexpr.args[0].args[2].args
i._second_pointer_index = 0
i._second_line_index = 0
i._lines = [subexpr]
return lr
|
fad693febef52dc9cfe4d4104236053aab6cdd8c6390d725043a53d7e28421fc | from sympy.matrices.dense import Matrix, eye
from sympy.matrices.expressions.matadd import MatAdd
from sympy.matrices.expressions.special import (Identity, OneMatrix, ZeroMatrix)
from sympy.core import symbols
from sympy.testing.pytest import raises, warns_deprecated_sympy
from sympy.matrices import ShapeError, MatrixSymbol
from sympy.matrices.expressions import (HadamardProduct, hadamard_product, HadamardPower, hadamard_power)
n, m, k = symbols('n,m,k')
Z = MatrixSymbol('Z', n, n)
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, k)
def test_HadamardProduct():
assert HadamardProduct(A, B, A).shape == A.shape
raises(ShapeError, lambda: HadamardProduct(A, B.T))
raises(TypeError, lambda: HadamardProduct(A, n))
raises(TypeError, lambda: HadamardProduct(A, 1))
assert HadamardProduct(A, 2*B, -A)[1, 1] == \
-2 * A[1, 1] * B[1, 1] * A[1, 1]
mix = HadamardProduct(Z*A, B)*C
assert mix.shape == (n, k)
assert set(HadamardProduct(A, B, A).T.args) == {A.T, A.T, B.T}
def test_HadamardProduct_isnt_commutative():
assert HadamardProduct(A, B) != HadamardProduct(B, A)
def test_mixed_indexing():
X = MatrixSymbol('X', 2, 2)
Y = MatrixSymbol('Y', 2, 2)
Z = MatrixSymbol('Z', 2, 2)
assert (X*HadamardProduct(Y, Z))[0, 0] == \
X[0, 0]*Y[0, 0]*Z[0, 0] + X[0, 1]*Y[1, 0]*Z[1, 0]
def test_canonicalize():
X = MatrixSymbol('X', 2, 2)
Y = MatrixSymbol('Y', 2, 2)
with warns_deprecated_sympy():
expr = HadamardProduct(X, check=False)
assert isinstance(expr, HadamardProduct)
expr2 = expr.doit() # unpack is called
assert isinstance(expr2, MatrixSymbol)
Z = ZeroMatrix(2, 2)
U = OneMatrix(2, 2)
assert HadamardProduct(Z, X).doit() == Z
assert HadamardProduct(U, X, X, U).doit() == HadamardPower(X, 2)
assert HadamardProduct(X, U, Y).doit() == HadamardProduct(X, Y)
assert HadamardProduct(X, Z, U, Y).doit() == Z
def test_hadamard():
m, n, p = symbols('m, n, p', integer=True)
A = MatrixSymbol('A', m, n)
B = MatrixSymbol('B', m, n)
C = MatrixSymbol('C', m, p)
X = MatrixSymbol('X', m, m)
I = Identity(m)
with raises(TypeError):
hadamard_product()
assert hadamard_product(A) == A
assert isinstance(hadamard_product(A, B), HadamardProduct)
assert hadamard_product(A, B).doit() == hadamard_product(A, B)
with raises(ShapeError):
hadamard_product(A, C)
hadamard_product(A, I)
assert hadamard_product(X, I) == HadamardProduct(I, X)
assert isinstance(hadamard_product(X, I), HadamardProduct)
a = MatrixSymbol("a", k, 1)
expr = MatAdd(ZeroMatrix(k, 1), OneMatrix(k, 1))
expr = HadamardProduct(expr, a)
assert expr.doit() == a
raises(ValueError, lambda: HadamardProduct())
def test_hadamard_product_with_explicit_mat():
A = MatrixSymbol("A", 3, 3).as_explicit()
B = MatrixSymbol("B", 3, 3).as_explicit()
X = MatrixSymbol("X", 3, 3)
expr = hadamard_product(A, B)
ret = Matrix([i*j for i, j in zip(A, B)]).reshape(3, 3)
assert expr == ret
expr = hadamard_product(A, X, B)
assert expr == HadamardProduct(ret, X)
expr = hadamard_product(eye(3), A)
assert expr == Matrix([[A[0, 0], 0, 0], [0, A[1, 1], 0], [0, 0, A[2, 2]]])
expr = hadamard_product(eye(3), eye(3))
assert expr == eye(3)
def test_hadamard_power():
m, n, p = symbols('m, n, p', integer=True)
A = MatrixSymbol('A', m, n)
assert hadamard_power(A, 1) == A
assert isinstance(hadamard_power(A, 2), HadamardPower)
assert hadamard_power(A, n).T == hadamard_power(A.T, n)
assert hadamard_power(A, n)[0, 0] == A[0, 0]**n
assert hadamard_power(m, n) == m**n
raises(ValueError, lambda: hadamard_power(A, A))
def test_hadamard_power_explicit():
A = MatrixSymbol('A', 2, 2)
B = MatrixSymbol('B', 2, 2)
a, b = symbols('a b')
assert HadamardPower(a, b) == a**b
assert HadamardPower(a, B).as_explicit() == \
Matrix([
[a**B[0, 0], a**B[0, 1]],
[a**B[1, 0], a**B[1, 1]]])
assert HadamardPower(A, b).as_explicit() == \
Matrix([
[A[0, 0]**b, A[0, 1]**b],
[A[1, 0]**b, A[1, 1]**b]])
assert HadamardPower(A, B).as_explicit() == \
Matrix([
[A[0, 0]**B[0, 0], A[0, 1]**B[0, 1]],
[A[1, 0]**B[1, 0], A[1, 1]**B[1, 1]]])
|
4988a289e754a407af4b423680143c2445ab8510661acbce0318c62cf45efe08 | from sympy.core.expr import unchanged
from sympy.core.numbers import oo
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.sets.contains import Contains
from sympy.sets.sets import (FiniteSet, Interval)
from sympy.testing.pytest import raises
def test_contains_basic():
raises(TypeError, lambda: Contains(S.Integers, 1))
assert Contains(2, S.Integers) is S.true
assert Contains(-2, S.Naturals) is S.false
i = Symbol('i', integer=True)
assert Contains(i, S.Naturals) == Contains(i, S.Naturals, evaluate=False)
def test_issue_6194():
x = Symbol('x')
assert unchanged(Contains, x, Interval(0, 1))
assert Interval(0, 1).contains(x) == (S.Zero <= x) & (x <= 1)
assert Contains(x, FiniteSet(0)) != S.false
assert Contains(x, Interval(1, 1)) != S.false
assert Contains(x, S.Integers) != S.false
def test_issue_10326():
assert Contains(oo, Interval(-oo, oo)) == False
assert Contains(-oo, Interval(-oo, oo)) == False
def test_binary_symbols():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
assert Contains(x, FiniteSet(y, Eq(z, True))
).binary_symbols == {y, z}
def test_as_set():
x = Symbol('x')
y = Symbol('y')
assert Contains(x, FiniteSet(y)).as_set() == FiniteSet(y)
assert Contains(x, S.Integers).as_set() == S.Integers
assert Contains(x, S.Reals).as_set() == S.Reals
def test_type_error():
# Pass in a parameter not of type "set"
raises(TypeError, lambda: Contains(2, None))
|
df3dd68de37b4d3b251c97af86bdd7ffb1d11cf81495e6129e0bbadbce6547a9 | import pyglet.gl as pgl
from sympy.core import S
from sympy.plotting.pygletplot.color_scheme import ColorScheme
from sympy.plotting.pygletplot.plot_mode import PlotMode
from sympy.utilities.iterables import is_sequence
from time import sleep
from threading import Thread, Event, RLock
import warnings
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
    The render styles, mapped to bit flags. Do not modify.
"""
styles = {'wireframe': 1, 'solid': 2, 'both': 3}
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85, 0.85, 0.85)
default_solid_color = (0.6, 0.6, 0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except Exception:
warnings.warn("\nWarning: creating lambda evaluator failed. "
"Falling back on SymPy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, bounds_callback=None, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
self.cbounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = bounds_callback
self._on_calculate()
def synchronized(f):
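        """Decorator that runs the wrapped method while holding ``self._draw_lock``."""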
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
def _create_display_list(self, function):
dl = pgl.glGenLists(1)
pgl.glNewList(dl, pgl.GL_COMPILE)
function()
pgl.glEndList()
return dl
def _render_stack_top(self, render_stack):
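        # Entries on the render stack are either -1 (empty marker), a bare
        # callable that still needs compiling, or a (display_list, callable)
        # pair; compile lazily and rebuild lists that are no longer valid.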
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if pgl.GL_TRUE == pgl.glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
def _draw_solid_display_list(self, dl):
pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT)
pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_FILL)
pgl.glCallList(dl)
pgl.glPopAttrib()
def _draw_wireframe_display_list(self, dl):
pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT)
pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_LINE)
pgl.glEnable(pgl.GL_POLYGON_OFFSET_LINE)
pgl.glPolygonOffset(-0.005, -50.0)
pgl.glCallList(dl)
pgl.glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f):
f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f):
f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
if self._calculating_verts.is_set():
return
self._calculating_verts.set()
try:
self._on_calculate_verts()
finally:
self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
if self._calculating_verts.is_set():
return
while self._calculating_cverts.is_set():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try:
self._on_calculate_cverts()
finally:
self._calculating_cverts.clear()
def _get_calculating_verts(self):
return self._calculating_verts.is_set()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
return self._calculating_cverts.is_set()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None:
return
if v == '':
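            # No style requested: pick 'both' for coarse meshes and 'solid'
            # once any interval uses more than 40 steps.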
step_max = 0
for i in self.intervals:
if i.v_steps is None:
continue
step_max = max([step_max, int(i.v_steps)])
v = ['both', 'solid'][step_max > 40]
if v not in self.styles:
raise ValueError("v should be there in self.styles")
if v == self._style:
return
self._style = v
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if is_sequence(v):
v = ColorScheme(*v)
else:
v = ColorScheme(v)
if repr(v) == repr(self._color):
return
self._on_change_color(v)
self._color = v
except Exception as e:
raise RuntimeError("Color change failed. "
"Reason: %s" % (str(e)))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [('mode', self.primary_alias),
('color', str(self.color)),
('style', str(self.style))]
o = "'%s'" % ("; ".join("%s=%s" % (k, v)
for k, v in d if v != 'None'))
return ", ".join([f, i, o])
|
8b77e3341ff2145b07f6a2cf458cfecd3c450f3319d8570039cf3470db70bd1b | #!/usr/bin/env python
"""Setup script for SymPy.
This uses Setuptools (https://setuptools.pypa.io/en/latest/), the standard
Python mechanism for installing packages.
For the easiest installation just type the command (you'll probably need
root privileges for that):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py test -> will run the complete test suite
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the sympy list at
[email protected] and ask for help.
"""
import sys
import os
import subprocess
from setuptools import setup, Command
from setuptools.command.sdist import sdist
min_mpmath_version = '0.19'
# This directory
dir_setup = os.path.dirname(os.path.realpath(__file__))
extra_kwargs = {
'zip_safe': False,
'entry_points': {
'console_scripts': [
'isympy = isympy:main',
]
}
}
if sys.version_info < (3, 8):
print("SymPy requires Python 3.8 or newer. Python %d.%d detected"
% sys.version_info[:2])
sys.exit(-1)
# Check that this list is up to date against the result of the command:
# python bin/generate_module_list.py
modules = [
'sympy.algebras',
'sympy.assumptions',
'sympy.assumptions.handlers',
'sympy.assumptions.predicates',
'sympy.assumptions.relation',
'sympy.benchmarks',
'sympy.calculus',
'sympy.categories',
'sympy.codegen',
'sympy.combinatorics',
'sympy.concrete',
'sympy.core',
'sympy.core.benchmarks',
'sympy.crypto',
'sympy.diffgeom',
'sympy.discrete',
'sympy.external',
'sympy.functions',
'sympy.functions.combinatorial',
'sympy.functions.elementary',
'sympy.functions.elementary.benchmarks',
'sympy.functions.special',
'sympy.functions.special.benchmarks',
'sympy.geometry',
'sympy.holonomic',
'sympy.integrals',
'sympy.integrals.benchmarks',
'sympy.interactive',
'sympy.liealgebras',
'sympy.logic',
'sympy.logic.algorithms',
'sympy.logic.utilities',
'sympy.matrices',
'sympy.matrices.benchmarks',
'sympy.matrices.expressions',
'sympy.multipledispatch',
'sympy.ntheory',
'sympy.parsing',
'sympy.parsing.autolev',
'sympy.parsing.autolev._antlr',
'sympy.parsing.c',
'sympy.parsing.fortran',
'sympy.parsing.latex',
'sympy.parsing.latex._antlr',
'sympy.physics',
'sympy.physics.continuum_mechanics',
'sympy.physics.control',
'sympy.physics.hep',
'sympy.physics.mechanics',
'sympy.physics.optics',
'sympy.physics.quantum',
'sympy.physics.units',
'sympy.physics.units.definitions',
'sympy.physics.units.systems',
'sympy.physics.vector',
'sympy.plotting',
'sympy.plotting.intervalmath',
'sympy.plotting.pygletplot',
'sympy.polys',
'sympy.polys.agca',
'sympy.polys.benchmarks',
'sympy.polys.domains',
'sympy.polys.matrices',
'sympy.polys.numberfields',
'sympy.printing',
'sympy.printing.pretty',
'sympy.sandbox',
'sympy.series',
'sympy.series.benchmarks',
'sympy.sets',
'sympy.sets.handlers',
'sympy.simplify',
'sympy.solvers',
'sympy.solvers.benchmarks',
'sympy.solvers.diophantine',
'sympy.solvers.ode',
'sympy.stats',
'sympy.stats.sampling',
'sympy.strategies',
'sympy.strategies.branch',
'sympy.tensor',
'sympy.tensor.array',
'sympy.tensor.array.expressions',
'sympy.testing',
'sympy.unify',
'sympy.utilities',
'sympy.utilities._compilation',
'sympy.utilities.mathml',
'sympy.vector',
]
class test_sympy(Command):
"""Runs all tests under the sympy/ folder
"""
description = "run all tests and doctests; also see bin/test and bin/doctest"
user_options = [] # setuptools complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # setuptools wants this
pass
def finalize_options(self): # this too
pass
def run(self):
from sympy.testing import runtests
runtests.run_all_tests()
class antlr(Command):
"""Generate code with antlr4"""
description = "generate parser code from antlr grammars"
user_options = [] # setuptools complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # setuptools wants this
pass
def finalize_options(self): # this too
pass
def run(self):
from sympy.parsing.latex._build_latex_antlr import build_parser as build_latex_parser
if not build_latex_parser():
sys.exit(-1)
from sympy.parsing.autolev._build_autolev_antlr import build_parser as build_autolev_parser
if not build_autolev_parser():
sys.exit(-1)
class sdist_sympy(sdist):
def run(self):
        # Fetch the git commit hash and write it to commit_hash.txt before
        # it is shipped in the tarball.
commit_hash = None
commit_hash_filepath = 'doc/commit_hash.txt'
try:
commit_hash = \
subprocess.check_output(['git', 'rev-parse', 'HEAD'])
commit_hash = commit_hash.decode('ascii')
commit_hash = commit_hash.rstrip()
print('Commit hash found : {}.'.format(commit_hash))
print('Writing it to {}.'.format(commit_hash_filepath))
except:
pass
if commit_hash:
with open(commit_hash_filepath, 'w') as f:
f.write(commit_hash)
super().run()
try:
os.remove(commit_hash_filepath)
print(
'Successfully removed temporary file {}.'
.format(commit_hash_filepath))
except OSError as e:
print("Error deleting %s - %s." % (e.filename, e.strerror))
# Check that this list is up to date against the result of the command:
# python bin/generate_test_list.py
tests = [
'sympy.algebras.tests',
'sympy.assumptions.tests',
'sympy.calculus.tests',
'sympy.categories.tests',
'sympy.codegen.tests',
'sympy.combinatorics.tests',
'sympy.concrete.tests',
'sympy.core.tests',
'sympy.crypto.tests',
'sympy.diffgeom.tests',
'sympy.discrete.tests',
'sympy.external.tests',
'sympy.functions.combinatorial.tests',
'sympy.functions.elementary.tests',
'sympy.functions.special.tests',
'sympy.geometry.tests',
'sympy.holonomic.tests',
'sympy.integrals.tests',
'sympy.interactive.tests',
'sympy.liealgebras.tests',
'sympy.logic.tests',
'sympy.matrices.expressions.tests',
'sympy.matrices.tests',
'sympy.multipledispatch.tests',
'sympy.ntheory.tests',
'sympy.parsing.tests',
'sympy.physics.continuum_mechanics.tests',
'sympy.physics.control.tests',
'sympy.physics.hep.tests',
'sympy.physics.mechanics.tests',
'sympy.physics.optics.tests',
'sympy.physics.quantum.tests',
'sympy.physics.tests',
'sympy.physics.units.tests',
'sympy.physics.vector.tests',
'sympy.plotting.intervalmath.tests',
'sympy.plotting.pygletplot.tests',
'sympy.plotting.tests',
'sympy.polys.agca.tests',
'sympy.polys.domains.tests',
'sympy.polys.matrices.tests',
'sympy.polys.numberfields.tests',
'sympy.polys.tests',
'sympy.printing.pretty.tests',
'sympy.printing.tests',
'sympy.sandbox.tests',
'sympy.series.tests',
'sympy.sets.tests',
'sympy.simplify.tests',
'sympy.solvers.diophantine.tests',
'sympy.solvers.ode.tests',
'sympy.solvers.tests',
'sympy.stats.sampling.tests',
'sympy.stats.tests',
'sympy.strategies.branch.tests',
'sympy.strategies.tests',
'sympy.tensor.array.expressions.tests',
'sympy.tensor.array.tests',
'sympy.tensor.tests',
'sympy.testing.tests',
'sympy.unify.tests',
'sympy.utilities._compilation.tests',
'sympy.utilities.tests',
'sympy.vector.tests',
]
with open(os.path.join(dir_setup, 'sympy', 'release.py')) as f:
# Defines __version__
exec(f.read())
if __name__ == '__main__':
setup(name='sympy',
version=__version__,
description='Computer algebra system (CAS) in Python',
author='SymPy development team',
author_email='[email protected]',
license='BSD',
keywords="Math CAS",
url='https://sympy.org',
project_urls={
'Source': 'https://github.com/sympy/sympy',
},
py_modules=['isympy'],
packages=['sympy'] + modules + tests,
ext_modules=[],
package_data={
'sympy.utilities.mathml': ['data/*.xsl'],
'sympy.logic.benchmarks': ['input/*.cnf'],
'sympy.parsing.autolev': [
'*.g4', 'test-examples/*.al', 'test-examples/*.py',
'test-examples/pydy-example-repo/*.al',
'test-examples/pydy-example-repo/*.py',
'test-examples/README.txt',
],
'sympy.parsing.latex': ['*.txt', '*.g4'],
'sympy.plotting.tests': ['test_region_*.png'],
'sympy': ['py.typed']
},
data_files=[('share/man/man1', ['doc/man/isympy.1'])],
cmdclass={'test': test_sympy,
'antlr': antlr,
'sdist': sdist_sympy,
},
python_requires='>=3.8',
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[
'mpmath>=%s' % min_mpmath_version,
],
**extra_kwargs
)
|
8390f44277f641dd3408bb428a65ad5cea019d9b8008fe53131f0f71e03d5c1b | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
from itertools import chain
import json
import sys
import warnings
import pytest
from sympy.testing.runtests import setup_pprint, _get_doctest_blacklist
durations_path = os.path.join(os.path.dirname(__file__), '.ci', 'durations.json')
blacklist_path = os.path.join(os.path.dirname(__file__), '.ci', 'blacklisted.json')
collect_ignore = _get_doctest_blacklist()
# Set up printing for doctests
setup_pprint()
sys.__displayhook__ = sys.displayhook
#from sympy import pprint_use_unicode
#pprint_use_unicode(False)
def _mk_group(group_dict):
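    """Flatten a ``{filename: [test_names]}`` mapping into pytest node ids."""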
return list(chain(*[[k+'::'+v for v in files] for k, files in group_dict.items()]))
if os.path.exists(durations_path):
veryslow_group, slow_group = [_mk_group(group_dict) for group_dict in json.loads(open(durations_path, 'rt').read())]
else:
    # Using warnings in conftest.py has known issues: https://github.com/pytest-dev/pytest/issues/2891
warnings.warn("conftest.py:22: Could not find %s, --quickcheck and --veryquickcheck will have no effect.\n" % durations_path)
veryslow_group, slow_group = [], []
if os.path.exists(blacklist_path):
with open(blacklist_path, 'rt') as stream:
blacklist_group = _mk_group(json.load(stream))
else:
warnings.warn("conftest.py:28: Could not find %s, no tests will be skipped due to blacklisting\n" % blacklist_path)
blacklist_group = []
def pytest_addoption(parser):
parser.addoption("--quickcheck", dest="runquick", action="store_true",
help="Skip very slow tests (see ./ci/parse_durations_log.py)")
parser.addoption("--veryquickcheck", dest="runveryquick", action="store_true",
help="Skip slow & very slow (see ./ci/parse_durations_log.py)")
def pytest_configure(config):
# register an additional marker
config.addinivalue_line("markers", "slow: manually marked test as slow (use .ci/durations.json instead)")
config.addinivalue_line("markers", "quickcheck: skip very slow tests")
config.addinivalue_line("markers", "veryquickcheck: skip slow & very slow tests")
def pytest_runtest_setup(item):
if isinstance(item, pytest.Function):
if item.nodeid in veryslow_group and (item.config.getvalue("runquick") or
item.config.getvalue("runveryquick")):
pytest.skip("very slow test, skipping since --quickcheck or --veryquickcheck was passed.")
return
if item.nodeid in slow_group and item.config.getvalue("runveryquick"):
pytest.skip("slow test, skipping since --veryquickcheck was passed.")
return
if item.nodeid in blacklist_group:
pytest.skip("blacklisted test, see %s" % blacklist_path)
return
|
ff56eb672fc89b5c5d77dfadee6e88e2989b71298da37ac1d1c9e6e499ba6db0 | #!/usr/bin/env python
#
# Tests that a useful message is given in the ImportError when trying to import
# sympy from Python 2. This ensures that we don't get a Py2 SyntaxError from
# sympy/__init__.py
import sys
assert sys.version_info[:2] == (2, 7), "This test is for Python 2.7 only"
import os
thisdir = os.path.dirname(__file__)
parentdir = os.path.normpath(os.path.join(thisdir, '..'))
# Append the SymPy root directory to path
sys.path.append(parentdir)
try:
import sympy
except ImportError as exc:
message = str(exc)
# e.g. "Python version 3.5 or above is required for SymPy."
assert message.startswith("Python version")
assert message.endswith(" or above is required for SymPy.")
else:
raise AssertionError("import sympy should give ImportError on Python 2.7")
|
5829f68273b205d9f3ad00e7a38952d2f4519a04dd86a85e214980ef6dfbfd55 | #!/usr/bin/env python
"""
Run tests for specific packages that use optional dependencies.
The optional dependencies need to be installed before running this.
"""
# Add the local sympy to sys.path (needed for CI)
from get_sympy import path_hack
path_hack()
class TestsFailedError(Exception):
pass
test_list = [
# numpy
'*numpy*',
'sympy/core/',
'sympy/matrices/',
'sympy/physics/quantum/',
'sympy/utilities/tests/test_lambdify.py',
'sympy/physics/control/',
# scipy
'*scipy*',
# matplotlib
'sympy/plotting/',
# llvmlite
'*llvm*',
# aesara
'*aesara*',
# jax
'*jax*',
# gmpy
'sympy/polys',
# gmpy, numpy, scipy, autowrap, matplotlib
'sympy/external',
# autowrap
'*autowrap*',
# ipython
'*ipython*',
# antlr, lfortran, clang
'sympy/parsing/',
# codegen
'sympy/codegen/',
'sympy/utilities/tests/test_codegen',
'sympy/utilities/_compilation/tests/test_compilation',
'sympy/external/tests/test_codegen.py',
# cloudpickle
'pickling',
# pycosat
'sympy/logic',
'sympy/assumptions',
#stats
'sympy/stats',
]
blacklist = [
'sympy/physics/quantum/tests/test_circuitplot.py',
]
doctest_list = [
# numpy
'sympy/matrices/',
'sympy/utilities/lambdify.py',
# scipy
'*scipy*',
# matplotlib
'sympy/plotting/',
# llvmlite
'*llvm*',
# aesara
'*aesara*',
# gmpy
'sympy/polys',
# autowrap
'*autowrap*',
# ipython
'*ipython*',
# antlr, lfortran, clang
'sympy/parsing/',
# codegen
'sympy/codegen/',
# pycosat
'sympy/logic',
'sympy/assumptions',
#stats
'sympy/stats',
]
print('Testing optional dependencies')
from sympy import test, doctest
tests_passed = test(*test_list, blacklist=blacklist, force_colors=True)
doctests_passed = doctest(*doctest_list, force_colors=True)
if not tests_passed and not doctests_passed:
raise TestsFailedError('Tests and doctests failed')
elif not tests_passed:
raise TestsFailedError('Doctests passed but tests failed')
elif not doctests_passed:
raise TestsFailedError('Tests passed but doctests failed')
|
b378b191987640c4450b4eb03c771b8fcb63bb388964673542f5e8073470a016 | #!/usr/bin/env python
"""
Script to generate test coverage reports.
Usage:
$ bin/coverage_report.py
This will create a directory covhtml with the coverage reports. To
restrict the analysis to a directory, you just need to pass its name as
argument. For example:
$ bin/coverage_report.py sympy/logic
runs only the tests in sympy/logic/ and reports only on the modules in
sympy/logic/. To also run slow tests use --slow option. You can also get a
report on the parts of the whole sympy code covered by the tests in
sympy/logic/ by following up the previous command with
$ bin/coverage_report.py -c
"""
from __future__ import print_function
import os
import re
import sys
from argparse import ArgumentParser
minver = '3.4'
try:
import coverage
if coverage.__version__ < minver:
raise ImportError
except ImportError:
print(
"You need to install module coverage (version %s or newer required).\n"
"See https://coverage.readthedocs.io/en/latest/ or \n"
"https://launchpad.net/ubuntu/+source/python-coverage/" % minver)
sys.exit(-1)
omit_dir_patterns = ['benchmark', 'examples',
'pyglet', 'test_external']
omit_dir_re = re.compile(r'|'.join(omit_dir_patterns))
source_re = re.compile(r'.*\.py$')
def generate_covered_files(top_dir):
for dirpath, dirnames, filenames in os.walk(top_dir):
omit_dirs = [dirn for dirn in dirnames if omit_dir_re.match(dirn)]
for x in omit_dirs:
dirnames.remove(x)
for filename in filenames:
if source_re.match(filename):
yield os.path.join(dirpath, filename)
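# Rough usage sketch (actual paths depend on the checkout):
#   for path in generate_covered_files('sympy/logic'):
#       ...  # yields e.g. 'sympy/logic/boolalg.py', skipping benchmark/example dirs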
def make_report(
test_args, source_dir='sympy/', report_dir='covhtml', use_cache=False,
slow=False
):
# code adapted from /bin/test
from get_sympy import path_hack
sympy_top = path_hack()
os.chdir(sympy_top)
cov = coverage.coverage()
cov.exclude("raise NotImplementedError")
cov.exclude("def canonize") # this should be "@decorated"
if use_cache:
cov.load()
else:
cov.erase()
cov.start()
import sympy
sympy.test(*test_args, subprocess=False, slow=slow)
#sympy.doctest() # coverage doesn't play well with doctests
cov.stop()
try:
cov.save()
except PermissionError:
import warnings
warnings.warn(
"PermissionError has been raised while saving the " \
"coverage result.",
RuntimeWarning
)
covered_files = list(generate_covered_files(source_dir))
cov.html_report(morfs=covered_files, directory=report_dir)
parser = ArgumentParser()
parser.add_argument(
'-c', '--use-cache', action='store_true', default=False,
help='Use cached data.')
parser.add_argument(
'-d', '--report-dir', default='covhtml',
help='Directory to put the generated report in.')
parser.add_argument(
"--slow", action="store_true", dest="slow", default=False,
help="Run slow functions also.")
options, args = parser.parse_known_args()
if __name__ == '__main__':
report_dir = options.report_dir
use_cache = options.use_cache
slow = options.slow
make_report(
args, report_dir=report_dir, use_cache=use_cache, slow=slow)
print("The generated coverage report is in covhtml directory.")
print(
"Open %s in your web browser to view the report" %
os.sep.join([report_dir, 'index.html'])
)
|
b9852b33354633c15f5ada85fcebadcd3f69a7031ecf02bda60a6bed42d138c3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A tool to generate AUTHORS. We started tracking authors before moving to git,
so we have to do some manual rearrangement of the git history authors in order
to get the order in AUTHORS. bin/mailmap_check.py should be run before
committing the results.
See here for instructions on using this script:
https://github.com/sympy/sympy/wiki/Development-workflow#update-mailmap
"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os
if sys.version_info < (3, 8):
sys.exit("This script requires Python 3.8 or newer")
from pathlib import Path
from subprocess import run, PIPE
from collections import OrderedDict, defaultdict
from argparse import ArgumentParser
def sympy_dir():
return Path(__file__).resolve().parent.parent
# put sympy on the path
sys.path.insert(0, str(sympy_dir()))
import sympy
from sympy.utilities.misc import filldedent
from sympy.external.importtools import version_tuple
def main(*args):
parser = ArgumentParser(description='Update the .mailmap file')
parser.add_argument('--update-authors', action='store_true',
help=filldedent("""
Also updates the AUTHORS file. DO NOT use this option as part of a
pull request. The AUTHORS file will be updated later at the time a
new version of SymPy is released."""))
args = parser.parse_args(args)
if not check_git_version():
return 1
    # find who git knows about
try:
git_people = get_authors_from_git()
except AssertionError as msg:
print(red(msg))
return 1
lines_mailmap = read_lines(mailmap_path())
def key(line):
# return lower case first address on line or
# raise an error if not an entry
if '#' in line:
line = line.split('#')[0]
L, R = line.count("<"), line.count(">")
assert L == R and L in (1, 2)
return line.split(">", 1)[0].split("<")[1].lower()
who = OrderedDict()
for i, line in enumerate(lines_mailmap):
try:
who.setdefault(key(line), []).append(line)
except AssertionError:
who[i] = [line]
problems = False
missing = False
ambiguous = False
dups = defaultdict(list)
#
# Here we use the git people with the most recent commit skipped. This
# means we don't need to add .mailmap entries for the temporary merge
# commit created in CI on a PR.
#
for person in git_people:
email = key(person)
dups[email].append(person)
if email not in who:
print(red("This author is not included in the .mailmap file:"))
print(person)
missing = True
elif not any(p.startswith(person) for p in who[email]):
print(red("Ambiguous names in .mailmap"))
print(red("This email address appears for multiple entries:"))
print('Person:', person)
print('Mailmap entries:')
for line in who[email]:
print(line)
ambiguous = True
if missing:
print(red(filldedent("""
The .mailmap file needs to be updated because there are commits with
unrecognised author/email metadata.
""")))
problems = True
if ambiguous:
print(red(filldedent("""
Lines should be added to .mailmap to indicate the correct name and
email aliases for all commits.
""")))
problems = True
for email, commitauthors in dups.items():
if len(commitauthors) > 2:
print(red(filldedent("""
The following commits are recorded with different metadata but the
same/ambiguous email address. The .mailmap file will need to be
updated.""")))
for author in commitauthors:
print(author)
problems = True
lines_mailmap_sorted = sort_lines_mailmap(lines_mailmap)
write_lines(mailmap_path(), lines_mailmap_sorted)
if lines_mailmap_sorted != lines_mailmap:
problems = True
print(red("The mailmap file was reordered"))
# Check if changes to AUTHORS file are also needed
#
# Here we don't skip the last commit. We need authors from the most recent
# commit if the AUTHORS file was updated.
lines_authors = make_authors_file_lines(git_people)
old_lines_authors = read_lines(authors_path())
for person in old_lines_authors[8:]:
if person not in git_people:
print(red("This author is in the AUTHORS file but not .mailmap:"))
print(person)
problems = True
if problems:
print(red(filldedent("""
For instructions on updating the .mailmap file see:
https://github.com/sympy/sympy/wiki/Development-workflow#add-your-name-and-email-address-to-the-mailmap-file""",
break_on_hyphens=False, break_long_words=False)))
else:
print(green("No changes needed in .mailmap"))
# Actually update the AUTHORS file (if --update-authors was passed)
authors_changed = update_authors_file(lines_authors, old_lines_authors, args.update_authors)
return int(problems) + int(authors_changed)
def update_authors_file(lines, old_lines, update_yesno):
if old_lines == lines:
print(green('No changes needed in AUTHORS.'))
return 0
# Actually write changes to the file?
if update_yesno:
write_lines(authors_path(), lines)
print(red("Changes were made in the authors file"))
# check for new additions
new_authors = []
for i in sorted(set(lines) - set(old_lines)):
try:
author_name(i)
new_authors.append(i)
except AssertionError:
continue
if new_authors:
if update_yesno:
print(yellow("The following authors were added to AUTHORS."))
else:
print(green(filldedent("""
The following authors will be added to the AUTHORS file at the
time of the next SymPy release.""")))
print()
for i in sorted(new_authors, key=lambda x: x.lower()):
print('\t%s' % i)
if new_authors and update_yesno:
return 1
else:
return 0
def check_git_version():
# check git version
minimal = '1.8.4.2'
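    # `git --version` prints e.g. "git version 2.34.1"; slicing off the first
    # 12 characters ("git version ") leaves just the version number.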
git_ver = run(['git', '--version'], stdout=PIPE, encoding='utf-8').stdout[12:]
if version_tuple(git_ver) < version_tuple(minimal):
print(yellow("Please use a git version >= %s" % minimal))
return False
else:
return True
def authors_path():
return sympy_dir() / 'AUTHORS'
def mailmap_path():
return sympy_dir() / '.mailmap'
def red(text):
return "\033[31m%s\033[0m" % text
def yellow(text):
return "\033[33m%s\033[0m" % text
def green(text):
return "\033[32m%s\033[0m" % text
def author_name(line):
assert line.count("<") == line.count(">") == 1
assert line.endswith(">")
return line.split("<", 1)[0].strip()
def get_authors_from_git():
git_command = ["git", "log", "--topo-order", "--reverse", "--format=%aN <%aE>"]
parents = run(["git", "rev-list", "--no-walk", "--count", "HEAD^@"],
stdout=PIPE, encoding='utf-8').stdout.strip()
if parents != '1':
# Skip the most recent commit. Used to ignore the merge commit created
# when this script runs in CI. If HEAD is a merge commit parents will
# typically be '2'. We use HEAD^2 rather than HEAD^1 to select the
# parent commit that is part of the PR rather than the parent commit
# that was the previous tip of master.
git_command.append("HEAD^"+parents)
git_people = run(git_command, stdout=PIPE, encoding='utf-8').stdout.strip().split("\n")
# remove duplicates, keeping the original order
git_people = list(OrderedDict.fromkeys(git_people))
# Do the few changes necessary in order to reproduce AUTHORS:
def move(l, i1, i2, who):
x = l.pop(i1)
# this will fail if the .mailmap is not right
assert who == author_name(x), \
'%s was not found at line %i' % (who, i1)
l.insert(i2, x)
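    # e.g. move(git_people, 2, 0, 'Ondřej Čertík') below pops the entry at
    # index 2 (asserting that it is Ondřej Čertík) and re-inserts it at index 0.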
move(git_people, 2, 0, 'Ondřej Čertík')
move(git_people, 42, 1, 'Fabian Pedregosa')
move(git_people, 22, 2, 'Jurjen N.E. Bos')
git_people.insert(4, "*Marc-Etienne M.Leveille <[email protected]>")
move(git_people, 10, 5, 'Brian Jorgensen')
git_people.insert(11, "*Ulrich Hecht <[email protected]>")
# this will fail if the .mailmap is not right
assert 'Kirill Smelkov' == author_name(git_people.pop(12)
), 'Kirill Smelkov was not found at line 12'
move(git_people, 12, 32, 'Sebastian Krämer')
move(git_people, 227, 35, 'Case Van Horsen')
git_people.insert(43, "*Dan <[email protected]>")
move(git_people, 57, 59, 'Aaron Meurer')
move(git_people, 58, 57, 'Andrew Docherty')
move(git_people, 67, 66, 'Chris Smith')
move(git_people, 79, 76, 'Kevin Goodsell')
git_people.insert(84, "*Chu-Ching Huang <[email protected]>")
move(git_people, 93, 92, 'James Pearson')
# this will fail if the .mailmap is not right
assert 'Sergey B Kirpichev' == author_name(git_people.pop(226)
), 'Sergey B Kirpichev was not found at line 226.'
index = git_people.index(
"azure-pipelines[bot] " +
"<azure-pipelines[bot]@users.noreply.github.com>")
git_people.pop(index)
index = git_people.index(
"whitesource-bolt-for-github[bot] " +
"<whitesource-bolt-for-github[bot]@users.noreply.github.com>")
git_people.pop(index)
return git_people
def make_authors_file_lines(git_people):
# define new lines for the file
header = filldedent("""
All people who contributed to SymPy by sending at least a patch or
more (in the order of the date of their first contribution), except
those who explicitly didn't want to be mentioned. People with a * next
to their names are not found in the metadata of the git history. This
file is generated automatically by running `./bin/authors_update.py`.
""").lstrip()
header_extra = "There are a total of %d authors." % len(git_people)
lines = header.splitlines()
lines.append('')
lines.append(header_extra)
lines.append('')
lines.extend(git_people)
return lines
def sort_lines_mailmap(lines):
for n, line in enumerate(lines):
if not line.startswith('#'):
header_end = n
break
header = lines[:header_end]
mailmap_lines = lines[header_end:]
return header + sorted(mailmap_lines)
def read_lines(path):
with open(path, 'r', encoding='utf-8') as fin:
return [line.strip() for line in fin.readlines()]
def write_lines(path, lines):
with open(path, 'w', encoding='utf-8') as fout:
fout.write('\n'.join(lines))
fout.write('\n')
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))
|
39adc6520b523c16f09f73cdf207c02d9c806ec6515d8933ed9d94c1201c6f35 | #!/usr/bin/env python3
from subprocess import check_output
import sys
import os.path
def main(tarname, gitroot):
"""Run this as ./compare_tar_against_git.py TARFILE GITROOT
Args
====
TARFILE: Path to the built sdist (sympy-xx.tar.gz)
    GITROOT: Path to the root of the git checkout (the directory containing .git)
"""
compare_tar_against_git(tarname, gitroot)
## TARBALL WHITELISTS
# If a file does not end up in the tarball that should, add it to setup.py if
# it is Python, or MANIFEST.in if it is not. (There is a command at the top
# of setup.py to gather all the things that should be there).
# TODO: Also check that this whitelist isn't growing out of date from files
# removed from git.
# Files that are in git that should not be in the tarball
git_whitelist = {
# Git specific dotfiles
'.gitattributes',
'.gitignore',
'.mailmap',
# CI
'.github/workflows/runtests.yml',
'.github/workflows/ci-sage.yml',
'.github/workflows/comment-on-pr.yml',
'.github/workflows/release.yml',
'.github/workflows/docs-preview.yml',
'.github/workflows/checkconflict.yml',
'.ci/durations.json',
'.ci/generate_durations_log.sh',
'.ci/parse_durations_log.py',
'.ci/blacklisted.json',
'.ci/README.rst',
'.circleci/config.yml',
'.github/FUNDING.yml',
'.editorconfig',
'.coveragerc',
'CODEOWNERS',
'asv.conf.actions.json',
'codecov.yml',
'pytest.ini',
'MANIFEST.in',
'banner.svg',
# Code of conduct
'CODE_OF_CONDUCT.md',
# Pull request template
'PULL_REQUEST_TEMPLATE.md',
# Contributing guide
'CONTRIBUTING.md',
# Nothing from bin/ should be shipped unless we intend to install it. Most
# of this stuff is for development anyway. To run the tests from the
# tarball, use setup.py test, or import sympy and run sympy.test() or
# sympy.doctest().
'bin/adapt_paths.py',
'bin/ask_update.py',
'bin/authors_update.py',
'bin/build_doc.sh',
'bin/coverage_doctest.py',
'bin/coverage_report.py',
'bin/deploy_doc.sh',
'bin/diagnose_imports',
'bin/doctest',
'bin/generate_module_list.py',
'bin/generate_test_list.py',
'bin/get_sympy.py',
'bin/mailmap_update.py',
'bin/py.bench',
'bin/strip_whitespace',
'bin/sympy_time.py',
'bin/sympy_time_cache.py',
'bin/test',
'bin/test_external_imports.py',
'bin/test_executable.py',
'bin/test_import',
'bin/test_import.py',
'bin/test_isolated',
'bin/test_py2_import.py',
'bin/test_setup.py',
'bin/test_submodule_imports.py',
'bin/test_optional_dependencies.py',
'bin/test_sphinx.sh',
'bin/mailmap_check.py',
'bin/test_symengine.py',
'bin/test_tensorflow.py',
'bin/test_pyodide.mjs',
# The notebooks are not ready for shipping yet. They need to be cleaned
# up, and preferably doctested. See also
# https://github.com/sympy/sympy/issues/6039.
'examples/advanced/identitysearch_example.ipynb',
'examples/beginner/plot_advanced.ipynb',
'examples/beginner/plot_colors.ipynb',
'examples/beginner/plot_discont.ipynb',
'examples/beginner/plot_gallery.ipynb',
'examples/beginner/plot_intro.ipynb',
'examples/intermediate/limit_examples_advanced.ipynb',
'examples/intermediate/schwarzschild.ipynb',
'examples/notebooks/density.ipynb',
'examples/notebooks/fidelity.ipynb',
'examples/notebooks/fresnel_integrals.ipynb',
'examples/notebooks/qubits.ipynb',
'examples/notebooks/sho1d_example.ipynb',
'examples/notebooks/spin.ipynb',
'examples/notebooks/trace.ipynb',
'examples/notebooks/Bezout_Dixon_resultant.ipynb',
'examples/notebooks/IntegrationOverPolytopes.ipynb',
'examples/notebooks/Macaulay_resultant.ipynb',
'examples/notebooks/Sylvester_resultant.ipynb',
'examples/notebooks/README.txt',
# This stuff :)
'release/.gitignore',
'release/README.md',
'release/compare_tar_against_git.py',
'release/update_docs.py',
'release/build_docs.py',
'release/github_release.py',
'release/helpers.py',
'release/releasecheck.py',
'release/sha256.py',
'release/authors.py',
'release/ci_release_script.sh',
# This is just a distribute version of setup.py. Used mainly for setup.py
# develop, which we don't care about in the release tarball
'setupegg.py',
# pytest stuff
'conftest.py',
}
# Files that are in the tarball but should not be in git
tarball_whitelist = {
# Generated by setup.py. Contains metadata for PyPI.
"PKG-INFO",
# Generated by setuptools. More metadata.
'setup.cfg',
'sympy.egg-info/PKG-INFO',
'sympy.egg-info/SOURCES.txt',
'sympy.egg-info/dependency_links.txt',
'sympy.egg-info/requires.txt',
'sympy.egg-info/top_level.txt',
'sympy.egg-info/not-zip-safe',
'sympy.egg-info/entry_points.txt',
# Not sure where this is generated from...
'doc/commit_hash.txt',
}
def blue(text):
return "\033[34m%s\033[0m" % text
def red(text):
return "\033[31m%s\033[0m" % text
def run(*cmdline, cwd=None):
"""
Run command in subprocess and get lines of output
"""
return check_output(cmdline, encoding='utf-8', cwd=cwd).splitlines()
def full_path_split(path):
"""
Function to do a full split on a path.
"""
# Based on https://stackoverflow.com/a/13505966/161801
rest, tail = os.path.split(path)
if not rest or rest == os.path.sep:
return (tail,)
return full_path_split(rest) + (tail,)
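# For example (illustrative path):
#   full_path_split('sympy-1.11/sympy/__init__.py')
#   -> ('sympy-1.11', 'sympy', '__init__.py')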
def compare_tar_against_git(tarname, gitroot):
"""
Compare the contents of the tarball against git ls-files
See the bottom of the file for the whitelists.
"""
git_lsfiles = set(i.strip() for i in run('git', 'ls-files', cwd=gitroot))
tar_output_orig = set(run('tar', 'tf', tarname))
tar_output = set()
for file in tar_output_orig:
# The tar files are like sympy-0.7.3/sympy/__init__.py, and the git
# files are like sympy/__init__.py.
split_path = full_path_split(file)
if split_path[-1]:
# Exclude directories, as git ls-files does not include them
tar_output.add(os.path.join(*split_path[1:]))
# print tar_output
# print git_lsfiles
fail = False
print()
print(blue("Files in the tarball from git that should not be there:"))
print()
for line in sorted(tar_output.intersection(git_whitelist)):
fail = True
print(line)
print()
print(blue("Files in git but not in the tarball:"))
print()
for line in sorted(git_lsfiles - tar_output - git_whitelist):
fail = True
print(line)
print()
print(blue("Files in the tarball but not in git:"))
print()
for line in sorted(tar_output - git_lsfiles - tarball_whitelist):
fail = True
print(line)
print()
if fail:
sys.exit(red("Non-whitelisted files found or not found in the tarball"))
if __name__ == "__main__":
main(*sys.argv[1:])
|
9e504e1620a827c5af9c4aa5100393c5259e5ff924a4c155470c1a5451d9d00d | #! /usr/bin/env python
# Check the plot docstring
from sympy import Symbol, exp, sin, cos
from sympy.plotting import (plot, plot_parametric,
plot3d_parametric_surface, plot3d_parametric_line,
plot3d)
lx = range(5)
ly = [i**2 for i in lx]
x = Symbol('x')
y = Symbol('y')
u = Symbol('u')
v = Symbol('v')
expr = x**2 - 1
b = plot(expr, (x, 2, 4), show=False) # cartesian plot
e = plot(exp(-x), (x, 0, 4), show=False) # cartesian plot (and coloring, see below)
f = plot3d_parametric_line(sin(x), cos(x), x, (x, 0, 10), show=False) # 3d parametric line plot
g = plot3d(sin(x)*cos(y), (x, -5, 5), (y, -10, 10), show=False) # 3d surface cartesian plot
h = plot3d_parametric_surface(cos(u)*v, sin(u)*v, u, (u, 0, 10), (v, -2, 2), show=False) # 3d parametric surface plot
# Some aesthetics
e[0].line_color = lambda x: x / 4
f[0].line_color = lambda x, y, z: z / 10
g[0].surface_color = lambda x, y: sin(x)
# Some more stuff on aesthetics - coloring wrt coordinates or parameters
param_line_2d = plot_parametric((x*cos(x), x*sin(x), (x, 0, 15)), (1.1*x*cos(x), 1.1*x*sin(x), (x, 0, 15)), show=False)
param_line_2d[0].line_color = lambda u: sin(u) # parametric
param_line_2d[1].line_color = lambda u, v: u**2 + v**2 # coordinates
param_line_2d.title = 'The inner one is colored by parameter and the outer one by coordinates'
param_line_3d = plot3d_parametric_line((x*cos(x), x*sin(x), x, (x, 0, 15)),
(1.5*x*cos(x), 1.5*x*sin(x), x, (x, 0, 15)),
(2*x*cos(x), 2*x*sin(x), x, (x, 0, 15)), show=False)
param_line_3d[0].line_color = lambda u: u # parametric
param_line_3d[1].line_color = lambda u, v: u*v # first and second coordinates
param_line_3d[2].line_color = lambda u, v, w: u*v*w # all coordinates
if __name__ == '__main__':
for p in [b, e, f, g, h, param_line_2d, param_line_3d]:
p.show()
|
8c94299c9e2b4bfea671f933f30b893b32e4aa15a551d5c9965d98a9a0c08f52 | #
# SymPy documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 22 19:34:32 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import inspect
import os
import subprocess
from datetime import datetime
# Make sure we import sympy from git
sys.path.insert(0, os.path.abspath('../..'))
import sympy
# If your extensions are in another directory, add it here.
sys.path = ['ext'] + sys.path
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.linkcode',
'sphinx_math_dollar', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx_reredirects', 'sphinx_copybutton',
'sphinx.ext.graphviz', 'matplotlib.sphinxext.plot_directive',
'myst_parser', 'convert-svg-to-pdf', 'sphinx.ext.intersphinx',
]
# Add redirects here. This should be done whenever a page that is in the
# existing release docs is moved somewhere else so that the URLs don't break.
# The format is
# "page/path/without/extension": "../relative_path_with.html"
# Note that the html path is relative to the redirected page. Always test the
# redirect manually (they aren't tested automatically). See
# https://documatt.gitlab.io/sphinx-reredirects/usage.html
redirects = {
"guides/getting_started/install": "../../install.html",
"documentation-style-guide": "contributing/documentation-style-guide.html",
"gotchas": "explanation/gotchas.html",
"special_topics/classification": "../explanation/classification.html",
"special_topics/finite_diff_derivatives": "../explanation/finite_diff_derivatives.html",
"special_topics/intro": "../explanation/index.html",
"special_topics/index": "../explanation/index.html",
"modules/index": "../reference/index.html",
"modules/physics/index": "../../reference/public/physics/index.html",
"guides/contributing/index": "../../contributing/index.html",
"guides/contributing/dev-setup": "../../contributing/dev-setup.html",
"guides/contributing/dependencies": "../../contributing/dependencies.html",
"guides/contributing/build-docs": "../../contributing/build-docs.html",
"guides/contributing/debug": "../../contributing/debug.html",
"guides/contributing/docstring": "../../contributing/docstring.html",
"guides/documentation-style-guide": "../../contributing/contributing/documentation-style-guide.html",
"guides/make-a-contribution": "../../contributing/make-a-contribution.html",
"guides/contributing/deprecations": "../../contributing/deprecations.html",
"tutorial/preliminaries": "../tutorials/intro-tutorial/preliminaries.html",
"tutorial/intro": "../tutorials/intro-tutorial/intro.html",
"tutorial/index": "../tutorials/intro-tutorial/index.html",
"tutorial/gotchas": "../tutorials/intro-tutorial/gotchas.html",
"tutorial/features": "../tutorials/intro-tutorial/features.html",
"tutorial/next": "../tutorials/intro-tutorial/next.html",
"tutorial/basic_operations": "../tutorials/intro-tutorial/basic_operations.html",
"tutorial/printing": "../tutorials/intro-tutorial/printing.html",
"tutorial/simplification": "../tutorials/intro-tutorial/simplification.html",
"tutorial/calculus": "../tutorials/intro-tutorial/calculus.html",
"tutorial/solvers": "../tutorials/intro-tutorial/solvers.html",
"tutorial/matrices": "../tutorials/intro-tutorial/matrices.html",
"tutorial/manipulation": "../tutorials/intro-tutorial/manipulation.html",
}
html_baseurl = "https://docs.sympy.org/latest/"
# Configure Sphinx copybutton (see https://sphinx-copybutton.readthedocs.io/en/latest/use.html)
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
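# With this pattern the copy button strips leading ">>> ", "... ", "$ " and
# IPython-style "In [n]: " prompts from copied code blocks.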
# Use this to use pngmath instead
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ]
# Enable warnings for all bad cross references. These are turned into errors
# with the -W flag in the Makefile.
nitpicky = True
nitpick_ignore = [
('py:class', 'sympy.logic.boolalg.Boolean')
]
# To stop docstring inheritance.
autodoc_inherit_docstrings = False
# See https://www.sympy.org/sphinx-math-dollar/
mathjax3_config = {
"tex": {
"inlineMath": [['\\(', '\\)']],
"displayMath": [["\\[", "\\]"]],
}
}
# Myst configuration (for .md files). See
# https://myst-parser.readthedocs.io/en/latest/syntax/optional.html
myst_enable_extensions = ["dollarmath", "linkify"]
myst_heading_anchors = 6
# myst_update_mathjax = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
suppress_warnings = ['ref.citation', 'ref.footnote']
# General substitutions.
project = 'SymPy'
copyright = '{} SymPy Development Team'.format(datetime.utcnow().year)
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = sympy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
sys.path.append(os.path.abspath("./_pygments"))
pygments_style = 'styles.SphinxHighContrastStyle'
pygments_dark_style = 'styles.NativeHighContrastStyle'
# Don't show the source code hyperlinks when using matplotlib plot directive.
plot_html_show_source_link = False
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# was classic
# html_theme = "classic"
html_theme = "furo"
# Adjust the sidebar so that the entire sidebar is scrollable
html_sidebars = {
"**": [
"sidebar/scroll-start.html",
"sidebar/brand.html",
"sidebar/search.html",
"sidebar/navigation.html",
"sidebar/versions.html",
"sidebar/scroll-end.html",
],
}
common_theme_variables = {
# Main "SymPy green" colors. Many things uses these colors.
"color-brand-primary": "#52833A",
"color-brand-content": "#307748",
# The left sidebar.
"color-sidebar-background": "#3B5526",
"color-sidebar-background-border": "var(--color-background-primary)",
"color-sidebar-link-text": "#FFFFFF",
"color-sidebar-brand-text": "var(--color-sidebar-link-text--top-level)",
"color-sidebar-link-text--top-level": "#FFFFFF",
"color-sidebar-item-background--hover": "var(--color-brand-primary)",
"color-sidebar-item-expander-background--hover": "var(--color-brand-primary)",
"color-link-underline--hover": "var(--color-link)",
"color-api-keyword": "#000000bd",
"color-api-name": "var(--color-brand-content)",
"color-api-pre-name": "var(--color-brand-content)",
"api-font-size": "var(--font-size--normal)",
"color-foreground-secondary": "#53555B",
# TODO: Add the other types of admonitions here if anyone uses them.
"color-admonition-title-background--seealso": "#CCCCCC",
"color-admonition-title--seealso": "black",
"color-admonition-title-background--note": "#CCCCCC",
"color-admonition-title--note": "black",
"color-admonition-title-background--warning": "var(--color-problematic)",
"color-admonition-title--warning": "white",
"admonition-font-size": "var(--font-size--normal)",
"admonition-title-font-size": "var(--font-size--normal)",
# Note: this doesn't work. If we want to change this, we have to set
# it as the .highlight background in custom.css.
"color-code-background": "hsl(80deg 100% 95%)",
"code-font-size": "var(--font-size--small)",
"font-stack--monospace": 'DejaVu Sans Mono,"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;'
}
html_theme_options = {
"light_css_variables": common_theme_variables,
# The dark variables automatically inherit values from the light variables
"dark_css_variables": {
**common_theme_variables,
"color-brand-primary": "#33CB33",
"color-brand-content": "#1DBD1D",
"color-api-keyword": "#FFFFFFbd",
"color-api-overall": "#FFFFFF90",
"color-api-paren": "#FFFFFF90",
"color-sidebar-item-background--hover": "#52833A",
"color-sidebar-item-expander-background--hover": "#52833A",
# This is the color of the text in the right sidebar
"color-foreground-secondary": "#9DA1AC",
"color-admonition-title-background--seealso": "#555555",
"color-admonition-title-background--note": "#555555",
"color-problematic": "#B30000",
},
# See https://pradyunsg.me/furo/customisation/footer/
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/sympy/sympy",
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""",
"class": "",
},
],
}
# Add a header for PR preview builds. See the Circle CI configuration.
if os.environ.get("CIRCLECI") == "true":
PR_NUMBER = os.environ.get('CIRCLE_PR_NUMBER')
SHA1 = os.environ.get('CIRCLE_SHA1')
html_theme_options['announcement'] = f"""This is a preview build from
SymPy pull request <a href="https://github.com/sympy/sympy/pull/{PR_NUMBER}">
#{PR_NUMBER}</a>. It was built against <a
href="https://github.com/sympy/sympy/pull/{PR_NUMBER}/commits/{SHA1}">{SHA1[:7]}</a>.
If you aren't looking for a PR preview, go to <a
href="https://docs.sympy.org/">the main SymPy documentation</a>. """
# custom.css contains changes that aren't possible with the above because they
# aren't specified in the Furo theme as CSS variables
html_css_files = ['custom.css']
# html_js_files = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
html_domain_indices = ['py-modindex']
# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymPydoc'
language = 'en'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual], toctree_only).
# toctree_only is set to True so that the start file document itself is not included in the
# output, only the documents referenced by it via TOC trees. The extra stuff in the master
# document is intended to show up in the HTML, but doesn't really belong in the LaTeX output.
latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation',
'SymPy Development Team', 'manual', True)]
# Additional stuff for the LaTeX preamble.
# Tweaked to work with XeTeX.
latex_elements = {
'babel': '',
'fontenc': r'''
% Define version of \LaTeX that is usable in math mode
\let\OldLaTeX\LaTeX
\renewcommand{\LaTeX}{\text{\OldLaTeX}}
\usepackage{bm}
\usepackage{amssymb}
\usepackage{fontspec}
\usepackage[english]{babel}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
'fontpkg': '',
'inputenc': '',
'utf8extra': '',
'preamble': r'''
'''
}
# SymPy logo on title page
html_logo = '_static/sympylogo.png'
latex_logo = '_static/sympylogo_big.png'
html_favicon = '../_build/logo/sympy-notailtext-favicon.ico'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Show page numbers next to internal references
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
default_role = 'math'
pngmath_divpng_args = ['-gamma 1.5', '-D 110']
# Note, this is ignored by the mathjax extension
# Any \newcommand should be defined in the file
pngmath_latex_preamble = '\\usepackage{amsmath}\n' \
'\\usepackage{bm}\n' \
'\\usepackage{amsfonts}\n' \
'\\usepackage{amssymb}\n' \
'\\setlength{\\parindent}{0pt}\n'
texinfo_documents = [
(master_doc, 'sympy', 'SymPy Documentation', 'SymPy Development Team',
'SymPy', 'Computer algebra system (CAS) in Python', 'Programming', 1),
]
# Use svg for graphviz
graphviz_output_format = 'svg'
# Enable links to other packages
intersphinx_mapping = {
'matplotlib': ('https://matplotlib.org/stable/', None),
'mpmath': ('https://mpmath.org/doc/current/', None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
}
# Require :external: to reference intersphinx. Prevents accidentally linking
# to something from matplotlib.
intersphinx_disabled_reftypes = ['*']
# Required for linkcode extension.
# Get commit hash from the external file.
commit_hash_filepath = '../commit_hash.txt'
commit_hash = None
if os.path.isfile(commit_hash_filepath):
with open(commit_hash_filepath) as f:
commit_hash = f.readline()
# Get commit hash from the external file.
if not commit_hash:
try:
commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
commit_hash = commit_hash.decode('ascii')
commit_hash = commit_hash.rstrip()
    except Exception:
import warnings
warnings.warn(
"Failed to get the git commit hash as the command " \
"'git rev-parse HEAD' is not working. The commit hash will be " \
"assumed as the SymPy master, but the lines may be misleading " \
"or nonexistent as it is not the correct branch the doc is " \
"built with. Check your installation of 'git' if you want to " \
"resolve this warning.")
commit_hash = 'master'
fork = 'sympy'
blobpath = \
"https://github.com/{}/sympy/blob/{}/sympy/".format(fork, commit_hash)
def linkcode_resolve(domain, info):
"""Determine the URL corresponding to Python object."""
if domain != 'py':
return
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(sympy.__file__))
return blobpath + fn + linespec
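# For example, linkcode_resolve('py', {'module': 'sympy', 'fullname': 'Symbol'})
# returns a URL of the form (exact line numbers depend on the commit):
#   https://github.com/sympy/sympy/blob/<commit_hash>/sympy/core/symbol.py#L...-L...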
|
3d3662e46b649194d77ebff8e48359bdbc7691ec38b51ddcc61e2729a210bf0e | """
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaNoncentral
BetaPrime
BoundedPareto
Cauchy
Chi
ChiNoncentral
ChiSquared
Dagum
Erlang
ExGaussian
Exponential
ExponentialPower
FDistribution
FisherZ
Frechet
Gamma
GammaInverse
Gumbel
Gompertz
Kumaraswamy
Laplace
Levy
LogCauchy
Logistic
LogLogistic
LogitNormal
LogNormal
Lomax
Maxwell
Moyal
Nakagami
Normal
Pareto
PowerFunction
QuadraticU
RaisedCosine
Rayleigh
Reciprocal
ShiftedGompertz
StudentT
Trapezoidal
Triangular
Uniform
UniformSum
VonMises
Wald
Weibull
WignerSemicircle
"""
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import (atan, cos, sin, tan)
from sympy.functions.special.bessel import (besseli, besselj, besselk)
from sympy.functions.special.beta_functions import beta as beta_fn
from sympy.concrete.summations import Sum
from sympy.core.basic import Basic
from sympy.core.function import Lambda
from sympy.core.numbers import (I, Rational, pi)
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import (binomial, factorial)
from sympy.functions.elementary.complexes import (Abs, sign)
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.hyperbolic import sinh
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import asin
from sympy.functions.special.error_functions import (erf, erfc, erfi, erfinv, expint)
from sympy.functions.special.gamma_functions import (gamma, lowergamma, uppergamma)
from sympy.functions.special.hyper import hyper
from sympy.integrals.integrals import integrate
from sympy.logic.boolalg import And
from sympy.sets.sets import Interval
from sympy.matrices import MatrixBase
from sympy.stats.crv import SingleContinuousPSpace, SingleContinuousDistribution
from sympy.stats.rv import _value_check, is_random
oo = S.Infinity
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaNoncentral',
'BetaPrime',
'BoundedPareto',
'Cauchy',
'Chi',
'ChiNoncentral',
'ChiSquared',
'Dagum',
'Erlang',
'ExGaussian',
'Exponential',
'ExponentialPower',
'FDistribution',
'FisherZ',
'Frechet',
'Gamma',
'GammaInverse',
'Gompertz',
'Gumbel',
'Kumaraswamy',
'Laplace',
'Levy',
'LogCauchy',
'Logistic',
'LogLogistic',
'LogitNormal',
'LogNormal',
'Lomax',
'Maxwell',
'Moyal',
'Nakagami',
'Normal',
'GaussianInverse',
'Pareto',
'PowerFunction',
'QuadraticU',
'RaisedCosine',
'Rayleigh',
'Reciprocal',
'StudentT',
'ShiftedGompertz',
'Trapezoidal',
'Triangular',
'Uniform',
'UniformSum',
'VonMises',
'Wald',
'Weibull',
'WignerSemicircle',
]
@is_random.register(MatrixBase)
def _(x):
return any(is_random(i) for i in x)
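# Illustrative consequence of the dispatch above: a matrix is treated as random
# as soon as any of its entries is random, e.g. (with a hypothetical variable)
#   is_random(Matrix([Normal('x', 0, 1), 1]))  ->  True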
def rv(symbol, cls, args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleContinuousPSpace(symbol, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(symbol, CompoundDistribution(dist))
return pspace.value
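# Sketch of how the distribution constructors below use this helper, e.g.
#   Arcsin('x', a, b) simply calls rv('x', ArcsinDistribution, (a, b)),
# which validates the parameters (unless check=False) and returns pspace.value.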
class ContinuousDistributionHandmade(SingleContinuousDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=Interval(-oo, oo)):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = integrate(pdf(x), (x, set))
_value_check(Eq(val, 1) != S.false, "The pdf on the given set is incorrect.")
def ContinuousRV(symbol, density, set=Interval(-oo, oo), **kwargs):
"""
Create a Continuous Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
set : set/Interval
        Represents the region where the pdf is valid; by default it is the whole real line.
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Returns
=======
RandomSymbol
Many common continuous random variable types are already implemented.
This function should be necessary only very rarely.
Examples
========
>>> from sympy import Symbol, sqrt, exp, pi
>>> from sympy.stats import ContinuousRV, P, E
>>> x = Symbol("x")
>>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
>>> X = ContinuousRV(x, pdf)
>>> E(X)
0
>>> P(X>0)
1/2
"""
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
    # `check` defaults to False here, while the `rv` helper defaults it to True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, ContinuousDistributionHandmade, (pdf, set), **kwargs)
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
def pdf(self, x):
a, b = self.a, self.b
return 1/(pi*sqrt((x - a)*(b - x)))
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < a),
(2*asin(sqrt((x - a)/(b - a)))/pi, x <= b),
(S.One, True))
def Arcsin(name, a=0, b=1):
r"""
Create a Continuous Random Variable with an arcsin distribution.
The density of the arcsin distribution is given by
.. math::
f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}
with :math:`x \in (a,b)`. It must hold that :math:`-\infty < a < b < \infty`.
Parameters
==========
a : Real number, the left interval boundary
b : Real number, the right interval boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Arcsin, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = Arcsin("x", a, b)
>>> density(X)(z)
1/(pi*sqrt((-a + z)*(b - z)))
>>> cdf(X)(z)
Piecewise((0, a > z),
(2*asin(sqrt((-a + z)/(-a + b)))/pi, b >= z),
(1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Arcsine_distribution
"""
return rv(name, ArcsinDistribution, (a, b))
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'sigma')
@staticmethod
def check(alpha, beta, sigma):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
_value_check(sigma > 0, "Scale parameter Sigma must be positive.")
@property
def set(self):
return Interval(self.sigma, oo)
def pdf(self, x):
alpha, beta, sigma = self.alpha, self.beta, self.sigma
return (exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2)
*(alpha/x + 2*beta*log(x/sigma)/x))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of the '
'Benini distribution does not exist.')
def Benini(name, alpha, beta, sigma):
r"""
Create a Continuous Random Variable with a Benini distribution.
The density of the Benini distribution is given by
.. math::
f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
-\beta\log^2\left[{\frac{x}{\sigma}}\right]}
\left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)
This is a heavy-tailed distribution and is also known as the log-Rayleigh
distribution.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
sigma : Real number, `\sigma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Benini, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Benini("x", alpha, beta, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ / z \\ / z \ 2/ z \
| 2*beta*log|-----|| - alpha*log|-----| - beta*log |-----|
|alpha \sigma/| \sigma/ \sigma/
|----- + -----------------|*e
\ z z /
>>> cdf(X)(z)
Piecewise((1 - exp(-alpha*log(z/sigma) - beta*log(z/sigma)**2), sigma <= z),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Benini_distribution
.. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html
"""
return rv(name, BeniniDistribution, (alpha, beta, sigma))
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1) * (1 - x)**(beta - 1) / beta_fn(alpha, beta)
def _characteristic_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), I*t)
def _moment_generating_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), t)
def Beta(name, alpha, beta):
r"""
Create a Continuous Random Variable with a Beta distribution.
The density of the Beta distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Beta, density, E, variance
>>> from sympy import Symbol, simplify, pprint, factor
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Beta("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 beta - 1
z *(1 - z)
--------------------------
B(alpha, beta)
>>> simplify(E(X))
alpha/(alpha + beta)
>>> factor(simplify(variance(X)))
alpha*beta/((alpha + beta)**2*(alpha + beta + 1))
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_distribution
.. [2] http://mathworld.wolfram.com/BetaDistribution.html
"""
return rv(name, BetaDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Noncentral Beta distribution ------------------------------------------------------------
class BetaNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'lamda')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta, lamda):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
        _value_check(lamda >= 0, "Noncentrality parameter Lambda must be non-negative.")
def pdf(self, x):
alpha, beta, lamda = self.alpha, self.beta, self.lamda
k = Dummy("k")
return Sum(exp(-lamda / 2) * (lamda / 2)**k * x**(alpha + k - 1) *(
1 - x)**(beta - 1) / (factorial(k) * beta_fn(alpha + k, beta)), (k, 0, oo))
def BetaNoncentral(name, alpha, beta, lamda):
r"""
Create a Continuous Random Variable with a Type I Noncentral Beta distribution.
The density of the Noncentral Beta distribution is given by
.. math::
f(x) := \sum_{k=0}^\infty e^{-\lambda/2}\frac{(\lambda/2)^k}{k!}
\frac{x^{\alpha+k-1}(1-x)^{\beta-1}}{\mathrm{B}(\alpha+k,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
lamda : Real number, `\lambda \geq 0`, noncentrality parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaNoncentral, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> lamda = Symbol("lamda", nonnegative=True)
>>> z = Symbol("z")
>>> X = BetaNoncentral("x", alpha, beta, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
oo
_____
\ `
\ -lamda
\ k -------
\ k + alpha - 1 /lamda\ beta - 1 2
) z *|-----| *(1 - z) *e
/ \ 2 /
/ ------------------------------------------------
/ B(k + alpha, beta)*k!
/____,
k = 0
Compute cdf with specific 'x', 'alpha', 'beta' and 'lamda' values as follows:
>>> cdf(BetaNoncentral("x", 1, 1, 1), evaluate=False)(2).doit()
2*exp(1/2)
    Passing evaluate=False prevents the sum from being evaluated for a
    general x before the specific argument 2 is substituted.
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_beta_distribution
.. [2] https://reference.wolfram.com/language/ref/NoncentralBetaDistribution.html
"""
return rv(name, BetaNoncentralDistribution, (alpha, beta, lamda))
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimeDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
set = Interval(0, oo)
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta)
def BetaPrime(name, alpha, beta):
r"""
Create a continuous random variable with a Beta prime distribution.
The density of the Beta prime distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}
with :math:`x > 0`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaPrime, density
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = BetaPrime("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 -alpha - beta
z *(z + 1)
-------------------------------
B(alpha, beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_prime_distribution
.. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
"""
return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Bounded Pareto Distribution --------------------------------------------------
class BoundedParetoDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'left', 'right')
@property
def set(self):
return Interval(self.left, self.right)
@staticmethod
def check(alpha, left, right):
        _value_check(alpha.is_positive, "Shape must be positive.")
        _value_check(left.is_positive, "Left value should be positive.")
        _value_check(right > left, "Right should be greater than left.")
def pdf(self, x):
alpha, left, right = self.alpha, self.left, self.right
num = alpha * (left**alpha) * x**(- alpha -1)
den = 1 - (left/right)**alpha
return num/den
def BoundedPareto(name, alpha, left, right):
r"""
Create a continuous random variable with a Bounded Pareto distribution.
The density of the Bounded Pareto distribution is given by
.. math::
f(x) := \frac{\alpha L^{\alpha}x^{-\alpha-1}}{1-(\frac{L}{H})^{\alpha}}
Parameters
==========
alpha : Real Number, `\alpha > 0`
Shape parameter
left : Real Number, `left > 0`
Location parameter
right : Real Number, `right > left`
Location parameter
Examples
========
>>> from sympy.stats import BoundedPareto, density, cdf, E
>>> from sympy import symbols
>>> L, H = symbols('L, H', positive=True)
>>> X = BoundedPareto('X', 2, L, H)
>>> x = symbols('x')
>>> density(X)(x)
2*L**2/(x**3*(1 - L**2/H**2))
>>> cdf(X)(x)
Piecewise((-H**2*L**2/(x**2*(H**2 - L**2)) + H**2/(H**2 - L**2), L <= x), (0, True))
>>> E(X).simplify()
2*H*L/(H + L)
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution#Bounded_Pareto_distribution
"""
return rv(name, BoundedParetoDistribution, (alpha, left, right))
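# Hedged illustration (a sketch, not part of the module's API): the helper below is
# defined but never called, so importing this file is unaffected. It assumes the
# sympy.stats helper ``density`` behaves as in the docstring above and checks that
# the Bounded Pareto density integrates to one over [left, right] for concrete values.
def _bounded_pareto_normalization_demo():
    from sympy import Symbol, integrate
    from sympy.stats import density
    x = Symbol("x", positive=True)
    X = BoundedPareto("X", 2, 1, 4)  # alpha=2, left=1, right=4
    # The density 32/(15*x**3) integrates to 1 over the support [1, 4].
    return integrate(density(X)(x), (x, 1, 4))  # expected: 1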
# ------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyDistribution(SingleContinuousDistribution):
_argnames = ('x0', 'gamma')
@staticmethod
def check(x0, gamma):
_value_check(gamma > 0, "Scale parameter Gamma must be positive.")
_value_check(x0.is_real, "Location parameter must be real.")
def pdf(self, x):
return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2))
def _cdf(self, x):
x0, gamma = self.x0, self.gamma
return (1/pi)*atan((x - x0)/gamma) + S.Half
def _characteristic_function(self, t):
return exp(self.x0 * I * t - self.gamma * Abs(t))
def _moment_generating_function(self, t):
raise NotImplementedError("The moment generating function for the "
"Cauchy distribution does not exist.")
def _quantile(self, p):
return self.x0 + self.gamma*tan(pi*(p - S.Half))
def Cauchy(name, x0, gamma):
r"""
Create a continuous random variable with a Cauchy distribution.
The density of the Cauchy distribution is given by
.. math::
f(x) := \frac{1}{\pi \gamma [1 + {(\frac{x-x_0}{\gamma})}^2]}
Parameters
==========
x0 : Real number, the location
gamma : Real number, `\gamma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Cauchy, density
>>> from sympy import Symbol
>>> x0 = Symbol("x0")
>>> gamma = Symbol("gamma", positive=True)
>>> z = Symbol("z")
>>> X = Cauchy("x", x0, gamma)
>>> density(X)(z)
1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Cauchy_distribution
.. [2] http://mathworld.wolfram.com/CauchyDistribution.html
"""
return rv(name, CauchyDistribution, (x0, gamma))
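# Hedged illustration (a sketch, never called on import): since CauchyDistribution
# defines both _cdf and _quantile in closed form, composing the sympy.stats helpers
# cdf and quantile should return the probability unchanged.
def _cauchy_quantile_roundtrip_demo():
    from sympy import Rational
    from sympy.stats import cdf, quantile
    X = Cauchy("X", 0, 1)
    # quantile is the functional inverse of the cdf, so the round trip gives back p.
    return cdf(X)(quantile(X)(Rational(3, 4)))  # expected: 3/4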
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
return 2**(1 - self.k/2)*x**(self.k - 1)*exp(-x**2/2)/gamma(self.k/2)
def _characteristic_function(self, t):
k = self.k
part_1 = hyper((k/2,), (S.Half,), -t**2/2)
part_2 = I*t*sqrt(2)*gamma((k+1)/2)/gamma(k/2)
part_3 = hyper(((k+1)/2,), (Rational(3, 2),), -t**2/2)
return part_1 + part_2*part_3
def _moment_generating_function(self, t):
k = self.k
part_1 = hyper((k / 2,), (S.Half,), t ** 2 / 2)
part_2 = t * sqrt(2) * gamma((k + 1) / 2) / gamma(k / 2)
part_3 = hyper(((k + 1) / 2,), (S(3) / 2,), t ** 2 / 2)
return part_1 + part_2 * part_3
def Chi(name, k):
r"""
Create a continuous random variable with a Chi distribution.
The density of the Chi distribution is given by
.. math::
f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer, The number of degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Chi, density, E
>>> from sympy import Symbol, simplify
>>> k = Symbol("k", integer=True)
>>> z = Symbol("z")
>>> X = Chi("x", k)
>>> density(X)(z)
2**(1 - k/2)*z**(k - 1)*exp(-z**2/2)/gamma(k/2)
>>> simplify(E(X))
sqrt(2)*gamma(k/2 + 1/2)/gamma(k/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_distribution
.. [2] http://mathworld.wolfram.com/ChiDistribution.html
"""
return rv(name, ChiDistribution, (k,))
#-------------------------------------------------------------------------------
# Non-central Chi distribution -------------------------------------------------
class ChiNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('k', 'l')
@staticmethod
def check(k, l):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
_value_check(l > 0, "Shift parameter Lambda must be positive.")
set = Interval(0, oo)
def pdf(self, x):
k, l = self.k, self.l
return exp(-(x**2+l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2-1, l*x)
def ChiNoncentral(name, k, l):
r"""
Create a continuous random variable with a non-central Chi distribution.
Explanation
===========
The density of the non-central Chi distribution is given by
.. math::
f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda}
{(\lambda x)^{k/2}} I_{k/2-1}(\lambda x)
with `x \geq 0`. Here, `I_\nu (x)` is the
:ref:`modified Bessel function of the first kind <besseli>`.
Parameters
==========
k : A positive Integer, $k > 0$
The number of degrees of freedom.
lambda : Real number, `\lambda > 0`
Shift parameter.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiNoncentral, density
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True)
>>> l = Symbol("l")
>>> z = Symbol("z")
>>> X = ChiNoncentral("x", k, l)
>>> density(X)(z)
l*z**k*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z)/(l*z)**(k/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_chi_distribution
"""
return rv(name, ChiNoncentralDistribution, (k, l))
#-------------------------------------------------------------------------------
# Chi squared distribution -----------------------------------------------------
class ChiSquaredDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
k = self.k
return 1/(2**(k/2)*gamma(k/2))*x**(k/2 - 1)*exp(-x/2)
def _cdf(self, x):
k = self.k
return Piecewise(
(S.One/gamma(k/2)*lowergamma(k/2, x/2), x >= 0),
(0, True)
)
def _characteristic_function(self, t):
return (1 - 2*I*t)**(-self.k/2)
def _moment_generating_function(self, t):
return (1 - 2*t)**(-self.k/2)
def ChiSquared(name, k):
r"""
Create a continuous random variable with a Chi-squared distribution.
Explanation
===========
The density of the Chi-squared distribution is given by
.. math::
f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)}
x^{\frac{k}{2}-1} e^{-\frac{x}{2}}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer
The number of degrees of freedom.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiSquared, density, E, variance, moment
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True, positive=True)
>>> z = Symbol("z")
>>> X = ChiSquared("x", k)
>>> density(X)(z)
z**(k/2 - 1)*exp(-z/2)/(2**(k/2)*gamma(k/2))
>>> E(X)
k
>>> variance(X)
2*k
>>> moment(X, 3)
k**3 + 6*k**2 + 8*k
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_squared_distribution
.. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html
"""
return rv(name, ChiSquaredDistribution, (k, ))
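# Hedged illustration (a sketch, never called on import): a ChiSquared(k) variable
# should have exactly the Gamma(k/2, 2) density, so the symbolic difference of the
# two densities is expected to simplify to zero.
def _chi_squared_gamma_relation_demo():
    from sympy import Symbol, simplify
    from sympy.stats import density
    k = Symbol("k", integer=True, positive=True)
    z = Symbol("z", positive=True)
    diff = density(ChiSquared("c", k))(z) - density(Gamma("g", k/2, 2))(z)
    return simplify(diff)  # expected: 0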
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumDistribution(SingleContinuousDistribution):
_argnames = ('p', 'a', 'b')
set = Interval(0, oo)
@staticmethod
def check(p, a, b):
_value_check(p > 0, "Shape parameter p must be positive.")
_value_check(a > 0, "Shape parameter a must be positive.")
_value_check(b > 0, "Scale parameter b must be positive.")
def pdf(self, x):
p, a, b = self.p, self.a, self.b
return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1)))
def _cdf(self, x):
p, a, b = self.p, self.a, self.b
return Piecewise(((S.One + (S(x)/b)**-a)**-p, x>=0),
(S.Zero, True))
def Dagum(name, p, a, b):
r"""
Create a continuous random variable with a Dagum distribution.
Explanation
===========
The density of the Dagum distribution is given by
.. math::
f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}}
{\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right)
with :math:`x > 0`.
Parameters
==========
p : Real number
`p > 0`, a shape.
a : Real number
`a > 0`, a shape.
b : Real number
`b > 0`, a scale.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Dagum, density, cdf
>>> from sympy import Symbol
>>> p = Symbol("p", positive=True)
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Dagum("x", p, a, b)
>>> density(X)(z)
a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z
>>> cdf(X)(z)
Piecewise(((1 + (z/b)**(-a))**(-p), z >= 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Dagum_distribution
"""
return rv(name, DagumDistribution, (p, a, b))
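# Hedged illustration (a sketch, never called on import): evaluating the closed-form
# Dagum cdf at the scale parameter b shows that b is the 2**(-p) quantile.
def _dagum_scale_quantile_demo():
    from sympy import Symbol
    from sympy.stats import cdf
    p = Symbol("p", positive=True)
    a = Symbol("a", positive=True)
    b = Symbol("b", positive=True)
    X = Dagum("X", p, a, b)
    return cdf(X)(b)  # expected: 2**(-p)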
#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
def Erlang(name, k, l):
r"""
Create a continuous random variable with an Erlang distribution.
Explanation
===========
The density of the Erlang distribution is given by
.. math::
f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!}
with :math:`x \in [0, \infty)`.
Parameters
==========
k : Positive integer
l : Real number, `\lambda > 0`, the rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Erlang, density, cdf, E, variance
>>> from sympy import Symbol, simplify, pprint
>>> k = Symbol("k", integer=True, positive=True)
>>> l = Symbol("l", positive=True)
>>> z = Symbol("z")
>>> X = Erlang("x", k, l)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k k - 1 -l*z
l *z *e
---------------
Gamma(k)
>>> C = cdf(X)(z)
>>> pprint(C, use_unicode=False)
/lowergamma(k, l*z)
|------------------ for z > 0
< Gamma(k)
|
\ 0 otherwise
>>> E(X)
k/l
>>> simplify(variance(X))
k/l**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Erlang_distribution
.. [2] http://mathworld.wolfram.com/ErlangDistribution.html
"""
return rv(name, GammaDistribution, (k, S.One/l))
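# Hedged illustration (a sketch, never called on import): Erlang(k, l) is implemented
# directly as Gamma(k, 1/l), so for concrete parameters the two densities coincide.
def _erlang_is_gamma_demo():
    from sympy import Rational, Symbol
    from sympy.stats import density
    z = Symbol("z", positive=True)
    d_erlang = density(Erlang("e", 3, 2))(z)
    d_gamma = density(Gamma("g", 3, Rational(1, 2)))(z)
    return d_erlang - d_gamma  # expected: 0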
# -------------------------------------------------------------------------------
# ExGaussian distribution -----------------------------------------------------
class ExGaussianDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std', 'rate')
set = Interval(-oo, oo)
@staticmethod
def check(mean, std, rate):
_value_check(
std > 0, "Standard deviation of ExGaussian must be positive.")
_value_check(rate > 0, "Rate of ExGaussian must be positive.")
def pdf(self, x):
mean, std, rate = self.mean, self.std, self.rate
term1 = rate/2
term2 = exp(rate * (2 * mean + rate * std**2 - 2*x)/2)
term3 = erfc((mean + rate*std**2 - x)/(sqrt(2)*std))
return term1*term2*term3
def _cdf(self, x):
from sympy.stats import cdf
mean, std, rate = self.mean, self.std, self.rate
u = rate*(x - mean)
v = rate*std
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
return GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
def _characteristic_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - I*t/rate)**(-1)
term2 = exp(I*mean*t - std**2*t**2/2)
return term1 * term2
def _moment_generating_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - t/rate)**(-1)
term2 = exp(mean*t + std**2*t**2/2)
return term1*term2
def ExGaussian(name, mean, std, rate):
r"""
Create a continuous random variable with an Exponentially modified
Gaussian (EMG) distribution.
Explanation
===========
The density of the exponentially modified Gaussian distribution is given by
.. math::
f(x) := \frac{\lambda}{2}e^{\frac{\lambda}{2}(2\mu+\lambda\sigma^2-2x)}
\text{erfc}(\frac{\mu + \lambda\sigma^2 - x}{\sqrt{2}\sigma})
with $x \in \mathbb{R}$. Note that the expected value is $\mu + 1/\lambda$.
Parameters
==========
name : A string giving a name for this distribution
mean : A Real number, the mean of Gaussian component
std : A positive Real number,
:math:`\sigma > 0`, the standard deviation of the Gaussian component
rate : A positive Real number,
:math:`\lambda > 0`, the rate of the Exponential component
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExGaussian, density, cdf, E
>>> from sympy.stats import variance, skewness
>>> from sympy import Symbol, pprint, simplify
>>> mean = Symbol("mu")
>>> std = Symbol("sigma", positive=True)
>>> rate = Symbol("lamda", positive=True)
>>> z = Symbol("z")
>>> X = ExGaussian("x", mean, std, rate)
>>> pprint(density(X)(z), use_unicode=False)
/ 2 \
lamda*\lamda*sigma + 2*mu - 2*z/
--------------------------------- / ___ / 2 \\
2 |\/ 2 *\lamda*sigma + mu - z/|
lamda*e *erfc|-----------------------------|
\ 2*sigma /
----------------------------------------------------------------------------
2
>>> cdf(X)(z)
-(erf(sqrt(2)*(-lamda**2*sigma**2 + lamda*(-mu + z))/(2*lamda*sigma))/2 + 1/2)*exp(lamda**2*sigma**2/2 - lamda*(-mu + z)) + erf(sqrt(2)*(-mu + z)/(2*sigma))/2 + 1/2
>>> E(X)
(lamda*mu + 1)/lamda
>>> simplify(variance(X))
sigma**2 + lamda**(-2)
>>> simplify(skewness(X))
2/(lamda**2*sigma**2 + 1)**(3/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
"""
return rv(name, ExGaussianDistribution, (mean, std, rate))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialDistribution(SingleContinuousDistribution):
_argnames = ('rate',)
set = Interval(0, oo)
@staticmethod
def check(rate):
_value_check(rate > 0, "Rate must be positive.")
def pdf(self, x):
return self.rate * exp(-self.rate*x)
def _cdf(self, x):
return Piecewise(
(S.One - exp(-self.rate*x), x >= 0),
(0, True),
)
def _characteristic_function(self, t):
rate = self.rate
return rate / (rate - I*t)
def _moment_generating_function(self, t):
rate = self.rate
return rate / (rate - t)
def _quantile(self, p):
return -log(1-p)/self.rate
def Exponential(name, rate):
r"""
Create a continuous random variable with an Exponential distribution.
Explanation
===========
The density of the exponential distribution is given by
.. math::
f(x) := \lambda \exp(-\lambda x)
with $x > 0$. Note that the expected value is `1/\lambda`.
Parameters
==========
rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Exponential, density, cdf, E
>>> from sympy.stats import variance, std, skewness, quantile
>>> from sympy import Symbol
>>> l = Symbol("lambda", positive=True)
>>> z = Symbol("z")
>>> p = Symbol("p")
>>> X = Exponential("x", l)
>>> density(X)(z)
lambda*exp(-lambda*z)
>>> cdf(X)(z)
Piecewise((1 - exp(-lambda*z), z >= 0), (0, True))
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> E(X)
1/lambda
>>> variance(X)
lambda**(-2)
>>> skewness(X)
2
>>> X = Exponential('x', 10)
>>> density(X)(z)
10*exp(-10*z)
>>> E(X)
1/10
>>> std(X)
1/10
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponential_distribution
.. [2] http://mathworld.wolfram.com/ExponentialDistribution.html
"""
return rv(name, ExponentialDistribution, (rate, ))
# -------------------------------------------------------------------------------
# Exponential Power distribution -----------------------------------------------------
class ExponentialPowerDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'alpha', 'beta')
set = Interval(-oo, oo)
@staticmethod
def check(mu, alpha, beta):
_value_check(alpha > 0, "Scale parameter alpha must be positive.")
_value_check(beta > 0, "Shape parameter beta must be positive.")
def pdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = beta*exp(-(Abs(x - mu)/alpha)**beta)
den = 2*alpha*gamma(1/beta)
return num/den
def _cdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = lowergamma(1/beta, (Abs(x - mu) / alpha)**beta)
den = 2*gamma(1/beta)
return sign(x - mu)*num/den + S.Half
def ExponentialPower(name, mu, alpha, beta):
r"""
Create a Continuous Random Variable with Exponential Power distribution.
This distribution is also known as the Generalized Normal
distribution, version 1.
Explanation
===========
The density of the Exponential Power distribution is given by
.. math::
f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})}
e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}}
with :math:`x \in (-\infty, \infty)`.
Parameters
==========
mu : Real number
A location.
alpha : Real number,`\alpha > 0`
A scale.
beta : Real number, `\beta > 0`
A shape.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExponentialPower, density, cdf
>>> from sympy import Symbol, pprint
>>> z = Symbol("z")
>>> mu = Symbol("mu")
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> X = ExponentialPower("x", mu, alpha, beta)
>>> pprint(density(X)(z), use_unicode=False)
beta
/|mu - z|\
-|--------|
\ alpha /
beta*e
---------------------
/ 1 \
2*alpha*Gamma|----|
\beta/
>>> cdf(X)(z)
1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta))
References
==========
.. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html
.. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
"""
return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))
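# Hedged illustration (a sketch, never called on import): with shape beta = 1 the
# Exponential Power density is expected to reduce to the Laplace(mu, alpha) density,
# one of the special cases of the Generalized Normal family mentioned above.
def _exponential_power_reduces_to_laplace_demo():
    from sympy import Symbol, simplify
    from sympy.stats import density
    mu = Symbol("mu", real=True)
    alpha = Symbol("alpha", positive=True)
    z = Symbol("z", real=True)
    diff = density(ExponentialPower("x", mu, alpha, 1))(z) - density(Laplace("l", mu, alpha))(z)
    return simplify(diff)  # expected: 0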
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
class FDistributionDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(0, oo)
@staticmethod
def check(d1, d2):
_value_check((d1 > 0, d1.is_integer),
"Degrees of freedom d1 must be positive integer.")
_value_check((d2 > 0, d2.is_integer),
"Degrees of freedom d2 must be positive integer.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2))
/ (x * beta_fn(d1/2, d2/2)))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'F-distribution does not exist.')
def FDistribution(name, d1, d2):
r"""
Create a continuous random variable with a F distribution.
Explanation
===========
The density of the F distribution is given by
.. math::
f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
{(d_1 x + d_2)^{d_1 + d_2}}}}
{x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}
with :math:`x > 0`.
Parameters
==========
d1 : `d_1 > 0`, where `d_1` is the degrees of freedom (`n_1 - 1`)
d2 : `d_2 > 0`, where `d_2` is the degrees of freedom (`n_2 - 1`)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FDistribution, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FDistribution("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d2
-- ______________________________
2 / d1 -d1 - d2
d2 *\/ (d1*z) *(d1*z + d2)
--------------------------------------
/d1 d2\
z*B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/F-distribution
.. [2] http://mathworld.wolfram.com/F-Distribution.html
"""
return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
class FisherZDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(-oo, oo)
@staticmethod
def check(d1, d2):
_value_check(d1 > 0, "Degree of freedom d1 must be positive.")
_value_check(d2 > 0, "Degree of freedom d2 must be positive.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2) *
exp(d1*x) / (d1*exp(2*x)+d2)**((d1+d2)/2))
def FisherZ(name, d1, d2):
r"""
Create a Continuous Random Variable with a Fisher's Z distribution.
Explanation
===========
The density of the Fisher's Z distribution is given by
.. math::
f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)}
\frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}}
.. TODO - What is the difference between these degrees of freedom?
Parameters
==========
d1 : `d_1 > 0`
Degree of freedom.
d2 : `d_2 > 0`
Degree of freedom.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FisherZ, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FisherZ("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d1 d2
d1 d2 - -- - --
-- -- 2 2
2 2 / 2*z \ d1*z
2*d1 *d2 *\d1*e + d2/ *e
-----------------------------------------
/d1 d2\
B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution
.. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
"""
return rv(name, FisherZDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Frechet distribution ---------------------------------------------------------
class FrechetDistribution(SingleContinuousDistribution):
_argnames = ('a', 's', 'm')
set = Interval(0, oo)
@staticmethod
def check(a, s, m):
_value_check(a > 0, "Shape parameter alpha must be positive.")
_value_check(s > 0, "Scale parameter s must be positive.")
def __new__(cls, a, s=1, m=0):
a, s, m = list(map(sympify, (a, s, m)))
return Basic.__new__(cls, a, s, m)
def pdf(self, x):
a, s, m = self.a, self.s, self.m
return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a))
def _cdf(self, x):
a, s, m = self.a, self.s, self.m
return Piecewise((exp(-((x-m)/s)**(-a)), x >= m),
(S.Zero, True))
def Frechet(name, a, s=1, m=0):
r"""
Create a continuous random variable with a Frechet distribution.
Explanation
===========
The density of the Frechet distribution is given by
.. math::
f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha}
e^{-(\frac{x-m}{s})^{-\alpha}}
with :math:`x \geq m`.
Parameters
==========
a : Real number, :math:`a \in \left(0, \infty\right)` the shape
s : Real number, :math:`s \in \left(0, \infty\right)` the scale
m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Frechet, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", positive=True)
>>> s = Symbol("s", positive=True)
>>> m = Symbol("m", real=True)
>>> z = Symbol("z")
>>> X = Frechet("x", a, s, m)
>>> density(X)(z)
a*((-m + z)/s)**(-a - 1)*exp(-1/((-m + z)/s)**a)/s
>>> cdf(X)(z)
Piecewise((exp(-1/((-m + z)/s)**a), m <= z), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution
"""
return rv(name, FrechetDistribution, (a, s, m))
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaDistribution(SingleContinuousDistribution):
_argnames = ('k', 'theta')
set = Interval(0, oo)
@staticmethod
def check(k, theta):
_value_check(k > 0, "k must be positive")
_value_check(theta > 0, "Theta must be positive")
def pdf(self, x):
k, theta = self.k, self.theta
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def _cdf(self, x):
k, theta = self.k, self.theta
return Piecewise(
(lowergamma(k, S(x)/theta)/gamma(k), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
return (1 - self.theta*I*t)**(-self.k)
def _moment_generating_function(self, t):
return (1- self.theta*t)**(-self.k)
def Gamma(name, k, theta):
r"""
Create a continuous random variable with a Gamma distribution.
Explanation
===========
The density of the Gamma distribution is given by
.. math::
f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
with :math:`x \in [0, \infty)`.
Parameters
==========
k : Real number, `k > 0`, a shape
theta : Real number, `\theta > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gamma, density, cdf, E, variance
>>> from sympy import Symbol, pprint, simplify
>>> k = Symbol("k", positive=True)
>>> theta = Symbol("theta", positive=True)
>>> z = Symbol("z")
>>> X = Gamma("x", k, theta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-z
-----
-k k - 1 theta
theta *z *e
---------------------
Gamma(k)
>>> C = cdf(X, meijerg=True)(z)
>>> pprint(C, use_unicode=False)
/ / z \
|k*lowergamma|k, -----|
| \ theta/
<---------------------- for z >= 0
| Gamma(k + 1)
|
\ 0 otherwise
>>> E(X)
k*theta
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
k*theta
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_distribution
.. [2] http://mathworld.wolfram.com/GammaDistribution.html
"""
return rv(name, GammaDistribution, (k, theta))
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
class GammaInverseDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, oo)
@staticmethod
def check(a, b):
_value_check(a > 0, "alpha must be positive")
_value_check(b > 0, "beta must be positive")
def pdf(self, x):
a, b = self.a, self.b
return b**a/gamma(a) * x**(-a-1) * exp(-b/x)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise((uppergamma(a,b/x)/gamma(a), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
a, b = self.a, self.b
return 2 * (-I*b*t)**(a/2) * besselk(a, sqrt(-4*I*b*t)) / gamma(a)
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'gamma inverse distribution does not exist.')
def GammaInverse(name, a, b):
r"""
Create a continuous random variable with an inverse Gamma distribution.
Explanation
===========
The density of the inverse Gamma distribution is given by
.. math::
f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1}
\exp\left(\frac{-\beta}{x}\right)
with :math:`x > 0`.
Parameters
==========
a : Real number, `a > 0`, a shape
b : Real number, `b > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GammaInverse, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = GammaInverse("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-b
---
a -a - 1 z
b *z *e
---------------
Gamma(a)
>>> cdf(X)(z)
Piecewise((uppergamma(a, b/z)/gamma(a), z > 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse-gamma_distribution
"""
return rv(name, GammaInverseDistribution, (a, b))
#-------------------------------------------------------------------------------
# Gumbel distribution (Maximum and Minimum) --------------------------------------------------------
class GumbelDistribution(SingleContinuousDistribution):
_argnames = ('beta', 'mu', 'minimum')
set = Interval(-oo, oo)
@staticmethod
def check(beta, mu, minimum):
_value_check(beta > 0, "Scale parameter beta must be positive.")
def pdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
f_max = (1/beta)*exp(-z - exp(-z))
f_min = (1/beta)*exp(z - exp(z))
return Piecewise((f_min, self.minimum), (f_max, not self.minimum))
def _cdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
F_max = exp(-exp(-z))
F_min = 1 - exp(-exp(z))
return Piecewise((F_min, self.minimum), (F_max, not self.minimum))
def _characteristic_function(self, t):
cf_max = gamma(1 - I*self.beta*t) * exp(I*self.mu*t)
cf_min = gamma(1 + I*self.beta*t) * exp(I*self.mu*t)
return Piecewise((cf_min, self.minimum), (cf_max, not self.minimum))
def _moment_generating_function(self, t):
mgf_max = gamma(1 - self.beta*t) * exp(self.mu*t)
mgf_min = gamma(1 + self.beta*t) * exp(self.mu*t)
return Piecewise((mgf_min, self.minimum), (mgf_max, not self.minimum))
def Gumbel(name, beta, mu, minimum=False):
r"""
Create a Continuous Random Variable with Gumbel distribution.
Explanation
===========
The density of the Gumbel distribution is given by
For Maximum
.. math::
f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta}
- \exp \left( -\dfrac{x - \mu}{\beta} \right) \right)
with :math:`x \in (-\infty, \infty)`.
For Minimum
.. math::
f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}} + \frac{- \mu + x}{\beta}}}{\beta}
with :math:`x \in (-\infty, \infty)`.
Parameters
==========
mu : Real number, `\mu`, a location
beta : Real number, `\beta > 0`, a scale
minimum : Boolean, by default ``False``, set to ``True`` for enabling minimum distribution
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gumbel, density, cdf
>>> from sympy import Symbol
>>> x = Symbol("x")
>>> mu = Symbol("mu")
>>> beta = Symbol("beta", positive=True)
>>> X = Gumbel("x", beta, mu)
>>> density(X)(x)
exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta
>>> cdf(X)(x)
exp(-exp(-(-mu + x)/beta))
References
==========
.. [1] http://mathworld.wolfram.com/GumbelDistribution.html
.. [2] https://en.wikipedia.org/wiki/Gumbel_distribution
.. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html
.. [4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html
"""
return rv(name, GumbelDistribution, (beta, mu, minimum))
#-------------------------------------------------------------------------------
# Gompertz distribution --------------------------------------------------------
class GompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
eta, b = self.eta, self.b
return b*eta*exp(b*x)*exp(eta)*exp(-eta*exp(b*x))
def _cdf(self, x):
eta, b = self.eta, self.b
return 1 - exp(eta)*exp(-eta*exp(b*x))
def _moment_generating_function(self, t):
eta, b = self.eta, self.b
return eta * exp(eta) * expint(t/b, eta)
def Gompertz(name, b, eta):
r"""
Create a Continuous Random Variable with Gompertz distribution.
Explanation
===========
The density of the Gompertz distribution is given by
.. math::
f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right)
with :math:`x \in [0, \infty)`.
Parameters
==========
b : Real number, `b > 0`, a scale
eta : Real number, `\eta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> z = Symbol("z")
>>> X = Gompertz("x", b, eta)
>>> density(X)(z)
b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z))
References
==========
.. [1] https://en.wikipedia.org/wiki/Gompertz_distribution
"""
return rv(name, GompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# Kumaraswamy distribution -----------------------------------------------------
class KumaraswamyDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, 1)
@staticmethod
def check(a, b):
_value_check(a > 0, "a must be positive")
_value_check(b > 0, "b must be positive")
def pdf(self, x):
a, b = self.a, self.b
return a * b * x**(a-1) * (1-x**a)**(b-1)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < S.Zero),
(1 - (1 - x**a)**b, x <= S.One),
(S.One, True))
def Kumaraswamy(name, a, b):
r"""
Create a Continuous Random Variable with a Kumaraswamy distribution.
Explanation
===========
The density of the Kumaraswamy distribution is given by
.. math::
f(x) := a b x^{a-1} (1-x^a)^{b-1}
with :math:`x \in [0,1]`.
Parameters
==========
a : Real number, `a > 0`, a shape
b : Real number, `b > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Kumaraswamy, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Kumaraswamy("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
b - 1
a - 1 / a\
a*b*z *\1 - z /
>>> cdf(X)(z)
Piecewise((0, z < 0), (1 - (1 - z**a)**b, z <= 1), (1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Kumaraswamy_distribution
"""
return rv(name, KumaraswamyDistribution, (a, b))
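# Hedged illustration (a sketch, never called on import): the Kumaraswamy support
# ends at 1, where the closed-form cdf defined above reaches exactly 1.
def _kumaraswamy_cdf_endpoint_demo():
    from sympy import Symbol
    from sympy.stats import cdf
    a = Symbol("a", positive=True)
    b = Symbol("b", positive=True)
    X = Kumaraswamy("X", a, b)
    return cdf(X)(1)  # expected: 1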
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplaceDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'b')
set = Interval(-oo, oo)
@staticmethod
def check(mu, b):
_value_check(b > 0, "Scale parameter b must be positive.")
_value_check(mu.is_real, "Location parameter mu should be real")
def pdf(self, x):
mu, b = self.mu, self.b
return 1/(2*b)*exp(-Abs(x - mu)/b)
def _cdf(self, x):
mu, b = self.mu, self.b
return Piecewise(
(S.Half*exp((x - mu)/b), x < mu),
(S.One - S.Half*exp(-(x - mu)/b), x >= mu)
)
def _characteristic_function(self, t):
return exp(self.mu*I*t) / (1 + self.b**2*t**2)
def _moment_generating_function(self, t):
return exp(self.mu*t) / (1 - self.b**2*t**2)
def Laplace(name, mu, b):
r"""
Create a continuous random variable with a Laplace distribution.
Explanation
===========
The density of the Laplace distribution is given by
.. math::
f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)
Parameters
==========
mu : Real number or a list/matrix, the location (mean) or the
location vector
b : Real number or a positive definite matrix, representing a scale
or the covariance matrix.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Laplace, density, cdf
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Laplace("x", mu, b)
>>> density(X)(z)
exp(-Abs(mu - z)/b)/(2*b)
>>> cdf(X)(z)
Piecewise((exp((-mu + z)/b)/2, mu > z), (1 - exp((mu - z)/b)/2, True))
>>> L = Laplace('L', [1, 2], [[1, 0], [0, 1]])
>>> pprint(density(L)(1, 2), use_unicode=False)
5 / ____\
e *besselk\0, \/ 35 /
---------------------
pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Laplace_distribution
.. [2] http://mathworld.wolfram.com/LaplaceDistribution.html
"""
if isinstance(mu, (list, MatrixBase)) and\
isinstance(b, (list, MatrixBase)):
from sympy.stats.joint_rv_types import MultivariateLaplace
return MultivariateLaplace(name, mu, b)
return rv(name, LaplaceDistribution, (mu, b))
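# Hedged illustration (a sketch, never called on import): the Laplace location mu is
# its median, so the closed-form cdf defined above evaluates to 1/2 at x = mu.
def _laplace_median_demo():
    from sympy import Symbol
    from sympy.stats import cdf
    mu = Symbol("mu", real=True)
    b = Symbol("b", positive=True)
    X = Laplace("X", mu, b)
    return cdf(X)(mu)  # expected: 1/2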
#-------------------------------------------------------------------------------
# Levy distribution ---------------------------------------------------------
class LevyDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'c')
@property
def set(self):
return Interval(self.mu, oo)
@staticmethod
def check(mu, c):
_value_check(c > 0, "c (scale parameter) must be positive")
_value_check(mu.is_real, "mu (location parameter) must be real")
def pdf(self, x):
mu, c = self.mu, self.c
return sqrt(c/(2*pi))*exp(-c/(2*(x - mu)))/((x - mu)**(S.One + S.Half))
def _cdf(self, x):
mu, c = self.mu, self.c
return erfc(sqrt(c/(2*(x - mu))))
def _characteristic_function(self, t):
mu, c = self.mu, self.c
return exp(I * mu * t - sqrt(-2 * I * c * t))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of Levy distribution does not exist.')
def Levy(name, mu, c):
r"""
Create a continuous random variable with a Levy distribution.
The density of the Levy distribution is given by
.. math::
f(x) := \sqrt{\frac{c}{2 \pi}} \frac{e^{-\frac{c}{2 (x - \mu)}}}{(x - \mu)^{3/2}}
Parameters
==========
mu : Real number
The location parameter.
c : Real number, `c > 0`
A scale parameter.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Levy, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> c = Symbol("c", positive=True)
>>> z = Symbol("z")
>>> X = Levy("x", mu, c)
>>> density(X)(z)
sqrt(2)*sqrt(c)*exp(-c/(-2*mu + 2*z))/(2*sqrt(pi)*(-mu + z)**(3/2))
>>> cdf(X)(z)
erfc(sqrt(c)*sqrt(1/(-2*mu + 2*z)))
References
==========
.. [1] https://en.wikipedia.org/wiki/L%C3%A9vy_distribution
.. [2] http://mathworld.wolfram.com/LevyDistribution.html
"""
return rv(name, LevyDistribution, (mu, c))
#-------------------------------------------------------------------------------
# Log-Cauchy distribution --------------------------------------------------------
class LogCauchyDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'sigma')
set = Interval.open(0, oo)
@staticmethod
def check(mu, sigma):
_value_check((sigma > 0) != False, "Scale parameter sigma must be positive.")
_value_check(mu.is_real != False, "Location parameter must be real.")
def pdf(self, x):
mu, sigma = self.mu, self.sigma
return 1/(x*pi)*(sigma/((log(x) - mu)**2 + sigma**2))
def _cdf(self, x):
mu, sigma = self.mu, self.sigma
return (1/pi)*atan((log(x) - mu)/sigma) + S.Half
def _characteristic_function(self, t):
raise NotImplementedError("The characteristic function for the "
"Log-Cauchy distribution does not exist.")
def _moment_generating_function(self, t):
raise NotImplementedError("The moment generating function for the "
"Log-Cauchy distribution does not exist.")
def LogCauchy(name, mu, sigma):
r"""
Create a continuous random variable with a Log-Cauchy distribution.
The density of the Log-Cauchy distribution is given by
.. math::
f(x) := \frac{1}{\pi x} \frac{\sigma}{(\log(x) - \mu)^2 + \sigma^2}
Parameters
==========
mu : Real number, the location
sigma : Real number, `\sigma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogCauchy, density, cdf
>>> from sympy import Symbol, S
>>> mu = 2
>>> sigma = S.One / 5
>>> z = Symbol("z")
>>> X = LogCauchy("x", mu, sigma)
>>> density(X)(z)
1/(5*pi*z*((log(z) - 2)**2 + 1/25))
>>> cdf(X)(z)
atan(5*log(z) - 10)/pi + 1/2
References
==========
.. [1] https://en.wikipedia.org/wiki/Log-Cauchy_distribution
"""
return rv(name, LogCauchyDistribution, (mu, sigma))
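# Hedged illustration (a sketch, never called on import): the median of a Log-Cauchy
# variable is exp(mu); the atan term of the closed-form cdf vanishes there.
def _log_cauchy_median_demo():
    from sympy import Symbol, exp
    from sympy.stats import cdf
    mu = Symbol("mu", real=True)
    sigma = Symbol("sigma", positive=True)
    X = LogCauchy("X", mu, sigma)
    return cdf(X)(exp(mu))  # expected: 1/2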
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
set = Interval(-oo, oo)
@staticmethod
def check(mu, s):
_value_check(s > 0, "Scale parameter s must be positive.")
def pdf(self, x):
mu, s = self.mu, self.s
return exp(-(x - mu)/s)/(s*(1 + exp(-(x - mu)/s))**2)
def _cdf(self, x):
mu, s = self.mu, self.s
return S.One/(1 + exp(-(x - mu)/s))
def _characteristic_function(self, t):
return Piecewise((exp(I*t*self.mu) * pi*self.s*t / sinh(pi*self.s*t), Ne(t, 0)), (S.One, True))
def _moment_generating_function(self, t):
return exp(self.mu*t) * beta_fn(1 - self.s*t, 1 + self.s*t)
def _quantile(self, p):
return self.mu - self.s*log(-S.One + S.One/p)
def Logistic(name, mu, s):
r"""
Create a continuous random variable with a logistic distribution.
Explanation
===========
The density of the logistic distribution is given by
.. math::
f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}
Parameters
==========
mu : Real number, the location (mean)
s : Real number, `s > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Logistic, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = Logistic("x", mu, s)
>>> density(X)(z)
exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)
>>> cdf(X)(z)
1/(exp((mu - z)/s) + 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Logistic_distribution
.. [2] http://mathworld.wolfram.com/LogisticDistribution.html
"""
return rv(name, LogisticDistribution, (mu, s))
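# Hedged illustration (a sketch, never called on import): the logistic _cdf and
# _quantile above are exact inverses, so the round trip returns the probability.
def _logistic_quantile_roundtrip_demo():
    from sympy import Rational, Symbol
    from sympy.stats import cdf, quantile
    mu = Symbol("mu", real=True)
    s = Symbol("s", positive=True)
    X = Logistic("X", mu, s)
    return cdf(X)(quantile(X)(Rational(1, 3)))  # expected: 1/3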
#-------------------------------------------------------------------------------
# Log-logistic distribution --------------------------------------------------------
class LogLogisticDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Scale parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
a, b = self.alpha, self.beta
return ((b/a)*(x/a)**(b - 1))/(1 + (x/a)**b)**2
def _cdf(self, x):
a, b = self.alpha, self.beta
return 1/(1 + (x/a)**(-b))
def _quantile(self, p):
a, b = self.alpha, self.beta
return a*((p/(1 - p))**(1/b))
def expectation(self, expr, var, **kwargs):
a, b = self.args
return Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
def LogLogistic(name, alpha, beta):
r"""
Create a continuous random variable with a log-logistic distribution.
The distribution is unimodal when ``beta > 1``.
Explanation
===========
The density of the log-logistic distribution is given by
.. math::
f(x) := \frac{(\frac{\beta}{\alpha})(\frac{x}{\alpha})^{\beta - 1}}
{(1 + (\frac{x}{\alpha})^{\beta})^2}
Parameters
==========
alpha : Real number, `\alpha > 0`, scale parameter and median of distribution
beta : Real number, `\beta > 0`, a shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogLogistic, density, cdf, quantile
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> p = Symbol("p")
>>> z = Symbol("z", positive=True)
>>> X = LogLogistic("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
beta - 1
/ z \
beta*|-----|
\alpha/
------------------------
2
/ beta \
|/ z \ |
alpha*||-----| + 1|
\\alpha/ /
>>> cdf(X)(z)
1/(1 + (z/alpha)**(-beta))
>>> quantile(X)(p)
alpha*(p/(1 - p))**(1/beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Log-logistic_distribution
"""
return rv(name, LogLogisticDistribution, (alpha, beta))
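# Hedged illustration (a sketch, never called on import): as the docstring states,
# alpha is the median of the log-logistic distribution, which the quantile at 1/2 confirms.
def _log_logistic_median_demo():
    from sympy import S, Symbol
    from sympy.stats import quantile
    alpha = Symbol("alpha", positive=True)
    beta = Symbol("beta", positive=True)
    X = LogLogistic("X", alpha, beta)
    return quantile(X)(S.Half)  # expected: alpha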
#-------------------------------------------------------------------------------
#Logit-Normal distribution------------------------------------------------------
class LogitNormalDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
set = Interval.open(0, 1)
@staticmethod
def check(mu, s):
_value_check((s ** 2).is_real is not False and s ** 2 > 0, "Squared scale parameter s must be positive.")
_value_check(mu.is_real is not False, "Location parameter must be real")
def _logit(self, x):
return log(x / (1 - x))
def pdf(self, x):
mu, s = self.mu, self.s
return exp(-(self._logit(x) - mu)**2/(2*s**2))*(S.One/sqrt(2*pi*(s**2)))*(1/(x*(1 - x)))
def _cdf(self, x):
mu, s = self.mu, self.s
return (S.One/2)*(1 + erf((self._logit(x) - mu)/(sqrt(2*s**2))))
def LogitNormal(name, mu, s):
r"""
Create a continuous random variable with a Logit-Normal distribution.
The density of the logit-normal distribution is given by
.. math::
f(x) := \frac{1}{s \sqrt{2 \pi}} \frac{1}{x(1 - x)} e^{- \frac{(\operatorname{logit}(x) - \mu)^2}{2 s^2}}
where :math:`\operatorname{logit}(x) = \log\left(\frac{x}{1 - x}\right)`.
Parameters
==========
mu : Real number, the location (mean)
s : Real number, `s > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogitNormal, density, cdf
>>> from sympy import Symbol,pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = LogitNormal("x",mu,s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
/ / z \\
-|-mu + log|-----||
\ \1 - z//
---------------------
2
___ 2*s
\/ 2 *e
----------------------------
____
2*\/ pi *s*z*(1 - z)
>>> density(X)(z)
sqrt(2)*exp(-(-mu + log(z/(1 - z)))**2/(2*s**2))/(2*sqrt(pi)*s*z*(1 - z))
>>> cdf(X)(z)
erf(sqrt(2)*(-mu + log(z/(1 - z)))/(2*s))/2 + 1/2
References
==========
.. [1] https://en.wikipedia.org/wiki/Logit-normal_distribution
"""
return rv(name, LogitNormalDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
set = Interval(0, oo)
@staticmethod
def check(mean, std):
_value_check(std > 0, "Parameter std must be positive.")
def pdf(self, x):
mean, std = self.mean, self.std
return exp(-(log(x) - mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std)
def _cdf(self, x):
mean, std = self.mean, self.std
return Piecewise(
(S.Half + S.Half*erf((log(x) - mean)/sqrt(2)/std), x > 0),
(S.Zero, True)
)
def _moment_generating_function(self, t):
raise NotImplementedError('Moment generating function of the log-normal distribution is not defined.')
def LogNormal(name, mean, std):
r"""
Create a continuous random variable with a log-normal distribution.
Explanation
===========
The density of the log-normal distribution is given by
.. math::
f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}
with :math:`x \geq 0`.
Parameters
==========
mu : Real number
The log-scale.
sigma : Real number
A shape, $\sigma > 0$, the standard deviation of the variable's natural logarithm.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogNormal, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = LogNormal("x", mu, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-(-mu + log(z))
-----------------
2
___ 2*sigma
\/ 2 *e
------------------------
____
2*\/ pi *sigma*z
>>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z)
References
==========
.. [1] https://en.wikipedia.org/wiki/Lognormal
.. [2] http://mathworld.wolfram.com/LogNormalDistribution.html
"""
return rv(name, LogNormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Lomax Distribution -----------------------------------------------------------
class LomaxDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'lamda',)
set = Interval(0, oo)
@staticmethod
def check(alpha, lamda):
_value_check(alpha.is_real, "Shape parameter should be real.")
_value_check(lamda.is_real, "Scale parameter should be real.")
_value_check(alpha.is_positive, "Shape parameter should be positive.")
_value_check(lamda.is_positive, "Scale parameter should be positive.")
def pdf(self, x):
lamda, alpha = self.lamda, self.alpha
return (alpha/lamda) * (S.One + x/lamda)**(-alpha-1)
def Lomax(name, alpha, lamda):
r"""
Create a continuous random variable with a Lomax distribution.
Explanation
===========
The density of the Lomax distribution is given by
.. math::
f(x) := \frac{\alpha}{\lambda}\left[1+\frac{x}{\lambda}\right]^{-(\alpha+1)}
Parameters
==========
alpha : Real Number, `\alpha > 0`
Shape parameter
lamda : Real Number, `\lambda > 0`
Scale parameter
Examples
========
>>> from sympy.stats import Lomax, density, cdf, E
>>> from sympy import symbols
>>> a, l = symbols('a, l', positive=True)
>>> X = Lomax('X', a, l)
>>> x = symbols('x')
>>> density(X)(x)
a*(1 + x/l)**(-a - 1)/l
>>> cdf(X)(x)
Piecewise((1 - 1/(1 + x/l)**a, x >= 0), (0, True))
>>> a = 2
>>> X = Lomax('X', a, l)
>>> E(X)
l
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Lomax_distribution
"""
return rv(name, LomaxDistribution, (alpha, lamda))
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellDistribution(SingleContinuousDistribution):
_argnames = ('a',)
set = Interval(0, oo)
@staticmethod
def check(a):
_value_check(a > 0, "Parameter a must be positive.")
def pdf(self, x):
a = self.a
return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3
def _cdf(self, x):
a = self.a
return erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
def Maxwell(name, a):
r"""
Create a continuous random variable with a Maxwell distribution.
Explanation
===========
The density of the Maxwell distribution is given by
.. math::
f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}
with :math:`x \geq 0`.
.. TODO - what does the parameter mean?
Parameters
==========
a : Real number, `a > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Maxwell, density, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", positive=True)
>>> z = Symbol("z")
>>> X = Maxwell("x", a)
>>> density(X)(z)
sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
>>> E(X)
2*sqrt(2)*a/sqrt(pi)
>>> simplify(variance(X))
a**2*(-8 + 3*pi)/pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Maxwell_distribution
.. [2] http://mathworld.wolfram.com/MaxwellDistribution.html
"""
return rv(name, MaxwellDistribution, (a, ))
#-------------------------------------------------------------------------------
# Moyal Distribution -----------------------------------------------------------
class MoyalDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'sigma')
@staticmethod
def check(mu, sigma):
_value_check(mu.is_real, "Location parameter must be real.")
_value_check(sigma.is_real and sigma > 0, "Scale parameter must be real\
and positive.")
def pdf(self, x):
mu, sigma = self.mu, self.sigma
num = exp(-(exp(-(x - mu)/sigma) + (x - mu)/(sigma))/2)
den = (sqrt(2*pi) * sigma)
return num/den
def _characteristic_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(I*t*mu)
term2 = (2**(-I*sigma*t) * gamma(Rational(1, 2) - I*t*sigma))
return (term1 * term2)/sqrt(pi)
def _moment_generating_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(t*mu)
term2 = (2**(-1*sigma*t) * gamma(Rational(1, 2) - t*sigma))
return (term1 * term2)/sqrt(pi)
def Moyal(name, mu, sigma):
r"""
Create a continuous random variable with a Moyal distribution.
Explanation
===========
The density of the Moyal distribution is given by
.. math::
f(x) := \frac{\exp\left(-\frac{1}{2}\left(\frac{x-\mu}{\sigma} + e^{-\frac{x-\mu}{\sigma}}\right)\right)}{\sqrt{2\pi}\sigma}
with :math:`x \in \mathbb{R}`.
Parameters
==========
mu : Real number
Location parameter
sigma : Real positive number
Scale parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Moyal, density, cdf
>>> from sympy import Symbol, simplify
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True, real=True)
>>> z = Symbol("z")
>>> X = Moyal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-exp((mu - z)/sigma)/2 - (-mu + z)/(2*sigma))/(2*sqrt(pi)*sigma)
>>> simplify(cdf(X)(z))
1 - erf(sqrt(2)*exp((mu - z)/(2*sigma))/2)
References
==========
.. [1] https://reference.wolfram.com/language/ref/MoyalDistribution.html
.. [2] http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
"""
return rv(name, MoyalDistribution, (mu, sigma))
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'omega')
set = Interval(0, oo)
@staticmethod
def check(mu, omega):
_value_check(mu >= S.Half, "Shape parameter mu must be greater than or equal to 1/2.")
_value_check(omega > 0, "Spread parameter omega must be positive.")
def pdf(self, x):
mu, omega = self.mu, self.omega
return 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu - 1)*exp(-mu/omega*x**2)
def _cdf(self, x):
mu, omega = self.mu, self.omega
return Piecewise(
(lowergamma(mu, (mu/omega)*x**2)/gamma(mu), x > 0),
(S.Zero, True))
def Nakagami(name, mu, omega):
r"""
Create a continuous random variable with a Nakagami distribution.
Explanation
===========
The density of the Nakagami distribution is given by
.. math::
f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
\exp\left(-\frac{\mu}{\omega}x^2 \right)
with :math:`x > 0`.
Parameters
==========
mu : Real number, `\mu \geq \frac{1}{2}`, a shape
omega : Real number, `\omega > 0`, the spread
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Nakagami, density, E, variance, cdf
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu", positive=True)
>>> omega = Symbol("omega", positive=True)
>>> z = Symbol("z")
>>> X = Nakagami("x", mu, omega)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-mu*z
-------
mu -mu 2*mu - 1 omega
2*mu *omega *z *e
----------------------------------
Gamma(mu)
>>> simplify(E(X))
sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1)
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
omega*Gamma (mu + 1/2)
omega - -----------------------
Gamma(mu)*Gamma(mu + 1)
>>> cdf(X)(z)
Piecewise((lowergamma(mu, mu*z**2/omega)/gamma(mu), z > 0),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Nakagami_distribution
"""
return rv(name, NakagamiDistribution, (mu, omega))
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
@staticmethod
def check(mean, std):
_value_check(std > 0, "Standard deviation must be positive")
def pdf(self, x):
return exp(-(x - self.mean)**2 / (2*self.std**2)) / (sqrt(2*pi)*self.std)
def _cdf(self, x):
mean, std = self.mean, self.std
return erf(sqrt(2)*(-mean + x)/(2*std))/2 + S.Half
def _characteristic_function(self, t):
mean, std = self.mean, self.std
return exp(I*mean*t - std**2*t**2/2)
def _moment_generating_function(self, t):
mean, std = self.mean, self.std
return exp(mean*t + std**2*t**2/2)
def _quantile(self, p):
mean, std = self.mean, self.std
return mean + std*sqrt(2)*erfinv(2*p - 1)
def Normal(name, mean, std):
r"""
Create a continuous random variable with a Normal distribution.
Explanation
===========
The density of the Normal distribution is given by
.. math::
f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }
Parameters
==========
mu : Real number or a list representing the mean or the mean vector
sigma : Real number or a positive definite square matrix,
:math:`\sigma^2 > 0`, the variance
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Normal, density, E, std, cdf, skewness, quantile, marginal_distribution
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu")
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> y = Symbol("y")
>>> p = Symbol("p")
>>> X = Normal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma)
>>> C = simplify(cdf(X))(z) # it needs a little more help...
>>> pprint(C, use_unicode=False)
/ ___ \
|\/ 2 *(-mu + z)|
erf|---------------|
\ 2*sigma / 1
-------------------- + -
2 2
>>> quantile(X)(p)
mu + sqrt(2)*sigma*erfinv(2*p - 1)
>>> simplify(skewness(X))
0
>>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-z**2/2)/(2*sqrt(pi))
>>> E(2*X + 1)
1
>>> simplify(std(2*X + 1))
2
>>> m = Normal('X', [1, 2], [[2, 1], [1, 2]])
>>> pprint(density(m)(y, z), use_unicode=False)
2 2
y y*z z
- -- + --- - -- + z - 1
___ 3 3 3
\/ 3 *e
------------------------------
6*pi
>>> marginal_distribution(m, m[0])(1)
1/(2*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_distribution
.. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
"""
if (isinstance(mean, list) or getattr(mean, 'is_Matrix', False)) and \
(isinstance(std, list) or getattr(std, 'is_Matrix', False)):
from sympy.stats.joint_rv_types import MultivariateNormal
return MultivariateNormal(name, mean, std)
return rv(name, NormalDistribution, (mean, std))
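# Hedged illustration (a sketch, never called on import): by symmetry the normal cdf
# defined above evaluates to exactly 1/2 at the mean.
def _normal_median_demo():
    from sympy import Symbol
    from sympy.stats import cdf
    mu = Symbol("mu", real=True)
    sigma = Symbol("sigma", positive=True)
    X = Normal("X", mu, sigma)
    return cdf(X)(mu)  # expected: 1/2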
#-------------------------------------------------------------------------------
# Inverse Gaussian distribution ----------------------------------------------------------
class GaussianInverseDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'shape')
@property
def set(self):
return Interval(0, oo)
@staticmethod
def check(mean, shape):
_value_check(shape > 0, "Shape parameter must be positive")
_value_check(mean > 0, "Mean must be positive")
def pdf(self, x):
mu, s = self.mean, self.shape
return exp(-s*(x - mu)**2 / (2*x*mu**2)) * sqrt(s/(2*pi*x**3))
def _cdf(self, x):
from sympy.stats import cdf
mu, s = self.mean, self.shape
stdNormalcdf = cdf(Normal('x', 0, 1))
first_term = stdNormalcdf(sqrt(s/x) * ((x/mu) - S.One))
second_term = exp(2*s/mu) * stdNormalcdf(-sqrt(s/x)*(x/mu + S.One))
return first_term + second_term
def _characteristic_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*I*t)/s)))
def _moment_generating_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*t)/s)))
def GaussianInverse(name, mean, shape):
r"""
Create a continuous random variable with an Inverse Gaussian distribution.
Inverse Gaussian distribution is also known as Wald distribution.
Explanation
===========
The density of the Inverse Gaussian distribution is given by
.. math::
f(x) := \sqrt{\frac{\lambda}{2\pi x^3}} e^{-\frac{\lambda(x-\mu)^2}{2x\mu^2}}
Parameters
==========
mu :
Positive number representing the mean.
lambda :
Positive number representing the shape parameter.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GaussianInverse, density, E, std, skewness
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", positive=True)
>>> lamda = Symbol("lambda", positive=True)
>>> z = Symbol("z", positive=True)
>>> X = GaussianInverse("x", mu, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-lambda*(-mu + z)
-------------------
2
___ ________ 2*mu *z
\/ 2 *\/ lambda *e
-------------------------------------
____ 3/2
2*\/ pi *z
>>> E(X)
mu
>>> std(X).expand()
mu**(3/2)/sqrt(lambda)
>>> skewness(X).expand()
3*sqrt(mu)/sqrt(lambda)
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
.. [2] http://mathworld.wolfram.com/InverseGaussianDistribution.html
"""
return rv(name, GaussianInverseDistribution, (mean, shape))
Wald = GaussianInverse
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoDistribution(SingleContinuousDistribution):
_argnames = ('xm', 'alpha')
@property
def set(self):
return Interval(self.xm, oo)
@staticmethod
def check(xm, alpha):
_value_check(xm > 0, "Xm must be positive")
_value_check(alpha > 0, "Alpha must be positive")
def pdf(self, x):
xm, alpha = self.xm, self.alpha
return alpha * xm**alpha / x**(alpha + 1)
def _cdf(self, x):
xm, alpha = self.xm, self.alpha
return Piecewise(
(S.One - xm**alpha/x**alpha, x>=xm),
(0, True),
)
def _moment_generating_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-xm*t)**alpha * uppergamma(-alpha, -xm*t)
def _characteristic_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-I * xm * t) ** alpha * uppergamma(-alpha, -I * xm * t)
def Pareto(name, xm, alpha):
r"""
Create a continuous random variable with the Pareto distribution.
Explanation
===========
The density of the Pareto distribution is given by
.. math::
f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}}
    with :math:`x \in [x_m,\infty)`.
Parameters
==========
xm : Real number, `x_m > 0`, a scale
alpha : Real number, `\alpha > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Pareto, density
>>> from sympy import Symbol
>>> xm = Symbol("xm", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Pareto("x", xm, beta)
>>> density(X)(z)
beta*xm**beta*z**(-beta - 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution
.. [2] http://mathworld.wolfram.com/ParetoDistribution.html
"""
return rv(name, ParetoDistribution, (xm, alpha))
#-------------------------------------------------------------------------------
# PowerFunction distribution ---------------------------------------------------
class PowerFunctionDistribution(SingleContinuousDistribution):
_argnames=('alpha','a','b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(alpha, a, b):
_value_check(a.is_real, "Continuous Boundary parameter should be real.")
_value_check(b.is_real, "Continuous Boundary parameter should be real.")
_value_check(a < b, " 'a' the left Boundary must be smaller than 'b' the right Boundary." )
_value_check(alpha.is_positive, "Continuous Shape parameter should be positive.")
def pdf(self, x):
alpha, a, b = self.alpha, self.a, self.b
num = alpha*(x - a)**(alpha - 1)
den = (b - a)**alpha
return num/den
def PowerFunction(name, alpha, a, b):
r"""
Creates a continuous random variable with a Power Function Distribution.
Explanation
===========
The density of PowerFunction distribution is given by
.. math::
f(x) := \frac{{\alpha}(x - a)^{\alpha - 1}}{(b - a)^{\alpha}}
with :math:`x \in [a,b]`.
Parameters
==========
alpha : Positive number, `0 < \alpha`, the shape parameter
a : Real number, :math:`-\infty < a`, the left boundary
b : Real number, :math:`a < b < \infty`, the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import PowerFunction, density, cdf, E, variance
>>> from sympy import Symbol
>>> alpha = Symbol("alpha", positive=True)
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = PowerFunction("X", 2, a, b)
>>> density(X)(z)
(-2*a + 2*z)/(-a + b)**2
>>> cdf(X)(z)
Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) +
z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))
>>> alpha = 2
>>> a = 0
>>> b = 1
>>> Y = PowerFunction("Y", alpha, a, b)
>>> E(Y)
2/3
>>> variance(Y)
1/18
References
==========
.. [1] http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html
"""
return rv(name, PowerFunctionDistribution, (alpha, a, b))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
class QuadraticUDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
_value_check(b > a, "Parameter b must be in range (%s, oo)."%(a))
def pdf(self, x):
a, b = self.a, self.b
alpha = 12 / (b-a)**3
beta = (a+b) / 2
return Piecewise(
(alpha * (x-beta)**2, And(a<=x, x<=b)),
(S.Zero, True))
def _moment_generating_function(self, t):
a, b = self.a, self.b
return -3 * (exp(a*t) * (4 + (a**2 + 2*a*(-2 + b) + b**2) * t) \
- exp(b*t) * (4 + (-4*b + (a + b)**2) * t)) / ((a-b)**3 * t**2)
    def _characteristic_function(self, t):
        # phi(t) = E[exp(I*t*X)] is the moment generating function evaluated
        # at I*t.
        return self._moment_generating_function(I*t)
def QuadraticU(name, a, b):
r"""
Create a Continuous Random Variable with a U-quadratic distribution.
Explanation
===========
The density of the U-quadratic distribution is given by
.. math::
f(x) := \alpha (x-\beta)^2
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import QuadraticU, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = QuadraticU("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ 2
| / a b \
|12*|- - - - + z|
| \ 2 2 /
<----------------- for And(b >= z, a <= z)
| 3
| (-a + b)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/U-quadratic_distribution
"""
return rv(name, QuadraticUDistribution, (a, b))
#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
class RaisedCosineDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
@property
def set(self):
return Interval(self.mu - self.s, self.mu + self.s)
@staticmethod
def check(mu, s):
_value_check(s > 0, "s must be positive")
def pdf(self, x):
mu, s = self.mu, self.s
return Piecewise(
((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)),
(S.Zero, True))
def _characteristic_function(self, t):
mu, s = self.mu, self.s
return Piecewise((exp(-I*pi*mu/s)/2, Eq(t, -pi/s)),
(exp(I*pi*mu/s)/2, Eq(t, pi/s)),
(pi**2*sin(s*t)*exp(I*mu*t) / (s*t*(pi**2 - s**2*t**2)), True))
def _moment_generating_function(self, t):
mu, s = self.mu, self.s
return pi**2 * sinh(s*t) * exp(mu*t) / (s*t*(pi**2 + s**2*t**2))
def RaisedCosine(name, mu, s):
r"""
Create a Continuous Random Variable with a raised cosine distribution.
Explanation
===========
The density of the raised cosine distribution is given by
.. math::
f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)
with :math:`x \in [\mu-s,\mu+s]`.
Parameters
==========
mu : Real number
s : Real number, `s > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import RaisedCosine, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = RaisedCosine("x", mu, s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ /pi*(-mu + z)\
|cos|------------| + 1
| \ s /
<--------------------- for And(z >= mu - s, z <= mu + s)
| 2*s
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Raised_cosine_distribution
"""
return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighDistribution(SingleContinuousDistribution):
_argnames = ('sigma',)
set = Interval(0, oo)
@staticmethod
def check(sigma):
_value_check(sigma > 0, "Scale parameter sigma must be positive.")
def pdf(self, x):
sigma = self.sigma
return x/sigma**2*exp(-x**2/(2*sigma**2))
def _cdf(self, x):
sigma = self.sigma
return 1 - exp(-(x**2/(2*sigma**2)))
def _characteristic_function(self, t):
sigma = self.sigma
return 1 - sigma*t*exp(-sigma**2*t**2/2) * sqrt(pi/2) * (erfi(sigma*t/sqrt(2)) - I)
def _moment_generating_function(self, t):
sigma = self.sigma
return 1 + sigma*t*exp(sigma**2*t**2/2) * sqrt(pi/2) * (erf(sigma*t/sqrt(2)) + 1)
def Rayleigh(name, sigma):
r"""
Create a continuous random variable with a Rayleigh distribution.
Explanation
===========
The density of the Rayleigh distribution is given by
.. math ::
f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}
with :math:`x > 0`.
Parameters
==========
sigma : Real number, `\sigma > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Rayleigh, density, E, variance
>>> from sympy import Symbol
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Rayleigh("x", sigma)
>>> density(X)(z)
z*exp(-z**2/(2*sigma**2))/sigma**2
>>> E(X)
sqrt(2)*sqrt(pi)*sigma/2
>>> variance(X)
-pi*sigma**2/2 + 2*sigma**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Rayleigh_distribution
.. [2] http://mathworld.wolfram.com/RayleighDistribution.html
"""
return rv(name, RayleighDistribution, (sigma, ))
#-------------------------------------------------------------------------------
# Reciprocal distribution --------------------------------------------------------
class ReciprocalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
_value_check(a > 0, "Parameter > 0. a = %s"%a)
_value_check((a < b),
"Parameter b must be in range (%s, +oo]. b = %s"%(a, b))
def pdf(self, x):
a, b = self.a, self.b
return 1/(x*(log(b) - log(a)))
def Reciprocal(name, a, b):
r"""Creates a continuous random variable with a reciprocal distribution.
Parameters
==========
a : Real number, :math:`0 < a`
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Reciprocal, density, cdf
>>> from sympy import symbols
>>> a, b, x = symbols('a, b, x', positive=True)
>>> R = Reciprocal('R', a, b)
>>> density(R)(x)
1/(x*(-log(a) + log(b)))
>>> cdf(R)(x)
Piecewise((log(a)/(log(a) - log(b)) - log(x)/(log(a) - log(b)), a <= x), (0, True))
    References
    ==========
.. [1] https://en.wikipedia.org/wiki/Reciprocal_distribution
"""
return rv(name, ReciprocalDistribution, (a, b))
#-------------------------------------------------------------------------------
# Shifted Gompertz distribution ------------------------------------------------
class ShiftedGompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
b, eta = self.b, self.eta
return b*exp(-b*x)*exp(-eta*exp(-b*x))*(1+eta*(1-exp(-b*x)))
def ShiftedGompertz(name, b, eta):
r"""
Create a continuous random variable with a Shifted Gompertz distribution.
Explanation
===========
The density of the Shifted Gompertz distribution is given by
.. math::
        f(x) := b e^{-b x} e^{-\eta \exp(-b x)} \left[1 + \eta(1 - e^{-b x}) \right]
with :math:`x \in [0, \infty)`.
Parameters
==========
b : Real number, `b > 0`, a scale
eta : Real number, `\eta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ShiftedGompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> x = Symbol("x")
>>> X = ShiftedGompertz("x", b, eta)
>>> density(X)(x)
b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
References
==========
.. [1] https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution
"""
return rv(name, ShiftedGompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTDistribution(SingleContinuousDistribution):
_argnames = ('nu',)
set = Interval(-oo, oo)
@staticmethod
def check(nu):
_value_check(nu > 0, "Degrees of freedom nu must be positive.")
def pdf(self, x):
nu = self.nu
return 1/(sqrt(nu)*beta_fn(S.Half, nu/2))*(1 + x**2/nu)**(-(nu + 1)/2)
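    # The CDF below is written through the Gauss hypergeometric function:
    #     F(x) = 1/2 + x*Gamma((nu+1)/2)*2F1(1/2, (nu+1)/2; 3/2; -x**2/nu)
    #            / (sqrt(pi*nu)*Gamma(nu/2))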
def _cdf(self, x):
nu = self.nu
return S.Half + x*gamma((nu+1)/2)*hyper((S.Half, (nu+1)/2),
(Rational(3, 2),), -x**2/nu)/(sqrt(pi*nu)*gamma(nu/2))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the Student-T distribution is undefined.')
def StudentT(name, nu):
r"""
Create a continuous random variable with a student's t distribution.
Explanation
===========
The density of the student's t distribution is given by
.. math::
f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
{\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
\left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
Parameters
==========
nu : Real number, `\nu > 0`, the degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import StudentT, density, cdf
>>> from sympy import Symbol, pprint
>>> nu = Symbol("nu", positive=True)
>>> z = Symbol("z")
>>> X = StudentT("x", nu)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
nu 1
- -- - -
2 2
/ 2\
| z |
|1 + --|
\ nu/
-----------------
____ / nu\
\/ nu *B|1/2, --|
\ 2 /
>>> cdf(X)(z)
1/2 + z*gamma(nu/2 + 1/2)*hyper((1/2, nu/2 + 1/2), (3/2,),
-z**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Student_t-distribution
.. [2] http://mathworld.wolfram.com/Studentst-Distribution.html
"""
return rv(name, StudentTDistribution, (nu, ))
#-------------------------------------------------------------------------------
# Trapezoidal distribution ------------------------------------------------------
class TrapezoidalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c', 'd')
@property
def set(self):
return Interval(self.a, self.d)
@staticmethod
def check(a, b, c, d):
_value_check(a < d, "Lower bound parameter a < %s. a = %s"%(d, a))
_value_check((a <= b, b < c),
"Level start parameter b must be in range [%s, %s). b = %s"%(a, c, b))
_value_check((b < c, c <= d),
"Level end parameter c must be in range (%s, %s]. c = %s"%(b, d, c))
_value_check(d >= c, "Upper bound parameter d > %s. d = %s"%(c, d))
def pdf(self, x):
a, b, c, d = self.a, self.b, self.c, self.d
return Piecewise(
(2*(x-a) / ((b-a)*(d+c-a-b)), And(a <= x, x < b)),
(2 / (d+c-a-b), And(b <= x, x < c)),
(2*(d-x) / ((d-c)*(d+c-a-b)), And(c <= x, x <= d)),
(S.Zero, True))
def Trapezoidal(name, a, b, c, d):
r"""
Create a continuous random variable with a trapezoidal distribution.
Explanation
===========
The density of the trapezoidal distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(d+c-a-b)} & \mathrm{for\ } a \le x < b, \\
\frac{2}{d+c-a-b} & \mathrm{for\ } b \le x < c, \\
\frac{2(d-x)}{(d-c)(d+c-a-b)} & \mathrm{for\ } c \le x < d, \\
0 & \mathrm{for\ } d < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a < d`
b : Real number, :math:`a \le b < c`
c : Real number, :math:`b < c \le d`
d : Real number
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Trapezoidal, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> d = Symbol("d")
>>> z = Symbol("z")
>>> X = Trapezoidal("x", a,b,c,d)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|------------------------- for And(a <= z, b > z)
|(-a + b)*(-a - b + c + d)
|
| 2
| -------------- for And(b <= z, c > z)
< -a - b + c + d
|
| 2*d - 2*z
|------------------------- for And(d >= z, c <= z)
|(-c + d)*(-a - b + c + d)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Trapezoidal_distribution
"""
return rv(name, TrapezoidalDistribution, (a, b, c, d))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b, c):
_value_check(b > a, "Parameter b > %s. b = %s"%(a, b))
_value_check((a <= c, c <= b),
"Parameter c must be in range [%s, %s]. c = %s"%(a, b, c))
def pdf(self, x):
a, b, c = self.a, self.b, self.c
return Piecewise(
(2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)),
(2/(b - a), Eq(x, c)),
(2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)),
(S.Zero, True))
def _characteristic_function(self, t):
a, b, c = self.a, self.b, self.c
return -2 *((b-c) * exp(I*a*t) - (b-a) * exp(I*c*t) + (c-a) * exp(I*b*t)) / ((b-a)*(c-a)*(b-c)*t**2)
def _moment_generating_function(self, t):
a, b, c = self.a, self.b, self.c
return 2 * ((b - c) * exp(a * t) - (b - a) * exp(c * t) + (c - a) * exp(b * t)) / (
(b - a) * (c - a) * (b - c) * t ** 2)
def Triangular(name, a, b, c):
r"""
Create a continuous random variable with a triangular distribution.
Explanation
===========
The density of the triangular distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
\frac{2}{b-a} & \mathrm{for\ } x = c, \\
\frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
0 & \mathrm{for\ } b < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a \in \left(-\infty, \infty\right)`
b : Real number, :math:`a < b`
c : Real number, :math:`a \leq c \leq b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Triangular, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> z = Symbol("z")
>>> X = Triangular("x", a,b,c)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|----------------- for And(a <= z, c > z)
|(-a + b)*(-a + c)
|
| 2
| ------ for c = z
< -a + b
|
| 2*b - 2*z
|---------------- for And(b >= z, c < z)
|(-a + b)*(b - c)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_distribution
.. [2] http://mathworld.wolfram.com/TriangularDistribution.html
"""
return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformDistribution(SingleContinuousDistribution):
_argnames = ('left', 'right')
@property
def set(self):
return Interval(self.left, self.right)
@staticmethod
def check(left, right):
_value_check(left < right, "Lower limit should be less than Upper limit.")
def pdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.One/(right - left), And(left <= x, x <= right)),
(S.Zero, True)
)
def _cdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.Zero, x < left),
((x - left)/(right - left), x <= right),
(S.One, True)
)
def _characteristic_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(I*t*right) - exp(I*t*left)) / (I*t*(right - left)), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(t*right) - exp(t*left)) / (t * (right - left)), Ne(t, 0)),
(S.One, True))
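    # expectation() below resolves the Max/Min of the bounds that symbolic
    # integration can leave behind; check() guarantees left < right, so
    # Max(left, right) -> right and Min(left, right) -> left.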
def expectation(self, expr, var, **kwargs):
kwargs['evaluate'] = True
result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs)
result = result.subs({Max(self.left, self.right): self.right,
Min(self.left, self.right): self.left})
return result
def Uniform(name, left, right):
r"""
Create a continuous random variable with a uniform distribution.
Explanation
===========
The density of the uniform distribution is given by
.. math::
f(x) := \begin{cases}
\frac{1}{b - a} & \text{for } x \in [a,b] \\
0 & \text{otherwise}
\end{cases}
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number, :math:`-\infty < a`, the left boundary
b : Real number, :math:`a < b < \infty`, the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Uniform, density, cdf, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", negative=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Uniform("x", a, b)
>>> density(X)(z)
Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))
>>> cdf(X)(z)
Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))
>>> E(X)
a/2 + b/2
>>> simplify(variance(X))
a**2/12 - a*b/6 + b**2/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
.. [2] http://mathworld.wolfram.com/UniformDistribution.html
"""
return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumDistribution(SingleContinuousDistribution):
_argnames = ('n',)
@property
def set(self):
return Interval(0, self.n)
@staticmethod
def check(n):
_value_check((n > 0, n.is_integer),
"Parameter n must be positive integer.")
def pdf(self, x):
n = self.n
k = Dummy("k")
return 1/factorial(
n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x)))
def _cdf(self, x):
n = self.n
k = Dummy("k")
return Piecewise((S.Zero, x < 0),
(1/factorial(n)*Sum((-1)**k*binomial(n, k)*(x - k)**(n),
(k, 0, floor(x))), x <= n),
(S.One, True))
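    # An Irwin-Hall variable is the sum of n independent U(0, 1) variables, so
    # the characteristic and moment generating functions below are just the
    # n-th powers of the U(0, 1) ones, (exp(I*t) - 1)/(I*t) and (exp(t) - 1)/t.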
def _characteristic_function(self, t):
return ((exp(I*t) - 1) / (I*t))**self.n
def _moment_generating_function(self, t):
return ((exp(t) - 1) / t)**self.n
def UniformSum(name, n):
r"""
Create a continuous random variable with an Irwin-Hall distribution.
Explanation
===========
    The probability distribution function depends on a single parameter
    $n$, which is a positive integer.
The density of the Irwin-Hall distribution is given by
.. math ::
f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\left\lfloor x\right\rfloor}(-1)^k
\binom{n}{k}(x-k)^{n-1}
Parameters
==========
n : A positive integer, `n > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import UniformSum, density, cdf
>>> from sympy import Symbol, pprint
>>> n = Symbol("n", integer=True)
>>> z = Symbol("z")
>>> X = UniformSum("x", n)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
floor(z)
___
\ `
\ k n - 1 /n\
) (-1) *(-k + z) *| |
/ \k/
/__,
k = 0
--------------------------------
(n - 1)!
>>> cdf(X)(z)
Piecewise((0, z < 0), (Sum((-1)**_k*(-_k + z)**n*binomial(n, _k),
(_k, 0, floor(z)))/factorial(n), n >= z), (1, True))
    Compute the cdf for specific 'x' and 'n' values as follows:
>>> cdf(UniformSum("x", 5), evaluate=False)(2).doit()
9/40
    The argument evaluate=False prevents an attempt at evaluating
    the sum for general n before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_sum_distribution
.. [2] http://mathworld.wolfram.com/UniformSumDistribution.html
"""
return rv(name, UniformSumDistribution, (n, ))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
class VonMisesDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'k')
set = Interval(0, 2*pi)
@staticmethod
def check(mu, k):
_value_check(k > 0, "k must be positive")
def pdf(self, x):
mu, k = self.mu, self.k
return exp(k*cos(x-mu)) / (2*pi*besseli(0, k))
def VonMises(name, mu, k):
r"""
Create a Continuous Random Variable with a von Mises distribution.
Explanation
===========
The density of the von Mises distribution is given by
.. math::
f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}
with :math:`x \in [0,2\pi]`.
Parameters
==========
mu : Real number
Measure of location.
k : Real number
Measure of concentration.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import VonMises, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = VonMises("x", mu, k)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k*cos(mu - z)
e
------------------
2*pi*besseli(0, k)
References
==========
.. [1] https://en.wikipedia.org/wiki/Von_Mises_distribution
.. [2] http://mathworld.wolfram.com/vonMisesDistribution.html
"""
return rv(name, VonMisesDistribution, (mu, k))
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Alpha must be positive")
_value_check(beta > 0, "Beta must be positive")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha
def Weibull(name, alpha, beta):
r"""
Create a continuous random variable with a Weibull distribution.
Explanation
===========
The density of the Weibull distribution is given by
.. math::
f(x) := \begin{cases}
\frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
e^{-(x/\lambda)^{k}} & x\geq0\\
0 & x<0
\end{cases}
Parameters
==========
lambda : Real number, $\lambda > 0$, a scale
k : Real number, $k > 0$, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Weibull, density, E, variance
>>> from sympy import Symbol, simplify
>>> l = Symbol("lambda", positive=True)
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = Weibull("x", l, k)
>>> density(X)(z)
k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda
>>> simplify(E(X))
lambda*gamma(1 + 1/k)
>>> simplify(variance(X))
lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k))
References
==========
.. [1] https://en.wikipedia.org/wiki/Weibull_distribution
.. [2] http://mathworld.wolfram.com/WeibullDistribution.html
"""
return rv(name, WeibullDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicircleDistribution(SingleContinuousDistribution):
_argnames = ('R',)
@property
def set(self):
return Interval(-self.R, self.R)
@staticmethod
def check(R):
_value_check(R > 0, "Radius R must be positive.")
def pdf(self, x):
R = self.R
return 2/(pi*R**2)*sqrt(R**2 - x**2)
def _characteristic_function(self, t):
return Piecewise((2 * besselj(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
return Piecewise((2 * besseli(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def WignerSemicircle(name, R):
r"""
Create a continuous random variable with a Wigner semicircle distribution.
Explanation
===========
The density of the Wigner semicircle distribution is given by
.. math::
f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}
with :math:`x \in [-R,R]`.
Parameters
==========
R : Real number, `R > 0`, the radius
Returns
=======
    RandomSymbol
Examples
========
>>> from sympy.stats import WignerSemicircle, density, E
>>> from sympy import Symbol
>>> R = Symbol("R", positive=True)
>>> z = Symbol("z")
>>> X = WignerSemicircle("x", R)
>>> density(X)(z)
2*sqrt(R**2 - z**2)/(pi*R**2)
>>> E(X)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
.. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
"""
return rv(name, WignerSemicircleDistribution, (R,))
"""
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
BetaBinomial
Hypergeometric
Rademacher
IdealSoliton
RobustSoliton
"""
from sympy.core.cache import cacheit
from sympy.core.function import Lambda
from sympy.core.numbers import (Integer, Rational)
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.piecewise import Piecewise
from sympy.logic.boolalg import Or
from sympy.sets.contains import Contains
from sympy.sets.fancysets import Range
from sympy.sets.sets import (Intersection, Interval)
from sympy.functions.special.beta_functions import beta as beta_fn
from sympy.stats.frv import (SingleFiniteDistribution,
SingleFinitePSpace)
from sympy.stats.rv import _value_check, Density, is_random
from sympy.utilities.iterables import multiset
from sympy.utilities.misc import filldedent
__all__ = ['FiniteRV',
'DiscreteUniform',
'Die',
'Bernoulli',
'Coin',
'Binomial',
'BetaBinomial',
'Hypergeometric',
'Rademacher',
'IdealSoliton',
'RobustSoliton',
]
def rv(name, cls, *args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleFinitePSpace(name, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(name, CompoundDistribution(dist))
return pspace.value
class FiniteDistributionHandmade(SingleFiniteDistribution):
@property
def dict(self):
return self.args[0]
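    # pmf ignores the point it is given and returns a Lambda over a fresh
    # symbol, so the resulting Piecewise can be evaluated at any value later.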
def pmf(self, x):
x = Symbol('x')
return Lambda(x, Piecewise(*(
[(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)])))
@property
def set(self):
return set(self.dict.keys())
@staticmethod
def check(density):
for p in density.values():
_value_check((p >= 0, p <= 1),
"Probability at a point must be between 0 and 1.")
val = sum(density.values())
_value_check(Eq(val, 1) != S.false, "Total Probability must be 1.")
def FiniteRV(name, density, **kwargs):
r"""
Create a Finite Random Variable given a dict representing the density.
Parameters
==========
name : Symbol
Represents name of the random variable.
density : dict
Dictionary containing the pdf of finite distribution
check : bool
        If True, it will check whether the given density
        sums to 1 over the given set. If False, it
        will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import FiniteRV, P, E
>>> density = {0: .1, 1: .2, 2: .3, 3: .4}
>>> X = FiniteRV('X', density)
>>> E(X)
2.00000000000000
>>> P(X >= 2)
0.700000000000000
Returns
=======
RandomSymbol
"""
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(name, FiniteDistributionHandmade, density, **kwargs)
class DiscreteUniformDistribution(SingleFiniteDistribution):
@staticmethod
def check(*args):
# not using _value_check since there is a
# suggestion for the user
if len(set(args)) != len(args):
weights = multiset(args)
n = Integer(len(args))
for k in weights:
weights[k] /= n
raise ValueError(filldedent("""
Repeated args detected but set expected. For a
distribution having different weights for each
item use the following:""") + (
'\nS("FiniteRV(%s, %s)")' % ("'X'", weights)))
@property
def p(self):
return Rational(1, len(self.args))
@property # type: ignore
@cacheit
def dict(self):
return {k: self.p for k in self.set}
@property
def set(self):
return set(self.args)
def pmf(self, x):
if x in self.args:
return self.p
else:
return S.Zero
def DiscreteUniform(name, items):
r"""
Create a Finite Random Variable representing a uniform distribution over
the input set.
Parameters
==========
items : list/tuple
Items over which Uniform distribution is to be made
Examples
========
>>> from sympy.stats import DiscreteUniform, density
>>> from sympy import symbols
>>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
>>> density(X).dict
{a: 1/3, b: 1/3, c: 1/3}
>>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range
>>> density(Y).dict
{0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Discrete_uniform_distribution
.. [2] http://mathworld.wolfram.com/DiscreteUniformDistribution.html
"""
return rv(name, DiscreteUniformDistribution, *items)
class DieDistribution(SingleFiniteDistribution):
_argnames = ('sides',)
@staticmethod
def check(sides):
_value_check((sides.is_positive, sides.is_integer),
"number of sides must be a positive integer.")
@property
def is_symbolic(self):
return not self.sides.is_number
@property
def high(self):
return self.sides
@property
def low(self):
return S.One
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.sides))
return set(map(Integer, range(1, self.sides + 1)))
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
"'RandomSymbol' not %s" % (type(x)))
cond = Ge(x, 1) & Le(x, self.sides) & Contains(x, S.Integers)
return Piecewise((S.One/self.sides, cond), (S.Zero, True))
def Die(name, sides=6):
r"""
Create a Finite Random Variable representing a fair die.
Parameters
==========
sides : Integer
Represents the number of sides of the Die, by default is 6
Examples
========
>>> from sympy.stats import Die, density
>>> from sympy import Symbol
>>> D6 = Die('D6', 6) # Six sided Die
>>> density(D6).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> D4 = Die('D4', 4) # Four sided Die
>>> density(D4).dict
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
>>> n = Symbol('n', positive=True, integer=True)
>>> Dn = Die('Dn', n) # n sided Die
>>> density(Dn).dict
Density(DieDistribution(n))
>>> density(Dn).dict.subs(n, 4).doit()
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
Returns
=======
RandomSymbol
"""
return rv(name, DieDistribution, sides)
class BernoulliDistribution(SingleFiniteDistribution):
_argnames = ('p', 'succ', 'fail')
@staticmethod
def check(p, succ, fail):
_value_check((p >= 0, p <= 1),
"p should be in range [0, 1].")
@property
def set(self):
return {self.succ, self.fail}
def pmf(self, x):
if isinstance(self.succ, Symbol) and isinstance(self.fail, Symbol):
return Piecewise((self.p, x == self.succ),
(1 - self.p, x == self.fail),
(S.Zero, True))
return Piecewise((self.p, Eq(x, self.succ)),
(1 - self.p, Eq(x, self.fail)),
(S.Zero, True))
def Bernoulli(name, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a Bernoulli process.
Parameters
==========
p : Rational number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success
fail : Integer/symbol/string
Represents event of failure
Examples
========
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X).dict
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X).dict
{Heads: 1/2, Tails: 1/2}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution
.. [2] http://mathworld.wolfram.com/BernoulliDistribution.html
"""
return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
r"""
Create a Finite Random Variable representing a Coin toss.
Parameters
==========
p : Rational Number between 0 and 1
Represents probability of getting "Heads", by default is Half
Examples
========
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C).dict
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2).dict
{H: 3/5, T: 2/5}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Binomial
References
==========
.. [1] https://en.wikipedia.org/wiki/Coin_flipping
"""
return rv(name, BernoulliDistribution, p, 'H', 'T')
class BinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'p', 'succ', 'fail')
@staticmethod
def check(n, p, succ, fail):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer.")
_value_check((p <= 1, p >= 0),
"p should be in range [0, 1].")
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(self.dict.keys())
def pmf(self, x):
n, p = self.n, self.p
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
"'RandomSymbol' not %s" % (type(x)))
cond = Ge(x, 0) & Le(x, n) & Contains(x, S.Integers)
return Piecewise((binomial(n, x) * p**x * (1 - p)**(n - x), cond), (S.Zero, True))
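    # The keys of the density dict are expressed in terms of the succ/fail
    # outcomes: k successes correspond to the key k*succ + (n - k)*fail.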
@property # type: ignore
@cacheit
def dict(self):
if self.is_symbolic:
return Density(self)
return {k*self.succ + (self.n-k)*self.fail: self.pmf(k)
for k in range(0, self.n + 1)}
def Binomial(name, n, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
p : Rational Number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success, by default is 1
fail : Integer/symbol/string
Represents event of failure, by default is 0
Examples
========
>>> from sympy.stats import Binomial, density
>>> from sympy import S, Symbol
>>> X = Binomial('X', 4, S.Half) # Four "coin flips"
>>> density(X).dict
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
>>> n = Symbol('n', positive=True, integer=True)
>>> p = Symbol('p', positive=True)
>>> X = Binomial('X', n, S.Half) # n "coin flips"
>>> density(X).dict
Density(BinomialDistribution(n, 1/2, 1, 0))
>>> density(X).dict.subs(n, 4).doit()
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Binomial_distribution
.. [2] http://mathworld.wolfram.com/BinomialDistribution.html
"""
return rv(name, BinomialDistribution, n, p, succ, fail)
#-------------------------------------------------------------------------------
# Beta-binomial distribution ----------------------------------------------------------
class BetaBinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'alpha', 'beta')
@staticmethod
def check(n, alpha, beta):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer. n = %s." % str(n))
_value_check((alpha > 0),
"'alpha' must be: alpha > 0 . alpha = %s" % str(alpha))
_value_check((beta > 0),
"'beta' must be: beta > 0 . beta = %s" % str(beta))
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(map(Integer, range(self.n + 1)))
def pmf(self, k):
n, a, b = self.n, self.alpha, self.beta
return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b)
def BetaBinomial(name, n, alpha, beta):
r"""
Create a Finite Random Variable representing a Beta-binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
alpha : Real positive number
beta : Real positive number
Examples
========
>>> from sympy.stats import BetaBinomial, density
>>> X = BetaBinomial('X', 2, 1, 1)
>>> density(X).dict
{0: 1/3, 1: 2*beta(2, 2), 2: 1/3}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
.. [2] http://mathworld.wolfram.com/BetaBinomialDistribution.html
"""
return rv(name, BetaBinomialDistribution, n, alpha, beta)
class HypergeometricDistribution(SingleFiniteDistribution):
_argnames = ('N', 'm', 'n')
@staticmethod
    def check(N, m, n):
        _value_check((N.is_integer, N.is_nonnegative),
                     "'N' must be nonnegative integer. N = %s." % str(N))
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer. n = %s." % str(n))
        _value_check((m.is_integer, m.is_nonnegative),
                     "'m' must be nonnegative integer. m = %s." % str(m))
@property
def is_symbolic(self):
return not all(x.is_number for x in (self.N, self.m, self.n))
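    # high and low are Min(n, m) and Max(0, n + m - N) respectively, written
    # as Piecewise so that symbolic parameters are handled.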
@property
def high(self):
return Piecewise((self.n, Lt(self.n, self.m) != False), (self.m, True))
@property
def low(self):
return Piecewise((0, Gt(0, self.n + self.m - self.N) != False), (self.n + self.m - self.N, True))
@property
def set(self):
N, m, n = self.N, self.m, self.n
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(self.low, self.high))
return {i for i in range(max(0, n + m - N), min(n, m) + 1)}
def pmf(self, k):
N, m, n = self.N, self.m, self.n
return S(binomial(m, k) * binomial(N - m, n - k))/binomial(N, n)
def Hypergeometric(name, N, m, n):
r"""
Create a Finite Random Variable representing a hypergeometric distribution.
Parameters
==========
N : Positive Integer
Represents finite population of size N.
m : Positive Integer
Represents number of trials with required feature.
n : Positive Integer
Represents numbers of draws.
Examples
========
>>> from sympy.stats import Hypergeometric, density
>>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
>>> density(X).dict
{0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Hypergeometric_distribution
.. [2] http://mathworld.wolfram.com/HypergeometricDistribution.html
"""
return rv(name, HypergeometricDistribution, N, m, n)
class RademacherDistribution(SingleFiniteDistribution):
@property
def set(self):
return {-1, 1}
@property
def pmf(self):
k = Dummy('k')
return Lambda(k, Piecewise((S.Half, Or(Eq(k, -1), Eq(k, 1))), (S.Zero, True)))
def Rademacher(name):
r"""
Create a Finite Random Variable representing a Rademacher distribution.
Examples
========
>>> from sympy.stats import Rademacher, density
>>> X = Rademacher('X')
>>> density(X).dict
{-1: 1/2, 1: 1/2}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Bernoulli
References
==========
.. [1] https://en.wikipedia.org/wiki/Rademacher_distribution
"""
return rv(name, RademacherDistribution)
class IdealSolitonDistribution(SingleFiniteDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k.is_integer and k.is_positive,
"'k' must be a positive integer.")
@property
def low(self):
return S.One
@property
def high(self):
return self.k
@property
def set(self):
return set(map(Integer, range(1, self.k + 1)))
@property # type: ignore
@cacheit
def dict(self):
if self.k.is_Symbol:
return Density(self)
d = {1: Rational(1, self.k)}
d.update(dict((i, Rational(1, i*(i - 1))) for i in range(2, self.k + 1)))
return d
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
"'RandomSymbol' not %s" % (type(x)))
cond1 = Eq(x, 1) & x.is_integer
cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
return Piecewise((1/self.k, cond1), (1/(x*(x - 1)), cond2), (S.Zero, True))
def IdealSoliton(name, k):
r"""
Create a Finite Random Variable of Ideal Soliton Distribution
Parameters
==========
k : Positive Integer
Represents the number of input symbols in an LT (Luby Transform) code.
Examples
========
>>> from sympy.stats import IdealSoliton, density, P, E
>>> sol = IdealSoliton('sol', 5)
>>> density(sol).dict
{1: 1/5, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20}
>>> density(sol).set
{1, 2, 3, 4, 5}
>>> from sympy import Symbol
>>> k = Symbol('k', positive=True, integer=True)
>>> sol = IdealSoliton('sol', k)
>>> density(sol).dict
Density(IdealSolitonDistribution(k))
>>> density(sol).dict.subs(k, 10).doit()
{1: 1/10, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20, 6: 1/30, 7: 1/42, 8: 1/56, 9: 1/72, 10: 1/90}
>>> E(sol.subs(k, 10))
7381/2520
>>> P(sol.subs(k, 4) > 2)
1/4
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Ideal_distribution
.. [2] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf
"""
return rv(name, IdealSolitonDistribution, k)
class RobustSolitonDistribution(SingleFiniteDistribution):
_argnames= ('k', 'delta', 'c')
@staticmethod
def check(k, delta, c):
_value_check(k.is_integer and k.is_positive,
"'k' must be a positive integer")
_value_check(Gt(delta, 0) and Le(delta, 1),
"'delta' must be a real number in the interval (0,1)")
_value_check(c.is_positive,
"'c' must be a positive real number.")
@property
def R(self):
return self.c * log(self.k/self.delta) * self.k**0.5
@property
def Z(self):
z = 0
for i in Range(1, round(self.k/self.R)):
z += (1/i)
z += log(self.R/self.delta)
return 1 + z * self.R/self.k
@property
def low(self):
return S.One
@property
def high(self):
return self.k
@property
def set(self):
return set(map(Integer, range(1, self.k + 1)))
@property
def is_symbolic(self):
return not (self.k.is_number and self.c.is_number and self.delta.is_number)
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
"'RandomSymbol' not %s" % (type(x)))
cond1 = Eq(x, 1) & x.is_integer
cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
rho = Piecewise((Rational(1, self.k), cond1), (Rational(1, x*(x-1)), cond2), (S.Zero, True))
cond1 = Ge(x, 1) & Le(x, round(self.k/self.R)-1)
cond2 = Eq(x, round(self.k/self.R))
tau = Piecewise((self.R/(self.k * x), cond1), (self.R * log(self.R/self.delta)/self.k, cond2), (S.Zero, True))
return (rho + tau)/self.Z
def RobustSoliton(name, k, delta, c):
r'''
Create a Finite Random Variable of Robust Soliton Distribution
Parameters
==========
k : Positive Integer
Represents the number of input symbols in an LT (Luby Transform) code.
delta : Positive Rational Number
Represents the failure probability. Must be in the interval (0,1).
c : Positive Rational Number
        Constant of proportionality. Values close to 1 are recommended.
Examples
========
>>> from sympy.stats import RobustSoliton, density, P, E
>>> robSol = RobustSoliton('robSol', 5, 0.5, 0.01)
>>> density(robSol).dict
{1: 0.204253668152708, 2: 0.490631107897393, 3: 0.165210624506162, 4: 0.0834387731899302, 5: 0.0505633404760675}
>>> density(robSol).set
{1, 2, 3, 4, 5}
>>> from sympy import Symbol
>>> k = Symbol('k', positive=True, integer=True)
>>> c = Symbol('c', positive=True)
>>> robSol = RobustSoliton('robSol', k, 0.5, c)
>>> density(robSol).dict
Density(RobustSolitonDistribution(k, 0.5, c))
>>> density(robSol).dict.subs(k, 10).subs(c, 0.03).doit()
{1: 0.116641095387194, 2: 0.467045731687165, 3: 0.159984123349381, 4: 0.0821431680681869, 5: 0.0505765646770100,
6: 0.0345781523420719, 7: 0.0253132820710503, 8: 0.0194459129233227, 9: 0.0154831166726115, 10: 0.0126733075238887}
>>> E(robSol.subs(k, 10).subs(c, 0.05))
2.91358846104106
>>> P(robSol.subs(k, 4).subs(c, 0.1) > 2)
0.243650614389834
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Robust_distribution
.. [2] http://www.inference.org.uk/mackay/itprnn/ps/588.596.pdf
.. [3] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf
'''
return rv(name, RobustSolitonDistribution, k, delta, c)
import random
import itertools
from typing import (Sequence as tSequence, Union as tUnion, List as tList,
Tuple as tTuple, Set as tSet)
from sympy.concrete.summations import Sum
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import (Function, Lambda)
from sympy.core.mul import Mul
from sympy.core.numbers import (Integer, Rational, igcd, oo, pi)
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol)
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
from sympy.logic.boolalg import (And, Not, Or)
from sympy.matrices.common import NonSquareMatrixError
from sympy.matrices.dense import (Matrix, eye, ones, zeros)
from sympy.matrices.expressions.blockmatrix import BlockMatrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.special import Identity
from sympy.matrices.immutable import ImmutableMatrix
from sympy.sets.conditionset import ConditionSet
from sympy.sets.contains import Contains
from sympy.sets.fancysets import Range
from sympy.sets.sets import (FiniteSet, Intersection, Interval, Set, Union)
from sympy.solvers.solveset import linsolve
from sympy.tensor.indexed import (Indexed, IndexedBase)
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import strongly_connected_components
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import (RandomIndexedSymbol, random_symbols, RandomSymbol,
_symbol_converter, _value_check, pspace, given,
dependent, is_random, sample_iter, Distribution,
Density)
from sympy.stats.stochastic_process import StochasticPSpace
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.stats.frv_types import Bernoulli, BernoulliDistribution, FiniteRV
from sympy.stats.drv_types import Poisson, PoissonDistribution
from sympy.stats.crv_types import Normal, NormalDistribution, Gamma, GammaDistribution
from sympy.core.sympify import _sympify, sympify
EmptySet = S.EmptySet
__all__ = [
'StochasticProcess',
'DiscreteTimeStochasticProcess',
'DiscreteMarkovChain',
'TransitionMatrixOf',
'StochasticStateSpaceOf',
'GeneratorMatrixOf',
'ContinuousMarkovChain',
'BernoulliProcess',
'PoissonProcess',
'WienerProcess',
'GammaProcess'
]
@is_random.register(Indexed)
def _(x):
return is_random(x.base)
@is_random.register(RandomIndexedSymbol) # type: ignore
def _(x):
return True
def _set_converter(itr):
"""
Helper function for converting list/tuple/set to Set.
    If the parameter is already an instance of Set then it is
    returned unchanged.
Returns
=======
Set
The argument converted to Set.
Raises
======
TypeError
        If the argument is neither a list/tuple/set nor a Set.
"""
if isinstance(itr, (list, tuple, set)):
itr = FiniteSet(*itr)
if not isinstance(itr, Set):
raise TypeError("%s is not an instance of list/tuple/set."%(itr))
return itr
def _state_converter(itr: tSequence) -> tUnion[Tuple, Range]:
"""
Helper function for converting list/tuple/set/Range/Tuple/FiniteSet
to tuple/Range.
"""
itr_ret: tUnion[Tuple, Range]
if isinstance(itr, (Tuple, set, FiniteSet)):
itr_ret = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, (list, tuple)):
# check if states are unique
if len(set(itr)) != len(itr):
raise ValueError('The state space must have unique elements.')
itr_ret = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, Range):
# the only ordered set in SymPy I know of
# try to convert to tuple
try:
itr_ret = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
except (TypeError, ValueError):
itr_ret = itr
else:
raise TypeError("%s is not an instance of list/tuple/set/Range/Tuple/FiniteSet." % (itr))
return itr_ret
def _sym_sympify(arg):
"""
Converts an arbitrary expression to a type that can be used inside SymPy.
    Since raw strings are generally unwise to use inside expressions,
    a string argument is converted to a Symbol; anything else is sympified.
    Parameters
    ==========
    arg: The argument to be converted for use inside SymPy.
Returns
=======
The converted parameter.
"""
if isinstance(arg, str):
return Symbol(arg)
else:
return _sympify(arg)
def _matrix_checks(matrix):
if not isinstance(matrix, (Matrix, MatrixSymbol, ImmutableMatrix)):
raise TypeError("Transition probabilities either should "
"be a Matrix or a MatrixSymbol.")
if matrix.shape[0] != matrix.shape[1]:
raise NonSquareMatrixError("%s is not a square matrix"%(matrix))
if isinstance(matrix, Matrix):
matrix = ImmutableMatrix(matrix.tolist())
return matrix
class StochasticProcess(Basic):
"""
    Base class for all stochastic processes, whether
    discrete or continuous.
Parameters
==========
sym: Symbol or str
state_space: Set
The state space of the stochastic process, by default S.Reals.
For discrete sets it is zero indexed.
See Also
========
DiscreteTimeStochasticProcess
"""
index_set = S.Reals
def __new__(cls, sym, state_space=S.Reals, **kwargs):
sym = _symbol_converter(sym)
state_space = _set_converter(state_space)
return Basic.__new__(cls, sym, state_space)
@property
def symbol(self):
return self.args[0]
@property
def state_space(self) -> tUnion[FiniteSet, Range]:
if not isinstance(self.args[1], (FiniteSet, Range)):
assert isinstance(self.args[1], Tuple)
return FiniteSet(*self.args[1])
return self.args[1]
def _deprecation_warn_distribution(self):
sympy_deprecation_warning(
"""
Calling the distribution method with a RandomIndexedSymbol
argument, like X.distribution(X(t)) is deprecated. Instead, call
distribution() with the given timestamp, like
X.distribution(t)
""",
deprecated_since_version="1.7.1",
active_deprecations_target="deprecated-distribution-randomindexedsymbol",
stacklevel=4,
)
def distribution(self, key=None):
if key is None:
self._deprecation_warn_distribution()
return Distribution()
def density(self, x):
return Density()
def __call__(self, time):
"""
Overridden in ContinuousTimeStochasticProcess.
"""
raise NotImplementedError("Use [] for indexing discrete time stochastic process.")
def __getitem__(self, time):
"""
Overridden in DiscreteTimeStochasticProcess.
"""
raise NotImplementedError("Use () for indexing continuous time stochastic process.")
def probability(self, condition):
raise NotImplementedError()
def joint_distribution(self, *args):
"""
Computes the joint distribution of the random indexed variables.
Parameters
==========
args: iterable
The finite list of random indexed variables/the key of a stochastic
process whose joint distribution has to be computed.
Returns
=======
JointDistribution
The joint distribution of the list of random indexed variables.
An unevaluated object is returned if it is not possible to
compute the joint distribution.
Raises
======
        ValueError: When the arguments passed are not of type
        RandomIndexedSymbol or Number.
"""
args = list(args)
for i, arg in enumerate(args):
if S(arg).is_Number:
if self.index_set.is_subset(S.Integers):
args[i] = self.__getitem__(arg)
else:
args[i] = self.__call__(arg)
elif not isinstance(arg, RandomIndexedSymbol):
raise ValueError("Expected a RandomIndexedSymbol or "
"key not %s"%(type(arg)))
if args[0].pspace.distribution == Distribution():
return JointDistribution(*args)
density = Lambda(tuple(args),
expr=Mul.fromiter(arg.pspace.process.density(arg) for arg in args))
return JointDistributionHandmade(density)
def expectation(self, condition, given_condition):
raise NotImplementedError("Abstract method for expectation queries.")
def sample(self):
raise NotImplementedError("Abstract method for sampling queries.")
class DiscreteTimeStochasticProcess(StochasticProcess):
"""
    Base class for all discrete time stochastic processes.
"""
def __getitem__(self, time):
"""
For indexing discrete time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
time = sympify(time)
if not time.is_symbol and time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
idx_obj = Indexed(self.symbol, time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution(time))
return RandomIndexedSymbol(idx_obj, pspace_obj)
class ContinuousTimeStochasticProcess(StochasticProcess):
"""
    Base class for all continuous time stochastic processes.
"""
def __call__(self, time):
"""
For indexing continuous time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
time = sympify(time)
if not time.is_symbol and time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
func_obj = Function(self.symbol)(time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution(time))
return RandomIndexedSymbol(func_obj, pspace_obj)
class TransitionMatrixOf(Boolean):
"""
Assumes that the matrix is the transition matrix
of the process.
"""
def __new__(cls, process, matrix):
if not isinstance(process, DiscreteMarkovChain):
raise ValueError("Currently only DiscreteMarkovChain "
"support TransitionMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
process = property(lambda self: self.args[0])
matrix = property(lambda self: self.args[1])
class GeneratorMatrixOf(TransitionMatrixOf):
"""
Assumes that the matrix is the generator matrix
of the process.
"""
def __new__(cls, process, matrix):
if not isinstance(process, ContinuousMarkovChain):
raise ValueError("Currently only ContinuousMarkovChain "
"support GeneratorMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
class StochasticStateSpaceOf(Boolean):
def __new__(cls, process, state_space):
if not isinstance(process, (DiscreteMarkovChain, ContinuousMarkovChain)):
raise ValueError("Currently only DiscreteMarkovChain and ContinuousMarkovChain "
"support StochasticStateSpaceOf.")
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
state_index = Range(ss_size)
return Basic.__new__(cls, process, state_index)
process = property(lambda self: self.args[0])
state_index = property(lambda self: self.args[1])
class MarkovProcess(StochasticProcess):
"""
Contains methods that handle queries
common to Markov processes.
"""
@property
def number_of_states(self) -> tUnion[Integer, Symbol]:
"""
The number of states in the Markov Chain.
"""
return _sympify(self.args[2].shape[0]) # type: ignore
@property
def _state_index(self):
"""
Returns state index as Range.
"""
return self.args[1]
@classmethod
def _sanity_checks(cls, state_space, trans_probs):
# Try to never have None as state_space or trans_probs.
# This helps a lot if we get it done at the start.
if (state_space is None) and (trans_probs is None):
_n = Dummy('n', integer=True, nonnegative=True)
state_space = _state_converter(Range(_n))
trans_probs = _matrix_checks(MatrixSymbol('_T', _n, _n))
elif state_space is None:
trans_probs = _matrix_checks(trans_probs)
state_space = _state_converter(Range(trans_probs.shape[0]))
elif trans_probs is None:
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
_n = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
_n = len(state_space)
trans_probs = MatrixSymbol('_T', _n, _n)
else:
state_space = _state_converter(state_space)
trans_probs = _matrix_checks(trans_probs)
# Range object doesn't want to give a symbolic size
# so we do it ourselves.
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
if ss_size != trans_probs.shape[0]:
raise ValueError('The size of the state space and the number of '
'rows of the transition matrix must be the same.')
return state_space, trans_probs
def _extract_information(self, given_condition):
"""
Helper function to extract information, like,
transition matrix/generator matrix, state space, etc.
"""
if isinstance(self, DiscreteMarkovChain):
trans_probs = self.transition_probabilities
state_index = self._state_index
elif isinstance(self, ContinuousMarkovChain):
trans_probs = self.generator_matrix
state_index = self._state_index
if isinstance(given_condition, And):
gcs = given_condition.args
given_condition = S.true
for gc in gcs:
if isinstance(gc, TransitionMatrixOf):
trans_probs = gc.matrix
if isinstance(gc, StochasticStateSpaceOf):
state_index = gc.state_index
if isinstance(gc, Relational):
given_condition = given_condition & gc
if isinstance(given_condition, TransitionMatrixOf):
trans_probs = given_condition.matrix
given_condition = S.true
if isinstance(given_condition, StochasticStateSpaceOf):
state_index = given_condition.state_index
given_condition = S.true
return trans_probs, state_index, given_condition
def _check_trans_probs(self, trans_probs, row_sum=1):
"""
Helper function for checking the validity of transition
probabilities.
"""
if not isinstance(trans_probs, MatrixSymbol):
rows = trans_probs.tolist()
for row in rows:
if (sum(row) - row_sum) != 0:
raise ValueError("Values in a row must sum to %s. "
"If you are using Float or floats then please use Rational."%(row_sum))
def _work_out_state_index(self, state_index, given_condition, trans_probs):
"""
Helper function to extract state space if there
is a random symbol in the given condition.
"""
# if given condition is None, then there is no need to work out
# state_space from random variables
if given_condition != None:
rand_var = list(given_condition.atoms(RandomSymbol) -
given_condition.atoms(RandomIndexedSymbol))
if len(rand_var) == 1:
state_index = rand_var[0].pspace.set
# `not None` is `True`. So the old test fails for symbolic sizes.
# Need to build the statement differently.
sym_cond = not self.number_of_states.is_Integer
cond1 = not sym_cond and len(state_index) != trans_probs.shape[0]
if cond1:
raise ValueError("state space is not compatible with the transition probabilities.")
if not isinstance(trans_probs.shape[0], Symbol):
state_index = FiniteSet(*[i for i in range(trans_probs.shape[0])])
return state_index
@cacheit
def _preprocess(self, given_condition, evaluate):
"""
Helper function for pre-processing the information.
"""
is_insufficient = False
if not evaluate: # avoid pre-processing if the result is not to be evaluated
return (True, None, None, None)
# extracting transition matrix and state space
trans_probs, state_index, given_condition = self._extract_information(given_condition)
# given_condition does not have sufficient information
# for computations
if trans_probs is None or \
given_condition is None:
is_insufficient = True
else:
# checking transition probabilities
if isinstance(self, DiscreteMarkovChain):
self._check_trans_probs(trans_probs, row_sum=1)
elif isinstance(self, ContinuousMarkovChain):
self._check_trans_probs(trans_probs, row_sum=0)
# working out state space
state_index = self._work_out_state_index(state_index, given_condition, trans_probs)
return is_insufficient, trans_probs, state_index, given_condition
def replace_with_index(self, condition):
if isinstance(condition, Relational):
lhs, rhs = condition.lhs, condition.rhs
if not isinstance(lhs, RandomIndexedSymbol):
lhs, rhs = rhs, lhs
condition = type(condition)(self.index_of.get(lhs, lhs),
self.index_of.get(rhs, rhs))
return condition
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Handles probability queries for Markov process.
Parameters
==========
condition: Relational
given_condition: Relational/And
Returns
=======
Probability
If the information is not sufficient.
Expr
In all other cases.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, new_given_condition = \
self._preprocess(given_condition, evaluate)
rv = list(condition.atoms(RandomIndexedSymbol))
symbolic = False
for sym in rv:
if sym.key.is_symbol:
symbolic = True
break
if check:
return Probability(condition, new_given_condition)
if isinstance(self, ContinuousMarkovChain):
trans_probs = self.transition_probabilities(mat)
elif isinstance(self, DiscreteMarkovChain):
trans_probs = mat
condition = self.replace_with_index(condition)
given_condition = self.replace_with_index(given_condition)
new_given_condition = self.replace_with_index(new_given_condition)
if isinstance(condition, Relational):
if isinstance(new_given_condition, And):
gcs = new_given_condition.args
else:
gcs = (new_given_condition, )
min_key_rv = list(new_given_condition.atoms(RandomIndexedSymbol))
if len(min_key_rv):
min_key_rv = min_key_rv[0]
for r in rv:
if min_key_rv.key.is_symbol or r.key.is_symbol:
continue
if min_key_rv.key > r.key:
return Probability(condition)
else:
min_key_rv = None
return Probability(condition)
if symbolic:
return self._symbolic_probability(condition, new_given_condition, rv, min_key_rv)
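        # Two RandomIndexedSymbols in ``condition`` (e.g. P(Eq(Y[3], Y[1]), Eq(Y[0], 0))):
        # order them so rv[0] carries the later timestamp, flip the inequality when the
        # operands are swapped, then sum over the intermediate state using the Markov property.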
if len(rv) > 1:
rv[0] = condition.lhs
rv[1] = condition.rhs
if rv[0].key < rv[1].key:
rv[0], rv[1] = rv[1], rv[0]
if isinstance(condition, Gt):
condition = Lt(condition.lhs, condition.rhs)
elif isinstance(condition, Lt):
condition = Gt(condition.lhs, condition.rhs)
elif isinstance(condition, Ge):
condition = Le(condition.lhs, condition.rhs)
elif isinstance(condition, Le):
condition = Ge(condition.lhs, condition.rhs)
s = Rational(0, 1)
n = len(self.state_space)
if isinstance(condition, (Eq, Ne)):
for i in range(0, n):
s += self.probability(Eq(rv[0], i), Eq(rv[1], i)) * self.probability(Eq(rv[1], i), new_given_condition)
return s if isinstance(condition, Eq) else 1 - s
else:
upper = 0
greater = False
if isinstance(condition, (Ge, Lt)):
upper = 1
if isinstance(condition, (Ge, Gt)):
greater = True
for i in range(0, n):
if i <= n//2:
for j in range(0, i + upper):
s += self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
else:
s += self.probability(Eq(rv[0], i), new_given_condition)
for j in range(i + upper, n):
s -= self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
return s if greater else 1 - s
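        # Single RandomIndexedSymbol: the answer comes from the (t2 - t1)-step transition
        # probabilities, i.e. row ``gstate`` of trans_probs**(t2 - t1) for discrete chains
        # or of trans_probs(t2 - t1) for continuous chains, summed over the target states.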
rv = rv[0]
states = condition.as_set()
prob, gstate = {}, None
for gc in gcs:
if gc.has(min_key_rv):
if gc.has(Probability):
p, gp = (gc.rhs, gc.lhs) if isinstance(gc.lhs, Probability) \
else (gc.lhs, gc.rhs)
gr = gp.args[0]
gset = Intersection(gr.as_set(), state_index)
gstate = list(gset)[0]
prob[gset] = p
else:
_, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \
else (gc.rhs.key, gc.lhs)
if not all(k in self.index_set for k in (rv.key, min_key_rv.key)):
raise IndexError("The timestamps of the process are not in it's index set.")
states = Intersection(states, state_index) if not isinstance(self.number_of_states, Symbol) else states
for state in Union(states, FiniteSet(gstate)):
if not state.is_Integer or Ge(state, mat.shape[0]) is True:
raise IndexError("No information is available for (%s, %s) in "
"transition probabilities of shape, (%s, %s). "
"State space is zero indexed."
%(gstate, state, mat.shape[0], mat.shape[1]))
if prob:
gstates = Union(*prob.keys())
if len(gstates) == 1:
gstate = list(gstates)[0]
gprob = list(prob.values())[0]
prob[gstates] = gprob
elif len(gstates) == len(state_index) - 1:
gstate = list(state_index - gstates)[0]
gprob = S.One - sum(prob.values())
prob[state_index - gstates] = gprob
else:
raise ValueError("Conflicting information.")
else:
gprob = S.One
if min_key_rv == rv:
return sum([prob[FiniteSet(state)] for state in states])
if isinstance(self, ContinuousMarkovChain):
return gprob * sum([trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state))
for state in states])
if isinstance(self, DiscreteMarkovChain):
return gprob * sum([(trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state))
for state in states])
if isinstance(condition, Not):
expr = condition.args[0]
return S.One - self.probability(expr, given_condition, evaluate, **kwargs)
if isinstance(condition, And):
compute_later, state2cond, conds = [], dict(), condition.args
for expr in conds:
if isinstance(expr, Relational):
ris = list(expr.atoms(RandomIndexedSymbol))[0]
if state2cond.get(ris, None) is None:
state2cond[ris] = S.true
state2cond[ris] &= expr
else:
compute_later.append(expr)
ris = []
for ri in state2cond:
ris.append(ri)
cset = Intersection(state2cond[ri].as_set(), state_index)
if len(cset) == 0:
return S.Zero
state2cond[ri] = cset.as_relational(ri)
sorted_ris = sorted(ris, key=lambda ri: ri.key)
prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs)
for i in range(1, len(sorted_ris)):
ri, prev_ri = sorted_ris[i], sorted_ris[i-1]
if not isinstance(state2cond[ri], Eq):
raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
prod *= self.probability(state2cond[ri], state2cond[prev_ri]
& mat_of
& StochasticStateSpaceOf(self, state_index),
evaluate, **kwargs)
for expr in compute_later:
prod *= self.probability(expr, given_condition, evaluate, **kwargs)
return prod
if isinstance(condition, Or):
return sum([self.probability(expr, given_condition, evaluate, **kwargs)
for expr in condition.args])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(condition, given_condition))
def _symbolic_probability(self, condition, new_given_condition, rv, min_key_rv):
        # Helper for probability queries involving symbolic timestamps.
if isinstance(condition, Relational):
curr_state = new_given_condition.rhs if isinstance(new_given_condition.lhs, RandomIndexedSymbol) \
else new_given_condition.lhs
next_state = condition.rhs if isinstance(condition.lhs, RandomIndexedSymbol) \
else condition.lhs
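            # With symbolic timestamps the matrix power/exponential stays unevaluated, so
            # return a Piecewise that applies the symbolic entry only when rv[0].key is
            # later than the conditioning time and falls back to Probability otherwise.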
if isinstance(condition, (Eq, Ne)):
if isinstance(self, DiscreteMarkovChain):
P = self.transition_probabilities**(rv[0].key - min_key_rv.key)
else:
P = exp(self.generator_matrix*(rv[0].key - min_key_rv.key))
prob = P[curr_state, next_state] if isinstance(condition, Eq) else 1 - P[curr_state, next_state]
return Piecewise((prob, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
upper = 1
greater = False
if isinstance(condition, (Ge, Lt)):
upper = 0
if isinstance(condition, (Ge, Gt)):
greater = True
k = Dummy('k')
condition = Eq(condition.lhs, k) if isinstance(condition.lhs, RandomIndexedSymbol)\
else Eq(condition.rhs, k)
total = Sum(self.probability(condition, new_given_condition), (k, next_state + upper, self.state_space._sup))
return Piecewise((total, rv[0].key > min_key_rv.key), (Probability(condition), True)) if greater\
else Piecewise((1 - total, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
return Probability(condition, new_given_condition)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
        Handles expectation queries for Markov processes.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation
Unevaluated object if computations cannot be done due to
insufficient information.
Expr
In all other cases when the computations are successful.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, condition = \
self._preprocess(condition, evaluate)
if check:
return Expectation(expr, condition)
rvs = random_symbols(expr)
if isinstance(expr, Expr) and isinstance(condition, Eq) \
and len(rvs) == 1:
# handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>))
condition=self.replace_with_index(condition)
state_index=self.replace_with_index(state_index)
rv = list(rvs)[0]
lhsg, rhsg = condition.lhs, condition.rhs
if not isinstance(lhsg, RandomIndexedSymbol):
lhsg, rhsg = (rhsg, lhsg)
if rhsg not in state_index:
raise ValueError("%s state is not in the state space."%(rhsg))
if rv.key < lhsg.key:
raise ValueError("Incorrect given condition is given, expectation "
"time %s < time %s"%(rv.key, rv.key))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
cond = condition & mat_of & \
StochasticStateSpaceOf(self, state_index)
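            # Law of total expectation over the state space:
            # E[f(X[t]) | X[s] = j] = sum_i P(X[t] = i | X[s] = j) * f(state_i)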
func = lambda s: self.probability(Eq(rv, s), cond) * expr.subs(rv, self._state_index[s])
return sum([func(s) for s in state_index])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(expr, condition))
class DiscreteMarkovChain(DiscreteTimeStochasticProcess, MarkovProcess):
"""
Represents a finite discrete time-homogeneous Markov chain.
This type of Markov Chain can be uniquely characterised by
its (ordered) state space and its one-step transition probability
matrix.
Parameters
==========
sym:
The name given to the Markov Chain
state_space:
Optional, by default, Range(n)
trans_probs:
Optional, by default, MatrixSymbol('_T', n, n)
Examples
========
>>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf, P, E
>>> from sympy import Matrix, MatrixSymbol, Eq, symbols
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> YS = DiscreteMarkovChain("Y")
>>> Y.state_space
{0, 1, 2}
>>> Y.transition_probabilities
Matrix([
[0.5, 0.2, 0.3],
[0.2, 0.5, 0.3],
[0.2, 0.3, 0.5]])
>>> TS = MatrixSymbol('T', 3, 3)
>>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS))
T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2]
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
Probabilities will be calculated based on indexes rather
than state names. For example, with the Sunny-Cloudy-Rainy
model with string state names:
>>> from sympy.core.symbol import Str
>>> Y = DiscreteMarkovChain("Y", [Str('Sunny'), Str('Cloudy'), Str('Rainy')], T)
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
This gives the same answer as the ``[0, 1, 2]`` state space.
Currently, there is no support for state names within probability
and expectation statements. Here is a work-around using ``Str``:
>>> P(Eq(Str('Rainy'), Y[3]), Eq(Y[1], Str('Cloudy'))).round(2)
0.36
Symbol state names can also be used:
>>> sunny, cloudy, rainy = symbols('Sunny, Cloudy, Rainy')
>>> Y = DiscreteMarkovChain("Y", [sunny, cloudy, rainy], T)
>>> P(Eq(Y[3], rainy), Eq(Y[1], cloudy)).round(2)
0.36
Expectations will be calculated as follows:
>>> E(Y[3], Eq(Y[1], cloudy))
0.38*Cloudy + 0.36*Rainy + 0.26*Sunny
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
transition matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> P(Eq(Y[3], Y[1]), Eq(Y[0], 0)).round(3)
0.409
>>> P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2)
0.36
>>> P(Le(Y[15], Y[10]), Eq(Y[8], 2)).round(7)
0.6963328
Symbolic probability queries are also supported
>>> a, b, c, d = symbols('a b c d')
>>> T = Matrix([[Rational(1, 10), Rational(4, 10), Rational(5, 10)], [Rational(3, 10), Rational(4, 10), Rational(3, 10)], [Rational(7, 10), Rational(2, 10), Rational(1, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> query = P(Eq(Y[a], b), Eq(Y[c], d))
>>> query.subs({a:10, b:2, c:5, d:1}).round(4)
0.3096
>>> P(Eq(Y[10], 2), Eq(Y[5], 1)).evalf().round(4)
0.3096
>>> query_gt = P(Gt(Y[a], b), Eq(Y[c], d))
>>> query_gt.subs({a:21, b:0, c:5, d:0}).evalf().round(5)
0.64705
>>> P(Gt(Y[21], 0), Eq(Y[5], 0)).round(5)
0.64705
There is limited support for arbitrarily sized states:
>>> n = symbols('n', nonnegative=True, integer=True)
>>> T = MatrixSymbol('T', n, n)
>>> Y = DiscreteMarkovChain("Y", trans_probs=T)
>>> Y.state_space
Range(0, n, 1)
>>> query = P(Eq(Y[a], b), Eq(Y[c], d))
>>> query.subs({a:10, b:2, c:5, d:1})
(T**5)[1, 2]
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Discrete-time_Markov_chain
.. [2] https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, state_space=None, trans_probs=None):
sym = _symbol_converter(sym)
state_space, trans_probs = MarkovProcess._sanity_checks(state_space, trans_probs)
obj = Basic.__new__(cls, sym, state_space, trans_probs) # type: ignore
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj._state_index):
indices[state] = index
obj.index_of = indices
return obj
@property
def transition_probabilities(self):
"""
Transition probabilities of discrete Markov chain,
either an instance of Matrix or MatrixSymbol.
"""
return self.args[2]
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
"""
Returns the list of communication classes that partition
        the states of the Markov chain.
A communication class is defined to be a set of states
such that every state in that set is reachable from
every other state in that set. Due to its properties
this forms a class in the mathematical sense.
Communication classes are also known as recurrence
classes.
Returns
=======
classes
The ``classes`` are a list of tuples. Each
tuple represents a single communication class
with its properties. The first element in the
tuple is the list of states in the class, the
second element is whether the class is recurrent
and the third element is the period of the
communication class.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0, 1, 0],
... [1, 0, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
>>> classes = X.communication_classes()
>>> for states, is_recurrent, period in classes:
... states, is_recurrent, period
([1, 2], True, 2)
([3], False, 1)
From this we can see that states ``1`` and ``2``
communicate, are recurrent and have a period
of 2. We can also see state ``3`` is transient
with a period of 1.
Notes
=====
The algorithm used is of order ``O(n**2)`` where
        ``n`` is the number of states in the Markov chain.
It uses Tarjan's algorithm to find the classes
themselves and then it uses a breadth-first search
algorithm to find each class's periodicity.
Most of the algorithm's components approach ``O(n)``
as the matrix becomes more and more sparse.
References
==========
.. [1] http://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
.. [2] http://cecas.clemson.edu/~shierd/Shier/markov.pdf
.. [3] https://ujcontent.uj.ac.za/vital/access/services/Download/uj:7506/CONTENT1
.. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
"""
n = self.number_of_states
T = self.transition_probabilities
if isinstance(T, MatrixSymbol):
raise NotImplementedError("Cannot perform the operation with a symbolic matrix.")
# begin Tarjan's algorithm
V = Range(n)
# don't use state names. Rather use state
# indexes since we use them for matrix
# indexing here and later onward
E = [(i, j) for i in V for j in V if T[i, j] != 0]
classes = strongly_connected_components((V, E))
# end Tarjan's algorithm
recurrence = []
periods = []
for class_ in classes:
# begin recurrent check (similar to self._check_trans_probs())
submatrix = T[class_, class_] # get the submatrix with those states
is_recurrent = S.true
rows = submatrix.tolist()
for row in rows:
if (sum(row) - 1) != 0:
is_recurrent = S.false
break
recurrence.append(is_recurrent)
# end recurrent check
# begin breadth-first search
non_tree_edge_values: tSet[int] = set()
visited = {class_[0]}
newly_visited = {class_[0]}
level = {class_[0]: 0}
current_level = 0
done = False # imitate a do-while loop
while not done: # runs at most len(class_) times
done = len(visited) == len(class_)
current_level += 1
# this loop and the while loop above run a combined len(class_) number of times.
# so this triple nested loop runs through each of the n states once.
for i in newly_visited:
# the loop below runs len(class_) number of times
# complexity is around about O(n * avg(len(class_)))
newly_visited = {j for j in class_ if T[i, j] != 0}
new_tree_edges = newly_visited.difference(visited)
for j in new_tree_edges:
level[j] = current_level
new_non_tree_edges = newly_visited.intersection(visited)
new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges}
non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values)
visited = visited.union(new_tree_edges)
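            # the period of the class is the gcd of the cycle lengths
            # level[i] - level[j] + 1 collected from the non-tree edges above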
# igcd needs at least 2 arguments
positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0}
if len(positive_ntev) == 0:
periods.append(len(class_))
elif len(positive_ntev) == 1:
periods.append(positive_ntev.pop())
else:
periods.append(igcd(*positive_ntev))
# end breadth-first search
# convert back to the user's state names
classes = [[_sympify(self._state_index[i]) for i in class_] for class_ in classes]
return list(zip(classes, recurrence, map(Integer,periods)))
def fundamental_matrix(self):
"""
        Each entry of the fundamental matrix can be interpreted as
        the expected number of times the chain is in state j
        if it started in state i.
References
==========
.. [1] https://lips.cs.princeton.edu/the-fundamental-matrix-of-a-finite-markov-chain/
"""
_, _, _, Q = self.decompose()
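        # For an absorbing/non-ergodic chain the fundamental matrix is N = (I - Q)**(-1),
        # where Q is the transient-to-transient block; for an ergodic chain the analogue
        # is Z = (I - P + W)**(-1) with every row of W equal to the stationary distribution.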
if Q.shape[0] > 0: # if non-ergodic
I = eye(Q.shape[0])
if (I - Q).det() == 0:
raise ValueError("The fundamental matrix doesn't exist.")
return (I - Q).inv().as_immutable()
else: # if ergodic
P = self.transition_probabilities
I = eye(P.shape[0])
w = self.fixed_row_vector()
W = Matrix([list(w) for i in range(0, P.shape[0])])
if (I - P + W).det() == 0:
raise ValueError("The fundamental matrix doesn't exist.")
return (I - P + W).inv().as_immutable()
def absorbing_probabilities(self):
"""
Computes the absorbing probabilities, i.e.
the ij-th entry of the matrix denotes the
probability of Markov chain being absorbed
in state j starting from state i.
"""
_, _, R, _ = self.decompose()
N = self.fundamental_matrix()
if R is None or N is None:
return None
return N*R
def absorbing_probabilites(self):
sympy_deprecation_warning(
"""
DiscreteMarkovChain.absorbing_probabilites() is deprecated. Use
absorbing_probabilities() instead (note the spelling difference).
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-absorbing_probabilites",
)
return self.absorbing_probabilities()
def is_regular(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, periods = list(zip(*tuples))
return And(len(classes) == 1, periods[0] == 1)
def is_ergodic(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, _ = list(zip(*tuples))
return S(len(classes) == 1)
def is_absorbing_state(self, state):
trans_probs = self.transition_probabilities
if isinstance(trans_probs, ImmutableMatrix) and \
state < trans_probs.shape[0]:
return S(trans_probs[state, state]) is S.One
def is_absorbing_chain(self):
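        # The chain is treated as absorbing when it has at least one recurrent state and
        # the recurrent-to-recurrent block A from decompose() is an identity matrix,
        # i.e. every recurrent state is absorbing.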
states, A, B, C = self.decompose()
r = A.shape[0]
return And(r > 0, A == Identity(r).as_explicit())
def stationary_distribution(self, condition_set=False) -> tUnion[ImmutableMatrix, ConditionSet, Lambda]:
r"""
The stationary distribution is any row vector, p, that solves p = pP,
is row stochastic and each element in p must be nonnegative.
That means in matrix form: :math:`(P-I)^T p^T = 0` and
:math:`(1, \dots, 1) p = 1`
where ``P`` is the one-step transition matrix.
All time-homogeneous Markov Chains with a finite state space
have at least one stationary distribution. In addition, if
a finite time-homogeneous Markov Chain is irreducible, the
stationary distribution is unique.
Parameters
==========
condition_set : bool
If the chain has a symbolic size or transition matrix,
it will return a ``Lambda`` if ``False`` and return a
``ConditionSet`` if ``True``.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
An irreducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13, 5/13, 0]])
A reducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [0, 0, 1]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13 - 8*tau0/13, 5/13 - 5*tau0/13, tau0]])
>>> Y = DiscreteMarkovChain('Y')
>>> Y.stationary_distribution()
Lambda((wm, _T), Eq(wm*_T, wm))
>>> Y.stationary_distribution(condition_set=True)
ConditionSet(wm, Eq(wm*_T, wm))
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_2_6_stationary_and_limiting_distributions.php
.. [2] https://galton.uchicago.edu/~yibi/teaching/stat317/2014/Lectures/Lecture4_6up.pdf
See Also
========
sympy.stats.DiscreteMarkovChain.limiting_distribution
"""
trans_probs = self.transition_probabilities
n = self.number_of_states
if n == 0:
return ImmutableMatrix(Matrix([[]]))
# symbolic matrix version
if isinstance(trans_probs, MatrixSymbol):
wm = MatrixSymbol('wm', 1, n)
if condition_set:
return ConditionSet(wm, Eq(wm * trans_probs, wm))
else:
return Lambda((wm, trans_probs), Eq(wm * trans_probs, wm))
# numeric matrix version
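        # Solve p*(P - I) = 0 together with sum(p) = 1: transpose to (P - I).T * p.T = 0 and
        # overwrite the first equation with a row of ones so the right-hand side is e_1.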
a = Matrix(trans_probs - Identity(n)).T
a[0, 0:n] = ones(1, n) # type: ignore
b = zeros(n, 1)
b[0, 0] = 1
soln = list(linsolve((a, b)))[0]
return ImmutableMatrix([[sol for sol in soln]])
def fixed_row_vector(self):
"""
A wrapper for ``stationary_distribution()``.
"""
return self.stationary_distribution()
@property
def limiting_distribution(self):
"""
The fixed row vector is the limiting
distribution of a discrete Markov chain.
"""
return self.fixed_row_vector()
def decompose(self) -> tTuple[tList[Basic], ImmutableMatrix, ImmutableMatrix, ImmutableMatrix]:
"""
Decomposes the transition matrix into submatrices with
special properties.
The transition matrix can be decomposed into 4 submatrices:
- A - the submatrix from recurrent states to recurrent states.
- B - the submatrix from transient to recurrent states.
- C - the submatrix from transient to transient states.
- O - the submatrix of zeros for recurrent to transient states.
Returns
=======
states, A, B, C
``states`` - a list of state names with the first being
the recurrent states and the last being
the transient states in the order
of the row names of A and then the row names of C.
``A`` - the submatrix from recurrent states to recurrent states.
``B`` - the submatrix from transient to recurrent states.
``C`` - the submatrix from transient to transient states.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
One can decompose this chain for example:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, A, B, C = X.decompose()
>>> states
[2, 0, 1, 3, 4]
>>> A # recurrent to recurrent
Matrix([[1]])
>>> B # transient to recurrent
Matrix([
[ 0],
[2/5],
[1/2],
[ 0]])
>>> C # transient to transient
Matrix([
[1/2, 1/2, 0, 0],
[2/5, 1/5, 0, 0],
[ 0, 0, 1/2, 0],
[1/2, 0, 0, 1/2]])
This means that state 2 is the only absorbing state
(since A is a 1x1 matrix). B is a 4x1 matrix since
        the 4 remaining transient states all merge into recurrent
state 2. And C is the 4x4 matrix that shows how the
transient states 0, 1, 3, 4 all interact.
See Also
========
sympy.stats.DiscreteMarkovChain.communication_classes
sympy.stats.DiscreteMarkovChain.canonical_form
References
==========
.. [1] https://en.wikipedia.org/wiki/Absorbing_Markov_chain
.. [2] http://people.brandeis.edu/~igusa/Math56aS08/Math56a_S08_notes015.pdf
"""
trans_probs = self.transition_probabilities
classes = self.communication_classes()
r_states = []
t_states = []
for states, recurrent, period in classes:
if recurrent:
r_states += states
else:
t_states += states
states = r_states + t_states
indexes = [self.index_of[state] for state in states] # type: ignore
A = Matrix(len(r_states), len(r_states),
lambda i, j: trans_probs[indexes[i], indexes[j]])
B = Matrix(len(t_states), len(r_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[j]])
C = Matrix(len(t_states), len(t_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[len(r_states) + j]])
return states, A.as_immutable(), B.as_immutable(), C.as_immutable()
def canonical_form(self) -> tTuple[tList[Basic], ImmutableMatrix]:
"""
Reorders the one-step transition matrix
so that recurrent states appear first and transient
states appear last. Other representations include inserting
transient states first and recurrent states last.
Returns
=======
states, P_new
``states`` is the list that describes the order of the
new states in the matrix
so that the ith element in ``states`` is the state of the
            ith row of ``P_new``.
``P_new`` is the new transition matrix in canonical form.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
You can convert your chain into canonical form:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', list(range(1, 6)), trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
The new states are [3, 1, 2, 4, 5] and you can
create a new chain with this and its canonical
form will remain the same (since it is already
in canonical form).
>>> X = DiscreteMarkovChain('X', states, new_matrix)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
This is not limited to absorbing chains:
>>> T = Matrix([[0, 5, 5, 0, 0],
... [0, 0, 0, 10, 0],
... [5, 0, 5, 0, 0],
... [0, 10, 0, 0, 0],
... [0, 3, 0, 3, 4]])/10
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[1, 3, 0, 2, 4]
>>> new_matrix
Matrix([
[ 0, 1, 0, 0, 0],
[ 1, 0, 0, 0, 0],
[ 1/2, 0, 0, 1/2, 0],
[ 0, 0, 1/2, 1/2, 0],
[3/10, 3/10, 0, 0, 2/5]])
See Also
========
sympy.stats.DiscreteMarkovChain.communication_classes
sympy.stats.DiscreteMarkovChain.decompose
References
==========
.. [1] https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470316887.app1
.. [2] http://www.columbia.edu/~ww2040/6711F12/lect1023big.pdf
"""
states, A, B, C = self.decompose()
O = zeros(A.shape[0], C.shape[1])
return states, BlockMatrix([[A, O], [B, C]]).as_explicit()
def sample(self):
"""
Returns
=======
sample: iterator object
iterator object containing the sample
"""
if not isinstance(self.transition_probabilities, (Matrix, ImmutableMatrix)):
raise ValueError("Transition Matrix must be provided for sampling")
Tlist = self.transition_probabilities.tolist()
samps = [random.choice(list(self.state_space))]
yield samps[0]
time = 1
densities = {}
for state in self.state_space:
states = list(self.state_space)
densities[state] = {states[i]: Tlist[state][i]
for i in range(len(states))}
while time < S.Infinity:
samps.append((next(sample_iter(FiniteRV("_", densities[samps[time - 1]])))))
yield samps[time]
time += 1
class ContinuousMarkovChain(ContinuousTimeStochasticProcess, MarkovProcess):
"""
Represents continuous time Markov chain.
Parameters
==========
sym : Symbol/str
state_space : Set
Optional, by default, S.Reals
gen_mat : Matrix/ImmutableMatrix/MatrixSymbol
Optional, by default, None
Examples
========
>>> from sympy.stats import ContinuousMarkovChain, P
>>> from sympy import Matrix, S, Eq, Gt
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.limiting_distribution()
Matrix([[1/2, 1/2]])
>>> C.state_space
{0, 1}
>>> C.generator_matrix
Matrix([
[-1, 1],
[ 1, -1]])
Probability queries are supported
>>> P(Eq(C(1.96), 0), Eq(C(0.78), 1)).round(5)
0.45279
>>> P(Gt(C(1.7), 0), Eq(C(0.82), 1)).round(5)
0.58602
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
generator matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> P(Eq(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.37933
>>> P(Gt(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.34211
>>> P(Le(C(1.57), C(3.14)), Eq(C(1.22), 1)).round(4)
0.7143
Symbolic probability queries are also supported
>>> from sympy import symbols
>>> a,b,c,d = symbols('a b c d')
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> query = P(Eq(C(a), b), Eq(C(c), d))
>>> query.subs({a:3.65, b:2, c:1.78, d:1}).evalf().round(10)
0.4002723175
>>> P(Eq(C(3.65), 2), Eq(C(1.78), 1)).round(10)
0.4002723175
>>> query_gt = P(Gt(C(a), b), Eq(C(c), d))
>>> query_gt.subs({a:43.2, b:0, c:3.29, d:2}).evalf().round(10)
0.6832579186
>>> P(Gt(C(43.2), 0), Eq(C(3.29), 2)).round(10)
0.6832579186
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Continuous-time_Markov_chain
.. [2] http://u.math.biu.ac.il/~amirgi/CTMCnotes.pdf
"""
index_set = S.Reals
def __new__(cls, sym, state_space=None, gen_mat=None):
sym = _symbol_converter(sym)
state_space, gen_mat = MarkovProcess._sanity_checks(state_space, gen_mat)
obj = Basic.__new__(cls, sym, state_space, gen_mat)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj.state_space):
indices[state] = index
obj.index_of = indices
return obj
@property
def generator_matrix(self):
return self.args[2]
@cacheit
def transition_probabilities(self, gen_mat=None):
t = Dummy('t')
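        # P(t) = exp(t*G); when G is diagonalizable as G = Q*D*Q**(-1) this reduces to
        # Q*exp(t*D)*Q**(-1), which is cheaper to evaluate than a full matrix exponential.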
if isinstance(gen_mat, (Matrix, ImmutableMatrix)) and \
gen_mat.is_diagonalizable():
# for faster computation use diagonalized generator matrix
Q, D = gen_mat.diagonalize()
return Lambda(t, Q*exp(t*D)*Q.inv())
if gen_mat != None:
return Lambda(t, exp(t*gen_mat))
def limiting_distribution(self):
gen_mat = self.generator_matrix
if gen_mat is None:
return None
if isinstance(gen_mat, MatrixSymbol):
wm = MatrixSymbol('wm', 1, gen_mat.shape[0])
return Lambda((wm, gen_mat), Eq(wm*gen_mat, wm))
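        # Numeric generator matrix: solve w*G = 0 together with sum(w) = 1 for the
        # limiting (stationary) row vector w.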
w = IndexedBase('w')
wi = [w[i] for i in range(gen_mat.shape[0])]
wm = Matrix([wi])
eqs = (wm*gen_mat).tolist()[0]
eqs.append(sum(wi) - 1)
soln = list(linsolve(eqs, wi))[0]
return ImmutableMatrix([[sol for sol in soln]])
class BernoulliProcess(DiscreteTimeStochasticProcess):
"""
    The Bernoulli process consists of repeated independent Bernoulli
    trials with the same parameter `p`. It is assumed that the
    probability `p` applies to every trial and that the outcomes of
    each trial are independent of all the rest. The Bernoulli process
    is therefore a discrete-state, discrete-time stochastic process.
Parameters
==========
sym : Symbol/str
success : Integer/str
The event which is considered to be success. Default: 1.
failure: Integer/str
The event which is considered to be failure. Default: 0.
p : Real Number between 0 and 1
Represents the probability of getting success.
Examples
========
>>> from sympy.stats import BernoulliProcess, P, E
>>> from sympy import Eq, Gt
>>> B = BernoulliProcess("B", p=0.7, success=1, failure=0)
>>> B.state_space
{0, 1}
>>> (B.p).round(2)
0.70
>>> B.success
1
>>> B.failure
0
>>> X = B[1] + B[2] + B[3]
>>> P(Eq(X, 0)).round(2)
0.03
>>> P(Eq(X, 2)).round(2)
0.44
>>> P(Eq(X, 4)).round(2)
0
>>> P(Gt(X, 1)).round(2)
0.78
>>> P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2)
0.04
>>> B.joint_distribution(B[1], B[2])
JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)),
(0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)),
(0, True))))
>>> E(2*B[1] + B[2]).round(2)
2.10
>>> P(B[1] < 1).round(2)
0.30
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_process
.. [2] https://mathcs.clarku.edu/~djoyce/ma217/bernoulli.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, p, success=1, failure=0):
_value_check(p >= 0 and p <= 1, 'Value of p must be between 0 and 1.')
sym = _symbol_converter(sym)
p = _sympify(p)
success = _sym_sympify(success)
failure = _sym_sympify(failure)
return Basic.__new__(cls, sym, p, success, failure)
@property
def symbol(self):
return self.args[0]
@property
def p(self):
return self.args[1]
@property
def success(self):
return self.args[2]
@property
def failure(self):
return self.args[3]
@property
def state_space(self):
return _set_converter([self.success, self.failure])
def distribution(self, key=None):
if key is None:
self._deprecation_warn_distribution()
return BernoulliDistribution(self.p)
return BernoulliDistribution(self.p, self.success, self.failure)
def simple_rv(self, rv):
return Bernoulli(rv.name, p=self.p,
succ=self.success, fail=self.failure)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation.
Parameters
==========
expr : RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition : Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability.
Parameters
==========
condition : Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition : Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
return _SubstituteRV._probability(condition, given_condition, evaluate, **kwargs)
def density(self, x):
return Piecewise((self.p, Eq(x, self.success)),
(1 - self.p, Eq(x, self.failure)),
(S.Zero, True))
class _SubstituteRV:
"""
Internal class to handle the queries of expectation and probability
by substitution.
"""
@staticmethod
def _rvindexed_subs(expr, condition=None):
"""
        Substitutes the RandomIndexedSymbol with the RandomSymbol with
        the same name, distribution and probability as the RandomIndexedSymbol.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
"""
rvs_expr = random_symbols(expr)
if len(rvs_expr) != 0:
swapdict_expr = {}
for rv in rvs_expr:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv) # substitute with equivalent simple rv
swapdict_expr[rv] = newrv
expr = expr.subs(swapdict_expr)
rvs_cond = random_symbols(condition)
if len(rvs_cond)!=0:
swapdict_cond = {}
for rv in rvs_cond:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv)
swapdict_cond[rv] = newrv
condition = condition.subs(swapdict_cond)
return expr, condition
@classmethod
def _expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Internal method for computing expectation of indexed RV.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
new_expr, new_condition = self._rvindexed_subs(expr, condition)
if not is_random(new_expr):
return new_expr
new_pspace = pspace(new_expr)
if new_condition is not None:
new_expr = given(new_expr, new_condition)
if new_expr.is_Add: # As E is Linear
return Add(*[new_pspace.compute_expectation(
expr=arg, evaluate=evaluate, **kwargs)
for arg in new_expr.args])
return new_pspace.compute_expectation(
new_expr, evaluate=evaluate, **kwargs)
@classmethod
def _probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Internal method for computing probability of indexed RV
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational/And
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
new_condition, new_givencondition = self._rvindexed_subs(condition, given_condition)
if isinstance(new_givencondition, RandomSymbol):
condrv = random_symbols(new_condition)
if len(condrv) == 1 and condrv[0] == new_givencondition:
return BernoulliDistribution(self._probability(new_condition), 0, 1)
if any(dependent(rv, new_givencondition) for rv in condrv):
return Probability(new_condition, new_givencondition)
else:
return self._probability(new_condition)
if new_givencondition is not None and \
not isinstance(new_givencondition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_givencondition))
if new_givencondition == False or new_condition == False:
return S.Zero
if new_condition == True:
return S.One
if not isinstance(new_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_condition))
if new_givencondition is not None: # If there is a condition
# Recompute on new conditional expr
return self._probability(given(new_condition, new_givencondition, **kwargs), **kwargs)
result = pspace(new_condition).probability(new_condition, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def get_timerv_swaps(expr, condition):
"""
    Finds the appropriate interval for each time stamp in expr by parsing
    the given condition and returns the intervals for each timestamp and a
    dictionary that maps each variable time-stamped RandomIndexedSymbol to its
    corresponding RandomIndexedSymbol with a fixed time stamp.
Parameters
==========
expr: SymPy Expression
Expression containing Random Indexed Symbols with variable time stamps
condition: Relational/Boolean Expression
Expression containing time bounds of variable time stamps in expr
Examples
========
>>> from sympy.stats.stochastic_process_types import get_timerv_swaps, PoissonProcess
>>> from sympy import symbols, Contains, Interval
>>> x, t, d = symbols('x t d', positive=True)
>>> X = PoissonProcess("X", 3)
>>> get_timerv_swaps(x*X(t), Contains(t, Interval.Lopen(0, 1)))
([Interval.Lopen(0, 1)], {X(t): X(1)})
>>> get_timerv_swaps((X(t)**2 + X(d)**2), Contains(t, Interval.Lopen(0, 1))
... & Contains(d, Interval.Ropen(1, 4))) # doctest: +SKIP
([Interval.Ropen(1, 4), Interval.Lopen(0, 1)], {X(d): X(3), X(t): X(1)})
Returns
=======
intervals: list
List of Intervals/FiniteSet on which each time stamp is defined
rv_swap: dict
Dictionary mapping variable time Random Indexed Symbol to constant time
Random Indexed Variable
"""
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
expr_syms = list(expr.atoms(RandomIndexedSymbol))
if isinstance(condition, (And, Or)):
given_cond_args = condition.args
else: # single condition
given_cond_args = (condition, )
rv_swap = {}
intervals = []
for expr_sym in expr_syms:
for arg in given_cond_args:
if arg.has(expr_sym.key) and isinstance(expr_sym.key, Symbol):
intv = _set_converter(arg.args[1])
diff_key = intv._sup - intv._inf
if diff_key == oo:
raise ValueError("%s should have finite bounds" % str(expr_sym.name))
elif diff_key == S.Zero: # has singleton set
diff_key = intv._sup
rv_swap[expr_sym] = expr_sym.subs({expr_sym.key: diff_key})
intervals.append(intv)
return intervals, rv_swap
class CountingProcess(ContinuousTimeStochasticProcess):
"""
This class handles the common methods of the Counting Processes
    such as the Poisson, Wiener and Gamma processes.
"""
index_set = _set_converter(Interval(0, oo))
@property
def symbol(self):
return self.args[0]
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Boolean
            The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Expectation of the given expr
"""
if condition is not None:
intervals, rv_swap = get_timerv_swaps(expr, condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if expr.is_Add:
return Add.fromiter(self.expectation(arg, condition)
for arg in expr.args)
expr = expr.subs(rv_swap)
else:
return Expectation(expr, condition)
return _SubstituteRV._expectation(expr, evaluate=evaluate, **kwargs)
def _solve_argwith_tworvs(self, arg):
if arg.args[0].key >= arg.args[1].key or isinstance(arg, Eq):
diff_key = abs(arg.args[0].key - arg.args[1].key)
rv = arg.args[0]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
else:
diff_key = arg.args[1].key - arg.args[0].key
rv = arg.args[1]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
return arg
def _solve_numerical(self, condition, given_condition=None):
if isinstance(condition, And):
args_list = list(condition.args)
else:
args_list = [condition]
if given_condition is not None:
if isinstance(given_condition, And):
args_list.extend(list(given_condition.args))
else:
args_list.extend([given_condition])
# sort the args based on timestamp to get the independent increments in
# each segment using all the condition args as well as given_condition args
args_list = sorted(args_list, key=lambda x: x.args[0].key)
result = []
cond_args = list(condition.args) if isinstance(condition, And) else [condition]
if args_list[0] in cond_args and not (is_random(args_list[0].args[0])
and is_random(args_list[0].args[1])):
result.append(_SubstituteRV._probability(args_list[0]))
if is_random(args_list[0].args[0]) and is_random(args_list[0].args[1]):
arg = self._solve_argwith_tworvs(args_list[0])
result.append(_SubstituteRV._probability(arg))
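        # Walk through consecutive timestamps and multiply the probabilities contributed
        # by each segment (independent increments); ``working_set`` collects the state
        # values compatible with the conditions at both ends of the segment, which bounds
        # the possible size of the increment over it.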
for i in range(len(args_list) - 1):
curr, nex = args_list[i], args_list[i + 1]
diff_key = nex.args[0].key - curr.args[0].key
working_set = curr.args[0].pspace.process.state_space
            if curr.args[1] > nex.args[1]: # impossible condition so return 0
result.append(0)
break
if isinstance(curr, Eq):
working_set = Intersection(working_set, Interval.Lopen(curr.args[1], oo))
else:
working_set = Intersection(working_set, curr.as_set())
if isinstance(nex, Eq):
working_set = Intersection(working_set, Interval(-oo, nex.args[1]))
else:
working_set = Intersection(working_set, nex.as_set())
if working_set == EmptySet:
rv = Eq(curr.args[0].pspace.process(diff_key), 0)
result.append(_SubstituteRV._probability(rv))
else:
if working_set.is_finite_set:
if isinstance(curr, Eq) and isinstance(nex, Eq):
rv = Eq(curr.args[0].pspace.process(diff_key), len(working_set))
result.append(_SubstituteRV._probability(rv))
elif isinstance(curr, Eq) ^ isinstance(nex, Eq):
result.append(Add.fromiter(_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x))
for x in range(len(working_set))))
else:
n = len(working_set)
result.append(Add.fromiter((n - x)*_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x)) for x in range(n)))
else:
result.append(_SubstituteRV._probability(
curr.args[0].pspace.process(diff_key) <= working_set._sup - working_set._inf))
return Mul.fromiter(result)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability.
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational, Boolean
            The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Probability of the condition
"""
check_numeric = True
if isinstance(condition, (And, Or)):
cond_args = condition.args
else:
cond_args = (condition, )
# check that condition args are numeric or not
if not all(arg.args[0].key.is_number for arg in cond_args):
check_numeric = False
if given_condition is not None:
check_given_numeric = True
if isinstance(given_condition, (And, Or)):
given_cond_args = given_condition.args
else:
given_cond_args = (given_condition, )
# check that given condition args are numeric or not
if given_condition.has(Contains):
check_given_numeric = False
# Handle numerical queries
if check_numeric and check_given_numeric:
res = []
if isinstance(condition, Or):
res.append(Add.fromiter(self._solve_numerical(arg, given_condition)
for arg in condition.args))
if isinstance(given_condition, Or):
res.append(Add.fromiter(self._solve_numerical(condition, arg)
for arg in given_condition.args))
if res:
return Add.fromiter(res)
return self._solve_numerical(condition, given_condition)
        # Not a purely numeric query, so fall back to `Contains`-based handling; first
        # check that every given condition is expressed in terms of `Contains`
if not all(arg.has(Contains) for arg in given_cond_args):
raise ValueError("If given condition is passed with `Contains`, then "
"please pass the evaluated condition with its corresponding information "
"in terms of intervals of each time stamp to be passed in given condition.")
intervals, rv_swap = get_timerv_swaps(condition, given_condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if isinstance(condition, And):
return Mul.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
elif isinstance(condition, Or):
return Add.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
condition = condition.subs(rv_swap)
else:
return Probability(condition, given_condition)
if check_numeric:
return self._solve_numerical(condition)
return _SubstituteRV._probability(condition, evaluate=evaluate, **kwargs)
class PoissonProcess(CountingProcess):
"""
The Poisson process is a counting process. It is usually used in scenarios
where we are counting the occurrences of certain events that appear
to happen at a certain rate, but completely at random.
Parameters
==========
sym : Symbol/str
lamda : Positive number
Rate of the process, ``lambda > 0``
Examples
========
>>> from sympy.stats import PoissonProcess, P, E
>>> from sympy import symbols, Eq, Ne, Contains, Interval
>>> X = PoissonProcess("X", lamda=3)
>>> X.state_space
Naturals0
>>> X.lamda
3
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 4)
(9*t1**3/2 + 9*t1**2/2 + 3*t1 + 1)*exp(-3*t1)
>>> P(Eq(X(t1), 2) | Ne(X(t1), 4), Contains(t1, Interval.Ropen(2, 4)))
1 - 36*exp(-6)
>>> P(Eq(X(t1), 2) & Eq(X(t2), 3), Contains(t1, Interval.Lopen(0, 2))
... & Contains(t2, Interval.Lopen(2, 4)))
648*exp(-12)
>>> E(X(t1))
3*t1
>>> E(X(t1)**2 + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
18
>>> P(X(3) < 1, Eq(X(1), 0))
exp(-6)
>>> P(Eq(X(4), 3), Eq(X(2), 3))
exp(-6)
>>> P(X(2) <= 3, X(1) > 1)
5*exp(-3)
Merging two Poisson Processes
>>> Y = PoissonProcess("Y", lamda=4)
>>> Z = X + Y
>>> Z.lamda
7
Splitting a Poisson Process into two independent Poisson Processes
>>> N, M = Z.split(l1=2, l2=5)
>>> N.lamda, M.lamda
(2, 5)
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_0_0_intro.php
.. [2] https://en.wikipedia.org/wiki/Poisson_point_process
"""
def __new__(cls, sym, lamda):
_value_check(lamda > 0, 'lamda should be a positive number.')
sym = _symbol_converter(sym)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda)
@property
def lamda(self):
return self.args[1]
@property
def state_space(self):
return S.Naturals0
def distribution(self, key):
if isinstance(key, RandomIndexedSymbol):
self._deprecation_warn_distribution()
return PoissonDistribution(self.lamda*key.key)
return PoissonDistribution(self.lamda*key)
def density(self, x):
return (self.lamda*x.key)**x / factorial(x) * exp(-(self.lamda*x.key))
def simple_rv(self, rv):
return Poisson(rv.name, lamda=self.lamda*rv.key)
def __add__(self, other):
if not isinstance(other, PoissonProcess):
raise ValueError("Only instances of Poisson Process can be merged")
return PoissonProcess(Dummy(self.symbol.name + other.symbol.name),
self.lamda + other.lamda)
def split(self, l1, l2):
if _sympify(l1 + l2) != self.lamda:
raise ValueError("Sum of l1 and l2 should be %s" % str(self.lamda))
return PoissonProcess(Dummy("l1"), l1), PoissonProcess(Dummy("l2"), l2)
class WienerProcess(CountingProcess):
"""
The Wiener process is a real valued continuous-time stochastic process.
    In physics it is used to study Brownian motion and it is often itself called
    Brownian motion, due to its historical connection with the physical process of
    the same name originally observed by the Scottish botanist Robert Brown.
Parameters
==========
sym : Symbol/str
Examples
========
>>> from sympy.stats import WienerProcess, P, E
>>> from sympy import symbols, Contains, Interval
>>> X = WienerProcess("X")
>>> X.state_space
Reals
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 7).simplify()
erf(7*sqrt(2)/(2*sqrt(t1)))/2 + 1/2
>>> P((X(t1) > 2) | (X(t1) < 4), Contains(t1, Interval.Ropen(2, 4))).simplify()
-erf(1)/2 + erf(2)/2 + 1
>>> E(X(t1))
0
>>> E(X(t1) + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
0
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_4_0_brownian_motion_wiener_process.php
.. [2] https://en.wikipedia.org/wiki/Wiener_process
"""
def __new__(cls, sym):
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym)
@property
def state_space(self):
return S.Reals
def distribution(self, key):
if isinstance(key, RandomIndexedSymbol):
self._deprecation_warn_distribution()
return NormalDistribution(0, sqrt(key.key))
return NormalDistribution(0, sqrt(key))
def density(self, x):
return exp(-x**2/(2*x.key)) / (sqrt(2*pi)*sqrt(x.key))
def simple_rv(self, rv):
return Normal(rv.name, 0, sqrt(rv.key))
class GammaProcess(CountingProcess):
r"""
A Gamma process is a random process with independent gamma distributed
    increments. It is a pure-jump, increasing Lévy process.
Parameters
==========
sym : Symbol/str
lamda : Positive number
Jump size of the process, ``lamda > 0``
gamma : Positive number
Rate of jump arrivals, `\gamma > 0`
Examples
========
>>> from sympy.stats import GammaProcess, E, P, variance
>>> from sympy import symbols, Contains, Interval, Not
>>> t, d, x, l, g = symbols('t d x l g', positive=True)
>>> X = GammaProcess("X", l, g)
>>> E(X(t))
g*t/l
>>> variance(X(t)).simplify()
g*t/l**2
>>> X = GammaProcess('X', 1, 2)
>>> P(X(t) < 1).simplify()
lowergamma(2*t, 1)/gamma(2*t)
>>> P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
... Contains(d, Interval.Lopen(7, 8))).simplify()
-4*exp(-3) + 472*exp(-8)/3 + 1
>>> E(X(2) + x*E(X(5)))
10*x + 4
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_process
"""
def __new__(cls, sym, lamda, gamma):
_value_check(lamda > 0, 'lamda should be a positive number')
_value_check(gamma > 0, 'gamma should be a positive number')
sym = _symbol_converter(sym)
gamma = _sympify(gamma)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda, gamma)
@property
def lamda(self):
return self.args[1]
@property
def gamma(self):
return self.args[2]
@property
def state_space(self):
return _set_converter(Interval(0, oo))
def distribution(self, key):
if isinstance(key, RandomIndexedSymbol):
self._deprecation_warn_distribution()
return GammaDistribution(self.gamma*key.key, 1/self.lamda)
return GammaDistribution(self.gamma*key, 1/self.lamda)
def density(self, x):
k = self.gamma*x.key
theta = 1/self.lamda
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def simple_rv(self, rv):
return Gamma(rv.name, self.gamma*rv.key, 1/self.lamda)
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.core.add import Add
from sympy.core.function import Lambda
from sympy.core.mul import Mul
from sympy.core.numbers import (Integer, Rational, pi)
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import (rf, factorial)
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.bessel import besselk
from sympy.functions.special.gamma_functions import gamma
from sympy.matrices.dense import (Matrix, ones)
from sympy.sets.fancysets import Range
from sympy.sets.sets import (Intersection, Interval)
from sympy.tensor.indexed import (Indexed, IndexedBase)
from sympy.matrices import ImmutableMatrix, MatrixSymbol
from sympy.matrices.expressions.determinant import det
from sympy.matrices.expressions.matexpr import MatrixElement
from sympy.stats.joint_rv import JointDistribution, JointPSpace, MarginalDistribution
from sympy.stats.rv import _value_check, random_symbols
__all__ = ['JointRV',
'MultivariateNormal',
'MultivariateLaplace',
'Dirichlet',
'GeneralizedMultivariateLogGamma',
'GeneralizedMultivariateLogGammaOmega',
'Multinomial',
'MultivariateBeta',
'MultivariateEwens',
'MultivariateT',
'NegativeMultinomial',
'NormalGamma'
]
def multivariate_rv(cls, sym, *args):
args = list(map(sympify, args))
dist = cls(*args)
args = dist.args
dist.check(*args)
return JointPSpace(sym, dist).value
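# Note: ``multivariate_rv`` is the shared constructor used by every distribution
# factory in this module; for example ``MultivariateNormal`` below is simply
# ``multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)``, which
# sympifies and checks the arguments and returns ``JointPSpace(sym, dist).value``
# as the random symbol handed back to the user.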
def marginal_distribution(rv, *indices):
"""
Marginal distribution function of a joint random variable.
Parameters
==========
rv : A random variable with a joint probability distribution.
indices : Component indices or the indexed random symbol
for which the joint distribution is to be calculated
Returns
=======
    A Lambda expression in the symbols of the selected components.
Examples
========
>>> from sympy.stats import MultivariateNormal, marginal_distribution
>>> m = MultivariateNormal('X', [1, 2], [[2, 1], [1, 2]])
>>> marginal_distribution(m, m[0])(1)
1/(2*sqrt(pi))
"""
indices = list(indices)
for i in range(len(indices)):
if isinstance(indices[i], Indexed):
indices[i] = indices[i].args[1]
prob_space = rv.pspace
if not indices:
raise ValueError(
"At least one component for marginal density is needed.")
if hasattr(prob_space.distribution, '_marginal_distribution'):
return prob_space.distribution._marginal_distribution(indices, rv.symbol)
return prob_space.marginal_distribution(*indices)
class JointDistributionHandmade(JointDistribution):
_argnames = ('pdf',)
is_Continuous = True
@property
def set(self):
return self.args[1]
def JointRV(symbol, pdf, _set=None):
"""
Create a Joint Random Variable where each of its component is continuous,
given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
pdf : A PDF in terms of indexed symbols of the symbol given
as the first argument
NOTE
====
    As of now, the set for each component for a ``JointRV`` is
    equal to the set of all real numbers, which cannot be changed.
Examples
========
>>> from sympy import exp, pi, Indexed, S
>>> from sympy.stats import density, JointRV
>>> x1, x2 = (Indexed('x', i) for i in (1, 2))
>>> pdf = exp(-x1**2/2 + x1 - x2**2/2 - S(1)/2)/(2*pi)
>>> N1 = JointRV('x', pdf) #Multivariate Normal distribution
>>> density(N1)(1, 2)
exp(-2)/(2*pi)
Returns
=======
RandomSymbol
"""
#TODO: Add support for sets provided by the user
symbol = sympify(symbol)
syms = list(i for i in pdf.free_symbols if isinstance(i, Indexed)
and i.base == IndexedBase(symbol))
syms = tuple(sorted(syms, key = lambda index: index.args[1]))
_set = S.Reals**len(syms)
pdf = Lambda(syms, pdf)
dist = JointDistributionHandmade(pdf, _set)
jrv = JointPSpace(symbol, dist).value
rvs = random_symbols(pdf)
if len(rvs) != 0:
dist = MarginalDistribution(dist, (jrv,))
return JointPSpace(symbol, dist).value
return jrv
#-------------------------------------------------------------------------------
# Multivariate Normal distribution ---------------------------------------------
class MultivariateNormalDistribution(JointDistribution):
_argnames = ('mu', 'sigma')
is_Continuous=True
@property
def set(self):
k = self.mu.shape[0]
return S.Reals**k
@staticmethod
def check(mu, sigma):
_value_check(mu.shape[0] == sigma.shape[0],
"Size of the mean vector and covariance matrix are incorrect.")
#check if covariance matrix is positive semi definite or not.
if not isinstance(sigma, MatrixSymbol):
_value_check(sigma.is_positive_semidefinite,
"The covariance matrix must be positive semi definite. ")
def pdf(self, *args):
mu, sigma = self.mu, self.sigma
k = mu.shape[0]
if len(args) == 1 and args[0].is_Matrix:
args = args[0]
else:
args = ImmutableMatrix(args)
x = args - mu
density = S.One/sqrt((2*pi)**(k)*det(sigma))*exp(
Rational(-1, 2)*x.transpose()*(sigma.inv()*x))
return MatrixElement(density, 0, 0)
def _marginal_distribution(self, indices, sym):
sym = ImmutableMatrix([Indexed(sym, i) for i in indices])
_mu, _sigma = self.mu, self.sigma
k = self.mu.shape[0]
for i in range(k):
if i not in indices:
_mu = _mu.row_del(i)
_sigma = _sigma.col_del(i)
_sigma = _sigma.row_del(i)
return Lambda(tuple(sym), S.One/sqrt((2*pi)**(len(_mu))*det(_sigma))*exp(
Rational(-1, 2)*(_mu - sym).transpose()*(_sigma.inv()*\
(_mu - sym)))[0])
def MultivariateNormal(name, mu, sigma):
r"""
Creates a continuous random variable with Multivariate Normal
Distribution.
The density of the multivariate normal distribution can be found at [1].
Parameters
==========
mu : List representing the mean or the mean vector
sigma : Positive semidefinite square matrix
Represents covariance Matrix.
        If `\sigma` is noninvertible, then currently only sampling is supported.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import MultivariateNormal, density, marginal_distribution
>>> from sympy import symbols, MatrixSymbol
>>> X = MultivariateNormal('X', [3, 4], [[2, 1], [1, 2]])
>>> y, z = symbols('y z')
>>> density(X)(y, z)
sqrt(3)*exp(-y**2/3 + y*z/3 + 2*y/3 - z**2/3 + 5*z/3 - 13/3)/(6*pi)
>>> density(X)(1, 2)
sqrt(3)*exp(-4/3)/(6*pi)
>>> marginal_distribution(X, X[1])(y)
exp(-(y - 4)**2/4)/(2*sqrt(pi))
>>> marginal_distribution(X, X[0])(y)
exp(-(y - 3)**2/4)/(2*sqrt(pi))
The example below shows that it is also possible to use
symbolic parameters to define the MultivariateNormal class.
>>> n = symbols('n', integer=True, positive=True)
>>> Sg = MatrixSymbol('Sg', n, n)
>>> mu = MatrixSymbol('mu', n, 1)
>>> obs = MatrixSymbol('obs', n, 1)
>>> X = MultivariateNormal('X', mu, Sg)
The density of a multivariate normal can be
calculated using a matrix argument, as shown below.
>>> density(X)(obs)
(exp(((1/2)*mu.T - (1/2)*obs.T)*Sg**(-1)*(-mu + obs))/sqrt((2*pi)**n*Determinant(Sg)))[0, 0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution
"""
return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)
#-------------------------------------------------------------------------------
# Multivariate Laplace distribution --------------------------------------------
class MultivariateLaplaceDistribution(JointDistribution):
_argnames = ('mu', 'sigma')
is_Continuous=True
@property
def set(self):
k = self.mu.shape[0]
return S.Reals**k
@staticmethod
def check(mu, sigma):
_value_check(mu.shape[0] == sigma.shape[0],
"Size of the mean vector and covariance matrix are incorrect.")
# check if covariance matrix is positive definite or not.
if not isinstance(sigma, MatrixSymbol):
_value_check(sigma.is_positive_definite,
"The covariance matrix must be positive definite. ")
def pdf(self, *args):
mu, sigma = self.mu, self.sigma
mu_T = mu.transpose()
k = S(mu.shape[0])
sigma_inv = sigma.inv()
args = ImmutableMatrix(args)
args_T = args.transpose()
x = (mu_T*sigma_inv*mu)[0]
y = (args_T*sigma_inv*args)[0]
v = 1 - k/2
return (2 * (y/(2 + x))**(v/2) * besselk(v, sqrt((2 + x)*y)) *
exp((args_T * sigma_inv * mu)[0]) /
((2 * pi)**(k/2) * sqrt(det(sigma))))
def MultivariateLaplace(name, mu, sigma):
"""
Creates a continuous random variable with Multivariate Laplace
Distribution.
The density of the multivariate Laplace distribution can be found at [1].
Parameters
==========
mu : List representing the mean or the mean vector
sigma : Positive definite square matrix
Represents covariance Matrix
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import MultivariateLaplace, density
>>> from sympy import symbols
>>> y, z = symbols('y z')
>>> X = MultivariateLaplace('X', [2, 4], [[3, 1], [1, 3]])
>>> density(X)(y, z)
sqrt(2)*exp(y/4 + 5*z/4)*besselk(0, sqrt(15*y*(3*y/8 - z/8)/2 + 15*z*(-y/8 + 3*z/8)/2))/(4*pi)
>>> density(X)(1, 2)
sqrt(2)*exp(11/4)*besselk(0, sqrt(165)/4)/(4*pi)
References
==========
.. [1] https://en.wikipedia.org/wiki/Multivariate_Laplace_distribution
"""
return multivariate_rv(MultivariateLaplaceDistribution, name, mu, sigma)
#-------------------------------------------------------------------------------
# Multivariate StudentT distribution -------------------------------------------
class MultivariateTDistribution(JointDistribution):
_argnames = ('mu', 'shape_mat', 'dof')
is_Continuous=True
@property
def set(self):
k = self.mu.shape[0]
return S.Reals**k
@staticmethod
def check(mu, sigma, v):
_value_check(mu.shape[0] == sigma.shape[0],
"Size of the location vector and shape matrix are incorrect.")
# check if covariance matrix is positive definite or not.
if not isinstance(sigma, MatrixSymbol):
_value_check(sigma.is_positive_definite,
"The shape matrix must be positive definite. ")
def pdf(self, *args):
mu, sigma = self.mu, self.shape_mat
v = S(self.dof)
k = S(mu.shape[0])
sigma_inv = sigma.inv()
args = ImmutableMatrix(args)
x = args - mu
return gamma((k + v)/2)/(gamma(v/2)*(v*pi)**(k/2)*sqrt(det(sigma)))\
*(1 + 1/v*(x.transpose()*sigma_inv*x)[0])**((-v - k)/2)
def MultivariateT(syms, mu, sigma, v):
"""
Creates a joint random variable with multivariate T-distribution.
Parameters
==========
syms : A symbol/str
For identifying the random variable.
mu : A list/matrix
Representing the location vector
    sigma : The shape matrix for the distribution
    v : Positive real number
        Represents the degrees of freedom
Examples
========
>>> from sympy.stats import density, MultivariateT
>>> from sympy import Symbol
>>> x = Symbol("x")
>>> X = MultivariateT("x", [1, 1], [[1, 0], [0, 1]], 2)
>>> density(X)(1, 2)
2/(9*pi)
Returns
=======
RandomSymbol
"""
return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v)
#-------------------------------------------------------------------------------
# Multivariate Normal Gamma distribution ---------------------------------------
class NormalGammaDistribution(JointDistribution):
_argnames = ('mu', 'lamda', 'alpha', 'beta')
is_Continuous=True
@staticmethod
def check(mu, lamda, alpha, beta):
_value_check(mu.is_real, "Location must be real.")
_value_check(lamda > 0, "Lambda must be positive")
_value_check(alpha > 0, "alpha must be positive")
_value_check(beta > 0, "beta must be positive")
@property
def set(self):
return S.Reals*Interval(0, S.Infinity)
def pdf(self, x, tau):
beta, alpha, lamda = self.beta, self.alpha, self.lamda
mu = self.mu
return beta**alpha*sqrt(lamda)/(gamma(alpha)*sqrt(2*pi))*\
tau**(alpha - S.Half)*exp(-1*beta*tau)*\
exp(-1*(lamda*tau*(x - mu)**2)/S(2))
def _marginal_distribution(self, indices, *sym):
if len(indices) == 2:
return self.pdf(*sym)
if indices[0] == 0:
#For marginal over `x`, return non-standardized Student-T's
#distribution
x = sym[0]
v, mu, sigma = self.alpha - S.Half, self.mu, \
S(self.beta)/(self.lamda * self.alpha)
return Lambda(sym, gamma((v + 1)/2)/(gamma(v/2)*sqrt(pi*v)*sigma)*\
(1 + 1/v*((x - mu)/sigma)**2)**((-v -1)/2))
#For marginal over `tau`, return Gamma distribution as per construction
from sympy.stats.crv_types import GammaDistribution
return Lambda(sym, GammaDistribution(self.alpha, self.beta)(sym[0]))
def NormalGamma(sym, mu, lamda, alpha, beta):
"""
Creates a bivariate joint random variable with multivariate Normal gamma
distribution.
Parameters
==========
sym : A symbol/str
For identifying the random variable.
mu : A real number
The mean of the normal distribution
    lamda : A positive real number
        Parameter of joint distribution
    alpha : A positive real number
        Parameter of joint distribution
    beta : A positive real number
        Parameter of joint distribution
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, NormalGamma
>>> from sympy import symbols
>>> X = NormalGamma('x', 0, 1, 2, 3)
>>> y, z = symbols('y z')
>>> density(X)(y, z)
9*sqrt(2)*z**(3/2)*exp(-3*z)*exp(-y**2*z/2)/(2*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution
"""
return multivariate_rv(NormalGammaDistribution, sym, mu, lamda, alpha, beta)
#-------------------------------------------------------------------------------
# Multivariate Beta/Dirichlet distribution -------------------------------------
class MultivariateBetaDistribution(JointDistribution):
_argnames = ('alpha',)
is_Continuous = True
@staticmethod
def check(alpha):
_value_check(len(alpha) >= 2, "At least two categories should be passed.")
for a_k in alpha:
_value_check((a_k > 0) != False, "Each concentration parameter"
" should be positive.")
@property
def set(self):
k = len(self.alpha)
return Interval(0, 1)**k
def pdf(self, *syms):
alpha = self.alpha
B = Mul.fromiter(map(gamma, alpha))/gamma(Add(*alpha))
return Mul.fromiter(sym**(a_k - 1) for a_k, sym in zip(alpha, syms))/B
def MultivariateBeta(syms, *alpha):
"""
Creates a continuous random variable with Dirichlet/Multivariate Beta
Distribution.
The density of the Dirichlet distribution can be found at [1].
Parameters
==========
alpha : Positive real numbers
Signifies concentration numbers.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, MultivariateBeta, marginal_distribution
>>> from sympy import Symbol
>>> a1 = Symbol('a1', positive=True)
>>> a2 = Symbol('a2', positive=True)
>>> B = MultivariateBeta('B', [a1, a2])
>>> C = MultivariateBeta('C', a1, a2)
>>> x = Symbol('x')
>>> y = Symbol('y')
>>> density(B)(x, y)
x**(a1 - 1)*y**(a2 - 1)*gamma(a1 + a2)/(gamma(a1)*gamma(a2))
>>> marginal_distribution(C, C[0])(x)
x**(a1 - 1)*gamma(a1 + a2)/(a2*gamma(a1)*gamma(a2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Dirichlet_distribution
.. [2] http://mathworld.wolfram.com/DirichletDistribution.html
"""
if not isinstance(alpha[0], list):
alpha = (list(alpha),)
return multivariate_rv(MultivariateBetaDistribution, syms, alpha[0])
Dirichlet = MultivariateBeta
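# ``Dirichlet`` is a plain alias of ``MultivariateBeta``; for instance
# ``Dirichlet('B', a1, a2)`` builds the same random symbol as
# ``MultivariateBeta('B', a1, a2)`` in the doctest above.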
#-------------------------------------------------------------------------------
# Multivariate Ewens distribution ----------------------------------------------
class MultivariateEwensDistribution(JointDistribution):
_argnames = ('n', 'theta')
is_Discrete = True
is_Continuous = False
@staticmethod
def check(n, theta):
        _value_check((n > 0),
                     "sample size should be a positive integer.")
_value_check(theta.is_positive, "mutation rate should be positive.")
@property
def set(self):
if not isinstance(self.n, Integer):
i = Symbol('i', integer=True, positive=True)
return Product(Intersection(S.Naturals0, Interval(0, self.n//i)),
(i, 1, self.n))
prod_set = Range(0, self.n + 1)
for i in range(2, self.n + 1):
prod_set *= Range(0, self.n//i + 1)
return prod_set.flatten()
def pdf(self, *syms):
n, theta = self.n, self.theta
condi = isinstance(self.n, Integer)
if not (isinstance(syms[0], IndexedBase) or condi):
raise ValueError("Please use IndexedBase object for syms as "
"the dimension is symbolic")
term_1 = factorial(n)/rf(theta, n)
if condi:
term_2 = Mul.fromiter(theta**syms[j]/((j+1)**syms[j]*factorial(syms[j]))
for j in range(n))
cond = Eq(sum([(k + 1)*syms[k] for k in range(n)]), n)
return Piecewise((term_1 * term_2, cond), (0, True))
syms = syms[0]
j, k = symbols('j, k', positive=True, integer=True)
term_2 = Product(theta**syms[j]/((j+1)**syms[j]*factorial(syms[j])),
(j, 0, n - 1))
cond = Eq(Sum((k + 1)*syms[k], (k, 0, n - 1)), n)
return Piecewise((term_1 * term_2, cond), (0, True))
def MultivariateEwens(syms, n, theta):
"""
Creates a discrete random variable with Multivariate Ewens
Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
n : Positive integer
Size of the sample or the integer whose partitions are considered
theta : Positive real number
Denotes Mutation rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, marginal_distribution, MultivariateEwens
>>> from sympy import Symbol
>>> a1 = Symbol('a1', positive=True)
>>> a2 = Symbol('a2', positive=True)
>>> ed = MultivariateEwens('E', 2, 1)
>>> density(ed)(a1, a2)
Piecewise((1/(2**a2*factorial(a1)*factorial(a2)), Eq(a1 + 2*a2, 2)), (0, True))
>>> marginal_distribution(ed, ed[0])(a1)
Piecewise((1/factorial(a1), Eq(a1, 2)), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Ewens%27s_sampling_formula
.. [2] http://www.stat.rutgers.edu/home/hcrane/Papers/STS529.pdf
"""
return multivariate_rv(MultivariateEwensDistribution, syms, n, theta)
#-------------------------------------------------------------------------------
# Generalized Multivariate Log Gamma distribution ------------------------------
class GeneralizedMultivariateLogGammaDistribution(JointDistribution):
_argnames = ('delta', 'v', 'lamda', 'mu')
is_Continuous=True
def check(self, delta, v, l, mu):
_value_check((delta >= 0, delta <= 1), "delta must be in range [0, 1].")
_value_check((v > 0), "v must be positive")
for lk in l:
_value_check((lk > 0), "lamda must be a positive vector.")
for muk in mu:
_value_check((muk > 0), "mu must be a positive vector.")
_value_check(len(l) > 1,"the distribution should have at least"
" two random variables.")
@property
def set(self):
return S.Reals**len(self.lamda)
def pdf(self, *y):
d, v, l, mu = self.delta, self.v, self.lamda, self.mu
n = Symbol('n', negative=False, integer=True)
k = len(l)
sterm1 = Pow((1 - d), n)/\
((gamma(v + n)**(k - 1))*gamma(v)*gamma(n + 1))
sterm2 = Mul.fromiter(mui*li**(-v - n) for mui, li in zip(mu, l))
term1 = sterm1 * sterm2
sterm3 = (v + n) * sum([mui * yi for mui, yi in zip(mu, y)])
sterm4 = sum([exp(mui * yi)/li for (mui, yi, li) in zip(mu, y, l)])
term2 = exp(sterm3 - sterm4)
return Pow(d, v) * Sum(term1 * term2, (n, 0, S.Infinity))
def GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu):
"""
Creates a joint random variable with generalized multivariate log gamma
distribution.
The joint pdf can be found at [1].
Parameters
==========
syms : list/tuple/set of symbols for identifying each component
delta : A constant in range $[0, 1]$
v : Positive real number
lamda : List of positive real numbers
mu : List of positive real numbers
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density
>>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma
>>> from sympy import symbols, S
>>> v = 1
>>> l, mu = [1, 1, 1], [1, 1, 1]
>>> d = S.Half
>>> y = symbols('y_1:4', positive=True)
>>> Gd = GeneralizedMultivariateLogGamma('G', d, v, l, mu)
>>> density(Gd)(y[0], y[1], y[2])
Sum(exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) - exp(y_2) -
exp(y_3))/(2**n*gamma(n + 1)**3), (n, 0, oo))/2
References
==========
.. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution
.. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis
Note
====
    If ``GeneralizedMultivariateLogGamma`` is too long to type, use:
>>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma as GMVLG
>>> Gd = GMVLG('G', d, v, l, mu)
If you want to pass the matrix omega instead of the constant delta, then use
``GeneralizedMultivariateLogGammaOmega``.
"""
return multivariate_rv(GeneralizedMultivariateLogGammaDistribution,
syms, delta, v, lamda, mu)
def GeneralizedMultivariateLogGammaOmega(syms, omega, v, lamda, mu):
"""
Extends GeneralizedMultivariateLogGamma.
Parameters
==========
syms : list/tuple/set of symbols
For identifying each component
omega : A square matrix
        Every element of the square matrix must be the absolute value of
        the square root of a correlation coefficient
v : Positive real number
lamda : List of positive real numbers
mu : List of positive real numbers
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density
>>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega
>>> from sympy import Matrix, symbols, S
>>> omega = Matrix([[1, S.Half, S.Half], [S.Half, 1, S.Half], [S.Half, S.Half, 1]])
>>> v = 1
>>> l, mu = [1, 1, 1], [1, 1, 1]
>>> G = GeneralizedMultivariateLogGammaOmega('G', omega, v, l, mu)
>>> y = symbols('y_1:4', positive=True)
>>> density(G)(y[0], y[1], y[2])
sqrt(2)*Sum((1 - sqrt(2)/2)**n*exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) -
exp(y_2) - exp(y_3))/gamma(n + 1)**3, (n, 0, oo))/2
References
==========
.. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution
.. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis
Notes
=====
    If ``GeneralizedMultivariateLogGammaOmega`` is too long to type, use:
>>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega as GMVLGO
>>> G = GMVLGO('G', omega, v, l, mu)
"""
_value_check((omega.is_square, isinstance(omega, Matrix)), "omega must be a"
" square matrix")
for val in omega.values():
        _value_check((val >= 0, val <= 1),
                     "all values in matrix must be between 0 and 1 (both inclusive).")
    _value_check(omega.diagonal().equals(ones(1, omega.shape[0])),
                 "all the diagonal elements should be 1.")
    _value_check((omega.shape[0] == len(lamda), len(lamda) == len(mu)),
                 "lamda and mu should be of the same length, and omega should "
                 "be of shape (length of lamda, length of mu)")
_value_check(len(lamda) > 1,"the distribution should have at least"
" two random variables.")
delta = Pow(Rational(omega.det()), Rational(1, len(lamda) - 1))
return GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu)
#-------------------------------------------------------------------------------
# Multinomial distribution -----------------------------------------------------
class MultinomialDistribution(JointDistribution):
_argnames = ('n', 'p')
is_Continuous=False
is_Discrete = True
@staticmethod
def check(n, p):
_value_check(n > 0,
"number of trials must be a positive integer")
for p_k in p:
_value_check((p_k >= 0, p_k <= 1),
"probability must be in range [0, 1]")
_value_check(Eq(sum(p), 1),
"probabilities must sum to 1")
@property
def set(self):
return Intersection(S.Naturals0, Interval(0, self.n))**len(self.p)
def pdf(self, *x):
n, p = self.n, self.p
term_1 = factorial(n)/Mul.fromiter(factorial(x_k) for x_k in x)
term_2 = Mul.fromiter(p_k**x_k for p_k, x_k in zip(p, x))
return Piecewise((term_1 * term_2, Eq(sum(x), n)), (0, True))
def Multinomial(syms, n, *p):
"""
Creates a discrete random variable with Multinomial Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
n : Positive integer
Represents number of trials
p : List of event probabilities
Must be in the range of $[0, 1]$.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, Multinomial, marginal_distribution
>>> from sympy import symbols
>>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)
>>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)
>>> M = Multinomial('M', 3, p1, p2, p3)
>>> density(M)(x1, x2, x3)
Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)),
Eq(x1 + x2 + x3, 3)), (0, True))
>>> marginal_distribution(M, M[0])(x1).subs(x1, 1)
3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Multinomial_distribution
.. [2] http://mathworld.wolfram.com/MultinomialDistribution.html
"""
if not isinstance(p[0], list):
p = (list(p), )
return multivariate_rv(MultinomialDistribution, syms, n, p[0])
#-------------------------------------------------------------------------------
# Negative Multinomial Distribution --------------------------------------------
class NegativeMultinomialDistribution(JointDistribution):
_argnames = ('k0', 'p')
is_Continuous=False
is_Discrete = True
@staticmethod
def check(k0, p):
_value_check(k0 > 0,
"number of failures must be a positive integer")
for p_k in p:
_value_check((p_k >= 0, p_k <= 1),
"probability must be in range [0, 1].")
_value_check(sum(p) <= 1,
"success probabilities must not be greater than 1.")
@property
def set(self):
return Range(0, S.Infinity)**len(self.p)
def pdf(self, *k):
k0, p = self.k0, self.p
term_1 = (gamma(k0 + sum(k))*(1 - sum(p))**k0)/gamma(k0)
term_2 = Mul.fromiter(pi**ki/factorial(ki) for pi, ki in zip(p, k))
return term_1 * term_2
def NegativeMultinomial(syms, k0, *p):
"""
Creates a discrete random variable with Negative Multinomial Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
k0 : positive integer
Represents number of failures before the experiment is stopped
p : List of event probabilities
Must be in the range of $[0, 1]$
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, NegativeMultinomial, marginal_distribution
>>> from sympy import symbols
>>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)
>>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)
>>> N = NegativeMultinomial('M', 3, p1, p2, p3)
>>> N_c = NegativeMultinomial('M', 3, 0.1, 0.1, 0.1)
>>> density(N)(x1, x2, x3)
p1**x1*p2**x2*p3**x3*(-p1 - p2 - p3 + 1)**3*gamma(x1 + x2 +
x3 + 3)/(2*factorial(x1)*factorial(x2)*factorial(x3))
>>> marginal_distribution(N_c, N_c[0])(1).evalf().round(2)
0.25
References
==========
.. [1] https://en.wikipedia.org/wiki/Negative_multinomial_distribution
.. [2] http://mathworld.wolfram.com/NegativeBinomialDistribution.html
"""
if not isinstance(p[0], list):
p = (list(p), )
return multivariate_rv(NegativeMultinomialDistribution, syms, k0, p[0])
from sympy.sets import FiniteSet
from sympy.core.numbers import Rational
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy
from sympy.functions.combinatorial.factorials import FallingFactorial
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.integrals.integrals import Integral
from sympy.solvers.solveset import solveset
from .rv import (probability, expectation, density, where, given, pspace, cdf, PSpace,
characteristic_function, sample, sample_iter, random_symbols, independent, dependent,
sampling_density, moment_generating_function, quantile, is_random,
sample_stochastic_process)
__all__ = ['P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf',
'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'median',
'independent', 'random_symbols', 'correlation', 'factorial_moment',
'moment', 'cmoment', 'sampling_density', 'moment_generating_function',
'smoment', 'quantile', 'sample_stochastic_process']
def moment(X, n, c=0, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth moment of a random expression about c.
.. math::
        moment(X, n, c) = E((X-c)^{n})
Default value of c is 0.
Examples
========
>>> from sympy.stats import Die, moment, E
>>> X = Die('X', 6)
>>> moment(X, 1, 6)
-5/2
>>> moment(X, 2)
91/6
>>> moment(X, 1) == E(X)
True
"""
from sympy.stats.symbolic_probability import Moment
if evaluate:
return Moment(X, n, c, condition).doit()
return Moment(X, n, c, condition).rewrite(Integral)
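# A small sketch of the two code paths above: with the default ``evaluate=True``
# the symbolic ``Moment`` is evaluated through ``doit()`` (e.g. ``moment(X, 2)``
# gives ``91/6`` for ``X = Die('X', 6)``, per the doctest), while
# ``evaluate=False`` returns the ``Moment`` expression rewritten via
# ``rewrite(Integral)`` without evaluating it.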
def variance(X, condition=None, **kwargs):
"""
Variance of a random expression.
.. math::
variance(X) = E((X-E(X))^{2})
Examples
========
>>> from sympy.stats import Die, Bernoulli, variance
>>> from sympy import simplify, Symbol
>>> X = Die('X', 6)
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> variance(2*X)
35/3
>>> simplify(variance(B))
p*(1 - p)
"""
if is_random(X) and pspace(X) == PSpace():
from sympy.stats.symbolic_probability import Variance
return Variance(X, condition)
return cmoment(X, 2, condition, **kwargs)
def standard_deviation(X, condition=None, **kwargs):
r"""
Standard Deviation of a random expression
.. math::
        std(X) = \sqrt{E((X-E(X))^{2})}
Examples
========
>>> from sympy.stats import Bernoulli, std
>>> from sympy import Symbol, simplify
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> simplify(std(B))
sqrt(p*(1 - p))
"""
return sqrt(variance(X, condition, **kwargs))
std = standard_deviation
def entropy(expr, condition=None, **kwargs):
"""
    Calculates the entropy of a probability distribution.
Parameters
==========
    expr : the random expression whose entropy is to be calculated
condition : optional, to specify conditions on random expression
b: base of the logarithm, optional
By default, it is taken as Euler's number
Returns
=======
result : Entropy of the expression, a constant
Examples
========
>>> from sympy.stats import Normal, Die, entropy
>>> X = Normal('X', 0, 1)
>>> entropy(X)
log(2)/2 + 1/2 + log(pi)/2
>>> D = Die('D', 4)
>>> entropy(D)
log(4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory)
.. [2] https://www.crmarsh.com/static/pdf/Charles_Marsh_Continuous_Entropy.pdf
.. [3] http://www.math.uconn.edu/~kconrad/blurbs/analysis/entropypost.pdf
"""
pdf = density(expr, condition, **kwargs)
base = kwargs.get('b', exp(1))
if isinstance(pdf, dict):
return sum([-prob*log(prob, base) for prob in pdf.values()])
return expectation(-log(pdf(expr), base))
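# A hedged sketch of the optional base handled above: passing ``b=2`` measures
# entropy in bits, so for the fair four-sided die of the doctest the result is
# 2 (the returned expression may need simplification, depending on how
# ``log(Rational(1, 4), 2)`` evaluates).
#
#     >>> from sympy.stats import Die, entropy
#     >>> entropy(Die('D', 4), b=2)     # 2 bits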
def covariance(X, Y, condition=None, **kwargs):
"""
Covariance of two random expressions.
Explanation
===========
The expectation that the two variables will rise and fall together
.. math::
covariance(X,Y) = E((X-E(X)) (Y-E(Y)))
Examples
========
>>> from sympy.stats import Exponential, covariance
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> covariance(X, X)
lambda**(-2)
>>> covariance(X, Y)
0
>>> covariance(X, Y + rate*X)
1/lambda
"""
if (is_random(X) and pspace(X) == PSpace()) or (is_random(Y) and pspace(Y) == PSpace()):
from sympy.stats.symbolic_probability import Covariance
return Covariance(X, Y, condition)
return expectation(
(X - expectation(X, condition, **kwargs)) *
(Y - expectation(Y, condition, **kwargs)),
condition, **kwargs)
def correlation(X, Y, condition=None, **kwargs):
r"""
Correlation of two random expressions, also known as correlation
coefficient or Pearson's correlation.
Explanation
===========
The normalized expectation that the two variables will rise
and fall together
.. math::
        correlation(X,Y) = E((X-E(X))(Y-E(Y)))/(\sigma_x \sigma_y)
Examples
========
>>> from sympy.stats import Exponential, correlation
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> correlation(X, X)
1
>>> correlation(X, Y)
0
>>> correlation(X, Y + rate*X)
1/sqrt(1 + lambda**(-2))
"""
return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs)
* std(Y, condition, **kwargs))
def cmoment(X, n, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth central moment of a random expression about its mean.
.. math::
cmoment(X, n) = E((X - E(X))^{n})
Examples
========
>>> from sympy.stats import Die, cmoment, variance
>>> X = Die('X', 6)
>>> cmoment(X, 3)
0
>>> cmoment(X, 2)
35/12
>>> cmoment(X, 2) == variance(X)
True
"""
from sympy.stats.symbolic_probability import CentralMoment
if evaluate:
return CentralMoment(X, n, condition).doit()
return CentralMoment(X, n, condition).rewrite(Integral)
def smoment(X, n, condition=None, **kwargs):
r"""
Return the nth Standardized moment of a random expression.
.. math::
smoment(X, n) = E(((X - \mu)/\sigma_X)^{n})
Examples
========
>>> from sympy.stats import skewness, Exponential, smoment
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True)
>>> Y = Exponential('Y', rate)
>>> smoment(Y, 4)
9
>>> smoment(Y, 4) == smoment(3*Y, 4)
True
>>> smoment(Y, 3) == skewness(Y)
True
"""
sigma = std(X, condition, **kwargs)
return (1/sigma)**n*cmoment(X, n, condition, **kwargs)
def skewness(X, condition=None, **kwargs):
r"""
Measure of the asymmetry of the probability distribution.
Explanation
===========
Positive skew indicates that most of the values lie to the right of
the mean.
.. math::
skewness(X) = E(((X - E(X))/\sigma_X)^{3})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. skewness(X, X>0) is skewness of X given X > 0
Examples
========
>>> from sympy.stats import skewness, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> skewness(X)
0
>>> skewness(X, X > 0) # find skewness given X > 0
(-sqrt(2)/sqrt(pi) + 4*sqrt(2)/pi**(3/2))/(1 - 2/pi)**(3/2)
>>> rate = Symbol('lambda', positive=True, real=True)
>>> Y = Exponential('Y', rate)
>>> skewness(Y)
2
"""
return smoment(X, 3, condition=condition, **kwargs)
def kurtosis(X, condition=None, **kwargs):
r"""
Characterizes the tails/outliers of a probability distribution.
Explanation
===========
Kurtosis of any univariate normal distribution is 3. Kurtosis less than
3 means that the distribution produces fewer and less extreme outliers
than the normal distribution.
.. math::
kurtosis(X) = E(((X - E(X))/\sigma_X)^{4})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. kurtosis(X, X>0) is kurtosis of X given X > 0
Examples
========
>>> from sympy.stats import kurtosis, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> kurtosis(X)
3
>>> kurtosis(X, X > 0) # find kurtosis given X > 0
(-4/pi - 12/pi**2 + 3)/(1 - 2/pi)**2
>>> rate = Symbol('lamda', positive=True, real=True)
>>> Y = Exponential('Y', rate)
>>> kurtosis(Y)
9
References
==========
.. [1] https://en.wikipedia.org/wiki/Kurtosis
.. [2] http://mathworld.wolfram.com/Kurtosis.html
"""
return smoment(X, 4, condition=condition, **kwargs)
def factorial_moment(X, n, condition=None, **kwargs):
"""
The factorial moment is a mathematical quantity defined as the expectation
or average of the falling factorial of a random variable.
.. math::
factorial-moment(X, n) = E(X(X - 1)(X - 2)...(X - n + 1))
Parameters
==========
n: A natural number, n-th factorial moment.
condition : Expr containing RandomSymbols
A conditional expression.
Examples
========
>>> from sympy.stats import factorial_moment, Poisson, Binomial
>>> from sympy import Symbol, S
>>> lamda = Symbol('lamda')
>>> X = Poisson('X', lamda)
>>> factorial_moment(X, 2)
lamda**2
>>> Y = Binomial('Y', 2, S.Half)
>>> factorial_moment(Y, 2)
1/2
>>> factorial_moment(Y, 2, Y > 1) # find factorial moment for Y > 1
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Factorial_moment
.. [2] http://mathworld.wolfram.com/FactorialMoment.html
"""
return expectation(FallingFactorial(X, n), condition=condition, **kwargs)
def median(X, evaluate=True, **kwargs):
r"""
    Calculates the median of the probability distribution.
Explanation
===========
    Mathematically, the median of a probability distribution is defined as the
    set of all values `m` for which the following condition is satisfied
.. math::
        P(X\leq m) \geq \frac{1}{2} \text{ and } P(X\geq m) \geq \frac{1}{2}
Parameters
==========
X: The random expression whose median is to be calculated.
Returns
=======
The FiniteSet or an Interval which contains the median of the
random expression.
Examples
========
>>> from sympy.stats import Normal, Die, median
>>> N = Normal('N', 3, 1)
>>> median(N)
{3}
>>> D = Die('D')
>>> median(D)
{3, 4}
References
==========
.. [1] https://en.wikipedia.org/wiki/Median#Probability_distributions
"""
if not is_random(X):
return X
from sympy.stats.crv import ContinuousPSpace
from sympy.stats.drv import DiscretePSpace
from sympy.stats.frv import FinitePSpace
if isinstance(pspace(X), FinitePSpace):
cdf = pspace(X).compute_cdf(X)
result = []
for key, value in cdf.items():
            if value >= Rational(1, 2) and (1 - value) + \
pspace(X).probability(Eq(X, key)) >= Rational(1, 2):
result.append(key)
return FiniteSet(*result)
if isinstance(pspace(X), (ContinuousPSpace, DiscretePSpace)):
cdf = pspace(X).compute_cdf(X)
x = Dummy('x')
result = solveset(piecewise_fold(cdf(x) - Rational(1, 2)), x, pspace(X).set)
return result
raise NotImplementedError("The median of %s is not implemented."%str(pspace(X)))
def coskewness(X, Y, Z, condition=None, **kwargs):
r"""
Calculates the co-skewness of three random variables.
Explanation
===========
    Mathematically, coskewness is defined as
.. math::
coskewness(X,Y,Z)=\frac{E[(X-E[X]) * (Y-E[Y]) * (Z-E[Z])]} {\sigma_{X}\sigma_{Y}\sigma_{Z}}
Parameters
==========
X : RandomSymbol
Random Variable used to calculate coskewness
Y : RandomSymbol
Random Variable used to calculate coskewness
Z : RandomSymbol
Random Variable used to calculate coskewness
condition : Expr containing RandomSymbols
A conditional expression
Examples
========
>>> from sympy.stats import coskewness, Exponential, skewness
>>> from sympy import symbols
>>> p = symbols('p', positive=True)
>>> X = Exponential('X', p)
>>> Y = Exponential('Y', 2*p)
>>> coskewness(X, Y, Y)
0
>>> coskewness(X, Y + X, Y + 2*X)
16*sqrt(85)/85
>>> coskewness(X + 2*Y, Y + X, Y + 2*X, X > 3)
9*sqrt(170)/85
>>> coskewness(Y, Y, Y) == skewness(Y)
True
>>> coskewness(X, Y + p*X, Y + 2*p*X)
4/(sqrt(1 + 1/(4*p**2))*sqrt(4 + 1/(4*p**2)))
Returns
=======
coskewness : The coskewness of the three random variables
References
==========
.. [1] https://en.wikipedia.org/wiki/Coskewness
"""
num = expectation((X - expectation(X, condition, **kwargs)) \
* (Y - expectation(Y, condition, **kwargs)) \
* (Z - expectation(Z, condition, **kwargs)), condition, **kwargs)
den = std(X, condition, **kwargs) * std(Y, condition, **kwargs) \
* std(Z, condition, **kwargs)
return num/den
P = probability
E = expectation
H = entropy
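# ``P``, ``E`` and ``H`` are the short aliases used throughout the doctests in
# this module: probability, expectation and entropy respectively.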
"""
Main Random Variables Module
Defines abstract random variable type.
Contains interfaces for the probability space object (PSpace) as well as the
standard operators: P, E, sample, density, where, and quantile.
See Also
========
sympy.stats.crv
sympy.stats.frv
sympy.stats.rv_interface
"""
from __future__ import annotations
from functools import singledispatch
from math import prod
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import (Function, Lambda)
from sympy.core.logic import fuzzy_and
from sympy.core.mul import Mul
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
from sympy.functions.special.delta_functions import DiracDelta
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.logic.boolalg import (And, Or)
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.tensor.indexed import Indexed
from sympy.utilities.lambdify import lambdify
from sympy.core.relational import Relational
from sympy.core.sympify import _sympify
from sympy.sets.sets import FiniteSet, ProductSet, Intersection
from sympy.solvers.solveset import solveset
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import iterable
x = Symbol('x')
@singledispatch
def is_random(x):
return False
@is_random.register(Basic)
def _(x):
atoms = x.free_symbols
return any(is_random(i) for i in atoms)
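# Illustrative sketch (assuming the ``RandomSymbol`` registration of
# ``is_random`` provided later in this module): any expression whose free
# symbols include a random symbol reports ``True``, while constants fall
# through to ``False``.
#
#     >>> from sympy import S
#     >>> from sympy.stats import Normal
#     >>> X = Normal('X', 0, 1)
#     >>> is_random(X), is_random(X + 1), is_random(S(2))
#     (True, True, False)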
class RandomDomain(Basic):
"""
Represents a set of variables and the values which they can take.
See Also
========
sympy.stats.crv.ContinuousDomain
sympy.stats.frv.FiniteDomain
"""
is_ProductDomain = False
is_Finite = False
is_Continuous = False
is_Discrete = False
def __new__(cls, symbols, *args):
symbols = FiniteSet(*symbols)
return Basic.__new__(cls, symbols, *args)
@property
def symbols(self):
return self.args[0]
@property
def set(self):
return self.args[1]
def __contains__(self, other):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SingleDomain(RandomDomain):
"""
A single variable and its domain.
See Also
========
sympy.stats.crv.SingleContinuousDomain
sympy.stats.frv.SingleFiniteDomain
"""
def __new__(cls, symbol, set):
assert symbol.is_Symbol
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
def __contains__(self, other):
if len(other) != 1:
return False
sym, val = tuple(other)[0]
return self.symbol == sym and val in self.set
class MatrixDomain(RandomDomain):
"""
A Random Matrix variable and its domain.
"""
def __new__(cls, symbol, set):
symbol, set = _symbol_converter(symbol), _sympify(set)
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
class ConditionalDomain(RandomDomain):
"""
A RandomDomain with an attached condition.
See Also
========
sympy.stats.crv.ConditionalContinuousDomain
sympy.stats.frv.ConditionalFiniteDomain
"""
def __new__(cls, fulldomain, condition):
condition = condition.xreplace({rs: rs.symbol
for rs in random_symbols(condition)})
return Basic.__new__(cls, fulldomain, condition)
@property
def symbols(self):
return self.fulldomain.symbols
@property
def fulldomain(self):
return self.args[0]
@property
def condition(self):
return self.args[1]
@property
def set(self):
raise NotImplementedError("Set of Conditional Domain not Implemented")
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
class PSpace(Basic):
"""
A Probability Space.
Explanation
===========
    Probability spaces encode processes that take on different values
    probabilistically. They underlie Random Symbols, which occur in SymPy
    expressions and contain the mechanics to evaluate statistical statements.
See Also
========
sympy.stats.crv.ContinuousPSpace
sympy.stats.frv.FinitePSpace
"""
is_Finite = None # type: bool
is_Continuous = None # type: bool
is_Discrete = None # type: bool
is_real = None # type: bool
@property
def domain(self):
return self.args[0]
@property
def density(self):
return self.args[1]
@property
def values(self):
return frozenset(RandomSymbol(sym, self) for sym in self.symbols)
@property
def symbols(self):
return self.domain.symbols
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self, size=(), library='scipy', seed=None):
raise NotImplementedError()
def probability(self, condition):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SinglePSpace(PSpace):
"""
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
class RandomSymbol(Expr):
"""
Random Symbols represent ProbabilitySpaces in SymPy Expressions.
    In principle they can take on any value that their symbol can take on
    within the associated PSpace, with probability determined by the PSpace
    Density.
Explanation
===========
Random Symbols contain pspace and symbol properties.
The pspace property points to the represented Probability Space
The symbol is a standard SymPy Symbol that is used in that probability space
for example in defining a density.
You can form normal SymPy expressions using RandomSymbols and operate on
those expressions with the Functions
E - Expectation of a random expression
P - Probability of a condition
density - Probability Density of an expression
given - A new random expression (with new random symbols) given a condition
    Objects of the RandomSymbol type should almost never be created by the
    user. They tend to be created instead by the PSpace class's value method.
    Traditionally a user does not even do this but instead calls one of the
    convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc.
"""
def __new__(cls, symbol, pspace=None):
from sympy.stats.joint_rv import JointRandomSymbol
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
symbol = _symbol_converter(symbol)
if not isinstance(pspace, PSpace):
raise TypeError("pspace variable should be of type PSpace")
if cls == JointRandomSymbol and isinstance(pspace, SinglePSpace):
cls = RandomSymbol
return Basic.__new__(cls, symbol, pspace)
is_finite = True
is_symbol = True
is_Atom = True
_diff_wrt = True
pspace = property(lambda self: self.args[1])
symbol = property(lambda self: self.args[0])
name = property(lambda self: self.symbol.name)
def _eval_is_positive(self):
return self.symbol.is_positive
def _eval_is_integer(self):
return self.symbol.is_integer
def _eval_is_real(self):
return self.symbol.is_real or self.pspace.is_real
@property
def is_commutative(self):
return self.symbol.is_commutative
@property
def free_symbols(self):
return {self}
class RandomIndexedSymbol(RandomSymbol):
def __new__(cls, idx_obj, pspace=None):
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
if not isinstance(idx_obj, (Indexed, Function)):
raise TypeError("An Function or Indexed object is expected not %s"%(idx_obj))
return Basic.__new__(cls, idx_obj, pspace)
symbol = property(lambda self: self.args[0])
name = property(lambda self: str(self.args[0]))
@property
def key(self):
if isinstance(self.symbol, Indexed):
return self.symbol.args[1]
elif isinstance(self.symbol, Function):
return self.symbol.args[0]
@property
def free_symbols(self):
if self.key.free_symbols:
free_syms = self.key.free_symbols
free_syms.add(self)
return free_syms
return {self}
@property
def pspace(self):
return self.args[1]
class RandomMatrixSymbol(RandomSymbol, MatrixSymbol): # type: ignore
def __new__(cls, symbol, n, m, pspace=None):
n, m = _sympify(n), _sympify(m)
symbol = _symbol_converter(symbol)
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
return Basic.__new__(cls, symbol, n, m, pspace)
symbol = property(lambda self: self.args[0])
pspace = property(lambda self: self.args[3])
class ProductPSpace(PSpace):
"""
Abstract class for representing probability spaces with multiple random
variables.
See Also
========
sympy.stats.rv.IndependentProductPSpace
sympy.stats.joint_rv.JointPSpace
"""
pass
class IndependentProductPSpace(ProductPSpace):
"""
A probability space resulting from the merger of two independent probability
spaces.
    Often created using the function ``pspace``.
"""
def __new__(cls, *spaces):
rs_space_dict = {}
for space in spaces:
for value in space.values:
rs_space_dict[value] = space
symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()])
# Overlapping symbols
from sympy.stats.joint_rv import MarginalDistribution
from sympy.stats.compound_rv import CompoundDistribution
if len(symbols) < sum(len(space.symbols) for space in spaces if not
isinstance(space.distribution, (
CompoundDistribution, MarginalDistribution))):
raise ValueError("Overlapping Random Variables")
if all(space.is_Finite for space in spaces):
from sympy.stats.frv import ProductFinitePSpace
cls = ProductFinitePSpace
obj = Basic.__new__(cls, *FiniteSet(*spaces))
return obj
@property
def pdf(self):
p = Mul(*[space.pdf for space in self.spaces])
return p.subs({rv: rv.symbol for rv in self.values})
@property
def rs_space_dict(self):
d = {}
for space in self.spaces:
for value in space.values:
d[value] = space
return d
@property
def symbols(self):
return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()])
@property
def spaces(self):
return FiniteSet(*self.args)
@property
def values(self):
return sumsets(space.values for space in self.spaces)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
rvs = rvs or self.values
rvs = frozenset(rvs)
for space in self.spaces:
expr = space.compute_expectation(expr, rvs & space.values, evaluate=False, **kwargs)
if evaluate and hasattr(expr, 'doit'):
return expr.doit(**kwargs)
return expr
@property
def domain(self):
return ProductDomain(*[space.domain for space in self.spaces])
@property
def density(self):
raise NotImplementedError("Density not available for ProductSpaces")
def sample(self, size=(), library='scipy', seed=None):
return {k: v for space in self.spaces
for k, v in space.sample(size=size, library=library, seed=seed).items()}
def probability(self, condition, **kwargs):
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
elif isinstance(condition, And): # they are independent
return Mul(*[self.probability(arg) for arg in condition.args])
elif isinstance(condition, Or): # they are independent
return Add(*[self.probability(arg) for arg in condition.args])
expr = condition.lhs - condition.rhs
rvs = random_symbols(expr)
dens = self.compute_density(expr)
if any(pspace(rv).is_Continuous for rv in rvs):
from sympy.stats.crv import SingleContinuousPSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.integrate(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
dens = ContinuousDistributionHandmade(dens)
z = Dummy('z', real=True)
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
else:
from sympy.stats.drv import SingleDiscretePSpace
from sympy.stats.drv_types import DiscreteDistributionHandmade
dens = DiscreteDistributionHandmade(dens)
z = Dummy('z', integer=True)
space = SingleDiscretePSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
return result if not cond_inv else S.One - result
def compute_density(self, expr, **kwargs):
rvs = random_symbols(expr)
if any(pspace(rv).is_Continuous for rv in rvs):
z = Dummy('z', real=True)
expr = self.compute_expectation(DiracDelta(expr - z),
**kwargs)
else:
z = Dummy('z', integer=True)
expr = self.compute_expectation(KroneckerDelta(expr, z),
**kwargs)
return Lambda(z, expr)
def compute_cdf(self, expr, **kwargs):
raise ValueError("CDF not well defined on multivariate expressions")
def conditional_space(self, condition, normalize=True, **kwargs):
rvs = random_symbols(condition)
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
pspaces = [pspace(rv) for rv in rvs]
if any(ps.is_Continuous for ps in pspaces):
from sympy.stats.crv import (ConditionalContinuousDomain,
ContinuousPSpace)
space = ContinuousPSpace
domain = ConditionalContinuousDomain(self.domain, condition)
elif any(ps.is_Discrete for ps in pspaces):
from sympy.stats.drv import (ConditionalDiscreteDomain,
DiscretePSpace)
space = DiscretePSpace
domain = ConditionalDiscreteDomain(self.domain, condition)
elif all(ps.is_Finite for ps in pspaces):
from sympy.stats.frv import FinitePSpace
return FinitePSpace.conditional_space(self, condition)
if normalize:
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting symbols from set to tuple. The order matters to
# Lambda though so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return space(domain, density)
class ProductDomain(RandomDomain):
"""
A domain resulting from the merger of two independent domains.
See Also
========
sympy.stats.crv.ProductContinuousDomain
sympy.stats.frv.ProductFiniteDomain
"""
is_ProductDomain = True
def __new__(cls, *domains):
# Flatten any product of products
domains2 = []
for domain in domains:
if not domain.is_ProductDomain:
domains2.append(domain)
else:
domains2.extend(domain.domains)
domains2 = FiniteSet(*domains2)
if all(domain.is_Finite for domain in domains2):
from sympy.stats.frv import ProductFiniteDomain
cls = ProductFiniteDomain
if all(domain.is_Continuous for domain in domains2):
from sympy.stats.crv import ProductContinuousDomain
cls = ProductContinuousDomain
if all(domain.is_Discrete for domain in domains2):
from sympy.stats.drv import ProductDiscreteDomain
cls = ProductDiscreteDomain
return Basic.__new__(cls, *domains2)
@property
def sym_domain_dict(self):
return {symbol: domain for domain in self.domains
for symbol in domain.symbols}
@property
def symbols(self):
return FiniteSet(*[sym for domain in self.domains
for sym in domain.symbols])
@property
def domains(self):
return self.args
@property
def set(self):
return ProductSet(*(domain.set for domain in self.domains))
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset([item for item in other
if sympify(domain.symbols.contains(item[0]))
is S.true])
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
def random_symbols(expr):
"""
Returns all RandomSymbols within a SymPy Expression.
"""
atoms = getattr(expr, 'atoms', None)
if atoms is not None:
comp = lambda rv: rv.symbol.name
l = list(atoms(RandomSymbol))
return sorted(l, key=comp)
else:
return []
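# A minimal usage sketch (illustrative only, assuming the public sympy.stats
# API; the helper below is not used by the library itself):
def _example_random_symbols():
    from sympy.stats import Normal
    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    # RandomSymbols are collected and returned sorted by their symbol's name.
    assert random_symbols(2*Y + X**2) == [X, Y]
    # objects without random symbols give an empty list
    assert random_symbols(7) == []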
def pspace(expr):
"""
Returns the underlying Probability Space of a random expression.
For internal use.
Examples
========
>>> from sympy.stats import pspace, Normal
>>> X = Normal('X', 0, 1)
>>> pspace(2*X + 1) == X.pspace
True
"""
expr = sympify(expr)
if isinstance(expr, RandomSymbol) and expr.pspace is not None:
return expr.pspace
if expr.has(RandomMatrixSymbol):
rm = list(expr.atoms(RandomMatrixSymbol))[0]
return rm.pspace
rvs = random_symbols(expr)
if not rvs:
raise ValueError("Expression containing Random Variable expected, not %s" % (expr))
# If only one space present
if all(rv.pspace == rvs[0].pspace for rv in rvs):
return rvs[0].pspace
from sympy.stats.compound_rv import CompoundPSpace
from sympy.stats.stochastic_process import StochasticPSpace
for rv in rvs:
if isinstance(rv.pspace, (CompoundPSpace, StochasticPSpace)):
return rv.pspace
# Otherwise make a product space
return IndependentProductPSpace(*[rv.pspace for rv in rvs])
def sumsets(sets):
"""
Union of sets
"""
return frozenset().union(*sets)
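# A small illustrative check (the helper name is illustrative only and is not
# used by the library): ``sumsets`` unions an iterable of sets into one frozenset.
def _example_sumsets():
    assert sumsets([{1, 2}, {2, 3}, frozenset({4})]) == frozenset({1, 2, 3, 4})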
def rs_swap(a, b):
"""
Build a dictionary to swap RandomSymbols based on their underlying symbol.
i.e.
if ``X = ('x', pspace1)``
and ``Y = ('x', pspace2)``
then ``X`` and ``Y`` match and the key, value pair
``{X:Y}`` will appear in the result
Inputs: collections a and b of random variables which share common symbols
Output: dict mapping RVs in a to RVs in b
"""
d = {}
for rsa in a:
d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0]
return d
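# Illustrative sketch (assuming the public sympy.stats API; the helper below
# is not used by the library): two random variables built on the same symbol
# name are matched by ``rs_swap``.
def _example_rs_swap():
    from sympy.stats import Normal
    X1 = Normal('x', 0, 1)   # one probability space, symbol ``x``
    X2 = Normal('x', 0, 2)   # a different probability space, same symbol ``x``
    assert rs_swap([X1], [X2]) == {X1: X2}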
def given(expr, condition=None, **kwargs):
r""" Conditional Random Expression.
Explanation
===========
From a random expression and a condition on that expression creates a new
probability space from the condition and returns the same expression on that
conditional probability space.
Examples
========
>>> from sympy.stats import given, density, Die
>>> X = Die('X', 6)
>>> Y = given(X, X > 3)
>>> density(Y).dict
{4: 1/3, 5: 1/3, 6: 1/3}
Following convention, if the condition is a random symbol then that symbol
is considered fixed.
>>> from sympy.stats import Normal
>>> from sympy import pprint
>>> from sympy.abc import z
>>> X = Normal('X', 0, 1)
>>> Y = Normal('Y', 0, 1)
>>> pprint(density(X + Y, Y)(z), use_unicode=False)
2
-(-Y + z)
-----------
___ 2
\/ 2 *e
------------------
____
2*\/ pi
"""
if not is_random(condition) or pspace_independent(expr, condition):
return expr
if isinstance(condition, RandomSymbol):
condition = Eq(condition, condition.symbol)
condsymbols = random_symbols(condition)
if (isinstance(condition, Eq) and len(condsymbols) == 1 and
not isinstance(pspace(expr).domain, ConditionalDomain)):
rv = tuple(condsymbols)[0]
results = solveset(condition, rv)
if isinstance(results, Intersection) and S.Reals in results.args:
results = list(results.args[1])
sums = 0
for res in results:
temp = expr.subs(rv, res)
if temp == True:
return True
if temp != False:
# XXX: This seems nonsensical but preserves existing behaviour
# after the change that Relational is no longer a subclass of
# Expr. Here expr is sometimes Relational and sometimes Expr
# but we are trying to add them with +=. This needs to be
# fixed somehow.
if sums == 0 and isinstance(expr, Relational):
sums = expr.subs(rv, res)
else:
sums += expr.subs(rv, res)
if sums == 0:
return False
return sums
# Get full probability space of both the expression and the condition
fullspace = pspace(Tuple(expr, condition))
# Build new space given the condition
space = fullspace.conditional_space(condition, **kwargs)
# Dictionary to swap out RandomSymbols in expr with new RandomSymbols
# That point to the new conditional space
swapdict = rs_swap(fullspace.values, space.values)
# Swap random variables in the expression
expr = expr.xreplace(swapdict)
return expr
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression.
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
    condition : Expr containing RandomSymbols
        A conditional expression. E(X, X>0) is the expectation of X given X > 0
    numsamples : int
        Enables sampling and approximates the expectation with this many samples
    evalf : Bool (defaults to True)
        If sampling, return a number rather than a symbolic expression
    evaluate : Bool (defaults to True)
        If False, return an unevaluated Expectation (for continuous systems an
        unevaluated integral)
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not is_random(expr): # expr isn't random?
return expr
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Expectation
if evaluate:
return Expectation(expr, condition).doit(**kwargs)
return Expectation(expr, condition)
def probability(condition, given_condition=None, numsamples=None,
evaluate=True, **kwargs):
"""
Probability that a condition is true, optionally given a second condition.
Parameters
==========
condition : Combination of Relationals containing RandomSymbols
The condition of which you want to compute the probability
given_condition : Combination of Relationals containing RandomSymbols
        A conditional expression. P(X > 1, X > 0) is the probability of X > 1
        given X > 0
    numsamples : int
        Enables sampling and approximates the probability with this many samples
    evaluate : Bool (defaults to True)
        If False, return an unevaluated Probability (for continuous systems an
        unevaluated integral)
Examples
========
>>> from sympy.stats import P, Die
>>> from sympy import Eq
>>> X, Y = Die('X', 6), Die('Y', 6)
>>> P(X > 3)
1/2
>>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2
1/4
>>> P(X > Y)
5/12
"""
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Probability
if evaluate:
return Probability(condition, given_condition).doit(**kwargs)
return Probability(condition, given_condition)
class Density(Basic):
expr = property(lambda self: self.args[0])
def __new__(cls, expr, condition = None):
expr = _sympify(expr)
if condition is None:
obj = Basic.__new__(cls, expr)
else:
condition = _sympify(condition)
obj = Basic.__new__(cls, expr, condition)
return obj
@property
def condition(self):
if len(self.args) > 1:
return self.args[1]
else:
return None
def doit(self, evaluate=True, **kwargs):
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.stats.joint_rv import JointPSpace
from sympy.stats.matrix_distributions import MatrixPSpace
from sympy.stats.compound_rv import CompoundPSpace
from sympy.stats.frv import SingleFiniteDistribution
expr, condition = self.expr, self.condition
if isinstance(expr, SingleFiniteDistribution):
return expr.dict
if condition is not None:
# Recompute on new conditional expr
expr = given(expr, condition, **kwargs)
if not random_symbols(expr):
return Lambda(x, DiracDelta(x - expr))
if isinstance(expr, RandomSymbol):
if isinstance(expr.pspace, (SinglePSpace, JointPSpace, MatrixPSpace)) and \
hasattr(expr.pspace, 'distribution'):
return expr.pspace.distribution
elif isinstance(expr.pspace, RandomMatrixPSpace):
return expr.pspace.model
if isinstance(pspace(expr), CompoundPSpace):
kwargs['compound_evaluate'] = evaluate
result = pspace(expr).compute_density(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs):
"""
Probability density of a random expression, optionally given a second
condition.
Explanation
===========
This density will take on different forms for different types of
probability spaces. Discrete variables produce Dicts. Continuous
variables produce Lambdas.
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the density value
condition : Relational containing RandomSymbols
A conditional expression. density(X > 1, X > 0) is density of X > 1
given X > 0
numsamples : int
Enables sampling and approximates the density with this many samples
Examples
========
>>> from sympy.stats import density, Die, Normal
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> D = Die('D', 6)
>>> X = Normal(x, 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> density(2*D).dict
{2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6}
>>> density(X)(x)
sqrt(2)*exp(-x**2/2)/(2*sqrt(pi))
"""
if numsamples:
return sampling_density(expr, condition, numsamples=numsamples,
**kwargs)
return Density(expr, condition).doit(evaluate=evaluate, **kwargs)
def cdf(expr, condition=None, evaluate=True, **kwargs):
"""
    Cumulative Distribution Function of a random expression,
    optionally given a second condition.
    Explanation
    ===========
    This function will take on different forms for different types of
    probability spaces.
    Discrete variables produce Dicts.
    Continuous variables produce Lambdas.
Examples
========
>>> from sympy.stats import density, Die, Normal, cdf
>>> D = Die('D', 6)
>>> X = Normal('X', 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> cdf(D)
{1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1}
>>> cdf(3*D, D > 2)
{9: 1/4, 12: 1/2, 15: 3/4, 18: 1}
>>> cdf(X)
Lambda(_z, erf(sqrt(2)*_z/2)/2 + 1/2)
"""
if condition is not None: # If there is a condition
# Recompute on new conditional expr
return cdf(given(expr, condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
result = pspace(expr).compute_cdf(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def characteristic_function(expr, condition=None, evaluate=True, **kwargs):
"""
Characteristic function of a random expression, optionally given a second condition.
Returns a Lambda.
Examples
========
>>> from sympy.stats import Normal, DiscreteUniform, Poisson, characteristic_function
>>> X = Normal('X', 0, 1)
>>> characteristic_function(X)
Lambda(_t, exp(-_t**2/2))
>>> Y = DiscreteUniform('Y', [1, 2, 7])
>>> characteristic_function(Y)
Lambda(_t, exp(7*_t*I)/3 + exp(2*_t*I)/3 + exp(_t*I)/3)
>>> Z = Poisson('Z', 2)
>>> characteristic_function(Z)
Lambda(_t, exp(2*exp(_t*I) - 2))
"""
if condition is not None:
return characteristic_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_characteristic_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def moment_generating_function(expr, condition=None, evaluate=True, **kwargs):
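    """
    Moment generating function of a random expression, optionally given
    a second condition.
    Returns a Lambda.
    Examples
    ========
    >>> from sympy.stats import Normal, moment_generating_function
    >>> from sympy.abc import t
    >>> X = Normal('X', 0, 1)
    >>> moment_generating_function(X)(t)
    exp(t**2/2)
    """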
if condition is not None:
return moment_generating_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_moment_generating_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def where(condition, given_condition=None, **kwargs):
"""
Returns the domain where a condition is True.
Examples
========
>>> from sympy.stats import where, Die, Normal
>>> from sympy import And
>>> D1, D2 = Die('a', 6), Die('b', 6)
>>> a, b = D1.symbol, D2.symbol
>>> X = Normal('x', 0, 1)
>>> where(X**2<1)
Domain: (-1 < x) & (x < 1)
>>> where(X**2<1).set
Interval.open(-1, 1)
>>> where(And(D1<=D2, D2<3))
Domain: (Eq(a, 1) & Eq(b, 1)) | (Eq(a, 1) & Eq(b, 2)) | (Eq(a, 2) & Eq(b, 2))
"""
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return where(given(condition, given_condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
return pspace(condition).where(condition, **kwargs)
@doctest_depends_on(modules=('scipy',))
def sample(expr, condition=None, size=(), library='scipy',
numsamples=1, seed=None, **kwargs):
"""
A realization of the random expression.
Parameters
==========
expr : Expression of random variables
Expression from which sample is extracted
condition : Expr containing RandomSymbols
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
library : str
- 'scipy' : Sample using scipy
- 'numpy' : Sample using numpy
- 'pymc' : Sample using PyMC
Choose any of the available options to sample from as string,
by default is 'scipy'
numsamples : int
Number of samples, each with size as ``size``.
.. deprecated:: 1.9
The ``numsamples`` parameter is deprecated and is only provided for
compatibility with v1.8. Use a list comprehension or an additional
dimension in ``size`` instead. See
:ref:`deprecated-sympy-stats-numsamples` for details.
seed :
An object to be used as seed by the given external library for sampling `expr`.
Following is the list of possible types of object for the supported libraries,
- 'scipy': int, numpy.random.RandomState, numpy.random.Generator
- 'numpy': int, numpy.random.RandomState, numpy.random.Generator
- 'pymc': int
Optional, by default None, in which case seed settings
related to the given library will be used.
No modifications to environment's global seed settings
are done by this argument.
Returns
=======
sample: float/list/numpy.ndarray
one sample or a collection of samples of the random expression.
- sample(X) returns float/numpy.float64/numpy.int64 object.
- sample(X, size=int/tuple) returns numpy.ndarray object.
Examples
========
>>> from sympy.stats import Die, sample, Normal, Geometric
>>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) # Finite Random Variable
>>> die_roll = sample(X + Y + Z)
>>> die_roll # doctest: +SKIP
3
>>> N = Normal('N', 3, 4) # Continuous Random Variable
>>> samp = sample(N)
>>> samp in N.pspace.domain.set
True
>>> samp = sample(N, N>0)
>>> samp > 0
True
>>> samp_list = sample(N, size=4)
>>> [sam in N.pspace.domain.set for sam in samp_list]
[True, True, True, True]
>>> sample(N, size = (2,3)) # doctest: +SKIP
array([[5.42519758, 6.40207856, 4.94991743],
[1.85819627, 6.83403519, 1.9412172 ]])
>>> G = Geometric('G', 0.5) # Discrete Random Variable
>>> samp_list = sample(G, size=3)
>>> samp_list # doctest: +SKIP
[1, 3, 2]
>>> [sam in G.pspace.domain.set for sam in samp_list]
[True, True, True]
>>> MN = Normal("MN", [3, 4], [[2, 1], [1, 2]]) # Joint Random Variable
>>> samp_list = sample(MN, size=4)
>>> samp_list # doctest: +SKIP
[array([2.85768055, 3.38954165]),
array([4.11163337, 4.3176591 ]),
array([0.79115232, 1.63232916]),
array([4.01747268, 3.96716083])]
>>> [tuple(sam) in MN.pspace.domain.set for sam in samp_list]
[True, True, True, True]
.. versionchanged:: 1.7.0
sample used to return an iterator containing the samples instead of value.
.. versionchanged:: 1.9.0
sample returns values or array of values instead of an iterator and numsamples is deprecated.
"""
iterator = sample_iter(expr, condition, size=size, library=library,
numsamples=numsamples, seed=seed)
if numsamples != 1:
sympy_deprecation_warning(
f"""
The numsamples parameter to sympy.stats.sample() is deprecated.
Either use a list comprehension, like
[sample(...) for i in range({numsamples})]
or add a dimension to size, like
sample(..., size={(numsamples,) + size})
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-sympy-stats-numsamples",
)
return [next(iterator) for i in range(numsamples)]
return next(iterator)
def quantile(expr, evaluate=True, **kwargs):
r"""
Return the :math:`p^{th}` order quantile of a probability distribution.
Explanation
===========
    The quantile is defined as the smallest value of the random variable at
    which the cumulative probability is greater than or equal to the given
    probability.
.. math::
Q(p) = \inf\{x \in (-\infty, \infty) : p \le F(x)\}
Examples
========
>>> from sympy.stats import quantile, Die, Exponential
>>> from sympy import Symbol, pprint
>>> p = Symbol("p")
>>> l = Symbol("lambda", positive=True)
>>> X = Exponential("x", l)
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> D = Die("d", 6)
>>> pprint(quantile(D)(p), use_unicode=False)
/nan for Or(p > 1, p < 0)
|
| 1 for p <= 1/6
|
| 2 for p <= 1/3
|
< 3 for p <= 1/2
|
| 4 for p <= 2/3
|
| 5 for p <= 5/6
|
\ 6 for p <= 1
"""
result = pspace(expr).compute_quantile(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def sample_iter(expr, condition=None, size=(), library='scipy',
numsamples=S.Infinity, seed=None, **kwargs):
"""
Returns an iterator of realizations from the expression given a condition.
Parameters
==========
expr: Expr
Random expression to be realized
condition: Expr, optional
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
numsamples: integer, optional
Length of the iterator (defaults to infinity)
seed :
An object to be used as seed by the given external library for sampling `expr`.
Following is the list of possible types of object for the supported libraries,
- 'scipy': int, numpy.random.RandomState, numpy.random.Generator
- 'numpy': int, numpy.random.RandomState, numpy.random.Generator
- 'pymc': int
Optional, by default None, in which case seed settings
related to the given library will be used.
No modifications to environment's global seed settings
are done by this argument.
Examples
========
>>> from sympy.stats import Normal, sample_iter
>>> X = Normal('X', 0, 1)
>>> expr = X*X + 3
>>> iterator = sample_iter(expr, numsamples=3) # doctest: +SKIP
>>> list(iterator) # doctest: +SKIP
[12, 4, 7]
Returns
=======
sample_iter: iterator object
iterator object containing the sample/samples of given expr
See Also
========
sample
sampling_P
sampling_E
"""
from sympy.stats.joint_rv import JointRandomSymbol
if not import_module(library):
raise ValueError("Failed to import %s" % library)
if condition is not None:
ps = pspace(Tuple(expr, condition))
else:
ps = pspace(expr)
rvs = list(ps.values)
if isinstance(expr, JointRandomSymbol):
expr = expr.subs({expr: RandomSymbol(expr.symbol, expr.pspace)})
else:
sub = {}
for arg in expr.args:
if isinstance(arg, JointRandomSymbol):
sub[arg] = RandomSymbol(arg.symbol, arg.pspace)
expr = expr.subs(sub)
def fn_subs(*args):
return expr.subs({rv: arg for rv, arg in zip(rvs, args)})
def given_fn_subs(*args):
if condition is not None:
return condition.subs({rv: arg for rv, arg in zip(rvs, args)})
return False
if library in ('pymc', 'pymc3'):
# Currently unable to lambdify in pymc
# TODO : Remove when lambdify accepts 'pymc' as module
fn = lambdify(rvs, expr, **kwargs)
else:
fn = lambdify(rvs, expr, modules=library, **kwargs)
if condition is not None:
given_fn = lambdify(rvs, condition, **kwargs)
def return_generator_infinite():
count = 0
_size = (1,)+((size,) if isinstance(size, int) else size)
while count < numsamples:
d = ps.sample(size=_size, library=library, seed=seed) # a dictionary that maps RVs to values
args = [d[rv][0] for rv in rvs]
if condition is not None: # Check that these values satisfy the condition
# TODO: Replace the try-except block with only given_fn(*args)
# once lambdify works with unevaluated SymPy objects.
try:
gd = given_fn(*args)
except (NameError, TypeError):
gd = given_fn_subs(*args)
if gd != True and gd != False:
raise ValueError(
"Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
continue
yield fn(*args)
count += 1
def return_generator_finite():
faulty = True
while faulty:
d = ps.sample(size=(numsamples,) + ((size,) if isinstance(size, int) else size),
library=library, seed=seed) # a dictionary that maps RVs to values
faulty = False
count = 0
while count < numsamples and not faulty:
args = [d[rv][count] for rv in rvs]
if condition is not None: # Check that these values satisfy the condition
# TODO: Replace the try-except block with only given_fn(*args)
# once lambdify works with unevaluated SymPy objects.
try:
gd = given_fn(*args)
except (NameError, TypeError):
gd = given_fn_subs(*args)
if gd != True and gd != False:
raise ValueError(
"Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
faulty = True
count += 1
count = 0
while count < numsamples:
args = [d[rv][count] for rv in rvs]
# TODO: Replace the try-except block with only fn(*args)
# once lambdify works with unevaluated SymPy objects.
try:
yield fn(*args)
except (NameError, TypeError):
yield fn_subs(*args)
count += 1
if numsamples is S.Infinity:
return return_generator_infinite()
return return_generator_finite()
def sample_iter_lambdify(expr, condition=None, size=(),
numsamples=S.Infinity, seed=None, **kwargs):
return sample_iter(expr, condition=condition, size=size,
numsamples=numsamples, seed=seed, **kwargs)
def sample_iter_subs(expr, condition=None, size=(),
numsamples=S.Infinity, seed=None, **kwargs):
return sample_iter(expr, condition=condition, size=size,
numsamples=numsamples, seed=seed, **kwargs)
def sampling_P(condition, given_condition=None, library='scipy', numsamples=1,
evalf=True, seed=None, **kwargs):
"""
Sampling version of P.
See Also
========
P
sampling_E
sampling_density
"""
count_true = 0
count_false = 0
samples = sample_iter(condition, given_condition, library=library,
numsamples=numsamples, seed=seed, **kwargs)
for sample in samples:
if sample:
count_true += 1
else:
count_false += 1
result = S(count_true) / numsamples
if evalf:
return result.evalf()
else:
return result
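# A minimal usage sketch (assuming SciPy is installed for sampling; the helper
# name is illustrative only).  The estimate is random, so the value varies
# from run to run and converges to 1/2 for large ``numsamples``.
def _example_sampling_P():
    from sympy.stats import Die
    X = Die('X', 6)
    # Monte-Carlo estimate of P(X > 3)
    return sampling_P(X > 3, numsamples=1000, seed=42)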
def sampling_E(expr, given_condition=None, library='scipy', numsamples=1,
evalf=True, seed=None, **kwargs):
"""
Sampling version of E.
See Also
========
P
sampling_P
sampling_density
"""
samples = list(sample_iter(expr, given_condition, library=library,
numsamples=numsamples, seed=seed, **kwargs))
result = Add(*[samp for samp in samples]) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_density(expr, given_condition=None, library='scipy',
numsamples=1, seed=None, **kwargs):
"""
Sampling version of density.
See Also
========
density
sampling_P
sampling_E
"""
results = {}
for result in sample_iter(expr, given_condition, library=library,
numsamples=numsamples, seed=seed, **kwargs):
results[result] = results.get(result, 0) + 1
return results
def dependent(a, b):
"""
Dependence of two random expressions.
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, dependent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> dependent(X, Y)
False
>>> dependent(2*X + Y, -Y)
True
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> dependent(X, Y)
True
See Also
========
independent
"""
if pspace_independent(a, b):
return False
z = Symbol('z', real=True)
# Dependent if density is unchanged when one is given information about
# the other
return (density(a, Eq(b, z)) != density(a) or
density(b, Eq(a, z)) != density(b))
def independent(a, b):
"""
Independence of two random expressions.
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, independent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> independent(X, Y)
True
>>> independent(2*X + Y, -Y)
False
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> independent(X, Y)
False
See Also
========
dependent
"""
return not dependent(a, b)
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
"""
a_symbols = set(pspace(b).symbols)
b_symbols = set(pspace(a).symbols)
if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0:
return False
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None
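# Illustrative sketch (assuming the public sympy.stats API; the helper below
# is not used by the library): disjoint probability spaces are a sufficient
# test for independence.
def _example_pspace_independent():
    from sympy.stats import Normal
    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    assert pspace_independent(X, Y)
    assert pspace_independent(X, X + Y) is False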
def rv_subs(expr, symbols=None):
"""
Given a random expression replace all random variables with their symbols.
If symbols keyword is given restrict the swap to only the symbols listed.
"""
if symbols is None:
symbols = random_symbols(expr)
if not symbols:
return expr
swapdict = {rv: rv.symbol for rv in symbols}
return expr.subs(swapdict)
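# Illustrative sketch (assuming the public sympy.stats API; the helper name is
# illustrative only): random variables are replaced by their plain symbols.
def _example_rv_subs():
    from sympy import Symbol
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    assert rv_subs(X + 1) == Symbol('X') + 1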
class NamedArgsMixin:
_argnames: tuple[str, ...] = ()
def __getattr__(self, attr):
try:
return self.args[self._argnames.index(attr)]
except ValueError:
raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
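# Illustrative sketch (the class and helper below are illustrative only, not
# part of the library; ``Basic`` is assumed to be imported at the top of this
# module, as it is used elsewhere in the file): subclasses expose their
# positional ``args`` through the attribute names listed in ``_argnames``.
def _example_named_args_mixin():
    from sympy import Integer
    class _Pair(Basic, NamedArgsMixin):
        _argnames = ('left', 'right')
    p = _Pair(Integer(1), Integer(5))
    assert (p.left, p.right) == (1, 5)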
class Distribution(Basic):
def sample(self, size=(), library='scipy', seed=None):
""" A random realization from the distribution """
module = import_module(library)
if library in {'scipy', 'numpy', 'pymc3', 'pymc'} and module is None:
raise ValueError("Failed to import %s" % library)
if library == 'scipy':
# scipy does not require map as it can handle using custom distributions.
# However, we will still use a map where we can.
# TODO: do this for drv.py and frv.py if necessary.
# TODO: add more distributions here if there are more
# See links below referring to sections beginning with "A common parametrization..."
# I will remove all these comments if everything is ok.
from sympy.stats.sampling.sample_scipy import do_sample_scipy
import numpy
if seed is None or isinstance(seed, int):
rand_state = numpy.random.default_rng(seed=seed)
else:
rand_state = seed
samps = do_sample_scipy(self, size, rand_state)
elif library == 'numpy':
from sympy.stats.sampling.sample_numpy import do_sample_numpy
import numpy
if seed is None or isinstance(seed, int):
rand_state = numpy.random.default_rng(seed=seed)
else:
rand_state = seed
_size = None if size == () else size
samps = do_sample_numpy(self, _size, rand_state)
elif library in ('pymc', 'pymc3'):
from sympy.stats.sampling.sample_pymc import do_sample_pymc
import logging
logging.getLogger("pymc").setLevel(logging.ERROR)
try:
import pymc
except ImportError:
import pymc3 as pymc
with pymc.Model():
if do_sample_pymc(self):
samps = pymc.sample(draws=prod(size), chains=1, compute_convergence_checks=False,
progressbar=False, random_seed=seed, return_inferencedata=False)[:]['X']
samps = samps.reshape(size)
else:
samps = None
else:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self, library))
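# A minimal usage sketch (assuming SciPy and NumPy are installed; the helper
# name is illustrative only).  The distribution attached to a random
# variable's pspace exposes the ``sample`` method defined above.
def _example_distribution_sample():
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    # three independent draws from the standard normal distribution
    return X.pspace.distribution.sample(size=(3,), library='scipy', seed=7)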
def _value_check(condition, message):
"""
Raise a ValueError with message if condition is False, else
return True if all conditions were True, else False.
Examples
========
>>> from sympy.stats.rv import _value_check
>>> from sympy.abc import a, b, c
>>> from sympy import And, Dummy
>>> _value_check(2 < 3, '')
True
Here, the condition is not False, but it does not evaluate to True
so False is returned (but no error is raised). So checking if the
return value is True or False will tell you if all conditions were
evaluated.
>>> _value_check(a < b, '')
False
In this case the condition is False so an error is raised:
>>> r = Dummy(real=True)
>>> _value_check(r < r - 1, 'condition is not true')
Traceback (most recent call last):
...
ValueError: condition is not true
    Several conditions, none of which may be False, can be checked by
    passing them as an iterable:
>>> _value_check((a < 0, b < 0, c < 0), '')
False
The iterable can be a generator, too:
>>> _value_check((i < 0 for i in (a, b, c)), '')
False
The following are equivalent to the above but do not pass
an iterable:
>>> all(_value_check(i < 0, '') for i in (a, b, c))
False
>>> _value_check(And(a < 0, b < 0, c < 0), '')
False
"""
if not iterable(condition):
condition = [condition]
truth = fuzzy_and(condition)
if truth == False:
raise ValueError(message)
return truth == True
def _symbol_converter(sym):
"""
    Casts the parameter to a Symbol if it is a string;
    otherwise no operation is performed on it.
Parameters
==========
sym
The parameter to be converted.
Returns
=======
Symbol
the parameter converted to Symbol.
Raises
======
TypeError
        If the parameter is neither a string nor a Symbol.
Examples
========
>>> from sympy import Symbol
>>> from sympy.stats.rv import _symbol_converter
>>> s = _symbol_converter('s')
>>> isinstance(s, Symbol)
True
>>> _symbol_converter(1)
Traceback (most recent call last):
...
TypeError: 1 is neither a Symbol nor a string
>>> r = Symbol('r')
>>> isinstance(r, Symbol)
True
"""
if isinstance(sym, str):
sym = Symbol(sym)
if not isinstance(sym, Symbol):
raise TypeError("%s is neither a Symbol nor a string"%(sym))
return sym
def sample_stochastic_process(process):
"""
    This function is used to sample from a stochastic process.
Parameters
==========
process: StochasticProcess
Process used to extract the samples. It must be an instance of
StochasticProcess
Examples
========
>>> from sympy.stats import sample_stochastic_process, DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> next(sample_stochastic_process(Y)) in Y.state_space
True
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
0
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
2
Returns
=======
sample: iterator object
iterator object containing the sample of given process
"""
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise ValueError("Process must be an instance of Stochastic Process")
return process.sample()
|
48ed4a5c9d1a1c66c3e72f5003ca3aa8e1db8a8dee6bcc323b646c8741e7c38f | from sympy.concrete.summations import Sum
from sympy.core.basic import Basic
from sympy.core.function import Lambda
from sympy.core.symbol import Dummy
from sympy.integrals.integrals import Integral
from sympy.stats.rv import (NamedArgsMixin, random_symbols, _symbol_converter,
PSpace, RandomSymbol, is_random, Distribution)
from sympy.stats.crv import ContinuousDistribution, SingleContinuousPSpace
from sympy.stats.drv import DiscreteDistribution, SingleDiscretePSpace
from sympy.stats.frv import SingleFiniteDistribution, SingleFinitePSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
from sympy.stats.drv_types import DiscreteDistributionHandmade
from sympy.stats.frv_types import FiniteDistributionHandmade
class CompoundPSpace(PSpace):
"""
A temporary Probability Space for the Compound Distribution. After
Marginalization, this returns the corresponding Probability Space of the
parent distribution.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
if isinstance(distribution, ContinuousDistribution):
return SingleContinuousPSpace(s, distribution)
if isinstance(distribution, DiscreteDistribution):
return SingleDiscretePSpace(s, distribution)
if isinstance(distribution, SingleFiniteDistribution):
return SingleFinitePSpace(s, distribution)
if not isinstance(distribution, CompoundDistribution):
raise ValueError("%s should be an isinstance of "
"CompoundDistribution"%(distribution))
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def is_Continuous(self):
return self.distribution.is_Continuous
@property
def is_Finite(self):
return self.distribution.is_Finite
@property
def is_Discrete(self):
return self.distribution.is_Discrete
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return self._get_newpspace().domain
def _get_newpspace(self, evaluate=False):
x = Dummy('x')
parent_dist = self.distribution.args[0]
func = Lambda(x, self.distribution.pdf(x, evaluate))
new_pspace = self._transform_pspace(self.symbol, parent_dist, func)
if new_pspace is not None:
return new_pspace
message = ("Compound Distribution for %s is not implemented yet" % str(parent_dist))
raise NotImplementedError(message)
def _transform_pspace(self, sym, dist, pdf):
"""
This function returns the new pspace of the distribution using handmade
Distributions and their corresponding pspace.
"""
pdf = Lambda(sym, pdf(sym))
_set = dist.set
if isinstance(dist, ContinuousDistribution):
return SingleContinuousPSpace(sym, ContinuousDistributionHandmade(pdf, _set))
elif isinstance(dist, DiscreteDistribution):
return SingleDiscretePSpace(sym, DiscreteDistributionHandmade(pdf, _set))
elif isinstance(dist, SingleFiniteDistribution):
dens = {k: pdf(k) for k in _set}
return SingleFinitePSpace(sym, FiniteDistributionHandmade(dens))
def compute_density(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_density(expr, **kwargs)
def compute_cdf(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_cdf(expr, **kwargs)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
new_pspace = self._get_newpspace(evaluate)
expr = expr.subs({self.value: new_pspace.value})
if rvs:
rvs = rvs.subs({self.value: new_pspace.value})
if isinstance(new_pspace, SingleFinitePSpace):
return new_pspace.compute_expectation(expr, rvs, **kwargs)
return new_pspace.compute_expectation(expr, rvs, evaluate, **kwargs)
def probability(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.probability(condition)
def conditional_space(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.conditional_space(condition)
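# A minimal usage sketch (assuming the public sympy.stats API; the helper name
# is illustrative only).  A distribution whose parameter is itself random is
# placed in a CompoundPSpace, and asking for its density marginalises the
# random parameter out through the machinery above.
def _example_compound_density():
    from sympy.stats import Normal, density
    from sympy.abc import z
    m = Normal('m', 2, 4)      # random mean
    Y = Normal('Y', m, 1)      # compound random variable
    return density(Y)(z)       # pdf of Y with ``m`` integrated out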
class CompoundDistribution(Distribution, NamedArgsMixin):
"""
Class for Compound Distributions.
Parameters
==========
dist : Distribution
Distribution must contain a random parameter
Examples
========
>>> from sympy.stats.compound_rv import CompoundDistribution
>>> from sympy.stats.crv_types import NormalDistribution
>>> from sympy.stats import Normal
>>> from sympy.abc import x
>>> X = Normal('X', 2, 4)
>>> N = NormalDistribution(X, 4)
>>> C = CompoundDistribution(N)
>>> C.set
Interval(-oo, oo)
>>> C.pdf(x, evaluate=True).simplify()
exp(-x**2/64 + x/16 - 1/16)/(8*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Compound_probability_distribution
"""
def __new__(cls, dist):
if not isinstance(dist, (ContinuousDistribution,
SingleFiniteDistribution, DiscreteDistribution)):
message = "Compound Distribution for %s is not implemented yet" % str(dist)
raise NotImplementedError(message)
if not cls._compound_check(dist):
return dist
return Basic.__new__(cls, dist)
@property
def set(self):
return self.args[0].set
@property
def is_Continuous(self):
return isinstance(self.args[0], ContinuousDistribution)
@property
def is_Finite(self):
return isinstance(self.args[0], SingleFiniteDistribution)
@property
def is_Discrete(self):
return isinstance(self.args[0], DiscreteDistribution)
def pdf(self, x, evaluate=False):
dist = self.args[0]
randoms = [rv for rv in dist.args if is_random(rv)]
if isinstance(dist, SingleFiniteDistribution):
y = Dummy('y', integer=True, negative=False)
expr = dist.pmf(y)
else:
y = Dummy('y')
expr = dist.pdf(y)
for rv in randoms:
expr = self._marginalise(expr, rv, evaluate)
return Lambda(y, expr)(x)
def _marginalise(self, expr, rv, evaluate):
if isinstance(rv.pspace.distribution, SingleFiniteDistribution):
rv_dens = rv.pspace.distribution.pmf(rv)
else:
rv_dens = rv.pspace.distribution.pdf(rv)
rv_dom = rv.pspace.domain.set
if rv.pspace.is_Discrete or rv.pspace.is_Finite:
expr = Sum(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
else:
expr = Integral(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
if evaluate:
return expr.doit()
return expr
@classmethod
def _compound_check(self, dist):
"""
Checks if the given distribution contains random parameters.
"""
randoms = []
for arg in dist.args:
randoms.extend(random_symbols(arg))
if len(randoms) == 0:
return False
return True
|
ee2e57014631b7e7a498fe4a0cc6b2b549660afc60944b29a166ecc127fcdad1 | from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.core.basic import Basic
from sympy.core.function import Lambda
from sympy.core.numbers import (I, pi)
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.special.gamma_functions import gamma
from sympy.integrals.integrals import Integral
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.trace import Trace
from sympy.tensor.indexed import IndexedBase
from sympy.core.sympify import _sympify
from sympy.stats.rv import _symbol_converter, Density, RandomMatrixSymbol, is_random
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.tensor.array import ArrayComprehension
__all__ = [
'CircularEnsemble',
'CircularUnitaryEnsemble',
'CircularOrthogonalEnsemble',
'CircularSymplecticEnsemble',
'GaussianEnsemble',
'GaussianUnitaryEnsemble',
'GaussianOrthogonalEnsemble',
'GaussianSymplecticEnsemble',
'joint_eigen_distribution',
'JointEigenDistribution',
'level_spacing_distribution'
]
@is_random.register(RandomMatrixSymbol)
def _(x):
return True
class RandomMatrixEnsembleModel(Basic):
"""
Base class for random matrix ensembles.
It acts as an umbrella and contains
the methods common to all the ensembles
defined in sympy.stats.random_matrix_models.
"""
def __new__(cls, sym, dim=None):
sym, dim = _symbol_converter(sym), _sympify(dim)
if dim.is_integer == False:
raise ValueError("Dimension of the random matrices must be "
"integers, received %s instead."%(dim))
return Basic.__new__(cls, sym, dim)
symbol = property(lambda self: self.args[0])
dimension = property(lambda self: self.args[1])
def density(self, expr):
return Density(expr)
def __call__(self, expr):
return self.density(expr)
class GaussianEnsembleModel(RandomMatrixEnsembleModel):
"""
Abstract class for Gaussian ensembles.
Contains the properties common to all the
gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Gaussian_ensembles
.. [2] https://arxiv.org/pdf/1712.07903.pdf
"""
def _compute_normalization_constant(self, beta, n):
"""
Helper function for computing normalization
constant for joint probability density of eigen
values of Gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Selberg_integral#Mehta's_integral
"""
n = S(n)
prod_term = lambda j: gamma(1 + beta*S(j)/2)/gamma(S.One + beta/S(2))
j = Dummy('j', integer=True, positive=True)
term1 = Product(prod_term(j), (j, 1, n)).doit()
term2 = (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
term3 = (2*pi)**(n/2)
return term1 * term2 * term3
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function for computing the joint
probability distribution of eigen values
of the random matrix.
"""
n = self.dimension
Zbn = self._compute_normalization_constant(beta, n)
l = IndexedBase('l')
i = Dummy('i', integer=True, positive=True)
j = Dummy('j', integer=True, positive=True)
k = Dummy('k', integer=True, positive=True)
term1 = exp((-S(n)/2) * Sum(l[k]**2, (k, 1, n)).doit())
sub_term = Lambda(i, Product(Abs(l[j] - l[i])**beta, (j, i + 1, n)))
term2 = Product(sub_term(i).doit(), (i, 1, n - 1)).doit()
syms = ArrayComprehension(l[k], (k, 1, n)).doit()
return Lambda(tuple(syms), (term1 * term2)/Zbn)
class GaussianUnitaryEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
return 2**(S(n)/2) * pi**(S(n**2)/2)
def density(self, expr):
n, ZGUE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/2 * Trace(H**2))/ZGUE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
def level_spacing_distribution(self):
s = Dummy('s')
f = (32/pi**2)*(s**2)*exp((-4/pi)*s**2)
return Lambda(s, f)
class GaussianOrthogonalEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n)/4 * Trace(_H**2)))
def density(self, expr):
n, ZGOE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/4 * Trace(H**2))/ZGOE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
def level_spacing_distribution(self):
s = Dummy('s')
f = (pi/2)*s*exp((-pi/4)*s**2)
return Lambda(s, f)
class GaussianSymplecticEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n) * Trace(_H**2)))
def density(self, expr):
n, ZGSE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n) * Trace(H**2))/ZGSE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def level_spacing_distribution(self):
s = Dummy('s')
f = ((S(2)**18)/((S(3)**6)*(pi**3)))*(s**4)*exp((-64/(9*pi))*s**2)
return Lambda(s, f)
def GaussianEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianUnitaryEnsemble(sym, dim):
"""
Represents Gaussian Unitary Ensembles.
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE, density
>>> from sympy import MatrixSymbol
>>> G = GUE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-Trace(X**2))/(2*pi**2)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianUnitaryEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianOrthogonalEnsemble(sym, dim):
"""
Represents Gaussian Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import GaussianOrthogonalEnsemble as GOE, density
>>> from sympy import MatrixSymbol
>>> G = GOE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-Trace(X**2)/2)/Integral(exp(-Trace(_H**2)/2), _H)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianOrthogonalEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianSymplecticEnsemble(sym, dim):
"""
Represents Gaussian Symplectic Ensembles.
Examples
========
>>> from sympy.stats import GaussianSymplecticEnsemble as GSE, density
>>> from sympy import MatrixSymbol
>>> G = GSE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-2*Trace(X**2))/Integral(exp(-2*Trace(_H**2)), _H)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
class CircularEnsembleModel(RandomMatrixEnsembleModel):
"""
Abstract class for Circular ensembles.
Contains the properties and methods
common to all the circular ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Circular_ensemble
"""
def density(self, expr):
# TODO : Add support for Lie groups(as extensions of sympy.diffgeom)
# and define measures on them
raise NotImplementedError("Support for Haar measure hasn't been "
"implemented yet, therefore the density of "
"%s cannot be computed."%(self))
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function to compute the joint distribution of phases
of the complex eigen values of matrices belonging to any
circular ensembles.
"""
n = self.dimension
Zbn = ((2*pi)**n)*(gamma(beta*n/2 + 1)/S(gamma(beta/2 + 1))**n)
t = IndexedBase('t')
i, j, k = (Dummy('i', integer=True), Dummy('j', integer=True),
Dummy('k', integer=True))
syms = ArrayComprehension(t[i], (i, 1, n)).doit()
f = Product(Product(Abs(exp(I*t[k]) - exp(I*t[j]))**beta, (j, k + 1, n)).doit(),
(k, 1, n - 1)).doit()
return Lambda(tuple(syms), f/Zbn)
class CircularUnitaryEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
class CircularOrthogonalEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
class CircularSymplecticEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def CircularEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularUnitaryEnsemble(sym, dim):
"""
Represents Circular Unitary Ensembles.
Examples
========
>>> from sympy.stats import CircularUnitaryEnsemble as CUE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CUE('U', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
    As can be seen above in the example, the density of CircularUnitaryEnsemble
    is not evaluated because the exact definition is based on the Haar measure
    of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularUnitaryEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularOrthogonalEnsemble(sym, dim):
"""
Represents Circular Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import CircularOrthogonalEnsemble as COE
>>> from sympy.stats import joint_eigen_distribution
>>> C = COE('O', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k])), (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
    As can be seen above in the example, the density of CircularOrthogonalEnsemble
    is not evaluated because the exact definition is based on the Haar measure
    of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularOrthogonalEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularSymplecticEnsemble(sym, dim):
"""
Represents Circular Symplectic Ensembles.
Examples
========
>>> from sympy.stats import CircularSymplecticEnsemble as CSE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CSE('S', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
    As can be seen above in the example, the density of CircularSymplecticEnsemble
    is not evaluated because the exact definition is based on the Haar measure
    of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def joint_eigen_distribution(mat):
"""
For obtaining joint probability distribution
of eigen values of random matrix.
Parameters
==========
mat: RandomMatrixSymbol
The matrix symbol whose eigen values are to be considered.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import joint_eigen_distribution
>>> U = GUE('U', 2)
>>> joint_eigen_distribution(U)
Lambda((l[1], l[2]), exp(-l[1]**2 - l[2]**2)*Product(Abs(l[_i] - l[_j])**2, (_j, _i + 1, 2), (_i, 1, 1))/pi)
"""
if not isinstance(mat, RandomMatrixSymbol):
raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
return mat.pspace.model.joint_eigen_distribution()
def JointEigenDistribution(mat):
"""
Creates joint distribution of eigen values of matrices with random
expressions.
Parameters
==========
mat: Matrix
The matrix under consideration.
Returns
=======
JointDistributionHandmade
Examples
========
>>> from sympy.stats import Normal, JointEigenDistribution
>>> from sympy import Matrix
>>> A = [[Normal('A00', 0, 1), Normal('A01', 0, 1)],
... [Normal('A10', 0, 1), Normal('A11', 0, 1)]]
>>> JointEigenDistribution(Matrix(A))
JointDistributionHandmade(-sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2
+ A00/2 + A11/2, sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2 + A00/2 + A11/2)
"""
eigenvals = mat.eigenvals(multiple=True)
if not all(is_random(eigenval) for eigenval in set(eigenvals)):
raise ValueError("Eigen values do not have any random expression, "
"joint distribution cannot be generated.")
return JointDistributionHandmade(*eigenvals)
def level_spacing_distribution(mat):
"""
For obtaining distribution of level spacings.
Parameters
==========
mat: RandomMatrixSymbol
The random matrix symbol whose eigen values are
to be considered for finding the level spacings.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import level_spacing_distribution
>>> U = GUE('U', 2)
>>> level_spacing_distribution(U)
Lambda(_s, 32*_s**2*exp(-4*_s**2/pi)/pi**2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Distribution_of_level_spacings
"""
return mat.pspace.model.level_spacing_distribution()
|
4332091478c51b69dc229a09ec218ccc9e87b9d56a07644da76c811c18e44b3f | from sympy.core.numbers import igcd, mod_inverse
from sympy.core.power import integer_nthroot
from sympy.ntheory.residue_ntheory import _sqrt_mod_prime_power
from sympy.ntheory import isprime
from math import log, sqrt
import random
rgen = random.Random()
class SievePolynomial:
def __init__(self, modified_coeff=(), a=None, b=None):
"""This class denotes the seive polynomial.
If ``g(x) = (a*x + b)**2 - N``. `g(x)` can be expanded
to ``a*x**2 + 2*a*b*x + b**2 - N``, so the coefficient
is stored in the form `[a**2, 2*a*b, b**2 - N]`. This
ensures faster `eval` method because we dont have to
perform `a**2, 2*a*b, b**2` every time we call the
`eval` method. As multiplication is more expensive
than addition, by using modified_coefficient we get
a faster seiving process.
Parameters
==========
modified_coeff : modified_coefficient of sieve polynomial
a : parameter of the sieve polynomial
b : parameter of the sieve polynomial
"""
self.modified_coeff = modified_coeff
self.a = a
self.b = b
def eval(self, x):
"""
Compute the value of the sieve polynomial at point x.
Parameters
==========
x : Integer parameter for sieve polynomial
"""
ans = 0
for coeff in self.modified_coeff:
ans *= x
ans += coeff
return ans
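# Small illustrative check (the helper name is illustrative only, not part of
# the sieve): with a = 3, b = 5 and N = 100 the stored coefficients are
# [9, 30, -75] and Horner evaluation reproduces (a*x + b)**2 - N.
def _example_sieve_polynomial():
    a, b, N = 3, 5, 100
    g = SievePolynomial([a*a, 2*a*b, b*b - N], a, b)
    assert g.eval(7) == (a*7 + b)**2 - N == 576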
class FactorBaseElem:
"""This class stores an element of the `factor_base`.
"""
def __init__(self, prime, tmem_p, log_p):
"""
Initialization of factor_base_elem.
Parameters
==========
prime : prime number of the factor_base
        tmem_p : square root of n modulo prime (a solution of ``x**2 = n mod prime``)
        log_p : scaled natural logarithm of the prime
"""
self.prime = prime
self.tmem_p = tmem_p
self.log_p = log_p
self.soln1 = None
self.soln2 = None
self.a_inv = None
self.b_ainv = None
def _generate_factor_base(prime_bound, n):
"""Generate `factor_base` for Quadratic Sieve. The `factor_base`
consists of all the points whose ``legendre_symbol(n, p) == 1``
and ``p < num_primes``. Along with the prime `factor_base` also stores
natural logarithm of prime and the residue n modulo p.
It also returns the of primes numbers in the `factor_base` which are
close to 1000 and 5000.
Parameters
==========
prime_bound : upper prime bound of the factor_base
n : integer to be factored
"""
from sympy.ntheory.generate import sieve
factor_base = []
idx_1000, idx_5000 = None, None
for prime in sieve.primerange(1, prime_bound):
if pow(n, (prime - 1) // 2, prime) == 1:
if prime > 1000 and idx_1000 is None:
idx_1000 = len(factor_base) - 1
if prime > 5000 and idx_5000 is None:
idx_5000 = len(factor_base) - 1
residue = _sqrt_mod_prime_power(n, prime, 1)[0]
log_p = round(log(prime)*2**10)
factor_base.append(FactorBaseElem(prime, residue, log_p))
return idx_1000, idx_5000, factor_base
def _initialize_first_polynomial(N, M, factor_base, idx_1000, idx_5000, seed=None):
"""This step is the initialization of the 1st sieve polynomial.
    Here `a` is selected as a product of several primes of the factor_base
    such that `a` is approximately ``sqrt(2*N) / M``. Other initial values of
    the factor_base elements are also initialized, including a_inv, b_ainv,
    soln1 and soln2, which are used when the sieve polynomial is changed.
    The b_ainv is required for a fast polynomial change, as we do not have to
    calculate `2*b*mod_inverse(a, prime)` every time.
We also ensure that the `factor_base` primes which make `a` are between
1000 and 5000.
Parameters
==========
N : Number to be factored
M : sieve interval
factor_base : factor_base primes
idx_1000 : index of prime number in the factor_base near 1000
idx_5000 : index of prime number in the factor_base near to 5000
    seed : seed for the pseudo-random number generator used to choose `a`
"""
if seed is not None:
rgen.seed(seed)
approx_val = sqrt(2*N) / M
# `a` is a parameter of the sieve polynomial and `q` is the prime factors of `a`
# randomly search for a combination of primes whose multiplication is close to approx_val
# This multiplication of primes will be `a` and the primes will be `q`
# `best_a` denotes that `a` is close to approx_val in the random search of combination
best_a, best_q, best_ratio = None, None, None
start = 0 if idx_1000 is None else idx_1000
end = len(factor_base) - 1 if idx_5000 is None else idx_5000
for _ in range(50):
a = 1
q = []
while(a < approx_val):
rand_p = 0
while(rand_p == 0 or rand_p in q):
rand_p = rgen.randint(start, end)
p = factor_base[rand_p].prime
a *= p
q.append(rand_p)
ratio = a / approx_val
if best_ratio is None or abs(ratio - 1) < abs(best_ratio - 1):
best_q = q
best_a = a
best_ratio = ratio
a = best_a
q = best_q
B = []
for idx, val in enumerate(q):
q_l = factor_base[val].prime
gamma = factor_base[val].tmem_p * mod_inverse(a // q_l, q_l) % q_l
if gamma > q_l / 2:
gamma = q_l - gamma
B.append(a//q_l*gamma)
b = sum(B)
g = SievePolynomial([a*a, 2*a*b, b*b - N], a, b)
for fb in factor_base:
if a % fb.prime == 0:
continue
fb.a_inv = mod_inverse(a, fb.prime)
fb.b_ainv = [2*b_elem*fb.a_inv % fb.prime for b_elem in B]
fb.soln1 = (fb.a_inv*(fb.tmem_p - b)) % fb.prime
fb.soln2 = (fb.a_inv*(-fb.tmem_p - b)) % fb.prime
return g, B
def _initialize_ith_poly(N, factor_base, i, g, B):
"""Initialization stage of ith poly. After we finish sieving 1`st polynomial
here we quickly change to the next polynomial from which we will again
start sieving. Suppose we generated ith sieve polynomial and now we
want to generate (i + 1)th polynomial, where ``1 <= i <= 2**(j - 1) - 1``
where `j` is the number of prime factors of the coefficient `a`
then this function can be used to go to the next polynomial. If
``i = 2**(j - 1) - 1`` then go to _initialize_first_polynomial stage.
Parameters
==========
N : number to be factored
factor_base : factor_base primes
i : integer denoting ith polynomial
g : (i - 1)th polynomial
B : array that stores a//q_l*gamma
"""
from sympy.functions.elementary.integers import ceiling
v = 1
j = i
while(j % 2 == 0):
v += 1
j //= 2
if ceiling(i / (2**v)) % 2 == 1:
neg_pow = -1
else:
neg_pow = 1
b = g.b + 2*neg_pow*B[v - 1]
a = g.a
g = SievePolynomial([a*a, 2*a*b, b*b - N], a, b)
for fb in factor_base:
if a % fb.prime == 0:
continue
fb.soln1 = (fb.soln1 - neg_pow*fb.b_ainv[v - 1]) % fb.prime
fb.soln2 = (fb.soln2 - neg_pow*fb.b_ainv[v - 1]) % fb.prime
return g
def _gen_sieve_array(M, factor_base):
"""Sieve Stage of the Quadratic Sieve. For every prime in the factor_base
that does not divide the coefficient `a` we add log_p over the sieve_array
such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`
is an integer. When p = 2 then log_p is only added using
``-M <= soln1 + i*p <= M``.
Parameters
==========
M : sieve interval
factor_base : factor_base primes
"""
sieve_array = [0]*(2*M + 1)
for factor in factor_base:
        if factor.soln1 is None:  # the prime divides `a`; it was skipped when the polynomial was initialized
continue
for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
        # if the prime is 2 then sieve only with soln1
        if factor.prime == 2:
            continue
for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
return sieve_array
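# Toy version of the sieving loop above (illustrative only). It accepts any
# objects exposing .prime, .log_p, .soln1 and .soln2, which are hypothetical
# stand-ins for the module's factor base elements; primes dividing `a` are
# assumed to have been filtered out already.
def _toy_sieve_sketch(M, fb_elems):
    arr = [0]*(2*M + 1)
    for f in fb_elems:
        # for p = 2 only soln1 is used, exactly as in _gen_sieve_array
        solns = (f.soln1,) if f.prime == 2 else (f.soln1, f.soln2)
        for soln in solns:
            for idx in range((M + soln) % f.prime, 2*M, f.prime):
                arr[idx] += f.log_p
    return arr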
def _check_smoothness(num, factor_base):
"""Here we check that if `num` is a smooth number or not. If `a` is a smooth
number then it returns a vector of prime exponents modulo 2. For example
if a = 2 * 5**2 * 7**3 and the factor base contains {2, 3, 5, 7} then
`a` is a smooth number and this function returns ([1, 0, 0, 1], True). If
`a` is a partial relation which means that `a` a has one prime factor
greater than the `factor_base` then it returns `(a, False)` which denotes `a`
is a partial relation.
Parameters
==========
a : integer whose smootheness is to be checked
factor_base : factor_base primes
"""
vec = []
if num < 0:
vec.append(1)
num *= -1
else:
vec.append(0)
    # the sign of num (-1 is not part of the factor_base) is recorded as the first entry
for factor in factor_base:
if num % factor.prime != 0:
vec.append(0)
continue
factor_exp = 0
while num % factor.prime == 0:
factor_exp += 1
num //= factor.prime
vec.append(factor_exp % 2)
if num == 1:
return vec, True
if isprime(num):
return num, False
return None, None
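# Minimal sketch of the same exponent-vector-mod-2 idea over a plain list of
# primes (hypothetical helper; the real routine above works on the module's
# factor base elements instead).
def _toy_smooth_vector_sketch(num, primes):
    from sympy import isprime
    vec = [1 if num < 0 else 0]   # sign entry, since -1 is not a factor base prime
    num = abs(num)
    for p in primes:
        e = 0
        while num % p == 0:
            e += 1
            num //= p
        vec.append(e % 2)
    if num == 1:
        return vec, True    # e.g. (50, [2, 3, 5, 7]) -> ([0, 1, 0, 0, 0], True)
    if isprime(num):
        return num, False   # partial relation with one large prime left over
    return None, None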
def _trial_division_stage(N, M, factor_base, sieve_array, sieve_poly, partial_relations, ERROR_TERM):
"""Trial division stage. Here we trial divide the values generetated
by sieve_poly in the sieve interval and if it is a smooth number then
it is stored in `smooth_relations`. Moreover, if we find two partial relations
with same large prime then they are combined to form a smooth relation.
First we iterate over sieve array and look for values which are greater
than accumulated_val, as these values have a high chance of being smooth
number. Then using these values we find smooth relations.
In general, let ``t**2 = u*p modN`` and ``r**2 = v*p modN`` be two partial relations
with the same large prime p. Then they can be combined ``(t*r/p)**2 = u*v modN``
to form a smooth relation.
Parameters
==========
N : Number to be factored
M : sieve interval
factor_base : factor_base primes
sieve_array : stores log_p values
sieve_poly : polynomial from which we find smooth relations
partial_relations : stores partial relations with one large prime
ERROR_TERM : error term for accumulated_val
"""
sqrt_n = sqrt(float(N))
accumulated_val = log(M * sqrt_n)*2**10 - ERROR_TERM
smooth_relations = []
proper_factor = set()
partial_relation_upper_bound = 128*factor_base[-1].prime
for idx, val in enumerate(sieve_array):
if val < accumulated_val:
continue
x = idx - M
v = sieve_poly.eval(x)
vec, is_smooth = _check_smoothness(v, factor_base)
if is_smooth is None:#Neither smooth nor partial
continue
u = sieve_poly.a*x + sieve_poly.b
        # Update the partial relations.
        # If two partial relations with the same large prime are found,
        # they are combined to generate a smooth relation.
if is_smooth is False:#partial relation found
large_prime = vec
            # only consider large primes below 128 * (the largest factor_base prime)
if large_prime > partial_relation_upper_bound:
continue
if large_prime not in partial_relations:
partial_relations[large_prime] = (u, v)
continue
else:
u_prev, v_prev = partial_relations[large_prime]
partial_relations.pop(large_prime)
try:
large_prime_inv = mod_inverse(large_prime, N)
                except ValueError:  # large_prime divides N
proper_factor.add(large_prime)
continue
u = u*u_prev*large_prime_inv
v = v*v_prev // (large_prime*large_prime)
vec, is_smooth = _check_smoothness(v, factor_base)
#assert u*u % N == v % N
smooth_relations.append((u, v, vec))
return smooth_relations, proper_factor
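# Sketch of the partial-relation combination used above (hypothetical helper,
# shown only for clarity): given t**2 = u*p (mod N) and r**2 = v*p (mod N)
# with gcd(p, N) = 1, the two partials combine into (t*r/p)**2 = u*v (mod N).
def _combine_partials_sketch(N, t, u, r, v, p):
    from sympy import mod_inverse
    X = t*r*mod_inverse(p, N) % N
    # holds whenever the preconditions above are satisfied
    assert (X*X - u*v) % N == 0
    return X, u*v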
#LINEAR ALGEBRA STAGE
def _build_matrix(smooth_relations):
"""Build a 2D matrix from smooth relations.
Parameters
==========
smooth_relations : Stores smooth relations
"""
matrix = []
for s_relation in smooth_relations:
matrix.append(s_relation[2])
return matrix
def _gauss_mod_2(A):
"""Fast gaussian reduction for modulo 2 matrix.
Parameters
==========
A : Matrix
Examples
========
>>> from sympy.ntheory.qs import _gauss_mod_2
>>> _gauss_mod_2([[0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 1, 1]])
([[[1, 0, 1], 3]],
[True, True, True, False],
[[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1]])
    References
==========
    .. [1] A fast algorithm for Gaussian elimination over GF(2) and
        its implementation on the GAPP. Cetin K. Koc, Sarath N. Arachchige"""
import copy
matrix = copy.deepcopy(A)
row = len(matrix)
col = len(matrix[0])
mark = [False]*row
for c in range(col):
for r in range(row):
if matrix[r][c] == 1:
break
mark[r] = True
for c1 in range(col):
if c1 == c:
continue
if matrix[r][c1] == 1:
for r2 in range(row):
matrix[r2][c1] = (matrix[r2][c1] + matrix[r2][c]) % 2
dependent_row = []
for idx, val in enumerate(mark):
        if not val:
dependent_row.append([matrix[idx], idx])
return dependent_row, mark, matrix
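# Illustrative check of how the output of _gauss_mod_2 is consumed (this is
# essentially what _find_factor below does with the exponent vectors): the
# rows selected for a dependent row sum to the zero vector modulo 2, using the
# matrix from the doctest above.
def _null_combination_sketch():
    A = [[0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 1, 1]]
    dependent, mark, reduced = _gauss_mod_2(A)
    dep_row, dep_idx = dependent[0]
    rows = [dep_idx]
    for col, bit in enumerate(dep_row):
        if bit == 1:
            for r in range(len(reduced)):
                if mark[r] and reduced[r][col] == 1:
                    rows.append(r)
                    break
    combo = [sum(A[r][c] for r in rows) % 2 for c in range(len(A[0]))]
    return rows, combo    # ([3, 1, 2], [0, 0, 0])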
def _find_factor(dependent_rows, mark, gauss_matrix, index, smooth_relations, N):
"""Finds proper factor of N. Here, transform the dependent rows as a
combination of independent rows of the gauss_matrix to form the desired
relation of the form ``X**2 = Y**2 modN``. After obtaining the desired relation
we obtain a proper factor of N by `gcd(X - Y, N)`.
Parameters
==========
dependent_rows : denoted dependent rows in the reduced matrix form
mark : boolean array to denoted dependent and independent rows
gauss_matrix : Reduced form of the smooth relations matrix
index : denoted the index of the dependent_rows
smooth_relations : Smooth relations vectors matrix
N : Number to be factored
"""
idx_in_smooth = dependent_rows[index][1]
independent_u = [smooth_relations[idx_in_smooth][0]]
independent_v = [smooth_relations[idx_in_smooth][1]]
dept_row = dependent_rows[index][0]
for idx, val in enumerate(dept_row):
if val == 1:
for row in range(len(gauss_matrix)):
                if gauss_matrix[row][idx] == 1 and mark[row]:
independent_u.append(smooth_relations[row][0])
independent_v.append(smooth_relations[row][1])
break
u = 1
v = 1
for i in independent_u:
u *= i
for i in independent_v:
v *= i
#assert u**2 % N == v % N
v = integer_nthroot(v, 2)[0]
return igcd(u - v, N)
def qs(N, prime_bound, M, ERROR_TERM=25, seed=1234):
"""Performs factorization using Self-Initializing Quadratic Sieve.
In SIQS, let N be a number to be factored, and this N should not be a
perfect power. If we find two integers such that ``X**2 = Y**2 modN`` and
``X != +-Y modN``, then `gcd(X + Y, N)` will reveal a proper factor of N.
In order to find these integers X and Y we try to find relations of form
t**2 = u modN where u is a product of small primes. If we have enough of
these relations then we can form ``(t1*t2...ti)**2 = u1*u2...ui modN`` such that
the right hand side is a square, thus we found a relation of ``X**2 = Y**2 modN``.
Here, several optimizations are done like using multiple polynomials for
sieving, fast changing between polynomials and using partial relations.
The use of partial relations can speeds up the factoring by 2 times.
Parameters
==========
N : Number to be Factored
prime_bound : upper bound for primes in the factor base
M : Sieve Interval
ERROR_TERM : Error term for checking smoothness
    threshold : extra smooth relations required before the linear algebra stage (set internally to 5% of the factor base size)
    seed : seed for the pseudo-random number generator
Examples
========
>>> from sympy.ntheory import qs
>>> qs(25645121643901801, 2000, 10000)
{5394769, 4753701529}
>>> qs(9804659461513846513, 2000, 10000)
{4641991, 2112166839943}
References
==========
.. [1] https://pdfs.semanticscholar.org/5c52/8a975c1405bd35c65993abf5a4edb667c1db.pdf
.. [2] https://www.rieselprime.de/ziki/Self-initializing_quadratic_sieve
"""
    ERROR_TERM *= 2**10
rgen.seed(seed)
idx_1000, idx_5000, factor_base = _generate_factor_base(prime_bound, N)
smooth_relations = []
ith_poly = 0
partial_relations = {}
proper_factor = set()
threshold = 5*len(factor_base) // 100
while True:
if ith_poly == 0:
ith_sieve_poly, B_array = _initialize_first_polynomial(N, M, factor_base, idx_1000, idx_5000)
else:
ith_sieve_poly = _initialize_ith_poly(N, factor_base, ith_poly, ith_sieve_poly, B_array)
ith_poly += 1
if ith_poly >= 2**(len(B_array) - 1): # time to start with a new sieve polynomial
ith_poly = 0
sieve_array = _gen_sieve_array(M, factor_base)
s_rel, p_f = _trial_division_stage(N, M, factor_base, sieve_array, ith_sieve_poly, partial_relations, ERROR_TERM)
smooth_relations += s_rel
proper_factor |= p_f
if len(smooth_relations) >= len(factor_base) + threshold:
break
matrix = _build_matrix(smooth_relations)
dependent_row, mark, gauss_matrix = _gauss_mod_2(matrix)
N_copy = N
for index in range(len(dependent_row)):
factor = _find_factor(dependent_row, mark, gauss_matrix, index, smooth_relations, N)
if factor > 1 and factor < N:
proper_factor.add(factor)
while(N_copy % factor == 0):
N_copy //= factor
if isprime(N_copy):
proper_factor.add(N_copy)
break
if(N_copy == 1):
break
return proper_factor
from sympy.combinatorics import Permutation
from sympy.combinatorics.util import _distribute_gens_by_base
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
Explanation
===========
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
    """
Return a list of elements for the centralizer of a subgroup/set/element.
Explanation
===========
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, 'getitem'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
Explanation
===========
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
    """
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types.
Explanation
===========
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Parameters
==========
g : Permutation representing the tensor.
dummies : List of dummy indices.
    sym : Symmetry of the metric.
v : A list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`.
base_i, gens_i BSGS for tensors of this type
n_i number of tensors of type `i`
Returns
=======
Returns 0 if the tensor is zero, else returns the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
Parameters
==========
gr : adjacency list
Explanation
===========
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
from math import factorial as _factorial, log, prod
from itertools import chain, islice, product
from sympy.combinatorics import Permutation
from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert,
_af_rmul, _af_rmuln, _af_pow, Cycle)
from sympy.combinatorics.util import (_check_cycles_alt_sym,
_distribute_gens_by_base, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr,
_strip, _strip_af)
from sympy.core import Basic
from sympy.core.random import _randrange, randrange, choice
from sympy.core.symbol import Symbol
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.factorials import factorial
from sympy.ntheory import primefactors, sieve
from sympy.ntheory.factor_ import (factorint, multiplicity)
from sympy.ntheory.primetest import isprime
from sympy.utilities.iterables import has_variety, is_sequence, uniq
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
r"""The class defining a Permutation group.
Explanation
===========
``PermutationGroup([p1, p2, ..., pn])`` returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics import Polyhedron
The permutations corresponding to motion of the front, right and
bottom face of a $2 \times 2$ Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the $2 \times 2$ Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
.. [2] Seress, A.
"Permutation Group Algorithms"
.. [3] https://en.wikipedia.org/wiki/Schreier_vector
.. [4] https://en.wikipedia.org/wiki/Nielsen_transformation#Product_replacement_algorithm
.. [5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
.. [6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
.. [7] http://www.algorithmist.com/index.php/Union_Find
.. [8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
.. [9] https://en.wikipedia.org/wiki/Center_%28group_theory%29
.. [10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer
.. [11] http://groupprops.subwiki.org/wiki/Derived_subgroup
.. [12] https://en.wikipedia.org/wiki/Nilpotent_group
.. [13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
.. [14] https://www.gap-system.org/Manuals/doc/ref/manual.pdf
"""
is_group = True
def __new__(cls, *args, dups=True, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is ``False``.
"""
if not args:
args = [Permutation()]
else:
args = list(args[0] if is_sequence(args[0]) else args)
if not args:
args = [Permutation()]
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if dups:
args = list(uniq([_af_new(list(a)) for a in args]))
if len(args) > 1:
args = [g for g in args if not g.is_identity]
return Basic.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
self._generators = list(self.args)
self._order = None
self._center = []
self._is_abelian = None
self._is_transitive = None
self._is_sym = None
self._is_alt = None
self._is_primitive = None
self._is_nilpotent = None
self._is_solvable = None
self._is_trivial = None
self._transitivity_degree = None
self._max_div = None
self._is_perfect = None
self._is_cyclic = None
self._is_dihedral = None
self._r = len(self._generators)
self._degree = self._generators[0].size
# these attributes are assigned after running schreier_sims
self._base = []
self._strong_gens = []
self._strong_gens_slp = []
self._basic_orbits = []
self._transversals = []
self._transversal_slp = []
# these attributes are assigned after running _random_pr_init
self._random_gens = []
# finite presentation of the group as an instance of `FpGroup`
self._fp_presentation = None
def __getitem__(self, i):
return self._generators[i]
def __contains__(self, i):
"""Return ``True`` if *i* is contained in PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = Permutation(1, 2, 3)
>>> Permutation(3) in PermutationGroup(p)
True
"""
if not isinstance(i, Permutation):
raise TypeError("A PermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return self.contains(i)
def __len__(self):
return len(self._generators)
def equals(self, other):
"""Return ``True`` if PermutationGroup generated by elements in the
group are same i.e they represent the same PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G.equals(H)
True
"""
if not isinstance(other, PermutationGroup):
return False
set_self_gens = set(self.generators)
set_other_gens = set(other.generators)
# before reaching the general case there are also certain
# optimisation and obvious cases requiring less or no actual
# computation.
if set_self_gens == set_other_gens:
return True
# in the most general case it will check that each generator of
# one group belongs to the other PermutationGroup and vice-versa
for gen1 in set_self_gens:
if not other.contains(gen1):
return False
for gen2 in set_other_gens:
if not self.contains(gen2):
return False
return True
def __mul__(self, other):
"""
Return the direct product of two permutation groups as a permutation
group.
Explanation
===========
This implementation realizes the direct product by shifting the index
set for the generators of the second group: so if we have ``G`` acting
on ``n1`` points and ``H`` acting on ``n2`` points, ``G*H`` acts on
``n1 + n2`` points.
Examples
========
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
(9)(0 1 2 3 4),
(5 6 7 8 9)])
>>> H.order()
25
"""
if isinstance(other, Permutation):
return Coset(other, self, dir='+')
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
Explanation
===========
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group `G` with a set of generators
`S`. For the initialization ``_random_pr_init``, a list ``R`` of
`\max\{r, |S|\}` group generators is created as the attribute
``G._random_gens``, repeating elements of `S` if necessary, and the
identity element of `G` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of `G` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across `G` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
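    # Illustrative sketch of one product replacement step described above,
    # kept entirely in comments so that the class body is unchanged; the
    # actual update is performed by ``random_pr``. With R = self._random_gens
    # and the accumulator acc = R[-1]:
    #
    #     i, j = two distinct random indices in range(len(R) - 1)
    #     g, h = R[i], R[j]
    #     R[i] = one of g*h, g*(~h), h*g, (~h)*g, chosen at random
    #     acc  = acc multiplied by the new R[i]   # R[-1] is updated in place
    #     return acc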
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] http://www.algorithmist.com/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] http://www.algorithmist.com/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
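    # A standalone sketch of the find-with-path-compression idea used above,
    # written against a plain ``parents`` list (illustrative only, independent
    # of this class):
    #
    #     def find(x, parents):
    #         root = x
    #         while parents[root] != root:
    #             root = parents[root]
    #         while parents[x] != root:          # compress the path to the root
    #             parents[x], x = root, parents[x]
    #         return root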
@property
def base(self):
r"""Return a base from the Schreier-Sims algorithm.
Explanation
===========
For a permutation group `G`, a base is a sequence of points
`B = (b_1, b_2, \dots, b_k)` such that no element of `G` apart
from the identity fixes all the points in `B`. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of `B` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
Explanation
===========
If a base for a group `G` is given by `(b_1, b_2, \dots, b_k)`, this
function returns a base `(b_1, b_2, \dots, b_{i+1}, b_i, \dots, b_k)`,
where `i` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[(0 1 2 3), (3)(0 1), (1 3 2),
(2 3), (1 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
`|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by
`|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
//len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
            # add elements of the stabilizer until they generate it by
            # ruling out members of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
r"""
Return the basic orbits relative to a base and strong generating set.
Explanation
===========
If `(b_1, b_2, \dots, b_k)` is a base for a group `G`, and
`G^{(i)} = G_{b_1, b_2, \dots, b_{i-1}}` is the ``i``-th basic stabilizer
(so that `G^{(1)} = G`), the ``i``-th basic orbit relative to this base
is the orbit of `b_i` under `G^{(i)}`. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
r"""
Return a chain of stabilizers relative to a base and strong generating
set.
Explanation
===========
The ``i``-th basic stabilizer `G^{(i)}` relative to a base
`(b_1, b_2, \dots, b_k)` is `G_{b_1, b_2, \dots, b_{i-1}}`. For more
information, see [1], pp. 87-89.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
(3)(0 1 2),
(1 2 3)])
PermutationGroup([
(1 2 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
if not base: # e.g. if self is trivial
return []
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
Explanation
===========
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def composition_series(self):
r"""
Return the composition series for a group as a list
of permutation groups.
Explanation
===========
The composition series for a group `G` is defined as a
        subnormal series `G = H_0 > H_1 > H_2 > \ldots`. A composition
        series is a subnormal series such that each factor group
        `H(i) / H(i+1)` is simple.
A subnormal series is a composition series only if it is of
maximum length.
The algorithm works as follows:
Starting with the derived series the idea is to fill
the gap between `G = der[i]` and `H = der[i+1]` for each
        `i` independently. Since all subgroups of the abelian group
        `G/H` are normal, the first step is to take the generators
        `g` of `G` and add them to the generators of `H` one by one.
The factor groups formed are not simple in general. Each
group is obtained from the previous one by adding one
generator `g`, if the previous group is denoted by `H`
then the next group `K` is generated by `g` and `H`.
        The factor group `K/H` is cyclic and its order is
        `K.order()//H.order()`. The series is then extended between
`K` and `H` by groups generated by powers of `g` and `H`.
The series formed is then prepended to the already existing
series.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> S = SymmetricGroup(12)
>>> G = S.sylow_subgroup(2)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
>>> G = S.sylow_subgroup(3)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[243, 81, 27, 9, 3, 1]
>>> G = CyclicGroup(12)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[12, 6, 3, 1]
"""
der = self.derived_series()
if not all(g.is_identity for g in der[-1].generators):
raise NotImplementedError('Group should be solvable')
series = []
for i in range(len(der)-1):
H = der[i+1]
up_seg = []
for g in der[i].generators:
K = PermutationGroup([g] + H.generators)
order = K.order() // H.order()
down_seg = []
for p, e in factorint(order).items():
for _ in range(e):
down_seg.append(PermutationGroup([g] + H.generators))
g = g**p
up_seg = down_seg + up_seg
H = K
up_seg[0] = der[i]
series.extend(up_seg)
series.append(der[-1])
return series
def coset_transversal(self, H):
"""Return a transversal of the right cosets of self by its subgroup H
using the second method described in [1], Subsection 4.6.7
"""
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
if H.order() == 1:
return self._elements
self._schreier_sims(base=H.base) # make G.base an extension of H.base
base = self.base
base_ordering = _base_ordering(base, self.degree)
identity = Permutation(self.degree - 1)
transversals = self.basic_transversals[:]
# transversals is a list of dictionaries. Get rid of the keys
# so that it is a list of lists and sort each list in
# the increasing order of base[l]^x
for l, t in enumerate(transversals):
transversals[l] = sorted(t.values(),
key = lambda x: base_ordering[base[l]^x])
orbits = H.basic_orbits
h_stabs = H.basic_stabilizers
g_stabs = self.basic_stabilizers
indices = [x.order()//y.order() for x, y in zip(g_stabs, h_stabs)]
# T^(l) should be a right transversal of H^(l) in G^(l) for
# 1<=l<=len(base). While H^(l) is the trivial group, T^(l)
# contains all the elements of G^(l) so we might just as well
# start with l = len(h_stabs)-1
if len(g_stabs) > len(h_stabs):
T = g_stabs[len(h_stabs)]._elements
else:
T = [identity]
l = len(h_stabs)-1
t_len = len(T)
while l > -1:
T_next = []
for u in transversals[l]:
if u == identity:
continue
b = base_ordering[base[l]^u]
for t in T:
p = t*u
if all(base_ordering[h^p] >= b for h in orbits[l]):
T_next.append(p)
if t_len + len(T_next) == indices[l]:
break
if t_len + len(T_next) == indices[l]:
break
T += T_next
t_len += len(T_next)
l -= 1
T.remove(identity)
T = [identity] + T
return T
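    # Illustrative usage, kept as a comment (the choice of subgroup below is
    # just an example): the returned list contains one representative per
    # right coset, so its length equals the index of H in self.
    #
    #     >>> from sympy.combinatorics.named_groups import SymmetricGroup
    #     >>> S = SymmetricGroup(4)
    #     >>> H = S.sylow_subgroup(2)
    #     >>> T = S.coset_transversal(H)
    #     >>> len(T) == S.order() // H.order()
    #     True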
def _coset_representative(self, g, H):
"""Return the representative of Hg from the transversal that
would be computed by ``self.coset_transversal(H)``.
"""
if H.order() == 1:
return g
# The base of self must be an extension of H.base.
        if self.base[:len(H.base)] != H.base:
self._schreier_sims(base=H.base)
orbits = H.basic_orbits[:]
h_transversals = [list(_.values()) for _ in H.basic_transversals]
transversals = [list(_.values()) for _ in self.basic_transversals]
base = self.base
base_ordering = _base_ordering(base, self.degree)
def step(l, x):
gamma = sorted(orbits[l], key = lambda y: base_ordering[y^x])[0]
i = [base[l]^h for h in h_transversals[l]].index(gamma)
x = h_transversals[l][i]*x
if l < len(orbits)-1:
for u in transversals[l]:
if base[l]^u == base[l]^x:
break
x = step(l+1, x*u**-1)*u
return x
return step(0, g)
def coset_table(self, H):
"""Return the standardised (right) coset table of self in H as
a list of lists.
"""
# Maybe this should be made to return an instance of CosetTable
# from fp_groups.py but the class would need to be changed first
# to be compatible with PermutationGroups
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
T = self.coset_transversal(H)
n = len(T)
A = list(chain.from_iterable((gen, gen**-1)
for gen in self.generators))
table = []
for i in range(n):
row = [self._coset_representative(T[i]*x, H) for x in A]
row = [T.index(r) for r in row]
table.append(row)
# standardize (this is the same as the algorithm used in coset_table)
# If CosetTable is made compatible with PermutationGroups, this
# should be replaced by table.standardize()
A = range(len(A))
gamma = 1
for alpha, a in product(range(n), A):
beta = table[alpha][a]
if beta >= gamma:
if beta > gamma:
for x in A:
z = table[gamma][x]
table[gamma][x] = table[beta][x]
table[beta][x] = z
for i in range(n):
if table[i][x] == beta:
table[i][x] = gamma
elif table[i][x] == gamma:
table[i][x] = beta
gamma += 1
if gamma >= n-1:
return table
def center(self):
r"""
Return the center of a permutation group.
Explanation
===========
The center for a group `G` is defined as
`Z(G) = \{z\in G | \forall g\in G, zg = gz \}`,
the set of elements of `G` that commute with all elements of `G`.
It is equal to the centralizer of `G` inside `G`, and is naturally a
subgroup of `G` ([9]).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
Explanation
===========
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
`C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
trivial_test = lambda x: True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
Explanation
===========
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators `[g, h] = hgh^{-1}g^{-1}` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups `H, G` is equal to the normal closure
of the commutators of all the generators, i.e. `hgh^{-1}g^{-1}` for `h`
a generator of `H` and `g` a generator of `G` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
Explanation
===========
        If ``g`` is an element of ``G`` then it can be written as the product
        of permutations drawn from the Schreier-Sims coset decomposition.
        The permutations returned in ``f`` are those for which
        the product gives ``g``: ``g = f[n - 1]*...*f[1]*f[0]`` where
        ``n = len(B)`` and ``B = G.base``. f[i] is one of the transversal
        elements in ``self.basic_transversals[i]``.
If factor_index==True,
returns a tuple ``[b[0],..,b[n]]``, where ``b[i]``
belongs to ``self._basic_orbits[i]``
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from u. See below that a factor from u1 and u2
and the Identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True); f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
See Also
========
sympy.combinatorics.util._strip
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def generator_product(self, g, original=False):
r'''
Return a list of strong generators `[s1, \dots, sn]`
s.t `g = sn \times \dots \times s1`. If ``original=True``, make the
list contain only the original group generators
'''
product = []
if g.is_identity:
return []
if g in self.strong_gens:
if not original or g in self.generators:
return [g]
else:
slp = self._strong_gens_slp[g]
for s in slp:
product.extend(self.generator_product(s, original=True))
return product
elif g**-1 in self.strong_gens:
g = g**-1
if not original or g in self.generators:
return [g**-1]
else:
slp = self._strong_gens_slp[g]
for s in slp:
product.extend(self.generator_product(s, original=True))
l = len(product)
product = [product[l-i-1]**-1 for i in range(l)]
return product
f = self.coset_factor(g, True)
for i, j in enumerate(f):
slp = self._transversal_slp[i][j]
for s in slp:
if not original:
product.append(self.strong_gens[s])
else:
s = self.strong_gens[s]
product.extend(self.generator_product(s, original=True))
return product
def coset_rank(self, g):
"""rank using Schreier-Sims representation.
Explanation
===========
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
        coset decomposition.
        The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
(7)(2 4)(3 5)
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return None
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""unrank using Schreier-Sims representation
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
"""
if rank < 0 or rank >= self.order():
return None
base = self.base
transversals = self.basic_transversals
basic_orbits = self.basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
@property
def degree(self):
"""Returns the size of the permutations in the group.
Explanation
===========
        ``len(group)`` gives the number of generators supplied to the
        group, while the number of permutations that can be generated
        by the group is given by ``group.order()``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
See Also
========
order
"""
return self._degree
@property
def identity(self):
'''
Return the identity element of the permutation group.
'''
return _af_new(list(range(self.degree)))
@property
def elements(self):
"""Returns all the elements of the permutation group as a set
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2))
>>> p.elements
{(1 2 3), (1 3 2), (1 3), (2 3), (3), (3)(1 2)}
"""
return set(self._elements)
@property
def _elements(self):
"""Returns all the elements of the permutation group as a list
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2))
>>> p._elements
[(3), (3)(1 2), (1 3), (2 3), (1 2 3), (1 3 2)]
"""
return list(islice(self.generate(), None))
def derived_series(self):
r"""Return the derived series for the group.
Explanation
===========
The derived series for a group `G` is defined as
`G = G_0 > G_1 > G_2 > \ldots` where `G_i = [G_{i-1}, G_{i-1}]`,
i.e. `G_i` is the derived subgroup of `G_{i-1}`, for
`i\in\mathbb{N}`. When we have `G_k = G_{k-1}` for some
`k\in\mathbb{N}`, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order `G = G_0, G_1, G_2, \ldots`.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup, DihedralGroup)
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
nxt = self.derived_subgroup()
while not current.is_subgroup(nxt):
res.append(nxt)
current = nxt
nxt = nxt.derived_subgroup()
return res
def derived_subgroup(self):
r"""Compute the derived subgroup.
Explanation
===========
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
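# build a commutator of the two generators in array form:
# c is defined by c[p2[p1[k]]] = p1[p2[k]] for all k, so as a map
# c composed with p2 and p1 equals p1 composed with p2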
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if ct not in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method="coset", af=False):
"""Return iterator to generate the elements of the group.
Explanation
===========
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If ``af = True`` it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is also a
true group:
>>> G = tetrahedron.pgroup
>>> G.is_group
True
Also the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate())); J
PermutationGroup([
(0 1)(2 3),
(1 2 3),
(1 3 2),
(0 3 1),
(0 2 3),
(0 3)(1 2),
(0 1 3),
(3)(0 2 1),
(0 3 2),
(3)(0 1 2),
(0 2)(1 3)])
>>> _.is_group
True
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm.
If ``af == True`` it yields the array form of the permutations.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
References
==========
.. [1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = {tuple(idn)}
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
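# N holds the coset representatives found in the previous round;
# whenever a new representative ag is discovered, the whole coset
# obtained by multiplying ag with every element of D is adjoined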
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If ``af = True`` it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg is a stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
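# pos[h] is the index of the next coset representative to try at
# level h of the stabilizer chain; each element is produced as a
# product of one representative per transversal, in coset_rank order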
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[(1 2), (2)(0 1)]
"""
return self._generators
def contains(self, g, strict=True):
Test if permutation ``g`` belongs to self, ``G``.
Explanation
===========
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not ``True``, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, sympy.core.basic.Basic.has, __contains__
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_perfect(self):
"""Return ``True`` if the group is perfect.
A group is perfect if it equals its derived subgroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1,2,3)(4,5)
>>> b = Permutation(1,2,3,4,5)
>>> G = PermutationGroup([a, b])
>>> G.is_perfect
False
"""
if self._is_perfect is None:
self._is_perfect = self.equals(self.derived_subgroup())
return self._is_perfect
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def abelian_invariants(self):
"""
Returns the abelian invariants for the given group.
Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to
the direct product of finitely many nontrivial cyclic groups of
prime-power order.
Explanation
===========
The prime-powers that occur as the orders of the factors are uniquely
determined by G. More precisely, the primes that occur in the orders of the
factors in any such decomposition of ``G`` are exactly the primes that divide
``|G|`` and for any such prime ``p``, if the orders of the factors that are
p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``,
then the orders of the factors that are p-groups in any such decomposition of ``G``
are ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``.
The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``, taken
for all primes that divide ``|G|`` are called the invariants of the nontrivial
group ``G`` as suggested in ([14], p. 542).
Notes
=====
We adopt the convention that the invariants of a trivial group are [].
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.abelian_invariants()
[2]
>>> from sympy.combinatorics import CyclicGroup
>>> G = CyclicGroup(7)
>>> G.abelian_invariants()
[7]
"""
if self.is_trivial:
return []
gns = self.generators
inv = []
G = self
H = G.derived_subgroup()
Hgens = H.generators
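# for each prime p dividing |G|, repeatedly pass to the subgroup
# generated by the p-th powers of the current generators (together
# with the derived subgroup) and record the p-adic valuations of the
# successive indices; these give the multiplicities of the invariants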
for p in primefactors(G.order()):
ranks = []
while True:
pows = []
for g in gns:
elm = g**p
if not H.contains(elm):
pows.append(elm)
K = PermutationGroup(Hgens + pows) if pows else H
r = G.order()//K.order()
G = K
gns = pows
if r == 1:
break
ranks.append(multiplicity(p, r))
if ranks:
pows = [1]*ranks[0]
for i in ranks:
for j in range(i):
pows[j] = pows[j]*p
inv.extend(pows)
inv.sort()
return inv
def is_elementary(self, p):
"""Return ``True`` if the group is elementary abelian. An elementary
abelian group is a finite abelian group, where every nontrivial
element has order `p`, where `p` is a prime.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_elementary(2)
True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([3, 1, 2, 0])
>>> G = PermutationGroup([a, b])
>>> G.is_elementary(2)
True
>>> G.is_elementary(3)
False
"""
return self.is_abelian and all(g.order() == p for g in self.generators)
def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False):
"""A naive test using the group order."""
if only_sym and only_alt:
raise ValueError(
"Both {} and {} cannot be set to True"
.format(only_sym, only_alt))
n = self.degree
sym_order = _factorial(n)
order = self.order()
if order == sym_order:
self._is_sym = True
self._is_alt = False
if only_alt:
return False
return True
elif 2*order == sym_order:
self._is_sym = False
self._is_alt = True
if only_sym:
return False
return True
return False
def _eval_is_alt_sym_monte_carlo(self, eps=0.05, perms=None):
"""A test using monte-carlo algorithm.
Parameters
==========
eps : float, optional
The criterion for the incorrect ``False`` return.
perms : list[Permutation], optional
If explicitly given, it tests over the given candidates
for testing.
If ``None``, it randomly computes ``N_eps`` and chooses
``N_eps`` sample of the permutation from the group.
See Also
========
_check_cycles_alt_sym
"""
if perms is None:
n = self.degree
if n < 17:
c_n = 0.34
else:
c_n = 0.57
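# d_n is a lower bound for the proportion of elements of S_n (or A_n)
# containing a cycle of prime length p with n/2 < p < n-2; sampling
# N_eps random elements keeps the chance of a false negative below eps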
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
perms = (self.random_pr() for i in range(N_eps))
return self._eval_is_alt_sym_monte_carlo(perms=perms)
for perm in perms:
if _check_cycles_alt_sym(perm):
return True
return False
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
Explanation
===========
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability eps.
For degree < 8, the order of the group is checked so the test
is deterministic.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately `\log(2)/\log(n)`
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
_check_cycles_alt_sym
"""
if _random_prec is not None:
N_eps = _random_prec['N_eps']
perms = (_random_prec[i] for i in range(N_eps))
return self._eval_is_alt_sym_monte_carlo(perms=perms)
if self._is_sym or self._is_alt:
return True
if self._is_sym is False and self._is_alt is False:
return False
n = self.degree
if n < 8:
return self._eval_is_alt_sym_naive()
elif self.is_transitive():
return self._eval_is_alt_sym_monte_carlo(eps=eps)
self._is_sym, self._is_alt = False, False
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
Explanation
===========
A group `G` is nilpotent if it has a central series of finite length.
Alternatively, `G` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr, strict=True):
"""Test if ``G=self`` is a normal subgroup of ``gr``.
Explanation
===========
G is normal in gr if
for each g2 in G and g1 in gr, ``g = g1*g2*g1**-1`` belongs to G.
It is sufficient to check this for each g1 in gr.generators and
g2 in G.generators.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
if not self.is_subgroup(gr, strict=strict):
return False
d_self = self.degree
d_gr = gr.degree
if self.is_trivial and (d_self == d_gr or not strict):
return True
if self._is_abelian:
return True
new_self = self.copy()
if not strict and d_self != d_gr:
if d_self < d_gr:
new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)])
else:
gr = PermGroup(gr.generators + [Permutation(d_self - 1)])
gens2 = [p._array_form for p in new_self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not new_self.coset_factor(p, True):
return False
return True
def is_primitive(self, randomized=True):
r"""Test if a group is primitive.
Explanation
===========
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form `\{0, k\}` for ``k``
ranging over representatives for the orbits of `G_0`, the stabilizer of
``0``. This algorithm has complexity `O(n^2)` where ``n`` is the degree
of the group, and will perform badly if `G_0` is small.
There are two implementations offered: one finds `G_0`
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of `G_0` using ``random_stab``,
hoping that they generate a subgroup of `G_0` with not too many more
orbits than `G_0` (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
if self.is_transitive() is False:
return False
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for _ in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and any(e != 0 for e in self.minimal_block([0, x])):
self._is_primitive = False
return False
self._is_primitive = True
return True
def minimal_blocks(self, randomized=True):
'''
For a transitive group, return the list of all minimal
block systems. If a group is intransitive, return `False`.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> DihedralGroup(6).minimal_blocks()
[[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]]
>>> G = PermutationGroup(Permutation(1,2,5))
>>> G.minimal_blocks()
False
See Also
========
minimal_block, is_transitive, is_primitive
'''
def _number_blocks(blocks):
# number the blocks of a block system
# in order and return the number of
# blocks and the tuple with the
# reordering
n = len(blocks)
appeared = {}
m = 0
b = [None]*n
for i in range(n):
if blocks[i] not in appeared:
appeared[blocks[i]] = m
b[i] = m
m += 1
else:
b[i] = appeared[blocks[i]]
return tuple(b), m
if not self.is_transitive():
return False
blocks = []
num_blocks = []
rep_blocks = []
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0:
block = self.minimal_block([0, x])
num_block, _ = _number_blocks(block)
# a representative block (containing 0)
rep = {j for j in range(self.degree) if num_block[j] == 0}
# check if the system is minimal with
# respect to the already discovered ones
minimal = True
blocks_remove_mask = [False] * len(blocks)
for i, r in enumerate(rep_blocks):
if len(r) > len(rep) and rep.issubset(r):
# i-th block system is not minimal
blocks_remove_mask[i] = True
elif len(r) < len(rep) and r.issubset(rep):
# the system being checked is not minimal
minimal = False
break
# remove non-minimal representative blocks
blocks = [b for i, b in enumerate(blocks) if not blocks_remove_mask[i]]
num_blocks = [n for i, n in enumerate(num_blocks) if not blocks_remove_mask[i]]
rep_blocks = [r for i, r in enumerate(rep_blocks) if not blocks_remove_mask[i]]
if minimal and num_block not in num_blocks:
blocks.append(block)
num_blocks.append(num_block)
rep_blocks.append(rep)
return blocks
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
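# every group of odd order is solvable (Feit-Thompson theorem)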
if self.order() % 2 != 0:
return True
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return ``True`` if all elements of ``self`` belong to ``G``.
If ``strict`` is ``False`` then if ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics import SymmetricGroup, CyclicGroup
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to ``False``:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if isinstance(G, SymmetricPermutationGroup):
if self.degree != G.degree:
return False
return True
if not isinstance(G, PermutationGroup):
return False
if self == G or self.generators[0] == Permutation():
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
@property
def is_polycyclic(self):
"""Return ``True`` if a group is polycyclic. A group is polycyclic if
it has a subnormal series with cyclic factors. For finite groups,
this is the same as if the group is solvable.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G = PermutationGroup([a, b])
>>> G.is_polycyclic
True
"""
return self.is_solvable
def is_transitive(self, strict=True):
"""Test if the group is transitive.
Explanation
===========
A group is transitive if it has a single orbit.
If ``strict`` is ``False`` the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
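# non-strict: the group counts as transitive if exactly one orbit
# has length greater than 1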
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group `G` is the series
`G = G_0 > G_1 > G_2 > \ldots` where
`G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the
commutator of `G` and the previous term in the series ([1], p.29).
Returns
=======
A list of permutation groups in the order `G = G_0, G_1, G_2, \ldots`
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
nxt = self.commutator(self, current)
while not current.is_subgroup(nxt):
res.append(nxt)
current = nxt
nxt = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Explanation
===========
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
Explanation
===========
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
`O(|points||S|)`. ([1], pp. 83-87; [7]).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.minimal_block([0, 5])
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
>>> D.minimal_block([0, 1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
gamma = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(gamma, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(gamma), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
# rewrite result so that block representatives are minimal
new_reps = {}
return [new_reps.setdefault(r, i) for i, r in enumerate(parents)]
def conjugacy_class(self, x):
r"""Return the conjugacy class of an element in the group.
Explanation
===========
The conjugacy class of an element ``g`` in a group ``G`` is the set of
elements ``x`` in ``G`` that are conjugate with ``g``, i.e. for which
``x = aga^{-1}``
for some ``a`` in ``G``.
Note that conjugacy is an equivalence relation, and therefore that
conjugacy classes are partitions of ``G``. For a list of all the
conjugacy classes of the group, use the conjugacy_classes() method.
In a permutation group, each conjugacy class corresponds to a particular
cycle structure: for example, in ``S_3``, the conjugacy classes are:
* the identity class, ``{()}``
* all transpositions, ``{(1 2), (1 3), (2 3)}``
* all 3-cycles, ``{(1 2 3), (1 3 2)}``
Examples
========
>>> from sympy.combinatorics import Permutation, SymmetricGroup
>>> S3 = SymmetricGroup(3)
>>> S3.conjugacy_class(Permutation(0, 1, 2))
{(0 1 2), (0 2 1)}
Notes
=====
This procedure computes the conjugacy class directly by finding the
orbit of the element under conjugation in G. This algorithm is only
feasible for permutation groups of relatively small order, but is like
the orbit() function itself in that respect.
"""
# Ref: "Computing the conjugacy classes of finite groups"; Butler, G.
# Groups '93 Galway/St Andrews; edited by Campbell, C. M.
new_class = {x}
last_iteration = new_class
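# breadth-first search of the orbit of x under conjugation by the
# group generators; new_class accumulates all conjugates found so far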
while len(last_iteration) > 0:
this_iteration = set()
for y in last_iteration:
for s in self.generators:
conjugated = s * y * (~s)
if conjugated not in new_class:
this_iteration.add(conjugated)
new_class.update(last_iteration)
last_iteration = this_iteration
return new_class
def conjugacy_classes(self):
r"""Return the conjugacy classes of the group.
Explanation
===========
As described in the documentation for the .conjugacy_class() function,
conjugacy is an equivalence relation on a group G which partitions the
set of elements. This method returns a list of all these conjugacy
classes of G.
Examples
========
>>> from sympy.combinatorics import SymmetricGroup
>>> SymmetricGroup(3).conjugacy_classes()
[{(2)}, {(0 1 2), (0 2 1)}, {(0 2), (1 2), (2)(0 1)}]
"""
identity = _af_new(list(range(self.degree)))
known_elements = {identity}
classes = [known_elements.copy()]
for x in self.generate():
if x not in known_elements:
new_class = self.conjugacy_class(x)
classes.append(new_class)
known_elements.update(new_class)
return classes
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
Explanation
===========
If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
is defined as the intersection of all normal subgroups of ``G`` that
contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup, AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
while _loop:
Z._random_pr_init(r=10, n=10)
for _ in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h^g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h^g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
If alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action (i.e., g((1,2,3)) = (g(1), g(2), g(3)))
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
{0, 1, 2}
>>> G.orbit([0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
Explanation
===========
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
(0 4 1 2 3)
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
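# walk the Schreier vector back from beta to alpha, collecting the
# generators used; their product is an element sending alpha to beta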
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
For a permutation group `G`, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of ``self``, ordered according to lowest element
in each orbit.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[{0, 2, 3, 4, 6}, {1, 5}]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
The number of permutations comprising the group is given by
``len(group)``; the length of each permutation in the group is
given by ``group.size``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
m = prod([len(x) for x in self.basic_transversals])
self._order = m
return m
def index(self, H):
"""
Returns the index of a permutation group.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1,2,3)
>>> b = Permutation(3)
>>> G = PermutationGroup([a])
>>> H = PermutationGroup([b])
>>> G.index(H)
3
"""
if H.is_subgroup(self):
return self.order()//H.order()
@property
def is_symmetric(self):
"""Return ``True`` if the group is symmetric.
Examples
========
>>> from sympy.combinatorics import SymmetricGroup
>>> g = SymmetricGroup(5)
>>> g.is_symmetric
True
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = PermutationGroup(
... Permutation(0, 1, 2, 3, 4),
... Permutation(2, 3))
>>> g.is_symmetric
True
Notes
=====
This uses a naive test involving the computation of the full
group order.
If you need a quicker check for large groups, you can use
:meth:`PermutationGroup.is_alt_sym`.
However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
and is not able to distinguish between an alternating group and
a symmetric group.
See Also
========
is_alt_sym
"""
_is_sym = self._is_sym
if _is_sym is not None:
return _is_sym
n = self.degree
if n >= 8:
if self.is_transitive():
_is_alt_sym = self._eval_is_alt_sym_monte_carlo()
if _is_alt_sym:
if any(g.is_odd for g in self.generators):
self._is_sym, self._is_alt = True, False
return True
self._is_sym, self._is_alt = False, True
return False
return self._eval_is_alt_sym_naive(only_sym=True)
self._is_sym, self._is_alt = False, False
return False
return self._eval_is_alt_sym_naive(only_sym=True)
@property
def is_alternating(self):
"""Return ``True`` if the group is alternating.
Examples
========
>>> from sympy.combinatorics import AlternatingGroup
>>> g = AlternatingGroup(5)
>>> g.is_alternating
True
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = PermutationGroup(
... Permutation(0, 1, 2, 3, 4),
... Permutation(2, 3, 4))
>>> g.is_alternating
True
Notes
=====
This uses a naive test involving the computation of the full
group order.
If you need a quicker check for large groups, you can use
:meth:`PermutationGroup.is_alt_sym`.
However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
and is not able to distinguish between an alternating group and
a symmetric group.
See Also
========
is_alt_sym
"""
_is_alt = self._is_alt
if _is_alt is not None:
return _is_alt
n = self.degree
if n >= 8:
if self.is_transitive():
_is_alt_sym = self._eval_is_alt_sym_monte_carlo()
if _is_alt_sym:
if all(g.is_even for g in self.generators):
self._is_sym, self._is_alt = False, True
return True
self._is_sym, self._is_alt = True, False
return False
return self._eval_is_alt_sym_naive(only_alt=True)
self._is_sym, self._is_alt = False, False
return False
return self._eval_is_alt_sym_naive(only_alt=True)
@classmethod
def _distinct_primes_lemma(cls, primes):
"""Subroutine to test if there is only one cyclic group for the
order."""
primes = sorted(primes)
l = len(primes)
for i in range(l):
for j in range(i+1, l):
if primes[j] % primes[i] == 1:
return None
return True
@property
def is_cyclic(self):
r"""
Return ``True`` if the group is Cyclic.
Examples
========
>>> from sympy.combinatorics.named_groups import AbelianGroup
>>> G = AbelianGroup(3, 4)
>>> G.is_cyclic
True
>>> G = AbelianGroup(4, 4)
>>> G.is_cyclic
False
Notes
=====
If the order of a group $n$ can be factored into the distinct
primes $p_1, p_2, \dots , p_s$ and if
.. math::
\forall i, j \in \{1, 2, \dots, s \}:
p_i \not \equiv 1 \pmod {p_j}
holds true, there is only one group of order $n$, and it is
a cyclic group [1]_. This is a generalization of the lemma
that the groups of order $15, 35, \dots$ are cyclic.
In addition, the following lemmas can be used to test whether a
group is cyclic once the order of the group is known.
- If the group is abelian and the order of the group is
square-free, the group is cyclic.
- If the order of the group is less than $6$ and is not $4$, the
group is cyclic.
- If the order of the group is prime, the group is cyclic.
References
==========
.. [1] 1978: John S. Rose: A Course on Group Theory,
Introduction to Finite Group Theory: 1.4
"""
if self._is_cyclic is not None:
return self._is_cyclic
if len(self.generators) == 1:
self._is_cyclic = True
self._is_abelian = True
return True
if self._is_abelian is False:
self._is_cyclic = False
return False
order = self.order()
if order < 6:
self._is_abelian = True
if order != 4:
self._is_cyclic = True
return True
factors = factorint(order)
if all(v == 1 for v in factors.values()):
if self._is_abelian:
self._is_cyclic = True
return True
primes = list(factors.keys())
if PermutationGroup._distinct_primes_lemma(primes) is True:
self._is_cyclic = True
self._is_abelian = True
return True
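# finally, check that for every prime divisor p the subgroup
# generated by the p-th powers of the generators has index exactly p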
for p in factors:
pgens = []
for g in self.generators:
pgens.append(g**p)
if self.index(self.subgroup(pgens)) != p:
self._is_cyclic = False
return False
self._is_cyclic = True
self._is_abelian = True
return True
@property
def is_dihedral(self):
r"""
Return ``True`` if the group is dihedral.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup
>>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6))
>>> G.is_dihedral
True
>>> G = SymmetricGroup(3)
>>> G.is_dihedral
True
>>> G = CyclicGroup(6)
>>> G.is_dihedral
False
References
==========
.. [Di1] https://math.stackexchange.com/a/827273
.. [Di2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf
.. [Di3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf
.. [Di4] https://en.wikipedia.org/wiki/Dihedral_group
"""
if self._is_dihedral is not None:
return self._is_dihedral
order = self.order()
if order % 2 == 1:
self._is_dihedral = False
return False
if order == 2:
self._is_dihedral = True
return True
if order == 4:
# The dihedral group of order 4 is the Klein 4-group.
self._is_dihedral = not self.is_cyclic
return self._is_dihedral
if self.is_abelian:
# The only abelian dihedral groups are the ones of orders 2 and 4.
self._is_dihedral = False
return False
# Now we know the group is of even order >= 6, and nonabelian.
n = order // 2
# Handle special cases where there are exactly two generators.
gens = self.generators
if len(gens) == 2:
x, y = gens
a, b = x.order(), y.order()
# Make a >= b
if a < b:
x, y, a, b = y, x, b, a
# Using Theorem 2.1 of [Di3]:
if a == 2 == b:
self._is_dihedral = True
return True
# Using Theorem 1.1 of [Di3]:
if a == n and b == 2 and y*x*y == ~x:
self._is_dihedral = True
return True
# Proceed with the algorithm of [Di1]
# Find elements of orders 2 and n
order_2, order_n = [], []
for p in self.elements:
k = p.order()
if k == 2:
order_2.append(p)
elif k == n:
order_n.append(p)
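# a dihedral group of order 2n contains n reflections of order 2,
# plus the rotation by pi when n is even, giving n + 1 - (n % 2)
# elements of order 2 in total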
if len(order_2) != n + 1 - (n % 2):
self._is_dihedral = False
return False
if not order_n:
self._is_dihedral = False
return False
x = order_n[0]
# Want an element y of order 2 that is not a power of x
# (i.e. that is not the 180-deg rotation, when n is even).
y = order_2[0]
if n % 2 == 0 and y == x**(n//2):
y = order_2[1]
self._is_dihedral = (y*x*y == ~x)
return self._is_dihedral
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
Explanation
===========
For a permutation group `G` and a set of points
`\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of
`p_1, p_2, \ldots, p_k` is defined as
`G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}` ([1],p20).
It is a subgroup of `G`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When ``incremental == True``,
rather than the obvious implementation using successive calls to
``.stabilizer()``, this uses the incremental Schreier-Sims algorithm
to obtain a base whose starting segment is the given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order given. This is mainly used for testing purposes.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
(0 1)(2 3)
>>> G.make_perm(3, [0, 1, 0])
(0 2 3 1)
>>> G.make_perm([0, 1, 0])
(0 2 3 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randomrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for _ in range(n):
p = self[randomrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
Explanation
===========
For the details of the product replacement algorithm, see
``_random_pr_init``. In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
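# one step of product replacement: replace entry s by its product
# with a (possibly inverted) entry t, then fold entry s into the
# accumulator slot r, which is returned as the random element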
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
Explanation
===========
It computes the generators of the chain of stabilizers
`G > G_{b_1} > .. > G_{b_1,..,b_r} > 1`
in which `G_{b_1,..,b_i}` stabilizes `b_1,..,b_i`,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
`h_1*..*h_s`.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: (2)(0 1), 1: (2), 2: (1 2)},
{0: (2), 2: (0 2)}]
"""
if self._transversals:
return
self._schreier_sims()
return
def _schreier_sims(self, base=None):
schreier = self.schreier_sims_incremental(base=base, slp_dict=True)
base, strong_gens = schreier[:2]
self._base = base
self._strong_gens = strong_gens
self._strong_gens_slp = schreier[2]
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals, slps = _orbits_transversals_from_bsgs(base,\
strong_gens_distr, slp=True)
# rewrite the indices stored in slps in terms of strong_gens
for i, slp in enumerate(slps):
gens = strong_gens_distr[i]
for k in slp:
slp[k] = [strong_gens.index(gens[s]) for s in slp[k]]
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
self._transversal_slp = slps
def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
slp_dict
If `True`, return a dictionary `{g: gens}` for each strong
generator `g` where `gens` is a list of strong generators
coming before `g` in `strong_gens`, such that the product
of the elements of `gens` is equal to `g`.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
if slp_dict:
return base, gens, {gens[0]: [gens[0]]}
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
strong_gens_slp = []
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
slps = {}
base_len = len(_base)
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True, slp=True)
transversals[i] = dict(transversals[i])
orbs[i] = list(transversals[i].keys())
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
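# db caches the inverses of transversal elements, keyed by the image
# point gb, so each inverse is computed at most once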
for beta, u_beta in list(transversals[i].items()):
for j, gen in enumerate(strong_gens_distr[i]):
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
slp = [(i, g) for g in slps[i][beta]]
slp = [(i, j)] + slp
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
u1_inv_slp = slps[i][gb][:]
u1_inv_slp.reverse()
u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]
slp = u1_inv_slp + slp
h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
strong_gens_slp.append((h, slp))
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l], slps[l] =\
_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True, slp=True)
transversals[l] = dict(transversals[l])
orbs[l] = list(transversals[l].keys())
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
strong_gens = _gens[:]
if slp_dict:
# create the list of the strong generators strong_gens and
# rewrite the indices of strong_gens_slp in terms of the
# elements of strong_gens
for k, slp in strong_gens_slp:
strong_gens.append(k)
for i in range(len(slp)):
s = slp[i]
if isinstance(s[1], tuple):
slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1
else:
slp[i] = strong_gens_distr[s[0]][s[1]]
strong_gens_slp = dict(strong_gens_slp)
# add the original generators
for g in _gens:
strong_gens_slp[g] = [g]
return (_base, strong_gens, strong_gens_slp)
strong_gens.extend([k for k, _ in strong_gens_slp])
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
Explanation
===========
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most `2^{-consec\_succ}`,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
form a BSGS with probability at least `1 - 1/\text{consec\_succ}`.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i].keys())
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l].keys())
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
Explanation
===========
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element does not belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
(5)(0 4)(1 3)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
r"""Return a strong generating set from the Schreier-Sims algorithm.
Explanation
===========
A generating set `S = \{g_1, g_2, \dots, g_t\}` for a permutation group
`G` is a strong generating set relative to the sequence of points
(referred to as a "base") `(b_1, b_2, \dots, b_k)` if, for
`1 \leq i \leq k` we have that the intersection of the pointwise
stabilizer `G^{(i+1)} := G_{b_1, b_2, \dots, b_i}` with `S` generates
the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> D.strong_gens
[(0 1 2 3), (0 3)(1 2), (1 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup(self, gens):
"""
        Return the subgroup generated by ``gens``, a list of
        elements of the group.
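        Examples
        ========
        A small illustrative doctest (``SymmetricGroup`` is used here purely
        for demonstration):
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(4)
        >>> H = S.subgroup([S.generators[0]])
        >>> H.is_subgroup(S)
        True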
"""
if not all(g in self for g in gens):
raise ValueError("The group does not contain the supplied generators")
G = PermutationGroup(gens)
return G
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
Explanation
===========
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
            not to satisfy ``prop`` based on where ``g`` sends the first
            ``l + 1`` base points.
init_subgroup
            If a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> prop_even = lambda x: x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice, and for
some computations, it's not terrible.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
        stabilizer. The book mentions that this can be done by using
``.baseswap(...)``, however the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
return [min(orbit, key = lambda x: base_ordering[x]) \
for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
trivial_test = lambda x: True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])\
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
# lines 14 and 15: update variables used minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i]) \
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until in the first branch not fully
# searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
# accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
r"""Compute the degree of transitivity of the group.
Explanation
===========
A permutation group `G` acting on `\Omega = \{0, 1, \dots, n-1\}` is
``k``-fold transitive, if, for any `k` points
`(a_1, a_2, \dots, a_k) \in \Omega` and any `k` points
`(b_1, b_2, \dots, b_k) \in \Omega` there exists `g \in G` such that
`g(a_1) = b_1, g(a_2) = b_2, \dots, g(a_k) = b_k`
The degree of transitivity of `G` is the maximum ``k`` such that
`G` is ``k``-fold transitive. ([8])
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
def _p_elements_group(self, p):
'''
For an abelian p-group, return the subgroup consisting of
all elements of order p (and the identity)
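        A small doctest sketch (a cyclic group of order 4, i.e. an abelian
        2-group; every non-identity element of the returned subgroup has
        order 2):
        >>> from sympy.combinatorics import Permutation, PermutationGroup
        >>> G = PermutationGroup(Permutation(0, 1, 2, 3))
        >>> G._p_elements_group(2).order()
        2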
'''
gens = self.generators[:]
gens = sorted(gens, key=lambda x: x.order(), reverse=True)
gens_p = [g**(g.order()/p) for g in gens]
gens_r = []
for i in range(len(gens)):
x = gens[i]
x_order = x.order()
# x_p has order p
x_p = x**(x_order/p)
if i > 0:
P = PermutationGroup(gens_p[:i])
else:
P = PermutationGroup(self.identity)
if x**(x_order/p) not in P:
gens_r.append(x**(x_order/p))
else:
# replace x by an element of order (x.order()/p)
# so that gens still generates G
g = P.generator_product(x_p, original=True)
for s in g:
x = x*s**-1
x_order = x_order/p
# insert x to gens so that the sorting is preserved
del gens[i]
del gens_p[i]
j = i - 1
while j < len(gens) and gens[j].order() >= x_order:
j += 1
gens = gens[:j] + [x] + gens[j:]
gens_p = gens_p[:j] + [x] + gens_p[j:]
return PermutationGroup(gens_r)
def _sylow_alt_sym(self, p):
'''
Return a p-Sylow subgroup of a symmetric or an
alternating group.
Explanation
===========
The algorithm for this is hinted at in [1], Chapter 4,
Exercise 4.
For Sym(n) with n = p^i, the idea is as follows. Partition
the interval [0..n-1] into p equal parts, each of length p^(i-1):
[0..p^(i-1)-1], [p^(i-1)..2*p^(i-1)-1]...[(p-1)*p^(i-1)..p^i-1].
Find a p-Sylow subgroup of Sym(p^(i-1)) (treated as a subgroup
of ``self``) acting on each of the parts. Call the subgroups
P_1, P_2...P_p. The generators for the subgroups P_2...P_p
can be obtained from those of P_1 by applying a "shifting"
permutation to them, that is, a permutation mapping [0..p^(i-1)-1]
to the second part (the other parts are obtained by using the shift
multiple times). The union of this permutation and the generators
of P_1 is a p-Sylow subgroup of ``self``.
For n not equal to a power of p, partition
[0..n-1] in accordance with how n would be written in base p.
E.g. for p=2 and n=11, 11 = 2^3 + 2^2 + 1 so the partition
is [[0..7], [8..9], {10}]. To generate a p-Sylow subgroup,
take the union of the generators for each of the parts.
        For the above example, the generators are {(0 1), (0 2)(1 3),
        (0 4)(1 5)(2 6)(3 7)} from the first part, {(8 9)} from the second
        part and nothing from the third. This gives 4 generators in total, and
the subgroup they generate is p-Sylow.
Alternating groups are treated the same except when p=2. In this
case, (0 1)(s s+1) should be added for an appropriate s (the start
of a part) for each part in the partitions.
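        Examples
        ========
        A brief illustrative check (the generators returned should generate a
        group whose order is the full power of ``p`` dividing ``n!``; here
        ``p = 2`` and ``n = 4``, so the order should be 8):
        >>> from sympy.combinatorics import PermutationGroup
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(4)
        >>> PermutationGroup(S._sylow_alt_sym(2)).order()
        8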
See Also
========
sylow_subgroup, is_alt_sym
'''
n = self.degree
gens = []
identity = Permutation(n-1)
# the case of 2-sylow subgroups of alternating groups
# needs special treatment
alt = p == 2 and all(g.is_even for g in self.generators)
# find the presentation of n in base p
coeffs = []
m = n
while m > 0:
coeffs.append(m % p)
m = m // p
power = len(coeffs)-1
# for a symmetric group, gens[:i] is the generating
# set for a p-Sylow subgroup on [0..p**(i-1)-1]. For
# alternating groups, the same is given by gens[:2*(i-1)]
for i in range(1, power+1):
if i == 1 and alt:
# (0 1) shouldn't be added for alternating groups
continue
gen = Permutation([(j + p**(i-1)) % p**i for j in range(p**i)])
gens.append(identity*gen)
if alt:
gen = Permutation(0, 1)*gen*Permutation(0, 1)*gen
gens.append(gen)
# the first point in the current part (see the algorithm
# description in the docstring)
start = 0
while power > 0:
a = coeffs[power]
# make the permutation shifting the start of the first
# part ([0..p^i-1] for some i) to the current one
for _ in range(a):
shift = Permutation()
if start > 0:
for i in range(p**power):
shift = shift(i, start + i)
if alt:
gen = Permutation(0, 1)*shift*Permutation(0, 1)*shift
gens.append(gen)
j = 2*(power - 1)
else:
j = power
for i, gen in enumerate(gens[:j]):
if alt and i % 2 == 1:
continue
# shift the generator to the start of the
# partition part
gen = shift*gen*shift
gens.append(gen)
start += p**power
power = power-1
return gens
def sylow_subgroup(self, p):
'''
Return a p-Sylow subgroup of the group.
The algorithm is described in [1], Chapter 4, Section 7
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> D = DihedralGroup(6)
>>> S = D.sylow_subgroup(2)
>>> S.order()
4
>>> G = SymmetricGroup(6)
>>> S = G.sylow_subgroup(5)
>>> S.order()
5
>>> G1 = AlternatingGroup(3)
>>> G2 = AlternatingGroup(5)
>>> G3 = AlternatingGroup(9)
>>> S1 = G1.sylow_subgroup(3)
>>> S2 = G2.sylow_subgroup(3)
>>> S3 = G3.sylow_subgroup(3)
>>> len1 = len(S1.lower_central_series())
>>> len2 = len(S2.lower_central_series())
>>> len3 = len(S3.lower_central_series())
>>> len1 == len2
True
>>> len1 < len3
True
'''
from sympy.combinatorics.homomorphisms import (
orbit_homomorphism, block_homomorphism)
if not isprime(p):
raise ValueError("p must be a prime")
def is_p_group(G):
# check if the order of G is a power of p
# and return the power
m = G.order()
n = 0
while m % p == 0:
m = m/p
n += 1
if m == 1:
return True, n
return False, n
def _sylow_reduce(mu, nu):
# reduction based on two homomorphisms
# mu and nu with trivially intersecting
# kernels
Q = mu.image().sylow_subgroup(p)
Q = mu.invert_subgroup(Q)
nu = nu.restrict_to(Q)
R = nu.image().sylow_subgroup(p)
return nu.invert_subgroup(R)
order = self.order()
if order % p != 0:
return PermutationGroup([self.identity])
p_group, n = is_p_group(self)
if p_group:
return self
if self.is_alt_sym():
return PermutationGroup(self._sylow_alt_sym(p))
# if there is a non-trivial orbit with size not divisible
# by p, the sylow subgroup is contained in its stabilizer
# (by orbit-stabilizer theorem)
orbits = self.orbits()
non_p_orbits = [o for o in orbits if len(o) % p != 0 and len(o) != 1]
if non_p_orbits:
G = self.stabilizer(list(non_p_orbits[0]).pop())
return G.sylow_subgroup(p)
if not self.is_transitive():
# apply _sylow_reduce to orbit actions
orbits = sorted(orbits, key=len)
omega1 = orbits.pop()
omega2 = orbits[0].union(*orbits)
mu = orbit_homomorphism(self, omega1)
nu = orbit_homomorphism(self, omega2)
return _sylow_reduce(mu, nu)
blocks = self.minimal_blocks()
if len(blocks) > 1:
# apply _sylow_reduce to block system actions
mu = block_homomorphism(self, blocks[0])
nu = block_homomorphism(self, blocks[1])
return _sylow_reduce(mu, nu)
elif len(blocks) == 1:
block = list(blocks)[0]
if any(e != 0 for e in block):
# self is imprimitive
mu = block_homomorphism(self, block)
if not is_p_group(mu.image())[0]:
S = mu.image().sylow_subgroup(p)
return mu.invert_subgroup(S).sylow_subgroup(p)
# find an element of order p
g = self.random()
g_order = g.order()
while g_order % p != 0 or g_order == 0:
g = self.random()
g_order = g.order()
g = g**(g_order // p)
if order % p**2 != 0:
return PermutationGroup(g)
C = self.centralizer(g)
while C.order() % p**n != 0:
S = C.sylow_subgroup(p)
s_order = S.order()
Z = S.center()
P = Z._p_elements_group(p)
h = P.random()
C_h = self.centralizer(h)
            # look for h whose centralizer order is divisible by p*|S|
            while C_h.order() % (p*s_order) != 0:
h = P.random()
C_h = self.centralizer(h)
C = C_h
return C.sylow_subgroup(p)
def _block_verify(self, L, alpha):
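        # A sketch of intent (inferred from the caller in
        # ``strong_presentation``): check whether the orbit of ``alpha``
        # under the subgroup ``L`` is a block for the action of ``self`` on
        # ``self.orbit(alpha)``. Return ``(True, blocks)`` on success, where
        # ``blocks[i]`` is the representative of the block containing
        # ``delta[i]``, or ``(False, s)`` where the permutation ``s``
        # witnesses the failure.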
delta = sorted(list(self.orbit(alpha)))
# p[i] will be the number of the block
# delta[i] belongs to
p = [-1]*len(delta)
blocks = [-1]*len(delta)
B = [[]] # future list of blocks
u = [0]*len(delta) # u[i] in L s.t. alpha^u[i] = B[0][i]
t = L.orbit_transversal(alpha, pairs=True)
for a, beta in t:
B[0].append(a)
i_a = delta.index(a)
p[i_a] = 0
blocks[i_a] = alpha
u[i_a] = beta
rho = 0
m = 0 # number of blocks - 1
while rho <= m:
beta = B[rho][0]
for g in self.generators:
d = beta^g
i_d = delta.index(d)
sigma = p[i_d]
if sigma < 0:
# define a new block
m += 1
sigma = m
u[i_d] = u[delta.index(beta)]*g
p[i_d] = sigma
rep = d
blocks[i_d] = rep
newb = [rep]
for gamma in B[rho][1:]:
i_gamma = delta.index(gamma)
d = gamma^g
i_d = delta.index(d)
if p[i_d] < 0:
u[i_d] = u[i_gamma]*g
p[i_d] = sigma
blocks[i_d] = rep
newb.append(d)
else:
# B[rho] is not a block
s = u[i_gamma]*g*u[i_d]**(-1)
return False, s
B.append(newb)
else:
for h in B[rho][1:]:
if h^g not in B[sigma]:
# B[rho] is not a block
s = u[delta.index(beta)]*g*u[i_d]**(-1)
return False, s
rho += 1
return True, blocks
def _verify(H, K, phi, z, alpha):
'''
        Return a list of relators ``rels`` in generators ``gens_h`` that
are mapped to ``H.generators`` by ``phi`` so that given a finite
presentation <gens_k | rels_k> of ``K`` on a subset of ``gens_h``
<gens_h | rels_k + rels> is a finite presentation of ``H``.
Explanation
===========
``H`` should be generated by the union of ``K.generators`` and ``z``
(a single generator), and ``H.stabilizer(alpha) == K``; ``phi`` is a
canonical injection from a free group into a permutation group
containing ``H``.
The algorithm is described in [1], Chapter 6.
Examples
========
>>> from sympy.combinatorics import free_group, Permutation, PermutationGroup
>>> from sympy.combinatorics.homomorphisms import homomorphism
>>> from sympy.combinatorics.fp_groups import FpGroup
        >>> H = PermutationGroup(Permutation(0, 2), Permutation(1, 5))
>>> K = PermutationGroup(Permutation(5)(0, 2))
>>> F = free_group("x_0 x_1")[0]
>>> gens = F.generators
>>> phi = homomorphism(F, H, F.generators, H.generators)
>>> rels_k = [gens[0]**2] # relators for presentation of K
        >>> z = Permutation(1, 5)
>>> check, rels_h = H._verify(K, phi, z, 1)
>>> check
True
>>> rels = rels_k + rels_h
>>> G = FpGroup(F, rels) # presentation of H
>>> G.order() == H.order()
True
See also
========
strong_presentation, presentation, stabilizer
'''
orbit = H.orbit(alpha)
beta = alpha^(z**-1)
K_beta = K.stabilizer(beta)
# orbit representatives of K_beta
gammas = [alpha, beta]
orbits = list({tuple(K_beta.orbit(o)) for o in orbit})
orbit_reps = [orb[0] for orb in orbits]
for rep in orbit_reps:
if rep not in gammas:
gammas.append(rep)
# orbit transversal of K
betas = [alpha, beta]
transversal = {alpha: phi.invert(H.identity), beta: phi.invert(z**-1)}
for s, g in K.orbit_transversal(beta, pairs=True):
if s not in transversal:
transversal[s] = transversal[beta]*phi.invert(g)
union = K.orbit(alpha).union(K.orbit(beta))
while (len(union) < len(orbit)):
for gamma in gammas:
if gamma in union:
r = gamma^z
if r not in union:
betas.append(r)
transversal[r] = transversal[gamma]*phi.invert(z)
for s, g in K.orbit_transversal(r, pairs=True):
if s not in transversal:
transversal[s] = transversal[r]*phi.invert(g)
union = union.union(K.orbit(r))
break
# compute relators
rels = []
for b in betas:
k_gens = K.stabilizer(b).generators
for y in k_gens:
new_rel = transversal[b]
gens = K.generator_product(y, original=True)
for g in gens[::-1]:
new_rel = new_rel*phi.invert(g)
new_rel = new_rel*transversal[b]**-1
perm = phi(new_rel)
try:
gens = K.generator_product(perm, original=True)
except ValueError:
return False, perm
for g in gens:
new_rel = new_rel*phi.invert(g)**-1
if new_rel not in rels:
rels.append(new_rel)
for gamma in gammas:
new_rel = transversal[gamma]*phi.invert(z)*transversal[gamma^z]**-1
perm = phi(new_rel)
try:
gens = K.generator_product(perm, original=True)
except ValueError:
return False, perm
for g in gens:
new_rel = new_rel*phi.invert(g)**-1
if new_rel not in rels:
rels.append(new_rel)
return True, rels
def strong_presentation(self):
'''
Return a strong finite presentation of group. The generators
of the returned group are in the same order as the strong
generators of group.
The algorithm is based on Sims' Verify algorithm described
in [1], Chapter 6.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> P = DihedralGroup(4)
>>> G = P.strong_presentation()
>>> P.order() == G.order()
True
See Also
========
presentation, _verify
'''
from sympy.combinatorics.fp_groups import (FpGroup,
simplify_presentation)
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.homomorphisms import (block_homomorphism,
homomorphism, GroupHomomorphism)
strong_gens = self.strong_gens[:]
stabs = self.basic_stabilizers[:]
base = self.base[:]
# injection from a free group on len(strong_gens)
# generators into G
gen_syms = [('x_%d'%i) for i in range(len(strong_gens))]
F = free_group(', '.join(gen_syms))[0]
phi = homomorphism(F, self, F.generators, strong_gens)
H = PermutationGroup(self.identity)
while stabs:
alpha = base.pop()
K = H
H = stabs.pop()
new_gens = [g for g in H.generators if g not in K]
if K.order() == 1:
z = new_gens.pop()
rels = [F.generators[-1]**z.order()]
intermediate_gens = [z]
K = PermutationGroup(intermediate_gens)
# add generators one at a time building up from K to H
while new_gens:
z = new_gens.pop()
intermediate_gens = [z] + intermediate_gens
K_s = PermutationGroup(intermediate_gens)
orbit = K_s.orbit(alpha)
orbit_k = K.orbit(alpha)
# split into cases based on the orbit of K_s
if orbit_k == orbit:
if z in K:
rel = phi.invert(z)
perm = z
else:
t = K.orbit_rep(alpha, alpha^z)
rel = phi.invert(z)*phi.invert(t)**-1
perm = z*t**-1
for g in K.generator_product(perm, original=True):
rel = rel*phi.invert(g)**-1
new_rels = [rel]
elif len(orbit_k) == 1:
# `success` is always true because `strong_gens`
# and `base` are already a verified BSGS. Later
# this could be changed to start with a randomly
# generated (potential) BSGS, and then new elements
# would have to be appended to it when `success`
# is false.
success, new_rels = K_s._verify(K, phi, z, alpha)
else:
# K.orbit(alpha) should be a block
# under the action of K_s on K_s.orbit(alpha)
check, block = K_s._block_verify(K, alpha)
if check:
# apply _verify to the action of K_s
# on the block system; for convenience,
# add the blocks as additional points
# that K_s should act on
t = block_homomorphism(K_s, block)
m = t.codomain.degree # number of blocks
d = K_s.degree
# conjugating with p will shift
# permutations in t.image() to
# higher numbers, e.g.
                        # p*(0 1)*p = (d d+1)
p = Permutation()
for i in range(m):
p *= Permutation(i, i+d)
t_img = t.images
# combine generators of K_s with their
# action on the block system
images = {g: g*p*t_img[g]*p for g in t_img}
for g in self.strong_gens[:-len(K_s.generators)]:
images[g] = g
K_s_act = PermutationGroup(list(images.values()))
f = GroupHomomorphism(self, K_s_act, images)
K_act = PermutationGroup([f(g) for g in K.generators])
success, new_rels = K_s_act._verify(K_act, f.compose(phi), f(z), d)
for n in new_rels:
if n not in rels:
rels.append(n)
K = K_s
group = FpGroup(F, rels)
return simplify_presentation(group)
def presentation(self, eliminate_gens=True):
'''
Return an `FpGroup` presentation of the group.
The algorithm is described in [1], Chapter 6.1.
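        Examples
        ========
        A minimal doctest sketch (a cyclic group, which exercises the
        single-generator case):
        >>> from sympy.combinatorics import Permutation, PermutationGroup
        >>> P = PermutationGroup(Permutation(0, 1, 2))
        >>> G = P.presentation()
        >>> G.order()
        3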
'''
from sympy.combinatorics.fp_groups import (FpGroup,
simplify_presentation)
from sympy.combinatorics.coset_table import CosetTable
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.homomorphisms import homomorphism
if self._fp_presentation:
return self._fp_presentation
def _factor_group_by_rels(G, rels):
if isinstance(G, FpGroup):
rels.extend(G.relators)
return FpGroup(G.free_group, list(set(rels)))
return FpGroup(G, rels)
gens = self.generators
len_g = len(gens)
if len_g == 1:
order = gens[0].order()
# handle the trivial group
if order == 1:
return free_group([])[0]
F, x = free_group('x')
return FpGroup(F, [x**order])
if self.order() > 20:
half_gens = self.generators[0:(len_g+1)//2]
else:
half_gens = []
H = PermutationGroup(half_gens)
H_p = H.presentation()
len_h = len(H_p.generators)
C = self.coset_table(H)
n = len(C) # subgroup index
gen_syms = [('x_%d'%i) for i in range(len(gens))]
F = free_group(', '.join(gen_syms))[0]
# mapping generators of H_p to those of F
images = [F.generators[i] for i in range(len_h)]
R = homomorphism(H_p, F, H_p.generators, images, check=False)
# rewrite relators
rels = R(H_p.relators)
G_p = FpGroup(F, rels)
# injective homomorphism from G_p into self
T = homomorphism(G_p, self, G_p.generators, gens)
C_p = CosetTable(G_p, [])
C_p.table = [[None]*(2*len_g) for i in range(n)]
# initiate the coset transversal
transversal = [None]*n
transversal[0] = G_p.identity
# fill in the coset table as much as possible
for i in range(2*len_h):
C_p.table[0][i] = 0
gamma = 1
for alpha, x in product(range(n), range(2*len_g)):
beta = C[alpha][x]
if beta == gamma:
gen = G_p.generators[x//2]**((-1)**(x % 2))
transversal[beta] = transversal[alpha]*gen
C_p.table[alpha][x] = beta
C_p.table[beta][x + (-1)**(x % 2)] = alpha
gamma += 1
if gamma == n:
break
C_p.p = list(range(n))
beta = x = 0
while not C_p.is_complete():
# find the first undefined entry
while C_p.table[beta][x] == C[beta][x]:
x = (x + 1) % (2*len_g)
if x == 0:
beta = (beta + 1) % n
# define a new relator
gen = G_p.generators[x//2]**((-1)**(x % 2))
new_rel = transversal[beta]*gen*transversal[C[beta][x]]**-1
perm = T(new_rel)
nxt = G_p.identity
for s in H.generator_product(perm, original=True):
nxt = nxt*T.invert(s)**-1
new_rel = new_rel*nxt
# continue coset enumeration
G_p = _factor_group_by_rels(G_p, [new_rel])
C_p.scan_and_fill(0, new_rel)
C_p = G_p.coset_enumeration([], strategy="coset_table",
draft=C_p, max_cosets=n, incomplete=True)
self._fp_presentation = simplify_presentation(G_p)
return self._fp_presentation
def polycyclic_group(self):
"""
Return the PolycyclicGroup instance with below parameters:
Explanation
===========
* pc_sequence : Polycyclic sequence is formed by collecting all
the missing generators between the adjacent groups in the
derived series of given permutation group.
* pc_series : Polycyclic series is formed by adding all the missing
generators of ``der[i+1]`` in ``der[i]``, where ``der`` represents
the derived series.
* relative_order : A list, computed by the ratio of adjacent groups in
pc_series.
"""
from sympy.combinatorics.pc_groups import PolycyclicGroup
if not self.is_polycyclic:
raise ValueError("The group must be solvable")
der = self.derived_series()
pc_series = []
pc_sequence = []
relative_order = []
pc_series.append(der[-1])
der.reverse()
for i in range(len(der)-1):
H = der[i]
for g in der[i+1].generators:
if g not in H:
H = PermutationGroup([g] + H.generators)
pc_series.insert(0, H)
pc_sequence.insert(0, g)
G1 = pc_series[0].order()
G2 = pc_series[1].order()
relative_order.insert(0, G1 // G2)
return PolycyclicGroup(pc_sequence, pc_series, relative_order, collector=None)
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
    If alpha is a list of points, there are three available options:
    'union' - computes the union of the orbits of the points in the list
    'tuples' - computes the orbit of the list interpreted as an ordered
    tuple under the group action (i.e., g((1, 2, 3)) = (g(1), g(2), g(3)))
    'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.perm_groups import _orbit
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
{0, 1, 2}
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] == False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = tuple([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = frozenset([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return {tuple(x) for x in orb}
def _orbits(degree, generators):
"""Compute the orbits of G.
    The orbits are returned as a list of sets of points.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import _orbits
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[{0, 1, 2}]
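    A case with more than one orbit (using the identity as the only
    generator), added for contrast:
    >>> c = Permutation([0, 1, 2])
    >>> _orbits(c.size, [c])
    [{0}, {1}, {2}]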
"""
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False, slp=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
    generators : generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
    If ``af`` is ``True``, the transversal elements are given in
array form.
If `slp` is `True`, a dictionary `{beta: slp_beta}` is returned
for `\beta \in Orb` where `slp_beta` is a list of indices of the
generators in `generators` s.t. if `slp_beta = [i_1 \dots i_n]`
`g_\beta = generators[i_n] \times \dots \times generators[i_1]`.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.perm_groups import _orbit_transversal
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
"""
tr = [(alpha, list(range(degree)))]
slp_dict = {alpha: []}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
px_slp = slp_dict[x]
for gen in gens:
temp = gen[x]
if used[temp] == False:
slp_dict[temp] = [gens.index(gen)] + px_slp
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
if not slp:
return tr
return tr, slp_dict
if af:
tr = [y for _, y in tr]
if not slp:
return tr
return tr, slp_dict
tr = [_af_new(y) for _, y in tr]
if not slp:
return tr
return tr, slp_dict
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
degree : degree of G
generators : generators of G
Examples
========
>>> from sympy.combinatorics.perm_groups import _stabilizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[(5)(0 4)(1 3), (5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
PermGroup = PermutationGroup
class SymmetricPermutationGroup(Basic):
"""
The class defining the lazy form of SymmetricGroup.
deg : int
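    Examples
    ========
    A small usage sketch (the same calls are demonstrated in the individual
    methods below):
    >>> from sympy.combinatorics import SymmetricPermutationGroup
    >>> G = SymmetricPermutationGroup(4)
    >>> G.degree
    4
    >>> G.order()
    24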
"""
def __new__(cls, deg):
deg = _sympify(deg)
obj = Basic.__new__(cls, deg)
return obj
def __init__(self, *args, **kwargs):
self._deg = self.args[0]
self._order = None
def __contains__(self, i):
"""Return ``True`` if *i* is contained in SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> Permutation(1, 2, 3) in G
True
"""
if not isinstance(i, Permutation):
raise TypeError("A SymmetricPermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return i.size == self.degree
def order(self):
"""
Return the order of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.order()
24
"""
if self._order is not None:
return self._order
n = self._deg
self._order = factorial(n)
return self._order
@property
def degree(self):
"""
Return the degree of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.degree
4
"""
return self._deg
@property
def identity(self):
'''
Return the identity element of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.identity()
(3)
'''
return _af_new(list(range(self._deg)))
class Coset(Basic):
"""A left coset of a permutation group with respect to an element.
Parameters
==========
g : Permutation
H : PermutationGroup
dir : "+" or "-", If not specified by default it will be "+"
here ``dir`` specified the type of coset "+" represent the
right coset and "-" represent the left coset.
G : PermutationGroup, optional
The group which contains *H* as its subgroup and *g* as its
element.
        If not specified, it defaults to the symmetric group
        ``SymmetricPermutationGroup(g.size)`` (equivalently
        ``SymmetricPermutationGroup(H.degree)``), provided ``g.size`` and
        ``H.degree`` match. ``SymmetricPermutationGroup`` is a lazy form of
        SymmetricGroup used for representation purposes.
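    Examples
    ========
    A short illustrative doctest (mirroring the ones in ``is_left_coset`` and
    ``is_right_coset`` below):
    >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
    >>> a = Permutation(1, 2)
    >>> b = Permutation(0, 1)
    >>> G = PermutationGroup([a, b])
    >>> Coset(a, G).is_right_coset
    True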
"""
def __new__(cls, g, H, G=None, dir="+"):
g = _sympify(g)
if not isinstance(g, Permutation):
raise NotImplementedError
H = _sympify(H)
if not isinstance(H, PermutationGroup):
raise NotImplementedError
if G is not None:
G = _sympify(G)
if not isinstance(G, (PermutationGroup, SymmetricPermutationGroup)):
raise NotImplementedError
if not H.is_subgroup(G):
raise ValueError("{} must be a subgroup of {}.".format(H, G))
if g not in G:
raise ValueError("{} must be an element of {}.".format(g, G))
else:
g_size = g.size
h_degree = H.degree
if g_size != h_degree:
raise ValueError(
"The size of the permutation {} and the degree of "
"the permutation group {} should be matching "
.format(g, H))
G = SymmetricPermutationGroup(g.size)
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("dir must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-'):
raise ValueError("dir must be one of '+' or '-' not %s" % dir)
obj = Basic.__new__(cls, g, H, G, dir)
return obj
def __init__(self, *args, **kwargs):
self._dir = self.args[3]
@property
def is_left_coset(self):
"""
Check if the coset is left coset that is ``gH``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
>>> a = Permutation(1, 2)
>>> b = Permutation(0, 1)
>>> G = PermutationGroup([a, b])
>>> cst = Coset(a, G, dir="-")
>>> cst.is_left_coset
True
"""
return str(self._dir) == '-'
@property
def is_right_coset(self):
"""
Check if the coset is right coset that is ``Hg``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
>>> a = Permutation(1, 2)
>>> b = Permutation(0, 1)
>>> G = PermutationGroup([a, b])
>>> cst = Coset(a, G, dir="+")
>>> cst.is_right_coset
True
"""
return str(self._dir) == '+'
def as_list(self):
"""
        Return all the elements of the coset as a list.
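        Examples
        ========
        A size-only sanity check (avoiding any dependence on how individual
        permutations are printed):
        >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
        >>> a = Permutation(1, 2)
        >>> b = Permutation(0, 1)
        >>> G = PermutationGroup([a, b])
        >>> len(Coset(a, G).as_list()) == G.order()
        True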
"""
g = self.args[0]
H = self.args[1]
cst = []
if str(self._dir) == '+':
for h in H.elements:
cst.append(h*g)
else:
for h in H.elements:
cst.append(g*h)
return cst
|
05d5b5446d978226d52365849ca6a9331f2bc99fb7c1b834e34ed7fc4ba3e480 | import random
from collections import defaultdict
from collections.abc import Iterable
from functools import reduce
from sympy.core.parameters import global_parameters
from sympy.core.basic import Atom
from sympy.core.expr import Expr
from sympy.core.numbers import Integer
from sympy.core.sympify import _sympify
from sympy.matrices import zeros
from sympy.polys.polytools import lcm
from sympy.printing.repr import srepr
from sympy.utilities.iterables import (flatten, has_variety, minlex,
has_dups, runs, is_sequence)
from sympy.utilities.misc import as_int
from mpmath.libmp.libintmath import ifac
from sympy.multipledispatch import dispatch
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
def _af_rmuln(*abc):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
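    A direct three-argument check of the ``a[b[c[i]]]`` rule (added purely as
    an illustration):
    >>> from sympy.combinatorics.permutations import _af_rmuln
    >>> _af_rmuln([1, 0, 2], [0, 2, 1], [2, 1, 0])
    [0, 2, 1]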
See Also
========
rmul, _af_rmul
"""
a = abc
m = len(a)
if m == 3:
p0, p1, p2 = a
return [p0[p1[i]] for i in p2]
if m == 4:
p0, p1, p2, p3 = a
return [p0[p1[p2[i]]] for i in p3]
if m == 5:
p0, p1, p2, p3, p4 = a
return [p0[p1[p2[p3[i]]]] for i in p4]
if m == 6:
p0, p1, p2, p3, p4, p5 = a
return [p0[p1[p2[p3[p4[i]]]]] for i in p5]
if m == 7:
p0, p1, p2, p3, p4, p5, p6 = a
return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6]
if m == 8:
p0, p1, p2, p3, p4, p5, p6, p7 = a
return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7]
if m == 1:
return a[0][:]
if m == 2:
a, b = a
return [a[i] for i in b]
if m == 0:
raise ValueError("String must not be empty")
p0 = _af_rmuln(*a[:m//2])
p1 = _af_rmuln(*a[m//2:])
return [p0[i] for i in p1]
def _af_parity(pi):
"""
Computes the parity of a permutation in array form.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that x > y but p[x] < p[y].
Examples
========
>>> from sympy.combinatorics.permutations import _af_parity
>>> _af_parity([0, 1, 2, 3])
0
>>> _af_parity([3, 2, 0, 1])
1
See Also
========
Permutation
"""
n = len(pi)
a = [0] * n
c = 0
for j in range(n):
if a[j] == 0:
c += 1
a[j] = 1
i = j
while pi[i] != j:
i = pi[i]
a[i] = 1
return (n - c) % 2
def _af_invert(a):
"""
Finds the inverse, ~A, of a permutation, A, given in array form.
Examples
========
>>> from sympy.combinatorics.permutations import _af_invert, _af_rmul
>>> A = [1, 2, 0, 3]
>>> _af_invert(A)
[2, 0, 1, 3]
>>> _af_rmul(_, A)
[0, 1, 2, 3]
See Also
========
Permutation, __invert__
"""
inv_form = [0] * len(a)
for i, ai in enumerate(a):
inv_form[ai] = i
return inv_form
def _af_pow(a, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.permutations import _af_pow
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> _af_pow(p._array_form, 4)
[0, 1, 2, 3]
"""
if n == 0:
return list(range(len(a)))
if n < 0:
return _af_pow(_af_invert(a), -n)
if n == 1:
return a[:]
elif n == 2:
b = [a[i] for i in a]
elif n == 3:
b = [a[a[i]] for i in a]
elif n == 4:
b = [a[a[a[i]]] for i in a]
else:
# use binary multiplication
b = list(range(len(a)))
while 1:
if n & 1:
b = [b[i] for i in a]
n -= 1
if not n:
break
if n % 4 == 0:
a = [a[a[a[i]]] for i in a]
n = n // 4
elif n % 2 == 0:
a = [a[i] for i in a]
n = n // 2
return b
def _af_commutes_with(a, b):
"""
Checks if the two permutations with array forms
given by ``a`` and ``b`` commute.
Examples
========
>>> from sympy.combinatorics.permutations import _af_commutes_with
>>> _af_commutes_with([1, 2, 0], [0, 2, 1])
False
See Also
========
Permutation, commutes_with
"""
return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1))
class Cycle(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
Explanation
===========
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
>>> from sympy.combinatorics.permutations import Perm, Cycle
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
(1 3 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
(1 3 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
(1 5 4 10)
>>> Cycle(1, 2)(4)(5)(10)
(1 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
    cycle, the underlying dictionary items are still available with
such methods as items():
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
return as_int(arg)
def __iter__(self):
yield from self.list()
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)(2, 3)
(1 3 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = Cycle(1, 2)
>>> a(2, 3)
(1 3 2)
>>> a(2, 3)(4, 5)
(1 3 2)(4 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> p = Cycle(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Cycle(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
if size is not None:
big = max([i for i in self.keys() if self[i] != i] + [0])
size = max(size, big + 1)
else:
size = self.size
return [self[i] for i in range(size)]
def __repr__(self):
"""We want it to print as a Cycle, not as a dict.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> print(_)
(1 2)
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
"""
if not self:
return 'Cycle()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
return 'Cycle%s' % s
def __str__(self):
"""We want it to be printed in a Cycle notation with no
comma in-between.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> Cycle(1, 2, 4)(5, 6)
(1 2 4)(5 6)
"""
if not self:
return '()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
s = s.replace(',', '')
return s
def __init__(self, *args):
"""Load up a Cycle instance with the values for the cycle.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2, 6)
(1 2 6)
"""
if not args:
return
if len(args) == 1:
if isinstance(args[0], Permutation):
for c in args[0].cyclic_form:
self.update(self(*c))
return
elif isinstance(args[0], Cycle):
for k, v in args[0].items():
self[k] = v
return
args = [as_int(a) for a in args]
if any(i < 0 for i in args):
raise ValueError('negative integers are not allowed in a cycle.')
if has_dups(args):
raise ValueError('All elements must be unique in a cycle.')
for i in range(-len(args), 0):
self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
def copy(self):
return Cycle(self)
class Permutation(Atom):
r"""
A permutation, alternatively known as an 'arrangement number' or 'ordering'
is an arrangement of the elements of an ordered list into a one-to-one
mapping with itself. The permutation of a given arrangement is given by
indicating the positions of the elements after re-arrangement [2]_. For
example, if one started with elements ``[x, y, a, b]`` (in that order) and
they were reordered as ``[x, y, b, a]`` then the permutation would be
``[0, 1, 3, 2]``. Notice that (in SymPy) the first element is always referred
to as 0 and the permutation uses the indices of the elements in the
original ordering, not the elements ``(a, b, ...)`` themselves.
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations Notation
=====================
Permutations are commonly represented in disjoint cycle or array forms.
Array Notation and 2-line Form
------------------------------------
In the 2-line form, the elements and their final positions are shown
as a matrix with 2 rows:
[0 1 2 ... n-1]
[p(0) p(1) p(2) ... p(n-1)]
Since the first line is always ``range(n)``, where n is the size of p,
it is sufficient to represent the permutation by the second line,
referred to as the "array form" of the permutation. This is entered
in brackets as the argument to the Permutation class:
>>> p = Permutation([0, 2, 1]); p
Permutation([0, 2, 1])
Given i in range(p.size), the permutation maps i to i^p
>>> [i^p for i in range(p.size)]
[0, 2, 1]
The composite of two permutations p*q means first apply p, then q, so
i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules:
>>> q = Permutation([2, 1, 0])
>>> [i^p^q for i in range(3)]
[2, 0, 1]
>>> [i^(p*q) for i in range(3)]
[2, 0, 1]
One can use also the notation p(i) = i^p, but then the composition
rule is (p*q)(i) = q(p(i)), not p(q(i)):
>>> [(p*q)(i) for i in range(p.size)]
[2, 0, 1]
>>> [q(p(i)) for i in range(p.size)]
[2, 0, 1]
>>> [p(q(i)) for i in range(p.size)]
[1, 2, 0]
Disjoint Cycle Notation
-----------------------
In disjoint cycle notation, only the elements that have shifted are
indicated.
For example, [1, 3, 2, 0] can be represented as (0, 1, 3)(2).
This can be understood from the 2 line format of the given permutation.
In the 2-line form,
[0 1 2 3]
[1 3 2 0]
    The element in the 0th position is 1, so 0 -> 1. The element in the 1st
    position is 3, so 1 -> 3. The element in the 3rd position is 0, so
    3 -> 0. Thus, 0 -> 1 -> 3 -> 0, and 2 -> 2. Hence this can be represented
    as 2 cycles: (0, 1, 3)(2).
    In common notation, singleton cycles are not explicitly written, as they
    can be inferred implicitly.
Only the relative ordering of elements in a cycle matter:
>>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2)
True
The disjoint cycle notation is convenient when representing
permutations that have several cycles in them:
>>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]])
True
It also provides some economy in entry when computing products of
permutations that are written in disjoint cycle notation:
>>> Permutation(1, 2)(1, 3)(2, 3)
Permutation([0, 3, 2, 1])
>>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]])
True
Caution: when the cycles have common elements between them then the order
in which the permutations are applied matters. This module applies
the permutations from *left to right*.
>>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)])
True
>>> Permutation(1, 2)(2, 3).list()
[0, 3, 1, 2]
In the above case, (1,2) is computed before (2,3).
As 0 -> 0, 0 -> 0, element in position 0 is 0.
As 1 -> 2, 2 -> 3, element in position 1 is 3.
As 2 -> 1, 1 -> 1, element in position 2 is 1.
As 3 -> 3, 3 -> 2, element in position 3 is 2.
If the first and second elements had been
swapped first, followed by the swapping of the second
and third, the result would have been [0, 2, 3, 1].
If, you want to apply the cycles in the conventional
right to left order, call the function with arguments in reverse order
as demonstrated below:
>>> Permutation([(1, 2), (2, 3)][::-1]).list()
[0, 2, 3, 1]
Entering a singleton in a permutation is a way to indicate the size of the
permutation. The ``size`` keyword can also be used.
Array-form entry:
>>> Permutation([[1, 2], [9]])
Permutation([0, 2, 1], size=10)
>>> Permutation([[1, 2]], size=10)
Permutation([0, 2, 1], size=10)
Cyclic-form entry:
>>> Permutation(1, 2, size=10)
Permutation([0, 2, 1], size=10)
>>> Permutation(9)(1, 2)
Permutation([0, 2, 1], size=10)
Caution: no singleton containing an element larger than the largest
in any previous cycle can be entered. This is an important difference
in how Permutation and Cycle handle the ``__call__`` syntax. A singleton
argument at the start of a Permutation performs instantiation of the
Permutation and is permitted:
>>> Permutation(5)
Permutation([], size=6)
A singleton entered after instantiation is a call to the permutation
-- a function call -- and if the argument is out of range it will
trigger an error. For this reason, it is better to start the cycle
with the singleton:
The following fails because there is no element 3:
>>> Permutation(1, 2)(3)
Traceback (most recent call last):
...
IndexError: list index out of range
This is ok: only the call to an out of range singleton is prohibited;
otherwise the permutation autosizes:
>>> Permutation(3)(1, 2)
Permutation([0, 2, 1, 3])
>>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2)
True
Equality testing
----------------
The array forms must be the same in order for permutations to be equal:
>>> Permutation([1, 0, 2, 3]) == Permutation([1, 0])
False
Identity Permutation
--------------------
The identity permutation is a permutation in which no element is out of
place. It can be entered in a variety of ways. All the following create
an identity permutation of size 4:
>>> I = Permutation([0, 1, 2, 3])
>>> all(p == I for p in [
... Permutation(3),
... Permutation(range(4)),
... Permutation([], size=4),
... Permutation(size=4)])
True
Watch out for entering the range *inside* a set of brackets (which is
cycle notation):
>>> I == Permutation([range(4)])
False
Permutation Printing
====================
There are a few things to note about how Permutations are printed.
.. deprecated:: 1.6
Configuring Permutation printing by setting
``Permutation.print_cyclic`` is deprecated. Users should use the
``perm_cyclic`` flag to the printers, as described below.
1) If you prefer one form (array or cycle) over another, you can set
``init_printing`` with the ``perm_cyclic`` flag.
>>> from sympy import init_printing
>>> p = Permutation(1, 2)(4, 5)(3, 4)
>>> p
Permutation([0, 2, 1, 4, 5, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(1 2)(3 4 5)
2) Regardless of the setting, the array form or the cyclic form can be
obtained as a list, and either of those can be copied and supplied as
the argument to Permutation:
>>> p.array_form
[0, 2, 1, 4, 5, 3]
>>> p.cyclic_form
[[1, 2], [3, 4, 5]]
>>> Permutation(_) == p
True
3) Printing is economical in that as little as possible is printed while
retaining all information about the size of the permutation:
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation([1, 0, 2, 3])
Permutation([1, 0, 2, 3])
>>> Permutation([1, 0, 2, 3], size=20)
Permutation([1, 0], size=20)
>>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20)
Permutation([1, 0, 2, 4, 3], size=20)
>>> p = Permutation([1, 0, 2, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(3)(0 1)
>>> init_printing(perm_cyclic=False, pretty_print=False)
The 2 was not printed, but it is still there, as can be seen with the
``array_form`` and ``size`` attributes:
>>> p.array_form
[1, 0, 2, 3]
>>> p.size
4
Short introduction to other methods
===================================
The permutation can act as a bijective function, telling what element is
located at a given position
>>> q = Permutation([5, 2, 3, 4, 1, 0])
>>> q.array_form[1] # the hard way
2
>>> q(1) # the easy way
2
>>> {i: q(i) for i in range(q.size)} # showing the bijection
{0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0}
The full cyclic form (including singletons) can be obtained:
>>> p.full_cyclic_form
[[0, 1], [2], [3]]
Any permutation can be factored into transpositions of pairs of elements:
>>> Permutation([[1, 2], [3, 4, 5]]).transpositions()
[(1, 2), (3, 5), (3, 4)]
>>> Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form
[[1, 2], [3, 4, 5]]
The number of permutations on a set of n elements is given by n! and is
called the cardinality.
>>> p.size
4
>>> p.cardinality
24
A given permutation has a rank among all the possible permutations of the
same elements, but what that rank is depends on how the permutations are
enumerated. (There are a number of different methods of doing so.) The
lexicographic rank is given by the rank method and this rank is used to
increment a permutation with addition/subtraction:
>>> p.rank()
6
>>> p + 1
Permutation([1, 0, 3, 2])
>>> p.next_lex()
Permutation([1, 0, 3, 2])
>>> _.rank()
7
>>> p.unrank_lex(p.size, rank=7)
Permutation([1, 0, 3, 2])
The product of two permutations p and q is defined as their composition as
functions, (p*q)(i) = q(p(i)) [6]_.
>>> p = Permutation([1, 0, 2, 3])
>>> q = Permutation([2, 3, 1, 0])
>>> list(q*p)
[2, 3, 0, 1]
>>> list(p*q)
[3, 2, 1, 0]
>>> [q(p(i)) for i in range(p.size)]
[3, 2, 1, 0]
The permutation can be 'applied' to any list-like object, not only
Permutations:
>>> p(['zero', 'one', 'four', 'two'])
['one', 'zero', 'four', 'two']
>>> p('zo42')
['o', 'z', '4', '2']
If you have a list of arbitrary elements, the corresponding permutation
can be found with the from_sequence method:
>>> Permutation.from_sequence('SymPy')
Permutation([1, 3, 2, 0, 4])
Checking if a Permutation is contained in a Group
=================================================
Generally, if you have a group of permutations G on n symbols and
you are checking whether a permutation on fewer than n symbols is part
of that group, the check will fail.
Here is an example with n = 5, where we check whether the cycle
(1, 2, 3) is in G:
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> from sympy.combinatorics import Cycle, Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup(Cycle(2, 3)(4, 5), Cycle(1, 2, 3, 4, 5))
>>> p1 = Permutation(Cycle(2, 5, 3))
>>> p2 = Permutation(Cycle(1, 2, 3))
>>> a1 = Permutation(Cycle(1, 2, 3).list(6))
>>> a2 = Permutation(Cycle(1, 2, 3)(5))
>>> a3 = Permutation(Cycle(1, 2, 3),size=6)
>>> for p in [p1,p2,a1,a2,a3]: p, G.contains(p)
((2 5 3), True)
((1 2 3), False)
((5)(1 2 3), True)
((5)(1 2 3), True)
((5)(1 2 3), True)
The check for p2 above will fail.
Checking if p1 is in G works because SymPy knows
G is a group on 5 symbols, and p1 is also on 5 symbols
(its largest element is 5).
For ``a1``, the ``.list(6)`` call extends the permutation so that its
largest element is 5, so the test works as well. In the case of ``a2`` the
permutation is extended in the same way by the singleton ``(5)``,
and in the case of ``a3`` it is extended through the constructor
argument ``size=6``.
There is another way to do this, which is to tell the ``contains``
method that the number of symbols the group is on does not need to
match exactly the number of symbols of the permutation:
>>> G.contains(p2, strict=False)
True
This is done via the ``strict`` argument to the ``contains`` method;
SymPy then tries to extend the permutation on its own and performs the
containment check.
See Also
========
Cycle
References
==========
.. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics
Combinatorics and Graph Theory with Mathematica. Reading, MA:
Addison-Wesley, pp. 3-16, 1990.
.. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial
Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011.
.. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking
permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001),
281-284. DOI=10.1016/S0020-0190(01)00141-7
.. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms'
CRC Press, 1999
.. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O.
Concrete Mathematics: A Foundation for Computer Science, 2nd ed.
Reading, MA: Addison-Wesley, 1994.
.. [6] https://en.wikipedia.org/wiki/Permutation#Product_and_inverse
.. [7] https://en.wikipedia.org/wiki/Lehmer_code
"""
is_Permutation = True
_array_form = None
_cyclic_form = None
_cycle_structure = None
_size = None
_rank = None
def __new__(cls, *args, size=None, **kwargs):
"""
Constructor for the Permutation object from a list or a
list of lists in which all elements of the permutation may
appear only once.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations entered in array-form are left unaltered:
>>> Permutation([0, 2, 1])
Permutation([0, 2, 1])
Permutations entered in cyclic form are converted to array form;
singletons need not be entered, but can be entered to indicate the
largest element:
>>> Permutation([[4, 5, 6], [0, 1]])
Permutation([1, 0, 2, 3, 5, 6, 4])
>>> Permutation([[4, 5, 6], [0, 1], [19]])
Permutation([1, 0, 2, 3, 5, 6, 4], size=20)
All manipulation of permutations assumes that the smallest element
is 0 (in keeping with 0-based indexing in Python) so if the 0 is
missing when entering a permutation in array form, an error will be
raised:
>>> Permutation([2, 1])
Traceback (most recent call last):
...
ValueError: Integers 0 through 2 must be present.
If a permutation is entered in cyclic form, it can be entered without
singletons and the ``size`` specified so those values can be filled
in, otherwise the array form will only extend to the maximum value
in the cycles:
>>> Permutation([[1, 4], [3, 5, 2]], size=10)
Permutation([0, 4, 3, 5, 1, 2], size=10)
>>> _.array_form
[0, 4, 3, 5, 1, 2, 6, 7, 8, 9]
"""
if size is not None:
size = int(size)
#a) ()
#b) (1) = identity
#c) (1, 2) = cycle
#d) ([1, 2, 3]) = array form
#e) ([[1, 2]]) = cyclic form
#f) (Cycle) = conversion to permutation
#g) (Permutation) = adjust size or return copy
ok = True
if not args: # a
return cls._af_new(list(range(size or 0)))
elif len(args) > 1: # c
return cls._af_new(Cycle(*args).list(size))
if len(args) == 1:
a = args[0]
if isinstance(a, cls): # g
if size is None or size == a.size:
return a
return cls(a.array_form, size=size)
if isinstance(a, Cycle): # f
return cls._af_new(a.list(size))
if not is_sequence(a): # b
if size is not None and a + 1 > size:
raise ValueError('size is too small when max is %s' % a)
return cls._af_new(list(range(a + 1)))
if has_variety(is_sequence(ai) for ai in a):
ok = False
else:
ok = False
if not ok:
raise ValueError("Permutation argument must be a list of ints, "
"a list of lists, Permutation or Cycle.")
# safe to assume args are valid; this also makes a copy
# of the args
args = list(args[0])
is_cycle = args and is_sequence(args[0])
if is_cycle: # e
args = [[int(i) for i in c] for c in args]
else: # d
args = [int(i) for i in args]
# if there are n elements present, 0, 1, ..., n-1 should be present
# unless a cycle notation has been provided. A 0 will be added
# for convenience in case one wants to enter permutations where
# counting starts from 1.
temp = flatten(args)
if has_dups(temp) and not is_cycle:
raise ValueError('there were repeated elements.')
temp = set(temp)
if not is_cycle:
if temp != set(range(len(temp))):
raise ValueError('Integers 0 through %s must be present.' %
max(temp))
if size is not None and temp and max(temp) + 1 > size:
raise ValueError('max element should not exceed %s' % (size - 1))
if is_cycle:
# it's not necessarily canonical so we won't store
# it -- use the array form instead
c = Cycle()
for ci in args:
c = c(*ci)
aform = c.list()
else:
aform = list(args)
if size and size > len(aform):
# don't allow for truncation of permutation which
# might split a cycle and lead to an invalid aform
# but do allow the permutation size to be increased
aform.extend(list(range(len(aform), size)))
return cls._af_new(aform)
@classmethod
def _af_new(cls, perm):
"""A method to produce a Permutation object from a list;
the list is bound to the _array_form attribute, so it must
not be modified; this method is meant for internal use only;
the list ``a`` is supposed to be generated as a temporary value
in a method, so p = Perm._af_new(a) is the only object
to hold a reference to ``a``::
Examples
========
>>> from sympy.combinatorics.permutations import Perm
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = [2, 1, 3, 0]
>>> p = Perm._af_new(a)
>>> p
Permutation([2, 1, 3, 0])
"""
p = super().__new__(cls)
p._array_form = perm
p._size = len(perm)
return p
def _hashable_content(self):
# the array_form (a list) is the Permutation arg, so we need to
# return a tuple, instead
return tuple(self.array_form)
@property
def array_form(self):
"""
Return a copy of the attribute _array_form
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> Permutation([[2, 0, 3, 1]]).array_form
[3, 2, 0, 1]
>>> Permutation([2, 0, 3, 1]).array_form
[2, 0, 3, 1]
>>> Permutation([[1, 2], [4, 5]]).array_form
[0, 2, 1, 3, 5, 4]
"""
return self._array_form[:]
def list(self, size=None):
"""Return the permutation as an explicit list, possibly
trimming unmoved elements if size is less than the maximum
element in the permutation; if this is desired, setting
``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Permutation(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
>>> Permutation(3).list(-1)
[]
"""
if not self and size is None:
raise ValueError('must give size for empty Permutation')
rv = self.array_form
if size is not None:
if size > self.size:
rv.extend(list(range(self.size, size)))
else:
# find first value from rhs where rv[i] != i
i = self.size - 1
while rv:
if rv[-1] != i:
break
rv.pop()
i -= 1
return rv
@property
def cyclic_form(self):
"""
This is used to convert to the cyclic notation
from the canonical notation. Singletons are omitted.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 3, 1, 2])
>>> p.cyclic_form
[[1, 3, 2]]
>>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form
[[0, 1], [3, 4]]
See Also
========
array_form, full_cyclic_form
"""
if self._cyclic_form is not None:
return list(self._cyclic_form)
array_form = self.array_form
unchecked = [True] * len(array_form)
cyclic_form = []
for i in range(len(array_form)):
if unchecked[i]:
cycle = []
cycle.append(i)
unchecked[i] = False
j = i
while unchecked[array_form[j]]:
j = array_form[j]
cycle.append(j)
unchecked[j] = False
if len(cycle) > 1:
cyclic_form.append(cycle)
assert cycle == list(minlex(cycle))
cyclic_form.sort()
self._cyclic_form = cyclic_form[:]
return cyclic_form
@property
def full_cyclic_form(self):
"""Return permutation in cyclic form including singletons.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 2, 1]).full_cyclic_form
[[0], [1, 2]]
"""
need = set(range(self.size)) - set(flatten(self.cyclic_form))
rv = self.cyclic_form + [[i] for i in need]
rv.sort()
return rv
@property
def size(self):
"""
Returns the number of elements in the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([[3, 2], [0, 1]]).size
4
See Also
========
cardinality, length, order, rank
"""
return self._size
def support(self):
"""Return the elements in permutation, P, for which P[i] != i.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[3, 2], [0, 1], [4]])
>>> p.array_form
[1, 0, 3, 2, 4]
>>> p.support()
[0, 1, 2, 3]
"""
a = self.array_form
return [i for i, e in enumerate(a) if a[i] != i]
def __add__(self, other):
"""Return permutation that is other higher in rank than self.
The rank is the lexicographical rank, with the identity permutation
having rank of 0.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> I = Permutation([0, 1, 2, 3])
>>> a = Permutation([2, 1, 3, 0])
>>> I + a.rank() == a
True
See Also
========
__sub__, inversion_vector
"""
rank = (self.rank() + other) % self.cardinality
rv = self.unrank_lex(self.size, rank)
rv._rank = rank
return rv
def __sub__(self, other):
"""Return the permutation that is other lower in rank than self.
See Also
========
__add__
"""
return self.__add__(-other)
@staticmethod
def rmul(*args):
"""
Return product of Permutations [a, b, c, ...] as the Permutation whose
ith value is a(b(c(i))).
a, b, c, ... can be Permutation objects or tuples.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(Permutation.rmul(a, b))
[1, 2, 0]
>>> [a(b(i)) for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
Notes
=====
All items in the sequence will be parsed by Permutation as
necessary as long as the first item is a Permutation:
>>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b)
True
The reverse order of arguments will raise a TypeError.
"""
rv = args[0]
for i in range(1, len(args)):
rv = args[i]*rv
return rv
@classmethod
def rmul_with_af(cls, *args):
"""
same as rmul, but the elements of args are Permutation objects
which have _array_form
"""
a = [x._array_form for x in args]
rv = cls._af_new(_af_rmuln(*a))
return rv
def mul_inv(self, other):
"""
other*~self, self and other have _array_form
"""
a = _af_invert(self._array_form)
b = other._array_form
return self._af_new(_af_rmul(a, b))
def __rmul__(self, other):
"""This is needed to coerce other to Permutation in rmul."""
cls = type(self)
return cls(other)*self
def __mul__(self, other):
"""
Return the product a*b as a Permutation; the ith value is b(a(i)).
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
This handles operands in reverse order compared to _af_rmul and rmul:
>>> al = list(a); bl = list(b)
>>> _af_rmul(al, bl)
[1, 2, 0]
>>> [al[bl[i]] for i in range(3)]
[1, 2, 0]
It is acceptable for the arrays to have different lengths; the shorter
one will be padded to match the longer one:
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> b*Permutation([1, 0])
Permutation([1, 2, 0])
>>> Permutation([1, 0])*b
Permutation([2, 0, 1])
Coercion will also handle conversion of a
single list to the left of a Permutation:
>>> [0, 1]*a # no change: 2-element identity
Permutation([1, 0, 2])
>>> [[0, 1]]*a # exchange first two elements
Permutation([0, 1, 2])
You cannot use more than 1 cycle notation in a product of cycles
since coercion can only handle one argument to the left. To handle
multiple cycles it is convenient to use Cycle instead of Permutation:
>>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP
>>> from sympy.combinatorics.permutations import Cycle
>>> Cycle(1, 2)(2, 3)
(1 3 2)
"""
from sympy.combinatorics.perm_groups import PermutationGroup, Coset
if isinstance(other, PermutationGroup):
return Coset(self, other, dir='-')
a = self.array_form
# __rmul__ makes sure the other is a Permutation
b = other.array_form
if not b:
perm = a
else:
b.extend(list(range(len(b), len(a))))
perm = [b[i] for i in a] + b[len(a):]
return self._af_new(perm)
def commutes_with(self, other):
"""
Checks if the elements are commuting.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> a = Permutation([1, 4, 3, 0, 2, 5])
>>> b = Permutation([0, 1, 2, 3, 4, 5])
>>> a.commutes_with(b)
True
>>> b = Permutation([2, 3, 5, 4, 1, 0])
>>> a.commutes_with(b)
False
"""
a = self.array_form
b = other.array_form
return _af_commutes_with(a, b)
def __pow__(self, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> p**4
Permutation([0, 1, 2, 3])
"""
if isinstance(n, Permutation):
raise NotImplementedError(
'p**p is not defined; do you mean p^p (conjugate)?')
n = int(n)
return self._af_new(_af_pow(self.array_form, n))
def __rxor__(self, i):
"""Return self(i) when ``i`` is an int.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(1, 2, 9)
>>> 2^p == p(2) == 9
True
"""
if int(i) == i:
return self(i)
else:
raise NotImplementedError(
"i^p = p(i) when i is an integer, not %s." % i)
def __xor__(self, h):
"""Return the conjugate permutation ``~h*self*h` `.
Explanation
===========
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and
``b = ~h*a*h`` and both have the same cycle structure.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(1, 2, 9)
>>> q = Permutation(6, 9, 8)
>>> p*q != q*p
True
Calculate and check properties of the conjugate:
>>> c = p^q
>>> c == ~q*p*q and p == q*c*~q
True
The expression q^p^r is equivalent to q^(p*r):
>>> r = Permutation(9)(4, 6, 8)
>>> q^p^r == q^(p*r)
True
If the term to the left of the conjugate operator, i, is an integer
then this is interpreted as selecting the ith element from the
permutation to the right:
>>> all(i^p == p(i) for i in range(p.size))
True
Note that the * operator has higher precedence than the ^ operator:
>>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4)
True
Notes
=====
In Python the precedence rule is p^q^r = (p^q)^r which differs
in general from p^(q^r)
>>> q^p^r
(9)(1 4 8)
>>> q^(p^r)
(9)(1 8 6)
For a given r and p, both of the following are conjugates of p:
~r*p*r and r*p*~r. But these are not necessarily the same:
>>> ~r*p*r == r*p*~r
True
>>> p = Permutation(1, 2, 9)(5, 6)
>>> ~r*p*r == r*p*~r
False
The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent
to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to
this method:
>>> p^~r == r*p*~r
True
"""
if self.size != h.size:
raise ValueError("The permutations must be of equal size.")
a = [None]*self.size
h = h._array_form
p = self._array_form
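# Build the array form of the conjugate ~h*self*h: the image of h[i] is
# h[p[i]].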
for i in range(self.size):
a[h[i]] = h[p[i]]
return self._af_new(a)
def transpositions(self):
"""
Return the permutation decomposed into a list of transpositions.
Explanation
===========
It is always possible to express a permutation as the product of
transpositions, see [1]
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]])
>>> t = p.transpositions()
>>> t
[(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)]
>>> print(''.join(str(c) for c in t))
(0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2)
>>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties
"""
a = self.cyclic_form
res = []
for x in a:
nx = len(x)
if nx == 2:
res.append(tuple(x))
elif nx > 2:
first = x[0]
for y in x[nx - 1:0:-1]:
res.append((first, y))
return res
@classmethod
def from_sequence(cls, i, key=None):
"""Return the permutation needed to obtain ``i`` from the sorted
elements of ``i``. If custom sorting is desired, a key can be given.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.from_sequence('SymPy')
(4)(0 1 3)
>>> _(sorted("SymPy"))
['S', 'y', 'm', 'P', 'y']
>>> Permutation.from_sequence('SymPy', key=lambda x: x.lower())
(4)(0 2)(1 3)
"""
ic = list(zip(i, list(range(len(i)))))
if key:
ic.sort(key=lambda x: key(x[0]))
else:
ic.sort()
return ~Permutation([i[1] for i in ic])
def __invert__(self):
"""
Return the inverse of the permutation.
A permutation multiplied by its inverse is the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([[2, 0], [3, 1]])
>>> ~p
Permutation([2, 3, 0, 1])
>>> _ == p**-1
True
>>> p*~p == ~p*p == Permutation([0, 1, 2, 3])
True
"""
return self._af_new(_af_invert(self._array_form))
def __iter__(self):
"""Yield elements from array form.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> list(Permutation(range(3)))
[0, 1, 2]
"""
yield from self.array_form
def __repr__(self):
return srepr(self)
def __call__(self, *i):
"""
Allows applying a permutation instance as a bijective function.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> [p(i) for i in range(4)]
[2, 3, 0, 1]
If an array is given then the permutation selects the items
from the array (i.e. the permutation is applied to the array):
>>> from sympy.abc import x
>>> p([x, 1, 0, x**2])
[0, x**2, x, 1]
"""
# list indices can be Integer or int; leave this
# as it is (don't test or convert it) because this
# gets called a lot and should be fast
if len(i) == 1:
i = i[0]
if not isinstance(i, Iterable):
i = as_int(i)
if i < 0 or i >= self.size:
raise TypeError(
"{} should be an integer between 0 and {}"
.format(i, self.size-1))
return self._array_form[i]
# P([a, b, c])
if len(i) != self.size:
raise TypeError(
"{} should have the length {}.".format(i, self.size))
return [i[j] for j in self._array_form]
# P(1, 2, 3)
return self*Permutation(Cycle(*i), size=self.size)
def atoms(self):
"""
Returns all the elements of a permutation
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2, 3, 4, 5]).atoms()
{0, 1, 2, 3, 4, 5}
>>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms()
{0, 1, 2, 3, 4, 5}
"""
return set(self.array_form)
def apply(self, i):
r"""Apply the permutation to an expression.
Parameters
==========
i : Expr
It should be an integer between $0$ and $n-1$ where $n$
is the size of the permutation.
If it is a symbol or a symbolic expression that can
have integer values, an ``AppliedPermutation`` object
will be returned which can represent an unevaluated
function.
Notes
=====
Any permutation can be defined as a bijective function
$\sigma : \{ 0, 1, \dots, n-1 \} \rightarrow \{ 0, 1, \dots, n-1 \}$
where $n$ denotes the size of the permutation.
The definition could even be extended to any set with distinct
elements, so that a permutation could be applied to, for example,
real numbers; however, this is not implemented, for
computational reasons and for consistency with the group theory
module.
This function is similar to the ``__call__`` magic method; however,
``__call__`` also has other uses, such as
permuting an array or attaching new cycles, which are
not always mathematically consistent.
``apply`` also guarantees that the return type is a SymPy integer,
so it is safe to use assumptions on the result.
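A minimal illustration of the integer case (based on the implementation
below; a symbolic argument instead returns an unevaluated
``AppliedPermutation``):
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(0, 1, 2)
>>> p.apply(0)
1
>>> _.is_Integer
True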
"""
i = _sympify(i)
if i.is_integer is False:
raise NotImplementedError("{} should be an integer.".format(i))
n = self.size
if (i < 0) == True or (i >= n) == True:
raise NotImplementedError(
"{} should be an integer between 0 and {}".format(i, n-1))
if i.is_Integer:
return Integer(self._array_form[i])
return AppliedPermutation(self, i)
def next_lex(self):
"""
Returns the next permutation in lexicographical order.
If self is the last permutation in lexicographical order
it returns None.
See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([2, 3, 1, 0])
>>> p = Permutation([2, 3, 1, 0]); p.rank()
17
>>> p = p.next_lex(); p.rank()
18
See Also
========
rank, unrank_lex
"""
perm = self.array_form[:]
n = len(perm)
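# Classic next-lexicographic-permutation algorithm:
# 1) scan from the right for the last index i with perm[i] < perm[i + 1];
# 2) swap perm[i] with the rightmost entry that is larger than perm[i];
# 3) reverse the suffix perm[i + 1:].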
i = n - 2
while perm[i + 1] < perm[i]:
i -= 1
if i == -1:
return None
else:
j = n - 1
while perm[j] < perm[i]:
j -= 1
perm[j], perm[i] = perm[i], perm[j]
i += 1
j = n - 1
while i < j:
perm[j], perm[i] = perm[i], perm[j]
i += 1
j -= 1
return self._af_new(perm)
@classmethod
def unrank_nonlex(self, n, r):
"""
This is a linear time unranking algorithm that does not
respect lexicographic order [3].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_nonlex(4, 5)
Permutation([2, 0, 3, 1])
>>> Permutation.unrank_nonlex(4, -1)
Permutation([0, 1, 2, 3])
See Also
========
next_nonlex, rank_nonlex
"""
def _unrank1(n, r, a):
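# Swap position n - 1 with position r % n and recurse on the prefix with
# r // n: the successive remainders act as digits selecting which element
# ends up in each position.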
if n > 0:
a[n - 1], a[r % n] = a[r % n], a[n - 1]
_unrank1(n - 1, r//n, a)
id_perm = list(range(n))
n = int(n)
r = r % ifac(n)
_unrank1(n, r, id_perm)
return self._af_new(id_perm)
def rank_nonlex(self, inv_perm=None):
"""
This is a linear time ranking algorithm that does not
enforce lexicographic order [3].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_nonlex()
23
See Also
========
next_nonlex, unrank_nonlex
"""
def _rank1(n, perm, inv_perm):
if n == 1:
return 0
s = perm[n - 1]
t = inv_perm[n - 1]
perm[n - 1], perm[t] = perm[t], s
inv_perm[n - 1], inv_perm[s] = inv_perm[s], t
return s + n*_rank1(n - 1, perm, inv_perm)
if inv_perm is None:
inv_perm = (~self).array_form
if not inv_perm:
return 0
perm = self.array_form[:]
r = _rank1(len(perm), perm, inv_perm)
return r
def next_nonlex(self):
"""
Returns the next permutation in nonlex order [3].
If self is the last permutation in this order it returns None.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex()
5
>>> p = p.next_nonlex(); p
Permutation([3, 0, 1, 2])
>>> p.rank_nonlex()
6
See Also
========
rank_nonlex, unrank_nonlex
"""
r = self.rank_nonlex()
if r == ifac(self.size) - 1:
return None
return self.unrank_nonlex(self.size, r + 1)
def rank(self):
"""
Returns the lexicographic rank of the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank()
0
>>> p = Permutation([3, 2, 1, 0])
>>> p.rank()
23
See Also
========
next_lex, unrank_lex, cardinality, length, order, size
"""
if self._rank is not None:
return self._rank
rank = 0
rho = self.array_form[:]
n = self.size - 1
size = n + 1
psize = int(ifac(n))
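# Lehmer code: rho[j] is the index of the j-th entry among the values
# not yet used; weight it by (size - 1 - j)! (psize) and renumber the
# remaining entries so the same invariant holds at the next step.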
for j in range(size - 1):
rank += rho[j]*psize
for i in range(j + 1, size):
if rho[i] > rho[j]:
rho[i] -= 1
psize //= n
n -= 1
self._rank = rank
return rank
@property
def cardinality(self):
"""
Returns the number of all possible permutations.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.cardinality
24
See Also
========
length, order, rank, size
"""
return int(ifac(self.size))
def parity(self):
"""
Computes the parity of a permutation.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.parity()
0
>>> p = Permutation([3, 2, 0, 1])
>>> p.parity()
1
See Also
========
_af_parity
"""
if self._cyclic_form is not None:
return (self.size - self.cycles) % 2
return _af_parity(self.array_form)
@property
def is_even(self):
"""
Checks if a permutation is even.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_even
True
>>> p = Permutation([3, 2, 1, 0])
>>> p.is_even
True
See Also
========
is_odd
"""
return not self.is_odd
@property
def is_odd(self):
"""
Checks if a permutation is odd.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_odd
False
>>> p = Permutation([3, 2, 0, 1])
>>> p.is_odd
True
See Also
========
is_even
"""
return bool(self.parity() % 2)
@property
def is_Singleton(self):
"""
Checks to see if the permutation contains only one number and is
thus the only possible permutation of this set of numbers
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0]).is_Singleton
True
>>> Permutation([0, 1]).is_Singleton
False
See Also
========
is_Empty
"""
return self.size == 1
@property
def is_Empty(self):
"""
Checks to see if the permutation is a set with zero elements
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([]).is_Empty
True
>>> Permutation([0]).is_Empty
False
See Also
========
is_Singleton
"""
return self.size == 0
@property
def is_identity(self):
return self.is_Identity
@property
def is_Identity(self):
"""
Returns True if the Permutation is an identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([])
>>> p.is_Identity
True
>>> p = Permutation([[0], [1], [2]])
>>> p.is_Identity
True
>>> p = Permutation([0, 1, 2])
>>> p.is_Identity
True
>>> p = Permutation([0, 2, 1])
>>> p.is_Identity
False
See Also
========
order
"""
af = self.array_form
return not af or all(i == af[i] for i in range(self.size))
def ascents(self):
"""
Returns the positions of ascents in a permutation, i.e., the locations
where p[i] < p[i+1].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.ascents()
[1, 2]
See Also
========
descents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]]
return pos
def descents(self):
"""
Returns the positions of descents in a permutation, i.e., the locations
where p[i] > p[i+1].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.descents()
[0, 3]
See Also
========
ascents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]]
return pos
def max(self):
"""
The maximum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([1, 0, 2, 3, 4])
>>> p.max()
1
See Also
========
min, descents, ascents, inversions
"""
max = 0
a = self.array_form
for i in range(len(a)):
if a[i] != i and a[i] > max:
max = a[i]
return max
def min(self):
"""
The minimum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 4, 3, 2])
>>> p.min()
2
See Also
========
max, descents, ascents, inversions
"""
a = self.array_form
min = len(a)
for i in range(len(a)):
if a[i] != i and a[i] < min:
min = a[i]
return min
def inversions(self):
"""
Computes the number of inversions of a permutation.
Explanation
===========
An inversion is where i > j but p[i] < p[j].
For small length of p, it iterates over all i and j
values and calculates the number of inversions.
For large length of p, it uses a variation of merge
sort to calculate the number of inversions.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3, 4, 5])
>>> p.inversions()
0
>>> Permutation([3, 2, 1, 0]).inversions()
6
See Also
========
descents, ascents, min, max
References
==========
.. [1] http://www.cp.eng.chula.ac.th/~piak/teaching/algo/algo2008/count-inv.htm
"""
inversions = 0
a = self.array_form
n = len(a)
if n < 130:
for i in range(n - 1):
b = a[i]
for c in a[i + 1:]:
if b > c:
inversions += 1
else:
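# For longer permutations, count inversions with a bottom-up merge sort:
# each merge of two sorted runs adds the number of cross-run inversions
# (see _merge below).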
k = 1
right = 0
arr = a[:]
temp = a[:]
while k < n:
i = 0
while i + k < n:
right = i + k * 2 - 1
if right >= n:
right = n - 1
inversions += _merge(arr, temp, i, i + k, right)
i = i + k * 2
k = k * 2
return inversions
def commutator(self, x):
"""Return the commutator of ``self`` and ``x``: ``~x*~self*x*self``
If f and g are part of a group, G, then the commutator of f and g
is the group identity iff f and g commute, i.e. fg == gf.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([0, 2, 3, 1])
>>> x = Permutation([2, 0, 3, 1])
>>> c = p.commutator(x); c
Permutation([2, 1, 3, 0])
>>> c == ~x*~p*x*p
True
>>> I = Permutation(3)
>>> p = [I + i for i in range(6)]
>>> for i in range(len(p)):
... for j in range(len(p)):
... c = p[i].commutator(p[j])
... if p[i]*p[j] == p[j]*p[i]:
... assert c == I
... else:
... assert c != I
...
References
==========
.. [1] https://en.wikipedia.org/wiki/Commutator
"""
a = self.array_form
b = x.array_form
n = len(a)
if len(b) != n:
raise ValueError("The permutations must be of equal size.")
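# inva and invb hold the array forms of ~self and ~x; the comprehension
# below evaluates a[b[inva[invb[j]]]] = p(x(~p(~x(j)))) for each j, which
# is the array form of ~x*~self*x*self.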
inva = [None]*n
for i in range(n):
inva[a[i]] = i
invb = [None]*n
for i in range(n):
invb[b[i]] = i
return self._af_new([a[b[inva[i]]] for i in invb])
def signature(self):
"""
Gives the signature of the permutation needed to place the
elements of the permutation in canonical order.
The signature is calculated as (-1)^<number of inversions>
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2])
>>> p.inversions()
0
>>> p.signature()
1
>>> q = Permutation([0,2,1])
>>> q.inversions()
1
>>> q.signature()
-1
See Also
========
inversions
"""
if self.is_even:
return 1
return -1
def order(self):
"""
Computes the order of a permutation.
When the permutation is raised to the power of its
order it equals the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 1, 5, 2, 4, 0])
>>> p.order()
4
>>> (p**(p.order()))
Permutation([], size=6)
See Also
========
identity, cardinality, length, rank, size
"""
return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1)
def length(self):
"""
Returns the number of integers moved by a permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 3, 2, 1]).length()
2
>>> Permutation([[0, 1], [2, 3]]).length()
4
See Also
========
min, max, support, cardinality, order, rank, size
"""
return len(self.support())
@property
def cycle_structure(self):
"""Return the cycle structure of the permutation as a dictionary
indicating the multiplicity of each cycle length.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation(3).cycle_structure
{1: 4}
>>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure
{2: 2, 3: 1}
"""
if self._cycle_structure:
rv = self._cycle_structure
else:
rv = defaultdict(int)
singletons = self.size
for c in self.cyclic_form:
rv[len(c)] += 1
singletons -= len(c)
if singletons:
rv[1] = singletons
self._cycle_structure = rv
return dict(rv) # make a copy
@property
def cycles(self):
"""
Returns the number of cycles contained in the permutation
(including singletons).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2]).cycles
3
>>> Permutation([0, 1, 2]).full_cyclic_form
[[0], [1], [2]]
>>> Permutation(0, 1)(2, 3).cycles
2
See Also
========
sympy.functions.combinatorial.numbers.stirling
"""
return len(self.full_cyclic_form)
def index(self):
"""
Returns the index of a permutation.
The index of a permutation is the sum of all subscripts j such
that p[j] is greater than p[j+1].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([3, 0, 2, 1, 4])
>>> p.index()
2
"""
a = self.array_form
return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]])
def runs(self):
"""
Returns the runs of a permutation.
An ascending sequence in a permutation is called a run [5].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8])
>>> p.runs()
[[2, 5, 7], [3, 6], [0, 1, 4, 8]]
>>> q = Permutation([1,3,2,0])
>>> q.runs()
[[1, 3], [2], [0]]
"""
return runs(self.array_form)
def inversion_vector(self):
"""Return the inversion vector of the permutation.
The inversion vector consists of elements whose value
indicates the number of elements in the permutation
that are smaller than it and lie to its right.
The inversion vector is the same as the Lehmer encoding of a
permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2])
>>> p.inversion_vector()
[4, 7, 0, 5, 0, 2, 1, 1]
>>> p = Permutation([3, 2, 1, 0])
>>> p.inversion_vector()
[3, 2, 1]
The inversion vector increases lexicographically with the rank
of the permutation, the i-th element cycling through 0..i.
>>> p = Permutation(2)
>>> while p:
... print('%s %s %s' % (p, p.inversion_vector(), p.rank()))
... p = p.next_lex()
(2) [0, 0] 0
(1 2) [0, 1] 1
(2)(0 1) [1, 0] 2
(0 1 2) [1, 1] 3
(0 2 1) [2, 0] 4
(0 2) [2, 1] 5
See Also
========
from_inversion_vector
"""
self_array_form = self.array_form
n = len(self_array_form)
inversion_vector = [0] * (n - 1)
for i in range(n - 1):
val = 0
for j in range(i + 1, n):
if self_array_form[j] < self_array_form[i]:
val += 1
inversion_vector[i] = val
return inversion_vector
def rank_trotterjohnson(self):
"""
Returns the Trotter Johnson rank, which we get from the minimal
change algorithm. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_trotterjohnson()
0
>>> p = Permutation([0, 2, 1, 3])
>>> p.rank_trotterjohnson()
7
See Also
========
unrank_trotterjohnson, next_trotterjohnson
"""
if self.array_form == [] or self.is_Identity:
return 0
if self.array_form == [1, 0]:
return 1
perm = self.array_form
n = self.size
rank = 0
for j in range(1, n):
k = 1
i = 0
while perm[i] != j:
if perm[i] < j:
k += 1
i += 1
j1 = j + 1
if rank % 2 == 0:
rank = j1*rank + j1 - k
else:
rank = j1*rank + k - 1
return rank
@classmethod
def unrank_trotterjohnson(cls, size, rank):
"""
Trotter Johnson permutation unranking. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_trotterjohnson(5, 10)
Permutation([0, 3, 1, 2, 4])
See Also
========
rank_trotterjohnson, next_trotterjohnson
"""
perm = [0]*size
r2 = 0
n = ifac(size)
pj = 1
for j in range(2, size + 1):
pj *= j
r1 = (rank * pj) // n
k = r1 - j*r2
if r2 % 2 == 0:
for i in range(j - 1, j - k - 1, -1):
perm[i] = perm[i - 1]
perm[j - k - 1] = j - 1
else:
for i in range(j - 1, k, -1):
perm[i] = perm[i - 1]
perm[k] = j - 1
r2 = r1
return cls._af_new(perm)
def next_trotterjohnson(self):
"""
Returns the next permutation in Trotter-Johnson order.
If self is the last permutation it returns None.
See [4] section 2.4. If it is desired to generate all such
permutations, they can be generated in order more quickly
with the ``generate_bell`` function.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 0, 2, 1])
>>> p.rank_trotterjohnson()
4
>>> p = p.next_trotterjohnson(); p
Permutation([0, 3, 2, 1])
>>> p.rank_trotterjohnson()
5
See Also
========
rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell
"""
pi = self.array_form[:]
n = len(pi)
st = 0
rho = pi[:]
done = False
m = n-1
while m > 0 and not done:
d = rho.index(m)
for i in range(d, m):
rho[i] = rho[i + 1]
par = _af_parity(rho[:m])
if par == 1:
if d == m:
m -= 1
else:
pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d]
done = True
else:
if d == 0:
m -= 1
st += 1
else:
pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d]
done = True
if m == 0:
return None
return self._af_new(pi)
def get_precedence_matrix(self):
"""
Gets the precedence matrix. This is used for computing the
distance between two permutations.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation.josephus(3, 6, 1)
>>> p
Permutation([2, 5, 3, 1, 4, 0])
>>> p.get_precedence_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0]])
See Also
========
get_precedence_distance, get_adjacency_matrix, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
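# m[perm[i], perm[j]] = 1 records that perm[i] appears somewhere before
# perm[j] in the permutation (i < j).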
for i in range(m.rows):
for j in range(i + 1, m.cols):
m[perm[i], perm[j]] = 1
return m
def get_precedence_distance(self, other):
"""
Computes the precedence distance between two permutations.
Explanation
===========
Suppose p and p' represent n jobs. The precedence metric
counts the number of times a job j is preceded by job i
in both p and p'. This metric is commutative.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([2, 0, 4, 3, 1])
>>> q = Permutation([3, 1, 2, 4, 0])
>>> p.get_precedence_distance(q)
7
>>> q.get_precedence_distance(p)
7
See Also
========
get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance
"""
if self.size != other.size:
raise ValueError("The permutations must be of equal size.")
self_prec_mat = self.get_precedence_matrix()
other_prec_mat = other.get_precedence_matrix()
n_prec = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_prec_mat[i, j] * other_prec_mat[i, j] == 1:
n_prec += 1
d = self.size * (self.size - 1)//2 - n_prec
return d
def get_adjacency_matrix(self):
"""
Computes the adjacency matrix of a permutation.
Explanation
===========
If job i is adjacent to job j in a permutation p
then we set m[i, j] = 1 where m is the adjacency
matrix of p.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation.josephus(3, 6, 1)
>>> p.get_adjacency_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
>>> q = Permutation([0, 1, 2, 3])
>>> q.get_adjacency_matrix()
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]])
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(self.size - 1):
m[perm[i], perm[i + 1]] = 1
return m
def get_adjacency_distance(self, other):
"""
Computes the adjacency distance between two permutations.
Explanation
===========
This metric counts the number of times a pair i,j of jobs is
adjacent in both p and p'. If n_adj is this quantity then
the adjacency distance is n - n_adj - 1 [1].
[1] Reeves, Colin R. 'Landscapes, Operators and Heuristic Search', Annals
of Operations Research, 86, pp. 473-490, 1999.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> p.get_adjacency_distance(q)
3
>>> r = Permutation([0, 2, 1, 4, 3])
>>> p.get_adjacency_distance(r)
4
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_matrix
"""
if self.size != other.size:
raise ValueError("The permutations must be of the same size.")
self_adj_mat = self.get_adjacency_matrix()
other_adj_mat = other.get_adjacency_matrix()
n_adj = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_adj_mat[i, j] * other_adj_mat[i, j] == 1:
n_adj += 1
d = self.size - n_adj - 1
return d
def get_positional_distance(self, other):
"""
Computes the positional distance between two permutations.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> r = Permutation([3, 1, 4, 0, 2])
>>> p.get_positional_distance(q)
12
>>> p.get_positional_distance(r)
12
See Also
========
get_precedence_distance, get_adjacency_distance
"""
a = self.array_form
b = other.array_form
if len(a) != len(b):
raise ValueError("The permutations must be of the same size.")
return sum([abs(a[i] - b[i]) for i in range(len(a))])
@classmethod
def josephus(cls, m, n, s=1):
"""Return as a permutation the shuffling of range(n) using the Josephus
scheme in which every m-th item is selected until all have been chosen.
The returned permutation has elements listed by the order in which they
were selected.
The parameter ``s`` stops the selection process when there are ``s``
items remaining and these are selected by continuing the selection,
counting by 1 rather than by ``m``.
Consider selecting every 3rd item from 6 until only 2 remain::
choices chosen
======== ======
012345
01 345 2
01 34 25
01 4 253
0 4 2531
0 25314
253140
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.josephus(3, 6, 2).array_form
[2, 5, 3, 1, 4, 0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Flavius_Josephus
.. [2] https://en.wikipedia.org/wiki/Josephus_problem
.. [3] http://www.wou.edu/~burtonl/josephus.html
"""
from collections import deque
m -= 1
Q = deque(list(range(n)))
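# Rotating the deque m - 1 times (m was decremented above) and popping
# selects every m-th remaining item; once max(s, 1) items are left they
# are appended in their current order.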
perm = []
while len(Q) > max(s, 1):
for dp in range(m):
Q.append(Q.popleft())
perm.append(Q.popleft())
perm.extend(list(Q))
return cls(perm)
@classmethod
def from_inversion_vector(cls, inversion):
"""
Calculates the permutation from the inversion vector.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.from_inversion_vector([3, 2, 1, 0, 0])
Permutation([3, 2, 1, 0, 4, 5])
"""
size = len(inversion)
N = list(range(size + 1))
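# At step k, pick the inversion[k]-th smallest value still unused;
# exactly inversion[k] of the smaller unused values will then end up to
# its right.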
perm = []
try:
for k in range(size):
val = N[inversion[k]]
perm.append(val)
N.remove(val)
except IndexError:
raise ValueError("The inversion vector is not valid.")
perm.extend(N)
return cls._af_new(perm)
@classmethod
def random(cls, n):
"""
Generates a random permutation of length ``n``.
Uses the underlying Python pseudo-random number generator.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
True
"""
perm_array = list(range(n))
random.shuffle(perm_array)
return cls._af_new(perm_array)
@classmethod
def unrank_lex(cls, size, rank):
"""
Lexicographic permutation unranking.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = Permutation.unrank_lex(5, 10)
>>> a.rank()
10
>>> a
Permutation([0, 2, 4, 1, 3])
See Also
========
rank, next_lex
"""
perm_array = [0] * size
psize = 1
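# Extract the factorial-base digits of ``rank`` from least to most
# significant, filling positions from right to left; entries already
# placed to the right are bumped up by one when they are >= the new
# digit, keeping all values distinct.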
for i in range(size):
new_psize = psize*(i + 1)
d = (rank % new_psize) // psize
rank -= d*psize
perm_array[size - i - 1] = d
for j in range(size - i, size):
if perm_array[j] > d - 1:
perm_array[j] += 1
psize = new_psize
return cls._af_new(perm_array)
def resize(self, n):
"""Resize the permutation to the new size ``n``.
Parameters
==========
n : int
The new size of the permutation.
Raises
======
ValueError
If the permutation cannot be resized to the given size.
This may only happen when resized to a smaller size than
the original.
Examples
========
>>> from sympy.combinatorics import Permutation
Increasing the size of a permutation:
>>> p = Permutation(0, 1, 2)
>>> p = p.resize(5)
>>> p
(4)(0 1 2)
Decreasing the size of the permutation:
>>> p = p.resize(4)
>>> p
(3)(0 1 2)
If resizing to the specific size breaks the cycles:
>>> p.resize(2)
Traceback (most recent call last):
...
ValueError: The permutation cannot be resized to 2 because the
cycle (0, 1, 2) may break.
"""
aform = self.array_form
l = len(aform)
if n > l:
aform += list(range(l, n))
return Permutation._af_new(aform)
elif n < l:
cyclic_form = self.full_cyclic_form
new_cyclic_form = []
for cycle in cyclic_form:
cycle_min = min(cycle)
cycle_max = max(cycle)
if cycle_min <= n-1:
if cycle_max > n-1:
raise ValueError(
"The permutation cannot be resized to {} "
"because the cycle {} may break."
.format(n, tuple(cycle)))
new_cyclic_form.append(cycle)
return Permutation(new_cyclic_form)
return self
# XXX Deprecated flag
print_cyclic = None
def _merge(arr, temp, left, mid, right):
"""
Merges two sorted arrays and calculates the inversion count.
Helper function for calculating inversions. This method is
for internal use only.
"""
i = k = left
j = mid
inv_count = 0
while i < mid and j <= right:
if arr[i] < arr[j]:
temp[k] = arr[i]
k += 1
i += 1
else:
temp[k] = arr[j]
k += 1
j += 1
inv_count += (mid - i)
while i < mid:
temp[k] = arr[i]
k += 1
i += 1
if j <= right:
k += right - j + 1
j += right - j + 1
arr[left:k + 1] = temp[left:k + 1]
else:
arr[left:right + 1] = temp[left:right + 1]
return inv_count
Perm = Permutation
_af_new = Perm._af_new
class AppliedPermutation(Expr):
"""A permutation applied to a symbolic variable.
Parameters
==========
perm : Permutation
x : Expr
Examples
========
>>> from sympy import Symbol
>>> from sympy.combinatorics import Permutation
Creating a symbolic permutation function application:
>>> x = Symbol('x')
>>> p = Permutation(0, 1, 2)
>>> p.apply(x)
AppliedPermutation((0 1 2), x)
>>> _.subs(x, 1)
2
"""
def __new__(cls, perm, x, evaluate=None):
if evaluate is None:
evaluate = global_parameters.evaluate
perm = _sympify(perm)
x = _sympify(x)
if not isinstance(perm, Permutation):
raise ValueError("{} must be a Permutation instance."
.format(perm))
if evaluate:
if x.is_Integer:
return perm.apply(x)
obj = super().__new__(cls, perm, x)
return obj
@dispatch(Permutation, Permutation)
def _eval_is_eq(lhs, rhs):
if lhs._size != rhs._size:
return None
return lhs._array_form == rhs._array_form
|
c7a5939021634b3765d8a2b48ccb3b264a9c351261d62510e1c219b48a87a0ec | from __future__ import annotations
from sympy.core import S
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol, symbols as _symbols
from sympy.core.sympify import CantSympify
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import flatten, is_sequence
from sympy.utilities.magic import pollute
from sympy.utilities.misc import as_int
@public
def free_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> x**2*y**-1
x**2*y**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group,) + tuple(_free_group.generators)
@public
def xfree_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import xfree_group
>>> F, (x, y, z) = xfree_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> y**2*x**-2*z**-1
y**2*x**-2*z**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group, _free_group.generators)
@public
def vfree_group(symbols):
"""Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols
into the global namespace.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import vfree_group
>>> vfree_group("x, y, z")
<free group on the generators (x, y, z)>
>>> x**2*y**-2*z # noqa: F821
x**2*y**-2*z
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
pollute([sym.name for sym in _free_group.symbols], _free_group.generators)
return _free_group
def _parse_symbols(symbols):
if not symbols:
return tuple()
if isinstance(symbols, str):
return _symbols(symbols, seq=True)
elif isinstance(symbols, (Expr, FreeGroupElement)):
return (symbols,)
elif is_sequence(symbols):
if all(isinstance(s, str) for s in symbols):
return _symbols(symbols)
elif all(isinstance(s, Expr) for s in symbols):
return symbols
raise ValueError("The type of `symbols` must be one of the following: "
"a str, Symbol/Expr or a sequence of "
"one of these types")
##############################################################################
# FREE GROUP #
##############################################################################
_free_group_cache: dict[int, FreeGroup] = {}
class FreeGroup(DefaultPrinting):
"""
Free group with a finite or infinite number of generators. Its input API
is that of a str, Symbol/Expr or a sequence of one of
these types (which may be empty).
See Also
========
sympy.polys.rings.PolyRing
References
==========
.. [1] http://www.gap-system.org/Manuals/doc/ref/chap37.html
.. [2] https://en.wikipedia.org/wiki/Free_group
"""
is_associative = True
is_group = True
is_FreeGroup = True
is_PermutationGroup = False
relators: list[Expr] = []
def __new__(cls, symbols):
symbols = tuple(_parse_symbols(symbols))
rank = len(symbols)
_hash = hash((cls.__name__, symbols, rank))
obj = _free_group_cache.get(_hash)
if obj is None:
obj = object.__new__(cls)
obj._hash = _hash
obj._rank = rank
# dtype method is used to create new instances of FreeGroupElement
obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj})
obj.symbols = symbols
obj.generators = obj._generators()
obj._gens_set = set(obj.generators)
for symbol, generator in zip(obj.symbols, obj.generators):
if isinstance(symbol, Symbol):
name = symbol.name
if hasattr(obj, name):
setattr(obj, name, generator)
_free_group_cache[_hash] = obj
return obj
def _generators(group):
"""Returns the generators of the FreeGroup.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F.generators
(x, y, z)
"""
gens = []
for sym in group.symbols:
elm = ((sym, 1),)
gens.append(group.dtype(elm))
return tuple(gens)
def clone(self, symbols=None):
return self.__class__(symbols or self.symbols)
def __contains__(self, i):
"""Return True if ``i`` is contained in FreeGroup."""
if not isinstance(i, FreeGroupElement):
return False
group = i.group
return self == group
def __hash__(self):
return self._hash
def __len__(self):
return self.rank
def __str__(self):
if self.rank > 30:
str_form = "<free group with %s generators>" % self.rank
else:
str_form = "<free group on the generators "
gens = self.generators
str_form += str(gens) + ">"
return str_form
__repr__ = __str__
def __getitem__(self, index):
symbols = self.symbols[index]
return self.clone(symbols=symbols)
def __eq__(self, other):
"""No ``FreeGroup`` is equal to any "other" ``FreeGroup``.
"""
return self is other
def index(self, gen):
"""Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> F.index(y)
1
>>> F.index(x)
0
"""
if isinstance(gen, self.dtype):
return self.generators.index(gen)
else:
raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen))
def order(self):
"""Return the order of the free group.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> F.order()
oo
>>> free_group("")[0].order()
1
"""
if self.rank == 0:
return S.One
else:
return S.Infinity
@property
def elements(self):
"""
Return the elements of the free group.
Examples
========
>>> from sympy.combinatorics import free_group
>>> (z,) = free_group("")
>>> z.elements
{<identity>}
"""
if self.rank == 0:
# A set containing Identity element of `FreeGroup` self is returned
return {self.identity}
else:
raise ValueError("Group contains infinitely many elements"
", hence cannot be represented")
@property
def rank(self):
r"""
In group theory, the `rank` of a group `G`, denoted `G.rank`,
can refer to the smallest cardinality of a generating set
for G, that is
\operatorname{rank}(G)=\min\{ |X|: X\subseteq G, \left\langle X\right\rangle =G\}.
"""
return self._rank
@property
def is_abelian(self):
"""Returns if the group is Abelian.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.is_abelian
False
"""
return self.rank in (0, 1)
@property
def identity(self):
"""Returns the identity element of free group."""
return self.dtype()
def contains(self, g):
"""Tests if Free Group element ``g`` belong to self, ``G``.
In mathematical terms any linear combination of generators
of a Free Group is contained in it.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.contains(x**3*y**2)
True
"""
if not isinstance(g, FreeGroupElement):
return False
elif self != g.group:
return False
else:
return True
def center(self):
"""Returns the center of the free group `self`."""
return {self.identity}
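# A minimal sketch (not part of the original module): since ``_parse_symbols``
# normalizes the ``symbols`` argument and ``__new__`` caches groups by the
# resulting symbol tuple, equivalent specifications yield the identical
# ``FreeGroup`` object.  The helper name ``_free_group_cache_sketch`` is
# hypothetical.
def _free_group_cache_sketch():
    x, y = _symbols("x y")
    assert FreeGroup("x, y") is FreeGroup(["x", "y"]) is FreeGroup((x, y))
    return FreeGroup((x, y))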
############################################################################
# FreeGroupElement #
############################################################################
class FreeGroupElement(CantSympify, DefaultPrinting, tuple):
"""Used to create elements of FreeGroup. It cannot be used directly to
create a free group element. It is called by the `dtype` method of the
`FreeGroup` class.
"""
is_assoc_word = True
def new(self, init):
return self.__class__(init)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.group, frozenset(tuple(self))))
return _hash
def copy(self):
return self.new(self)
@property
def is_identity(self):
if self.array_form == tuple():
return True
else:
return False
@property
def array_form(self):
"""
SymPy provides two different internal kinds of representation
of associative words. The first one is called the `array_form`
which is a tuple containing `tuples` as its elements, where the
        size of each tuple is two. The first entry of each tuple is the
        generator symbol and the second entry is the exponent of that
        generator at that position. Since elements (i.e. words) do not
        commute, the order of the tuples is significant and is preserved.
The structure in ``array_form`` of ``FreeGroupElement`` is of form:
``( ( symbol_of_gen, exponent ), ( , ), ... ( , ) )``
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> (x*z).array_form
((x, 1), (z, 1))
>>> (x**2*z*y*x**2).array_form
((x, 2), (z, 1), (y, 1), (x, 2))
See Also
========
letter_repr
"""
return tuple(self)
@property
def letter_form(self):
"""
The letter representation of a ``FreeGroupElement`` is a tuple
of generator symbols, with each entry corresponding to a group
generator. Inverses of the generators are represented by
negative generator symbols.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b, c, d = free_group("a b c d")
>>> (a**3).letter_form
(a, a, a)
>>> (a**2*d**-2*a*b**-4).letter_form
(a, a, -d, -d, a, -b, -b, -b, -b)
>>> (a**-2*b**3*d).letter_form
(-a, -a, b, b, b, d)
See Also
========
array_form
"""
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in self.array_form]))
def __getitem__(self, i):
group = self.group
r = self.letter_form[i]
if r.is_Symbol:
return group.dtype(((r, 1),))
else:
return group.dtype(((-r, -1),))
def index(self, gen):
if len(gen) != 1:
raise ValueError()
return (self.letter_form).index(gen.letter_form[0])
@property
def letter_form_elm(self):
"""
"""
group = self.group
r = self.letter_form
return [group.dtype(((elm,1),)) if elm.is_Symbol \
else group.dtype(((-elm,-1),)) for elm in r]
@property
def ext_rep(self):
"""This is called the External Representation of ``FreeGroupElement``
"""
return tuple(flatten(self.array_form))
def __contains__(self, gen):
return gen.array_form[0][0] in tuple([r[0] for r in self.array_form])
def __str__(self):
if self.is_identity:
return "<identity>"
str_form = ""
array_form = self.array_form
for i in range(len(array_form)):
if i == len(array_form) - 1:
if array_form[i][1] == 1:
str_form += str(array_form[i][0])
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1])
else:
if array_form[i][1] == 1:
str_form += str(array_form[i][0]) + "*"
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1]) + "*"
return str_form
__repr__ = __str__
def __pow__(self, n):
n = as_int(n)
group = self.group
if n == 0:
return group.identity
if n < 0:
n = -n
return (self.inverse())**n
result = self
for i in range(n - 1):
result = result*self
# this method can be improved instead of just returning the
# multiplication of elements
return result
def __mul__(self, other):
"""Returns the product of elements belonging to the same ``FreeGroup``.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> x*y**2*y**-4
x*y**-2
>>> z*y**-2
z*y**-2
>>> x**2*y*y**-1*x**-2
<identity>
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
if self.is_identity:
return other
if other.is_identity:
return self
r = list(self.array_form + other.array_form)
zero_mul_simp(r, len(self.array_form) - 1)
return group.dtype(tuple(r))
def __truediv__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return self*(other.inverse())
def __rtruediv__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return other*(self.inverse())
def __add__(self, other):
return NotImplemented
def inverse(self):
"""
Returns the inverse of a ``FreeGroupElement`` element
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> x.inverse()
x**-1
>>> (x*y).inverse()
y**-1*x**-1
"""
group = self.group
r = tuple([(i, -j) for i, j in self.array_form[::-1]])
return group.dtype(r)
def order(self):
"""Find the order of a ``FreeGroupElement``.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y = free_group("x y")
>>> (x**2*y*y**-1*x**-2).order()
1
"""
if self.is_identity:
return S.One
else:
return S.Infinity
def commutator(self, other):
"""
        Return the commutator of `self` and `other`: ``self**-1*other**-1*self*other``
"""
group = self.group
if not isinstance(other, group.dtype):
raise ValueError("commutator of only FreeGroupElement of the same "
"FreeGroup exists")
else:
return self.inverse()*other.inverse()*self*other
def eliminate_words(self, words, _all=False, inverse=True):
'''
Replace each subword from the dictionary `words` by words[subword].
If words is a list, replace the words by the identity.
'''
again = True
new = self
if isinstance(words, dict):
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse)
if new != prev:
again = True
else:
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, _all=_all, inverse=inverse)
if new != prev:
again = True
return new
def eliminate_word(self, gen, by=None, _all=False, inverse=True):
"""
For an associative word `self`, a subword `gen`, and an associative
word `by` (identity by default), return the associative word obtained by
replacing each occurrence of `gen` in `self` by `by`. If `_all = True`,
the occurrences of `gen` that may appear after the first substitution will
also be replaced and so on until no occurrences are found. This might not
always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`).
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y = free_group("x y")
>>> w = x**5*y*x**2*y**-4*x
>>> w.eliminate_word( x, x**2 )
x**10*y*x**4*y**-4*x**2
>>> w.eliminate_word( x, y**-1 )
y**-11
>>> w.eliminate_word(x**5)
y*x**2*y**-4*x
>>> w.eliminate_word(x*y, y)
x**4*y*x**2*y**-4*x
See Also
========
substituted_word
"""
if by is None:
by = self.group.identity
if self.is_independent(gen) or gen == by:
return self
if gen == self:
return by
if gen**-1 == by:
_all = False
word = self
l = len(gen)
try:
i = word.subword_index(gen)
k = 1
except ValueError:
if not inverse:
return word
try:
i = word.subword_index(gen**-1)
k = -1
except ValueError:
return word
word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by)
if _all:
return word.eliminate_word(gen, by, _all=True, inverse=inverse)
else:
return word
def __len__(self):
"""
For an associative word `self`, returns the number of letters in it.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> len(w)
13
>>> len(a**17)
17
>>> len(w**0)
0
"""
return sum(abs(j) for (i, j) in self)
def __eq__(self, other):
"""
Two associative words are equal if they are words over the
same alphabet and if they are sequences of the same letters.
This is equivalent to saying that the external representations
of the words are equal.
There is no "universal" empty word, every alphabet has its own
empty word.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> f
<free group on the generators (swapnil0, swapnil1)>
>>> g, swap0, swap1 = free_group("swap0 swap1")
>>> g
<free group on the generators (swap0, swap1)>
>>> swapnil0 == swapnil1
False
>>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1
True
>>> swapnil0*swapnil1 == swapnil1*swapnil0
False
>>> swapnil1**0 == swap0**0
False
"""
group = self.group
if not isinstance(other, group.dtype):
return False
return tuple.__eq__(self, other)
def __lt__(self, other):
"""
The ordering of associative words is defined by length and
lexicography (this ordering is called short-lex ordering), that
is, shorter words are smaller than longer words, and words of the
same length are compared w.r.t. the lexicographical ordering induced
by the ordering of generators. Generators are sorted according
to the order in which they were created. If the generators are
invertible then each generator `g` is larger than its inverse `g^{-1}`,
and `g^{-1}` is larger than every generator that is smaller than `g`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> b < a
False
>>> a < a.inverse()
False
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
l = len(self)
m = len(other)
# implement lenlex order
if l < m:
return True
elif l > m:
return False
for i in range(l):
a = self[i].array_form[0]
b = other[i].array_form[0]
p = group.symbols.index(a[0])
q = group.symbols.index(b[0])
if p < q:
return True
elif p > q:
return False
elif a[1] < b[1]:
return True
elif a[1] > b[1]:
return False
return False
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
"""
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, x, y, z = free_group("x y z")
>>> y**2 > x**2
True
>>> y*z > z*y
False
>>> x > x.inverse()
True
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
return not self <= other
def __ge__(self, other):
return not self < other
def exponent_sum(self, gen):
"""
For an associative word `self` and a generator or inverse of generator
`gen`, ``exponent_sum`` returns the number of times `gen` appears in
`self` minus the number of times its inverse appears in `self`. If
neither `gen` nor its inverse occur in `self` then 0 is returned.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.exponent_sum(x)
2
>>> w.exponent_sum(x**-1)
-2
>>> w = x**2*y**4*x**-3
>>> w.exponent_sum(x)
-1
See Also
========
generator_count
"""
if len(gen) != 1:
raise ValueError("gen must be a generator or inverse of a generator")
s = gen.array_form[0]
return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]])
def generator_count(self, gen):
"""
For an associative word `self` and a generator `gen`,
``generator_count`` returns the multiplicity of generator
`gen` in `self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.generator_count(x)
2
>>> w = x**2*y**4*x**-3
>>> w.generator_count(x)
5
See Also
========
exponent_sum
"""
if len(gen) != 1 or gen.array_form[0][1] < 0:
raise ValueError("gen must be a generator")
s = gen.array_form[0]
return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])
def subword(self, from_i, to_j, strict=True):
"""
For an associative word `self` and two positive integers `from_i` and
`to_j`, `subword` returns the subword of `self` that begins at position
`from_i` and ends at `to_j - 1`, indexing is done with origin 0.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.subword(2, 6)
a**3*b
"""
group = self.group
if not strict:
from_i = max(from_i, 0)
to_j = min(len(self), to_j)
if from_i < 0 or to_j > len(self):
raise ValueError("`from_i`, `to_j` must be positive and no greater than "
"the length of associative word")
if to_j <= from_i:
return group.identity
else:
letter_form = self.letter_form[from_i: to_j]
array_form = letter_form_to_array_form(letter_form, group)
return group.dtype(array_form)
def subword_index(self, word, start = 0):
'''
Find the index of `word` in `self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> w = a**2*b*a*b**3
>>> w.subword_index(a*b*a*b)
1
'''
l = len(word)
self_lf = self.letter_form
word_lf = word.letter_form
index = None
for i in range(start,len(self_lf)-l+1):
if self_lf[i:i+l] == word_lf:
index = i
break
if index is not None:
return index
else:
raise ValueError("The given word is not a subword of self")
def is_dependent(self, word):
"""
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> (x**4*y**-3).is_dependent(x**4*y**-2)
True
>>> (x**2*y**-1).is_dependent(x*y)
False
>>> (x*y**2*x*y**2).is_dependent(x*y**2)
True
>>> (x**12).is_dependent(x**-4)
True
See Also
========
is_independent
"""
try:
return self.subword_index(word) is not None
except ValueError:
pass
try:
return self.subword_index(word**-1) is not None
except ValueError:
return False
def is_independent(self, word):
"""
See Also
========
is_dependent
"""
return not self.is_dependent(word)
def contains_generators(self):
"""
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> (x**2*y**-1).contains_generators()
{x, y}
>>> (x**3*z).contains_generators()
{x, z}
"""
group = self.group
gens = set()
for syllable in self.array_form:
gens.add(group.dtype(((syllable[0], 1),)))
return set(gens)
def cyclic_subword(self, from_i, to_j):
group = self.group
l = len(self)
letter_form = self.letter_form
period1 = int(from_i/l)
if from_i >= l:
from_i -= l*period1
to_j -= l*period1
diff = to_j - from_i
word = letter_form[from_i: to_j]
period2 = int(to_j/l) - 1
word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2]
word = letter_form_to_array_form(word, group)
return group.dtype(word)
def cyclic_conjugates(self):
"""Returns a words which are cyclic to the word `self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> w = x*y*x*y*x
>>> w.cyclic_conjugates()
{x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x}
>>> s = x*y*x**2*y*x
>>> s.cyclic_conjugates()
{x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x}
References
==========
.. [1] http://planetmath.org/cyclicpermutation
"""
return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))}
def is_cyclic_conjugate(self, w):
"""
Checks whether words ``self``, ``w`` are cyclic conjugates.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> w1 = x**2*y**5
>>> w2 = x*y**5*x
>>> w1.is_cyclic_conjugate(w2)
True
>>> w3 = x**-1*y**5*x**-1
>>> w3.is_cyclic_conjugate(w2)
False
"""
l1 = len(self)
l2 = len(w)
if l1 != l2:
return False
w1 = self.identity_cyclic_reduction()
w2 = w.identity_cyclic_reduction()
letter1 = w1.letter_form
letter2 = w2.letter_form
str1 = ' '.join(map(str, letter1))
str2 = ' '.join(map(str, letter2))
if len(str1) != len(str2):
return False
return str1 in str2 + ' ' + str2
def number_syllables(self):
"""Returns the number of syllables of the associative word `self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables()
3
"""
return len(self.array_form)
def exponent_syllable(self, i):
"""
Returns the exponent of the `i`-th syllable of the associative word
`self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.exponent_syllable( 2 )
2
"""
return self.array_form[i][1]
def generator_syllable(self, i):
"""
Returns the symbol of the generator that is involved in the
i-th syllable of the associative word `self`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.generator_syllable( 3 )
b
"""
return self.array_form[i][0]
def sub_syllables(self, from_i, to_j):
"""
`sub_syllables` returns the subword of the associative word `self` that
        consists of syllables from positions `from_i` to `to_j`, where
        `from_i` and `to_j` must be non-negative integers and indexing is done
        with origin 0.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a, b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.sub_syllables(1, 2)
b
>>> w.sub_syllables(3, 3)
<identity>
"""
if not isinstance(from_i, int) or not isinstance(to_j, int):
raise ValueError("both arguments should be integers")
group = self.group
if to_j <= from_i:
return group.identity
else:
r = tuple(self.array_form[from_i: to_j])
return group.dtype(r)
def substituted_word(self, from_i, to_j, by):
"""
Returns the associative word obtained by replacing the subword of
`self` that begins at position `from_i` and ends at position `to_j - 1`
by the associative word `by`. `from_i` and `to_j` must be positive
integers, indexing is done with origin 0. In other words,
        `w.substituted_word(from_i, to_j, by)` is the product of the three
        words: `w.subword(0, from_i)`, `by`, and
        `w.subword(to_j, len(w))`.
See Also
========
eliminate_word
"""
lw = len(self)
if from_i >= to_j or from_i > lw or to_j > lw:
raise ValueError("values should be within bounds")
# otherwise there are four possibilities
        # first if from_i=0 and to_j=lw then
if from_i == 0 and to_j == lw:
return by
        elif from_i == 0: # second if from_i=0 (and to_j < lw) then
return by*self.subword(to_j, lw)
        elif to_j == lw: # third if to_j=lw (and from_i > 0) then
return self.subword(0, from_i)*by
else: # finally
return self.subword(0, from_i)*by*self.subword(to_j, lw)
def is_cyclically_reduced(self):
r"""Returns whether the word is cyclically reduced or not.
        A word is cyclically reduced if it stays reduced when its letters
        are read cyclically, i.e. a reduced word w = `a_1 ... a_n`
        is called cyclically reduced if `a_1 \ne a_n^{-1}`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**-1*x**-1).is_cyclically_reduced()
False
>>> (y*x**2*y**2).is_cyclically_reduced()
True
"""
if not self:
return True
return self[0] != self[-1]**-1
def identity_cyclic_reduction(self):
"""Return a unique cyclically reduced version of the word.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).identity_cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).identity_cyclic_reduction()
x**2*y**-1
References
==========
.. [1] http://planetmath.org/cyclicallyreduced
"""
word = self.copy()
group = self.group
while not word.is_cyclically_reduced():
exp1 = word.exponent_syllable(0)
exp2 = word.exponent_syllable(-1)
r = exp1 + exp2
if r == 0:
rep = word.array_form[1: word.number_syllables() - 1]
else:
rep = ((word.generator_syllable(0), exp1 + exp2),) + \
word.array_form[1: word.number_syllables() - 1]
word = group.dtype(rep)
return word
def cyclic_reduction(self, removed=False):
"""Return a cyclically reduced version of the word. Unlike
`identity_cyclic_reduction`, this will not cyclically permute
the reduced word - just remove the "unreduced" bits on either
side of it. Compare the examples with those of
`identity_cyclic_reduction`.
        When `removed` is `True`, return a tuple `(word, r)` where
        `r` is such that before the reduction ``self`` was
        `r*word*r**-1`.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).cyclic_reduction()
y**-1*x**2
>>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)
(y**-1*x**2, x**-3)
"""
word = self.copy()
g = self.group.identity
while not word.is_cyclically_reduced():
exp1 = abs(word.exponent_syllable(0))
exp2 = abs(word.exponent_syllable(-1))
exp = min(exp1, exp2)
start = word[0]**abs(exp)
end = word[-1]**abs(exp)
word = start**-1*word*end**-1
g = g*start
if removed:
return word, g
return word
def power_of(self, other):
'''
Check if `self == other**n` for some integer n.
Examples
========
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> ((x*y)**2).power_of(x*y)
True
>>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3)
True
'''
if self.is_identity:
return True
l = len(other)
if l == 1:
# self has to be a power of one generator
gens = self.contains_generators()
s = other in gens or other**-1 in gens
return len(gens) == 1 and s
# if self is not cyclically reduced and it is a power of other,
# other isn't cyclically reduced and the parts removed during
# their reduction must be equal
reduced, r1 = self.cyclic_reduction(removed=True)
if not r1.is_identity:
other, r2 = other.cyclic_reduction(removed=True)
if r1 == r2:
return reduced.power_of(other)
return False
if len(self) < l or len(self) % l:
return False
prefix = self.subword(0, l)
if prefix == other or prefix**-1 == other:
rest = self.subword(l, len(self))
return rest.power_of(other)
return False
def letter_form_to_array_form(array_form, group):
"""
    This method converts a list given with possible repetitions of elements in
    it. It returns a new list such that repetitions of consecutive elements are
    removed and replaced with a tuple element of size two such that the first
    index contains `value` and the second index contains the number of
    consecutive repetitions of `value`.
"""
a = list(array_form[:])
new_array = []
n = 1
symbols = group.symbols
for i in range(len(a)):
if i == len(a) - 1:
if a[i] == a[i - 1]:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -1))
else:
new_array.append((a[i], 1))
return new_array
elif a[i] == a[i + 1]:
n += 1
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
n = 1
def zero_mul_simp(l, index):
"""Used to combine two reduced words."""
while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]:
exp = l[index][1] + l[index + 1][1]
base = l[index][0]
l[index] = (base, exp)
del l[index + 1]
if l[index][1] == 0:
del l[index]
index -= 1
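# A minimal sketch (not part of the original module) of the two low-level helpers
# above: ``letter_form_to_array_form`` re-packs a letter form into syllables, and
# ``zero_mul_simp`` cancels adjacent syllables with the same base.  The helper name
# ``_word_helpers_sketch`` is hypothetical.
def _word_helpers_sketch():
    F, a, b = free_group("a, b")
    sa, sb = F.symbols
    w = a**2*b**-1
    assert letter_form_to_array_form(list(w.letter_form), F) == [(sa, 2), (sb, -1)]
    # zero_mul_simp acts in place on a plain syllable list
    l = [("x", 2), ("x", -2), ("y", 1)]
    zero_mul_simp(l, 0)
    assert l == [("y", 1)]
    return l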
|
689ce656dbfb7b51c4002bfebf68deb4fc4f62b3ed9c0c76d4264d1cbf4afa76 | from sympy.core import Basic, Dict, sympify, Tuple
from sympy.core.numbers import Integer
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from sympy.utilities.misc import as_int
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
Creating Partition from Python lists:
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3])
>>> a
Partition({3}, {1, 2})
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
Creating Partition from Python sets:
>>> Partition({1, 2, 3}, {4, 5})
Partition({4, 5}, {1, 2, 3})
Creating Partition from SymPy finite sets:
>>> from sympy import FiniteSet
>>> a = FiniteSet(1, 2, 3)
>>> b = FiniteSet(4, 5)
>>> Partition(a, b)
Partition({4, 5}, {1, 2, 3})
"""
args = []
dups = False
for arg in partition:
if isinstance(arg, list):
as_set = set(arg)
if len(as_set) < len(arg):
dups = True
break # error below
arg = as_set
args.append(_sympify(arg))
if not all(isinstance(part, FiniteSet) for part in args):
raise ValueError(
"Each argument to Partition should be " \
"a list, set, or a FiniteSet")
# sort so we have a canonical reference for RGS
U = Union(*args)
if dups or len(U) < sum(len(arg) for arg in args):
raise ValueError("Partition contained duplicate elements.")
obj = FiniteSet.__new__(cls, *args)
obj.members = tuple(U)
obj.size = len(U)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy import default_sort_key
>>> from sympy.combinatorics import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[Partition({1, 2}), Partition({1}, {2}), Partition({1, x}), Partition({3, 4}), Partition({0, 1, 2, 3})]
"""
if order is None:
members = self.members
else:
members = tuple(sorted(self.members,
key=lambda w: default_sort_key(w, order)))
return tuple(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted([sorted(p, key=default_sort_key)
for p in self.args])
return self._partition
def __add__(self, other):
"""
        Return the partition whose rank is ``other`` greater than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) %
RGS_enum(self.size),
self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
        Return the partition whose rank is ``other`` less than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
"""
Returns the "restricted growth string" of the partition.
Explanation
===========
The RGS is returned as a list of indices, L, where L[i] indicates
the block in which element i appears. For example, in a partition
of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
[1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
Examples
========
>>> from sympy.combinatorics import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.members
(1, 2, 3, 4, 5)
>>> a.RGS
(0, 0, 1, 2, 2)
>>> a + 1
Partition({3}, {4}, {5}, {1, 2})
>>> _.RGS
(0, 0, 1, 2, 3)
"""
rgs = {}
partition = self.partition
for i, part in enumerate(partition):
for j in part:
rgs[j] = i
return tuple([rgs[i] for i in sorted(
[i for p in partition for i in p], key=default_sort_key)])
@classmethod
def from_rgs(self, rgs, elements):
"""
Creates a set partition from a restricted growth string.
Explanation
===========
The indices given in rgs are assumed to be the index
of the element as given in elements *as provided* (the
elements are not sorted by this routine). Block numbering
starts from 0. If any block was not referenced in ``rgs``
an error will be raised.
Examples
========
>>> from sympy.combinatorics import Partition
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
Partition({c}, {a, d}, {b, e})
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
Partition({e}, {a, c}, {b, d})
>>> a = Partition([1, 4], [2], [3, 5])
>>> Partition.from_rgs(a.RGS, a.members)
Partition({2}, {1, 4}, {3, 5})
"""
if len(rgs) != len(elements):
raise ValueError('mismatch in rgs and element lengths')
max_elem = max(rgs) + 1
partition = [[] for i in range(max_elem)]
j = 0
for i in rgs:
partition[i].append(elements[j])
j += 1
if not all(p for p in partition):
raise ValueError('some blocks of the partition were empty.')
return Partition(*partition)
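# A minimal sketch (not part of the original module): the rank, RGS and
# ``from_rgs`` machinery round-trips, so a partition can be rebuilt from its rank
# or from its restricted growth string.  ``RGS_unrank`` is defined further down in
# this module; the helper name ``_partition_rgs_roundtrip_sketch`` is hypothetical.
def _partition_rgs_roundtrip_sketch():
    p = Partition([1, 2], [3], [4, 5])
    # the RGS records, for each member in sorted order, the block it lies in
    assert p.RGS == (0, 0, 1, 2, 2)
    assert RGS_unrank(p.rank, p.size) == list(p.RGS)
    assert Partition.from_rgs(p.RGS, p.members) == p
    return p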
class IntegerPartition(Basic):
"""
This class represents an integer partition.
Explanation
===========
In number theory and combinatorics, a partition of a positive integer,
``n``, also called an integer partition, is a way of writing ``n`` as a
list of positive integers that sum to n. Two partitions that differ only
in the order of summands are considered to be the same partition; if order
matters then the partitions are referred to as compositions. For example,
4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
[2, 1, 1].
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
References
==========
.. [1] https://en.wikipedia.org/wiki/Partition_%28number_theory%29
"""
_dict = None
_keys = None
def __new__(cls, partition, integer=None):
"""
Generates a new IntegerPartition object from a list or dictionary.
Explanation
===========
The partition can be given as a list of positive integers or a
dictionary of (integer, multiplicity) items. If the partition is
preceded by an integer an error will be raised if the partition
does not sum to that given integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([5, 4, 3, 1, 1])
>>> a
IntegerPartition(14, (5, 4, 3, 1, 1))
>>> print(a)
[5, 4, 3, 1, 1]
>>> IntegerPartition({1:3, 2:1})
IntegerPartition(5, (2, 1, 1, 1))
If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:
>>> IntegerPartition(10, [5, 4, 3, 1])
Traceback (most recent call last):
...
ValueError: The partition is not valid
"""
if integer is not None:
integer, partition = partition, integer
if isinstance(partition, (dict, Dict)):
_ = []
for k, v in sorted(list(partition.items()), reverse=True):
if not v:
continue
k, v = as_int(k), as_int(v)
_.extend([k]*v)
partition = tuple(_)
else:
partition = tuple(sorted(map(as_int, partition), reverse=True))
sum_ok = False
if integer is None:
integer = sum(partition)
sum_ok = True
else:
integer = as_int(integer)
if not sum_ok and sum(partition) != integer:
raise ValueError("Partition did not add to %s" % integer)
if any(i < 1 for i in partition):
raise ValueError("All integer summands must be greater than one")
obj = Basic.__new__(cls, Integer(integer), Tuple(*partition))
obj.partition = list(partition)
obj.integer = integer
return obj
def prev_lex(self):
"""Return the previous partition of the integer, n, in lexical order,
        wrapping around to [n] if the partition is [1, ..., 1].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([4])
>>> print(p.prev_lex())
[3, 1]
>>> p.partition > p.prev_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
keys = self._keys
if keys == [1]:
return IntegerPartition({self.integer: 1})
if keys[-1] != 1:
d[keys[-1]] -= 1
if keys[-1] == 2:
d[1] = 2
else:
d[keys[-1] - 1] = d[1] = 1
else:
d[keys[-2]] -= 1
left = d[1] + keys[-2]
new = keys[-2]
d[1] = 0
while left:
new -= 1
if left - new >= 0:
d[new] += left//new
left -= d[new]*new
return IntegerPartition(self.integer, d)
def next_lex(self):
"""Return the next partition of the integer, n, in lexical order,
        wrapping around to [1, ..., 1] if the partition is [n].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([3, 1])
>>> print(p.next_lex())
[4]
>>> p.partition < p.next_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
key = self._keys
a = key[-1]
if a == self.integer:
d.clear()
d[1] = self.integer
elif a == 1:
if d[a] > 1:
d[a + 1] += 1
d[a] -= 2
else:
b = key[-2]
d[b + 1] += 1
d[1] = (d[b] - 1)*b
d[b] = 0
else:
if d[a] > 1:
if len(key) == 1:
d.clear()
d[a + 1] = 1
d[1] = self.integer - a - 1
else:
a1 = a + 1
d[a1] += 1
d[1] = d[a]*a - a1
d[a] = 0
else:
b = key[-2]
b1 = b + 1
d[b1] += 1
need = d[b]*b + d[a]*a - b1
d[a] = d[b] = 0
d[1] = need
return IntegerPartition(self.integer, d)
def as_dict(self):
"""Return the partition as a dictionary whose keys are the
partition integers and the values are the multiplicity of that
integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
{1: 3, 2: 1, 3: 4}
"""
if self._dict is None:
groups = group(self.partition, multiple=False)
self._keys = [g[0] for g in groups]
self._dict = dict(groups)
return self._dict
@property
def conjugate(self):
"""
Computes the conjugate partition of itself.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([6, 3, 3, 2, 1])
>>> a.conjugate
[5, 4, 3, 1, 1, 1]
"""
j = 1
temp_arr = list(self.partition) + [0]
k = temp_arr[0]
b = [0]*k
while k > 0:
while k > temp_arr[j]:
b[k - 1] = j
k -= 1
j += 1
return b
def __lt__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([3, 1])
>>> a < a
False
>>> b = a.next_lex()
>>> a < b
True
>>> a == b
False
"""
return list(reversed(self.partition)) < list(reversed(other.partition))
def __le__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([4])
>>> a <= a
True
"""
return list(reversed(self.partition)) <= list(reversed(other.partition))
def as_ferrers(self, char='#'):
"""
        Return the Ferrers diagram of the partition as a string.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> print(IntegerPartition([1, 1, 5]).as_ferrers())
#####
#
#
"""
return "\n".join([char*i for i in self.partition])
def __str__(self):
return str(list(self.partition))
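# A minimal sketch (not part of the original module): ``next_lex`` steps through
# the partitions of ``n`` in lexical order and wraps around, so iterating from
# [1]*n until the start reappears visits every partition of ``n`` once.  The helper
# name ``_integer_partition_walk_sketch`` is hypothetical.
def _integer_partition_walk_sketch(n=5):
    p = IntegerPartition([1]*n)
    seen = [p.partition]
    while True:
        p = p.next_lex()
        if p.partition == [1]*n:
            break
        seen.append(p.partition)
    return seen  # for n = 5 this lists the 7 partitions of 5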
def random_integer_partition(n, seed=None):
"""
Generates a random integer partition summing to ``n`` as a list
of reverse-sorted integers.
Examples
========
>>> from sympy.combinatorics.partitions import random_integer_partition
For the following, a seed is given so a known value can be shown; in
practice, the seed would not be given.
>>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
[85, 12, 2, 1]
>>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
[5, 3, 1, 1]
>>> random_integer_partition(1)
[1]
"""
from sympy.core.random import _randint
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
randint = _randint(seed)
partition = []
while (n > 0):
k = randint(1, n)
mult = randint(1, n//k)
partition.append((k, mult))
n -= k*mult
partition.sort(reverse=True)
partition = flatten([[k]*m for k, m in partition])
return partition
def RGS_generalized(m):
"""
Computes the m + 1 generalized unrestricted growth strings
and returns them as rows in matrix.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_generalized
>>> RGS_generalized(6)
Matrix([
[ 1, 1, 1, 1, 1, 1, 1],
[ 1, 2, 3, 4, 5, 6, 0],
[ 2, 5, 10, 17, 26, 0, 0],
[ 5, 15, 37, 77, 0, 0, 0],
[ 15, 52, 151, 0, 0, 0, 0],
[ 52, 203, 0, 0, 0, 0, 0],
[203, 0, 0, 0, 0, 0, 0]])
"""
d = zeros(m + 1)
for i in range(m + 1):
d[0, i] = 1
for i in range(1, m + 1):
for j in range(m):
if j <= m - i:
d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]
else:
d[i, j] = 0
return d
def RGS_enum(m):
"""
RGS_enum computes the total number of restricted growth strings
possible for a superset of size m.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_enum
>>> from sympy.combinatorics import Partition
>>> RGS_enum(4)
15
>>> RGS_enum(5)
52
>>> RGS_enum(6)
203
We can check that the enumeration is correct by actually generating
the partitions. Here, the 15 partitions of 4 items are generated:
>>> a = Partition(list(range(4)))
>>> s = set()
>>> for i in range(20):
... s.add(a)
... a += 1
...
>>> assert len(s) == 15
"""
if (m < 1):
return 0
elif (m == 1):
return 1
else:
return bell(m)
def RGS_unrank(rank, m):
"""
Gives the unranked restricted growth string for a given
superset size.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_unrank
>>> RGS_unrank(14, 4)
[0, 1, 2, 3]
>>> RGS_unrank(0, 4)
[0, 0, 0, 0]
"""
if m < 1:
raise ValueError("The superset size must be >= 1")
if rank < 0 or RGS_enum(m) <= rank:
raise ValueError("Invalid arguments")
L = [1] * (m + 1)
j = 1
D = RGS_generalized(m)
for i in range(2, m + 1):
v = D[m - i, j]
cr = j*v
if cr <= rank:
L[i] = j + 1
rank -= cr
j += 1
else:
L[i] = int(rank / v + 1)
rank %= v
return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
"""
Computes the rank of a restricted growth string.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
>>> RGS_rank([0, 1, 2, 1, 3])
42
>>> RGS_rank(RGS_unrank(4, 7))
4
"""
rgs_size = len(rgs)
rank = 0
D = RGS_generalized(rgs_size)
for i in range(1, rgs_size):
n = len(rgs[(i + 1):])
m = max(rgs[0:i])
rank += D[n, m + 1] * rgs[i]
return rank
|
89d8be54bf38afc0a425e888acd7bda5b26cb057b868fba5bcb6ff419420f236 | from sympy.combinatorics.group_constructs import DirectProduct
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import Permutation
_af_new = Permutation._af_new
def AbelianGroup(*cyclic_orders):
"""
Returns the direct product of cyclic groups with the given orders.
Explanation
===========
According to the structure theorem for finite abelian groups ([1]),
every finite abelian group can be written as the direct product of
finitely many cyclic groups.
Examples
========
>>> from sympy.combinatorics.named_groups import AbelianGroup
>>> AbelianGroup(3, 4)
PermutationGroup([
(6)(0 1 2),
(3 4 5 6)])
>>> _.is_group
True
See Also
========
DirectProduct
References
==========
.. [1] http://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups
"""
groups = []
degree = 0
order = 1
for size in cyclic_orders:
degree += size
order *= size
groups.append(CyclicGroup(size))
G = DirectProduct(*groups)
G._is_abelian = True
G._degree = degree
G._order = order
return G
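# A minimal sketch (not part of the original module): the direct product of cyclic
# groups of orders 3 and 4 acts on 3 + 4 = 7 points and has order 3*4 = 12.  The
# helper name ``_abelian_group_sketch`` is hypothetical.
def _abelian_group_sketch():
    G = AbelianGroup(3, 4)
    assert G.degree == 7
    assert G.order() == 12
    assert G.is_abelian
    return G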
def AlternatingGroup(n):
"""
Generates the alternating group on ``n`` elements as a permutation group.
Explanation
===========
For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for
``n`` odd
and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.).
After the group is generated, some of its basic properties are set.
The cases ``n = 1, 2`` are handled separately.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(4)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> len(a)
12
>>> all(perm.is_even for perm in a)
True
See Also
========
SymmetricGroup, CyclicGroup, DihedralGroup
References
==========
.. [1] Armstrong, M. "Groups and Symmetry"
"""
# small cases are special
if n in (1, 2):
return PermutationGroup([Permutation([0])])
a = list(range(n))
a[0], a[1], a[2] = a[1], a[2], a[0]
gen1 = a
if n % 2:
a = list(range(1, n))
a.append(0)
gen2 = a
else:
a = list(range(2, n))
a.append(1)
a.insert(0, 0)
gen2 = a
gens = [gen1, gen2]
if gen1 == gen2:
gens = gens[:1]
G = PermutationGroup([_af_new(a) for a in gens], dups=False)
if n < 4:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_alt = True
G._is_dihedral = False
return G
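# A minimal sketch (not part of the original module): for n >= 5 the alternating
# group has order n!/2 and, as recorded by the flags set above, is not solvable.
# The helper name ``_alternating_group_sketch`` is hypothetical.
def _alternating_group_sketch():
    G = AlternatingGroup(5)
    assert G.order() == 60
    assert not G.is_solvable
    return G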
def CyclicGroup(n):
"""
Generates the cyclic group of order ``n`` as a permutation group.
Explanation
===========
The generator taken is the ``n``-cycle ``(0 1 2 ... n-1)``
(in cycle notation). After the group is generated, some of its basic
properties are set.
Examples
========
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(6)
>>> G.is_group
True
>>> G.order()
6
>>> list(G.generate_schreier_sims(af=True))
[[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0], [2, 3, 4, 5, 0, 1],
[3, 4, 5, 0, 1, 2], [4, 5, 0, 1, 2, 3], [5, 0, 1, 2, 3, 4]]
See Also
========
SymmetricGroup, DihedralGroup, AlternatingGroup
"""
a = list(range(1, n))
a.append(0)
gen = _af_new(a)
G = PermutationGroup([gen])
G._is_abelian = True
G._is_nilpotent = True
G._is_solvable = True
G._degree = n
G._is_transitive = True
G._order = n
G._is_dihedral = (n == 2)
return G
def DihedralGroup(n):
r"""
Generates the dihedral group `D_n` as a permutation group.
Explanation
===========
The dihedral group `D_n` is the group of symmetries of the regular
``n``-gon. The generators taken are the ``n``-cycle ``a = (0 1 2 ... n-1)``
(a rotation of the ``n``-gon) and ``b = (0 n-1)(1 n-2)...``
    (a reflection of the ``n``-gon) in cycle notation. It is easy to see that
these satisfy ``a**n = b**2 = 1`` and ``bab = ~a`` so they indeed generate
`D_n` (See [1]). After the group is generated, some of its basic properties
are set.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(5)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> [perm.cyclic_form for perm in a]
[[], [[0, 1, 2, 3, 4]], [[0, 2, 4, 1, 3]],
[[0, 3, 1, 4, 2]], [[0, 4, 3, 2, 1]], [[0, 4], [1, 3]],
[[1, 4], [2, 3]], [[0, 1], [2, 4]], [[0, 2], [3, 4]],
[[0, 3], [1, 2]]]
See Also
========
SymmetricGroup, CyclicGroup, AlternatingGroup
References
==========
.. [1] https://en.wikipedia.org/wiki/Dihedral_group
"""
# small cases are special
if n == 1:
return PermutationGroup([Permutation([1, 0])])
if n == 2:
return PermutationGroup([Permutation([1, 0, 3, 2]),
Permutation([2, 3, 0, 1]), Permutation([3, 2, 1, 0])])
a = list(range(1, n))
a.append(0)
gen1 = _af_new(a)
a = list(range(n))
a.reverse()
gen2 = _af_new(a)
G = PermutationGroup([gen1, gen2])
# if n is a power of 2, group is nilpotent
if n & (n-1) == 0:
G._is_nilpotent = True
else:
G._is_nilpotent = False
G._is_dihedral = True
G._is_abelian = False
G._is_solvable = True
G._degree = n
G._is_transitive = True
G._order = 2*n
return G
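# A minimal sketch (not part of the original module): the generators constructed
# above satisfy the defining relations of D_n quoted in the docstring,
# a**n = b**2 = 1 and b*a*b = a**-1.  The helper name
# ``_dihedral_relations_sketch`` is hypothetical.
def _dihedral_relations_sketch(n=7):
    G = DihedralGroup(n)
    a, b = G.generators
    assert (a**n).is_Identity
    assert (b**2).is_Identity
    assert b*a*b == a**-1
    assert G.order() == 2*n
    return G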
def SymmetricGroup(n):
"""
Generates the symmetric group on ``n`` elements as a permutation group.
Explanation
===========
The generators taken are the ``n``-cycle
``(0 1 2 ... n-1)`` and the transposition ``(0 1)`` (in cycle notation).
(See [1]). After the group is generated, some of its basic properties
are set.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(4)
>>> G.is_group
True
>>> G.order()
24
>>> list(G.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [1, 2, 3, 0], [2, 3, 0, 1], [3, 1, 2, 0], [0, 2, 3, 1],
[1, 3, 0, 2], [2, 0, 1, 3], [3, 2, 0, 1], [0, 3, 1, 2], [1, 0, 2, 3],
[2, 1, 3, 0], [3, 0, 1, 2], [0, 1, 3, 2], [1, 2, 0, 3], [2, 3, 1, 0],
[3, 1, 0, 2], [0, 2, 1, 3], [1, 3, 2, 0], [2, 0, 3, 1], [3, 2, 1, 0],
[0, 3, 2, 1], [1, 0, 3, 2], [2, 1, 0, 3], [3, 0, 2, 1]]
See Also
========
CyclicGroup, DihedralGroup, AlternatingGroup
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_group#Generators_and_relations
"""
if n == 1:
G = PermutationGroup([Permutation([0])])
elif n == 2:
G = PermutationGroup([Permutation([1, 0])])
else:
a = list(range(1, n))
a.append(0)
gen1 = _af_new(a)
a = list(range(n))
a[0], a[1] = a[1], a[0]
gen2 = _af_new(a)
G = PermutationGroup([gen1, gen2])
if n < 3:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_sym = True
G._is_dihedral = (n in [2, 3]) # cf Landau's func and Stirling's approx
return G
def RubikGroup(n):
"""Return a group of Rubik's cube generators
>>> from sympy.combinatorics.named_groups import RubikGroup
>>> RubikGroup(2).is_group
True
"""
from sympy.combinatorics.generators import rubik
if n <= 1:
raise ValueError("Invalid cube. n has to be greater than 1")
return PermutationGroup(rubik(n))
|
88e3cd4c8efeae17ecfe8306f737690054c2d61058efad40ee59ef23ef231491 | """
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, <, ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
do not compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is almost an exact rewrite of the Maple code inside the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from functools import reduce
from sympy.core import Basic, S, Mul, PoleError, expand_mul
from sympy.core.cache import cacheit
from sympy.core.numbers import ilcm, I, oo
from sympy.core.symbol import Dummy, Wild
from sympy.core.traversal import bottom_up
from sympy.functions import log, exp, sign as _sign
from sympy.series.order import Order
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import debug_decorator as debug
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)):
la = a.exp
if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)):
lb = b.exp
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_infinite:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
Explanation
===========
The gruntz algorithm needs to rewrite certain expressions in term of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super().__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
if key not in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
"""Substitute the variables with expressions"""
for expr, var in self.items():
e = e.xreplace({var: expr})
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.xreplace({var: res[expr]})
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.xreplace(tr)
return res, exps
def copy(self):
"""Create a shallow copy of SubsSet"""
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
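# A minimal sketch (not part of the original module) of the bookkeeping behaviour
# described in the ``SubsSet`` docstring: a fresh Dummy is created on first access,
# reused afterwards, and ``do_subs`` undoes the substitution.  The helper name
# ``_subs_set_sketch`` is hypothetical.
def _subs_set_sketch():
    x = Dummy('x')
    s = SubsSet()
    d = s[exp(x)]           # first access creates a Dummy for exp(x)
    assert s[exp(x)] is d   # later accesses reuse the same Dummy
    assert s.do_subs(d + x) == exp(x) + x
    return s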
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
from sympy.simplify.powsimp import powsimp
e = powsimp(e, deep=True, combine='exp')
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow and e.base != S.Exp1:
e1 = S.One
while e.is_Pow:
b1 = e.base
e1 *= e.exp
e = b1
if b1 == 1:
return SubsSet(), b1
if e1.has(x):
base_lim = limitinf(b1, x)
if base_lim is S.One:
return mrv(exp(e1 * (b1 - 1)), x)
return mrv(exp(e1 * log(b1)), x)
else:
s, expr = mrv(b1, x)
return s, expr**e1
elif isinstance(e, log):
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif isinstance(e, exp) or (e.is_Pow and e.base == S.Exp1):
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if isinstance(e.exp, log):
return mrv(e.exp.args[0], x)
# if a product has an infinite factor the result will be
# infinite if there is no zero, otherwise NaN; here, we
# consider the result infinite if any factor is infinite
li = limitinf(e.exp, x)
if any(_.is_infinite for _ in Mul.make_args(li)):
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.exp, x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.exp, x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derivatives"
" not implemented yet.")
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""
Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max3() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
if not isinstance(f, SubsSet):
raise TypeError("f should be an instance of SubsSet")
if not isinstance(g, SubsSet):
raise TypeError("g should be an instance of SubsSet")
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
if c != "=":
raise ValueError("c should be =")
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set that is in the higher comparability class,
or the union of both if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
from sympy.simplify import logcombine
e = logcombine(e)
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
if e.base == S.Exp1:
return 1
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
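# Quick illustration of the easy branches above (hypothetical session):
#   >>> from sympy import Symbol, exp
#   >>> x = Symbol('x')
#   >>> sign(x, x), sign(-exp(x), x), sign(exp(-x), x)
#   (1, -1, 1)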
@debug
@timeit
@cacheit
def limitinf(e, x):
"""Limit e(x) for x-> oo."""
# rewrite e in terms of tractable functions only
old = e
if not e.has(x):
return e # e is a constant
from sympy.simplify.powsimp import powdenest
from sympy.calculus.util import AccumBounds
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive or x.is_integer:
# We make sure that x.is_positive is True and x.is_integer is None
# so we get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True)
e = e.subs(x, p)
x = p
e = e.rewrite('tractable', deep=True, limitvar=x)
e = powdenest(e)
if isinstance(e, AccumBounds):
if mrv_leadterm(e.min, x) != mrv_leadterm(e.max, x):
raise NotImplementedError
c0, e0 = mrv_leadterm(e.min, x)
else:
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
# the leading term shouldn't be 0:
if s == 0:
raise ValueError("Leading term should not be 0")
return s*oo
elif sig == 0:
if c0 == old:
c0 = c0.cancel()
return limitinf(c0, x) # e0=0: lim f = lim c0
else:
raise ValueError("{} could not be evaluated".format(sig))
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.xreplace({x: exp(x)})] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})
return r
def moveup(l, x):
return [e.xreplace({x: exp(x)}) for e in l]
@debug
@timeit
def calculate_series(e, x, logx=None):
""" Calculates at least one term of the series of ``e`` in ``x``.
This is a place that fails most often, so it is in its own function.
"""
SymPyDeprecationWarning(
feature="calculate_series",
useinstead="series() with suitable n, or as_leading_term",
issue=21838,
deprecated_since_version="1.12"
).warn()
from sympy.simplify.powsimp import powdenest
for t in e.lseries(x, logx=logx):
# bottom_up function is required for a specific case - when e is
# -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p)
t = bottom_up(t, lambda w:
getattr(w, 'normal', lambda: w)())
# And the expression
# `(-sin(1/x) + sin((x + exp(x))*exp(-x)/x))*exp(x)`
# from the first test of test_gruntz_eval_special needs to
# be expanded. But other forms need to have at least
# factor_terms applied. `factor` accomplishes both and is
# faster than using `factor_terms` for the gruntz suite. It
# does not appear that use of `cancel` is necessary.
# t = cancel(t, expand=False)
t = t.factor()
if t.has(exp) and t.has(log):
t = powdenest(t)
if not t.is_zero:
break
return t
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
return exps, S.Zero
if x in Omega:
# move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", positive=True)
f, logw = rewrite(exps, Omega, x, w)
try:
lt = f.leadterm(w, logx=logw)
except (NotImplementedError, PoleError, ValueError):
n0 = 1
_series = Order(1)
incr = S.One
while _series.is_Order:
_series = f._eval_nseries(w, n=n0+incr, logx=logw)
incr *= 2
series = _series.expand().removeO()
try:
lt = series.leadterm(w, logx=logw)
except (NotImplementedError, PoleError, ValueError):
lt = f.as_coeff_exponent(w)
if lt[0].has(w):
base = f.as_base_exp()[0].as_coeff_exponent(w)
ex = f.as_base_exp()[1]
lt = (base[0]**ex, base[1]*ex)
return (lt[0].subs(log(w), logw), lt[1])
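# Rough illustration (hypothetical session): the pair (c0, e0) describes the
# leading behaviour e ~ c0*w**e0 in terms of the chosen w (here w = exp(-x)):
#   >>> from sympy import Symbol, exp
#   >>> x = Symbol('x')
#   >>> mrv_leadterm(2*exp(-x), x)
#   (2, 1)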
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
\
-> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function builds the tree; ``rewrite`` then sorts the nodes by height.
"""
class Node:
def __init__(self):
self.before = []
self.expr = None
self.var = None
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
from sympy import AccumBounds
if not isinstance(Omega, SubsSet):
raise TypeError("Omega should be an instance of SubsSet")
if len(Omega) == 0:
raise ValueError("Length cannot be 0")
# all items in Omega must be exponentials
for t in Omega.keys():
if not isinstance(t, exp):
raise ValueError("Value should be exp")
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
# make sure we know the sign of each exp() term; after the loop,
# g is going to be the "w" - the simplest one in the mrv set
for g, _ in Omega:
sig = sign(g.exp, x)
if sig != 1 and sig != -1 and not sig.has(AccumBounds):
raise NotImplementedError('Result depends on the sign of %s' % sig)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
# O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.exp/g.exp, x)
if c.is_Rational:
denominators.append(c.q)
arg = f.exp
if var in rewrites:
if not isinstance(rewrites[var], exp):
raise ValueError("Value should be exp")
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))
# Remember that Omega contains subexpressions of "e". So now we find
# them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .xreplace() below succeeds:
# TODO this should not be necessary
from sympy.simplify.powsimp import powsimp
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.xreplace({a: b})
for _, var in Omega:
assert not f.has(var)
# finally compute the logarithm of w (logw).
logw = g.exp
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of SymPy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs({wsym: wsym**exponent})
logw /= exponent
# bottom_up function is required for a specific case - when f is
# -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification
# methods reduce this to 0 while not expanding polynomials.
f = bottom_up(f, lambda w: getattr(w, 'normal', lambda: w)())
f = expand_mul(f)
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
Explanation
===========
``z0`` can be any expression, including oo and -oo.
For ``dir="+"`` (default) it calculates the limit from the right
(z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument does not matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not z.is_symbol:
raise NotImplementedError("Second argument must be a Symbol")
# convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 in (oo, I*oo):
e0 = e
elif z0 in (-oo, -I*oo):
e0 = e.subs(z, -z)
else:
if str(dir) == "-":
e0 = e.subs(z, z0 - 1/z)
elif str(dir) == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
r = limitinf(e0, z)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
# It might be nicer to rewrite them exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
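# Hedged usage sketches (hypothetical session; classic limits this algorithm handles):
#   >>> from sympy import Symbol, exp, oo
#   >>> x = Symbol('x')
#   >>> gruntz(exp(x)/x**10, x, oo)
#   oo
#   >>> gruntz((1 + 1/x)**x, x, oo)
#   E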
|
3da79d84b63cb51cd9a69e7c3aab452cffa59b31960de8c925d1187f42abe755 | from sympy.calculus.accumulationbounds import AccumBounds
from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul
from sympy.core.exprtools import factor_terms
from sympy.core.numbers import Float, _illegal
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (Abs, sign, arg, re)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.special.gamma_functions import gamma
from sympy.polys import PolynomialError, factor
from sympy.series.order import Order
from .gruntz import gruntz
def limit(e, z, z0, dir="+"):
"""Computes the limit of ``e(z)`` at the point ``z0``.
Parameters
==========
e : expression, the limit of which is to be taken
z : symbol representing the variable in the limit.
Other symbols are treated as constants. Multivariate limits
are not supported.
z0 : the value toward which ``z`` tends. Can be any expression,
including ``oo`` and ``-oo``.
dir : string, optional (default: "+")
The limit is bi-directional if ``dir="+-"``, from the right
(z->z0+) if ``dir="+"``, and from the left (z->z0-) if
``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir``
argument is determined from the direction of the infinity
(i.e., ``dir="-"`` for ``oo``).
Examples
========
>>> from sympy import limit, sin, oo
>>> from sympy.abc import x
>>> limit(sin(x)/x, x, 0)
1
>>> limit(1/x, x, 0) # default dir='+'
oo
>>> limit(1/x, x, 0, dir="-")
-oo
>>> limit(1/x, x, 0, dir='+-')
zoo
>>> limit(1/x, x, oo)
0
Notes
=====
First we try some heuristics for easy and frequent cases like "x", "1/x",
"x**2" and similar, so that it's fast. For all other cases, we use the
Gruntz algorithm (see the gruntz() function).
See Also
========
limit_seq : returns the limit of a sequence.
"""
return Limit(e, z, z0, dir).doit(deep=False)
def heuristics(e, z, z0, dir):
"""Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
"""
rv = None
if z0 is S.Infinity:
rv = limit(e.subs(z, 1/z), z, S.Zero, "+")
if isinstance(rv, Limit):
return
elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function:
r = []
from sympy.simplify.simplify import together
for a in e.args:
l = limit(a, z, z0, dir)
if l.has(S.Infinity) and l.is_finite is None:
if isinstance(e, Add):
m = factor_terms(e)
if not isinstance(m, Mul): # try together
m = together(m)
if not isinstance(m, Mul): # try factor if the previous methods failed
m = factor(e)
if isinstance(m, Mul):
return heuristics(m, z, z0, dir)
return
return
elif isinstance(l, Limit):
return
elif l is S.NaN:
return
else:
r.append(l)
if r:
rv = e.func(*r)
if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r):
r2 = []
e2 = []
for ii, rval in enumerate(r):
if isinstance(rval, AccumBounds):
r2.append(rval)
else:
e2.append(e.args[ii])
if len(e2) > 0:
e3 = Mul(*e2).simplify()
l = limit(e3, z, z0, dir)
rv = l * Mul(*r2)
if rv is S.NaN:
try:
from sympy.simplify.ratsimp import ratsimp
rat_e = ratsimp(e)
except PolynomialError:
return
if rat_e is S.NaN or rat_e == e:
return
return limit(rat_e, z, z0, dir)
return rv
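# Hedged illustration: for z0 = oo the whole problem is re-posed at 0+ via
# z -> 1/z, e.g. heuristics(z + exp(-z), z, oo, "+") computes
# limit(1/z + exp(-1/z), z, 0, "+") and returns oo; otherwise the limit of each
# argument is taken separately and the results are recombined.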
class Limit(Expr):
"""Represents an unevaluated limit.
Examples
========
>>> from sympy import Limit, sin
>>> from sympy.abc import x
>>> Limit(sin(x)/x, x, 0)
Limit(sin(x)/x, x, 0, dir='+')
>>> Limit(1/x, x, 0, dir="-")
Limit(1/x, x, 0, dir='-')
"""
def __new__(cls, e, z, z0, dir="+"):
e = sympify(e)
z = sympify(z)
z0 = sympify(z0)
if z0 in (S.Infinity, S.ImaginaryUnit*S.Infinity):
dir = "-"
elif z0 in (S.NegativeInfinity, S.ImaginaryUnit*S.NegativeInfinity):
dir = "+"
if(z0.has(z)):
raise NotImplementedError("Limits approaching a variable point are"
" not supported (%s -> %s)" % (z, z0))
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("direction must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-', '+-'):
raise ValueError("direction must be one of '+', '-' "
"or '+-', not %s" % dir)
obj = Expr.__new__(cls)
obj._args = (e, z, z0, dir)
return obj
@property
def free_symbols(self):
e = self.args[0]
isyms = e.free_symbols
isyms.difference_update(self.args[1].free_symbols)
isyms.update(self.args[2].free_symbols)
return isyms
def pow_heuristics(self, e):
_, z, z0, _ = self.args
b1, e1 = e.base, e.exp
if not b1.has(z):
res = limit(e1*log(b1), z, z0)
return exp(res)
ex_lim = limit(e1, z, z0)
base_lim = limit(b1, z, z0)
if base_lim is S.One:
if ex_lim in (S.Infinity, S.NegativeInfinity):
res = limit(e1*(b1 - 1), z, z0)
return exp(res)
if base_lim is S.NegativeInfinity and ex_lim is S.Infinity:
return S.ComplexInfinity
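# Illustrative note (hedged): this covers the classic 1**oo form, e.g. for
# (1 + 1/z)**z as z -> oo the reduction above yields
# exp(limit(z*(1 + 1/z - 1))) = exp(1) = E.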
def doit(self, **hints):
"""Evaluates the limit.
Parameters
==========
deep : bool, optional (default: True)
Invoke the ``doit`` method of the expressions involved before
taking the limit.
hints : optional keyword arguments
To be passed to ``doit`` methods; only used if deep is True.
"""
e, z, z0, dir = self.args
if str(dir) == '+-':
r = limit(e, z, z0, dir='+')
l = limit(e, z, z0, dir='-')
if isinstance(r, Limit) and isinstance(l, Limit):
if r.args[0] == l.args[0]:
return self
if r == l:
return l
if r.is_infinite and l.is_infinite:
return S.ComplexInfinity
raise ValueError("The limit does not exist since "
"left hand limit = %s and right hand limit = %s"
% (l, r))
if z0 is S.ComplexInfinity:
raise NotImplementedError("Limits at complex "
"infinity are not implemented")
if z0.is_infinite:
cdir = sign(z0)
cdir = cdir/abs(cdir)
e = e.subs(z, cdir*z)
dir = "-"
z0 = S.Infinity
if hints.get('deep', True):
e = e.doit(**hints)
z = z.doit(**hints)
z0 = z0.doit(**hints)
if e == z:
return z0
if not e.has(z):
return e
if z0 is S.NaN:
return S.NaN
if e.has(*_illegal):
return self
if e.is_Order:
return Order(limit(e.expr, z, z0), *e.args[1:])
cdir = 0
if str(dir) == "+":
cdir = 1
elif str(dir) == "-":
cdir = -1
def set_signs(expr):
if not expr.args:
return expr
newargs = tuple(set_signs(arg) for arg in expr.args)
if newargs != expr.args:
expr = expr.func(*newargs)
abs_flag = isinstance(expr, Abs)
arg_flag = isinstance(expr, arg)
sign_flag = isinstance(expr, sign)
if abs_flag or sign_flag or arg_flag:
sig = limit(expr.args[0], z, z0, dir)
if sig.is_zero:
sig = limit(1/expr.args[0], z, z0, dir)
if sig.is_extended_real:
if (sig < 0) == True:
return (-expr.args[0] if abs_flag else
S.NegativeOne if sign_flag else S.Pi)
elif (sig > 0) == True:
return (expr.args[0] if abs_flag else
S.One if sign_flag else S.Zero)
return expr
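# Hedged illustration: with z -> oo, set_signs(Abs(z - 5)) returns z - 5,
# because the limit of the argument is positive, so its sign is eventually fixed.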
if e.has(Float):
# Convert floats like 0.5 to exact SymPy numbers like S.Half, to
# prevent rounding errors which can lead to unexpected execution
# of conditional blocks that work on comparisons
# Also see comments in https://github.com/sympy/sympy/issues/19453
from sympy.simplify.simplify import nsimplify
e = nsimplify(e)
e = set_signs(e)
if e.is_meromorphic(z, z0):
if z0 is S.Infinity:
newe = e.subs(z, 1/z)
# cdir changes sign as oo- should become 0+
cdir = -cdir
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir=cdir)
except ValueError:
pass
else:
if ex > 0:
return S.Zero
elif ex == 0:
return coeff
if cdir == 1 or not(int(ex) & 1):
return S.Infinity*sign(coeff)
elif cdir == -1:
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
if z0 is S.Infinity:
if e.is_Mul:
e = factor_terms(e)
newe = e.subs(z, 1/z)
# cdir changes sign as oo- should become 0+
cdir = -cdir
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir=cdir)
except (ValueError, NotImplementedError, PoleError):
# The NotImplementedError catching is for custom functions
from sympy.simplify.powsimp import powsimp
e = powsimp(e)
if e.is_Pow:
r = self.pow_heuristics(e)
if r is not None:
return r
try:
coeff = newe.as_leading_term(z, cdir=cdir)
if coeff != newe and coeff.has(exp):
return gruntz(coeff, z, 0, "-" if re(cdir).is_negative else "+")
except (ValueError, NotImplementedError, PoleError):
pass
else:
if isinstance(coeff, AccumBounds) and ex == S.Zero:
return coeff
if coeff.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN):
return self
if not coeff.has(z):
if ex.is_positive:
return S.Zero
elif ex == 0:
return coeff
elif ex.is_negative:
if ex.is_integer:
if cdir == 1 or ex.is_even:
return S.Infinity*sign(coeff)
elif cdir == -1:
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
else:
if cdir == 1:
return S.Infinity*sign(coeff)
elif cdir == -1:
return S.Infinity*sign(coeff)*S.NegativeOne**ex
else:
return S.ComplexInfinity
else:
raise NotImplementedError("Not sure of sign of %s" % ex)
# gruntz fails on factorials but works with the gamma function
# If no factorial term is present, e should remain unchanged.
# factorial is defined to be zero for negative inputs (which
# differs from gamma) so only rewrite for positive z0.
if z0.is_extended_positive:
e = e.rewrite(factorial, gamma)
l = None
try:
r = gruntz(e, z, z0, dir)
if r is S.NaN or l is S.NaN:
raise PoleError()
except (PoleError, ValueError):
if l is not None:
raise
r = heuristics(e, z, z0, dir)
if r is None:
return self
return r
|
6cabcb3c6d10c0f9776bb430f92310123929c9a4f1c727bba6f0b6efe8efb7cd | from sympy.core import S, sympify, Expr, Dummy, Add, Mul
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.function import Function, PoleError, expand_power_base, expand_log
from sympy.core.sorting import default_sort_key
from sympy.functions.elementary.exponential import exp, log
from sympy.sets.sets import Complement
from sympy.utilities.iterables import uniq, is_sequence
class Order(Expr):
r""" Represents the limiting behavior of some function.
Explanation
===========
The order of a function characterizes the function based on the limiting
behavior of the function as it goes to some limit. Only taking the limit
point to be a number is currently supported. This is expressed in
big O notation [1]_.
The formal definition for the order of a function `g(x)` about a point `a`
is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if there
exists a `\delta > 0` and an `M > 0` such that `|g(x)| \leq M|f(x)|` for
`|x-a| < \delta`. This is equivalent to `\limsup_{x \rightarrow a}
|g(x)/f(x)| < \infty`.
Let's illustrate it on the following example by taking the expansion of
`\sin(x)` about 0:
.. math ::
\sin(x) = x - x^3/3! + O(x^5)
where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition
of `O`, there is a `\delta > 0` and an `M` such that:
.. math ::
\left|x^5/5! - x^7/7! + \cdots\right| \leq M|x^5| \text{ for } |x| < \delta
or by the alternate definition:
.. math ::
\lim_{x \rightarrow 0} \left| (x^5/5! - x^7/7! + \cdots) / x^5 \right| < \infty
which surely is true, because
.. math ::
\lim_{x \rightarrow 0} \left| (x^5/5! - x^7/7! + \cdots) / x^5 \right| = 1/5!
As it is usually used, the order of a function can be intuitively thought
of as representing all terms with powers greater than the one specified. For
example, `O(x^3)` corresponds to any terms proportional to `x^3,
x^4,\ldots` and any higher power. For a polynomial, this leaves terms
proportional to `x^2`, `x` and constants.
Examples
========
>>> from sympy import O, oo, cos, pi
>>> from sympy.abc import x, y
>>> O(x + x**2)
O(x)
>>> O(x + x**2, (x, 0))
O(x)
>>> O(x + x**2, (x, oo))
O(x**2, (x, oo))
>>> O(1 + x*y)
O(1, x, y)
>>> O(1 + x*y, (x, 0), (y, 0))
O(1, x, y)
>>> O(1 + x*y, (x, oo), (y, oo))
O(x*y, (x, oo), (y, oo))
>>> O(1) in O(1, x)
True
>>> O(1, x) in O(1)
False
>>> O(x) in O(1, x)
True
>>> O(x**2) in O(x)
True
>>> O(x)*x
O(x**2)
>>> O(x) - O(x)
O(x)
>>> O(cos(x))
O(1)
>>> O(cos(x), (x, pi/2))
O(x - pi/2, (x, pi/2))
References
==========
.. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_
Notes
=====
In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading
term. ``O(f(x), x)`` is automatically transformed to
``O(f(x).as_leading_term(x),x)``.
``O(expr*f(x), x)`` is ``O(f(x), x)``
``O(expr, x)`` is ``O(1)``
``O(0, x)`` is 0.
Multivariate O is also supported:
``O(f(x, y), x, y)`` is transformed to
``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)``
In the multivariate case, it is assumed the limits w.r.t. the various
symbols commute.
If no symbols are passed then all symbols in the expression are used
and the limit point is assumed to be zero.
"""
is_Order = True
__slots__ = ()
@cacheit
def __new__(cls, expr, *args, **kwargs):
expr = sympify(expr)
if not args:
if expr.is_Order:
variables = expr.variables
point = expr.point
else:
variables = list(expr.free_symbols)
point = [S.Zero]*len(variables)
else:
args = list(args if is_sequence(args) else [args])
variables, point = [], []
if is_sequence(args[0]):
for a in args:
v, p = list(map(sympify, a))
variables.append(v)
point.append(p)
else:
variables = list(map(sympify, args))
point = [S.Zero]*len(variables)
if not all(v.is_symbol for v in variables):
raise TypeError('Variables are not symbols, got %s' % variables)
if len(list(uniq(variables))) != len(variables):
raise ValueError('Variables are supposed to be unique symbols, got %s' % variables)
if expr.is_Order:
expr_vp = dict(expr.args[1:])
new_vp = dict(expr_vp)
vp = dict(zip(variables, point))
for v, p in vp.items():
if v in new_vp.keys():
if p != new_vp[v]:
raise NotImplementedError(
"Mixing Order at different points is not supported.")
else:
new_vp[v] = p
if set(expr_vp.keys()) == set(new_vp.keys()):
return expr
else:
variables = list(new_vp.keys())
point = [new_vp[v] for v in variables]
if expr is S.NaN:
return S.NaN
if any(x in p.free_symbols for x in variables for p in point):
raise ValueError('Got %s as a point.' % point)
if variables:
if any(p != point[0] for p in point):
raise NotImplementedError(
"Multivariable orders at different points are not supported.")
if point[0] in (S.Infinity, S.Infinity*S.ImaginaryUnit):
s = {k: 1/Dummy() for k in variables}
rs = {1/v: 1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] in (S.NegativeInfinity, S.NegativeInfinity*S.ImaginaryUnit):
s = {k: -1/Dummy() for k in variables}
rs = {-1/v: -1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] is not S.Zero:
s = {k: Dummy() + point[0] for k in variables}
rs = {(v - point[0]).together(): k - point[0] for k, v in s.items()}
ps = [S.Zero for p in point]
else:
s = ()
rs = ()
ps = list(point)
expr = expr.subs(s)
if expr.is_Add:
expr = expr.factor()
if s:
args = tuple([r[0] for r in rs.items()])
else:
args = tuple(variables)
if len(variables) > 1:
# XXX: better way? We need this expand() to
# workaround e.g: expr = x*(x + y).
# (x*(x + y)).as_leading_term(x, y) currently returns
# x*y (wrong order term!). That's why we want to deal with
# expand()'ed expr (handled in "if expr.is_Add" branch below).
expr = expr.expand()
old_expr = None
while old_expr != expr:
old_expr = expr
if expr.is_Add:
lst = expr.extract_leading_order(args)
expr = Add(*[f.expr for (e, f) in lst])
elif expr:
try:
expr = expr.as_leading_term(*args)
except PoleError:
if isinstance(expr, Function) or\
all(isinstance(arg, Function) for arg in expr.args):
# It is not possible to simplify an expression
# containing only functions (which raise error on
# call to leading term) further
pass
else:
orders = []
pts = tuple(zip(args, ps))
for arg in expr.args:
try:
lt = arg.as_leading_term(*args)
except PoleError:
lt = arg
if lt not in args:
order = Order(lt)
else:
order = Order(lt, *pts)
orders.append(order)
if expr.is_Add:
new_expr = Order(Add(*orders), *pts)
if new_expr.is_Add:
new_expr = Order(Add(*[a.expr for a in new_expr.args]), *pts)
expr = new_expr.expr
elif expr.is_Mul:
expr = Mul(*[a.expr for a in orders])
elif expr.is_Pow:
e = expr.exp
b = expr.base
expr = exp(e * log(b))
# It would probably be better to handle this somewhere
# else. This is needed for a testcase in which there is a
# symbol with the assumptions zero=True.
if expr.is_zero:
expr = S.Zero
else:
expr = expr.as_independent(*args, as_Add=False)[1]
expr = expand_power_base(expr)
expr = expand_log(expr)
if len(args) == 1:
# The definition of the O(f(x)) symbol explicitly states that
# the argument of f(x) is irrelevant. That's why we can
# combine some power exponents (only "on top" of the
# expression tree for f(x)), e.g.:
# x**p * (-x)**q -> x**(p+q) for real p, q.
x = args[0]
margs = list(Mul.make_args(
expr.as_independent(x, as_Add=False)[1]))
for i, t in enumerate(margs):
if t.is_Pow:
b, q = t.args
if b in (x, -x) and q.is_real and not q.has(x):
margs[i] = x**q
elif b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
elif b.is_Mul and b.args[0] is S.NegativeOne:
b = -b
if b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
expr = Mul(*margs)
expr = expr.subs(rs)
if expr.is_Order:
expr = expr.expr
if not expr.has(*variables) and not expr.is_zero:
expr = S.One
# create Order instance:
vp = dict(zip(variables, point))
variables.sort(key=default_sort_key)
point = [vp[v] for v in variables]
args = (expr,) + Tuple(*zip(variables, point))
obj = Expr.__new__(cls, *args)
return obj
def _eval_nseries(self, x, n, logx, cdir=0):
return self
@property
def expr(self):
return self.args[0]
@property
def variables(self):
if self.args[1:]:
return tuple(x[0] for x in self.args[1:])
else:
return ()
@property
def point(self):
if self.args[1:]:
return tuple(x[1] for x in self.args[1:])
else:
return ()
@property
def free_symbols(self):
return self.expr.free_symbols | set(self.variables)
def _eval_power(b, e):
if e.is_Number and e.is_nonnegative:
return b.func(b.expr ** e, *b.args[1:])
if e == O(1):
return b
return
def as_expr_variables(self, order_symbols):
if order_symbols is None:
order_symbols = self.args[1:]
else:
if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and
not all(p == self.point[0] for p in self.point)): # pragma: no cover
raise NotImplementedError('Order at points other than 0 '
'or oo not supported, got %s as a point.' % self.point)
if order_symbols and order_symbols[0][1] != self.point[0]:
raise NotImplementedError(
"Multiplying Order at different points is not supported.")
order_symbols = dict(order_symbols)
for s, p in dict(self.args[1:]).items():
if s not in order_symbols.keys():
order_symbols[s] = p
order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0]))
return self.expr, tuple(order_symbols)
def removeO(self):
return S.Zero
def getO(self):
return self
@cacheit
def contains(self, expr):
r"""
Return True if expr belongs to Order(self.expr, \*self.variables).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined
(e.g. when self and expr have different symbols).
"""
expr = sympify(expr)
if expr.is_zero:
return True
if expr is S.NaN:
return False
point = self.point[0] if self.point else S.Zero
if expr.is_Order:
if (any(p != point for p in expr.point) or
any(p != point for p in self.point)):
return None
if expr.expr == self.expr:
# O(1) + O(1), O(1) + O(1, x), etc.
return all(x in self.args[1:] for x in expr.args[1:])
if expr.expr.is_Add:
return all(self.contains(x) for x in expr.expr.args)
if self.expr.is_Add and point.is_zero:
return any(self.func(x, *self.args[1:]).contains(expr)
for x in self.expr.args)
if self.variables and expr.variables:
common_symbols = tuple(
[s for s in self.variables if s in expr.variables])
elif self.variables:
common_symbols = self.variables
else:
common_symbols = expr.variables
if not common_symbols:
return None
if (self.expr.is_Pow and len(self.variables) == 1
and self.variables == expr.variables):
symbol = self.variables[0]
other = expr.expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
from sympy.simplify.powsimp import powsimp
r = None
ratio = self.expr/expr.expr
ratio = powsimp(ratio, deep=True, combine='exp')
for s in common_symbols:
from sympy.series.limits import Limit
l = Limit(ratio, s, point).doit(heuristics=False)
if not isinstance(l, Limit):
l = l != 0
else:
l = None
if r is None:
r = l
else:
if r != l:
return
return r
if self.expr.is_Pow and len(self.variables) == 1:
symbol = self.variables[0]
other = expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
obj = self.func(expr, *self.args[1:])
return self.contains(obj)
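# Hedged illustration (hypothetical session; default point 0):
#   >>> from sympy.abc import x
#   >>> Order(x).contains(x**2)
#   True
#   >>> Order(x**2).contains(x)
#   False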
def __contains__(self, other):
result = self.contains(other)
if result is None:
raise TypeError('contains did not evaluate to a bool')
return result
def _eval_subs(self, old, new):
if old in self.variables:
newexpr = self.expr.subs(old, new)
i = self.variables.index(old)
newvars = list(self.variables)
newpt = list(self.point)
if new.is_symbol:
newvars[i] = new
else:
syms = new.free_symbols
if len(syms) == 1 or old in syms:
if old in syms:
var = self.variables[i]
else:
var = syms.pop()
# First, try to substitute self.point in the "new"
# expr to see if this is a fixed point.
# E.g. O(y).subs(y, sin(x))
point = new.subs(var, self.point[i])
if point != self.point[i]:
from sympy.solvers.solveset import solveset
d = Dummy()
sol = solveset(old - new.subs(var, d), d)
if isinstance(sol, Complement):
e1 = sol.args[0]
e2 = sol.args[1]
sol = set(e1) - set(e2)
res = [dict(zip((d, ), sol))]
point = d.subs(res[0]).limit(old, self.point[i])
newvars[i] = var
newpt[i] = point
elif old not in syms:
del newvars[i], newpt[i]
if not syms and new == self.point[i]:
newvars.extend(syms)
newpt.extend([S.Zero]*len(syms))
else:
return
return Order(newexpr, *zip(newvars, newpt))
def _eval_conjugate(self):
expr = self.expr._eval_conjugate()
if expr is not None:
return self.func(expr, *self.args[1:])
def _eval_derivative(self, x):
return self.func(self.expr.diff(x), *self.args[1:]) or self
def _eval_transpose(self):
expr = self.expr._eval_transpose()
if expr is not None:
return self.func(expr, *self.args[1:])
def __neg__(self):
return self
O = Order
|
22e5c01a1478f8d340e8516d64868996a66a874f0bee36ca09ea0ac6ced2f4e9 | from collections import defaultdict
from functools import reduce
from sympy.core import (sympify, Basic, S, Expr, factor_terms,
Mul, Add, bottom_up)
from sympy.core.cache import cacheit
from sympy.core.function import (count_ops, _mexpand, FunctionClass, expand,
expand_mul, _coeff_isneg, Derivative)
from sympy.core.numbers import I, Integer, igcd
from sympy.core.sorting import _nodes
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.external.gmpy import SYMPY_INTS
from sympy.functions import sin, cos, exp, cosh, tanh, sinh, tan, cot, coth
from sympy.functions import atan2
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys import Poly, factor, cancel, parallel_poly_from_expr
from sympy.polys.domains import ZZ
from sympy.polys.polyerrors import PolificationFailed
from sympy.polys.polytools import groebner
from sympy.simplify.cse_main import cse
from sympy.strategies.core import identity
from sympy.strategies.tree import greedy
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import debug
def trigsimp_groebner(expr, hints=[], quick=False, order="grlex",
polynomial=False):
"""
Simplify trigonometric expressions using a groebner basis algorithm.
Explanation
===========
This routine takes a fraction involving trigonometric or hyperbolic
expressions, and tries to simplify it. The primary metric is the
total degree. Some attempts are made to choose the simplest possible
expression of the minimal degree, but this is non-rigorous, and also
very slow (see the ``quick=True`` option).
If ``polynomial`` is set to True, instead of simplifying numerator and
denominator together, this function just brings numerator and denominator
into a canonical form. This is much faster, but has potentially worse
results. However, if the input is a polynomial, then the result is
guaranteed to be an equivalent polynomial of minimal degree.
The most important option is hints. Its entries can be any of the
following:
- a natural number
- a function
- an iterable of the form (func, var1, var2, ...)
- anything else, interpreted as a generator
A number is used to indicate that the search space should be increased.
A function is used to indicate that said function is likely to occur in a
simplified expression.
An iterable is used to indicate that func(var1 + var2 + ...) is likely to
occur in a simplified expression.
An additional generator also indicates that it is likely to occur.
(See examples below).
This routine carries out various computationally intensive algorithms.
The option ``quick=True`` can be used to suppress one particularly slow
step (at the expense of potentially more complicated results, but never at
the expense of increased total degree).
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, tan, cos, sinh, cosh, tanh
>>> from sympy.simplify.trigsimp import trigsimp_groebner
Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens:
>>> ex = sin(x)*cos(x)
>>> trigsimp_groebner(ex)
sin(x)*cos(x)
This is because ``trigsimp_groebner`` only looks for a simplification
involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try
``2*x`` by passing ``hints=[2]``:
>>> trigsimp_groebner(ex, hints=[2])
sin(2*x)/2
>>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2])
-cos(2*x)
Increasing the search space this way can quickly become expensive. A much
faster way is to give a specific expression that is likely to occur:
>>> trigsimp_groebner(ex, hints=[sin(2*x)])
sin(2*x)/2
Hyperbolic expressions are similarly supported:
>>> trigsimp_groebner(sinh(2*x)/sinh(x))
2*cosh(x)
Note how no hints had to be passed, since the expression already involved
``2*x``.
The tangent function is also supported. You can either pass ``tan`` in the
hints, to indicate that tan should be tried whenever cosine or sine are,
or you can pass a specific generator:
>>> trigsimp_groebner(sin(x)/cos(x), hints=[tan])
tan(x)
>>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)])
tanh(x)
Finally, you can use the iterable form to suggest that angle sum formulae
should be tried:
>>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y))
>>> trigsimp_groebner(ex, hints=[(tan, x, y)])
tan(x + y)
"""
# TODO
# - preprocess by replacing everything by funcs we can handle
# - optionally use cot instead of tan
# - more intelligent hinting.
# For example, if the ideal is small, and we have sin(x), sin(y),
# add sin(x + y) automatically... ?
# - algebraic numbers ...
# - expressions of lowest degree are not distinguished properly
# e.g. 1 - sin(x)**2
# - we could try to order the generators intelligently, so as to influence
# which monomials appear in the quotient basis
# THEORY
# ------
# Ratsimpmodprime above can be used to "simplify" a rational function
# modulo a prime ideal. "Simplify" mainly means finding an equivalent
# expression of lower total degree.
#
# We intend to use this to simplify trigonometric functions. To do that,
# we need to decide (a) which ring to use, and (b) modulo which ideal to
# simplify. In practice, (a) means settling on a list of "generators"
# a, b, c, ..., such that the fraction we want to simplify is a rational
# function in a, b, c, ..., with coefficients in ZZ (integers).
# (b) means that we have to decide what relations to impose on the
# generators. There are two practical problems:
# (1) The ideal has to be *prime* (a technical term).
# (2) The relations have to be polynomials in the generators.
#
# We typically have two kinds of generators:
# - trigonometric expressions, like sin(x), cos(5*x), etc
# - "everything else", like gamma(x), pi, etc.
#
# Since this function is trigsimp, we will concentrate on what to do with
# trigonometric expressions. We can also simplify hyperbolic expressions,
# but the extensions should be clear.
#
# One crucial point is that all *other* generators really should behave
# like indeterminates. In particular if (say) "I" is one of them, then
# in fact I**2 + 1 = 0 and we may and will compute non-sensical
# expressions. However, we can work with a dummy and add the relation
# I**2 + 1 = 0 to our ideal, then substitute back in the end.
#
# Now regarding trigonometric generators. We split them into groups,
# according to the argument of the trigonometric functions. We want to
# organise this in such a way that most trigonometric identities apply in
# the same group. For example, given sin(x), cos(2*x) and cos(y), we would
# group as [sin(x), cos(2*x)] and [cos(y)].
#
# Our prime ideal will be built in three steps:
# (1) For each group, compute a "geometrically prime" ideal of relations.
# Geometrically prime means that it generates a prime ideal in
# CC[gens], not just ZZ[gens].
# (2) Take the union of all the generators of the ideals for all groups.
# By the geometric primality condition, this is still prime.
# (3) Add further inter-group relations which preserve primality.
#
# Step (1) works as follows. We will isolate common factors in the
# argument, so that all our generators are of the form sin(n*x), cos(n*x)
# or tan(n*x), with n an integer. Suppose first there are no tan terms.
# The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since
# X**2 + Y**2 - 1 is irreducible over CC.
# Now, if we have a generator sin(n*x), then we can, using trig identities,
# express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this
# relation to the ideal, preserving geometric primality, since the quotient
# ring is unchanged.
# Thus we have treated all sin and cos terms.
# For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0.
# (This requires of course that we already have relations for cos(n*x) and
# sin(n*x).) It is not obvious, but it seems that this preserves geometric
# primality.
# XXX A real proof would be nice. HELP!
# Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of
# CC[S, C, T]:
# - it suffices to show that the projective closure in CP**3 is
# irreducible
# - using the half-angle substitutions, we can express sin(x), tan(x),
# cos(x) as rational functions in tan(x/2)
# - from this, we get a rational map from CP**1 to our curve
# - this is a morphism, hence the curve is prime
#
# Step (2) is trivial.
#
# Step (3) works by adding selected relations of the form
# sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
# preserved by the same argument as before.
def parse_hints(hints):
"""Split hints into (n, funcs, iterables, gens)."""
n = 1
funcs, iterables, gens = [], [], []
for e in hints:
if isinstance(e, (SYMPY_INTS, Integer)):
n = e
elif isinstance(e, FunctionClass):
funcs.append(e)
elif iterable(e):
iterables.append((e[0], e[1:]))
# XXX sin(x+2y)?
# Note: we go through polys so e.g.
# sin(-x) -> -sin(x) -> sin(x)
gens.extend(parallel_poly_from_expr(
[e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens)
else:
gens.append(e)
return n, funcs, iterables, gens
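# Hedged illustration: a hints list like [2, tan, (sin, x, y)] parses roughly to
# n=2, funcs=[tan], iterables=[(sin, (x, y))], plus the extra generators
# sin(x), sin(y) and sin(x + y) extracted for the iterable entry.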
def build_ideal(x, terms):
"""
Build generators for our ideal. ``Terms`` is an iterable with elements of
the form (fn, coeff), indicating that we have a generator fn(coeff*x).
If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
sin(n*x) and cos(n*x) are guaranteed.
"""
I = []
y = Dummy('y')
for fn, coeff in terms:
for c, s, t, rel in (
[cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
[cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]):
if coeff == 1 and fn in [c, s]:
I.append(rel)
elif fn == t:
I.append(t(coeff*x)*c(coeff*x) - s(coeff*x))
elif fn in [c, s]:
cn = fn(coeff*y).expand(trig=True).subs(y, x)
I.append(fn(coeff*x) - cn)
return list(set(I))
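# Hedged illustration: for x and terms [(cos, 1), (sin, 1)] the ideal is just
# [cos(x)**2 + sin(x)**2 - 1]; a tan(n*x) term additionally contributes
# tan(n*x)*cos(n*x) - sin(n*x).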
def analyse_gens(gens, hints):
"""
Analyse the generators ``gens``, using the hints ``hints``.
The meaning of ``hints`` is described in the main docstring.
Return a new list of generators, and also the ideal we should
work with.
"""
# First parse the hints
n, funcs, iterables, extragens = parse_hints(hints)
debug('n=%s' % n, 'funcs:', funcs, 'iterables:',
iterables, 'extragens:', extragens)
# We just add the extragens to gens and analyse them as before
gens = list(gens)
gens.extend(extragens)
# remove duplicates
funcs = list(set(funcs))
iterables = list(set(iterables))
gens = list(set(gens))
# all the functions we can do anything with
allfuncs = {sin, cos, tan, sinh, cosh, tanh}
# sin(3*x) -> ((3, x), sin)
trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
if g.func in allfuncs]
# Our list of new generators - start with anything that we cannot
# work with (i.e. is not a trigonometric term)
freegens = [g for g in gens if g.func not in allfuncs]
newgens = []
trigdict = {}
for (coeff, var), fn in trigterms:
trigdict.setdefault(var, []).append((coeff, fn))
res = [] # the ideal
for key, val in trigdict.items():
# We have now assembled a dictionary. Its keys are common
# arguments in trigonometric expressions, and values are lists of
# pairs (coeff, fn). An entry x0: [(coeff, fn), ...] means that we
# need to deal with fn(coeff*x0). We take the rational gcd of the
# coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
# all other arguments are integral multiples thereof.
# We will build an ideal which works with sin(x), cos(x).
# If hint tan is provided, also work with tan(x). Moreover, if
# n > 1, also work with sin(k*x) for k <= n, and similarly for cos
# (and tan if the hint is provided). Finally, any generators which
# the ideal does not work with but we need to accommodate (either
# because it was in expr or because it was provided as a hint)
# we also build into the ideal.
# This selection process is expressed in the list ``terms``.
# build_ideal then generates the actual relations in our ideal,
# from this list.
fns = [x[1] for x in val]
val = [x[0] for x in val]
gcd = reduce(igcd, val)
terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)]
fs = set(funcs + fns)
for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
if any(x in fs for x in (c, s, t)):
fs.add(c)
fs.add(s)
for fn in fs:
for k in range(1, n + 1):
terms.append((fn, k))
extra = []
for fn, v in terms:
if fn == tan:
extra.append((sin, v))
extra.append((cos, v))
if fn in [sin, cos] and tan in fs:
extra.append((tan, v))
if fn == tanh:
extra.append((sinh, v))
extra.append((cosh, v))
if fn in [sinh, cosh] and tanh in fs:
extra.append((tanh, v))
terms.extend(extra)
x = gcd*Mul(*key)
r = build_ideal(x, terms)
res.extend(r)
newgens.extend({fn(v*x) for fn, v in terms})
# Add generators for compound expressions from iterables
for fn, args in iterables:
if fn == tan:
# Tan expressions are recovered from sin and cos.
iterables.extend([(sin, args), (cos, args)])
elif fn == tanh:
# Tanh expressions are recovered from sinh and cosh.
iterables.extend([(sinh, args), (cosh, args)])
else:
dummys = symbols('d:%i' % len(args), cls=Dummy)
expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args)))
res.append(fn(Add(*args)) - expr)
if myI in gens:
res.append(myI**2 + 1)
freegens.remove(myI)
newgens.append(myI)
return res, freegens, newgens
myI = Dummy('I')
expr = expr.subs(S.ImaginaryUnit, myI)
subs = [(myI, S.ImaginaryUnit)]
num, denom = cancel(expr).as_numer_denom()
try:
(pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
except PolificationFailed:
return expr
debug('initial gens:', opt.gens)
ideal, freegens, gens = analyse_gens(opt.gens, hints)
debug('ideal:', ideal)
debug('new gens:', gens, " -- len", len(gens))
debug('free gens:', freegens, " -- len", len(freegens))
# NOTE we force the domain to be ZZ to stop polys from injecting generators
# (which is usually a sign of a bug in the way we build the ideal)
if not gens:
return expr
G = groebner(ideal, order=order, gens=gens, domain=ZZ)
debug('groebner basis:', list(G), " -- len", len(G))
# If our fraction is a polynomial in the free generators, simplify all
# coefficients separately:
from sympy.simplify.ratsimp import ratsimpmodprime
if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
num = Poly(num, gens=gens+freegens).eject(*gens)
res = []
for monom, coeff in num.terms():
ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
# We compute the transitive closure of all generators that can
# be reached from our generators through relations in the ideal.
changed = True
while changed:
changed = False
for p in ideal:
p = Poly(p)
if not ourgens.issuperset(p.gens) and \
not p.has_only_gens(*set(p.gens).difference(ourgens)):
changed = True
ourgens.update(p.exclude().gens)
# NOTE preserve order!
realgens = [x for x in gens if x in ourgens]
# The generators of the ideal have now been (implicitly) split
# into two groups: those involving ourgens and those that don't.
# Since we took the transitive closure above, these two groups
# live in subrings generated by a *disjoint* set of variables.
# Any sensible groebner basis algorithm will preserve this disjoint
# structure (i.e. the elements of the groebner basis can be split
# similarly), and the two subsets of the groebner basis then
# form groebner bases by themselves. (For the smaller generating
# sets, of course.)
ourG = [g.as_expr() for g in G.polys if
g.has_only_gens(*ourgens.intersection(g.gens))]
res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, ourG, order=order,
gens=realgens, quick=quick, domain=ZZ,
polynomial=polynomial).subs(subs))
return Add(*res)
# NOTE The following is simpler and has less assumptions on the
# groebner basis algorithm. If the above turns out to be broken,
# use this.
return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, list(G), order=order,
gens=gens, quick=quick, domain=ZZ)
for monom, coeff in num.terms()])
else:
return ratsimpmodprime(
expr, list(G), order=order, gens=freegens+gens,
quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
_trigs = (TrigonometricFunction, HyperbolicFunction)
def _trigsimp_inverse(rv):
def check_args(x, y):
try:
return x.args[0] == y.args[0]
except IndexError:
return False
def f(rv):
# for simple functions
g = getattr(rv, 'inverse', None)
if (g is not None and isinstance(rv.args[0], g()) and
isinstance(g()(1), TrigonometricFunction)):
return rv.args[0].args[0]
# for atan2 simplifications, harder because atan2 has 2 args
if isinstance(rv, atan2):
y, x = rv.args
if _coeff_isneg(y):
return -f(atan2(-y, x))
elif _coeff_isneg(x):
return S.Pi - f(atan2(y, -x))
if check_args(x, y):
if isinstance(y, sin) and isinstance(x, cos):
return x.args[0]
if isinstance(y, cos) and isinstance(x, sin):
return S.Pi / 2 - x.args[0]
return rv
return bottom_up(rv, f)
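# Hedged illustration (hypothetical session): this pass is what lets
# trigsimp(..., inverse=True) cancel inverse compositions without domain checks,
# e.g. atan2(sin(x), cos(x)) collapses to x, and so does asin(sin(x)):
#   >>> from sympy import asin, sin, trigsimp
#   >>> from sympy.abc import x
#   >>> trigsimp(asin(sin(x)), inverse=True)
#   x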
def trigsimp(expr, inverse=False, **opts):
"""Returns a reduced expression by using known trig identities.
Parameters
==========
inverse : bool, optional
If ``inverse=True``, it will be assumed that a composition of inverse
functions, such as sin and asin, can be cancelled in any order.
For example, ``asin(sin(x))`` will yield ``x`` without checking whether
x belongs to the set where this relation is true. The default is False.
method : string, optional
Specifies the method to use. Valid choices are:
- ``'matching'``, default
- ``'groebner'``
- ``'combined'``
- ``'fu'``
- ``'old'``
If ``'matching'``, simplify the expression recursively by targeting
common patterns. If ``'groebner'``, apply an experimental groebner
basis algorithm. In this case further options are forwarded to
``trigsimp_groebner``, please refer to
its docstring. If ``'combined'``, it first runs the groebner basis
algorithm with small default parameters, then runs the ``'matching'``
algorithm. If ``'fu'``, run the collection of trigonometric
transformations described by Fu, et al. (see the
:py:func:`~sympy.simplify.fu.fu` docstring). If ``'old'``, the original
SymPy trig simplification function is run.
opts :
Optional keyword arguments passed to the method. See each method's
function docstring for details.
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
Simplification occurs wherever trigonometric functions are located.
>>> trigsimp(log(e))
log(2)
Using ``method='groebner'`` (or ``method='combined'``) might lead to
greater simplification.
The old trigsimp routine can be accessed with ``method='old'``.
>>> from sympy import coth, tanh
>>> t = 3*tanh(x)**7 - 2/coth(x)**7
>>> trigsimp(t, method='old') == t
True
>>> trigsimp(t)
tanh(x)**7
"""
from sympy.simplify.fu import fu
expr = sympify(expr)
_eval_trigsimp = getattr(expr, '_eval_trigsimp', None)
if _eval_trigsimp is not None:
return _eval_trigsimp(**opts)
old = opts.pop('old', False)
if not old:
opts.pop('deep', None)
opts.pop('recursive', None)
method = opts.pop('method', 'matching')
else:
method = 'old'
def groebnersimp(ex, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
new = traverse(ex)
if not isinstance(new, Expr):
return new
return trigsimp_groebner(new, **opts)
trigsimpfunc = {
'fu': (lambda x: fu(x, **opts)),
'matching': (lambda x: futrig(x)),
'groebner': (lambda x: groebnersimp(x, **opts)),
'combined': (lambda x: futrig(groebnersimp(x,
polynomial=True, hints=[2, tan]))),
'old': lambda x: trigsimp_old(x, **opts),
}[method]
expr_simplified = trigsimpfunc(expr)
if inverse:
expr_simplified = _trigsimp_inverse(expr_simplified)
return expr_simplified
def exptrigsimp(expr):
"""
Simplifies exponential / trigonometric / hyperbolic functions.
Examples
========
>>> from sympy import exptrigsimp, exp, cosh, sinh
>>> from sympy.abc import z
>>> exptrigsimp(exp(z) + exp(-z))
2*cosh(z)
>>> exptrigsimp(cosh(z) - sinh(z))
exp(-z)
"""
from sympy.simplify.fu import hyper_as_trig, TR2i
def exp_trig(e):
# select the better of e, and e rewritten in terms of exp or trig
# functions
choices = [e]
if e.has(*_trigs):
choices.append(e.rewrite(exp))
choices.append(e.rewrite(cos))
return min(*choices, key=count_ops)
newexpr = bottom_up(expr, exp_trig)
def f(rv):
if not rv.is_Mul:
return rv
commutative_part, noncommutative_part = rv.args_cnc()
# Since as_powers_dict loses order information,
# if there is more than one noncommutative factor,
# it should only be used to simplify the commutative part.
if (len(noncommutative_part) > 1):
return f(Mul(*commutative_part))*Mul(*noncommutative_part)
rvd = rv.as_powers_dict()
newd = rvd.copy()
def signlog(expr, sign=S.One):
if expr is S.Exp1:
return sign, S.One
elif isinstance(expr, exp) or (expr.is_Pow and expr.base == S.Exp1):
return sign, expr.exp
elif sign is S.One:
return signlog(-expr, sign=-S.One)
else:
return None, None
ee = rvd[S.Exp1]
for k in rvd:
if k.is_Add and len(k.args) == 2:
# k == c*(1 + sign*E**x)
c = k.args[0]
sign, x = signlog(k.args[1]/c)
if not x:
continue
m = rvd[k]
newd[k] -= m
if ee == -x*m/2:
# sinh and cosh
newd[S.Exp1] -= ee
ee = 0
if sign == 1:
newd[2*c*cosh(x/2)] += m
else:
newd[-2*c*sinh(x/2)] += m
elif newd[1 - sign*S.Exp1**x] == -m:
# tanh
del newd[1 - sign*S.Exp1**x]
if sign == 1:
newd[-c/tanh(x/2)] += m
else:
newd[-c*tanh(x/2)] += m
else:
newd[1 + sign*S.Exp1**x] += m
newd[c] += m
return Mul(*[k**newd[k] for k in newd])
newexpr = bottom_up(newexpr, f)
# sin/cos and sinh/cosh ratios to tan and tanh, respectively
if newexpr.has(HyperbolicFunction):
e, f = hyper_as_trig(newexpr)
newexpr = f(TR2i(e))
if newexpr.has(TrigonometricFunction):
newexpr = TR2i(newexpr)
# can we ever generate an I where there was none previously?
if not (newexpr.has(I) and not expr.has(I)):
expr = newexpr
return expr
#-------------------- the old trigsimp routines ---------------------
def trigsimp_old(expr, *, first=True, **opts):
"""
Reduces expression by using known trig identities.
Notes
=====
deep:
- Apply trigsimp inside all objects with arguments
recursive:
- Use common subexpression elimination (cse()) and apply
trigsimp recursively (this is quite expensive if the
expression is large)
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', 'fu' and 'futrig'. If 'matching', simplify the
expression recursively by pattern matching. If 'groebner', apply an
experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring) while `futrig` runs a subset of Fu-transforms
that mimic the behavior of `trigsimp`.
compare:
- show input and output from `trigsimp` and `futrig` when different,
but returns the `trigsimp` value.
Examples
========
>>> from sympy import trigsimp, sin, cos, log, cot
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e, old=True)
2
>>> trigsimp(log(e), old=True)
log(2*sin(x)**2 + 2*cos(x)**2)
>>> trigsimp(log(e), deep=True, old=True)
log(2)
Using `method="groebner"` (or `"combined"`) can sometimes lead to a lot
more simplification:
>>> e = (-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1)
>>> trigsimp(e, old=True)
(1 - sin(x))/cos(x) + cos(x)/(1 - sin(x))
>>> trigsimp(e, method="groebner", old=True)
2/cos(x)
>>> trigsimp(1/cot(x)**2, compare=True, old=True)
futrig: tan(x)**2
cot(x)**(-2)
"""
old = expr
if first:
if not expr.has(*_trigs):
return expr
trigsyms = set().union(*[t.free_symbols for t in expr.atoms(*_trigs)])
if len(trigsyms) > 1:
from sympy.simplify.simplify import separatevars
d = separatevars(expr)
if d.is_Mul:
d = separatevars(d, dict=True) or d
if isinstance(d, dict):
expr = 1
for k, v in d.items():
# remove hollow factoring
was = v
v = expand_mul(v)
opts['first'] = False
vnew = trigsimp(v, **opts)
if vnew == v:
vnew = was
expr *= vnew
old = expr
else:
if d.is_Add:
for s in trigsyms:
r, e = expr.as_independent(s)
if r:
opts['first'] = False
expr = r + trigsimp(e, **opts)
if not expr.is_Add:
break
old = expr
recursive = opts.pop('recursive', False)
deep = opts.pop('deep', False)
method = opts.pop('method', 'matching')
def groebnersimp(ex, deep, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
if deep:
ex = traverse(ex)
return trigsimp_groebner(ex, **opts)
trigsimpfunc = {
'matching': (lambda x, d: _trigsimp(x, d)),
'groebner': (lambda x, d: groebnersimp(x, d, **opts)),
'combined': (lambda x, d: _trigsimp(groebnersimp(x,
d, polynomial=True, hints=[2, tan]),
d))
}[method]
if recursive:
w, g = cse(expr)
g = trigsimpfunc(g[0], deep)
for sub in reversed(w):
g = g.subs(sub[0], sub[1])
g = trigsimpfunc(g, deep)
result = g
else:
result = trigsimpfunc(expr, deep)
if opts.get('compare', False):
f = futrig(old)
if f != result:
print('\tfutrig:', f)
return result
def _dotrig(a, b):
"""Helper to tell whether ``a`` and ``b`` have the same sorts
of symbols in them -- no need to test hyperbolic patterns against
expressions that have no hyperbolics in them."""
return a.func == b.func and (
a.has(TrigonometricFunction) and b.has(TrigonometricFunction) or
a.has(HyperbolicFunction) and b.has(HyperbolicFunction))
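# Illustrative examples (not part of the original module): _dotrig only checks
# the kinds of functions involved, e.g. _dotrig(sin(x) + 1, sin(y) - 2) is True
# (both Adds contain trigonometric functions), while _dotrig(2*sinh(x), 3*sin(y))
# is False (one side is purely hyperbolic, the other purely trigonometric).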
_trigpat = None
def _trigpats():
global _trigpat
a, b, c = symbols('a b c', cls=Wild)
d = Wild('d', commutative=False)
# for the simplifications like sinh/cosh -> tanh:
# DO NOT REORDER THE FIRST 14 since these are assumed to be in this
# order in _match_div_rewrite.
matchers_division = (
(a*sin(b)**c/cos(b)**c, a*tan(b)**c, sin(b), cos(b)),
(a*tan(b)**c*cos(b)**c, a*sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*sin(b)**c, a*cos(b)**c, sin(b), cos(b)),
(a*tan(b)**c/sin(b)**c, a/cos(b)**c, sin(b), cos(b)),
(a*cot(b)**c/cos(b)**c, a/sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*tan(b)**c, a, sin(b), cos(b)),
(a*(cos(b) + 1)**c*(cos(b) - 1)**c,
a*(-sin(b)**2)**c, cos(b) + 1, cos(b) - 1),
(a*(sin(b) + 1)**c*(sin(b) - 1)**c,
a*(-cos(b)**2)**c, sin(b) + 1, sin(b) - 1),
(a*sinh(b)**c/cosh(b)**c, a*tanh(b)**c, S.One, S.One),
(a*tanh(b)**c*cosh(b)**c, a*sinh(b)**c, S.One, S.One),
(a*coth(b)**c*sinh(b)**c, a*cosh(b)**c, S.One, S.One),
(a*tanh(b)**c/sinh(b)**c, a/cosh(b)**c, S.One, S.One),
(a*coth(b)**c/cosh(b)**c, a/sinh(b)**c, S.One, S.One),
(a*coth(b)**c*tanh(b)**c, a, S.One, S.One),
(c*(tanh(a) + tanh(b))/(1 + tanh(a)*tanh(b)),
tanh(a + b)*c, S.One, S.One),
)
matchers_add = (
(c*sin(a)*cos(b) + c*cos(a)*sin(b) + d, sin(a + b)*c + d),
(c*cos(a)*cos(b) - c*sin(a)*sin(b) + d, cos(a + b)*c + d),
(c*sin(a)*cos(b) - c*cos(a)*sin(b) + d, sin(a - b)*c + d),
(c*cos(a)*cos(b) + c*sin(a)*sin(b) + d, cos(a - b)*c + d),
(c*sinh(a)*cosh(b) + c*sinh(b)*cosh(a) + d, sinh(a + b)*c + d),
(c*cosh(a)*cosh(b) + c*sinh(a)*sinh(b) + d, cosh(a + b)*c + d),
)
# for cos(x)**2 + sin(x)**2 -> 1
matchers_identity = (
(a*sin(b)**2, a - a*cos(b)**2),
(a*tan(b)**2, a*(1/cos(b))**2 - a),
(a*cot(b)**2, a*(1/sin(b))**2 - a),
(a*sin(b + c), a*(sin(b)*cos(c) + sin(c)*cos(b))),
(a*cos(b + c), a*(cos(b)*cos(c) - sin(b)*sin(c))),
(a*tan(b + c), a*((tan(b) + tan(c))/(1 - tan(b)*tan(c)))),
(a*sinh(b)**2, a*cosh(b)**2 - a),
(a*tanh(b)**2, a - a*(1/cosh(b))**2),
(a*coth(b)**2, a + a*(1/sinh(b))**2),
(a*sinh(b + c), a*(sinh(b)*cosh(c) + sinh(c)*cosh(b))),
(a*cosh(b + c), a*(cosh(b)*cosh(c) + sinh(b)*sinh(c))),
(a*tanh(b + c), a*((tanh(b) + tanh(c))/(1 + tanh(b)*tanh(c)))),
)
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1-cos(x)**2 when sin(x)**2 was "simpler"
artifacts = (
(a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
(a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
(a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin),
(a - a*cosh(b)**2 + c, -a*sinh(b)**2 + c, cosh),
(a - a*(1/cosh(b))**2 + c, a*tanh(b)**2 + c, cosh),
(a + a*(1/sinh(b))**2 + c, a*coth(b)**2 + c, sinh),
# same as above but with noncommutative prefactor
(a*d - a*d*cos(b)**2 + c, a*d*sin(b)**2 + c, cos),
(a*d - a*d*(1/cos(b))**2 + c, -a*d*tan(b)**2 + c, cos),
(a*d - a*d*(1/sin(b))**2 + c, -a*d*cot(b)**2 + c, sin),
(a*d - a*d*cosh(b)**2 + c, -a*d*sinh(b)**2 + c, cosh),
(a*d - a*d*(1/cosh(b))**2 + c, a*d*tanh(b)**2 + c, cosh),
(a*d + a*d*(1/sinh(b))**2 + c, a*d*coth(b)**2 + c, sinh),
)
_trigpat = (a, b, c, d, matchers_division, matchers_add,
matchers_identity, artifacts)
return _trigpat
def _replace_mul_fpowxgpow(expr, f, g, rexp, h, rexph):
"""Helper for _match_div_rewrite.
Replace f(b_)**c_*g(b_)**(rexp(c_)) with h(b)**rexph(c) if f(b_)
and g(b_) are both positive or if c_ is an integer.
"""
# assert expr.is_Mul and expr.is_commutative and f != g
fargs = defaultdict(int)
gargs = defaultdict(int)
args = []
for x in expr.args:
if x.is_Pow or x.func in (f, g):
b, e = x.as_base_exp()
if b.is_positive or e.is_integer:
if b.func == f:
fargs[b.args[0]] += e
continue
elif b.func == g:
gargs[b.args[0]] += e
continue
args.append(x)
common = set(fargs) & set(gargs)
hit = False
while common:
key = common.pop()
fe = fargs.pop(key)
ge = gargs.pop(key)
if fe == rexp(ge):
args.append(h(key)**rexph(fe))
hit = True
else:
fargs[key] = fe
gargs[key] = ge
if not hit:
return expr
while fargs:
key, e = fargs.popitem()
args.append(f(key)**e)
while gargs:
key, e = gargs.popitem()
args.append(g(key)**e)
return Mul(*args)
_idn = lambda x: x
_midn = lambda x: -x
_one = lambda x: S.One
def _match_div_rewrite(expr, i):
"""helper for __trigsimp"""
if i == 0:
expr = _replace_mul_fpowxgpow(expr, sin, cos,
_midn, tan, _idn)
elif i == 1:
expr = _replace_mul_fpowxgpow(expr, tan, cos,
_idn, sin, _idn)
elif i == 2:
expr = _replace_mul_fpowxgpow(expr, cot, sin,
_idn, cos, _idn)
elif i == 3:
expr = _replace_mul_fpowxgpow(expr, tan, sin,
_midn, cos, _midn)
elif i == 4:
expr = _replace_mul_fpowxgpow(expr, cot, cos,
_midn, sin, _midn)
elif i == 5:
expr = _replace_mul_fpowxgpow(expr, cot, tan,
_idn, _one, _idn)
# i in (6, 7) is skipped
elif i == 8:
expr = _replace_mul_fpowxgpow(expr, sinh, cosh,
_midn, tanh, _idn)
elif i == 9:
expr = _replace_mul_fpowxgpow(expr, tanh, cosh,
_idn, sinh, _idn)
elif i == 10:
expr = _replace_mul_fpowxgpow(expr, coth, sinh,
_idn, cosh, _idn)
elif i == 11:
expr = _replace_mul_fpowxgpow(expr, tanh, sinh,
_midn, cosh, _midn)
elif i == 12:
expr = _replace_mul_fpowxgpow(expr, coth, cosh,
_midn, sinh, _midn)
elif i == 13:
expr = _replace_mul_fpowxgpow(expr, coth, tanh,
_idn, _one, _idn)
else:
return None
return expr
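# Illustrative sketch (not part of the original module): the index ``i``
# selects one row of ``matchers_division`` and applies it through
# ``_replace_mul_fpowxgpow``, e.g.
#   >>> from sympy import sin, cos
#   >>> from sympy.abc import x
#   >>> _match_div_rewrite(sin(x)**2/cos(x)**2, 0)
#   tan(x)**2
# Indices 6 and 7 (the (cos(b) +/- 1) and (sin(b) +/- 1) rules) have no fast
# rewrite here and return None, so the caller falls back to SymPy matching.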
def _trigsimp(expr, deep=False):
# protect the cache from non-trig patterns; we only allow
# trig patterns to enter the cache
if expr.has(*_trigs):
return __trigsimp(expr, deep)
return expr
@cacheit
def __trigsimp(expr, deep=False):
"""recursive helper for trigsimp"""
from sympy.simplify.fu import TR10i
if _trigpat is None:
_trigpats()
a, b, c, d, matchers_division, matchers_add, \
matchers_identity, artifacts = _trigpat
if expr.is_Mul:
# do some simplifications like sin/cos -> tan:
if not expr.is_commutative:
com, nc = expr.args_cnc()
expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc)
else:
for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division):
if not _dotrig(expr, pattern):
continue
newexpr = _match_div_rewrite(expr, i)
if newexpr is not None:
if newexpr != expr:
expr = newexpr
break
else:
continue
# use SymPy matching instead
res = expr.match(pattern)
if res and res.get(c, 0):
if not res[c].is_integer:
ok = ok1.subs(res)
if not ok.is_positive:
continue
ok = ok2.subs(res)
if not ok.is_positive:
continue
# if "a" contains any of trig or hyperbolic funcs with
# argument "b" then skip the simplification
if any(w.args[0] == res[b] for w in res[a].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
# simplify and finish:
expr = simp.subs(res)
break # process below
if expr.is_Add:
args = []
for term in expr.args:
if not term.is_commutative:
com, nc = term.args_cnc()
nc = Mul._from_args(nc)
term = Mul._from_args(com)
else:
nc = S.One
term = _trigsimp(term, deep)
for pattern, result in matchers_identity:
res = term.match(pattern)
if res is not None:
term = result.subs(res)
break
args.append(term*nc)
if args != expr.args:
expr = Add(*args)
expr = min(expr, expand(expr), key=count_ops)
if expr.is_Add:
for pattern, result in matchers_add:
if not _dotrig(expr, pattern):
continue
expr = TR10i(expr)
if expr.has(HyperbolicFunction):
res = expr.match(pattern)
# if "d" contains any trig or hyperbolic funcs with
# argument "a" or "b" then skip the simplification;
# this isn't perfect -- see tests
if res is None or not (a in res and b in res) or any(
w.args[0] in (res[a], res[b]) for w in res[d].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
expr = result.subs(res)
break
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1 - cos(x)**2 when sin(x)**2 was "simpler"
for pattern, result, ex in artifacts:
if not _dotrig(expr, pattern):
continue
# Substitute a new wild that excludes some function(s)
# to help influence a better match. This is because
# sometimes, for example, 'a' would match sec(x)**2
a_t = Wild('a', exclude=[ex])
pattern = pattern.subs(a, a_t)
result = result.subs(a, a_t)
m = expr.match(pattern)
was = None
while m and was != expr:
was = expr
if m[a_t] == 0 or \
-m[a_t] in m[c].args or m[a_t] + m[c] == 0:
break
if d in m and m[a_t]*m[d] + m[c] == 0:
break
expr = result.subs(m)
m = expr.match(pattern)
m.setdefault(c, S.Zero)
elif expr.is_Mul or expr.is_Pow or deep and expr.args:
expr = expr.func(*[_trigsimp(a, deep) for a in expr.args])
try:
if not expr.has(*_trigs):
raise TypeError
e = expr.atoms(exp)
new = expr.rewrite(exp, deep=deep)
if new == e:
raise TypeError
fnew = factor(new)
if fnew != new:
new = sorted([new, factor(new)], key=count_ops)[0]
# if all exp that were introduced disappeared then accept it
if not (new.atoms(exp) - e):
expr = new
except TypeError:
pass
return expr
#------------------- end of old trigsimp routines --------------------
def futrig(e, *, hyper=True, **kwargs):
"""Return simplified ``e`` using Fu-like transformations.
This is not the "Fu" algorithm. This is called by default
from ``trigsimp``. By default, hyperbolics subexpressions
will be simplified, but this can be disabled by setting
``hyper=False``.
Examples
========
>>> from sympy import trigsimp, tan, sinh, tanh
>>> from sympy.simplify.trigsimp import futrig
>>> from sympy.abc import x
>>> trigsimp(1/tan(x)**2)
tan(x)**(-2)
>>> futrig(sinh(x)/tanh(x))
cosh(x)
"""
from sympy.simplify.fu import hyper_as_trig
e = sympify(e)
if not isinstance(e, Basic):
return e
if not e.args:
return e
old = e
e = bottom_up(e, _futrig)
if hyper and e.has(HyperbolicFunction):
e, f = hyper_as_trig(e)
e = f(bottom_up(e, _futrig))
if e != old and e.is_Mul and e.args[0].is_Rational:
# redistribute leading coeff on 2-arg Add
e = Mul(*e.as_coeff_Mul())
return e
def _futrig(e):
"""Helper for futrig."""
from sympy.simplify.fu import (
TR1, TR2, TR3, TR2i, TR10, L, TR10i,
TR8, TR6, TR15, TR16, TR111, TR5, TRmorrie, TR11, _TR11, TR14, TR22,
TR12)
if not e.has(TrigonometricFunction):
return e
if e.is_Mul:
coeff, e = e.as_independent(TrigonometricFunction)
else:
coeff = None
Lops = lambda x: (L(x), x.count_ops(), _nodes(x), len(x.args), x.is_Add)
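    # objective used by ``greedy`` below: prefer candidates with fewer trig
    # functions, then fewer operations, fewer nodes, fewer top-level args,
    # and a non-Add over an Add (False sorts before True)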
trigs = lambda x: x.has(TrigonometricFunction)
tree = [identity,
(
TR3, # canonical angles
TR1, # sec-csc -> cos-sin
TR12, # expand tan of sum
lambda x: _eapply(factor, x, trigs),
TR2, # tan-cot -> sin-cos
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR2i, # sin-cos ratio -> tan
lambda x: _eapply(lambda i: factor(i.normal()), x, trigs),
TR14, # factored identities
TR5, # sin-pow -> cos_pow
TR10, # sin-cos of sums -> sin-cos prod
TR11, _TR11, TR6, # reduce double angles and rewrite cos pows
lambda x: _eapply(factor, x, trigs),
TR14, # factored powers of identities
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR10i, # sin-cos products > sin-cos of sums
TRmorrie,
[identity, TR8], # sin-cos products -> sin-cos of sums
[identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan
[
lambda x: _eapply(expand_mul, TR5(x), trigs),
lambda x: _eapply(
expand_mul, TR15(x), trigs)], # pos/neg powers of sin
[
lambda x: _eapply(expand_mul, TR6(x), trigs),
lambda x: _eapply(
expand_mul, TR16(x), trigs)], # pos/neg powers of cos
TR111, # tan, sin, cos to neg power -> cot, csc, sec
[identity, TR2i], # sin-cos ratio to tan
[identity, lambda x: _eapply(
expand_mul, TR22(x), trigs)], # tan-cot to sec-csc
TR1, TR2, TR2i,
[identity, lambda x: _eapply(
factor_terms, TR12(x), trigs)], # expand tan of sum
)]
e = greedy(tree, objective=Lops)(e)
if coeff is not None:
e = coeff * e
return e
def _is_Expr(e):
"""_eapply helper to tell whether ``e`` and all its args
are Exprs."""
if isinstance(e, Derivative):
return _is_Expr(e.expr)
if not isinstance(e, Expr):
return False
return all(_is_Expr(i) for i in e.args)
def _eapply(func, e, cond=None):
"""Apply ``func`` to ``e`` if all args are Exprs else only
apply it to those args that *are* Exprs."""
if not isinstance(e, Expr):
return e
if _is_Expr(e) or not e.args:
return func(e)
return e.func(*[
_eapply(func, ei) if (cond is None or cond(ei)) else ei
for ei in e.args])
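# Illustrative examples (not part of the original module): ``_eapply`` applies
# ``func`` to a plain Expr directly, and only to the Expr-valued arguments of
# objects that mix in non-Expr args, e.g.
#   >>> from sympy import factor, Integral
#   >>> from sympy.abc import x
#   >>> _eapply(factor, x**2 + 2*x + 1)
#   (x + 1)**2
#   >>> _eapply(factor, Integral(x**2 + 2*x + 1, x))
#   Integral((x + 1)**2, x)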
import warnings
from sympy.core.numbers import Rational
from sympy.core.singleton import S
from sympy.core.relational import is_eq
from sympy.functions.elementary.complexes import (conjugate, im, re, sign)
from sympy.functions.elementary.exponential import (exp, log as ln)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, cos, sin, atan2)
from sympy.simplify.trigsimp import trigsimp
from sympy.integrals.integrals import integrate
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.core.sympify import sympify, _sympify
from sympy.core.expr import Expr
from sympy.core.logic import fuzzy_not, fuzzy_or
from mpmath.libmp.libmpf import prec_to_dps
def _check_norm(elements, norm):
"""validate if input norm is consistent"""
if norm is not None and norm.is_number:
if norm.is_positive is False:
raise ValueError("Input norm must be positive.")
numerical = all(i.is_number and i.is_real is True for i in elements)
if numerical and is_eq(norm**2, sum(i**2 for i in elements)) is False:
raise ValueError("Incompatible value for norm.")
def _is_extrinsic(seq):
"""validate seq and return True if seq is lowercase and False if uppercase"""
if type(seq) != str:
raise ValueError('Expected seq to be a string.')
if len(seq) != 3:
raise ValueError("Expected 3 axes, got `{}`.".format(seq))
intrinsic = seq.isupper()
extrinsic = seq.islower()
if not (intrinsic or extrinsic):
raise ValueError("seq must either be fully uppercase (for extrinsic "
"rotations), or fully lowercase, for intrinsic "
"rotations).")
i, j, k = seq.lower()
if (i == j) or (j == k):
raise ValueError("Consecutive axes must be different")
bad = set(seq) - set('xyzXYZ')
if bad:
raise ValueError("Expected axes from `seq` to be from "
"['x', 'y', 'z'] or ['X', 'Y', 'Z'], "
"got {}".format(''.join(bad)))
return extrinsic
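# Illustrative examples (not part of the original module): lowercase sequences
# denote extrinsic rotations, uppercase sequences intrinsic ones, e.g.
#   >>> _is_extrinsic('zyz')
#   True
#   >>> _is_extrinsic('ZYZ')
#   False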
class Quaternion(Expr):
"""Provides basic quaternion operations.
Quaternion objects can be instantiated as Quaternion(a, b, c, d)
as in (a + b*i + c*j + d*k).
Parameters
==========
norm : None or number
Pre-defined quaternion norm. If a value is given, Quaternion.norm
returns this pre-defined value instead of calculating the norm
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q
1 + 2*i + 3*j + 4*k
Quaternions over complex fields can be defined as :
>>> from sympy import Quaternion
>>> from sympy import symbols, I
>>> x = symbols('x')
>>> q1 = Quaternion(x, x**3, x, x**2, real_field = False)
>>> q2 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q1
x + x**3*i + x*j + x**2*k
>>> q2
(3 + 4*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k
Defining symbolic unit quaternions:
>>> from sympy import Quaternion
>>> from sympy.abc import w, x, y, z
>>> q = Quaternion(w, x, y, z, norm=1)
>>> q
w + x*i + y*j + z*k
>>> q.norm()
1
References
==========
.. [1] http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/
.. [2] https://en.wikipedia.org/wiki/Quaternion
"""
_op_priority = 11.0
is_commutative = False
def __new__(cls, a=0, b=0, c=0, d=0, real_field=True, norm=None):
a, b, c, d = map(sympify, (a, b, c, d))
if any(i.is_commutative is False for i in [a, b, c, d]):
raise ValueError("arguments have to be commutative")
else:
obj = Expr.__new__(cls, a, b, c, d)
obj._a = a
obj._b = b
obj._c = c
obj._d = d
obj._real_field = real_field
obj.set_norm(norm)
return obj
def set_norm(self, norm):
"""Sets norm of an already instantiated quaternion.:
Parameters
==========
norm : None or number
Pre-defined quaternion norm. If a value is given, Quaternion.norm
returns this pre-defined value instead of calculating the norm
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> q = Quaternion(a, b, c, d)
>>> q.norm()
sqrt(a**2 + b**2 + c**2 + d**2)
Setting the norm:
>>> q.set_norm(1)
>>> q.norm()
1
Removing set norm:
>>> q.set_norm(None)
>>> q.norm()
sqrt(a**2 + b**2 + c**2 + d**2)
"""
norm = sympify(norm)
_check_norm(self.args, norm)
self._norm = norm
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@property
def c(self):
return self._c
@property
def d(self):
return self._d
@property
def real_field(self):
return self._real_field
@property
def product_matrix_left(self):
r"""Returns 4 x 4 Matrix equivalent to a Hamilton product from the
left. This can be useful when treating quaternion elements as column
vectors. Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d
are real numbers, the product matrix from the left is:
.. math::
M = \begin{bmatrix} a &-b &-c &-d \\
b & a &-d & c \\
c & d & a &-b \\
d &-c & b & a \end{bmatrix}
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> q1 = Quaternion(1, 0, 0, 1)
>>> q2 = Quaternion(a, b, c, d)
>>> q1.product_matrix_left
Matrix([
[1, 0, 0, -1],
[0, 1, -1, 0],
[0, 1, 1, 0],
[1, 0, 0, 1]])
>>> q1.product_matrix_left * q2.to_Matrix()
Matrix([
[a - d],
[b - c],
[b + c],
[a + d]])
This is equivalent to:
>>> (q1 * q2).to_Matrix()
Matrix([
[a - d],
[b - c],
[b + c],
[a + d]])
"""
return Matrix([
[self.a, -self.b, -self.c, -self.d],
[self.b, self.a, -self.d, self.c],
[self.c, self.d, self.a, -self.b],
[self.d, -self.c, self.b, self.a]])
@property
def product_matrix_right(self):
r"""Returns 4 x 4 Matrix equivalent to a Hamilton product from the
right. This can be useful when treating quaternion elements as column
vectors. Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d
        are real numbers, the product matrix from the right is:
.. math::
M = \begin{bmatrix} a &-b &-c &-d \\
b & a & d &-c \\
c &-d & a & b \\
d & c &-b & a \end{bmatrix}
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> q1 = Quaternion(a, b, c, d)
>>> q2 = Quaternion(1, 0, 0, 1)
>>> q2.product_matrix_right
Matrix([
[1, 0, 0, -1],
[0, 1, 1, 0],
[0, -1, 1, 0],
[1, 0, 0, 1]])
Note the switched arguments: the matrix represents the quaternion on
the right, but is still considered as a matrix multiplication from the
left.
>>> q2.product_matrix_right * q1.to_Matrix()
Matrix([
[ a - d],
[ b + c],
[-b + c],
[ a + d]])
This is equivalent to:
>>> (q1 * q2).to_Matrix()
Matrix([
[ a - d],
[ b + c],
[-b + c],
[ a + d]])
"""
return Matrix([
[self.a, -self.b, -self.c, -self.d],
[self.b, self.a, self.d, -self.c],
[self.c, -self.d, self.a, self.b],
[self.d, self.c, -self.b, self.a]])
def to_Matrix(self, vector_only=False):
"""Returns elements of quaternion as a column vector.
By default, a Matrix of length 4 is returned, with the real part as the
first element.
If vector_only is True, returns only imaginary part as a Matrix of
length 3.
Parameters
==========
vector_only : bool
If True, only imaginary part is returned.
Default : False
Returns
=======
Matrix
A column vector constructed by the elements of the quaternion.
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> q = Quaternion(a, b, c, d)
>>> q
a + b*i + c*j + d*k
>>> q.to_Matrix()
Matrix([
[a],
[b],
[c],
[d]])
>>> q.to_Matrix(vector_only=True)
Matrix([
[b],
[c],
[d]])
"""
if vector_only:
return Matrix(self.args[1:])
else:
return Matrix(self.args)
@classmethod
def from_Matrix(cls, elements):
"""Returns quaternion from elements of a column vector`.
If vector_only is True, returns only imaginary part as a Matrix of
length 3.
Parameters
==========
elements : Matrix, list or tuple of length 3 or 4. If length is 3,
assume real part is zero.
Returns
=======
Quaternion
A quaternion created from the input elements.
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> q = Quaternion.from_Matrix([a, b, c, d])
>>> q
a + b*i + c*j + d*k
>>> q = Quaternion.from_Matrix([b, c, d])
>>> q
0 + b*i + c*j + d*k
"""
length = len(elements)
if length != 3 and length != 4:
raise ValueError("Input elements must have length 3 or 4, got {} "
"elements".format(length))
if length == 3:
return Quaternion(0, *elements)
else:
return Quaternion(*elements)
@classmethod
def from_euler(cls, angles, seq):
"""Returns quaternion equivalent to rotation represented by the Euler
angles, in the sequence defined by `seq`.
Parameters
==========
angles : list, tuple or Matrix of 3 numbers
The Euler angles (in radians).
seq : string of length 3
Represents the sequence of rotations.
            For extrinsic rotations, seq must be all lowercase and its elements
            must be from the set `{'x', 'y', 'z'}`
            For intrinsic rotations, seq must be all uppercase and its elements
            must be from the set `{'X', 'Y', 'Z'}`
Returns
=======
Quaternion
The normalized rotation quaternion calculated from the Euler angles
in the given sequence.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import pi
>>> q = Quaternion.from_euler([pi/2, 0, 0], 'xyz')
>>> q
sqrt(2)/2 + sqrt(2)/2*i + 0*j + 0*k
>>> q = Quaternion.from_euler([0, pi/2, pi] , 'zyz')
>>> q
0 + (-sqrt(2)/2)*i + 0*j + sqrt(2)/2*k
>>> q = Quaternion.from_euler([0, pi/2, pi] , 'ZYZ')
>>> q
0 + sqrt(2)/2*i + 0*j + sqrt(2)/2*k
"""
if len(angles) != 3:
raise ValueError("3 angles must be given.")
extrinsic = _is_extrinsic(seq)
i, j, k = seq.lower()
# get elementary basis vectors
ei = [1 if n == i else 0 for n in 'xyz']
ej = [1 if n == j else 0 for n in 'xyz']
ek = [1 if n == k else 0 for n in 'xyz']
# calculate distinct quaternions
qi = cls.from_axis_angle(ei, angles[0])
qj = cls.from_axis_angle(ej, angles[1])
qk = cls.from_axis_angle(ek, angles[2])
if extrinsic:
return trigsimp(qk * qj * qi)
else:
return trigsimp(qi * qj * qk)
def to_euler(self, seq, angle_addition=True, avoid_square_root=False):
r"""Returns Euler angles representing same rotation as the quaternion,
in the sequence given by `seq`. This implements the method described
in [1]_.
Parameters
==========
seq : string of length 3
Represents the sequence of rotations.
            For extrinsic rotations, seq must be all lowercase and its elements
            must be from the set `{'x', 'y', 'z'}`
            For intrinsic rotations, seq must be all uppercase and its elements
            must be from the set `{'X', 'Y', 'Z'}`
angle_addition : bool
Default : True
When True, first and third angles are given as an addition and
subtraction of two simpler `atan2` expressions. When False, the
first and third angles are each given by a single more complicated
`atan2` expression. This equivalent is given by:
            .. math::
\operatorname{atan_2} (b,a) \pm \operatorname{atan_2} (d,c) =
\operatorname{atan_2} (bc\pm ad, ac\mp bd)
avoid_square_root : bool
Default : False
When True, the second angle is calculated with an expression based
            on `acos`, which is slightly more complicated but avoids a square
root. When False, second angle is calculated with `atan2`, which
is simpler and can be better for numerical reasons (some
numerical implementations of `acos` have problems near zero).
Returns
=======
Tuple
The Euler angles calculated from the quaternion
Examples
========
>>> from sympy import Quaternion
>>> from sympy.abc import a, b, c, d
>>> euler = Quaternion(a, b, c, d).to_euler('zyz')
>>> euler
(-atan2(-b, c) + atan2(d, a),
2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2)),
atan2(-b, c) + atan2(d, a))
References
==========
.. [1] https://doi.org/10.1371/journal.pone.0276302
"""
if self.is_zero_quaternion():
raise ValueError('Cannot convert a quaternion with norm 0.')
angles = [0, 0, 0]
extrinsic = _is_extrinsic(seq)
i, j, k = seq.lower()
# get index corresponding to elementary basis vectors
i = 'xyz'.index(i) + 1
j = 'xyz'.index(j) + 1
k = 'xyz'.index(k) + 1
if not extrinsic:
i, k = k, i
# check if sequence is symmetric
symmetric = i == k
if symmetric:
k = 6 - i - j
# parity of the permutation
sign = (i - j) * (j - k) * (k - i) // 2
# permutate elements
elements = [self.a, self.b, self.c, self.d]
a = elements[0]
b = elements[i]
c = elements[j]
d = elements[k] * sign
if not symmetric:
a, b, c, d = a - c, b + d, c + a, d - b
if avoid_square_root:
n2 = self.norm()**2 if symmetric else 2 * self.norm()**2
angles[1] = acos((a*a + b*b - c*c - d*d) / n2)
else:
angles[1] = 2 * atan2(sqrt(c * c + d * d), sqrt(a * a + b * b))
# Check for singularities in numerical cases
angle_test = angles[1]
case = 0
if angle_test.is_number:
if is_eq(angle_test, S.Zero):
case = 1
if is_eq(angle_test, S.Pi):
case = 2
if case == 0:
if angle_addition:
angles[0] = atan2(b, a) + atan2(d, c)
angles[2] = atan2(b, a) - atan2(d, c)
else:
angles[0] = atan2(b*c + a*d, a*c - b*d)
angles[2] = atan2(b*c - a*d, a*c + b*d)
else: # any degenerate case
warnings.warn('Singularity case, setting third angle to zero')
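            # zero the angle that will end up in the third slot after the
            # possible reversal for extrinsic sequences below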
angles[2 * (not extrinsic)] = sympify(0)
if case == 1:
angles[2 * extrinsic] = 2 * atan2(b, a)
else:
angles[2 * extrinsic] = 2 * atan2(d, c)
angles[2 * extrinsic] *= (-1 if extrinsic else 1)
# for Tait-Bryan angles
if not symmetric:
angles[1] -= S.Pi / 2
angles[0] *= sign
if extrinsic:
return tuple(angles[::-1])
else:
return tuple(angles)
@classmethod
def from_axis_angle(cls, vector, angle):
"""Returns a rotation quaternion given the axis and the angle of rotation.
Parameters
==========
vector : tuple of three numbers
The vector representation of the given axis.
angle : number
The angle by which axis is rotated (in radians).
Returns
=======
Quaternion
The normalized rotation quaternion calculated from the given axis and the angle of rotation.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import pi, sqrt
>>> q = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), 2*pi/3)
>>> q
1/2 + 1/2*i + 1/2*j + 1/2*k
"""
(x, y, z) = vector
norm = sqrt(x**2 + y**2 + z**2)
(x, y, z) = (x / norm, y / norm, z / norm)
s = sin(angle * S.Half)
a = cos(angle * S.Half)
b = x * s
c = y * s
d = z * s
# note that this quaternion is already normalized by construction:
# c^2 + (s*x)^2 + (s*y)^2 + (s*z)^2 = c^2 + s^2*(x^2 + y^2 + z^2) = c^2 + s^2 * 1 = c^2 + s^2 = 1
# so, what we return is a normalized quaternion
return cls(a, b, c, d)
@classmethod
def from_rotation_matrix(cls, M):
"""Returns the equivalent quaternion of a matrix. The quaternion will be normalized
only if the matrix is special orthogonal (orthogonal and det(M) = 1).
Parameters
==========
M : Matrix
Input matrix to be converted to equivalent quaternion. M must be special
orthogonal (orthogonal and det(M) = 1) for the quaternion to be normalized.
Returns
=======
Quaternion
The quaternion equivalent to given matrix.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import Matrix, symbols, cos, sin, trigsimp
>>> x = symbols('x')
>>> M = Matrix([[cos(x), -sin(x), 0], [sin(x), cos(x), 0], [0, 0, 1]])
>>> q = trigsimp(Quaternion.from_rotation_matrix(M))
>>> q
sqrt(2)*sqrt(cos(x) + 1)/2 + 0*i + 0*j + sqrt(2 - 2*cos(x))*sign(sin(x))/2*k
"""
absQ = M.det()**Rational(1, 3)
a = sqrt(absQ + M[0, 0] + M[1, 1] + M[2, 2]) / 2
b = sqrt(absQ + M[0, 0] - M[1, 1] - M[2, 2]) / 2
c = sqrt(absQ - M[0, 0] + M[1, 1] - M[2, 2]) / 2
d = sqrt(absQ - M[0, 0] - M[1, 1] + M[2, 2]) / 2
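        # pick the signs of b, c and d from the skew-symmetric part of M so
        # that the quaternion reproduces the sense of the rotation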
b = b * sign(M[2, 1] - M[1, 2])
c = c * sign(M[0, 2] - M[2, 0])
d = d * sign(M[1, 0] - M[0, 1])
return Quaternion(a, b, c, d)
def __add__(self, other):
return self.add(other)
def __radd__(self, other):
return self.add(other)
def __sub__(self, other):
return self.add(other*-1)
def __mul__(self, other):
return self._generic_mul(self, _sympify(other))
def __rmul__(self, other):
return self._generic_mul(_sympify(other), self)
def __pow__(self, p):
return self.pow(p)
def __neg__(self):
        return Quaternion(-self._a, -self._b, -self._c, -self._d)
def __truediv__(self, other):
return self * sympify(other)**-1
def __rtruediv__(self, other):
return sympify(other) * self**-1
def _eval_Integral(self, *args):
return self.integrate(*args)
def diff(self, *symbols, **kwargs):
kwargs.setdefault('evaluate', True)
return self.func(*[a.diff(*symbols, **kwargs) for a in self.args])
def add(self, other):
"""Adds quaternions.
Parameters
==========
other : Quaternion
The quaternion to add to current (self) quaternion.
Returns
=======
Quaternion
The resultant quaternion after adding self to other
Examples
========
>>> from sympy import Quaternion
>>> from sympy import symbols
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> q1.add(q2)
6 + 8*i + 10*j + 12*k
>>> q1 + 5
6 + 2*i + 3*j + 4*k
>>> x = symbols('x', real = True)
>>> q1.add(x)
(x + 1) + 2*i + 3*j + 4*k
Quaternions over complex fields :
>>> from sympy import Quaternion
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q3.add(2 + 3*I)
(5 + 7*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k
"""
q1 = self
q2 = sympify(other)
# If q2 is a number or a SymPy expression instead of a quaternion
if not isinstance(q2, Quaternion):
if q1.real_field and q2.is_complex:
return Quaternion(re(q2) + q1.a, im(q2) + q1.b, q1.c, q1.d)
elif q2.is_commutative:
return Quaternion(q1.a + q2, q1.b, q1.c, q1.d)
else:
raise ValueError("Only commutative expressions can be added with a Quaternion.")
return Quaternion(q1.a + q2.a, q1.b + q2.b, q1.c + q2.c, q1.d
+ q2.d)
def mul(self, other):
"""Multiplies quaternions.
Parameters
==========
other : Quaternion or symbol
The quaternion to multiply to current (self) quaternion.
Returns
=======
Quaternion
The resultant quaternion after multiplying self with other
Examples
========
>>> from sympy import Quaternion
>>> from sympy import symbols
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> q1.mul(q2)
(-60) + 12*i + 30*j + 24*k
>>> q1.mul(2)
2 + 4*i + 6*j + 8*k
>>> x = symbols('x', real = True)
>>> q1.mul(x)
x + 2*x*i + 3*x*j + 4*x*k
Quaternions over complex fields :
>>> from sympy import Quaternion
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q3.mul(2 + 3*I)
(2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k
"""
return self._generic_mul(self, _sympify(other))
@staticmethod
def _generic_mul(q1, q2):
"""Generic multiplication.
Parameters
==========
q1 : Quaternion or symbol
q2 : Quaternion or symbol
It is important to note that if neither q1 nor q2 is a Quaternion,
this function simply returns q1 * q2.
Returns
=======
Quaternion
The resultant quaternion after multiplying q1 and q2
Examples
========
>>> from sympy import Quaternion
>>> from sympy import Symbol, S
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> Quaternion._generic_mul(q1, q2)
(-60) + 12*i + 30*j + 24*k
>>> Quaternion._generic_mul(q1, S(2))
2 + 4*i + 6*j + 8*k
>>> x = Symbol('x', real = True)
>>> Quaternion._generic_mul(q1, x)
x + 2*x*i + 3*x*j + 4*x*k
Quaternions over complex fields :
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> Quaternion._generic_mul(q3, 2 + 3*I)
(2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k
"""
        # Neither q1 nor q2 is a Quaternion:
if not isinstance(q1, Quaternion) and not isinstance(q2, Quaternion):
return q1 * q2
# If q1 is a number or a SymPy expression instead of a quaternion
if not isinstance(q1, Quaternion):
if q2.real_field and q1.is_complex:
return Quaternion(re(q1), im(q1), 0, 0) * q2
elif q1.is_commutative:
return Quaternion(q1 * q2.a, q1 * q2.b, q1 * q2.c, q1 * q2.d)
else:
raise ValueError("Only commutative expressions can be multiplied with a Quaternion.")
# If q2 is a number or a SymPy expression instead of a quaternion
if not isinstance(q2, Quaternion):
if q1.real_field and q2.is_complex:
return q1 * Quaternion(re(q2), im(q2), 0, 0)
elif q2.is_commutative:
return Quaternion(q2 * q1.a, q2 * q1.b, q2 * q1.c, q2 * q1.d)
else:
raise ValueError("Only commutative expressions can be multiplied with a Quaternion.")
# If any of the quaternions has a fixed norm, pre-compute norm
if q1._norm is None and q2._norm is None:
norm = None
else:
norm = q1.norm() * q2.norm()
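        # Hamilton product of the two quaternions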
return Quaternion(-q1.b*q2.b - q1.c*q2.c - q1.d*q2.d + q1.a*q2.a,
q1.b*q2.a + q1.c*q2.d - q1.d*q2.c + q1.a*q2.b,
-q1.b*q2.d + q1.c*q2.a + q1.d*q2.b + q1.a*q2.c,
q1.b*q2.c - q1.c*q2.b + q1.d*q2.a + q1.a * q2.d,
norm=norm)
def _eval_conjugate(self):
"""Returns the conjugate of the quaternion."""
q = self
return Quaternion(q.a, -q.b, -q.c, -q.d, norm=q._norm)
def norm(self):
"""Returns the norm of the quaternion."""
if self._norm is None: # check if norm is pre-defined
q = self
# trigsimp is used to simplify sin(x)^2 + cos(x)^2 (these terms
# arise when from_axis_angle is used).
self._norm = sqrt(trigsimp(q.a**2 + q.b**2 + q.c**2 + q.d**2))
return self._norm
def normalize(self):
"""Returns the normalized form of the quaternion."""
q = self
return q * (1/q.norm())
def inverse(self):
"""Returns the inverse of the quaternion."""
q = self
if not q.norm():
raise ValueError("Cannot compute inverse for a quaternion with zero norm")
return conjugate(q) * (1/q.norm()**2)
def pow(self, p):
"""Finds the pth power of the quaternion.
Parameters
==========
p : int
Power to be applied on quaternion.
Returns
=======
Quaternion
Returns the p-th power of the current quaternion.
Returns the inverse if p = -1.
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.pow(4)
668 + (-224)*i + (-336)*j + (-448)*k
"""
p = sympify(p)
q = self
if p == -1:
return q.inverse()
res = 1
if not p.is_Integer:
return NotImplemented
if p < 0:
q, p = q.inverse(), -p
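        # exponentiation by squaring: fold the current square into ``res``
        # whenever the low bit of p is set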
while p > 0:
if p % 2 == 1:
res = q * res
p = p//2
q = q * q
return res
def exp(self):
"""Returns the exponential of q (e^q).
Returns
=======
Quaternion
Exponential of q (e^q).
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.exp()
E*cos(sqrt(29))
+ 2*sqrt(29)*E*sin(sqrt(29))/29*i
+ 3*sqrt(29)*E*sin(sqrt(29))/29*j
+ 4*sqrt(29)*E*sin(sqrt(29))/29*k
"""
# exp(q) = e^a(cos||v|| + v/||v||*sin||v||)
q = self
vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2)
a = exp(q.a) * cos(vector_norm)
b = exp(q.a) * sin(vector_norm) * q.b / vector_norm
c = exp(q.a) * sin(vector_norm) * q.c / vector_norm
d = exp(q.a) * sin(vector_norm) * q.d / vector_norm
return Quaternion(a, b, c, d)
def _ln(self):
"""Returns the natural logarithm of the quaternion (_ln(q)).
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q._ln()
log(sqrt(30))
+ 2*sqrt(29)*acos(sqrt(30)/30)/29*i
+ 3*sqrt(29)*acos(sqrt(30)/30)/29*j
+ 4*sqrt(29)*acos(sqrt(30)/30)/29*k
"""
# _ln(q) = _ln||q|| + v/||v||*arccos(a/||q||)
q = self
vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2)
q_norm = q.norm()
a = ln(q_norm)
b = q.b * acos(q.a / q_norm) / vector_norm
c = q.c * acos(q.a / q_norm) / vector_norm
d = q.d * acos(q.a / q_norm) / vector_norm
return Quaternion(a, b, c, d)
def _eval_subs(self, *args):
elements = [i.subs(*args) for i in self.args]
norm = self._norm
try:
norm = norm.subs(*args)
except AttributeError:
pass
_check_norm(elements, norm)
return Quaternion(*elements, norm=norm)
def _eval_evalf(self, prec):
"""Returns the floating point approximations (decimal numbers) of the quaternion.
Returns
=======
Quaternion
Floating point approximations of quaternion(self)
Examples
========
>>> from sympy import Quaternion
>>> from sympy import sqrt
>>> q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4))
>>> q.evalf()
1.00000000000000
+ 0.707106781186547*i
+ 0.577350269189626*j
+ 0.500000000000000*k
"""
nprec = prec_to_dps(prec)
return Quaternion(*[arg.evalf(n=nprec) for arg in self.args])
def pow_cos_sin(self, p):
"""Computes the pth power in the cos-sin form.
Parameters
==========
p : int
Power to be applied on quaternion.
Returns
=======
Quaternion
The p-th power in the cos-sin form.
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.pow_cos_sin(4)
900*cos(4*acos(sqrt(30)/30))
+ 1800*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*i
+ 2700*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*j
+ 3600*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*k
"""
# q = ||q||*(cos(a) + u*sin(a))
# q^p = ||q||^p * (cos(p*a) + u*sin(p*a))
q = self
(v, angle) = q.to_axis_angle()
q2 = Quaternion.from_axis_angle(v, p * angle)
return q2 * (q.norm()**p)
def integrate(self, *args):
"""Computes integration of quaternion.
Returns
=======
Quaternion
Integration of the quaternion(self) with the given variable.
Examples
========
Indefinite Integral of quaternion :
>>> from sympy import Quaternion
>>> from sympy.abc import x
>>> q = Quaternion(1, 2, 3, 4)
>>> q.integrate(x)
x + 2*x*i + 3*x*j + 4*x*k
Definite integral of quaternion :
>>> from sympy import Quaternion
>>> from sympy.abc import x
>>> q = Quaternion(1, 2, 3, 4)
>>> q.integrate((x, 1, 5))
4 + 8*i + 12*j + 16*k
"""
# TODO: is this expression correct?
return Quaternion(integrate(self.a, *args), integrate(self.b, *args),
integrate(self.c, *args), integrate(self.d, *args))
@staticmethod
def rotate_point(pin, r):
"""Returns the coordinates of the point pin(a 3 tuple) after rotation.
Parameters
==========
pin : tuple
A 3-element tuple of coordinates of a point which needs to be
rotated.
r : Quaternion or tuple
Axis and angle of rotation.
It's important to note that when r is a tuple, it must be of the form
(axis, angle)
Returns
=======
tuple
The coordinates of the point after rotation.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(Quaternion.rotate_point((1, 1, 1), q))
(sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)
>>> (axis, angle) = q.to_axis_angle()
>>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle)))
(sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)
"""
if isinstance(r, tuple):
# if r is of the form (vector, angle)
q = Quaternion.from_axis_angle(r[0], r[1])
else:
# if r is a quaternion
q = r.normalize()
pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q)
return (pout.b, pout.c, pout.d)
def to_axis_angle(self):
"""Returns the axis and angle of rotation of a quaternion
Returns
=======
tuple
Tuple of (axis, angle)
Examples
========
>>> from sympy import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> (axis, angle) = q.to_axis_angle()
>>> axis
(sqrt(3)/3, sqrt(3)/3, sqrt(3)/3)
>>> angle
2*pi/3
"""
q = self
if q.a.is_negative:
q = q * -1
q = q.normalize()
angle = trigsimp(2 * acos(q.a))
# Since quaternion is normalised, q.a is less than 1.
s = sqrt(1 - q.a*q.a)
x = trigsimp(q.b / s)
y = trigsimp(q.c / s)
z = trigsimp(q.d / s)
v = (x, y, z)
t = (v, angle)
return t
def to_rotation_matrix(self, v=None, homogeneous=True):
"""Returns the equivalent rotation transformation matrix of the quaternion
which represents rotation about the origin if v is not passed.
Parameters
==========
v : tuple or None
Default value: None
homogeneous : bool
When True, gives an expression that may be more efficient for
symbolic calculations but less so for direct evaluation. Both
formulas are mathematically equivalent.
Default value: True
Returns
=======
tuple
Returns the equivalent rotation transformation matrix of the quaternion
which represents rotation about the origin if v is not passed.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(q.to_rotation_matrix())
Matrix([
[cos(x), -sin(x), 0],
[sin(x), cos(x), 0],
[ 0, 0, 1]])
Generates a 4x4 transformation matrix (used for rotation about a point
other than the origin) if the point(v) is passed as an argument.
Examples
========
>>> from sympy import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(q.to_rotation_matrix((1, 1, 1)))
Matrix([
[cos(x), -sin(x), 0, sin(x) - cos(x) + 1],
[sin(x), cos(x), 0, -sin(x) - cos(x) + 1],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]])
"""
q = self
s = q.norm()**-2
        # diagonal elements differ according to the ``homogeneous`` parameter
if homogeneous:
m00 = s*(q.a**2 + q.b**2 - q.c**2 - q.d**2)
m11 = s*(q.a**2 - q.b**2 + q.c**2 - q.d**2)
m22 = s*(q.a**2 - q.b**2 - q.c**2 + q.d**2)
else:
m00 = 1 - 2*s*(q.c**2 + q.d**2)
m11 = 1 - 2*s*(q.b**2 + q.d**2)
m22 = 1 - 2*s*(q.b**2 + q.c**2)
m01 = 2*s*(q.b*q.c - q.d*q.a)
m02 = 2*s*(q.b*q.d + q.c*q.a)
m10 = 2*s*(q.b*q.c + q.d*q.a)
m12 = 2*s*(q.c*q.d - q.b*q.a)
m20 = 2*s*(q.b*q.d - q.c*q.a)
m21 = 2*s*(q.c*q.d + q.b*q.a)
if not v:
return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]])
else:
(x, y, z) = v
m03 = x - x*m00 - y*m01 - z*m02
m13 = y - x*m10 - y*m11 - z*m12
m23 = z - x*m20 - y*m21 - z*m22
m30 = m31 = m32 = 0
m33 = 1
return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13],
[m20, m21, m22, m23], [m30, m31, m32, m33]])
def scalar_part(self):
r"""Returns scalar part($\mathbf{S}(q)$) of the quaternion q.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{S}(q) = a$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(4, 8, 13, 12)
>>> q.scalar_part()
4
"""
return self.a
def vector_part(self):
r"""
Returns vector part($\mathbf{V}(q)$) of the quaternion q.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{V}(q) = bi + cj + dk$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> q.vector_part()
0 + 1*i + 1*j + 1*k
>>> q = Quaternion(4, 8, 13, 12)
>>> q.vector_part()
0 + 8*i + 13*j + 12*k
"""
return Quaternion(0, self.b, self.c, self.d)
def axis(self):
r"""
Returns the axis($\mathbf{Ax}(q)$) of the quaternion.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{Ax}(q)$ i.e., the versor of the vector part of that quaternion
equal to $\mathbf{U}[\mathbf{V}(q)]$.
The axis is always an imaginary unit with square equal to $-1 + 0i + 0j + 0k$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> q.axis()
0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k
See Also
========
vector_part
"""
axis = self.vector_part().normalize()
return Quaternion(0, axis.b, axis.c, axis.d)
def is_pure(self):
"""
Returns true if the quaternion is pure, false if the quaternion is not pure
or returns none if it is unknown.
Explanation
===========
A pure quaternion (also a vector quaternion) is a quaternion with scalar
part equal to 0.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 8, 13, 12)
>>> q.is_pure()
True
See Also
========
scalar_part
"""
return self.a.is_zero
def is_zero_quaternion(self):
"""
Returns true if the quaternion is a zero quaternion or false if it is not a zero quaternion
and None if the value is unknown.
Explanation
===========
A zero quaternion is a quaternion with both scalar part and
vector part equal to 0.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 0, 0, 0)
>>> q.is_zero_quaternion()
False
>>> q = Quaternion(0, 0, 0, 0)
>>> q.is_zero_quaternion()
True
See Also
========
scalar_part
vector_part
"""
return self.norm().is_zero
def angle(self):
r"""
Returns the angle of the quaternion measured in the real-axis plane.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d
are real numbers, returns the angle of the quaternion given by
.. math::
angle := atan2(\sqrt{b^2 + c^2 + d^2}, {a})
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 4, 4, 4)
>>> q.angle()
atan(4*sqrt(3))
"""
return atan2(self.vector_part().norm(), self.scalar_part())
def arc_coplanar(self, other):
"""
Returns True if the transformation arcs represented by the input quaternions happen in the same plane.
Explanation
===========
Two quaternions are said to be coplanar (in this arc sense) when their axes are parallel.
The plane of a quaternion is the one normal to its axis.
Parameters
==========
other : a Quaternion
Returns
=======
        True : if the planes of the two quaternions are the same, apart from orientation/sign.
        False : if the planes of the two quaternions are not the same, apart from orientation/sign.
        None : if the plane of either quaternion is unknown.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q1 = Quaternion(1, 4, 4, 4)
>>> q2 = Quaternion(3, 8, 8, 8)
>>> Quaternion.arc_coplanar(q1, q2)
True
>>> q1 = Quaternion(2, 8, 13, 12)
>>> Quaternion.arc_coplanar(q1, q2)
False
See Also
========
vector_coplanar
is_pure
"""
if (self.is_zero_quaternion()) or (other.is_zero_quaternion()):
raise ValueError('Neither of the given quaternions can be 0')
return fuzzy_or([(self.axis() - other.axis()).is_zero_quaternion(), (self.axis() + other.axis()).is_zero_quaternion()])
@classmethod
def vector_coplanar(cls, q1, q2, q3):
r"""
        Returns True if the pure quaternions q1, q2, and q3, seen as 3D
        vectors, are coplanar.
Explanation
===========
Three pure quaternions are vector coplanar if the quaternions seen as 3D vectors are coplanar.
Parameters
==========
q1 : a pure Quaternion.
q2 : a pure Quaternion.
q3 : a pure Quaternion.
Returns
=======
        True : if the pure quaternions q1, q2, and q3, seen as 3D vectors,
            are coplanar.
        False : if the pure quaternions q1, q2, and q3, seen as 3D vectors,
            are not coplanar.
        None : if it is unknown whether the pure quaternions q1, q2, and q3,
            seen as 3D vectors, are coplanar.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q1 = Quaternion(0, 4, 4, 4)
>>> q2 = Quaternion(0, 8, 8, 8)
>>> q3 = Quaternion(0, 24, 24, 24)
>>> Quaternion.vector_coplanar(q1, q2, q3)
True
>>> q1 = Quaternion(0, 8, 16, 8)
>>> q2 = Quaternion(0, 8, 3, 12)
>>> Quaternion.vector_coplanar(q1, q2, q3)
False
See Also
========
axis
is_pure
"""
if fuzzy_not(q1.is_pure()) or fuzzy_not(q2.is_pure()) or fuzzy_not(q3.is_pure()):
raise ValueError('The given quaternions must be pure')
M = Matrix([[q1.b, q1.c, q1.d], [q2.b, q2.c, q2.d], [q3.b, q3.c, q3.d]]).det()
return M.is_zero
def parallel(self, other):
"""
Returns True if the two pure quaternions seen as 3D vectors are parallel.
Explanation
===========
        Two pure quaternions are called parallel when their vector product is
        commutative, which implies that the quaternions seen as 3D vectors have
        the same direction.
Parameters
==========
other : a Quaternion
Returns
=======
True : if the two pure quaternions seen as 3D vectors are parallel.
False : if the two pure quaternions seen as 3D vectors are not parallel.
        None : if it is unknown whether the two pure quaternions seen as 3D
            vectors are parallel.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 4, 4, 4)
>>> q1 = Quaternion(0, 8, 8, 8)
>>> q.parallel(q1)
True
>>> q1 = Quaternion(0, 8, 13, 12)
>>> q.parallel(q1)
False
"""
if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()):
raise ValueError('The provided quaternions must be pure')
return (self*other - other*self).is_zero_quaternion()
def orthogonal(self, other):
"""
Returns the orthogonality of two quaternions.
Explanation
===========
Two pure quaternions are called orthogonal when their product is anti-commutative.
Parameters
==========
other : a Quaternion
Returns
=======
True : if the two pure quaternions seen as 3D vectors are orthogonal.
False : if the two pure quaternions seen as 3D vectors are not orthogonal.
        None : if it is unknown whether the two pure quaternions seen as 3D
            vectors are orthogonal.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 4, 4, 4)
>>> q1 = Quaternion(0, 8, 8, 8)
>>> q.orthogonal(q1)
False
>>> q1 = Quaternion(0, 2, 2, 0)
>>> q = Quaternion(0, 2, -2, 0)
>>> q.orthogonal(q1)
True
"""
if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()):
raise ValueError('The given quaternions must be pure')
return (self*other + other*self).is_zero_quaternion()
def index_vector(self):
r"""
Returns the index vector of the quaternion.
Explanation
===========
        The index vector is given by $\mathbf{T}(q)$ multiplied by $\mathbf{Ax}(q)$,
        where $\mathbf{Ax}(q)$ is the axis of the quaternion q and $\mathbf{T}(q)$
        is its norm (magnitude).
Returns
=======
Quaternion: representing index vector of the provided quaternion.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(2, 4, 2, 4)
>>> q.index_vector()
0 + 4*sqrt(10)/3*i + 2*sqrt(10)/3*j + 4*sqrt(10)/3*k
See Also
========
axis
norm
"""
return self.norm() * self.axis()
def mensor(self):
"""
Returns the natural logarithm of the norm(magnitude) of the quaternion.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(2, 4, 2, 4)
>>> q.mensor()
log(2*sqrt(10))
>>> q.norm()
2*sqrt(10)
See Also
========
norm
"""
return ln(self.norm())
"""Predefined R^n manifolds together with common coord. systems.
Coordinate systems are predefined as well as the transformation laws between
them.
Coordinate functions can be accessed as attributes of the manifold (e.g. `R2.x`),
as attributes of the coordinate systems (e.g. `R2_r.x` and `R2_p.theta`), or by
using the usual `coord_sys.coord_function(index, name)` interface.
"""
from typing import Any
import warnings
from sympy.core.symbol import (Dummy, symbols)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, atan2, cos, sin)
from .diffgeom import Manifold, Patch, CoordSystem
__all__ = [
'R2', 'R2_origin', 'relations_2d', 'R2_r', 'R2_p',
'R3', 'R3_origin', 'relations_3d', 'R3_r', 'R3_c', 'R3_s'
]
###############################################################################
# R2
###############################################################################
R2: Any = Manifold('R^2', 2)
R2_origin: Any = Patch('origin', R2)
x, y = symbols('x y', real=True)
r, theta = symbols('rho theta', nonnegative=True)
relations_2d = {
('rectangular', 'polar'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))],
('polar', 'rectangular'): [(r, theta), (r*cos(theta), r*sin(theta))],
}
R2_r: Any = CoordSystem('rectangular', R2_origin, (x, y), relations_2d)
R2_p: Any = CoordSystem('polar', R2_origin, (r, theta), relations_2d)
# support deprecated feature
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x, y, r, theta = symbols('x y r theta', cls=Dummy)
R2_r.connect_to(R2_p, [x, y],
[sqrt(x**2 + y**2), atan2(y, x)],
inverse=False, fill_in_gaps=False)
R2_p.connect_to(R2_r, [r, theta],
[r*cos(theta), r*sin(theta)],
inverse=False, fill_in_gaps=False)
# Defining the basis coordinate functions and adding shortcuts for them to the
# manifold and the patch.
R2.x, R2.y = R2_origin.x, R2_origin.y = R2_r.x, R2_r.y = R2_r.coord_functions()
R2.r, R2.theta = R2_origin.r, R2_origin.theta = R2_p.r, R2_p.theta = R2_p.coord_functions()
# Defining the basis vector fields and adding shortcuts for them to the
# manifold and the patch.
R2.e_x, R2.e_y = R2_origin.e_x, R2_origin.e_y = R2_r.e_x, R2_r.e_y = R2_r.base_vectors()
R2.e_r, R2.e_theta = R2_origin.e_r, R2_origin.e_theta = R2_p.e_r, R2_p.e_theta = R2_p.base_vectors()
# Defining the basis oneform fields and adding shortcuts for them to the
# manifold and the patch.
R2.dx, R2.dy = R2_origin.dx, R2_origin.dy = R2_r.dx, R2_r.dy = R2_r.base_oneforms()
R2.dr, R2.dtheta = R2_origin.dr, R2_origin.dtheta = R2_p.dr, R2_p.dtheta = R2_p.base_oneforms()
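# Illustrative, hedged sketch (not part of the original module; assumes the
# ``CoordSystem.point`` API and coordinate-function call syntax documented for
# sympy.diffgeom): evaluating a rectangular coordinate function at a point
# given in polar coordinates applies the registered transformation, e.g.
#   >>> from sympy import symbols
#   >>> rho0, theta0 = symbols('rho0 theta0', positive=True)
#   >>> R2.x(R2_p.point([rho0, theta0]))
#   rho0*cos(theta0)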
###############################################################################
# R3
###############################################################################
R3: Any = Manifold('R^3', 3)
R3_origin: Any = Patch('origin', R3)
x, y, z = symbols('x y z', real=True)
rho, psi, r, theta, phi = symbols('rho psi r theta phi', nonnegative=True)
relations_3d = {
('rectangular', 'cylindrical'): [(x, y, z),
(sqrt(x**2 + y**2), atan2(y, x), z)],
('cylindrical', 'rectangular'): [(rho, psi, z),
(rho*cos(psi), rho*sin(psi), z)],
('rectangular', 'spherical'): [(x, y, z),
(sqrt(x**2 + y**2 + z**2),
acos(z/sqrt(x**2 + y**2 + z**2)),
atan2(y, x))],
('spherical', 'rectangular'): [(r, theta, phi),
(r*sin(theta)*cos(phi),
r*sin(theta)*sin(phi),
r*cos(theta))],
('cylindrical', 'spherical'): [(rho, psi, z),
(sqrt(rho**2 + z**2),
acos(z/sqrt(rho**2 + z**2)),
psi)],
('spherical', 'cylindrical'): [(r, theta, phi),
(r*sin(theta), phi, r*cos(theta))],
}
R3_r: Any = CoordSystem('rectangular', R3_origin, (x, y, z), relations_3d)
R3_c: Any = CoordSystem('cylindrical', R3_origin, (rho, psi, z), relations_3d)
R3_s: Any = CoordSystem('spherical', R3_origin, (r, theta, phi), relations_3d)
# support deprecated feature
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x, y, z, rho, psi, r, theta, phi = symbols('x y z rho psi r theta phi', cls=Dummy)
R3_r.connect_to(R3_c, [x, y, z],
[sqrt(x**2 + y**2), atan2(y, x), z],
inverse=False, fill_in_gaps=False)
R3_c.connect_to(R3_r, [rho, psi, z],
[rho*cos(psi), rho*sin(psi), z],
inverse=False, fill_in_gaps=False)
## rectangular <-> spherical
R3_r.connect_to(R3_s, [x, y, z],
[sqrt(x**2 + y**2 + z**2), acos(z/
sqrt(x**2 + y**2 + z**2)), atan2(y, x)],
inverse=False, fill_in_gaps=False)
R3_s.connect_to(R3_r, [r, theta, phi],
[r*sin(theta)*cos(phi), r*sin(
theta)*sin(phi), r*cos(theta)],
inverse=False, fill_in_gaps=False)
## cylindrical <-> spherical
R3_c.connect_to(R3_s, [rho, psi, z],
[sqrt(rho**2 + z**2), acos(z/sqrt(rho**2 + z**2)), psi],
inverse=False, fill_in_gaps=False)
R3_s.connect_to(R3_c, [r, theta, phi],
[r*sin(theta), phi, r*cos(theta)],
inverse=False, fill_in_gaps=False)
# Defining the basis coordinate functions.
R3_r.x, R3_r.y, R3_r.z = R3_r.coord_functions()
R3_c.rho, R3_c.psi, R3_c.z = R3_c.coord_functions()
R3_s.r, R3_s.theta, R3_s.phi = R3_s.coord_functions()
# Defining the basis vector fields.
R3_r.e_x, R3_r.e_y, R3_r.e_z = R3_r.base_vectors()
R3_c.e_rho, R3_c.e_psi, R3_c.e_z = R3_c.base_vectors()
R3_s.e_r, R3_s.e_theta, R3_s.e_phi = R3_s.base_vectors()
# Defining the basis oneform fields.
R3_r.dx, R3_r.dy, R3_r.dz = R3_r.base_oneforms()
R3_c.drho, R3_c.dpsi, R3_c.dz = R3_c.base_oneforms()
R3_s.dr, R3_s.dtheta, R3_s.dphi = R3_s.base_oneforms()
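# Editorial sketch (not part of the original module): a minimal illustration of
# how the predefined R3 coordinate systems can be used, assuming the
# rectangular/spherical relations defined above. The helper name is hypothetical.
def _r3_usage_sketch():
    """Express a rectangular point in spherical coordinates and get a Jacobian."""
    point_spherical = R3_r.transform(R3_s, [1, 1, 1])  # expected entries: sqrt(3), acos(1/sqrt(3)), pi/4
    jac = R3_s.jacobian(R3_r)  # 3x3 Jacobian of the spherical -> rectangular map
    return point_spherical, jac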
dcd6e88d1315a1c4ebe4ec0593c9c117b4da4b300384009df8a7f8616d143d3d
from __future__ import annotations
from typing import Any
from functools import reduce
from itertools import permutations
from sympy.combinatorics import Permutation
from sympy.core import (
Basic, Expr, Function, diff,
Pow, Mul, Add, Lambda, S, Tuple, Dict
)
from sympy.core.cache import cacheit
from sympy.core.symbol import Symbol, Dummy
from sympy.core.symbol import Str
from sympy.core.sympify import _sympify
from sympy.functions import factorial
from sympy.matrices import ImmutableDenseMatrix as Matrix
from sympy.solvers import solve
from sympy.utilities.exceptions import (sympy_deprecation_warning,
SymPyDeprecationWarning,
ignore_warnings)
# TODO you are a bit excessive in the use of Dummies
# TODO dummy point, literal field
# TODO too often one needs to call doit or simplify on the output, check the
# tests and find out why
from sympy.tensor.array import ImmutableDenseNDimArray
class Manifold(Basic):
"""
A mathematical manifold.
Explanation
===========
A manifold is a topological space that locally resembles
Euclidean space near each point [1].
This class does not provide any means to study the topological
characteristics of the manifold that it represents, though.
Parameters
==========
name : str
The name of the manifold.
dim : int
The dimension of the manifold.
Examples
========
>>> from sympy.diffgeom import Manifold
>>> m = Manifold('M', 2)
>>> m
M
>>> m.dim
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Manifold
"""
def __new__(cls, name, dim, **kwargs):
if not isinstance(name, Str):
name = Str(name)
dim = _sympify(dim)
obj = super().__new__(cls, name, dim)
obj.patches = _deprecated_list(
"""
Manifold.patches is deprecated. The Manifold object is now
immutable. Instead use a separate list to keep track of the
patches.
""", [])
return obj
@property
def name(self):
return self.args[0]
@property
def dim(self):
return self.args[1]
class Patch(Basic):
"""
A patch on a manifold.
Explanation
===========
Coordinate patch, or patch in short, is a simply-connected open set around
a point in the manifold [1]. On a manifold one can have many patches that
do not always include the whole manifold. On these patches coordinate
charts can be defined that permit the parameterization of any point on the
patch in terms of a tuple of real numbers (the coordinates).
This class does not provide any means to study the topological
characteristics of the patch that it represents.
Parameters
==========
name : str
The name of the patch.
manifold : Manifold
The manifold on which the patch is defined.
Examples
========
>>> from sympy.diffgeom import Manifold, Patch
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> p
P
>>> p.dim
2
References
==========
.. [1] G. Sussman, J. Wisdom, W. Farr, Functional Differential Geometry
(2013)
"""
def __new__(cls, name, manifold, **kwargs):
if not isinstance(name, Str):
name = Str(name)
obj = super().__new__(cls, name, manifold)
obj.manifold.patches.append(obj) # deprecated
obj.coord_systems = _deprecated_list(
"""
            Patch.coord_systems is deprecated. The Patch class is now
immutable. Instead use a separate list to keep track of coordinate
systems.
""", [])
return obj
@property
def name(self):
return self.args[0]
@property
def manifold(self):
return self.args[1]
@property
def dim(self):
return self.manifold.dim
class CoordSystem(Basic):
"""
A coordinate system defined on the patch.
Explanation
===========
Coordinate system is a system that uses one or more coordinates to uniquely
determine the position of the points or other geometric elements on a
manifold [1].
    By passing ``Symbols`` to the *symbols* parameter, the user can define the
    names and assumptions of the coordinate symbols of the coordinate system.
    If not passed, these symbols are generated automatically and are assumed to
    be real valued.
    By passing the *relations* parameter, the user can define the transformation
    relations between coordinate systems. Inverse and indirect transformations
    can be found automatically. If this parameter is not passed, coordinate
    transformation cannot be done.
Parameters
==========
name : str
The name of the coordinate system.
patch : Patch
The patch where the coordinate system is defined.
symbols : list of Symbols, optional
Defines the names and assumptions of coordinate symbols.
    relations : dict, optional
        The key is a tuple of two strings, naming the coordinate system that the
        coordinates transform from and the system that they transform to.
        The value is a tuple of the symbols before transformation and a tuple of
        the expressions after transformation.
Examples
========
We define two-dimensional Cartesian coordinate system and polar coordinate
system.
>>> from sympy import symbols, pi, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))],
... ('Pol', 'Car2D'): [(r, theta), (r*cos(theta), r*sin(theta))]
... }
>>> Car2D = CoordSystem('Car2D', p, (x, y), relation_dict)
>>> Pol = CoordSystem('Pol', p, (r, theta), relation_dict)
    The ``symbols`` property returns ``CoordinateSymbol`` instances. These
    symbols are not the same as the symbols used to construct the coordinate
    system.
>>> Car2D
Car2D
>>> Car2D.dim
2
>>> Car2D.symbols
(x, y)
    >>> Car2D.symbols[0].func
<class 'sympy.diffgeom.diffgeom.CoordinateSymbol'>
``transformation()`` method returns the transformation function from
one coordinate system to another. ``transform()`` method returns the
transformed coordinates.
>>> Car2D.transformation(Pol)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
>>> Car2D.transform(Pol)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> Car2D.transform(Pol, [1, 2])
Matrix([
[sqrt(5)],
[atan(2)]])
``jacobian()`` method returns the Jacobian matrix of coordinate
transformation between two systems. ``jacobian_determinant()`` method
returns the Jacobian determinant of coordinate transformation between two
systems.
>>> Pol.jacobian(Car2D)
Matrix([
[cos(theta), -r*sin(theta)],
[sin(theta), r*cos(theta)]])
>>> Pol.jacobian(Car2D, [1, pi/2])
Matrix([
[0, -1],
[1, 0]])
>>> Car2D.jacobian_determinant(Pol)
1/sqrt(x**2 + y**2)
>>> Car2D.jacobian_determinant(Pol, [1,0])
1
References
==========
.. [1] https://en.wikipedia.org/wiki/Coordinate_system
"""
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
        # canonicalize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True)
for i in range(patch.dim)]
)
else:
sympy_deprecation_warning(
f"""
The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That
is, replace
CoordSystem(..., names={names})
with
CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}])
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
)
else:
syms = []
for s in symbols:
if isinstance(s, Symbol):
syms.append(Symbol(s.name, **s._assumptions.generator))
elif isinstance(s, str):
sympy_deprecation_warning(
f"""
Passing a string as the coordinate symbol name to CoordSystem is deprecated.
Pass a Symbol with the appropriate name and assumptions instead.
That is, replace {s} with Symbol({s!r}, real=True).
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
syms.append(Symbol(s, real=True))
symbols = Tuple(*syms)
        # canonicalize the relations
rel_temp = {}
for k,v in relations.items():
s1, s2 = k
if not isinstance(s1, Str):
s1 = Str(s1)
if not isinstance(s2, Str):
s2 = Str(s2)
key = Tuple(s1, s2)
# Old version used Lambda as a value.
if isinstance(v, Lambda):
v = (tuple(v.signature), tuple(v.expr))
else:
v = (tuple(v[0]), tuple(v[1]))
rel_temp[key] = v
relations = Dict(rel_temp)
# construct the object
obj = super().__new__(cls, name, patch, symbols, relations)
# Add deprecated attributes
obj.transforms = _deprecated_dict(
"""
CoordSystem.transforms is deprecated. The CoordSystem class is now
immutable. Use the 'relations' keyword argument to the
            CoordSystem() constructor to specify relations.
""", {})
obj._names = [str(n) for n in symbols]
obj.patch.coord_systems.append(obj) # deprecated
obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated
obj._dummy = Dummy()
return obj
@property
def name(self):
return self.args[0]
@property
def patch(self):
return self.args[1]
@property
def manifold(self):
return self.patch.manifold
@property
def symbols(self):
return tuple(CoordinateSymbol(self, i, **s._assumptions.generator)
for i,s in enumerate(self.args[2]))
@property
def relations(self):
return self.args[3]
@property
def dim(self):
return self.patch.dim
##########################################################################
# Finding transformation relation
##########################################################################
def transformation(self, sys):
"""
Return coordinate transformation function from *self* to *sys*.
Parameters
==========
sys : CoordSystem
Returns
=======
sympy.Lambda
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transformation(R2_p)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
"""
signature = self.args[2]
key = Tuple(self.name, sys.name)
if self == sys:
expr = Matrix(self.symbols)
elif key in self.relations:
expr = Matrix(self.relations[key][1])
elif key[::-1] in self.relations:
expr = Matrix(self._inverse_transformation(sys, self))
else:
expr = Matrix(self._indirect_transformation(self, sys))
return Lambda(signature, expr)
@staticmethod
def _solve_inverse(sym1, sym2, exprs, sys1_name, sys2_name):
ret = solve(
[t[0] - t[1] for t in zip(sym2, exprs)],
list(sym1), dict=True)
if len(ret) == 0:
temp = "Cannot solve inverse relation from {} to {}."
raise NotImplementedError(temp.format(sys1_name, sys2_name))
elif len(ret) > 1:
            temp = "Obtained multiple inverse relations from {} to {}."
raise ValueError(temp.format(sys1_name, sys2_name))
return ret[0]
@classmethod
def _inverse_transformation(cls, sys1, sys2):
# Find the transformation relation from sys2 to sys1
forward = sys1.transform(sys2)
inv_results = cls._solve_inverse(sys1.symbols, sys2.symbols, forward,
sys1.name, sys2.name)
signature = tuple(sys1.symbols)
return [inv_results[s] for s in signature]
@classmethod
@cacheit
def _indirect_transformation(cls, sys1, sys2):
# Find the transformation relation between two indirectly connected
# coordinate systems
rel = sys1.relations
path = cls._dijkstra(sys1, sys2)
transforms = []
for s1, s2 in zip(path, path[1:]):
if (s1, s2) in rel:
transforms.append(rel[(s1, s2)])
else:
sym2, inv_exprs = rel[(s2, s1)]
sym1 = tuple(Dummy() for i in sym2)
ret = cls._solve_inverse(sym2, sym1, inv_exprs, s2, s1)
ret = tuple(ret[s] for s in sym2)
transforms.append((sym1, ret))
syms = sys1.args[2]
exprs = syms
for newsyms, newexprs in transforms:
exprs = tuple(e.subs(zip(newsyms, exprs)) for e in newexprs)
return exprs
@staticmethod
def _dijkstra(sys1, sys2):
# Use Dijkstra algorithm to find the shortest path between two indirectly-connected
# coordinate systems
# return value is the list of the names of the systems.
relations = sys1.relations
graph = {}
for s1, s2 in relations.keys():
if s1 not in graph:
graph[s1] = {s2}
else:
graph[s1].add(s2)
if s2 not in graph:
graph[s2] = {s1}
else:
graph[s2].add(s1)
        path_dict = {sys: [0, [], 0] for sys in graph}  # minimum distance, path, visited flag
def visit(sys):
path_dict[sys][2] = 1
for newsys in graph[sys]:
distance = path_dict[sys][0] + 1
if path_dict[newsys][0] >= distance or not path_dict[newsys][1]:
path_dict[newsys][0] = distance
path_dict[newsys][1] = [i for i in path_dict[sys][1]]
path_dict[newsys][1].append(sys)
visit(sys1.name)
while True:
min_distance = max(path_dict.values(), key=lambda x:x[0])[0]
newsys = None
for sys, lst in path_dict.items():
if 0 < lst[0] <= min_distance and not lst[2]:
min_distance = lst[0]
newsys = sys
if newsys is None:
break
visit(newsys)
result = path_dict[sys2.name][1]
result.append(sys2.name)
if result == [sys2.name]:
raise KeyError("Two coordinate systems are not connected.")
return result
def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False):
sympy_deprecation_warning(
"""
The CoordSystem.connect_to() method is deprecated. Instead,
generate a new instance of CoordSystem with the 'relations'
keyword argument (CoordSystem classes are now immutable).
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
from_coords, to_exprs = dummyfy(from_coords, to_exprs)
self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs)
if inverse:
to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs)
if fill_in_gaps:
self._fill_gaps_in_transformations()
@staticmethod
def _inv_transf(from_coords, to_exprs):
# Will be removed when connect_to is removed
inv_from = [i.as_dummy() for i in from_coords]
inv_to = solve(
[t[0] - t[1] for t in zip(inv_from, to_exprs)],
list(from_coords), dict=True)[0]
inv_to = [inv_to[fc] for fc in from_coords]
return Matrix(inv_from), Matrix(inv_to)
@staticmethod
def _fill_gaps_in_transformations():
# Will be removed when connect_to is removed
raise NotImplementedError
##########################################################################
# Coordinate transformations
##########################################################################
def transform(self, sys, coordinates=None):
"""
Return the result of coordinate transformation from *self* to *sys*.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix containing CoordinateSymbol
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transform(R2_p)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> R2_r.transform(R2_p, [0, 1])
Matrix([
[ 1],
[pi/2]])
"""
if coordinates is None:
coordinates = self.symbols
if self != sys:
transf = self.transformation(sys)
coordinates = transf(*coordinates)
else:
coordinates = Matrix(coordinates)
return coordinates
def coord_tuple_transform_to(self, to_sys, coords):
"""Transform ``coords`` to coord system ``to_sys``."""
sympy_deprecation_warning(
"""
The CoordSystem.coord_tuple_transform_to() method is deprecated.
Use the CoordSystem.transform() method instead.
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
coords = Matrix(coords)
if self != to_sys:
with ignore_warnings(SymPyDeprecationWarning):
transf = self.transforms[to_sys]
coords = transf[1].subs(list(zip(transf[0], coords)))
return coords
def jacobian(self, sys, coordinates=None):
"""
Return the jacobian matrix of a transformation on given coordinates.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_p.jacobian(R2_r)
Matrix([
[cos(theta), -rho*sin(theta)],
[sin(theta), rho*cos(theta)]])
>>> R2_p.jacobian(R2_r, [1, 0])
Matrix([
[1, 0],
[0, 1]])
"""
result = self.transform(sys).jacobian(self.symbols)
if coordinates is not None:
result = result.subs(list(zip(self.symbols, coordinates)))
return result
jacobian_matrix = jacobian
def jacobian_determinant(self, sys, coordinates=None):
"""
Return the jacobian determinant of a transformation on given
coordinates. If coordinates are not given, coordinate symbols of *self*
are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.Expr
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.jacobian_determinant(R2_p)
1/sqrt(x**2 + y**2)
>>> R2_r.jacobian_determinant(R2_p, [1, 0])
1
"""
return self.jacobian(sys, coordinates).det()
##########################################################################
# Points
##########################################################################
def point(self, coords):
"""Create a ``Point`` with coordinates given in this coord system."""
return Point(self, coords)
def point_to_coords(self, point):
"""Calculate the coordinates of a point in this coord system."""
return point.coords(self)
##########################################################################
# Base fields.
##########################################################################
def base_scalar(self, coord_index):
"""Return ``BaseScalarField`` that takes a point and returns one of the coordinates."""
return BaseScalarField(self, coord_index)
coord_function = base_scalar
def base_scalars(self):
"""Returns a list of all coordinate functions.
For more details see the ``base_scalar`` method of this class."""
return [self.base_scalar(i) for i in range(self.dim)]
coord_functions = base_scalars
def base_vector(self, coord_index):
"""Return a basis vector field.
The basis vector field for this coordinate system. It is also an
operator on scalar fields."""
return BaseVectorField(self, coord_index)
def base_vectors(self):
"""Returns a list of all base vectors.
For more details see the ``base_vector`` method of this class."""
return [self.base_vector(i) for i in range(self.dim)]
def base_oneform(self, coord_index):
"""Return a basis 1-form field.
The basis one-form field for this coordinate system. It is also an
operator on vector fields."""
return Differential(self.coord_function(coord_index))
def base_oneforms(self):
"""Returns a list of all base oneforms.
For more details see the ``base_oneform`` method of this class."""
return [self.base_oneform(i) for i in range(self.dim)]
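# Editorial sketch (not part of the original source): the relations below only
# connect 'A' to 'B' and 'B' to 'C', so transforming from 'A' to 'C' exercises
# the indirect (Dijkstra-based) transformation machinery described above. The
# helper name and the coordinate systems are hypothetical.
def _indirect_transformation_sketch():
    m = Manifold('M', 1)
    p = Patch('P', m)
    u, v, w = (Symbol(n, real=True) for n in 'uvw')
    rel = {('A', 'B'): [(u,), (2*u,)],
           ('B', 'C'): [(v,), (v + 1,)]}
    A = CoordSystem('A', p, (u,), rel)
    C = CoordSystem('C', p, (w,), rel)
    return A.transform(C)  # expected: Matrix([[2*u + 1]])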
class CoordinateSymbol(Symbol):
"""A symbol which denotes an abstract value of i-th coordinate of
the coordinate system with given context.
Explanation
===========
    Each coordinate in a coordinate system is represented by a unique symbol,
    such as x, y, z in the Cartesian coordinate system.
    You should not construct this class directly. Instead, use the ``symbols``
    property of ``CoordSystem``.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import symbols, Lambda, Matrix, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])),
... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)]))
... }
>>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict)
>>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict)
>>> x, y = Car2D.symbols
``CoordinateSymbol`` contains its coordinate symbol and index.
>>> x.name
'x'
>>> x.coord_sys == Car2D
True
>>> x.index
0
>>> x.is_real
True
You can transform ``CoordinateSymbol`` into other coordinate system using
``rewrite()`` method.
>>> x.rewrite(Pol)
r*cos(theta)
>>> sqrt(x**2 + y**2).rewrite(Pol).simplify()
r
"""
def __new__(cls, coord_sys, index, **assumptions):
name = coord_sys.args[2][index].name
obj = super().__new__(cls, name, **assumptions)
obj.coord_sys = coord_sys
obj.index = index
return obj
def __getnewargs__(self):
return (self.coord_sys, self.index)
def _hashable_content(self):
return (
self.coord_sys, self.index
) + tuple(sorted(self.assumptions0.items()))
def _eval_rewrite(self, rule, args, **hints):
if isinstance(rule, CoordSystem):
return rule.transform(self.coord_sys)[self.index]
return super()._eval_rewrite(rule, args, **hints)
class Point(Basic):
"""Point defined in a coordinate system.
Explanation
===========
    Mathematically, a point is defined in the manifold and does not have any
    coordinates by itself. A coordinate system is what imbues a point with
    coordinates, via its coordinate chart. However, due to the difficulty of
    realizing such logic, you must supply a coordinate system and coordinates
    to define a Point here.
The usage of this object after its definition is independent of the
coordinate system that was used in order to define it, however due to
limitations in the simplification routines you can arrive at complicated
expressions if you use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
coords : list
The coordinates of the point.
Examples
========
>>> from sympy import pi
>>> from sympy.diffgeom import Point
>>> from sympy.diffgeom.rn import R2, R2_r, R2_p
>>> rho, theta = R2_p.symbols
>>> p = Point(R2_p, [rho, 3*pi/4])
>>> p.manifold == R2
True
>>> p.coords()
Matrix([
[ rho],
[3*pi/4]])
>>> p.coords(R2_r)
Matrix([
[-sqrt(2)*rho/2],
[ sqrt(2)*rho/2]])
"""
def __new__(cls, coord_sys, coords, **kwargs):
coords = Matrix(coords)
obj = super().__new__(cls, coord_sys, coords)
obj._coord_sys = coord_sys
obj._coords = coords
return obj
@property
def patch(self):
return self._coord_sys.patch
@property
def manifold(self):
return self._coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def coords(self, sys=None):
"""
        Coordinates of the point in the given coordinate system. If a coordinate
        system is not passed, the coordinates in the coordinate system in which
        the point was defined are returned.
"""
if sys is None:
return self._coords
else:
return self._coord_sys.transform(sys, self._coords)
@property
def free_symbols(self):
return self._coords.free_symbols
class BaseScalarField(Expr):
"""Base scalar field over a manifold for a given coordinate system.
Explanation
===========
A scalar field takes a point as an argument and returns a scalar.
A base scalar field of a coordinate system takes a point and returns one of
the coordinates of that point in the coordinate system in question.
To define a scalar field you need to choose the coordinate system and the
index of the coordinate.
    The use of the scalar field after its definition is independent of the
    coordinate system in which it was defined; however, due to limitations in
    the simplification routines, you may arrive at more complicated
    expressions if you use inappropriate coordinate systems.
You can build complicated scalar fields by just building up SymPy
expressions containing ``BaseScalarField`` instances.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function, pi
>>> from sympy.diffgeom import BaseScalarField
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> rho, _ = R2_p.symbols
>>> point = R2_p.point([rho, 0])
>>> fx, fy = R2_r.base_scalars()
>>> ftheta = BaseScalarField(R2_r, 1)
>>> fx(point)
rho
>>> fy(point)
0
>>> (fx**2+fy**2).rcall(point)
rho**2
>>> g = Function('g')
>>> fg = g(ftheta-pi)
>>> fg.rcall(point)
g(-pi)
"""
is_commutative = True
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, *args):
"""Evaluating the field at a point or doing nothing.
If the argument is a ``Point`` instance, the field is evaluated at that
point. The field is returned itself if the argument is any other
object. It is so in order to have working recursive calling mechanics
for all fields (check the ``__call__`` method of ``Expr``).
"""
point = args[0]
if len(args) != 1 or not isinstance(point, Point):
return self
coords = point.coords(self._coord_sys)
# XXX Calling doit is necessary with all the Subs expressions
# XXX Calling simplify is necessary with all the trig expressions
return simplify(coords[self._index]).doit()
# XXX Workaround for limitations on the content of args
free_symbols: set[Any] = set()
class BaseVectorField(Expr):
r"""Base vector field over a manifold for a given coordinate system.
Explanation
===========
A vector field is an operator taking a scalar field and returning a
directional derivative (which is also a scalar field).
A base vector field is the same type of operator, however the derivation is
specifically done with respect to a chosen coordinate.
To define a base vector field you need to choose the coordinate system and
the index of the coordinate.
    The use of the vector field after its definition is independent of the
    coordinate system in which it was defined; however, due to limitations in
    the simplification routines, you may arrive at more complicated expressions
    if you use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import BaseVectorField
>>> from sympy import pprint
>>> x, y = R2_r.symbols
>>> rho, theta = R2_p.symbols
>>> fx, fy = R2_r.base_scalars()
>>> point_p = R2_p.point([rho, theta])
>>> point_r = R2_r.point([x, y])
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> v = BaseVectorField(R2_r, 1)
>>> pprint(v(s_field))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
>>> pprint(v(s_field).rcall(point_r).doit())
d
--(g(x, y))
dy
>>> pprint(v(s_field).rcall(point_p))
/ d \|
|---(g(rho*cos(theta), xi))||
\dxi /|xi=rho*sin(theta)
"""
is_commutative = False
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, scalar_field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field an error is raised.
"""
if covariant_order(scalar_field) or contravariant_order(scalar_field):
raise ValueError('Only scalar fields can be supplied as arguments to vector fields.')
if scalar_field is None:
return self
base_scalars = list(scalar_field.atoms(BaseScalarField))
# First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r)
d_var = self._coord_sys._dummy
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(d_var) for i,
b in enumerate(base_scalars)]
d_result = scalar_field.subs(list(zip(base_scalars, d_funcs)))
d_result = d_result.diff(d_var)
# Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y))
coords = self._coord_sys.symbols
d_funcs_deriv = [f.diff(d_var) for f in d_funcs]
d_funcs_deriv_sub = []
for b in base_scalars:
jac = self._coord_sys.jacobian(b._coord_sys, coords)
d_funcs_deriv_sub.append(jac[b._index, self._index])
d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub)))
# Remove the dummies
result = d_result.subs(list(zip(d_funcs, base_scalars)))
result = result.subs(list(zip(coords, self._coord_sys.coord_functions())))
return result.doit()
def _find_coords(expr):
# Finds CoordinateSystems existing in expr
fields = expr.atoms(BaseScalarField, BaseVectorField)
result = set()
for f in fields:
result.add(f._coord_sys)
return result
class Commutator(Expr):
r"""Commutator of two vector fields.
Explanation
===========
The commutator of two vector fields `v_1` and `v_2` is defined as the
vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal
to `v_1(v_2(f)) - v_2(v_1(f))`.
Examples
========
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import Commutator
>>> from sympy import simplify
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_r = R2_p.base_vector(0)
>>> c_xy = Commutator(e_x, e_y)
>>> c_xr = Commutator(e_x, e_r)
>>> c_xy
0
Unfortunately, the current code is not able to compute everything:
>>> c_xr
Commutator(e_x, e_rho)
>>> simplify(c_xr(fy**2))
-2*cos(theta)*y**2/(x**2 + y**2)
"""
def __new__(cls, v1, v2):
if (covariant_order(v1) or contravariant_order(v1) != 1
or covariant_order(v2) or contravariant_order(v2) != 1):
raise ValueError(
'Only commutators of vector fields are supported.')
if v1 == v2:
return S.Zero
coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)])
if len(coord_sys) == 1:
            # Only one coordinate system is used, hence it is easy enough to
# actually evaluate the commutator.
if all(isinstance(v, BaseVectorField) for v in (v1, v2)):
return S.Zero
bases_1, bases_2 = [list(v.atoms(BaseVectorField))
for v in (v1, v2)]
coeffs_1 = [v1.expand().coeff(b) for b in bases_1]
coeffs_2 = [v2.expand().coeff(b) for b in bases_2]
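            # With commuting base vectors ([b1, b2] = 0), the Leibniz rule gives
            # [c1*b1, c2*b2] = c1*b1(c2)*b2 - c2*b2(c1)*b1, which is what the
            # double loop below accumulates.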
res = 0
for c1, b1 in zip(coeffs_1, bases_1):
for c2, b2 in zip(coeffs_2, bases_2):
res += c1*b1(c2)*b2 - c2*b2(c1)*b1
return res
else:
obj = super().__new__(cls, v1, v2)
obj._v1 = v1 # deprecated assignment
obj._v2 = v2 # deprecated assignment
return obj
@property
def v1(self):
return self.args[0]
@property
def v2(self):
return self.args[1]
def __call__(self, scalar_field):
"""Apply on a scalar field.
If the argument is not a scalar field an error is raised.
"""
return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field))
class Differential(Expr):
r"""Return the differential (exterior derivative) of a form field.
Explanation
===========
The differential of a form (i.e. the exterior derivative) has a complicated
definition in the general case.
The differential `df` of the 0-form `f` is defined for any vector field `v`
as `df(v) = v(f)`.
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import Differential
>>> from sympy import pprint
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> dg = Differential(s_field)
>>> dg
d(g(x, y))
>>> pprint(dg(e_x))
/ d \|
|---(g(xi, y))||
\dxi /|xi=x
>>> pprint(dg(e_y))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
    Applying the exterior derivative operator twice always results in zero:
>>> Differential(dg)
0
"""
is_commutative = False
def __new__(cls, form_field):
if contravariant_order(form_field):
raise ValueError(
'A vector field was supplied as an argument to Differential.')
if isinstance(form_field, Differential):
return S.Zero
else:
obj = super().__new__(cls, form_field)
obj._form_field = form_field # deprecated assignment
return obj
@property
def form_field(self):
return self.args[0]
def __call__(self, *vector_fields):
"""Apply on a list of vector_fields.
Explanation
===========
If the number of vector fields supplied is not equal to 1 + the order of
the form field inside the differential the result is undefined.
For 1-forms (i.e. differentials of scalar fields) the evaluation is
done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector
field, the differential is returned unchanged. This is done in order to
permit partial contractions for higher forms.
        In the general case the evaluation is done by applying the form field
        inside the differential on a list with one fewer element than the
        original list. Lowering the number of vector fields is achieved by
        replacing each pair of fields by their commutator.
If the arguments are not vectors or ``None``s an error is raised.
"""
if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None
for a in vector_fields):
raise ValueError('The arguments supplied to Differential should be vector fields or Nones.')
k = len(vector_fields)
if k == 1:
if vector_fields[0]:
return vector_fields[0].rcall(self._form_field)
return self
else:
# For higher form it is more complicated:
# Invariant formula:
# https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula
# df(v1, ... vn) = +/- vi(f(v1..no i..vn))
# +/- f([vi,vj],v1..no i, no j..vn)
f = self._form_field
v = vector_fields
ret = 0
for i in range(k):
t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:]))
ret += (-1)**i*t
for j in range(i + 1, k):
c = Commutator(v[i], v[j])
if c: # TODO this is ugly - the Commutator can be Zero and
# this causes the next line to fail
t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:])
ret += (-1)**(i + j)*t
return ret
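# Editorial sketch (not part of the original source): evaluating the differential
# of a 1-form with the invariant formula described in ``Differential.__call__``.
# For f = x*dy one expects df = dx /\ dy, so df(e_x, e_y) should be 1. The helper
# name is hypothetical; R2_r is imported lazily to avoid a circular import.
def _differential_of_oneform_sketch():
    from sympy.diffgeom.rn import R2_r
    fx, _fy = R2_r.base_scalars()
    e_x, e_y = R2_r.base_vectors()
    _dx, dy = R2_r.base_oneforms()
    return Differential(fx*dy)(e_x, e_y)  # expected: 1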
class TensorProduct(Expr):
"""Tensor product of forms.
Explanation
===========
The tensor product permits the creation of multilinear functionals (i.e.
higher order tensors) out of lower order fields (e.g. 1-forms and vector
fields). However, the higher tensors thus created lack the interesting
features provided by the other type of product, the wedge product, namely
they are not antisymmetric and hence are not form fields.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> TensorProduct(dx, dy)(e_x, e_y)
1
>>> TensorProduct(dx, dy)(e_y, e_x)
0
>>> TensorProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> TensorProduct(e_x, e_y)(fx**2, fy**2)
4*x*y
>>> TensorProduct(e_y, dx)(fy)
dx
You can nest tensor products.
>>> tp1 = TensorProduct(dx, dy)
>>> TensorProduct(tp1, dx)(e_x, e_y, e_x)
1
You can make partial contraction for instance when 'raising an index'.
Putting ``None`` in the second argument of ``rcall`` means that the
respective position in the tensor product is left as it is.
>>> TP = TensorProduct
>>> metric = TP(dx, dx) + 3*TP(dy, dy)
>>> metric.rcall(e_y, None)
3*dy
Or automatically pad the args with ``None`` without specifying them.
>>> metric.rcall(e_y)
3*dy
"""
def __new__(cls, *args):
scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0])
multifields = [m for m in args if covariant_order(m) + contravariant_order(m)]
if multifields:
if len(multifields) == 1:
return scalar*multifields[0]
return scalar*super().__new__(cls, *multifields)
else:
return scalar
def __call__(self, *fields):
"""Apply on a list of fields.
If the number of input fields supplied is not equal to the order of
the tensor product field, the list of arguments is padded with ``None``'s.
The list of arguments is divided in sublists depending on the order of
the forms inside the tensor product. The sublists are provided as
arguments to these forms and the resulting expressions are given to the
constructor of ``TensorProduct``.
"""
tot_order = covariant_order(self) + contravariant_order(self)
tot_args = len(fields)
if tot_args != tot_order:
fields = list(fields) + [None]*(tot_order - tot_args)
orders = [covariant_order(f) + contravariant_order(f) for f in self._args]
indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)]
fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])]
multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)]
return TensorProduct(*multipliers)
class WedgeProduct(TensorProduct):
"""Wedge product of forms.
Explanation
===========
In the context of integration only completely antisymmetric forms make
sense. The wedge product permits the creation of such forms.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import WedgeProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> WedgeProduct(dx, dy)(e_x, e_y)
1
>>> WedgeProduct(dx, dy)(e_y, e_x)
-1
>>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> WedgeProduct(e_x, e_y)(fy, None)
-e_x
You can nest wedge products.
>>> wp1 = WedgeProduct(dx, dy)
>>> WedgeProduct(wp1, dx)(e_x, e_y, e_x)
0
"""
# TODO the calculation of signatures is slow
# TODO you do not need all these permutations (neither the prefactor)
def __call__(self, *fields):
"""Apply on a list of vector_fields.
The expression is rewritten internally in terms of tensor products and evaluated."""
orders = (covariant_order(e) + contravariant_order(e) for e in self.args)
mul = 1/Mul(*(factorial(o) for o in orders))
perms = permutations(fields)
perms_par = (Permutation(
p).signature() for p in permutations(range(len(fields))))
tensor_prod = TensorProduct(*self.args)
return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)])
class LieDerivative(Expr):
"""Lie derivative with respect to a vector field.
Explanation
===========
The transport operator that defines the Lie derivative is the pushforward of
the field to be derived along the integral curve of the field with respect
to which one derives.
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> from sympy.diffgeom import (LieDerivative, TensorProduct)
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_rho, e_theta = R2_p.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> LieDerivative(e_x, fy)
0
>>> LieDerivative(e_x, fx)
1
>>> LieDerivative(e_x, e_x)
0
    The Lie derivative of a vector field with respect to another vector field
    is equal to their commutator:
>>> LieDerivative(e_x, e_rho)
Commutator(e_x, e_rho)
>>> LieDerivative(e_x + e_y, fx)
1
>>> tp = TensorProduct(dx, dy)
>>> LieDerivative(e_x, tp)
LieDerivative(e_x, TensorProduct(dx, dy))
"""
def __new__(cls, v_field, expr):
expr_form_ord = covariant_order(expr)
if contravariant_order(v_field) != 1 or covariant_order(v_field):
raise ValueError('Lie derivatives are defined only with respect to'
' vector fields. The supplied argument was not a '
'vector field.')
if expr_form_ord > 0:
obj = super().__new__(cls, v_field, expr)
# deprecated assignments
obj._v_field = v_field
obj._expr = expr
return obj
if expr.atoms(BaseVectorField):
return Commutator(v_field, expr)
else:
return v_field.rcall(expr)
@property
def v_field(self):
return self.args[0]
@property
def expr(self):
return self.args[1]
def __call__(self, *args):
v = self.v_field
expr = self.expr
lead_term = v(expr(*args))
rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:])
for i in range(len(args))])
return lead_term - rest
class BaseCovarDerivativeOp(Expr):
"""Covariant derivative operator with respect to a base vector.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import BaseCovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch)
>>> cvd(fx)
1
>>> cvd(fx*e_x)
e_x
"""
def __new__(cls, coord_sys, index, christoffel):
index = _sympify(index)
christoffel = ImmutableDenseNDimArray(christoffel)
obj = super().__new__(cls, coord_sys, index, christoffel)
# deprecated assignments
obj._coord_sys = coord_sys
obj._index = index
obj._christoffel = christoffel
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def christoffel(self):
return self.args[2]
def __call__(self, field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field the behaviour is undefined.
"""
if covariant_order(field) != 0:
raise NotImplementedError()
field = vectors_in_basis(field, self._coord_sys)
wrt_vector = self._coord_sys.base_vector(self._index)
wrt_scalar = self._coord_sys.coord_function(self._index)
vectors = list(field.atoms(BaseVectorField))
# First step: replace all vectors with something susceptible to
# derivation and do the derivation
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i,
b in enumerate(vectors)]
d_result = field.subs(list(zip(vectors, d_funcs)))
d_result = wrt_vector(d_result)
# Second step: backsubstitute the vectors in
d_result = d_result.subs(list(zip(d_funcs, vectors)))
# Third step: evaluate the derivatives of the vectors
derivs = []
for v in vectors:
d = Add(*[(self._christoffel[k, wrt_vector._index, v._index]
*v._coord_sys.base_vector(k))
for k in range(v._coord_sys.dim)])
derivs.append(d)
to_subs = [wrt_vector(d) for d in d_funcs]
# XXX: This substitution can fail when there are Dummy symbols and the
# cache is disabled: https://github.com/sympy/sympy/issues/17794
result = d_result.subs(list(zip(to_subs, derivs)))
# Remove the dummies
result = result.subs(list(zip(d_funcs, vectors)))
return result.doit()
class CovarDerivativeOp(Expr):
"""Covariant derivative operator.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import CovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = CovarDerivativeOp(fx*e_x, ch)
>>> cvd(fx)
x
>>> cvd(fx*e_x)
x*e_x
"""
def __new__(cls, wrt, christoffel):
if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1:
raise NotImplementedError()
if contravariant_order(wrt) != 1 or covariant_order(wrt):
raise ValueError('Covariant derivatives are defined only with '
'respect to vector fields. The supplied argument '
'was not a vector field.')
christoffel = ImmutableDenseNDimArray(christoffel)
obj = super().__new__(cls, wrt, christoffel)
# deprecated assignments
obj._wrt = wrt
obj._christoffel = christoffel
return obj
@property
def wrt(self):
return self.args[0]
@property
def christoffel(self):
return self.args[1]
def __call__(self, field):
vectors = list(self._wrt.atoms(BaseVectorField))
base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel)
for v in vectors]
return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field)
###############################################################################
# Integral curves on vector fields
###############################################################################
def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False):
r"""Return the series expansion for an integral curve of the field.
Explanation
===========
Integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It verifies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
    This equation can also be decomposed in terms of a basis of coordinate functions
`V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i`
This function returns a series expansion of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
n
the order to which to expand
coord_sys
the coordinate system in which to expand
coeffs (default False) - if True return a list of elements of the expansion
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t, x, y
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import intcurve_series
Specify a starting point and a vector field:
>>> start_point = R2_r.point([x, y])
>>> vector_field = R2_r.e_x
Calculate the series:
>>> intcurve_series(vector_field, t, start_point, n=3)
Matrix([
[t + x],
[ y]])
Or get the elements of the expansion in a list:
>>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True)
>>> series[0]
Matrix([
[x],
[y]])
>>> series[1]
Matrix([
[t],
[0]])
>>> series[2]
Matrix([
[0],
[0]])
The series in the polar coordinate system:
>>> series = intcurve_series(vector_field, t, start_point,
... n=3, coord_sys=R2_p, coeffs=True)
>>> series[0]
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> series[1]
Matrix([
[t*x/sqrt(x**2 + y**2)],
[ -t*y/(x**2 + y**2)]])
>>> series[2]
Matrix([
[t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2],
[ t**2*x*y/(x**2 + y**2)**2]])
See Also
========
intcurve_diffequ
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
def iter_vfield(scalar_field, i):
"""Return ``vector_field`` called `i` times on ``scalar_field``."""
return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field)
def taylor_terms_per_coord(coord_function):
"""Return the series for one of the coordinates."""
return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i)
for i in range(n)]
coord_sys = coord_sys if coord_sys else start_point._coord_sys
coord_functions = coord_sys.coord_functions()
taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions]
if coeffs:
return [Matrix(t) for t in zip(*taylor_terms)]
else:
return Matrix([sum(c) for c in taylor_terms])
def intcurve_diffequ(vector_field, param, start_point, coord_sys=None):
r"""Return the differential equation for an integral curve of the field.
Explanation
===========
Integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It verifies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
This function returns the differential equation of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
coord_sys
the coordinate system in which to give the equations
Returns
=======
a tuple of (equations, initial conditions)
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t
>>> from sympy.diffgeom.rn import R2, R2_p, R2_r
>>> from sympy.diffgeom import intcurve_diffequ
Specify a starting point and a vector field:
>>> start_point = R2_r.point([0, 1])
>>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y
Get the equation:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point)
>>> equations
[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]
>>> init_cond
[f_0(0), f_1(0) - 1]
    The equations in the polar coordinate system:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p)
>>> equations
[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]
>>> init_cond
[f_0(0) - 1, f_1(0) - pi/2]
See Also
========
intcurve_series
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
coord_sys = coord_sys if coord_sys else start_point._coord_sys
gammas = [Function('f_%d' % i)(param) for i in range(
start_point._coord_sys.dim)]
arbitrary_p = Point(coord_sys, gammas)
coord_functions = coord_sys.coord_functions()
equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p))
for cf in coord_functions]
init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point))
for cf in coord_functions]
return equations, init_cond
###############################################################################
# Helpers
###############################################################################
def dummyfy(args, exprs):
# TODO Is this a good idea?
d_args = Matrix([s.as_dummy() for s in args])
reps = dict(zip(args, d_args))
d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs])
return d_args, d_exprs
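# Illustrative note (editorial addition): dummyfy((x, y), (x + y,)) returns a pair
# of column matrices, (Matrix([_x, _y]), Matrix([_x + _y])), with fresh Dummy
# symbols substituted for the original coordinate symbols.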
###############################################################################
# Helpers
###############################################################################
def contravariant_order(expr, _strict=False):
"""Return the contravariant order of an expression.
Examples
========
>>> from sympy.diffgeom import contravariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> contravariant_order(a)
0
>>> contravariant_order(a*R2.x + 2)
0
>>> contravariant_order(a*R2.x*R2.e_y + R2.e_x)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [contravariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing contravariant fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [contravariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between vectors.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a vector.')
return 0
elif isinstance(expr, BaseVectorField):
return 1
elif isinstance(expr, TensorProduct):
return sum(contravariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
def covariant_order(expr, _strict=False):
"""Return the covariant order of an expression.
Examples
========
>>> from sympy.diffgeom import covariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> covariant_order(a)
0
>>> covariant_order(a*R2.x + 2)
0
>>> covariant_order(a*R2.x*R2.dy + R2.dx)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [covariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing form fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [covariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between forms.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a form.')
return 0
elif isinstance(expr, Differential):
return covariant_order(*expr.args) + 1
elif isinstance(expr, TensorProduct):
return sum(covariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
###############################################################################
# Coordinate transformation functions
###############################################################################
def vectors_in_basis(expr, to_sys):
    """Transform all base vectors into base vectors of a specified coord basis.
While the new base vectors are in the new coordinate system basis, any
coefficients are kept in the old system.
Examples
========
>>> from sympy.diffgeom import vectors_in_basis
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> vectors_in_basis(R2_r.e_x, R2_p)
-y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2)
>>> vectors_in_basis(R2_p.e_r, R2_r)
sin(theta)*e_y + cos(theta)*e_x
"""
vectors = list(expr.atoms(BaseVectorField))
new_vectors = []
for v in vectors:
cs = v._coord_sys
jac = cs.jacobian(to_sys, cs.coord_functions())
new = (jac.T*Matrix(to_sys.base_vectors()))[v._index]
new_vectors.append(new)
return expr.subs(list(zip(vectors, new_vectors)))
###############################################################################
# Coordinate-dependent functions
###############################################################################
def twoform_to_matrix(expr):
"""Return the matrix representing the twoform.
For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`,
where `e_i` is the i-th base vector field for the coordinate system in
which the expression of `w` is given.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import twoform_to_matrix, TensorProduct
>>> TP = TensorProduct
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[1, 0],
[0, 1]])
>>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[x, 0],
[0, 1]])
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2)
Matrix([
[ 1, 0],
[-1/2, 1]])
"""
if covariant_order(expr) != 2 or contravariant_order(expr):
raise ValueError('The input expression is not a two-form.')
coord_sys = _find_coords(expr)
if len(coord_sys) != 1:
        raise ValueError('The input expression concerns more than one '
                         'coordinate system, hence there is no unambiguous '
                         'way to choose a coordinate system for the matrix.')
coord_sys = coord_sys.pop()
vectors = coord_sys.base_vectors()
expr = expr.expand()
matrix_content = [[expr.rcall(v1, v2) for v1 in vectors]
for v2 in vectors]
return Matrix(matrix_content)
def metric_to_Christoffel_1st(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of first kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]]
"""
matrix = twoform_to_matrix(expr)
if not matrix.is_symmetric():
raise ValueError(
'The two-form representing the metric is not symmetric.')
coord_sys = _find_coords(expr).pop()
deriv_matrices = [matrix.applyfunc(d) for d in coord_sys.base_vectors()]
indices = list(range(coord_sys.dim))
christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
def metric_to_Christoffel_2nd(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of second kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]]
"""
ch_1st = metric_to_Christoffel_1st(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
# XXX workaround, inverting a matrix does not work if it contains non
# symbols
#matrix = twoform_to_matrix(expr).inv()
matrix = twoform_to_matrix(expr)
s_fields = set()
for e in matrix:
s_fields.update(e.atoms(BaseScalarField))
s_fields = list(s_fields)
dums = coord_sys.symbols
matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields)))
# XXX end of workaround
christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices])
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
def metric_to_Riemann_components(expr):
"""Return the components of the Riemann tensor expressed in a given basis.
Given a metric it calculates the components of the Riemann tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> riemann = metric_to_Riemann_components(non_trivial_metric)
>>> riemann[0, :, :, :]
[[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]]
>>> riemann[1, :, :, :]
[[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]]
"""
ch_2nd = metric_to_Christoffel_2nd(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
deriv_ch = [[[[d(ch_2nd[i, j, k])
for d in coord_sys.base_vectors()]
for k in indices]
for j in indices]
for i in indices]
riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices])
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
return ImmutableDenseNDimArray(riemann)
def metric_to_Ricci_components(expr):
"""Return the components of the Ricci tensor expressed in a given basis.
Given a metric it calculates the components of the Ricci tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[0, 0], [0, 0]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> metric_to_Ricci_components(non_trivial_metric)
[[1/rho, 0], [0, exp(-2*rho)*rho]]
"""
riemann = metric_to_Riemann_components(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
ricci = [[Add(*[riemann[k, i, k, j] for k in indices])
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(ricci)
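# A minimal usage sketch tying the helpers above together: for the flat Euclidean
# metric on R2 the Christoffel symbols, the Riemann tensor and the Ricci tensor all
# vanish, as the docstring examples above show. The helper name ``_flat_metric_demo``
# is illustrative only.
def _flat_metric_demo():
    """
    >>> from sympy.diffgeom.rn import R2
    >>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct
    >>> TP = TensorProduct
    >>> flat = TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)
    >>> metric_to_Ricci_components(flat)
    [[0, 0], [0, 0]]
    """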
###############################################################################
# Classes for deprecation
###############################################################################
class _deprecated_container:
    # This class gives a deprecation warning.
# When deprecated features are completely deleted, this should be removed as well.
# See https://github.com/sympy/sympy/pull/19368
def __init__(self, message, data):
super().__init__(data)
self.message = message
def warn(self):
sympy_deprecation_warning(
self.message,
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
stacklevel=4
)
def __iter__(self):
self.warn()
return super().__iter__()
def __getitem__(self, key):
self.warn()
return super().__getitem__(key)
def __contains__(self, key):
self.warn()
return super().__contains__(key)
class _deprecated_list(_deprecated_container, list):
pass
class _deprecated_dict(_deprecated_container, dict):
pass
# Import at end to avoid cyclic imports
from sympy.simplify.simplify import simplify
|
858971b46c9333467ae0d6b39b54de319dd8f1d32c883fc736136f6d4f5e8906 | from sympy.core.function import Add, ArgumentIndexError, Function
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.trigonometric import cos, sin
def _cosm1(x, *, evaluate=True):
return Add(cos(x, evaluate=evaluate), -S.One, evaluate=evaluate)
class cosm1(Function):
""" Minus one plus cosine of x, i.e. cos(x) - 1. For use when x is close to zero.
Helper class for use with e.g. scipy.special.cosm1
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.cosm1.html
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return -sin(*self.args)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_cos(self, x, **kwargs):
return _cosm1(x)
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(cos).evalf(*args, **kwargs)
def _eval_simplify(self, **kwargs):
x, = self.args
candidate = _cosm1(x.simplify(**kwargs))
if candidate != _cosm1(x, evaluate=False):
return candidate
else:
return cosm1(x)
def _powm1(x, y, *, evaluate=True):
return Add(Pow(x, y, evaluate=evaluate), -S.One, evaluate=evaluate)
class powm1(Function):
""" Minus one plus x to the power of y, i.e. x**y - 1. For use when x is close to one or y is close to zero.
Helper class for use with e.g. scipy.special.powm1
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.powm1.html
"""
nargs = 2
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return Pow(self.args[0], self.args[1])*self.args[1]/self.args[0]
elif argindex == 2:
return log(self.args[0])*Pow(*self.args)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Pow(self, x, y, **kwargs):
return _powm1(x, y)
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(Pow).evalf(*args, **kwargs)
def _eval_simplify(self, **kwargs):
x, y = self.args
candidate = _powm1(x.simplify(**kwargs), y.simplify(**kwargs))
if candidate != _powm1(x, y, evaluate=False):
return candidate
else:
return powm1(x, y)
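# A minimal usage sketch: both helpers can be rewritten back to elementary functions,
# which is also how ``_eval_evalf`` above evaluates them numerically. The helper name
# ``_demo_rewrites`` is illustrative only.
def _demo_rewrites():
    """
    >>> from sympy import cos, Pow, symbols
    >>> from sympy.codegen.scipy_nodes import cosm1, powm1
    >>> x, y = symbols('x y')
    >>> cosm1(x).rewrite(cos)
    cos(x) - 1
    >>> powm1(x, y).rewrite(Pow)
    x**y - 1
    """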
|
3fb23e4c3fb81f80ebe1bc45352ee9e3a383f5b8b7da5f12a4511ecd55aeb526 | """
AST nodes specific to Fortran.
The functions defined in this module allow the user to express functions such as ``dsign``
as a SymPy function for symbolic manipulation.
"""
from sympy.codegen.ast import (
Attribute, CodeBlock, FunctionCall, Node, none, String,
Token, _mk_Tuple, Variable
)
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Function
from sympy.core.numbers import Float, Integer
from sympy.core.symbol import Str
from sympy.core.sympify import sympify
from sympy.logic import true, false
from sympy.utilities.iterables import iterable
pure = Attribute('pure')
elemental = Attribute('elemental') # (all elemental procedures are also pure)
intent_in = Attribute('intent_in')
intent_out = Attribute('intent_out')
intent_inout = Attribute('intent_inout')
allocatable = Attribute('allocatable')
class Program(Token):
""" Represents a 'program' block in Fortran.
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy.codegen.fnodes import Program
>>> prog = Program('myprogram', [Print([42])])
>>> from sympy import fcode
>>> print(fcode(prog, source_format='free'))
program myprogram
print *, 42
end program
"""
__slots__ = _fields = ('name', 'body')
_construct_name = String
_construct_body = staticmethod(lambda body: CodeBlock(*body))
class use_rename(Token):
""" Represents a renaming in a use statement in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import use_rename, use
>>> from sympy import fcode
>>> ren = use_rename("thingy", "convolution2d")
>>> print(fcode(ren, source_format='free'))
thingy => convolution2d
>>> full = use('signallib', only=['snr', ren])
>>> print(fcode(full, source_format='free'))
use signallib, only: snr, thingy => convolution2d
"""
__slots__ = _fields = ('local', 'original')
_construct_local = String
_construct_original = String
def _name(arg):
if hasattr(arg, 'name'):
return arg.name
else:
return String(arg)
class use(Token):
""" Represents a use statement in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import use
>>> from sympy import fcode
>>> fcode(use('signallib'), source_format='free')
'use signallib'
>>> fcode(use('signallib', [('metric', 'snr')]), source_format='free')
'use signallib, metric => snr'
>>> fcode(use('signallib', only=['snr', 'convolution2d']), source_format='free')
'use signallib, only: snr, convolution2d'
"""
__slots__ = _fields = ('namespace', 'rename', 'only')
defaults = {'rename': none, 'only': none}
_construct_namespace = staticmethod(_name)
_construct_rename = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else use_rename(*arg) for arg in args]))
_construct_only = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else _name(arg) for arg in args]))
class Module(Token):
""" Represents a module in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import Module
>>> from sympy import fcode
>>> print(fcode(Module('signallib', ['implicit none'], []), source_format='free'))
module signallib
implicit none
<BLANKLINE>
contains
<BLANKLINE>
<BLANKLINE>
end module
"""
__slots__ = _fields = ('name', 'declarations', 'definitions')
defaults = {'declarations': Tuple()}
_construct_name = String
@classmethod
def _construct_declarations(cls, args):
args = [Str(arg) if isinstance(arg, str) else arg for arg in args]
return CodeBlock(*args)
_construct_definitions = staticmethod(lambda arg: CodeBlock(*arg))
class Subroutine(Node):
""" Represents a subroutine in Fortran.
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.ast import Print
>>> from sympy.codegen.fnodes import Subroutine
>>> x, y = symbols('x y', real=True)
>>> sub = Subroutine('mysub', [x, y], [Print([x**2 + y**2, x*y])])
>>> print(fcode(sub, source_format='free', standard=2003))
subroutine mysub(x, y)
real*8 :: x
real*8 :: y
print *, x**2 + y**2, x*y
end subroutine
"""
__slots__ = ('name', 'parameters', 'body')
_fields = __slots__ + Node._fields
_construct_name = String
_construct_parameters = staticmethod(lambda params: Tuple(*map(Variable.deduced, params)))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class SubroutineCall(Token):
""" Represents a call to a subroutine in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import SubroutineCall
>>> from sympy import fcode
>>> fcode(SubroutineCall('mysub', 'x y'.split()))
' call mysub(x, y)'
"""
__slots__ = _fields = ('name', 'subroutine_args')
_construct_name = staticmethod(_name)
_construct_subroutine_args = staticmethod(_mk_Tuple)
class Do(Token):
""" Represents a Do loop in in Fortran.
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.ast import aug_assign, Print
>>> from sympy.codegen.fnodes import Do
>>> i, n = symbols('i n', integer=True)
>>> r = symbols('r', real=True)
>>> body = [aug_assign(r, '+', 1/i), Print([i, r])]
>>> do1 = Do(body, i, 1, n)
>>> print(fcode(do1, source_format='free'))
do i = 1, n
r = r + 1d0/i
print *, i, r
end do
>>> do2 = Do(body, i, 1, n, 2)
>>> print(fcode(do2, source_format='free'))
do i = 1, n, 2
r = r + 1d0/i
print *, i, r
end do
"""
__slots__ = _fields = ('body', 'counter', 'first', 'last', 'step', 'concurrent')
defaults = {'step': Integer(1), 'concurrent': false}
_construct_body = staticmethod(lambda body: CodeBlock(*body))
_construct_counter = staticmethod(sympify)
_construct_first = staticmethod(sympify)
_construct_last = staticmethod(sympify)
_construct_step = staticmethod(sympify)
_construct_concurrent = staticmethod(lambda arg: true if arg else false)
class ArrayConstructor(Token):
""" Represents an array constructor.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import ArrayConstructor
>>> ac = ArrayConstructor([1, 2, 3])
>>> fcode(ac, standard=95, source_format='free')
'(/1, 2, 3/)'
>>> fcode(ac, standard=2003, source_format='free')
'[1, 2, 3]'
"""
__slots__ = _fields = ('elements',)
_construct_elements = staticmethod(_mk_Tuple)
class ImpliedDoLoop(Token):
""" Represents an implied do loop in Fortran.
Examples
========
>>> from sympy import Symbol, fcode
>>> from sympy.codegen.fnodes import ImpliedDoLoop, ArrayConstructor
>>> i = Symbol('i', integer=True)
>>> idl = ImpliedDoLoop(i**3, i, -3, 3, 2) # -27, -1, 1, 27
>>> ac = ArrayConstructor([-28, idl, 28]) # -28, -27, -1, 1, 27, 28
>>> fcode(ac, standard=2003, source_format='free')
'[-28, (i**3, i = -3, 3, 2), 28]'
"""
__slots__ = _fields = ('expr', 'counter', 'first', 'last', 'step')
defaults = {'step': Integer(1)}
_construct_expr = staticmethod(sympify)
_construct_counter = staticmethod(sympify)
_construct_first = staticmethod(sympify)
_construct_last = staticmethod(sympify)
_construct_step = staticmethod(sympify)
class Extent(Basic):
""" Represents a dimension extent.
Examples
========
>>> from sympy.codegen.fnodes import Extent
>>> e = Extent(-3, 3) # -3, -2, -1, 0, 1, 2, 3
>>> from sympy import fcode
>>> fcode(e, source_format='free')
'-3:3'
>>> from sympy.codegen.ast import Variable, real
>>> from sympy.codegen.fnodes import dimension, intent_out
>>> dim = dimension(e, e)
>>> arr = Variable('x', real, attrs=[dim, intent_out])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'real*8, dimension(-3:3, -3:3), intent(out) :: x'
"""
def __new__(cls, *args):
if len(args) == 2:
low, high = args
return Basic.__new__(cls, sympify(low), sympify(high))
elif len(args) == 0 or (len(args) == 1 and args[0] in (':', None)):
return Basic.__new__(cls) # assumed shape
else:
raise ValueError("Expected 0 or 2 args (or one argument == None or ':')")
def _sympystr(self, printer):
if len(self.args) == 0:
return ':'
return ":".join(str(arg) for arg in self.args)
assumed_extent = Extent() # or Extent(':'), Extent(None)
def dimension(*args):
""" Creates a 'dimension' Attribute with (up to 7) extents.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import dimension, intent_in
>>> dim = dimension('2', ':') # 2 rows, runtime determined number of columns
>>> from sympy.codegen.ast import Variable, integer
>>> arr = Variable('a', integer, attrs=[dim, intent_in])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'integer*4, dimension(2, :), intent(in) :: a'
"""
if len(args) > 7:
raise ValueError("Fortran only supports up to 7 dimensional arrays")
parameters = []
for arg in args:
if isinstance(arg, Extent):
parameters.append(arg)
elif isinstance(arg, str):
if arg == ':':
parameters.append(Extent())
else:
parameters.append(String(arg))
elif iterable(arg):
parameters.append(Extent(*arg))
else:
parameters.append(sympify(arg))
if len(args) == 0:
raise ValueError("Need at least one dimension")
return Attribute('dimension', parameters)
assumed_size = dimension('*')
def array(symbol, dim, intent=None, *, attrs=(), value=None, type=None):
""" Convenience function for creating a Variable instance for a Fortran array.
Parameters
==========
symbol : symbol
dim : Attribute or iterable
        If dim is an ``Attribute`` it needs to have the name 'dimension'. If it is
not an ``Attribute``, then it is passed to :func:`dimension` as ``*dim``
intent : str
One of: 'in', 'out', 'inout' or None
    attrs, value, type : keyword-only arguments
        Passed on to ``Variable`` (``attrs`` is extended with the dimension and
        any intent attribute).
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.ast import integer, real
>>> from sympy.codegen.fnodes import array
>>> arr = array('a', '*', 'in', type=integer)
>>> print(fcode(arr.as_Declaration(), source_format='free', standard=2003))
integer*4, dimension(*), intent(in) :: a
>>> x = array('x', [3, ':', ':'], intent='out', type=real)
>>> print(fcode(x.as_Declaration(value=1), source_format='free', standard=2003))
real*8, dimension(3, :, :), intent(out) :: x = 1
"""
if isinstance(dim, Attribute):
if str(dim.name) != 'dimension':
raise ValueError("Got an unexpected Attribute argument as dim: %s" % str(dim))
else:
dim = dimension(*dim)
attrs = list(attrs) + [dim]
if intent is not None:
if intent not in (intent_in, intent_out, intent_inout):
intent = {'in': intent_in, 'out': intent_out, 'inout': intent_inout}[intent]
attrs.append(intent)
if type is None:
return Variable.deduced(symbol, value=value, attrs=attrs)
else:
return Variable(symbol, type, value=value, attrs=attrs)
def _printable(arg):
return String(arg) if isinstance(arg, str) else sympify(arg)
def allocated(array):
""" Creates an AST node for a function call to Fortran's "allocated(...)"
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import allocated
>>> alloc = allocated('x')
>>> fcode(alloc, source_format='free')
'allocated(x)'
"""
return FunctionCall('allocated', [_printable(array)])
def lbound(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "lbound(...)"
Parameters
==========
array : Symbol or String
dim : expr
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import lbound
>>> lb = lbound('arr', dim=2)
>>> fcode(lb, source_format='free')
'lbound(arr, 2)'
"""
return FunctionCall(
'lbound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def ubound(array, dim=None, kind=None):
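    """ Creates an AST node for a function call to Fortran's "ubound(...)"
    Parameters
    ==========
    array : Symbol or String
    dim : expr
    kind : expr
    """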
return FunctionCall(
'ubound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def shape(source, kind=None):
""" Creates an AST node for a function call to Fortran's "shape(...)"
Parameters
==========
source : Symbol or String
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import shape
>>> shp = shape('x')
>>> fcode(shp, source_format='free')
'shape(x)'
"""
return FunctionCall(
'shape',
[_printable(source)] +
([_printable(kind)] if kind else [])
)
def size(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "size(...)"
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, size
>>> a = Symbol('a', real=True)
>>> body = [Return((sum_(a**2)/size(a))**.5)]
>>> arr = array(a, dim=[':'], intent='in')
>>> fd = FunctionDefinition(real, 'rms', [arr], body)
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a)
real*8, dimension(:), intent(in) :: a
rms = sqrt(sum(a**2)*1d0/size(a))
end function
"""
return FunctionCall(
'size',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def reshape(source, shape, pad=None, order=None):
""" Creates an AST node for a function call to Fortran's "reshape(...)"
Parameters
==========
source : Symbol or String
shape : ArrayExpr
"""
return FunctionCall(
'reshape',
[_printable(source), _printable(shape)] +
([_printable(pad)] if pad else []) +
        ([_printable(order)] if order else [])
)
def bind_C(name=None):
""" Creates an Attribute ``bind_C`` with a name.
Parameters
==========
name : str
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, bind_C
>>> a = Symbol('a', real=True)
>>> s = Symbol('s', integer=True)
>>> arr = array(a, dim=[s], intent='in')
>>> body = [Return((sum_(a**2)/s)**.5)]
>>> fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')])
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a, s) bind(C, name="rms")
real*8, dimension(s), intent(in) :: a
integer*4 :: s
rms = sqrt(sum(a**2)/s)
end function
"""
return Attribute('bind_C', [String(name)] if name else [])
class GoTo(Token):
""" Represents a goto statement in Fortran
Examples
========
>>> from sympy.codegen.fnodes import GoTo
>>> go = GoTo([10, 20, 30], 'i')
>>> from sympy import fcode
>>> fcode(go, source_format='free')
'go to (10, 20, 30), i'
"""
__slots__ = _fields = ('labels', 'expr')
defaults = {'expr': none}
_construct_labels = staticmethod(_mk_Tuple)
_construct_expr = staticmethod(sympify)
class FortranReturn(Token):
""" AST node explicitly mapped to a fortran "return".
Explanation
===========
    Because a return statement in Fortran is different from C, and
    in order to aid reuse of our codegen ASTs, the ordinary
    ``.codegen.ast.Return`` is interpreted as an assignment to
    the result variable of the function. If one for some reason needs
    to generate a Fortran RETURN statement, this node should be used.
Examples
========
>>> from sympy.codegen.fnodes import FortranReturn
>>> from sympy import fcode
>>> fcode(FortranReturn('x'))
' return x'
"""
__slots__ = _fields = ('return_value',)
defaults = {'return_value': none}
_construct_return_value = staticmethod(sympify)
class FFunction(Function):
_required_standard = 77
def _fcode(self, printer):
name = self.__class__.__name__
if printer._settings['standard'] < self._required_standard:
raise NotImplementedError("%s requires Fortran %d or newer" %
(name, self._required_standard))
return '{}({})'.format(name, ', '.join(map(printer._print, self.args)))
class F95Function(FFunction):
_required_standard = 95
class isign(FFunction):
""" Fortran sign intrinsic for integer arguments. """
nargs = 2
class dsign(FFunction):
""" Fortran sign intrinsic for double precision arguments. """
nargs = 2
class cmplx(FFunction):
""" Fortran complex conversion function. """
nargs = 2 # may be extended to (2, 3) at a later point
class kind(FFunction):
""" Fortran kind function. """
nargs = 1
class merge(F95Function):
""" Fortran merge function """
nargs = 3
class _literal(Float):
_token = None # type: str
_decimals = None # type: int
def _fcode(self, printer, *args, **kwargs):
mantissa, sgnd_ex = ('%.{}e'.format(self._decimals) % self).split('e')
mantissa = mantissa.strip('0').rstrip('.')
ex_sgn, ex_num = sgnd_ex[0], sgnd_ex[1:].lstrip('0')
ex_sgn = '' if ex_sgn == '+' else ex_sgn
return (mantissa or '0') + self._token + ex_sgn + (ex_num or '0')
class literal_sp(_literal):
""" Fortran single precision real literal """
_token = 'e'
_decimals = 9
class literal_dp(_literal):
""" Fortran double precision real literal """
_token = 'd'
_decimals = 17
class sum_(Token, Expr):
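    """ Represents a call to Fortran's ``sum`` intrinsic (an array argument with
    optional ``dim`` and ``mask`` arguments).
    """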
__slots__ = _fields = ('array', 'dim', 'mask')
defaults = {'dim': none, 'mask': none}
_construct_array = staticmethod(sympify)
_construct_dim = staticmethod(sympify)
class product_(Token, Expr):
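    """ Represents a call to Fortran's ``product`` intrinsic (an array argument with
    optional ``dim`` and ``mask`` arguments).
    """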
__slots__ = _fields = ('array', 'dim', 'mask')
defaults = {'dim': none, 'mask': none}
_construct_array = staticmethod(sympify)
_construct_dim = staticmethod(sympify)
|
11e48e0be7dac13c458ae78c22e3254853ee112ffdee18710b6863a19ffaa4f4 | """
Classes and functions useful for rewriting expressions for optimized code
generation. Some languages (or standards thereof), e.g. C99, offer specialized
math functions for better performance and/or precision.
Using the ``optimize`` function in this module, together with a collection of
rules (represented as instances of ``Optimization``), one can rewrite the
expressions for this purpose::
>>> from sympy import Symbol, exp, log
>>> from sympy.codegen.rewriting import optimize, optims_c99
>>> x = Symbol('x')
>>> optimize(3*exp(2*x) - 3, optims_c99)
3*expm1(2*x)
>>> optimize(exp(2*x) - 1 - exp(-33), optims_c99)
expm1(2*x) - exp(-33)
>>> optimize(log(3*x + 3), optims_c99)
log1p(x) + log(3)
>>> optimize(log(2*x + 3), optims_c99)
log(2*x + 3)
The ``optims_c99`` imported above is a tuple containing the following instances
(which may be imported from ``sympy.codegen.rewriting``):
- ``expm1_opt``
- ``log1p_opt``
- ``exp2_opt``
- ``log2_opt``
- ``log2const_opt``
"""
from sympy.core.function import expand_log
from sympy.core.singleton import S
from sympy.core.symbol import Wild
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import (Max, Min)
from sympy.functions.elementary.trigonometric import (cos, sin, sinc)
from sympy.assumptions import Q, ask
from sympy.codegen.cfunctions import log1p, log2, exp2, expm1
from sympy.codegen.matrix_nodes import MatrixSolve
from sympy.core.expr import UnevaluatedExpr
from sympy.core.power import Pow
from sympy.codegen.numpy_nodes import logaddexp, logaddexp2
from sympy.codegen.scipy_nodes import cosm1, powm1
from sympy.core.mul import Mul
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.utilities.iterables import sift
class Optimization:
""" Abstract base class for rewriting optimization.
Subclasses should implement ``__call__`` taking an expression
as argument.
Parameters
==========
cost_function : callable returning number
priority : number
"""
def __init__(self, cost_function=None, priority=1):
self.cost_function = cost_function
self.priority=priority
def cheapest(self, *args):
return sorted(args, key=self.cost_function)[0]
class ReplaceOptim(Optimization):
""" Rewriting optimization calling replace on expressions.
Explanation
===========
The instance can be used as a function on expressions for which
it will apply the ``replace`` method (see
:meth:`sympy.core.basic.Basic.replace`).
Parameters
==========
query :
First argument passed to replace.
value :
Second argument passed to replace.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.rewriting import ReplaceOptim
>>> from sympy.codegen.cfunctions import exp2
>>> x = Symbol('x')
>>> exp2_opt = ReplaceOptim(lambda p: p.is_Pow and p.base == 2,
... lambda p: exp2(p.exp))
>>> exp2_opt(2**x)
exp2(x)
"""
def __init__(self, query, value, **kwargs):
super().__init__(**kwargs)
self.query = query
self.value = value
def __call__(self, expr):
return expr.replace(self.query, self.value)
def optimize(expr, optimizations):
""" Apply optimizations to an expression.
Parameters
==========
expr : expression
optimizations : iterable of ``Optimization`` instances
The optimizations will be sorted with respect to ``priority`` (highest first).
Examples
========
>>> from sympy import log, Symbol
>>> from sympy.codegen.rewriting import optims_c99, optimize
>>> x = Symbol('x')
>>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)
log1p(x**2) + log2(x + 3)
"""
for optim in sorted(optimizations, key=lambda opt: opt.priority, reverse=True):
new_expr = optim(expr)
if optim.cost_function is None:
expr = new_expr
else:
expr = optim.cheapest(expr, new_expr)
return expr
exp2_opt = ReplaceOptim(
lambda p: p.is_Pow and p.base == 2,
lambda p: exp2(p.exp)
)
_d = Wild('d', properties=[lambda x: x.is_Dummy])
_u = Wild('u', properties=[lambda x: not x.is_number and not x.is_Add])
_v = Wild('v')
_w = Wild('w')
_n = Wild('n', properties=[lambda x: x.is_number])
sinc_opt1 = ReplaceOptim(
sin(_w)/_w, sinc(_w)
)
sinc_opt2 = ReplaceOptim(
sin(_n*_w)/_w, _n*sinc(_n*_w)
)
sinc_opts = (sinc_opt1, sinc_opt2)
log2_opt = ReplaceOptim(_v*log(_w)/log(2), _v*log2(_w), cost_function=lambda expr: expr.count(
lambda e: ( # division & eval of transcendentals are expensive floating point operations...
e.is_Pow and e.exp.is_negative # division
or (isinstance(e, (log, log2)) and not e.args[0].is_number)) # transcendental
)
)
log2const_opt = ReplaceOptim(log(2)*log2(_w), log(_w))
logsumexp_2terms_opt = ReplaceOptim(
lambda l: (isinstance(l, log)
and l.args[0].is_Add
and len(l.args[0].args) == 2
and all(isinstance(t, exp) for t in l.args[0].args)),
lambda l: (
Max(*[e.args[0] for e in l.args[0].args]) +
        log1p(exp(Min(*[e.args[0] for e in l.args[0].args]) -
                  Max(*[e.args[0] for e in l.args[0].args])))
)
)
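# A minimal usage sketch of the two-term rewrite above: it applies the log-sum-exp
# trick log(exp(a) + exp(b)) -> Max(a, b) + log1p(exp(Min(a, b) - Max(a, b))),
# factoring out the dominant exponent to avoid overflow. The helper name
# ``_demo_logsumexp`` is illustrative only.
def _demo_logsumexp():
    """
    >>> from sympy import exp, log, Max, Min, symbols
    >>> from sympy.codegen.cfunctions import log1p
    >>> from sympy.codegen.rewriting import logsumexp_2terms_opt
    >>> a, b = symbols('a b')
    >>> res = logsumexp_2terms_opt(log(exp(a) + exp(b)))
    >>> res == Max(a, b) + log1p(exp(Min(a, b) - Max(a, b)))
    True
    """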
class FuncMinusOneOptim(ReplaceOptim):
"""Specialization of ReplaceOptim for functions evaluating "f(x) - 1".
Explanation
===========
    Numerical functions which go toward one as x goes toward zero are often best
    implemented by a dedicated function in order to avoid catastrophic
    cancellation. One such example is ``expm1(x)`` in the C standard library
    which evaluates ``exp(x) - 1``. Such functions preserve many more
    significant digits when their argument is much smaller than one, compared
    to subtracting one afterwards.
Parameters
==========
func :
The function which is subtracted by one.
func_m_1 :
The specialized function evaluating ``func(x) - 1``.
opportunistic : bool
When ``True``, apply the transformation as long as the magnitude of the
remaining number terms decreases. When ``False``, only apply the
transformation if it completely eliminates the number term.
Examples
========
>>> from sympy import symbols, exp
>>> from sympy.codegen.rewriting import FuncMinusOneOptim
>>> from sympy.codegen.cfunctions import expm1
>>> x, y = symbols('x y')
>>> expm1_opt = FuncMinusOneOptim(exp, expm1)
>>> expm1_opt(exp(x) + 2*exp(5*y) - 3)
expm1(x) + 2*expm1(5*y)
"""
def __init__(self, func, func_m_1, opportunistic=True):
weight = 10 # <-- this is an arbitrary number (heuristic)
super().__init__(lambda e: e.is_Add, self.replace_in_Add,
cost_function=lambda expr: expr.count_ops() - weight*expr.count(func_m_1))
self.func = func
self.func_m_1 = func_m_1
self.opportunistic = opportunistic
def _group_Add_terms(self, add):
numbers, non_num = sift(add.args, lambda arg: arg.is_number, binary=True)
numsum = sum(numbers)
terms_with_func, other = sift(non_num, lambda arg: arg.has(self.func), binary=True)
return numsum, terms_with_func, other
def replace_in_Add(self, e):
""" passed as second argument to Basic.replace(...) """
numsum, terms_with_func, other_non_num_terms = self._group_Add_terms(e)
if numsum == 0:
return e
substituted, untouched = [], []
for with_func in terms_with_func:
if with_func.is_Mul:
func, coeff = sift(with_func.args, lambda arg: arg.func == self.func, binary=True)
if len(func) == 1 and len(coeff) == 1:
func, coeff = func[0], coeff[0]
else:
coeff = None
elif with_func.func == self.func:
func, coeff = with_func, S.One
else:
coeff = None
if coeff is not None and coeff.is_number and sign(coeff) == -sign(numsum):
if self.opportunistic:
do_substitute = abs(coeff+numsum) < abs(numsum)
else:
do_substitute = coeff+numsum == 0
if do_substitute: # advantageous substitution
numsum += coeff
substituted.append(coeff*self.func_m_1(*func.args))
continue
untouched.append(with_func)
return e.func(numsum, *substituted, *untouched, *other_non_num_terms)
def __call__(self, expr):
alt1 = super().__call__(expr)
alt2 = super().__call__(expr.factor())
return self.cheapest(alt1, alt2)
expm1_opt = FuncMinusOneOptim(exp, expm1)
cosm1_opt = FuncMinusOneOptim(cos, cosm1)
powm1_opt = FuncMinusOneOptim(Pow, powm1)
log1p_opt = ReplaceOptim(
lambda e: isinstance(e, log),
lambda l: expand_log(l.replace(
log, lambda arg: log(arg.factor())
)).replace(log(_u+1), log1p(_u))
)
def create_expand_pow_optimization(limit, *, base_req=lambda b: b.is_symbol):
""" Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.
Explanation
===========
The requirements for expansions are that the base needs to be a symbol
and the exponent needs to be an Integer (and be less than or equal to
``limit``).
Parameters
==========
limit : int
The highest power which is expanded into multiplication.
base_req : function returning bool
Requirement on base for expansion to happen, default is to return
the ``is_symbol`` attribute of the base.
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.codegen.rewriting import create_expand_pow_optimization
>>> x = Symbol('x')
>>> expand_opt = create_expand_pow_optimization(3)
>>> expand_opt(x**5 + x**3)
x**5 + x*x*x
>>> expand_opt(x**5 + x**3 + sin(x)**3)
x**5 + sin(x)**3 + x*x*x
>>> opt2 = create_expand_pow_optimization(3, base_req=lambda b: not b.is_Function)
>>> opt2((x+1)**2 + sin(x)**2)
sin(x)**2 + (x + 1)*(x + 1)
"""
return ReplaceOptim(
lambda e: e.is_Pow and base_req(e.base) and e.exp.is_Integer and abs(e.exp) <= limit,
lambda p: (
UnevaluatedExpr(Mul(*([p.base]*+p.exp), evaluate=False)) if p.exp > 0 else
1/UnevaluatedExpr(Mul(*([p.base]*-p.exp), evaluate=False))
))
# Optimization procedures for turning A**(-1) * x into MatrixSolve(A, x)
def _matinv_predicate(expr):
# TODO: We should be able to support more than 2 elements
if expr.is_MatMul and len(expr.args) == 2:
left, right = expr.args
if left.is_Inverse and right.shape[1] == 1:
inv_arg = left.arg
if isinstance(inv_arg, MatrixSymbol):
return bool(ask(Q.fullrank(left.arg)))
return False
def _matinv_transform(expr):
left, right = expr.args
inv_arg = left.arg
return MatrixSolve(inv_arg, right)
matinv_opt = ReplaceOptim(_matinv_predicate, _matinv_transform)
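# A minimal usage sketch of matinv_opt: under a full-rank assumption an explicit
# inverse applied to a column vector is turned into a MatrixSolve node (the
# assumption is needed for the predicate's ``ask(Q.fullrank(...))`` check to
# succeed). The helper name ``_demo_matinv`` is illustrative only.
def _demo_matinv():
    """
    >>> from sympy import MatrixSymbol, symbols, assuming, Q
    >>> from sympy.codegen.matrix_nodes import MatrixSolve
    >>> from sympy.codegen.rewriting import matinv_opt
    >>> n = symbols('n', integer=True)
    >>> A = MatrixSymbol('A', n, n)
    >>> b = MatrixSymbol('b', n, 1)
    >>> with assuming(Q.fullrank(A)):
    ...     res = matinv_opt(A**-1 * b)
    >>> res == MatrixSolve(A, b)
    True
    """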
logaddexp_opt = ReplaceOptim(log(exp(_v)+exp(_w)), logaddexp(_v, _w))
logaddexp2_opt = ReplaceOptim(log(Pow(2, _v)+Pow(2, _w)), logaddexp2(_v, _w)*log(2))
# Collections of optimizations:
optims_c99 = (expm1_opt, log1p_opt, exp2_opt, log2_opt, log2const_opt)
optims_numpy = optims_c99 + (logaddexp_opt, logaddexp2_opt,) + sinc_opts
optims_scipy = (cosm1_opt, powm1_opt)
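# A minimal usage sketch of optims_scipy: the collection rewrites "f(x) - 1" patterns
# into the dedicated scipy-oriented nodes from sympy.codegen.scipy_nodes. The helper
# name ``_demo_optims_scipy`` is illustrative only.
def _demo_optims_scipy():
    """
    >>> from sympy import cos, symbols
    >>> from sympy.codegen.rewriting import optimize, optims_scipy
    >>> x, y = symbols('x y')
    >>> optimize(cos(x) - 1, optims_scipy)
    cosm1(x)
    >>> optimize(x**y - 1, optims_scipy)
    powm1(x, y)
    """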
|
0749d93637c1a63f59bf5912c2ae5c05abf968622faf5eb966dac8a0f6fe51d1 | """
Types used to represent a full function/module as an Abstract Syntax Tree.
Most types are small, and are merely used as tokens in the AST. A tree diagram
has been included below to illustrate the relationships between the AST types.
AST Type Tree
-------------
::
*Basic*
|
|
CodegenAST
|
|--->AssignmentBase
| |--->Assignment
| |--->AugmentedAssignment
| |--->AddAugmentedAssignment
| |--->SubAugmentedAssignment
| |--->MulAugmentedAssignment
| |--->DivAugmentedAssignment
| |--->ModAugmentedAssignment
|
|--->CodeBlock
|
|
|--->Token
|--->Attribute
|--->For
|--->String
| |--->QuotedString
| |--->Comment
|--->Type
| |--->IntBaseType
| | |--->_SizedIntType
| | |--->SignedIntType
| | |--->UnsignedIntType
| |--->FloatBaseType
| |--->FloatType
| |--->ComplexBaseType
| |--->ComplexType
|--->Node
| |--->Variable
| | |---> Pointer
| |--->FunctionPrototype
| |--->FunctionDefinition
|--->Element
|--->Declaration
|--->While
|--->Scope
|--->Stream
|--->Print
|--->FunctionCall
|--->BreakToken
|--->ContinueToken
|--->NoneToken
|--->Return
Predefined types
----------------
A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module
for convenience. Perhaps the two most common ones for code-generation (of numeric
codes) are ``float32`` and ``float64`` (known as single and double precision respectively).
There are also precision generic versions of Types (for which the codeprinters selects the
underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``.
The other ``Type`` instances defined are:
- ``intc``: Integer type used by C's "int".
- ``intp``: Integer type wide enough to hold a pointer, following NumPy's ``intp``.
- ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers.
- ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers.
- ``float80``: known as "extended precision" on modern x86/amd64 hardware.
- ``complex64``: Complex number represented by two ``float32`` numbers
- ``complex128``: Complex number represented by two ``float64`` numbers
Using the nodes
---------------
It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying
Newton's method::
>>> from sympy import symbols, cos
>>> from sympy.codegen.ast import While, Assignment, aug_assign, Print
>>> t, dx, x = symbols('tol delta val')
>>> expr = cos(x) - x**3
>>> whl = While(abs(dx) > t, [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx),
... Print([x])
... ])
>>> from sympy import pycode
>>> py_str = pycode(whl)
>>> print(py_str)
while (abs(delta) > tol):
delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val))
val += delta
print(val)
>>> import math
>>> tol, val, delta = 1e-5, 0.5, float('inf')
>>> exec(py_str)
1.1121416371
0.909672693737
0.867263818209
0.865477135298
0.865474033111
>>> print('%3.1g' % (math.cos(val) - val**3))
-3e-11
If we want to generate Fortran code for the same while loop we simple call ``fcode``::
>>> from sympy import fcode
>>> print(fcode(whl, standard=2003, source_format='free'))
do while (abs(delta) > tol)
delta = (val**3 - cos(val))/(-3*val**2 - sin(val))
val = val + delta
print *, val
end do
There is a function constructing a loop (or a complete function) like this in
:mod:`sympy.codegen.algorithms`.
"""
from __future__ import annotations
from typing import Any
from collections import defaultdict
from sympy.core.relational import (Ge, Gt, Le, Lt)
from sympy.core import Symbol, Tuple, Dummy
from sympy.core.basic import Basic
from sympy.core.expr import Expr, Atom
from sympy.core.numbers import Float, Integer, oo
from sympy.core.sympify import _sympify, sympify, SympifyError
from sympy.utilities.iterables import (iterable, topological_sort,
numbered_symbols, filter_symbols)
def _mk_Tuple(args):
"""
Create a SymPy Tuple object from an iterable, converting Python strings to
AST strings.
Parameters
==========
args: iterable
Arguments to :class:`sympy.Tuple`.
Returns
=======
sympy.Tuple
"""
args = [String(arg) if isinstance(arg, str) else arg for arg in args]
return Tuple(*args)
class CodegenAST(Basic):
__slots__ = ()
class Token(CodegenAST):
""" Base class for the AST types.
Explanation
===========
Defining fields are set in ``_fields``. Attributes (defined in _fields)
are only allowed to contain instances of Basic (unless atomic, see
``String``). The arguments to ``__new__()`` correspond to the attributes in
    the order defined in ``_fields``. The ``defaults`` class attribute is a
dictionary mapping attribute names to their default values.
Subclasses should not need to override the ``__new__()`` method. They may
define a class or static method named ``_construct_<attr>`` for each
attribute to process the value passed to ``__new__()``. Attributes listed
in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`.
"""
__slots__: tuple[str, ...] = ()
_fields = __slots__
defaults: dict[str, Any] = {}
not_in_args: list[str] = []
indented_args = ['body']
@property
def is_Atom(self):
return len(self._fields) == 0
@classmethod
def _get_constructor(cls, attr):
""" Get the constructor function for an attribute by name. """
return getattr(cls, '_construct_%s' % attr, lambda x: x)
@classmethod
def _construct(cls, attr, arg):
""" Construct an attribute value from argument passed to ``__new__()``. """
# arg may be ``NoneToken()``, so comparison is done using == instead of ``is`` operator
if arg == None:
return cls.defaults.get(attr, none)
else:
if isinstance(arg, Dummy): # SymPy's replace uses Dummy instances
return arg
else:
return cls._get_constructor(attr)(arg)
def __new__(cls, *args, **kwargs):
# Pass through existing instances when given as sole argument
if len(args) == 1 and not kwargs and isinstance(args[0], cls):
return args[0]
if len(args) > len(cls._fields):
raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls._fields)))
attrvals = []
# Process positional arguments
for attrname, argval in zip(cls._fields, args):
if attrname in kwargs:
raise TypeError('Got multiple values for attribute %r' % attrname)
attrvals.append(cls._construct(attrname, argval))
# Process keyword arguments
for attrname in cls._fields[len(args):]:
if attrname in kwargs:
argval = kwargs.pop(attrname)
elif attrname in cls.defaults:
argval = cls.defaults[attrname]
else:
raise TypeError('No value for %r given and attribute has no default' % attrname)
attrvals.append(cls._construct(attrname, argval))
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs))
# Parent constructor
basic_args = [
val for attr, val in zip(cls._fields, attrvals)
if attr not in cls.not_in_args
]
obj = CodegenAST.__new__(cls, *basic_args)
# Set attributes
for attr, arg in zip(cls._fields, attrvals):
setattr(obj, attr, arg)
return obj
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self._fields:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def _hashable_content(self):
return tuple([getattr(self, attr) for attr in self._fields])
def __hash__(self):
return super().__hash__()
def _joiner(self, k, indent_level):
return (',\n' + ' '*indent_level) if k in self.indented_args else ', '
def _indented(self, printer, k, v, *args, **kwargs):
il = printer._context['indent_level']
def _print(arg):
if isinstance(arg, Token):
return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs)
else:
return printer._print(arg, *args, **kwargs)
if isinstance(v, Tuple):
joined = self._joiner(k, il).join([_print(arg) for arg in v.args])
if k in self.indented_args:
return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')'
else:
return ('({0},)' if len(v.args) == 1 else '({0})').format(joined)
else:
return _print(v)
def _sympyrepr(self, printer, *args, joiner=', ', **kwargs):
from sympy.printing.printer import printer_context
exclude = kwargs.get('exclude', ())
values = [getattr(self, k) for k in self._fields]
indent_level = printer._context.get('indent_level', 0)
arg_reprs = []
for i, (attr, value) in enumerate(zip(self._fields, values)):
if attr in exclude:
continue
# Skip attributes which have the default value
if attr in self.defaults and value == self.defaults[attr]:
continue
ilvl = indent_level + 4 if attr in self.indented_args else 0
with printer_context(printer, indent_level=ilvl):
indented = self._indented(printer, attr, value, *args, **kwargs)
arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip()))
return "{}({})".format(self.__class__.__name__, joiner.join(arg_reprs))
_sympystr = _sympyrepr
def __repr__(self): # sympy.core.Basic.__repr__ uses sstr
from sympy.printing import srepr
return srepr(self)
def kwargs(self, exclude=(), apply=None):
""" Get instance's attributes as dict of keyword arguments.
Parameters
==========
exclude : collection of str
Collection of keywords to exclude.
apply : callable, optional
Function to apply to all values.
"""
kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}
if apply is not None:
return {k: apply(v) for k, v in kwargs.items()}
else:
return kwargs
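# A minimal sketch of the machinery described in the Token docstring above:
# ``_fields`` drives positional/keyword handling, ``defaults`` fills missing values
# and ``_construct_<attr>`` coerces them. The class name ``_DemoPair`` is
# illustrative only.
class _DemoPair(Token):
    """
    >>> from sympy.codegen.ast import _DemoPair
    >>> _DemoPair('x')
    _DemoPair(Symbol('x'))
    >>> _DemoPair('x', second=3).second
    3
    """
    __slots__ = _fields = ('first', 'second')
    defaults = {'second': Integer(0)}
    _construct_first = staticmethod(sympify)
    _construct_second = staticmethod(sympify)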
class BreakToken(Token):
""" Represents 'break' in C/Python ('exit' in Fortran).
Use the premade instance ``break_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import break_
>>> ccode(break_)
'break'
>>> fcode(break_, source_format='free')
'exit'
"""
break_ = BreakToken()
class ContinueToken(Token):
""" Represents 'continue' in C/Python ('cycle' in Fortran)
Use the premade instance ``continue_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import continue_
>>> ccode(continue_)
'continue'
>>> fcode(continue_, source_format='free')
'cycle'
"""
continue_ = ContinueToken()
class NoneToken(Token):
""" The AST equivalence of Python's NoneType
The corresponding instance of Python's ``None`` is ``none``.
Examples
========
>>> from sympy.codegen.ast import none, Variable
>>> from sympy import pycode
>>> print(pycode(Variable('x').as_Declaration(value=none)))
x = None
"""
def __eq__(self, other):
return other is None or isinstance(other, NoneToken)
def _hashable_content(self):
return ()
def __hash__(self):
return super().__hash__()
none = NoneToken()
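# A minimal sketch of why ``Token._construct`` above compares with ``==`` rather than
# ``is``: the ``none`` singleton equals Python's None without being it. The helper
# name ``_demo_none_equality`` is illustrative only.
def _demo_none_equality():
    """
    >>> from sympy.codegen.ast import none
    >>> none == None
    True
    >>> none is None
    False
    """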
class AssignmentBase(CodegenAST):
""" Abstract base class for Assignment and AugmentedAssignment.
Attributes:
===========
op : str
Symbol for assignment operator, e.g. "=", "+=", etc.
"""
def __new__(cls, lhs, rhs):
lhs = _sympify(lhs)
rhs = _sympify(rhs)
cls._check_args(lhs, rhs)
return super().__new__(cls, lhs, rhs)
@property
def lhs(self):
return self.args[0]
@property
def rhs(self):
return self.args[1]
@classmethod
def _check_args(cls, lhs, rhs):
""" Check arguments to __new__ and raise exception if any problems found.
Derived classes may wish to override this.
"""
from sympy.matrices.expressions.matexpr import (
MatrixElement, MatrixSymbol)
from sympy.tensor.indexed import Indexed
from sympy.tensor.array.expressions import ArrayElement
# Tuple of things that can be on the lhs of an assignment
assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable,
ArrayElement)
if not isinstance(lhs, assignable):
raise TypeError("Cannot assign to lhs of type %s." % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError("Cannot assign a scalar to a matrix.")
elif lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs do not align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError("Cannot assign a matrix to a scalar.")
class Assignment(AssignmentBase):
"""
Represents variable assignment for code generation.
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.codegen.ast import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
Assignment(x, y)
>>> Assignment(x, 0)
Assignment(x, 0)
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
Assignment(A, Matrix([[x, y, z]]))
>>> Assignment(A[0, 1], x)
Assignment(A[0, 1], x)
"""
op = ':='
class AugmentedAssignment(AssignmentBase):
"""
Base class for augmented assignments.
Attributes:
===========
binop : str
Symbol for binary operation being applied in the assignment, such as "+",
"*", etc.
"""
binop = None # type: str
@property
def op(self):
return self.binop + '='
class AddAugmentedAssignment(AugmentedAssignment):
binop = '+'
class SubAugmentedAssignment(AugmentedAssignment):
binop = '-'
class MulAugmentedAssignment(AugmentedAssignment):
binop = '*'
class DivAugmentedAssignment(AugmentedAssignment):
binop = '/'
class ModAugmentedAssignment(AugmentedAssignment):
binop = '%'
# Mapping from binary op strings to AugmentedAssignment subclasses
augassign_classes = {
cls.binop: cls for cls in [
AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
DivAugmentedAssignment, ModAugmentedAssignment
]
}
def aug_assign(lhs, op, rhs):
"""
Create 'lhs op= rhs'.
Explanation
===========
Represents augmented variable assignment for code generation. This is a
convenience function. You can also use the AugmentedAssignment classes
directly, like AddAugmentedAssignment(x, y).
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
op : str
Operator (+, -, /, \\*, %).
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import aug_assign
>>> x, y = symbols('x, y')
>>> aug_assign(x, '+', y)
AddAugmentedAssignment(x, y)
"""
if op not in augassign_classes:
raise ValueError("Unrecognized operator %s" % op)
return augassign_classes[op](lhs, rhs)
class CodeBlock(CodegenAST):
"""
Represents a block of code.
Explanation
===========
For now only assignments are supported. This restriction will be lifted in
the future.
Useful attributes on this object are:
``left_hand_sides``:
Tuple of left-hand sides of assignments, in order.
    ``right_hand_sides``:
Tuple of right-hand sides of assignments, in order.
``free_symbols``: Free symbols of the expressions in the right-hand sides
which do not appear in the left-hand side of an assignment.
Useful methods on this object are:
``topological_sort``:
Class method. Return a CodeBlock with assignments
sorted so that variables are assigned before they
are used.
``cse``:
Return a new CodeBlock with common subexpressions eliminated and
pulled out as assignments.
Examples
========
>>> from sympy import symbols, ccode
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y = symbols('x y')
>>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1))
>>> print(ccode(c))
x = 1;
y = x + 1;
"""
def __new__(cls, *args):
left_hand_sides = []
right_hand_sides = []
for i in args:
if isinstance(i, Assignment):
lhs, rhs = i.args
left_hand_sides.append(lhs)
right_hand_sides.append(rhs)
obj = CodegenAST.__new__(cls, *args)
obj.left_hand_sides = Tuple(*left_hand_sides)
obj.right_hand_sides = Tuple(*right_hand_sides)
return obj
def __iter__(self):
return iter(self.args)
def _sympyrepr(self, printer, *args, **kwargs):
il = printer._context.get('indent_level', 0)
joiner = ',\n' + ' '*il
joined = joiner.join(map(printer._print, self.args))
return ('{}(\n'.format(' '*(il-4) + self.__class__.__name__,) +
' '*il + joined + '\n' + ' '*(il - 4) + ')')
_sympystr = _sympyrepr
@property
def free_symbols(self):
return super().free_symbols - set(self.left_hand_sides)
@classmethod
def topological_sort(cls, assignments):
"""
Return a CodeBlock with topologically sorted assignments so that
variables are assigned before they are used.
        Explanation
        ===========
The existing order of assignments is preserved as much as possible.
This function assumes that variables are assigned to only once.
This is a class constructor so that the default constructor for
CodeBlock can error when variables are used before they are assigned.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> assignments = [
... Assignment(x, y + z),
... Assignment(y, z + 1),
... Assignment(z, 2),
... ]
>>> CodeBlock.topological_sort(assignments)
CodeBlock(
Assignment(z, 2),
Assignment(y, z + 1),
Assignment(x, y + z)
)
"""
if not all(isinstance(i, Assignment) for i in assignments):
# Will support more things later
raise NotImplementedError("CodeBlock.topological_sort only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in assignments):
raise NotImplementedError("CodeBlock.topological_sort does not yet work with AugmentedAssignments")
# Create a graph where the nodes are assignments and there is a directed edge
# between nodes that use a variable and nodes that assign that
# variable, like
# [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)]
# If we then topologically sort these nodes, they will be in
# assignment order, like
# x := 1
# y := x + 1
# z := y + z
# A = The nodes
#
# enumerate keeps nodes in the same order they are already in if
# possible. It will also allow us to handle duplicate assignments to
# the same variable when those are implemented.
A = list(enumerate(assignments))
# var_map = {variable: [nodes for which this variable is assigned to]}
# like {x: [(1, x := y + z), (4, x := 2 * w)], ...}
var_map = defaultdict(list)
for node in A:
i, a = node
var_map[a.lhs].append(node)
# E = Edges in the graph
E = []
for dst_node in A:
i, a = dst_node
for s in a.rhs.free_symbols:
for src_node in var_map[s]:
E.append((src_node, dst_node))
ordered_assignments = topological_sort([A, E])
# De-enumerate the result
return cls(*[a for i, a in ordered_assignments])
def cse(self, symbols=None, optimizations=None, postprocess=None,
order='canonical'):
"""
Return a new code block with common subexpressions eliminated.
Explanation
===========
See the docstring of :func:`sympy.simplify.cse_main.cse` for more
information.
Examples
========
>>> from sympy import symbols, sin
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> c = CodeBlock(
... Assignment(x, 1),
... Assignment(y, sin(x) + 1),
... Assignment(z, sin(x) - 1),
... )
...
>>> c.cse()
CodeBlock(
Assignment(x, 1),
Assignment(x0, sin(x)),
Assignment(y, x0 + 1),
Assignment(z, x0 - 1)
)
"""
from sympy.simplify.cse_main import cse
# Check that the CodeBlock only contains assignments to unique variables
if not all(isinstance(i, Assignment) for i in self.args):
# Will support more things later
raise NotImplementedError("CodeBlock.cse only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in self.args):
raise NotImplementedError("CodeBlock.cse does not yet work with AugmentedAssignments")
for i, lhs in enumerate(self.left_hand_sides):
if lhs in self.left_hand_sides[:i]:
raise NotImplementedError("Duplicate assignments to the same "
"variable are not yet supported (%s)" % lhs)
# Ensure new symbols for subexpressions do not conflict with existing
existing_symbols = self.atoms(Symbol)
if symbols is None:
symbols = numbered_symbols()
symbols = filter_symbols(symbols, existing_symbols)
replacements, reduced_exprs = cse(list(self.right_hand_sides),
symbols=symbols, optimizations=optimizations, postprocess=postprocess,
order=order)
new_block = [Assignment(var, expr) for var, expr in
zip(self.left_hand_sides, reduced_exprs)]
new_assignments = [Assignment(var, expr) for var, expr in replacements]
return self.topological_sort(new_assignments + new_block)
class For(Token):
"""Represents a 'for-loop' in the code.
Expressions are of the form:
"for target in iter:
body..."
Parameters
==========
target : symbol
iter : iterable
body : CodeBlock or iterable
        When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Range
>>> from sympy.codegen.ast import aug_assign, For
>>> x, i, j, k = symbols('x i j k')
>>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)])
>>> for_i # doctest: -NORMALIZE_WHITESPACE
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
>>> for_ji = For(j, Range(7), [for_i])
>>> for_ji # doctest: -NORMALIZE_WHITESPACE
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
>>> for_kji =For(k, Range(5), [for_ji])
>>> for_kji # doctest: -NORMALIZE_WHITESPACE
For(k, iterable=Range(0, 5, 1), body=CodeBlock(
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
))
"""
__slots__ = _fields = ('target', 'iterable', 'body')
_construct_target = staticmethod(_sympify)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def _construct_iterable(cls, itr):
if not iterable(itr):
raise TypeError("iterable must be an iterable")
if isinstance(itr, list): # _sympify errors on lists because they are mutable
itr = tuple(itr)
return _sympify(itr)
class String(Atom, Token):
""" SymPy object representing a string.
Atomic object which is not an expression (as opposed to Symbol).
Parameters
==========
text : str
Examples
========
>>> from sympy.codegen.ast import String
>>> f = String('foo')
>>> f
foo
>>> str(f)
'foo'
>>> f.text
'foo'
>>> print(repr(f))
String('foo')
"""
__slots__ = _fields = ('text',)
not_in_args = ['text']
is_Atom = True
@classmethod
def _construct_text(cls, text):
if not isinstance(text, str):
raise TypeError("Argument text is not a string type.")
return text
def _sympystr(self, printer, *args, **kwargs):
return self.text
def kwargs(self, exclude = (), apply = None):
return {}
#to be removed when Atom is given a suitable func
@property
def func(self):
return lambda: self
def _latex(self, printer):
from sympy.printing.latex import latex_escape
return r'\texttt{{"{}"}}'.format(latex_escape(self.text))
class QuotedString(String):
""" Represents a string which should be printed with quotes. """
class Comment(String):
""" Represents a comment. """
class Node(Token):
""" Subclass of Token, carrying the attribute 'attrs' (Tuple)
Examples
========
>>> from sympy.codegen.ast import Node, value_const, pointer_const
>>> n1 = Node([value_const])
>>> n1.attr_params('value_const') # get the parameters of attribute (by name)
()
>>> from sympy.codegen.fnodes import dimension
>>> n2 = Node([value_const, dimension(5, 3)])
>>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance)
()
>>> n2.attr_params('dimension') # get the parameters of attribute (by name)
(5, 3)
>>> n2.attr_params(pointer_const) is None
True
"""
__slots__: tuple[str, ...] = ('attrs',)
_fields = __slots__
defaults: dict[str, Any] = {'attrs': Tuple()}
_construct_attrs = staticmethod(_mk_Tuple)
def attr_params(self, looking_for):
""" Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """
for attr in self.attrs:
if str(attr.name) == str(looking_for):
return attr.parameters
class Type(Token):
""" Represents a type.
Explanation
===========
The naming is a super-set of NumPy naming. Type has a classmethod
``from_expr`` which offer type deduction. It also has a method
``cast_check`` which casts the argument to its type, possibly raising an
exception if rounding error is not within tolerances, or if the value is not
representable by the underlying data type (e.g. unsigned integers).
Parameters
==========
name : str
Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two
would use the ``Type`` sub-classes ``IntType`` and ``FloatType`` respectively).
If a ``Type`` instance is given, the said instance is returned.
Examples
========
>>> from sympy.codegen.ast import Type
>>> t = Type.from_expr(42)
>>> t
integer
>>> print(repr(t))
IntBaseType(String('integer'))
>>> from sympy.codegen.ast import uint8
>>> uint8.cast_check(-1) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> from sympy.codegen.ast import float32
>>> v6 = 0.123456
>>> float32.cast_check(v6)
0.123456
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50')
>>> from sympy import cxxcode
>>> from sympy.codegen.ast import Declaration, Variable
>>> cxxcode(Declaration(Variable('x', type=boost_mp50)))
'boost::multiprecision::cpp_dec_float_50 x'
References
==========
.. [1] https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
__slots__: tuple[str, ...] = ('name',)
_fields = __slots__
_construct_name = String
def _sympystr(self, printer, *args, **kwargs):
return str(self.name)
@classmethod
def from_expr(cls, expr):
""" Deduces type from an expression or a ``Symbol``.
Parameters
==========
expr : number or SymPy object
The type will be deduced from type or properties.
Examples
========
>>> from sympy.codegen.ast import Type, integer, complex_
>>> Type.from_expr(2) == integer
True
>>> from sympy import Symbol
>>> Type.from_expr(Symbol('z', complex=True)) == complex_
True
>>> Type.from_expr(sum) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Could not deduce type from expr.
Raises
======
ValueError when type deduction fails.
"""
if isinstance(expr, (float, Float)):
return real
if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False):
return integer
if getattr(expr, 'is_real', False):
return real
if isinstance(expr, complex) or getattr(expr, 'is_complex', False):
return complex_
if isinstance(expr, bool) or getattr(expr, 'is_Relational', False):
return bool_
else:
raise ValueError("Could not deduce type from expr.")
def _check(self, value):
pass
def cast_check(self, value, rtol=None, atol=0, precision_targets=None):
""" Casts a value to the data type of the instance.
Parameters
==========
value : number
rtol : floating point number
Relative tolerance. (will be deduced if not given).
atol : floating point number
Absolute tolerance (in addition to ``rtol``).
precision_targets : dict
Maps substitutions for Type, e.g. {integer: int64, real: float32}
Examples
========
>>> from sympy.codegen.ast import integer, float32, int8
>>> integer.cast_check(3.0) == 3
True
>>> float32.cast_check(1e-40) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> int8.cast_check(256) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float64
>>> float64.cast_check(v10)
12345.67894
>>> from sympy import Float
>>> v18 = Float('0.123456789012345646')
>>> float64.cast_check(v18)
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float80
>>> float80.cast_check(v18)
0.123456789012345649
"""
val = sympify(value)
ten = Integer(10)
exp10 = getattr(self, 'decimal_dig', None)
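# when no relative tolerance is given, deduce one from the number of
# decimal digits the type guarantees (falling back to 1e-15 for types
# without a ``decimal_dig`` attribute)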
if rtol is None:
rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10)
def tol(num):
return atol + rtol*abs(num)
new_val = self.cast_nocheck(value)
self._check(new_val)
delta = new_val - val
if abs(delta) > tol(val): # rounding, e.g. int(3.5) != 3.5
raise ValueError("Casting gives a significantly different value.")
return new_val
def _latex(self, printer):
from sympy.printing.latex import latex_escape
type_name = latex_escape(self.__class__.__name__)
name = latex_escape(self.name.text)
return r"\text{{{}}}\left(\texttt{{{}}}\right)".format(type_name, name)
class IntBaseType(Type):
""" Integer base type, contains no size information. """
__slots__ = ()
cast_nocheck = lambda self, i: Integer(int(i))
class _SizedIntType(IntBaseType):
__slots__ = ('nbits',)
_fields = Type._fields + __slots__
_construct_nbits = Integer
def _check(self, value):
if value < self.min:
raise ValueError("Value is too small: %d < %d" % (value, self.min))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
class SignedIntType(_SizedIntType):
""" Represents a signed integer type. """
__slots__ = ()
@property
def min(self):
return -2**(self.nbits-1)
@property
def max(self):
return 2**(self.nbits-1) - 1
class UnsignedIntType(_SizedIntType):
""" Represents an unsigned integer type. """
__slots__ = ()
@property
def min(self):
return 0
@property
def max(self):
return 2**self.nbits - 1
two = Integer(2)
class FloatBaseType(Type):
""" Represents a floating point number type. """
__slots__ = ()
cast_nocheck = Float
class FloatType(FloatBaseType):
""" Represents a floating point type with fixed bit width.
A base-2 representation with one sign bit is assumed.
Parameters
==========
name : str
Name of the type.
nbits : integer
Number of bits used (storage).
nmant : integer
Number of bits used to represent the mantissa.
nexp : integer
Number of bits used to represent the exponent.
Examples
========
>>> from sympy import S
>>> from sympy.codegen.ast import FloatType
>>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5)
>>> half_precision.max
65504
>>> half_precision.tiny == S(2)**-14
True
>>> half_precision.eps == S(2)**-10
True
>>> half_precision.dig == 3
True
>>> half_precision.decimal_dig == 5
True
>>> half_precision.cast_check(1.0)
1.0
>>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
"""
__slots__ = ('nbits', 'nmant', 'nexp',)
_fields = Type._fields + __slots__
_construct_nbits = _construct_nmant = _construct_nexp = Integer
@property
def max_exponent(self):
""" The largest positive number n, such that 2**(n - 1) is a representable finite value. """
# cf. C++'s ``std::numeric_limits::max_exponent``
return two**(self.nexp - 1)
@property
def min_exponent(self):
""" The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """
# cf. C++'s ``std::numeric_limits::min_exponent``
return 3 - self.max_exponent
@property
def max(self):
""" Maximum value representable. """
return (1 - two**-(self.nmant+1))*two**self.max_exponent
@property
def tiny(self):
""" The minimum positive normalized value. """
# See C macros: FLT_MIN, DBL_MIN, LDBL_MIN
# or C++'s ``std::numeric_limits::min``
# or numpy.finfo(dtype).tiny
return two**(self.min_exponent - 1)
@property
def eps(self):
""" Difference between 1.0 and the next representable value. """
return two**(-self.nmant)
@property
def dig(self):
""" Number of decimal digits that are guaranteed to be preserved in text.
When converting text -> float -> text, you are guaranteed that at least ``dig``
digits are preserved with respect to rounding or overflow.
"""
from sympy.functions import floor, log
return floor(self.nmant * log(2)/log(10))
@property
def decimal_dig(self):
""" Number of digits needed to store & load without loss.
Explanation
===========
Number of decimal digits needed to guarantee that two consecutive conversions
(float -> text -> float) are idempotent. This is useful when one does not want
to lose precision due to rounding errors when storing a floating point value
as text.
"""
from sympy.functions import ceiling, log
return ceiling((self.nmant + 1) * log(2)/log(10) + 1)
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
if value == oo: # float(oo) or oo
return float(oo)
elif value == -oo: # float(-oo) or -oo
return float(-oo)
return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig)
def _check(self, value):
if value < -self.max:
raise ValueError("Value is too small: %d < %d" % (value, -self.max))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
if abs(value) < self.tiny:
raise ValueError("Smallest (absolute) value for data type bigger than new value.")
class ComplexBaseType(FloatBaseType):
__slots__ = ()
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
from sympy.functions import re, im
return (
super().cast_nocheck(re(value)) +
super().cast_nocheck(im(value))*1j
)
def _check(self, value):
from sympy.functions import re, im
super()._check(re(value))
super()._check(im(value))
class ComplexType(ComplexBaseType, FloatType):
""" Represents a complex floating point number. """
__slots__ = ()
# NumPy types:
intc = IntBaseType('intc')
intp = IntBaseType('intp')
int8 = SignedIntType('int8', 8)
int16 = SignedIntType('int16', 16)
int32 = SignedIntType('int32', 32)
int64 = SignedIntType('int64', 64)
uint8 = UnsignedIntType('uint8', 8)
uint16 = UnsignedIntType('uint16', 16)
uint32 = UnsignedIntType('uint32', 32)
uint64 = UnsignedIntType('uint64', 64)
float16 = FloatType('float16', 16, nexp=5, nmant=10) # IEEE 754 binary16, Half precision
float32 = FloatType('float32', 32, nexp=8, nmant=23) # IEEE 754 binary32, Single precision
float64 = FloatType('float64', 64, nexp=11, nmant=52) # IEEE 754 binary64, Double precision
float80 = FloatType('float80', 80, nexp=15, nmant=63) # x86 extended precision (1 integer part bit), "long double"
float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision
float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision
complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits')))
complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits')))
# Generic types (precision may be chosen by code printers):
untyped = Type('untyped')
real = FloatBaseType('real')
integer = IntBaseType('integer')
complex_ = ComplexBaseType('complex')
bool_ = Type('bool')
class Attribute(Token):
""" Attribute (possibly parametrized)
For use with :class:`sympy.codegen.ast.Node` (which takes instances of
``Attribute`` as ``attrs``).
Parameters
==========
name : str
parameters : Tuple
Examples
========
>>> from sympy.codegen.ast import Attribute
>>> volatile = Attribute('volatile')
>>> volatile
volatile
>>> print(repr(volatile))
Attribute(String('volatile'))
>>> a = Attribute('foo', [1, 2, 3])
>>> a
foo(1, 2, 3)
>>> a.parameters == (1, 2, 3)
True
"""
__slots__ = _fields = ('name', 'parameters')
defaults = {'parameters': Tuple()}
_construct_name = String
_construct_parameters = staticmethod(_mk_Tuple)
def _sympystr(self, printer, *args, **kwargs):
result = str(self.name)
if self.parameters:
result += '(%s)' % ', '.join(map(lambda arg: printer._print(
arg, *args, **kwargs), self.parameters))
return result
value_const = Attribute('value_const')
pointer_const = Attribute('pointer_const')
class Variable(Node):
""" Represents a variable.
Parameters
==========
symbol : Symbol
type : Type (optional)
Type of the variable.
attrs : iterable of Attribute instances
Will be stored as a Tuple.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, float32, integer
>>> x = Symbol('x')
>>> v = Variable(x, type=float32)
>>> v.attrs
()
>>> v == Variable('x')
False
>>> v == Variable('x', type=float32)
True
>>> v
Variable(x, type=float32)
One may also construct a ``Variable`` instance with the type deduced from
assumptions about the symbol using the ``deduced`` classmethod:
>>> i = Symbol('i', integer=True)
>>> v = Variable.deduced(i)
>>> v.type == integer
True
>>> v == Variable('i')
False
>>> from sympy.codegen.ast import value_const
>>> value_const in v.attrs
False
>>> w = Variable('w', attrs=[value_const])
>>> w
Variable(w, attrs=(value_const,))
>>> value_const in w.attrs
True
>>> w.as_Declaration(value=42)
Declaration(Variable(w, value=42, attrs=(value_const,)))
"""
__slots__ = ('symbol', 'type', 'value')
_fields = __slots__ + Node._fields
defaults = Node.defaults.copy()
defaults.update({'type': untyped, 'value': none})
_construct_symbol = staticmethod(sympify)
_construct_value = staticmethod(sympify)
@classmethod
def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True):
""" Alt. constructor with type deduction from ``Type.from_expr``.
Deduces type primarily from ``symbol``, secondarily from ``value``.
Parameters
==========
symbol : Symbol
value : expr
(optional) value of the variable.
attrs : iterable of Attribute instances
cast_check : bool
Whether to apply ``Type.cast_check`` on ``value``.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, complex_
>>> n = Symbol('n', integer=True)
>>> str(Variable.deduced(n).type)
'integer'
>>> x = Symbol('x', real=True)
>>> v = Variable.deduced(x)
>>> v.type
real
>>> z = Symbol('z', complex=True)
>>> Variable.deduced(z).type == complex_
True
"""
if isinstance(symbol, Variable):
return symbol
try:
type_ = Type.from_expr(symbol)
except ValueError:
type_ = Type.from_expr(value)
if value is not None and cast_check:
value = type_.cast_check(value)
return cls(symbol, type=type_, value=value, attrs=attrs)
def as_Declaration(self, **kwargs):
""" Convenience method for creating a Declaration instance.
Explanation
===========
If the Declaration needs to wrap a modified variable, keyword
arguments may be passed (overriding e.g. the ``value`` of the
Variable instance).
Examples
========
>>> from sympy.codegen.ast import Variable, NoneToken
>>> x = Variable('x')
>>> decl1 = x.as_Declaration()
>>> # value is special NoneToken() which must be tested with == operator
>>> decl1.variable.value is None # won't work
False
>>> decl1.variable.value == None # not PEP-8 compliant
True
>>> decl1.variable.value == NoneToken() # OK
True
>>> decl2 = x.as_Declaration(value=42.0)
>>> decl2.variable.value == 42
True
"""
kw = self.kwargs()
kw.update(kwargs)
return Declaration(self.func(**kw))
def _relation(self, rhs, op):
try:
rhs = _sympify(rhs)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, rhs))
return op(self, rhs, evaluate=False)
__lt__ = lambda self, other: self._relation(other, Lt)
__le__ = lambda self, other: self._relation(other, Le)
__ge__ = lambda self, other: self._relation(other, Ge)
__gt__ = lambda self, other: self._relation(other, Gt)
class Pointer(Variable):
""" Represents a pointer. See ``Variable``.
Examples
========
Can create instances of ``Element``:
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Pointer
>>> i = Symbol('i', integer=True)
>>> p = Pointer('x')
>>> p[i+1]
Element(x, indices=(i + 1,))
"""
__slots__ = ()
def __getitem__(self, key):
try:
return Element(self.symbol, key)
except TypeError:
return Element(self.symbol, (key,))
class Element(Token):
""" Element in (a possibly N-dimensional) array.
Examples
========
>>> from sympy.codegen.ast import Element
>>> elem = Element('x', 'ijk')
>>> elem.symbol.name == 'x'
True
>>> elem.indices
(i, j, k)
>>> from sympy import ccode
>>> ccode(elem)
'x[i][j][k]'
>>> ccode(Element('x', 'ijk', strides='lmn', offset='o'))
'x[i*l + j*m + k*n + o]'
"""
__slots__ = _fields = ('symbol', 'indices', 'strides', 'offset')
defaults = {'strides': none, 'offset': none}
_construct_symbol = staticmethod(sympify)
_construct_indices = staticmethod(lambda arg: Tuple(*arg))
_construct_strides = staticmethod(lambda arg: Tuple(*arg))
_construct_offset = staticmethod(sympify)
class Declaration(Token):
""" Represents a variable declaration
Parameters
==========
variable : Variable
Examples
========
>>> from sympy.codegen.ast import Declaration, NoneToken, untyped
>>> z = Declaration('z')
>>> z.variable.type == untyped
True
>>> # value is special NoneToken() which must be tested with == operator
>>> z.variable.value is None # won't work
False
>>> z.variable.value == None # not PEP-8 compliant
True
>>> z.variable.value == NoneToken() # OK
True
"""
__slots__ = _fields = ('variable',)
_construct_variable = Variable
class While(Token):
""" Represents a 'for-loop' in the code.
Expressions are of the form:
"while condition:
body..."
Parameters
==========
condition : expression convertible to Boolean
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Gt, Abs
>>> from sympy.codegen import aug_assign, Assignment, While
>>> x, dx = symbols('x dx')
>>> expr = 1 - x**2
>>> whl = While(Gt(Abs(dx), 1e-9), [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx)
... ])
"""
__slots__ = _fields = ('condition', 'body')
_construct_condition = staticmethod(lambda cond: _sympify(cond))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class Scope(Token):
""" Represents a scope in the code.
Parameters
==========
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
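Examples
========
The body is normalized into a ``CodeBlock``:
>>> from sympy import symbols
>>> from sympy.codegen.ast import Assignment, CodeBlock, Scope
>>> x, y = symbols('x y')
>>> sc = Scope([Assignment(x, y)])
>>> sc.body == CodeBlock(Assignment(x, y))
True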
"""
__slots__ = _fields = ('body',)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class Stream(Token):
""" Represents a stream.
There are two predefined Stream instances ``stdout`` & ``stderr``.
Parameters
==========
name : str
Examples
========
>>> from sympy import pycode, Symbol
>>> from sympy.codegen.ast import Print, stderr, QuotedString
>>> print(pycode(Print(['x'], file=stderr)))
print(x, file=sys.stderr)
>>> x = Symbol('x')
>>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x"
print("x", file=sys.stderr)
"""
__slots__ = _fields = ('name',)
_construct_name = String
stdout = Stream('stdout')
stderr = Stream('stderr')
class Print(Token):
""" Represents print command in the code.
Parameters
==========
print_args : iterable of Basic instances (or convertible to such through sympify)
format_string : str, optional
file : Stream, optional
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy import pycode
>>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g")))
print("coordinate: %12.5g %12.5g" % (x, y))
"""
__slots__ = _fields = ('print_args', 'format_string', 'file')
defaults = {'format_string': none, 'file': none}
_construct_print_args = staticmethod(_mk_Tuple)
_construct_format_string = QuotedString
_construct_file = Stream
class FunctionPrototype(Node):
""" Represents a function prototype
Allows the user to generate a forward declaration in e.g. C/C++.
Parameters
==========
return_type : Type
name : str
parameters: iterable of Variable instances
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
"""
__slots__ = ('return_type', 'name', 'parameters')
_fields: tuple[str, ...] = __slots__ + Node._fields
_construct_return_type = Type
_construct_name = String
@staticmethod
def _construct_parameters(args):
def _var(arg):
if isinstance(arg, Declaration):
return arg.variable
elif isinstance(arg, Variable):
return arg
else:
return Variable.deduced(arg)
return Tuple(*map(_var, args))
@classmethod
def from_FunctionDefinition(cls, func_def):
if not isinstance(func_def, FunctionDefinition):
raise TypeError("func_def is not an instance of FunctionDefinition")
return cls(**func_def.kwargs(exclude=('body',)))
class FunctionDefinition(FunctionPrototype):
""" Represents a function definition in the code.
Parameters
==========
return_type : Type
name : str
parameters: iterable of Variable instances
body : CodeBlock or iterable
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
>>> from sympy.codegen.ast import FunctionDefinition, Return
>>> body = [Return(x*y)]
>>> fd = FunctionDefinition.from_FunctionPrototype(fp, body)
>>> print(ccode(fd))
double foo(double x, double y){
return x*y;
}
"""
__slots__ = ('body', )
_fields = FunctionPrototype._fields[:-1] + __slots__ + Node._fields
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def from_FunctionPrototype(cls, func_proto, body):
if not isinstance(func_proto, FunctionPrototype):
raise TypeError("func_proto is not an instance of FunctionPrototype")
return cls(body=body, **func_proto.kwargs())
class Return(Token):
""" Represents a return command in the code.
Parameters
==========
return : Basic
Examples
========
>>> from sympy.codegen.ast import Return
>>> from sympy.printing.pycode import pycode
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> print(pycode(Return(x)))
return x
"""
__slots__ = _fields = ('return',)
_construct_return = staticmethod(_sympify)
class FunctionCall(Token, Expr):
""" Represents a call to a function in the code.
Parameters
==========
name : str
function_args : Tuple
Examples
========
>>> from sympy.codegen.ast import FunctionCall
>>> from sympy import pycode
>>> fcall = FunctionCall('foo', 'bar baz'.split())
>>> print(pycode(fcall))
foo(bar, baz)
"""
__slots__ = _fields = ('name', 'function_args')
_construct_name = String
_construct_function_args = staticmethod(lambda args: Tuple(*args))
"""
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
.. warning::
This module is intended for educational purposes only. Do not use the
functions in this module for real cryptographic applications. If you wish
to encrypt real data, we recommend using something like the `cryptography
<https://cryptography.io/en/latest/>`_ module.
"""
from string import whitespace, ascii_uppercase as uppercase, printable
from functools import reduce
import warnings
from itertools import cycle
from sympy.core import Symbol
from sympy.core.numbers import igcdex, mod_inverse, igcd, Rational
from sympy.core.random import _randrange, _randint
from sympy.matrices import Matrix
from sympy.ntheory import isprime, primitive_root, factorint
from sympy.ntheory import totient as _euler
from sympy.ntheory import reduced_totient as _carmichael
from sympy.ntheory.generate import nextprime
from sympy.ntheory.modular import crt
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import as_int, filldedent, translate
from sympy.utilities.iterables import uniq, multiset
class NonInvertibleCipherWarning(RuntimeWarning):
"""A warning raised if the cipher is not invertible."""
def __init__(self, msg):
self.fullMessage = msg
def __str__(self):
return '\n\t' + self.fullMessage
def warn(self, stacklevel=3):
warnings.warn(self, stacklevel=stacklevel)
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = isinstance(s, str)
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first. A ValueError is raised if
a) there are duplicate characters in ``symbols`` or
b) there are characters in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted({
i for i in symbols if symbols.count(i) > 1}))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
# remove from syms characters in key0
return key0 + translate(''.join(syms), None, key0)
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
filter : bool
When ``True``, characters of ``phrase`` that are not in ``symbols``
are silently removed instead of raising a ``ValueError``.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
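# Normalize ``msg`` and ``key`` to the alphabet ``alp`` (upper-case A-Z by
# default, or ``default`` when given), silently dropping any character not
# in the alphabet; returns the tuple (msg, key, alphabet).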
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Parameters
==========
key : int
The secret key.
msg : str
Plaintext of upper-case letters.
Returns
=======
str
Ciphertext of upper-case letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
adding ``(k mod 26)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
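For example, with a key of 1, "A" becomes "B" and "Z" wraps around
to "A":
>>> encipher_shift('AZ', 1)
'BA'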
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
References
==========
.. [1] https://en.wikipedia.org/wiki/Caesar_cipher
.. [2] http://mathworld.wolfram.com/CaesarsMethod.html
See Also
========
decipher_shift
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
def encipher_rot13(msg, symbols=None):
"""
Performs the ROT13 encryption on a given plaintext ``msg``.
Explanation
===========
ROT13 is a substitution cipher which substitutes each letter
in the plaintext message for the letter furthest away from it
in the English alphabet.
Equivalently, it is just a Caesar (shift) cipher with a shift
key of 13 (midway point of the alphabet).
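Examples
========
>>> from sympy.crypto.crypto import encipher_rot13
>>> encipher_rot13('GONAVYBEATARMY')
'TBANILORNGNEZL'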
References
==========
.. [1] https://en.wikipedia.org/wiki/ROT13
See Also
========
decipher_rot13
encipher_shift
"""
return encipher_shift(msg, 13, symbols)
def decipher_rot13(msg, symbols=None):
"""
Performs the ROT13 decryption on a given plaintext ``msg``.
Explanation
===========
``decipher_rot13`` is equivalent to ``encipher_rot13``, since applying
``decipher_shift`` or ``encipher_shift`` with a key of 13 returns the
same result. It has nonetheless been defined explicitly here for
consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_rot13, decipher_rot13
>>> msg = 'GONAVYBEATARMY'
>>> ciphertext = encipher_rot13(msg);ciphertext
'TBANILORNGNEZL'
>>> decipher_rot13(ciphertext)
'GONAVYBEATARMY'
>>> encipher_rot13(msg) == decipher_rot13(msg)
True
>>> msg == decipher_rot13(ciphertext)
True
"""
return decipher_shift(msg, 13, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Parameters
==========
msg : str
Characters that appear in ``symbols``.
key : tuple of int
The secret key ``(a, b)``; a pair of integers with ``gcd(a, N) = 1``.
symbols
String of characters (default = uppercase letters).
When no symbols are given, ``msg`` is converted to upper case
letters and all other characters are ignored.
Returns
=======
ct
String of characters (the ciphertext message)
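Examples
========
>>> from sympy.crypto.crypto import encipher_affine
>>> encipher_affine("GO NAVY BEAT ARMY", (3, 1))
'TROBMVENBGBALV'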
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
replacing ``x`` by ``a*x + b (mod N)``, for each element
``x`` in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
References
==========
.. [1] https://en.wikipedia.org/wiki/Affine_cipher
See Also
========
decipher_affine
"""
msg, _, A = _prep(msg, '', symbols)
N = len(A)
a, b = key
assert gcd(a, N) == 1
if _inverse:
c = mod_inverse(a, N)
d = -b*c
a, b = c, d
B = ''.join([A[(a*i + b) % N] for i in range(N)])
return translate(msg, A, B)
def decipher_affine(msg, key, symbols=None):
r"""
Return the deciphered text that was made from the mapping,
`x \rightarrow ax+b` (mod `N`), where ``N`` is the
number of characters in the alphabet. Deciphering is done by
reciphering with a new key: `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
Examples
========
>>> from sympy.crypto.crypto import encipher_affine, decipher_affine
>>> msg = "GO NAVY BEAT ARMY"
>>> key = (3, 1)
>>> encipher_affine(msg, key)
'TROBMVENBGBALV'
>>> decipher_affine(_, key)
'GONAVYBEATARMY'
See Also
========
encipher_affine
"""
return encipher_affine(msg, key, symbols, _inverse=True)
def encipher_atbash(msg, symbols=None):
r"""
Enciphers a given ``msg`` into its Atbash ciphertext and returns it.
Explanation
===========
Atbash is a substitution cipher originally used to encrypt the Hebrew
alphabet. Atbash works on the principle of mapping each alphabet to its
reverse / counterpart (i.e. a would map to z, b to y etc.)
Atbash is functionally equivalent to the affine cipher with ``a = 25``
and ``b = 25``
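Examples
========
>>> from sympy.crypto.crypto import encipher_atbash
>>> encipher_atbash('GONAVYBEATARMY')
'TLMZEBYVZGZINB'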
See Also
========
decipher_atbash
"""
return encipher_affine(msg, (25, 25), symbols)
def decipher_atbash(msg, symbols=None):
r"""
Deciphers a given ``msg`` using Atbash cipher and returns it.
Explanation
===========
``decipher_atbash`` is functionally equivalent to ``encipher_atbash``.
However, it has still been added as a separate function to maintain
consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_atbash, decipher_atbash
>>> msg = 'GONAVYBEATARMY'
>>> encipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> decipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> encipher_atbash(msg) == decipher_atbash(msg)
True
>>> msg == encipher_atbash(encipher_atbash(msg))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Atbash
See Also
========
encipher_atbash
"""
return decipher_affine(msg, (25, 25), symbols)
#################### substitution cipher ###########################
def encipher_substitution(msg, old, new=None):
r"""
Returns the ciphertext obtained by replacing each character that
appears in ``old`` with the corresponding character in ``new``.
If ``old`` is a mapping, then new is ignored and the replacements
defined by ``old`` are used.
Explanation
===========
This is more general than the affine cipher in that the key can
only be recovered by determining the mapping for each symbol.
Though in practice, once a few symbols are recognized the mappings
for other characters can be quickly guessed.
Examples
========
>>> from sympy.crypto.crypto import encipher_substitution, AZ
>>> old = 'OEYAG'
>>> new = '034^6'
>>> msg = AZ("go navy! beat army!")
>>> ct = encipher_substitution(msg, old, new); ct
'60N^V4B3^T^RM4'
To decrypt a substitution, reverse the last two arguments:
>>> encipher_substitution(ct, new, old)
'GONAVYBEATARMY'
In the special case where ``old`` and ``new`` are a permutation of
order 2 (representing a transposition of characters) their order
is immaterial:
>>> old = 'NAVY'
>>> new = 'ANYV'
>>> encipher = lambda x: encipher_substitution(x, old, new)
>>> encipher('NAVY')
'ANYV'
>>> encipher(_)
'NAVY'
The substitution cipher, in general, is a method
whereby "units" (not necessarily single characters) of plaintext
are replaced with ciphertext according to a regular system.
>>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc']))
>>> print(encipher_substitution('abc', ords))
\97\98\99
References
==========
.. [1] https://en.wikipedia.org/wiki/Substitution_cipher
"""
return translate(msg, old, new)
######################################################################
#################### Vigenere cipher examples ########################
######################################################################
def encipher_vigenere(msg, key, symbols=None):
"""
Performs the Vigenere cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Examples
========
>>> from sympy.crypto.crypto import encipher_vigenere, AZ
>>> key = "encrypt"
>>> msg = "meet me on monday"
>>> encipher_vigenere(msg, key)
'QRGKKTHRZQEBPR'
Section 1 of the Kryptos sculpture at the CIA headquarters
uses this cipher and also changes the order of the
alphabet [2]_. Here is the first line of that section of
the sculpture:
>>> from sympy.crypto.crypto import decipher_vigenere, padded_key
>>> alp = padded_key('KRYPTOS', AZ())
>>> key = 'PALIMPSEST'
>>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ'
>>> decipher_vigenere(msg, key, alp)
'BETWEENSUBTLESHADINGANDTHEABSENC'
Explanation
===========
The Vigenere cipher is named after Blaise de Vigenere, a sixteenth
century diplomat and cryptographer, by a historical accident.
Vigenere actually invented a different and more complicated cipher.
The so-called *Vigenere cipher* was actually invented
by Giovan Batista Belaso in 1553.
This cipher was used in the 1800's, for example, during the American
Civil War. The Confederacy used a brass cipher disk to implement the
Vigenere cipher (now on display in the NSA Museum in Fort
Meade) [1]_.
The Vigenere cipher is a generalization of the shift cipher.
Whereas the shift cipher shifts each letter by the same amount
(that amount being the key of the shift cipher) the Vigenere
cipher shifts a letter by an amount determined by the key (which is
a word or phrase known only to the sender and receiver).
For example, if the key was a single letter, such as "C", then the
so-called Vigenere cipher is actually a shift cipher with a
shift of `2` (since "C" is the 2nd letter of the alphabet, if
you start counting at `0`). If the key was a word with two
letters, such as "CA", then the so-called Vigenere cipher will
shift letters in even positions by `2` and letters in odd positions
are left alone (shifted by `0`, since "A" is the 0th letter, if
you start counting at `0`).
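For example, with the key "CA" the letters in even positions are
shifted by `2` while those in odd positions are unchanged:
>>> encipher_vigenere("ABCD", "CA")
'CBED'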
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
(the plaintext)
``key``: a string of characters that appear in ``symbols``
(the secret key)
``symbols``: a string of letters defining the alphabet
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``key`` a list ``L1`` of
corresponding integers. Let ``n1 = len(L1)``.
2. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
3. Break ``L2`` up sequentially into sublists of size
``n1``; the last sublist may be smaller than ``n1``
4. For each of these sublists ``L`` of ``L2``, compute a
new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)``,
for each ``i``.
5. Assemble these lists ``C`` by concatenation into a new
list of length ``n2``.
6. Compute from the new list a string ``ct`` of
corresponding letters.
Once it is known that the key is, say, `n` characters long,
frequency analysis can be applied to every `n`-th letter of
the ciphertext to determine the plaintext. This method is
called *Kasiski examination* (although it was first discovered
by Babbage). If the key is as long as the message and is
comprised of randomly selected characters -- a one-time pad -- the
message is theoretically unbreakable.
The cipher Vigenere actually discovered is an "auto-key" cipher
described as follows.
ALGORITHM:
INPUT:
``key``: a string of letters (the secret key)
``msg``: string of letters (the plaintext message)
OUTPUT:
``ct``: string of upper-case letters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
2. Let ``n1`` be the length of the key. Append to the
string ``key`` the first ``n2 - n1`` characters of
the plaintext message. Compute from this string (also of
length ``n2``) a list ``L1`` of integers corresponding
to the letter numbers in the first step.
3. Compute a new list ``C`` given by
``C[i] = L1[i] + L2[i] (mod N)``.
4. Compute from the new list a string ``ct`` of letters
corresponding to the new integers.
To decipher the auto-key ciphertext, the key is used to decipher
the first ``n1`` characters and then those characters become the
key to decipher the next ``n1`` characters, etc...:
>>> m = AZ('go navy, beat army! yes you can'); m
'GONAVYBEATARMYYESYOUCAN'
>>> key = AZ('gold bug'); n1 = len(key); n2 = len(m)
>>> auto_key = key + m[:n2 - n1]; auto_key
'GOLDBUGGONAVYBEATARMYYE'
>>> ct = encipher_vigenere(m, auto_key); ct
'MCYDWSHKOGAMKZCELYFGAYR'
>>> n1 = len(key)
>>> pt = []
>>> while ct:
... part, ct = ct[:n1], ct[n1:]
... pt.append(decipher_vigenere(part, key))
... key = pt[-1]
...
>>> ''.join(pt) == m
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Vigenere_cipher
.. [2] http://web.archive.org/web/20071116100808/http://filebox.vt.edu/users/batman/kryptos.html
(short URL: https://goo.gl/ijr22d)
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
key = [map[c] for c in key]
N = len(map)
k = len(key)
rv = []
for i, m in enumerate(msg):
rv.append(A[(map[m] + key[i % k]) % N])
rv = ''.join(rv)
return rv
def decipher_vigenere(msg, key, symbols=None):
"""
Decode using the Vigenere cipher.
Examples
========
>>> from sympy.crypto.crypto import decipher_vigenere
>>> key = "encrypt"
>>> ct = "QRGK kt HRZQE BPR"
>>> decipher_vigenere(ct, key)
'MEETMEONMONDAY'
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
N = len(A) # normally, 26
K = [map[c] for c in key]
n = len(K)
C = [map[c] for c in msg]
rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)])
return rv
#################### Hill cipher ########################
def encipher_hill(msg, key, symbols=None, pad="Q"):
r"""
Return the Hill cipher encryption of ``msg``.
Explanation
===========
The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
was the first polygraphic cipher in which it was practical
(though barely) to operate on more than three symbols at once.
The following discussion assumes an elementary knowledge of
matrices.
Each letter is first encoded as a number starting with 0.
Suppose your message ``msg`` consists of `n` capital letters, with no
spaces. This may be regarded as an `n`-tuple M of elements of
`Z_{26}` (if the letters are those of the English alphabet). A key
in the Hill cipher is a `k \times k` matrix `K`, all of whose entries
are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the
linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k`
is one-to-one).
Parameters
==========
msg
Plaintext message of `n` upper-case letters.
key
A `k \times k` invertible matrix `K`, all of whose entries are
in `Z_{26}` (or whatever number of symbols are being used).
pad
Character (default "Q") to use to make length of text be a
multiple of ``k``.
Returns
=======
ct
Ciphertext of upper-case letters.
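Examples
========
>>> from sympy import Matrix
>>> from sympy.crypto.crypto import encipher_hill
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'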
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L`` of
corresponding integers. Let ``n = len(L)``.
2. Break the list ``L`` up into ``t = ceiling(n/k)``
sublists ``L_1``, ..., ``L_t`` of size ``k`` (with
the last list "padded" to ensure its size is
``k``).
3. Compute new list ``C_1``, ..., ``C_t`` given by
``C[i] = K*L_i`` (arithmetic is done mod N), for each
``i``.
4. Concatenate these into a list ``C = C_1 + ... + C_t``.
5. Compute from ``C`` a string ``ct`` of corresponding
letters. This has length ``k*t``.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hill_cipher
.. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet,
The American Mathematical Monthly Vol.36, June-July 1929,
pp.306-312.
See Also
========
decipher_hill
"""
assert key.is_square
assert len(pad) == 1
msg, pad, A = _prep(msg, pad, symbols)
map = {c: i for i, c in enumerate(A)}
P = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(P)
m, r = divmod(n, k)
if r:
P = P + [map[pad]]*(k - r)
m += 1
rv = ''.join([A[c % N] for j in range(m) for c in
list(key*Matrix(k, 1, [P[i]
for i in range(k*j, k*(j + 1))]))])
return rv
def decipher_hill(msg, key, symbols=None):
"""
Deciphering is the same as enciphering but using the inverse of the
key matrix.
Examples
========
>>> from sympy.crypto.crypto import encipher_hill, decipher_hill
>>> from sympy import Matrix
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'
>>> decipher_hill(_, key)
'MEETMEONMONDAY'
When the length of the plaintext (stripped of invalid characters)
is not a multiple of the key dimension, extra characters will
appear at the end of the enciphered and deciphered text. In order to
decipher the text, those characters must be included in the text to
be deciphered. In the following, the key has a dimension of 4 but
the text is 2 short of being a multiple of 4 so two characters will
be added.
>>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0],
... [2, 2, 3, 4], [1, 1, 0, 1]])
>>> msg = "ST"
>>> encipher_hill(msg, key)
'HJEB'
>>> decipher_hill(_, key)
'STQQ'
>>> encipher_hill(msg, key, pad="Z")
'ISPK'
>>> decipher_hill(_, key)
'STZZ'
If the last two characters of the ciphertext were ignored in
either case, the wrong plaintext would be recovered:
>>> decipher_hill("HD", key)
'ORMV'
>>> decipher_hill("IS", key)
'UIKY'
See Also
========
encipher_hill
"""
assert key.is_square
msg, _, A = _prep(msg, '', symbols)
map = {c: i for i, c in enumerate(A)}
C = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(C)
m, r = divmod(n, k)
if r:
C = C + [0]*(k - r)
m += 1
key_inv = key.inv_mod(N)
rv = ''.join([A[p % N] for j in range(m) for p in
list(key_inv*Matrix(
k, 1, [C[i] for i in range(k*j, k*(j + 1))]))])
return rv
#################### Bifid cipher ########################
def encipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses an `n \times n`
Polybius square.
Parameters
==========
msg
Plaintext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in ``symbols`` that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default is string.printable)
Returns
=======
ciphertext
Ciphertext without spaces.
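Examples
========
>>> from sympy.crypto.crypto import encipher_bifid, AZ
>>> alp = AZ().replace('J', '')
>>> encipher_bifid(AZ("meet me on monday!"), AZ("gold bug"), alp)
'IEILHHFSTSFQYE'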
See Also
========
decipher_bifid, encipher_bifid5, encipher_bifid6
References
==========
.. [1] https://en.wikipedia.org/wiki/Bifid_cipher
"""
msg, key, A = _prep(msg, key, symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the fractionalization
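# each character is replaced by its (row, column) coordinates in the
# Polybius square; all row coordinates are written out followed by all
# column coordinates, and the combined sequence is re-read two at a time
# to select the ciphertext characters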
row_col = {ch: divmod(i, N) for i, ch in enumerate(long_key)}
r, c = zip(*[row_col[x] for x in msg])
rc = r + c
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in zip(rc[::2], rc[1::2]))
return rv
def decipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `n \times n`
Polybius square.
Parameters
==========
msg
Ciphertext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in symbols that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default=string.printable, a `10 \times 10` matrix)
Returns
=======
deciphered
Deciphered text.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid, decipher_bifid, AZ)
Do an encryption using the bifid5 alphabet:
>>> alp = AZ().replace('J', '')
>>> ct = AZ("meet me on monday!")
>>> key = AZ("gold bug")
>>> encipher_bifid(ct, key, alp)
'IEILHHFSTSFQYE'
When entering the text or ciphertext, spaces are ignored so it
can be formatted as desired. Re-entering the ciphertext from the
preceding, putting 4 characters per line and padding with an extra
J, does not cause problems for the deciphering:
>>> decipher_bifid('''
... IEILH
... HFSTS
... FQYEJ''', key, alp)
'MEETMEONMONDAY'
When no alphabet is given, all 100 printable characters will be
used:
>>> key = ''
>>> encipher_bifid('hello world!', key)
'bmtwmg-bIo*w'
>>> decipher_bifid(_, key)
'hello world!'
If the key is changed, a different encryption is obtained:
>>> key = 'gold_bug'
>>> encipher_bifid('hello world!', key)
'hg2sfuei7t}w'
And if the key used to decrypt the message is not exact, the
original text will not be perfectly obtained:
>>> decipher_bifid(_, 'gold pug')
'heldo~wor6d!'
"""
msg, _, A = _prep(msg, '', symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the reverse fractionalization
row_col = {
ch: divmod(i, N) for i, ch in enumerate(long_key)}
rc = [i for c in msg for i in row_col[c]]
n = len(msg)
rc = zip(*(rc[:n], rc[n:]))
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in rc)
return rv
def bifid_square(key):
"""Return characters of ``key`` arranged in a square.
Examples
========
>>> from sympy.crypto.crypto import (
... bifid_square, AZ, padded_key, bifid5)
>>> bifid_square(AZ().replace('J', ''))
Matrix([
[A, B, C, D, E],
[F, G, H, I, K],
[L, M, N, O, P],
[Q, R, S, T, U],
[V, W, X, Y, Z]])
>>> bifid_square(padded_key(AZ('gold bug!'), bifid5))
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
See Also
========
padded_key
"""
A = ''.join(uniq(''.join(key)))
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
n = int(n)
f = lambda i, j: Symbol(A[n*i + j])
rv = Matrix(n, n, f)
return rv
def encipher_bifid5(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square. The letter "J" is ignored so it must be replaced
with something else (traditionally an "I") before encryption.
ALGORITHM: (5x5 case)
STEPS:
0. Create the `5 \times 5` Polybius square ``S`` associated
to ``key`` as follows:
a) moving from left-to-right, top-to-bottom,
place the letters of the key into a `5 \times 5`
matrix,
b) if the key has less than 25 letters, add the
letters of the alphabet not in the key until the
`5 \times 5` square is filled.
1. Create a list ``P`` of pairs of numbers which are the
coordinates in the Polybius square of the letters in
``msg``.
2. Let ``L1`` be the list of all first coordinates of ``P``
(length of ``L1 = n``), let ``L2`` be the list of all
second coordinates of ``P`` (so the length of ``L2``
is also ``n``).
3. Let ``L`` be the concatenation of ``L1`` and ``L2``
(length ``L = 2*n``), except that consecutive numbers
are paired ``(L[2*i], L[2*i + 1])``. You can regard
``L`` as a list of pairs of length ``n``.
4. Let ``C`` be the list of all letters which are of the
form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a
string, this is the ciphertext of ``msg``.
Parameters
==========
msg : str
Plaintext string.
Converted to upper case; everything except the letters A-Z is removed,
and since "J" is not in the `5 \times 5` alphabet it is removed as well.
key
Short string for key; non-alphabetic letters, J and duplicated
characters are ignored and then, if the length is less than 25
characters, it is padded with other letters of the alphabet
(in alphabetical order).
Returns
=======
ct
Ciphertext (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid5, decipher_bifid5)
"J" will be omitted unless it is replaced with something else:
>>> round_trip = lambda m, k: \
... decipher_bifid5(encipher_bifid5(m, k), k)
>>> key = 'a'
>>> msg = "JOSIE"
>>> round_trip(msg, key)
'OSIE'
>>> round_trip(msg.replace("J", "I"), key)
'IOSIE'
>>> j = "QIQ"
>>> round_trip(msg.replace("J", j), key).replace(j, "J")
'JOSIE'
Notes
=====
The Bifid cipher was invented around 1901 by Felix Delastelle.
It is a *fractional substitution* cipher, where letters are
replaced by pairs of symbols from a smaller alphabet. The
cipher uses a `5 \times 5` square filled with some ordering of the
alphabet, except that "J" is replaced with "I" (this is a so-called
Polybius square; there is a `6 \times 6` analog if you add back in
"J" and also append onto the usual 26 letter alphabet, the digits
0, 1, ..., 9).
According to Helen Gaines' book *Cryptanalysis*, this type of cipher
was used in the field by the German Army during World War I.
See Also
========
decipher_bifid5, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return encipher_bifid(msg, '', key)
def decipher_bifid5(msg, key):
r"""
Return the Bifid cipher decryption of ``msg``.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square; the letter "J" is ignored unless a ``key`` of
length 25 is used.
Parameters
==========
msg
Ciphertext string.
key
Short string for key; duplicated characters are ignored and if
the length is less than 25 characters, it will be padded with
other letters from the alphabet omitting "J".
Non-alphabetic characters are ignored.
Returns
=======
plaintext
Plaintext from Bifid5 cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
>>> key = "gold bug"
>>> encipher_bifid5('meet me on friday', key)
'IEILEHFSTSFXEE'
>>> encipher_bifid5('meet me on monday', key)
'IEILHHFSTSFQYE'
>>> decipher_bifid5(_, key)
'MEETMEONMONDAY'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return decipher_bifid(msg, '', key)
def bifid5_square(key=None):
r"""
5x5 Polybius square.
Produce the Polybius square for the `5 \times 5` Bifid cipher.
Examples
========
>>> from sympy.crypto.crypto import bifid5_square
>>> bifid5_square("gold bug")
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
"""
if not key:
key = bifid5
else:
_, key, _ = _prep('', key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return bifid_square(key)
def encipher_bifid6(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Plaintext string (digits okay).
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
Returns
=======
ciphertext
Ciphertext from Bifid cipher (all caps, no spaces).
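Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6
>>> encipher_bifid6('meet me on monday at 8am', 'gold bug')
'KFKLJJHF5MMMKTFRGPL'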
See Also
========
decipher_bifid6, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return encipher_bifid(msg, '', key)
def decipher_bifid6(msg, key):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Ciphertext string (digits okay); converted to upper case
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
All letters are converted to uppercase.
Returns
=======
plaintext
Plaintext from Bifid cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'
>>> decipher_bifid6(_, key)
'MEETMEONMONDAYAT8AM'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return decipher_bifid(msg, '', key)
def bifid6_square(key=None):
r"""
6x6 Polybius square.
Produces the Polybius square for the `6 \times 6` Bifid cipher.
Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".
Examples
========
>>> from sympy.crypto.crypto import bifid6_square
>>> key = "gold bug"
>>> bifid6_square(key)
Matrix([
[G, O, L, D, B, U],
[A, C, E, F, H, I],
[J, K, M, N, P, Q],
[R, S, T, V, W, X],
[Y, Z, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9]])
"""
if not key:
key = bifid6
else:
_, key, _ = _prep('', key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return bifid_square(key)
#################### RSA #############################
def _decipher_rsa_crt(i, d, factors):
"""Decipher RSA using chinese remainder theorem from the information
of the relatively-prime factors of the modulus.
Parameters
==========
i : integer
Ciphertext
d : integer
The exponent component.
factors : list of relatively-prime integers
The integers given must be coprime and the product must equal
the modulus component of the original RSA key.
Examples
========
How to decrypt RSA with CRT:
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
>>> primes = [61, 53]
>>> e = 17
>>> args = primes + [e]
>>> puk = rsa_public_key(*args)
>>> prk = rsa_private_key(*args)
>>> from sympy.crypto.crypto import encipher_rsa, _decipher_rsa_crt
>>> msg = 65
>>> crt_primes = primes
>>> encrypted = encipher_rsa(msg, puk)
>>> decrypted = _decipher_rsa_crt(encrypted, prk[1], primes)
>>> decrypted
65
"""
residues = [pow(i, d, p) for p in factors]
result = crt(factors, residues)
if not result:
raise ValueError("CRT failed")
return result[0]
def _rsa_key(*args, public=True, private=True, totient='Euler', index=None, multipower=None):
r"""A private subroutine to generate RSA key
Parameters
==========
public, private : bool, optional
Flag to generate either a public key or a private key.
totient : 'Euler' or 'Carmichael'
Different notation used for totient.
multipower : bool, optional
Flag to bypass warning for multipower RSA.
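Examples
========
An illustrative call using the same primes and exponent as the
``rsa_public_key`` and ``rsa_private_key`` doctests; the expected
outputs below mirror those examples:
>>> from sympy.crypto.crypto import _rsa_key
>>> _rsa_key(61, 53, 17, public=True, private=False)
(3233, 17)
>>> _rsa_key(61, 53, 17, public=False, private=True)
(3233, 2753)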
"""
if len(args) < 2:
return False
if totient not in ('Euler', 'Carmichael'):
raise ValueError(
"The argument totient={} should either be " \
"'Euler', 'Carmichalel'." \
.format(totient))
if totient == 'Euler':
_totient = _euler
else:
_totient = _carmichael
if index is not None:
index = as_int(index)
if totient != 'Carmichael':
raise ValueError(
"Setting the 'index' keyword argument requires totient"
"notation to be specified as 'Carmichael'.")
primes, e = args[:-1], args[-1]
if not all(isprime(p) for p in primes):
new_primes = []
for i in primes:
new_primes.extend(factorint(i, multiple=True))
primes = new_primes
n = reduce(lambda i, j: i*j, primes)
tally = multiset(primes)
if all(v == 1 for v in tally.values()):
multiple = list(tally.keys())
phi = _totient._from_distinct_primes(*multiple)
else:
if not multipower:
NonInvertibleCipherWarning(
'Non-distinctive primes found in the factors {}. '
'The cipher may not be decryptable for some numbers '
'in the complete residue system Z[{}], but the cipher '
'can still be valid if you restrict the domain to be '
'the reduced residue system Z*[{}]. You can pass '
'the flag multipower=True if you want to suppress this '
'warning.'
.format(primes, n, n)
# stacklevel=4 because most users will call a function that
# calls this function
).warn(stacklevel=4)
phi = _totient._from_factors(tally)
if igcd(e, phi) == 1:
if public and not private:
if isinstance(index, int):
e = e % phi
e += index * phi
return n, e
if private and not public:
d = mod_inverse(e, phi)
if isinstance(index, int):
d += index * phi
return n, d
return False
def rsa_public_key(*args, **kwargs):
r"""Return the RSA *public key* pair, `(n, e)`
Parameters
==========
args : naturals
If specified as `p, q, e` where `p` and `q` are distinct primes
and `e` is a desired public exponent of the RSA, `n = p q` and
`e` will be verified against the totient
`\phi(n)` (Euler totient) or `\lambda(n)` (Carmichael totient)
to be `\gcd(e, \phi(n)) = 1` or `\gcd(e, \lambda(n)) = 1`.
If specified as `p_1, p_2, \dots, p_n, e` where
`p_1, p_2, \dots, p_n` are specified as primes,
and `e` is specified as a desired public exponent of the RSA,
it will be able to form a multi-prime RSA, which is a more
generalized form of the popular 2-prime RSA.
It is also possible to form a single-prime RSA by specifying
the argument as `p, e`, which can be considered a trivial case
of a multi-prime RSA.
Furthermore, it is possible to form a multi-power RSA by
specifying two or more of the primes to be the same.
However, unlike two-distinct-prime RSA or multi-prime
RSA, not every number in the complete residue system
(`\mathbb{Z}_n`) will be decryptable, since the mapping
`\mathbb{Z}_{n} \rightarrow \mathbb{Z}_{n}`
will not be bijective.
(The only exception is the trivial case when
`e = 1`
or more generally,
.. math::
e \in \left \{ 1 + k \lambda(n)
\mid k \in \mathbb{Z} \land k \geq 0 \right \}
in which case RSA reduces to the identity.)
However, the RSA can still be decryptable for the numbers in the
reduced residue system (`\mathbb{Z}_n^{\times}`), since the
mapping
`\mathbb{Z}_{n}^{\times} \rightarrow \mathbb{Z}_{n}^{\times}`
can still be bijective.
If you pass a non-prime integer in the arguments
`p_1, p_2, \dots, p_n`, that number will be
prime-factored and the key will become either a multi-prime RSA or a
multi-power RSA in its canonical form, depending on whether the
product equals its radical, that is, whether
`p_1 p_2 \dots p_n = \text{rad}(p_1 p_2 \dots p_n)`.
totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient `\phi(n)` which is
:meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient `\lambda(n)`
which is :meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
Unlike private key generation, this is a trivial keyword for
public key generation because
`\gcd(e, \phi(n)) = 1 \iff \gcd(e, \lambda(n)) = 1`.
index : nonnegative integer, optional
Returns an arbitrary solution of a RSA public key at the index
specified at `0, 1, 2, \dots`. This parameter needs to be
specified along with ``totient='Carmichael'``.
Similarly to the non-uniqueness of an RSA private key as described
in the ``index`` parameter documentation in
:meth:`rsa_private_key`, an RSA public key is also not unique, and
there are infinitely many RSA public exponents which
behave in the same manner.
From any given RSA public exponent `e`, there can be
another RSA public exponent `e + k \lambda(n)` where `k` is an
integer and `\lambda` is the Carmichael totient function.
However, considering only the positive cases, there is
a principal solution of an RSA public exponent `e_0` with
`0 < e_0 < \lambda(n)`, and all the other solutions
can be canonicalized in the form `e_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA public key can have.
An example of computing any arbitrary RSA public key:
>>> from sympy.crypto.crypto import rsa_public_key
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 17)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 797)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1577)
multipower : bool, optional
Any pair of non-distinct primes found in the RSA specification
will restrict the domain of the cryptosystem, as noted in the
explanation of the parameter ``args``.
SymPy RSA key generator may give a warning before dispatching it
as a multi-power RSA, however, you can disable the warning if
you pass ``True`` to this keyword.
Returns
=======
(n, e) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`e` is relatively prime (coprime) to the Euler totient
`\phi(n)`.
False
Returned if fewer than two arguments are given, or if `e` is
not relatively prime to the totient of the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_public_key
A public key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_public_key(p, q, e)
(15, 7)
>>> rsa_public_key(p, q, 30)
False
A public key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_public_key(*args)
(30030, 7)
Notes
=====
Although RSA can be generalized over any modulus `n`, using
two large primes has become the most popular specification because a
product of two large primes is usually the hardest to factor
relative to the number of digits `n` has.
However, verifying this claim requires a closer look at the time
complexity of the various prime-factoring algorithms.
See Also
========
rsa_private_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=True, private=False, **kwargs)
def rsa_private_key(*args, **kwargs):
r"""Return the RSA *private key* pair, `(n, d)`
Parameters
==========
args : naturals
The keyword is identical to the ``args`` in
:meth:`rsa_public_key`.
totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient convention `\phi(n)`
which is :meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient convention
`\lambda(n)` which is
:meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
There can be some output differences for private key generation
as examples below.
Example using Euler's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Euler')
(3233, 2753)
Example using Carmichael's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael')
(3233, 413)
index : nonnegative integer, optional
Returns an arbitrary solution of a RSA private key at the index
specified at `0, 1, 2, \dots`. This parameter needs to be
specified along with ``totient='Carmichael'``.
The RSA private exponent is a non-unique solution of
`e d \mod \lambda(n) = 1` and can take any form
`d + k \lambda(n)`, where `d` is another
already-computed private exponent, `\lambda` is the
Carmichael totient function, and `k` is any integer.
However, considering only the positive cases, there is
a principal solution of an RSA private exponent `d_0` with
`0 < d_0 < \lambda(n)`, and all the other solutions
can be canonicalized in the form `d_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA private key can have.
An example of computing any arbitrary RSA private key:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 413)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 1193)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1973)
multipower : bool, optional
The keyword is identical to the ``multipower`` in
:meth:`rsa_public_key`.
Returns
=======
(n, d) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`d` is the inverse of `e` (mod `\phi(n)`) where `e` is the
exponent given, and `\phi` is the Euler totient.
False
Returned if less than two arguments are given, or `e` is
not relatively prime to the totient of the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_private_key
A private key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_private_key(p, q, e)
(15, 7)
>>> rsa_private_key(p, q, 30)
False
A private key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_private_key(*args)
(30030, 823)
See Also
========
rsa_public_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=False, private=True, **kwargs)
def _encipher_decipher_rsa(i, key, factors=None):
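# Raw RSA is the modular exponentiation i**d % n.  When ``factors`` is
# given and forms a pairwise-coprime factorization of n, dispatch to the
# CRT-based routine; otherwise fall back to plain modular exponentiation.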
n, d = key
if not factors:
return pow(i, d, n)
def _is_coprime_set(l):
is_coprime_set = True
for i in range(len(l)):
for j in range(i+1, len(l)):
if igcd(l[i], l[j]) != 1:
is_coprime_set = False
break
return is_coprime_set
prod = reduce(lambda i, j: i*j, factors)
if prod == n and _is_coprime_set(factors):
return _decipher_rsa_crt(i, d, factors)
return _encipher_decipher_rsa(i, key, factors=None)
def encipher_rsa(i, key, factors=None):
r"""Encrypt the plaintext with RSA.
Parameters
==========
i : integer
The plaintext to be encrypted.
key : (n, e) where n, e are integers
`n` is the modulus of the key and `e` is the exponent of the
key. The encryption is computed by `i^e \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
This is identical to the keyword ``factors`` in
:meth:`decipher_rsa`.
Notes
=====
Some parameter choices may make RSA cryptographically
meaningless.
For example, `0` and `1` always remain the same after any
number of exponentiations, and thus should be avoided.
Furthermore, if `i^e < n`, `i` may easily be recovered by taking
the `e`-th root.
Also, if the exponent is specified as `1`, or more generally
as `1 + k \lambda(n)` where `k` is a nonnegative integer and
`\lambda` is the Carmichael totient, RSA becomes an identity
mapping.
Examples
========
>>> from sympy.crypto.crypto import encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption:
>>> p, q, e = 3, 5, 7
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, puk)
3
Private Key Encryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, prk)
3
Encryption using chinese remainder theorem:
>>> encipher_rsa(msg, prk, factors=[p, q])
3
"""
return _encipher_decipher_rsa(i, key, factors=factors)
def decipher_rsa(i, key, factors=None):
r"""Decrypt the ciphertext with RSA.
Parameters
==========
i : integer
The ciphertext to be decrypted.
key : (n, d) where n, d are integers
`n` is the modulus of the key and `d` is the exponent of the
key. The decryption is computed by `i^d \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
As the modulus `n` created from RSA key generation is composed
of arbitrary prime factors
`n = {p_1}^{k_1}{p_2}^{k_2}\dots{p_n}^{k_n}` where
`p_1, p_2, \dots, p_n` are distinct primes and
`k_1, k_2, \dots, k_n` are positive integers, the Chinese
Remainder Theorem can be used to compute `i^d \bmod n` from the
fragmented modulo operations like
.. math::
i^d \bmod {p_1}^{k_1}, i^d \bmod {p_2}^{k_2}, \dots,
i^d \bmod {p_n}^{k_n}
or like
.. math::
i^d \bmod {p_1}^{k_1}{p_2}^{k_2},
i^d \bmod {p_3}^{k_3}, \dots ,
i^d \bmod {p_n}^{k_n}
as long as no two of the moduli share a common divisor.
The raw primes used in generating the RSA key pair can be a good
option.
Note that the speed advantage of using this is only realized for
very large keys (like 2048-bit RSA keys) since the
overhead of the pure Python implementation of
:meth:`sympy.ntheory.modular.crt` may outweigh the
theoretical speed advantage.
Notes
=====
See the ``Notes`` section in the documentation of
:meth:`encipher_rsa`
Examples
========
>>> from sympy.crypto.crypto import decipher_rsa, encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, prk)
>>> new_msg
3
>>> decipher_rsa(new_msg, puk)
12
Private Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, puk)
>>> new_msg
3
>>> decipher_rsa(new_msg, prk)
12
Decryption using chinese remainder theorem:
>>> decipher_rsa(new_msg, prk, factors=[p, q])
12
See Also
========
encipher_rsa
"""
return _encipher_decipher_rsa(i, key, factors=factors)
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
r"""
Kid RSA is a version of RSA useful to teach grade school children
since it does not involve exponentiation.
Explanation
===========
Alice wants to talk to Bob. Bob generates keys as follows.
Key generation:
* Select positive integers `a, b, A, B` at random.
* Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1)//M`.
* The *public key* is `(n, e)`. Bob sends these to Alice.
* The *private key* is `(n, d)`, which Bob keeps secret.
Encryption: If `p` is the plaintext message then the
ciphertext is `c = p e \pmod n`.
Decryption: If `c` is the ciphertext message then the
plaintext is `p = c d \pmod n`.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_public_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_public_key(a, b, A, B)
(369, 58)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, e
def kid_rsa_private_key(a, b, A, B):
"""
Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1) / M`. The *private key* is `(n, d)`, which Bob
keeps secret.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_private_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_private_key(a, b, A, B)
(369, 70)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, d
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
def decipher_kid_rsa(msg, key):
"""
Here ``msg`` is the ciphertext and ``key`` is the private key.
Examples
========
>>> from sympy.crypto.crypto import (
... kid_rsa_public_key, kid_rsa_private_key,
... decipher_kid_rsa, encipher_kid_rsa)
>>> a, b, A, B = 3, 4, 5, 6
>>> d = kid_rsa_private_key(a, b, A, B)
>>> msg = 200
>>> pub = kid_rsa_public_key(a, b, A, B)
>>> pri = kid_rsa_private_key(a, b, A, B)
>>> ct = encipher_kid_rsa(msg, pub)
>>> decipher_kid_rsa(ct, pri)
200
"""
n, d = key
return (msg*d) % n
#################### Morse Code ######################################
morse_char = {
".-": "A", "-...": "B",
"-.-.": "C", "-..": "D",
".": "E", "..-.": "F",
"--.": "G", "....": "H",
"..": "I", ".---": "J",
"-.-": "K", ".-..": "L",
"--": "M", "-.": "N",
"---": "O", ".--.": "P",
"--.-": "Q", ".-.": "R",
"...": "S", "-": "T",
"..-": "U", "...-": "V",
".--": "W", "-..-": "X",
"-.--": "Y", "--..": "Z",
"-----": "0", ".----": "1",
"..---": "2", "...--": "3",
"....-": "4", ".....": "5",
"-....": "6", "--...": "7",
"---..": "8", "----.": "9",
".-.-.-": ".", "--..--": ",",
"---...": ":", "-.-.-.": ";",
"..--..": "?", "-....-": "-",
"..--.-": "_", "-.--.": "(",
"-.--.-": ")", ".----.": "'",
"-...-": "=", ".-.-.": "+",
"-..-.": "/", ".--.-.": "@",
"...-..-": "$", "-.-.--": "!"}
char_morse = {v: k for k, v in morse_char.items()}
def encode_morse(msg, sep='|', mapping=None):
"""
Encodes a plaintext into popular Morse Code with letters
separated by ``sep`` and words by a double ``sep``.
Examples
========
>>> from sympy.crypto.crypto import encode_morse
>>> msg = 'ATTACK RIGHT FLANK'
>>> encode_morse(msg)
'.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or char_morse
assert sep not in mapping
word_sep = 2*sep
mapping[" "] = word_sep
suffix = msg and msg[-1] in whitespace
# normalize whitespace
msg = (' ' if word_sep else '').join(msg.split())
# omit unmapped chars
chars = set(''.join(msg.split()))
ok = set(mapping.keys())
msg = translate(msg, None, ''.join(chars - ok))
morsestring = []
words = msg.split()
for word in words:
morseword = []
for letter in word:
morseletter = mapping[letter]
morseword.append(morseletter)
word = sep.join(morseword)
morsestring.append(word)
return word_sep.join(morsestring) + (word_sep if suffix else '')
def decode_morse(msg, sep='|', mapping=None):
"""
Decodes Morse code with letters separated by ``sep``
(default is ``'|'``) and words by ``word_sep`` (default is ``'||'``)
into plaintext.
Examples
========
>>> from sympy.crypto.crypto import decode_morse
>>> mc = '--|---|...-|.||.|.-|...|-'
>>> decode_morse(mc)
'MOVE EAST'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or morse_char
word_sep = 2*sep
characterstring = []
words = msg.strip(word_sep).split(word_sep)
for word in words:
letters = word.split(sep)
chars = [mapping[c] for c in letters]
word = ''.join(chars)
characterstring.append(word)
rv = " ".join(characterstring)
return rv
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
r"""
This function creates an LFSR sequence.
Parameters
==========
key : list
A list of finite field elements, `[c_0, c_1, \ldots, c_k].`
fill : list
The list of the initial terms of the LFSR sequence,
`[x_0, x_1, \ldots, x_k].`
n
Number of terms of the sequence that the function returns.
Returns
=======
L
The LFSR sequence defined by
`x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
`n \geq k`.
Notes
=====
S. Golomb [G]_ gives a list of three statistical properties a
sequence of numbers `a = \{a_n\}_{n=1}^\infty`,
`a_n \in \{0,1\}`, should display to be considered
"random". Define the autocorrelation of `a` to be
.. math::
C(k) = C(k,a) = \lim_{N\rightarrow \infty} {1\over N}\sum_{n=1}^N (-1)^{a_n + a_{n+k}}.
In the case where `a` is periodic with period
`P` then this reduces to
.. math::
C(k) = {1\over P}\sum_{n=1}^P (-1)^{a_n + a_{n+k}}.
Assume `a` is periodic with period `P`.
- balance:
.. math::
\left|\sum_{n=1}^P(-1)^{a_n}\right| \leq 1.
- low autocorrelation:
.. math::
C(k) = \left\{ \begin{array}{cc} 1,& k = 0,\\ \epsilon, & k \ne 0. \end{array} \right.
(For sequences satisfying these first two properties, it is known
that `\epsilon = -1/P` must hold.)
- proportional runs property: In each period, half the runs have
length `1`, one-fourth have length `2`, etc.
Moreover, there are as many runs of `1`'s as there are of
`0`'s.
Examples
========
>>> from sympy.crypto.crypto import lfsr_sequence
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> lfsr_sequence(key, fill, 10)
[1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2,
1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]
References
==========
.. [G] Solomon Golomb, Shift register sequences, Aegean Park Press,
Laguna Hills, Ca, 1967
"""
if not isinstance(key, list):
raise TypeError("key must be a list")
if not isinstance(fill, list):
raise TypeError("fill must be a list")
p = key[0].mod
F = FF(p)
s = fill
k = len(fill)
L = []
for i in range(n):
s0 = s[:]
L.append(s[0])
s = s[1:k]
x = sum([int(key[i]*s0[i]) for i in range(k)])
s.append(F(x))
return L # use [x.to_int() for x in L] for int version
def lfsr_autocorrelation(L, P, k):
"""
This function computes the LFSR autocorrelation function.
Parameters
==========
L
A periodic sequence of elements of `GF(2)`.
L must have length larger than P.
P
The period of L.
k : int
An integer `k` (`0 < k < P`).
Returns
=======
autocorrelation
The k-th value of the autocorrelation of the LFSR L.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_autocorrelation)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_autocorrelation(s, 15, 7)
-1/15
>>> lfsr_autocorrelation(s, 15, 0)
1
"""
if not isinstance(L, list):
raise TypeError("L (=%s) must be a list" % L)
P = int(P)
k = int(k)
L0 = L[:P] # slicing makes a copy
L1 = L0 + L0[:k]
L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
tot = sum(L2)
return Rational(tot, P)
def lfsr_connection_polynomial(s):
"""
This function computes the LFSR connection polynomial.
Parameters
==========
s
A sequence of even length, with entries in a finite
field.
Returns
=======
C(x)
The connection polynomial of a minimal LFSR yielding s.
This implements the algorithm in section 3 of J. L. Massey's
article [M]_.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_connection_polynomial)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**4 + x + 1
>>> fill = [F(1), F(0), F(0), F(1)]
>>> key = [F(1), F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(1), F(0)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x**2 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x + 1
References
==========
.. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
Jan 1969.
"""
# Initialization:
p = s[0].mod
x = Symbol("x")
C = 1*x**0
B = 1*x**0
m = 1
b = 1*x**0
L = 0
N = 0
while N < len(s):
if L > 0:
dC = Poly(C).degree()
r = min(L + 1, dC + 1)
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
for i in range(1, dC + 1)]
d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int()
for i in range(1, r)])) % p
if L == 0:
d = s[N].to_int()*x**0
if d == 0:
m += 1
N += 1
if d > 0:
if 2*L > N:
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
m += 1
N += 1
else:
T = C
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
L = N + 1 - L
m = 1
b = d
B = T
N += 1
dC = Poly(C).degree()
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
return sum([coeffsC[i] % p*x**i for i in range(dC + 1)
if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10, seed=None):
r"""
Return three number tuple as private key.
Explanation
===========
Elgamal encryption is based on the mathematical problem
called the Discrete Logarithm Problem (DLP). For example,
`a^{b} \equiv c \pmod p`
In general, if ``a`` and ``b`` are known, ``c`` is easily
calculated. If ``b`` is unknown, it is hard to use
``a`` and ``c`` to get ``b``.
Parameters
==========
digit : int
Minimum number of binary digits for key.
Returns
=======
tuple : (p, r, d)
p = prime number.
r = primitive root.
d = random number.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.core.random._randrange.
Examples
========
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.ntheory import is_primitive_root, isprime
>>> a, b, _ = elgamal_private_key()
>>> isprime(a)
True
>>> is_primitive_root(b, a)
True
"""
randrange = _randrange(seed)
p = nextprime(2**digit)
return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(key):
r"""
Return three number tuple as public key.
Parameters
==========
key : (p, r, d)
Tuple generated by ``elgamal_private_key``.
Returns
=======
tuple : (p, r, e)
`e = r**d \bmod p`
`d` is a random number in private key.
Examples
========
>>> from sympy.crypto.crypto import elgamal_public_key
>>> elgamal_public_key((1031, 14, 636))
(1031, 14, 212)
"""
p, r, e = key
return p, r, pow(r, e, p)
def encipher_elgamal(i, key, seed=None):
r"""
Encrypt message with public key.
Explanation
===========
``i`` is a plaintext message expressed as an integer.
``key`` is public key (p, r, e). In order to encrypt
a message, a random number ``a`` in ``range(2, p)``
is generated and the encrypted message is returned as
`c_{1}` and `c_{2}` where:
`c_{1} \equiv r^{a} \pmod p`
`c_{2} \equiv m e^{a} \pmod p`
Parameters
==========
i
The plaintext message expressed as an integer.
key
Public key.
Returns
=======
tuple : (c1, c2)
The ciphertext as a pair of integers.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.core.random._randrange.
Examples
========
>>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3]); pri
(37, 2, 3)
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 36
>>> encipher_elgamal(msg, pub, seed=[3])
(8, 6)
"""
p, r, e = key
if i < 0 or i >= p:
raise ValueError(
'Message (%s) should be in range(%s)' % (i, p))
randrange = _randrange(seed)
a = randrange(2, p)
return pow(r, a, p), i*pow(e, a, p) % p
def decipher_elgamal(msg, key):
r"""
Decrypt message with private key.
`msg = (c_{1}, c_{2})`
`key = (p, r, d)`
By the extended Euclidean algorithm,
`u c_{1}^{d} + p n = 1`
`u \equiv 1/{{c_{1}}^d} \pmod p`
`u c_{2} \equiv \frac{1}{c_{1}^d} c_{2} \equiv \frac{1}{r^{ad}} c_{2} \pmod p`
`\frac{1}{r^{ad}} m e^a \equiv \frac{1}{r^{ad}} m {r^{d a}} \equiv m \pmod p`
Examples
========
>>> from sympy.crypto.crypto import decipher_elgamal
>>> from sympy.crypto.crypto import encipher_elgamal
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.crypto.crypto import elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3])
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 17
>>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg
True
"""
p, _, d = key
c1, c2 = msg
u = igcdex(c1**d, p)[0]
return u * c2 % p
################ Diffie-Hellman Key Exchange #########################
def dh_private_key(digit=10, seed=None):
r"""
Return three integer tuple as private key.
Explanation
===========
Diffie-Hellman key exchange is based on the mathematical problem
called the Discrete Logarithm Problem (see ElGamal).
Diffie-Hellman key exchange is divided into the following steps:
* Alice and Bob agree on a base that consists of a prime ``p``
and a primitive root of ``p`` called ``g``
* Alice chooses a number ``a`` and Bob chooses a number ``b`` where
``a`` and ``b`` are random numbers in range `[2, p)`. These are
their private keys.
* Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends
Alice `g^{b} \pmod p`
* They both raise the received value to their secretly chosen
number (``a`` or ``b``) and now have both as their shared key
`g^{ab} \pmod p`
Parameters
==========
digit
Minimum number of binary digits required in key.
Returns
=======
tuple : (p, g, a)
p = prime number.
g = primitive root of p.
a = random number from 2 through p - 1.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.core.random._randrange.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key
>>> from sympy.ntheory import isprime, is_primitive_root
>>> p, g, _ = dh_private_key()
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
>>> p, g, _ = dh_private_key(5)
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
"""
p = nextprime(2**digit)
g = primitive_root(p)
randrange = _randrange(seed)
a = randrange(2, p)
return p, g, a
def dh_public_key(key):
r"""
Return three number tuple as public key.
This is the tuple that Alice sends to Bob.
Parameters
==========
key : (p, g, a)
A tuple generated by ``dh_private_key``.
Returns
=======
tuple : int, int, int
A tuple of `(p, g, g^a \mod p)` with `p`, `g` and `a` given as
parameters.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key, dh_public_key
>>> p, g, a = dh_private_key();
>>> _p, _g, x = dh_public_key((p, g, a))
>>> p == _p and g == _g
True
>>> x == pow(g, a, p)
True
"""
p, g, a = key
return p, g, pow(g, a, p)
def dh_shared_key(key, b):
"""
Return an integer that is the shared key.
This is what Bob and Alice can both calculate using the public
keys they received from each other and their private keys.
Parameters
==========
key : (p, g, x)
Tuple `(p, g, x)` generated by ``dh_public_key``.
b
Random number in the range `2` to `p - 1`
(chosen by the second key-exchange member, Bob).
Returns
=======
int
A shared key.
Examples
========
>>> from sympy.crypto.crypto import (
... dh_private_key, dh_public_key, dh_shared_key)
>>> prk = dh_private_key();
>>> p, g, x = dh_public_key(prk);
>>> sk = dh_shared_key((p, g, x), 1000)
>>> sk == pow(x, 1000, p)
True
"""
p, _, x = key
if 1 >= b or b >= p:
raise ValueError(filldedent('''
Value of b should be greater than 1 and less
than prime %s.''' % p))
return pow(x, b, p)
################ Goldwasser-Micali Encryption #########################
def _legendre(a, p):
"""
Returns the Legendre symbol of ``a`` and ``p``,
assuming that ``p`` is prime:
1 if ``a`` is a quadratic residue mod ``p``,
-1 if ``a`` is not a quadratic residue mod ``p``,
0 if ``a`` is divisible by ``p``.
Parameters
==========
a : int
The number to test.
p : prime
The prime to test ``a`` against.
Returns
=======
int
Legendre symbol (a / p).
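Examples
========
Modulo ``7`` the nonzero quadratic residues are ``1``, ``2`` and ``4``:
>>> from sympy.crypto.crypto import _legendre
>>> _legendre(4, 7)
1
>>> _legendre(3, 7)
-1
>>> _legendre(14, 7)
0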
"""
sig = pow(a, (p - 1)//2, p)
if sig == 1:
return 1
elif sig == 0:
return 0
else:
return -1
def _random_coprime_stream(n, seed=None):
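# Yield an endless stream of random integers in [0, n) that are
# coprime to n; used to randomize Goldwasser-Micali encryption.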
randrange = _randrange(seed)
while True:
y = randrange(n)
if gcd(y, n) == 1:
yield y
def gm_private_key(p, q, a=None):
r"""
Check if ``p`` and ``q`` can be used as private keys for
the Goldwasser-Micali encryption. The method works
roughly as follows.
Explanation
===========
#. Pick two large primes $p$ and $q$.
#. Call their product $N$.
#. Given a message as an integer $i$, write $i$ in its bit representation $b_0, \dots, b_n$.
#. For each $k$,
if $b_k = 0$:
let $a_k$ be a random square
(quadratic residue) modulo $p q$
such that ``jacobi_symbol(a, p*q) = 1``
if $b_k = 1$:
let $a_k$ be a random non-square
(non-quadratic residue) modulo $p q$
such that ``jacobi_symbol(a, p*q) = 1``
returns $\left[a_1, a_2, \dots\right]$
$b_k$ can be recovered by checking whether or not
$a_k$ is a residue. And from the $b_k$'s, the message
can be reconstructed.
The idea is that, while ``jacobi_symbol(a, p*q)``
can be easily computed (and when it is equal to $-1$ will
tell you that $a$ is not a square mod $p q$), quadratic
residuosity modulo a composite number is hard to compute
without knowing its factorization.
Moreover, approximately half the numbers coprime to $p q$ have
:func:`~.jacobi_symbol` equal to $1$ . And among those, approximately half
are residues and approximately half are not. This maximizes the
entropy of the code.
Parameters
==========
p, q, a
Initialization variables.
Returns
=======
tuple : (p, q)
The input value ``p`` and ``q``.
Raises
======
ValueError
If ``p`` and ``q`` are not distinct odd primes.
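Examples
========
Two small, distinct odd primes pass the checks and are returned
unchanged (the values here are chosen only for illustration):
>>> from sympy.crypto.crypto import gm_private_key
>>> gm_private_key(7, 11)
(7, 11)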
"""
if p == q:
raise ValueError("expected distinct primes, "
"got two copies of %i" % p)
elif not isprime(p) or not isprime(q):
raise ValueError("first two arguments must be prime, "
"got %i of %i" % (p, q))
elif p == 2 or q == 2:
raise ValueError("first two arguments must not be even, "
"got %i of %i" % (p, q))
return p, q
def gm_public_key(p, q, a=None, seed=None):
"""
Compute public keys for ``p`` and ``q``.
Note that in Goldwasser-Micali Encryption,
public keys are randomly selected.
Parameters
==========
p, q, a : int, int, int
Initialization variables.
Returns
=======
tuple : (a, N)
``a`` is the input ``a`` if it is not ``None``, otherwise
a random quadratic non-residue modulo both ``p`` and ``q``.
``N`` is the product of ``p`` and ``q``.
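Examples
========
The value ``6`` below is a hand-picked quadratic non-residue modulo
both ``7`` and ``11``, so it is accepted as-is:
>>> from sympy.crypto.crypto import gm_public_key
>>> gm_public_key(7, 11, 6)
(6, 77)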
"""
p, q = gm_private_key(p, q)
N = p * q
if a is None:
randrange = _randrange(seed)
while True:
a = randrange(N)
if _legendre(a, p) == _legendre(a, q) == -1:
break
else:
if _legendre(a, p) != -1 or _legendre(a, q) != -1:
return False
return (a, N)
def encipher_gm(i, key, seed=None):
"""
Encrypt the integer ``i`` using the public key ``key``.
Note that Goldwasser-Micali encryption is randomized.
Parameters
==========
i : int
The message to encrypt.
key : (a, N)
The public key.
Returns
=======
list : list of int
The randomized encrypted message.
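Examples
========
An illustrative round trip with the small public key ``(6, 77)``
(obtainable from ``gm_public_key(7, 11, 6)``); the ``seed`` list only
makes the randomized encryption deterministic for this example:
>>> from sympy.crypto.crypto import encipher_gm, decipher_gm
>>> ct = encipher_gm(9, (6, 77), seed=[2, 3, 5, 8])
>>> decipher_gm(ct, (7, 11))
9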
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
a, N = key
bits = []
while i > 0:
bits.append(i % 2)
i //= 2
gen = _random_coprime_stream(N, seed)
rev = reversed(bits)
encode = lambda b: next(gen)**2*pow(a, b) % N
return [ encode(b) for b in rev ]
def decipher_gm(message, key):
"""
Decrypt the message ``message`` using the private key ``key``.
Parameters
==========
message : list of int
The randomized encrypted message.
key : (p, q)
The private key.
Returns
=======
int
The decrypted message.
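Examples
========
The list below is one possible ciphertext for the message ``9`` under
the public key ``(6, 77)``; the private key is the prime pair:
>>> from sympy.crypto.crypto import decipher_gm
>>> decipher_gm([24, 9, 25, 76], (7, 11))
9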
"""
p, q = key
res = lambda m, p: _legendre(m, p) > 0
bits = [res(m, p) * res(m, q) for m in message]
m = 0
for b in bits:
m <<= 1
m += not b
return m
########### RailFence Cipher #############
def encipher_railfence(message,rails):
"""
Performs Railfence Encryption on plaintext and returns ciphertext
Examples
========
>>> from sympy.crypto.crypto import encipher_railfence
>>> message = "hello world"
>>> encipher_railfence(message,3)
'horel ollwd'
Parameters
==========
message : string, the message to encrypt.
rails : int, the number of rails.
Returns
=======
The Encrypted string message.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
return ''.join(sorted(message, key=lambda i: next(p)))
def decipher_railfence(ciphertext,rails):
"""
Decrypt the message using the given rails
Examples
========
>>> from sympy.crypto.crypto import decipher_railfence
>>> decipher_railfence("horel ollwd",3)
'hello world'
Parameters
==========
ciphertext : string, the message to decrypt.
rails : int, the number of rails.
Returns
=======
The Decrypted string message.
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
idx = sorted(range(len(ciphertext)), key=lambda i: next(p))
res = [''] * len(ciphertext)
for i, c in zip(idx, ciphertext):
res[i] = c
return ''.join(res)
################ Blum-Goldwasser cryptosystem #########################
def bg_private_key(p, q):
"""
Check if p and q can be used as private keys for
the Blum-Goldwasser cryptosystem.
Explanation
===========
The three necessary checks for p and q to pass
so that they can be used as private keys:
1. p and q must both be prime
2. p and q must be distinct
3. p and q must be congruent to 3 mod 4
Parameters
==========
p, q
The keys to be checked.
Returns
=======
p, q
Input values.
Raises
======
ValueError
If p and q do not pass the above conditions.
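Examples
========
``19`` and ``23`` are distinct primes congruent to 3 mod 4, so they
pass all three checks:
>>> from sympy.crypto.crypto import bg_private_key
>>> bg_private_key(19, 23)
(19, 23)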
"""
if not isprime(p) or not isprime(q):
raise ValueError("the two arguments must be prime, "
"got %i and %i" %(p, q))
elif p == q:
raise ValueError("the two arguments must be distinct, "
"got two copies of %i. " %p)
elif (p - 3) % 4 != 0 or (q - 3) % 4 != 0:
raise ValueError("the two arguments must be congruent to 3 mod 4, "
"got %i and %i" %(p, q))
return p, q
def bg_public_key(p, q):
"""
Calculates public keys from private keys.
Explanation
===========
The function first checks the validity of
private keys passed as arguments and
then returns their product.
Parameters
==========
p, q
The private keys.
Returns
=======
N
The public key.
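Examples
========
>>> from sympy.crypto.crypto import bg_public_key
>>> bg_public_key(19, 23)
437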
"""
p, q = bg_private_key(p, q)
N = p * q
return N
def encipher_bg(i, key, seed=None):
"""
Encrypts the message using public key and seed.
Explanation
===========
ALGORITHM:
1. Encodes i as a string of L bits, m.
2. Select a random element r, where 1 < r < key, and computes
x = r^2 mod key.
3. Use BBS pseudo-random number generator to generate L random bits, b,
using the initial seed as x.
4. Encrypted message, c_i = m_i XOR b_i, 1 <= i <= L.
5. x_L = x^(2^L) mod key.
6. Return (c, x_L)
Parameters
==========
i
Message, a non-negative integer
key
The public key
Returns
=======
Tuple
(encrypted_message, x_L)
Raises
======
ValueError
If i is negative.
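Examples
========
An illustrative round trip with a small key; the ``seed`` list only
fixes the random value ``r`` so the result is deterministic:
>>> from sympy.crypto.crypto import bg_public_key, encipher_bg, decipher_bg
>>> p, q = 19, 23
>>> N = bg_public_key(p, q)
>>> ct = encipher_bg(12, N, seed=[100])
>>> decipher_bg(ct, (p, q))
12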
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
enc_msg = []
while i > 0:
enc_msg.append(i % 2)
i //= 2
enc_msg.reverse()
L = len(enc_msg)
r = _randint(seed)(2, key - 1)
x = r**2 % key
x_L = pow(int(x), int(2**L), int(key))
rand_bits = []
for _ in range(L):
rand_bits.append(x % 2)
x = x**2 % key
encrypt_msg = [m ^ b for (m, b) in zip(enc_msg, rand_bits)]
return (encrypt_msg, x_L)
def decipher_bg(message, key):
"""
Decrypts the message using private keys.
Explanation
===========
ALGORITHM:
1. Let c be the encrypted message, y the second number received,
and p and q be the private keys.
2. Compute r_p = y^(((p+1)/4)^L) mod p and
r_q = y^(((q+1)/4)^L) mod q.
3. Compute x_0 = (q(q^-1 mod p)r_p + p(p^-1 mod q)r_q) mod N.
4. From x_0, recompute the bits using the BBS generator, as in the
encryption algorithm.
5. Compute the original message by XORing c and b.
Parameters
==========
message
Tuple of encrypted message and a non-negative integer.
key
Tuple of private keys.
Returns
=======
orig_msg
The original message
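Examples
========
The pair below is a ciphertext for the message ``12`` under the
public key ``437 = 19*23`` (cf. ``encipher_bg``):
>>> from sympy.crypto.crypto import decipher_bg
>>> decipher_bg(([1, 1, 0, 0], 256), (19, 23))
12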
"""
p, q = key
encrypt_msg, y = message
public_key = p * q
L = len(encrypt_msg)
# (p + 1)/4 and (q + 1)/4 are exact integers since p, q = 3 mod 4;
# use integer division to avoid float precision loss for large keys.
p_t = ((p + 1)//4)**L
q_t = ((q + 1)//4)**L
r_p = pow(int(y), int(p_t), int(p))
r_q = pow(int(y), int(q_t), int(q))
x = (q * mod_inverse(q, p) * r_p + p * mod_inverse(p, q) * r_q) % public_key
orig_bits = []
for _ in range(L):
orig_bits.append(x % 2)
x = x**2 % public_key
orig_msg = 0
for (m, b) in zip(encrypt_msg, orig_bits):
orig_msg = orig_msg * 2
orig_msg += (m ^ b)
return orig_msg
|
1a50b5e870b754db421c564dba5d6d9c308d3f2fa238f2990468e94db4e56e1e | from __future__ import annotations
from typing import Callable
from sympy.core import S, Add, Expr, Basic, Mul, Pow, Rational
from sympy.core.logic import fuzzy_not
from sympy.logic.boolalg import Boolean
from sympy.assumptions import ask, Q # type: ignore
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Explanation
===========
Unlike :func:`~.simplify()` which performs structural simplification
without any assumption, this function transforms the expression into
the form which is only valid under certain assumptions. Note that
``simplify()`` is generally not done in refining process.
Refining boolean expression involves reducing it to ``S.true`` or
``S.false``. Unlike :func:`~.ask()`, the expression will not be reduced
if the truth value cannot be determined.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
>>> refine(Q.real(x), Q.positive(x))
True
>>> refine(Q.positive(x), Q.real(x))
Q.positive(x)
See Also
========
sympy.simplify.simplify.simplify : Structural simplification without assumptions.
sympy.assumptions.ask.ask : Query for boolean expressions using assumptions.
"""
if not isinstance(expr, Basic):
return expr
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
if hasattr(expr, '_eval_refine'):
ref_expr = expr._eval_refine(assumptions)
if ref_expr is not None:
return ref_expr
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
from sympy.functions.elementary.complexes import Abs
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
# arg is Mul
if isinstance(arg, Mul):
r = [refine(abs(a), assumptions) for a in arg.args]
non_abs = []
in_abs = []
for i in r:
if isinstance(i, Abs):
in_abs.append(i.args[0])
else:
non_abs.append(i)
return Mul(*non_abs) * Abs(Mul(*in_abs))
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if isinstance(expr.base, Pow):
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set()
odd_terms = set()
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_atan2(expr, assumptions):
"""
Handler for the atan2 function.
Examples
========
>>> from sympy import Q, atan2
>>> from sympy.assumptions.refine import refine_atan2
>>> from sympy.abc import x, y
>>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
atan(y/x)
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
atan(y/x) - pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
atan(y/x) + pi
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
pi/2
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
-pi/2
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
nan
"""
from sympy.functions.elementary.trigonometric import atan
y, x = expr.args
if ask(Q.real(y) & Q.positive(x), assumptions):
return atan(y / x)
elif ask(Q.negative(y) & Q.negative(x), assumptions):
return atan(y / x) - S.Pi
elif ask(Q.positive(y) & Q.negative(x), assumptions):
return atan(y / x) + S.Pi
elif ask(Q.zero(y) & Q.negative(x), assumptions):
return S.Pi
elif ask(Q.positive(y) & Q.zero(x), assumptions):
return S.Pi/2
elif ask(Q.negative(y) & Q.zero(x), assumptions):
return -S.Pi/2
elif ask(Q.zero(y) & Q.zero(x), assumptions):
return S.NaN
else:
return expr
def refine_re(expr, assumptions):
"""
Handler for real part.
Examples
========
>>> from sympy.assumptions.refine import refine_re
>>> from sympy import Q, re
>>> from sympy.abc import x
>>> refine_re(re(x), Q.real(x))
x
>>> refine_re(re(x), Q.imaginary(x))
0
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return arg
if ask(Q.imaginary(arg), assumptions):
return S.Zero
return _refine_reim(expr, assumptions)
def refine_im(expr, assumptions):
"""
Handler for imaginary part.
Examples
========
>>> from sympy.assumptions.refine import refine_im
>>> from sympy import Q, im
>>> from sympy.abc import x
>>> refine_im(im(x), Q.real(x))
0
>>> refine_im(im(x), Q.imaginary(x))
-I*x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return S.Zero
if ask(Q.imaginary(arg), assumptions):
return - S.ImaginaryUnit * arg
return _refine_reim(expr, assumptions)
def refine_arg(expr, assumptions):
"""
Handler for complex argument.
Examples
========
>>> from sympy.assumptions.refine import refine_arg
>>> from sympy import Q, arg
>>> from sympy.abc import x
>>> refine_arg(arg(x), Q.positive(x))
0
>>> refine_arg(arg(x), Q.negative(x))
pi
"""
rg = expr.args[0]
if ask(Q.positive(rg), assumptions):
return S.Zero
if ask(Q.negative(rg), assumptions):
return S.Pi
return None
def _refine_reim(expr, assumptions):
# Helper function for refine_re & refine_im
expanded = expr.expand(complex = True)
if expanded != expr:
refined = refine(expanded, assumptions)
if refined != expanded:
return refined
# Best to leave the expression as is
return None
def refine_sign(expr, assumptions):
"""
Handler for sign.
Examples
========
>>> from sympy.assumptions.refine import refine_sign
>>> from sympy import Symbol, Q, sign, im
>>> x = Symbol('x', real = True)
>>> expr = sign(x)
>>> refine_sign(expr, Q.positive(x) & Q.nonzero(x))
1
>>> refine_sign(expr, Q.negative(x) & Q.nonzero(x))
-1
>>> refine_sign(expr, Q.zero(x))
0
>>> y = Symbol('y', imaginary = True)
>>> expr = sign(y)
>>> refine_sign(expr, Q.positive(im(y)))
I
>>> refine_sign(expr, Q.negative(im(y)))
-I
"""
arg = expr.args[0]
if ask(Q.zero(arg), assumptions):
return S.Zero
if ask(Q.real(arg)):
if ask(Q.positive(arg), assumptions):
return S.One
if ask(Q.negative(arg), assumptions):
return S.NegativeOne
if ask(Q.imaginary(arg)):
arg_re, arg_im = arg.as_real_imag()
if ask(Q.positive(arg_im), assumptions):
return S.ImaginaryUnit
if ask(Q.negative(arg_im), assumptions):
return -S.ImaginaryUnit
return expr
def refine_matrixelement(expr, assumptions):
"""
Handler for matrix elements, using the symmetry of the matrix.
Examples
========
>>> from sympy.assumptions.refine import refine_matrixelement
>>> from sympy import MatrixSymbol, Q
>>> X = MatrixSymbol('X', 3, 3)
>>> refine_matrixelement(X[0, 1], Q.symmetric(X))
X[0, 1]
>>> refine_matrixelement(X[1, 0], Q.symmetric(X))
X[0, 1]
"""
from sympy.matrices.expressions.matexpr import MatrixElement
matrix, i, j = expr.args
if ask(Q.symmetric(matrix), assumptions):
if (i - j).could_extract_minus_sign():
return expr
return MatrixElement(matrix, j, i)
handlers_dict: dict[str, Callable[[Expr, Boolean], Expr]] = {
'Abs': refine_abs,
'Pow': refine_Pow,
'atan2': refine_atan2,
're': refine_re,
'im': refine_im,
'arg': refine_arg,
'sign': refine_sign,
'MatrixElement': refine_matrixelement
}
|
0c4c819d0027eb649039a6ba6fdfbc4c3caff32337d5cef3ef75151699fe0139 | """
Functions and a wrapper object to call assumption properties and predicate
queries with the same syntax.
In SymPy, there are two assumption systems. The old assumption system is
defined in sympy/core/assumptions, and it can be accessed by attributes
such as ``x.is_even``. The new assumption system is defined in
sympy/assumptions, and it can be accessed by predicates such as
``Q.even(x)``.
The old assumptions are fast, while the new assumptions can freely take local facts.
In general, the old assumptions are used in evaluation methods and the new
assumptions are used in refinement methods.
In most cases, both evaluation and refinement follow the same process, and
the only difference is which assumption system is used. This module provides
``is_[...]()`` functions and the ``AssumptionsWrapper()`` class, which allow
using the two systems with the same syntax so that parallel code implementations
can be avoided.
Examples
========
For multiple use, use ``AssumptionsWrapper()``.
>>> from sympy import Q, Symbol
>>> from sympy.assumptions.wrapper import AssumptionsWrapper
>>> x = Symbol('x')
>>> _x = AssumptionsWrapper(x, Q.even(x))
>>> _x.is_integer
True
>>> _x.is_odd
False
For single use, use ``is_[...]()`` functions.
>>> from sympy.assumptions.wrapper import is_infinite
>>> a = Symbol('a')
>>> print(is_infinite(a))
None
>>> is_infinite(a, Q.finite(a))
False
"""
from sympy.assumptions import ask, Q
from sympy.core.assumptions import (_assume_defined, as_property,
ManagedProperties)
from sympy.core.basic import Basic
from sympy.core.sympify import _sympify
class AssumptionsWrapperMeta(ManagedProperties):
"""
Metaclass to give _eval_is_[...] attributes to AssumptionsWrapper
"""
def __init__(cls, *args, **kws):
for fact in _assume_defined:
pname = "_eval_%s" % as_property(fact)
setattr(cls, pname, make_eval_method(fact))
super().__init__(cls, *args, **kws)
def make_eval_method(fact):
def getit(self):
try:
pred = getattr(Q, fact)
ret = ask(pred(self.expr), self.assumptions)
return ret
except AttributeError:
return None
return getit
# we subclass Basic to use the fact deduction and caching
class AssumptionsWrapper(Basic, metaclass=AssumptionsWrapperMeta):
"""
Wrapper over ``Basic`` instances to call predicate query by
``.is_[...]`` property
Parameters
==========
expr : Basic
assumptions : Boolean, optional
Examples
========
>>> from sympy import Q, Symbol
>>> from sympy.assumptions.wrapper import AssumptionsWrapper
>>> x = Symbol('x', even=True)
>>> AssumptionsWrapper(x).is_integer
True
>>> y = Symbol('y')
>>> AssumptionsWrapper(y, Q.even(y)).is_integer
True
With ``AssumptionsWrapper``, both evaluation and refinement can be supported
by single implementation.
>>> from sympy import Function
>>> class MyAbs(Function):
... @classmethod
... def eval(cls, x, assumptions=True):
... _x = AssumptionsWrapper(x, assumptions)
... if _x.is_nonnegative:
... return x
... if _x.is_negative:
... return -x
... def _eval_refine(self, assumptions):
... return MyAbs.eval(self.args[0], assumptions)
>>> MyAbs(x)
MyAbs(x)
>>> MyAbs(x).refine(Q.positive(x))
x
>>> MyAbs(Symbol('y', negative=True))
-y
"""
def __new__(cls, expr, assumptions=None):
if assumptions is None:
return expr
obj = super().__new__(cls, expr, _sympify(assumptions))
obj.expr = expr
obj.assumptions = assumptions
return obj
# one shot functions which are faster than AssumptionsWrapper
def is_infinite(obj, assumptions=None):
if assumptions is None:
return obj.is_infinite
return ask(Q.infinite(obj), assumptions)
def is_extended_real(obj, assumptions=None):
if assumptions is None:
return obj.is_extended_real
return ask(Q.extended_real(obj), assumptions)
def is_extended_nonnegative(obj, assumptions=None):
if assumptions is None:
return obj.is_extended_nonnegative
return ask(Q.extended_nonnegative(obj), assumptions)
|
95dfd959097187d4771253140639ea55fc042768ab3d8b49eaf5799967734acd | """
Module to evaluate the proposition with assumptions using SAT algorithm.
"""
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.assumptions.ask_generated import get_all_known_facts
from sympy.assumptions.assume import global_assumptions, AppliedPredicate
from sympy.assumptions.sathandlers import class_fact_registry
from sympy.core import oo
from sympy.logic.inference import satisfiable
from sympy.assumptions.cnf import CNF, EncodedCNF
def satask(proposition, assumptions=True, context=global_assumptions,
use_known_facts=True, iterations=oo):
"""
Function to evaluate the proposition with assumptions using SAT algorithm.
This function extracts every fact relevant to the expressions composing
the proposition and assumptions. For example, if a predicate containing
``Abs(x)`` is proposed, then ``Q.zero(Abs(x)) | Q.positive(Abs(x))``
will be found and passed to the SAT solver because ``Q.nonnegative`` is
registered as a fact for ``Abs``.
The proposition is evaluated to ``True`` or ``False`` if the truth value can be
determined. If not, ``None`` is returned.
Parameters
==========
proposition : Any boolean expression.
Proposition which will be evaluated to boolean value.
assumptions : Any boolean expression, optional.
Local assumptions to evaluate the *proposition*.
context : AssumptionsContext, optional.
Default assumptions to evaluate the *proposition*. By default,
this is ``sympy.assumptions.global_assumptions`` variable.
use_known_facts : bool, optional.
If ``True``, facts from ``sympy.assumptions.ask_generated``
module are passed to SAT solver as well.
iterations : int, optional.
Number of times that relevant facts are recursively extracted.
Default is infinite times until no new fact is found.
Returns
=======
``True``, ``False``, or ``None``
Examples
========
>>> from sympy import Abs, Q
>>> from sympy.assumptions.satask import satask
>>> from sympy.abc import x
>>> satask(Q.zero(Abs(x)), Q.zero(x))
True
"""
props = CNF.from_prop(proposition)
_props = CNF.from_prop(~proposition)
assumptions = CNF.from_prop(assumptions)
context_cnf = CNF()
if context:
context_cnf = context_cnf.extend(context)
sat = get_all_relevant_facts(props, assumptions, context_cnf,
use_known_facts=use_known_facts, iterations=iterations)
sat.add_from_cnf(assumptions)
if context:
sat.add_from_cnf(context_cnf)
return check_satisfiability(props, _props, sat)
def check_satisfiability(prop, _prop, factbase):
sat_true = factbase.copy()
sat_false = factbase.copy()
sat_true.add_from_cnf(prop)
sat_false.add_from_cnf(_prop)
can_be_true = satisfiable(sat_true)
can_be_false = satisfiable(sat_false)
if can_be_true and can_be_false:
return None
if can_be_true and not can_be_false:
return True
if not can_be_true and can_be_false:
return False
if not can_be_true and not can_be_false:
# TODO: Run additional checks to see which combination of the
# assumptions, global_assumptions, and relevant_facts are
# inconsistent.
raise ValueError("Inconsistent assumptions")
def extract_predargs(proposition, assumptions=None, context=None):
"""
Extract every expression in the argument of predicates from *proposition*,
*assumptions* and *context*.
Parameters
==========
proposition : sympy.assumptions.cnf.CNF
assumptions : sympy.assumptions.cnf.CNF, optional.
context : sympy.assumptions.cnf.CNF, optional.
CNF generated from assumptions context.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.assumptions.satask import extract_predargs
>>> from sympy.abc import x, y
>>> props = CNF.from_prop(Q.zero(Abs(x*y)))
>>> assump = CNF.from_prop(Q.zero(x) & Q.zero(y))
>>> extract_predargs(props, assump)
{x, y, Abs(x*y)}
"""
req_keys = find_symbols(proposition)
keys = proposition.all_predicates()
# XXX: We need this since True/False are not Basic
lkeys = set()
if assumptions:
lkeys |= assumptions.all_predicates()
if context:
lkeys |= context.all_predicates()
lkeys = lkeys - {S.true, S.false}
tmp_keys = None
while tmp_keys != set():
tmp = set()
for l in lkeys:
syms = find_symbols(l)
if (syms & req_keys) != set():
tmp |= syms
tmp_keys = tmp - req_keys
req_keys |= tmp_keys
keys |= {l for l in lkeys if find_symbols(l) & req_keys != set()}
exprs = set()
for key in keys:
if isinstance(key, AppliedPredicate):
exprs |= set(key.arguments)
else:
exprs.add(key)
return exprs
def find_symbols(pred):
"""
Find every :obj:`~.Symbol` in *pred*.
Parameters
==========
pred : sympy.assumptions.cnf.CNF, or any Expr.
"""
if isinstance(pred, CNF):
symbols = set()
for a in pred.all_predicates():
symbols |= find_symbols(a)
return symbols
return pred.atoms(Symbol)
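# Hedged illustration (hypothetical helper, never called): find_symbols()
# collects the free Symbols appearing inside the predicates of a CNF.
def _demo_find_symbols():
    from sympy import Abs, Q
    from sympy.abc import x, y
    return find_symbols(CNF.from_prop(Q.zero(Abs(x*y))))  # {x, y}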
def get_relevant_clsfacts(exprs, relevant_facts=None):
"""
Extract relevant facts from the items in *exprs*. Facts are defined in
``assumptions.sathandlers`` module.
This function is recursively called by ``get_all_relevant_facts()``.
Parameters
==========
exprs : set
Expressions whose relevant facts are searched.
relevant_facts : sympy.assumptions.cnf.CNF, optional.
Pre-discovered relevant facts.
Returns
=======
exprs : set
Candidates for next relevant fact searching.
relevant_facts : sympy.assumptions.cnf.CNF
Updated relevant facts.
Examples
========
Here, we will see how facts relevant to ``Abs(x*y)`` are recursively
    extracted. On the first run, a set containing the expression is passed
    without pre-discovered relevant facts. The result is a set containing
    candidates for the next run, and a ``CNF()`` instance containing facts
    which are relevant to ``Abs`` and its argument.
>>> from sympy import Abs
>>> from sympy.assumptions.satask import get_relevant_clsfacts
>>> from sympy.abc import x, y
>>> exprs = {Abs(x*y)}
>>> exprs, facts = get_relevant_clsfacts(exprs)
>>> exprs
{x*y}
>>> facts.clauses #doctest: +SKIP
{frozenset({Literal(Q.odd(Abs(x*y)), False), Literal(Q.odd(x*y), True)}),
frozenset({Literal(Q.zero(Abs(x*y)), False), Literal(Q.zero(x*y), True)}),
frozenset({Literal(Q.even(Abs(x*y)), False), Literal(Q.even(x*y), True)}),
frozenset({Literal(Q.zero(Abs(x*y)), True), Literal(Q.zero(x*y), False)}),
frozenset({Literal(Q.even(Abs(x*y)), False),
Literal(Q.odd(Abs(x*y)), False),
Literal(Q.odd(x*y), True)}),
frozenset({Literal(Q.even(Abs(x*y)), False),
Literal(Q.even(x*y), True),
Literal(Q.odd(Abs(x*y)), False)}),
frozenset({Literal(Q.positive(Abs(x*y)), False),
Literal(Q.zero(Abs(x*y)), False)})}
    We pass the first run's results to the second run, and get the expressions
    for the next run and the updated facts.
>>> exprs, facts = get_relevant_clsfacts(exprs, relevant_facts=facts)
>>> exprs
{x, y}
    On the final run, no more candidates are returned, so we know that all
    relevant facts have been successfully retrieved.
>>> exprs, facts = get_relevant_clsfacts(exprs, relevant_facts=facts)
>>> exprs
set()
"""
if not relevant_facts:
relevant_facts = CNF()
newexprs = set()
for expr in exprs:
for fact in class_fact_registry(expr):
newfact = CNF.to_CNF(fact)
relevant_facts = relevant_facts._and(newfact)
for key in newfact.all_predicates():
if isinstance(key, AppliedPredicate):
newexprs |= set(key.arguments)
return newexprs - exprs, relevant_facts
def get_all_relevant_facts(proposition, assumptions, context,
use_known_facts=True, iterations=oo):
"""
Extract all relevant facts from *proposition* and *assumptions*.
This function extracts the facts by recursively calling
``get_relevant_clsfacts()``. Extracted facts are converted to
``EncodedCNF`` and returned.
Parameters
==========
proposition : sympy.assumptions.cnf.CNF
CNF generated from proposition expression.
assumptions : sympy.assumptions.cnf.CNF
CNF generated from assumption expression.
context : sympy.assumptions.cnf.CNF
CNF generated from assumptions context.
use_known_facts : bool, optional.
If ``True``, facts from ``sympy.assumptions.ask_generated``
module are encoded as well.
iterations : int, optional.
Number of times that relevant facts are recursively extracted.
        By default, extraction is repeated until no new fact is found.
Returns
=======
sympy.assumptions.cnf.EncodedCNF
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.assumptions.satask import get_all_relevant_facts
>>> from sympy.abc import x, y
>>> props = CNF.from_prop(Q.nonzero(x*y))
>>> assump = CNF.from_prop(Q.nonzero(x))
>>> context = CNF.from_prop(Q.nonzero(y))
>>> get_all_relevant_facts(props, assump, context) #doctest: +SKIP
<sympy.assumptions.cnf.EncodedCNF at 0x7f09faa6ccd0>
"""
# The relevant facts might introduce new keys, e.g., Q.zero(x*y) will
# introduce the keys Q.zero(x) and Q.zero(y), so we need to run it until
# we stop getting new things. Hopefully this strategy won't lead to an
# infinite loop in the future.
i = 0
relevant_facts = CNF()
all_exprs = set()
while True:
if i == 0:
exprs = extract_predargs(proposition, assumptions, context)
all_exprs |= exprs
exprs, relevant_facts = get_relevant_clsfacts(exprs, relevant_facts)
i += 1
if i >= iterations:
break
if not exprs:
break
if use_known_facts:
known_facts_CNF = CNF()
known_facts_CNF.add_clauses(get_all_known_facts())
kf_encoded = EncodedCNF()
kf_encoded.from_cnf(known_facts_CNF)
def translate_literal(lit, delta):
if lit > 0:
return lit + delta
else:
return lit - delta
def translate_data(data, delta):
return [{translate_literal(i, delta) for i in clause} for clause in data]
data = []
symbols = []
n_lit = len(kf_encoded.symbols)
for i, expr in enumerate(all_exprs):
symbols += [pred(expr) for pred in kf_encoded.symbols]
data += translate_data(kf_encoded.data, i * n_lit)
encoding = dict(list(zip(symbols, range(1, len(symbols)+1))))
ctx = EncodedCNF(data, encoding)
else:
ctx = EncodedCNF()
ctx.add_from_cnf(relevant_facts)
return ctx
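# Hedged illustration of the literal-offset scheme used in
# get_all_relevant_facts() above: each expression receives its own copy of the
# generic known-facts CNF by shifting every signed-integer literal by
# i * n_lit, preserving the sign so negated literals stay negated. The clause
# data below is made up purely for demonstration.
def _demo_literal_offset():
    def shift(lit, delta):
        return lit + delta if lit > 0 else lit - delta
    generic_clauses = [{1, -2}, {-1, 3}]  # a CNF over 3 pseudo-predicates
    n_lit = 3
    copies = []
    for i in range(2):  # two expressions -> two shifted copies
        copies += [{shift(lit, i * n_lit) for lit in clause}
                   for clause in generic_clauses]
    return copies  # [{1, -2}, {-1, 3}, {4, -5}, {-4, 6}]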
|
ef8ffaf267791719e0b41d2c737f8db7534d91c730cb32303e2f5e08c98865f0 | """A module which implements predicates and assumption context."""
from contextlib import contextmanager
import inspect
from sympy.core.assumptions import ManagedProperties
from sympy.core.symbol import Str
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import Boolean, false, true
from sympy.multipledispatch.dispatcher import Dispatcher, str_signature
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import is_sequence
from sympy.utilities.source import get_class
class AssumptionsContext(set):
"""
Set containing default assumptions which are applied to the ``ask()``
function.
Explanation
===========
This is used to represent global assumptions, but you can also use this
class to create your own local assumptions contexts. It is basically a thin
    wrapper around Python's built-in set, so see its documentation for advanced usage.
Examples
========
The default assumption context is ``global_assumptions``, which is initially empty:
>>> from sympy import ask, Q
>>> from sympy.assumptions import global_assumptions
>>> global_assumptions
AssumptionsContext()
You can add default assumptions:
>>> from sympy.abc import x
>>> global_assumptions.add(Q.real(x))
>>> global_assumptions
AssumptionsContext({Q.real(x)})
>>> ask(Q.real(x))
True
And remove them:
>>> global_assumptions.remove(Q.real(x))
>>> print(ask(Q.real(x)))
None
The ``clear()`` method removes every assumption:
>>> global_assumptions.add(Q.positive(x))
>>> global_assumptions
AssumptionsContext({Q.positive(x)})
>>> global_assumptions.clear()
>>> global_assumptions
AssumptionsContext()
See Also
========
assuming
"""
def add(self, *assumptions):
"""Add assumptions."""
for a in assumptions:
super().add(a)
def _sympystr(self, printer):
if not self:
return "%s()" % self.__class__.__name__
return "{}({})".format(self.__class__.__name__, printer._print_set(self))
global_assumptions = AssumptionsContext()
class AppliedPredicate(Boolean):
"""
The class of expressions resulting from applying ``Predicate`` to
    the arguments. ``AppliedPredicate`` merely wraps its arguments and
    remains unevaluated. To evaluate it, use the ``ask()`` function.
Examples
========
>>> from sympy import Q, ask
>>> Q.integer(1)
Q.integer(1)
The ``function`` attribute returns the predicate, and the ``arguments``
attribute returns the tuple of arguments.
>>> type(Q.integer(1))
<class 'sympy.assumptions.assume.AppliedPredicate'>
>>> Q.integer(1).function
Q.integer
>>> Q.integer(1).arguments
(1,)
Applied predicates can be evaluated to a boolean value with ``ask``:
>>> ask(Q.integer(1))
True
"""
__slots__ = ()
def __new__(cls, predicate, *args):
if not isinstance(predicate, Predicate):
raise TypeError("%s is not a Predicate." % predicate)
args = map(_sympify, args)
return super().__new__(cls, predicate, *args)
@property
def arg(self):
"""
Return the expression used by this assumption.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> a = Q.integer(x + 1)
>>> a.arg
x + 1
"""
# Will be deprecated
args = self._args
if len(args) == 2:
# backwards compatibility
return args[1]
raise TypeError("'arg' property is allowed only for unary predicates.")
@property
def function(self):
"""
Return the predicate.
"""
# Will be changed to self.args[0] after args overriding is removed
return self._args[0]
@property
def arguments(self):
"""
Return the arguments which are applied to the predicate.
"""
# Will be changed to self.args[1:] after args overriding is removed
return self._args[1:]
def _eval_ask(self, assumptions):
return self.function.eval(self.arguments, assumptions)
@property
def binary_symbols(self):
from .ask import Q
if self.function == Q.is_true:
i = self.arguments[0]
if i.is_Boolean or i.is_Symbol:
return i.binary_symbols
if self.function in (Q.eq, Q.ne):
if true in self.arguments or false in self.arguments:
if self.arguments[0].is_Symbol:
return {self.arguments[0]}
elif self.arguments[1].is_Symbol:
return {self.arguments[1]}
return set()
class PredicateMeta(ManagedProperties):
def __new__(cls, clsname, bases, dct):
# If handler is not defined, assign empty dispatcher.
if "handler" not in dct:
name = f"Ask{clsname.capitalize()}Handler"
handler = Dispatcher(name, doc="Handler for key %s" % name)
dct["handler"] = handler
dct["_orig_doc"] = dct.get("__doc__", "")
return super().__new__(cls, clsname, bases, dct)
@property
def __doc__(cls):
handler = cls.handler
doc = cls._orig_doc
if cls is not Predicate and handler is not None:
doc += "Handler\n"
doc += " =======\n\n"
# Append the handler's doc without breaking sphinx documentation.
docs = [" Multiply dispatched method: %s" % handler.name]
if handler.doc:
for line in handler.doc.splitlines():
if not line:
continue
docs.append(" %s" % line)
other = []
for sig in handler.ordering[::-1]:
func = handler.funcs[sig]
if func.__doc__:
s = ' Inputs: <%s>' % str_signature(sig)
lines = []
for line in func.__doc__.splitlines():
lines.append(" %s" % line)
s += "\n".join(lines)
docs.append(s)
else:
other.append(str_signature(sig))
if other:
othersig = " Other signatures:"
for line in other:
othersig += "\n * %s" % line
docs.append(othersig)
doc += '\n\n'.join(docs)
return doc
class Predicate(Boolean, metaclass=PredicateMeta):
"""
Base class for mathematical predicates. It also serves as a
constructor for undefined predicate objects.
Explanation
===========
    A predicate is a function that returns a boolean value [1].
    Predicate functions are objects, and they are instances of predicate classes.
    When a predicate is applied to arguments, an ``AppliedPredicate``
    instance is returned. This merely wraps the arguments and remains
    unevaluated. To obtain the truth value of an applied predicate, use the
    function ``ask``.
    Evaluation of a predicate is done by multiple dispatch. You can
    register new handlers to the predicate to support new types.
    Every predicate in SymPy can be accessed as a property of ``Q``.
    For example, ``Q.even`` returns the predicate which checks whether the
    argument is an even number.
    To define a predicate which can be evaluated, you must subclass this
    class, make an instance of it, and register it to ``Q``. After that,
    dispatch handlers by argument types.
    If you construct a predicate directly from this class, you will get an
    ``UndefinedPredicate``, which cannot be dispatched. This is useful
    when you are building boolean expressions which do not need to be
    evaluated.
Examples
========
Applying and evaluating to boolean value:
>>> from sympy import Q, ask
>>> ask(Q.prime(7))
True
You can define a new predicate by subclassing and dispatching. Here,
we define a predicate for sexy primes [2] as an example.
>>> from sympy import Predicate, Integer
>>> class SexyPrimePredicate(Predicate):
... name = "sexyprime"
>>> Q.sexyprime = SexyPrimePredicate()
>>> @Q.sexyprime.register(Integer, Integer)
... def _(int1, int2, assumptions):
... args = sorted([int1, int2])
... if not all(ask(Q.prime(a), assumptions) for a in args):
... return False
... return args[1] - args[0] == 6
>>> ask(Q.sexyprime(5, 11))
True
    Constructing this class directly returns an ``UndefinedPredicate``, which
    can be applied but cannot be dispatched.
>>> from sympy import Predicate, Integer
>>> Q.P = Predicate("P")
>>> type(Q.P)
<class 'sympy.assumptions.assume.UndefinedPredicate'>
>>> Q.P(1)
Q.P(1)
>>> Q.P.register(Integer)(lambda expr, assump: True)
Traceback (most recent call last):
...
TypeError: <class 'sympy.assumptions.assume.UndefinedPredicate'> cannot be dispatched.
References
==========
.. [1] https://en.wikipedia.org/wiki/Predicate_(mathematical_logic)
.. [2] https://en.wikipedia.org/wiki/Sexy_prime
"""
is_Atom = True
def __new__(cls, *args, **kwargs):
if cls is Predicate:
return UndefinedPredicate(*args, **kwargs)
obj = super().__new__(cls, *args)
return obj
@property
def name(self):
# May be overridden
return type(self).__name__
@classmethod
def register(cls, *types, **kwargs):
"""
Register the signature to the handler.
"""
if cls.handler is None:
raise TypeError("%s cannot be dispatched." % type(cls))
return cls.handler.register(*types, **kwargs)
@classmethod
def register_many(cls, *types, **kwargs):
"""
        Register multiple signatures to the same handler.
"""
def _(func):
for t in types:
if not is_sequence(t):
t = (t,) # for convenience, allow passing `type` to mean `(type,)`
cls.register(*t, **kwargs)(func)
return _
def __call__(self, *args):
return AppliedPredicate(self, *args)
def eval(self, args, assumptions=True):
"""
Evaluate ``self(*args)`` under the given assumptions.
This uses only direct resolution methods, not logical inference.
"""
result = None
try:
result = self.handler(*args, assumptions=assumptions)
except NotImplementedError:
pass
return result
def _eval_refine(self, assumptions):
# When Predicate is no longer Boolean, delete this method
return self
class UndefinedPredicate(Predicate):
"""
Predicate without handler.
Explanation
===========
This predicate is generated by using ``Predicate`` directly for
construction. It does not have a handler, and evaluating this with
    arguments is done by the SAT solver.
Examples
========
>>> from sympy import Predicate, Q
>>> Q.P = Predicate('P')
>>> Q.P.func
<class 'sympy.assumptions.assume.UndefinedPredicate'>
>>> Q.P.name
Str('P')
"""
handler = None
def __new__(cls, name, handlers=None):
# "handlers" parameter supports old design
if not isinstance(name, Str):
name = Str(name)
obj = super(Boolean, cls).__new__(cls, name)
obj.handlers = handlers or []
return obj
@property
def name(self):
return self.args[0]
def _hashable_content(self):
return (self.name,)
def __getnewargs__(self):
return (self.name,)
def __call__(self, expr):
return AppliedPredicate(self, expr)
def add_handler(self, handler):
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Predicate.add_handler()
should be replaced with the multipledispatch handler of Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
)
self.handlers.append(handler)
def remove_handler(self, handler):
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Predicate.remove_handler()
should be replaced with the multipledispatch handler of Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
)
self.handlers.remove(handler)
def eval(self, args, assumptions=True):
# Support for deprecated design
# When old design is removed, this will always return None
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Evaluating UndefinedPredicate
objects should be replaced with the multipledispatch handler of
Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
stacklevel=5,
)
expr, = args
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
eval_ = getattr(cls, subclass.__name__, None)
if eval_ is None:
continue
res = eval_(expr, assumptions)
# Do not stop if value returned is None
# Try to check for higher classes
if res is None:
continue
if _res is None:
_res = res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
@contextmanager
def assuming(*assumptions):
"""
Context manager for assumptions.
Examples
========
>>> from sympy import assuming, Q, ask
>>> from sympy.abc import x, y
>>> print(ask(Q.integer(x + y)))
None
>>> with assuming(Q.integer(x), Q.integer(y)):
... print(ask(Q.integer(x + y)))
True
"""
old_global_assumptions = global_assumptions.copy()
global_assumptions.update(assumptions)
try:
yield
finally:
global_assumptions.clear()
global_assumptions.update(old_global_assumptions)
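# Hedged illustration (hypothetical helper, never called): because the restore
# happens in the ``finally`` block above, assuming() puts the previous global
# assumptions back even if the body raises.
def _demo_assuming_restores_on_error():
    from sympy import Q, Symbol
    x = Symbol('x')
    before = set(global_assumptions)
    try:
        with assuming(Q.positive(x)):
            raise RuntimeError("boom")
    except RuntimeError:
        pass
    return set(global_assumptions) == before  # True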
|
cab968d00a18fccb571cfd63d2a295e148008525c1069bc71d19c326e085b02f | from sympy.core.add import Add
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand_log, _mexpand
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly, factor
from sympy.simplify.simplify import separatevars
from sympy.simplify.radsimp import collect
from sympy.simplify.simplify import powsimp
from sympy.solvers.solvers import solve, _invert
from sympy.utilities.iterables import uniq
def _filtered_gens(poly, symbol):
"""process the generators of ``poly``, returning the set of generators that
have ``symbol``. If there are two generators that are inverses of each other,
prefer the one that has no denominator.
Examples
========
>>> from sympy.solvers.bivariate import _filtered_gens
>>> from sympy import Poly, exp
>>> from sympy.abc import x
>>> _filtered_gens(Poly(x + 1/x + exp(x)), x)
{x, exp(x)}
"""
# TODO it would be good to pick the smallest divisible power
# instead of the base for something like x**4 + x**2 -->
# return x**2 not x
gens = {g for g in poly.gens if symbol in g.free_symbols}
for g in list(gens):
ag = 1/g
if g in gens and ag in gens:
if ag.as_numer_denom()[1] is not S.One:
g = ag
gens.remove(g)
return gens
def _mostfunc(lhs, func, X=None):
"""Returns the term in lhs which contains the most of the
func-type things e.g. log(log(x)) wins over log(x) if both terms appear.
``func`` can be a function (exp, log, etc...) or any other SymPy object,
like Pow.
    If ``X`` is not ``None``, then the function returns the term containing ``X``
    with the most occurrences of ``func``.
Examples
========
>>> from sympy.solvers.bivariate import _mostfunc
>>> from sympy import exp
>>> from sympy.abc import x, y
>>> _mostfunc(exp(x) + exp(exp(x) + 2), exp)
exp(exp(x) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp)
exp(exp(y) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp, x)
exp(x)
>>> _mostfunc(x, exp, x) is None
True
>>> _mostfunc(exp(x) + exp(x*y), exp, x)
exp(x)
"""
fterms = [tmp for tmp in lhs.atoms(func) if (not X or
X.is_Symbol and X in tmp.free_symbols or
not X.is_Symbol and tmp.has(X))]
if len(fterms) == 1:
return fterms[0]
elif fterms:
return max(list(ordered(fterms)), key=lambda x: x.count(func))
return None
def _linab(arg, symbol):
"""Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``
where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are
independent of ``symbol``.
Examples
========
>>> from sympy.solvers.bivariate import _linab
>>> from sympy.abc import x, y
>>> from sympy import exp, S
>>> _linab(S(2), x)
(2, 0, 1)
>>> _linab(2*x, x)
(2, 0, x)
>>> _linab(y + y*x + 2*x, x)
(y + 2, y, x)
>>> _linab(3 + 2*exp(x), x)
(2, 3, exp(x))
"""
arg = factor_terms(arg.expand())
ind, dep = arg.as_independent(symbol)
if arg.is_Mul and dep.is_Add:
a, b, x = _linab(dep, symbol)
return ind*a, ind*b, x
if not arg.is_Add:
b = 0
a, x = ind, dep
else:
b = ind
a, x = separatevars(dep).as_independent(symbol, as_Add=False)
if x.could_extract_minus_sign():
a = -a
x = -x
return a, b, x
def _lambert(eq, x):
"""
Given an expression assumed to be in the form
``F(X, a..f) = a*log(b*X + c) + d*X + f = 0``
where X = g(x) and x = g^-1(X), return the Lambert solution,
``x = g^-1(-c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(-f/a)))``.
"""
eq = _mexpand(expand_log(eq))
mainlog = _mostfunc(eq, log, x)
if not mainlog:
return [] # violated assumptions
other = eq.subs(mainlog, 0)
if isinstance(-other, log):
eq = (eq - other).subs(mainlog, mainlog.args[0])
mainlog = mainlog.args[0]
if not isinstance(mainlog, log):
return [] # violated assumptions
other = -(-other).args[0]
eq += other
if x not in other.free_symbols:
return [] # violated assumptions
d, f, X2 = _linab(other, x)
logterm = collect(eq - other, mainlog)
a = logterm.as_coefficient(mainlog)
if a is None or x in a.free_symbols:
return [] # violated assumptions
logarg = mainlog.args[0]
b, c, X1 = _linab(logarg, x)
if X1 != X2:
return [] # violated assumptions
# invert the generator X1 so we have x(u)
u = Dummy('rhs')
xusolns = solve(X1 - u, x)
# There are infinitely many branches for LambertW
# but only branches for k = -1 and 0 might be real. The k = 0
    # branch is real and the k = -1 branch is real if the LambertW argument
    # is in the range [-1/e, 0]. Since `solve` does not return infinite
    # solutions, we will only include the -1 branch if it tests as real.
# Otherwise, inclusion of any LambertW in the solution indicates to
# the user that there are imaginary solutions corresponding to
# different k values.
lambert_real_branches = [-1, 0]
sol = []
# solution of the given Lambert equation is like
# sol = -c/b + (a/d)*LambertW(arg, k),
# where arg = d/(a*b)*exp((c*d-b*f)/a/b) and k in lambert_real_branches.
# Instead of considering the single arg, `d/(a*b)*exp((c*d-b*f)/a/b)`,
# the individual `p` roots obtained when writing `exp((c*d-b*f)/a/b)`
# as `exp(A/p) = exp(A)**(1/p)`, where `p` is an Integer, are used.
# calculating args for LambertW
num, den = ((c*d-b*f)/a/b).as_numer_denom()
p, den = den.as_coeff_Mul()
e = exp(num/den)
t = Dummy('t')
args = [d/(a*b)*t for t in roots(t**p - e, t).keys()]
# calculating solutions from args
for arg in args:
for k in lambert_real_branches:
w = LambertW(arg, k)
if k and not w.is_real:
continue
rhs = -c/b + (a/d)*w
sol.extend(xu.subs(u, rhs) for xu in xusolns)
return sol
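# Hedged numerical sanity check of the closed form used by _lambert() for
# a*log(b*X + c) + d*X + f = 0, with concrete coefficients. The helper name is
# hypothetical and the function is never called by the solver.
def _demo_lambert_closed_form():
    from sympy import S, LambertW, exp
    a, b, c, d, f = S(1), S(1), S(0), S(1), S(-1)  # log(X) + X - 1 = 0, root X = 1
    sol = -c/b + (a/d)*LambertW(d/(a*b)*exp(c*d/(a*b))*exp(-f/a))
    return sol  # LambertW(E) evaluates to 1, matching the known root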
def _solve_lambert(f, symbol, gens):
"""Return solution to ``f`` if it is a Lambert-type expression
else raise NotImplementedError.
For ``f(X, a..f) = a*log(b*X + c) + d*X - f = 0`` the solution
for ``X`` is ``X = -c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(f/a))``.
There are a variety of forms for `f(X, a..f)` as enumerated below:
1a1)
if B**B = R for R not in [0, 1] (since those cases would already
be solved before getting here) then log of both sides gives
log(B) + log(log(B)) = log(log(R)) and
X = log(B), a = 1, b = 1, c = 0, d = 1, f = log(log(R))
1a2)
if B*(b*log(B) + c)**a = R then log of both sides gives
log(B) + a*log(b*log(B) + c) = log(R) and
X = log(B), d=1, f=log(R)
1b)
if a*log(b*B + c) + d*B = R and
X = B, f = R
2a)
if (b*B + c)*exp(d*B + g) = R then log of both sides gives
log(b*B + c) + d*B + g = log(R) and
X = B, a = 1, f = log(R) - g
2b)
if g*exp(d*B + h) - b*B = c then the log form is
log(g) + d*B + h - log(b*B + c) = 0 and
X = B, a = -1, f = -h - log(g)
3)
if d*p**(a*B + g) - b*B = c then the log form is
log(d) + (a*B + g)*log(p) - log(b*B + c) = 0 and
X = B, a = -1, d = a*log(p), f = -log(d) - g*log(p)
"""
def _solve_even_degree_expr(expr, t, symbol):
"""Return the unique solutions of equations derived from
``expr`` by replacing ``t`` with ``+/- symbol``.
Parameters
==========
expr : Expr
The expression which includes a dummy variable t to be
replaced with +symbol and -symbol.
symbol : Symbol
The symbol for which a solution is being sought.
Returns
=======
        List of unique solutions of the two equations generated by
replacing ``t`` with positive and negative ``symbol``.
Notes
=====
        If ``expr = 2*log(t) + x/2``, then solutions for
``2*log(x) + x/2 = 0`` and ``2*log(-x) + x/2 = 0`` are
returned by this function. Though this may seem
counter-intuitive, one must note that the ``expr`` being
solved here has been derived from a different expression. For
an expression like ``eq = x**2*g(x) = 1``, if we take the
log of both sides we obtain ``log(x**2) + log(g(x)) = 0``. If
x is positive then this simplifies to
``2*log(x) + log(g(x)) = 0``; the Lambert-solving routines will
return solutions for this, but we must also consider the
solutions for ``2*log(-x) + log(g(x))`` since those must also
be a solution of ``eq`` which has the same value when the ``x``
in ``x**2`` is negated. If `g(x)` does not have even powers of
symbol then we do not want to replace the ``x`` there with
``-x``. So the role of the ``t`` in the expression received by
this function is to mark where ``+/-x`` should be inserted
before obtaining the Lambert solutions.
"""
nlhs, plhs = [
expr.xreplace({t: sgn*symbol}) for sgn in (-1, 1)]
sols = _solve_lambert(nlhs, symbol, gens)
if plhs != nlhs:
sols.extend(_solve_lambert(plhs, symbol, gens))
# uniq is needed for a case like
# 2*log(t) - log(-z**2) + log(z + log(x) + log(z))
# where substituting t with +/-x gives all the same solution;
# uniq, rather than list(set()), is used to maintain canonical
# order
return list(uniq(sols))
nrhs, lhs = f.as_independent(symbol, as_Add=True)
rhs = -nrhs
lamcheck = [tmp for tmp in gens
if (tmp.func in [exp, log] or
(tmp.is_Pow and symbol in tmp.exp.free_symbols))]
if not lamcheck:
raise NotImplementedError()
if lhs.is_Add or lhs.is_Mul:
# replacing all even_degrees of symbol with dummy variable t
# since these will need special handling; non-Add/Mul do not
# need this handling
t = Dummy('t', **symbol.assumptions0)
lhs = lhs.replace(
lambda i: # find symbol**even
i.is_Pow and i.base == symbol and i.exp.is_even,
lambda i: # replace t**even
t**i.exp)
if lhs.is_Add and lhs.has(t):
t_indep = lhs.subs(t, 0)
t_term = lhs - t_indep
_rhs = rhs - t_indep
if not t_term.is_Add and _rhs and not (
t_term.has(S.ComplexInfinity, S.NaN)):
eq = expand_log(log(t_term) - log(_rhs))
return _solve_even_degree_expr(eq, t, symbol)
elif lhs.is_Mul and rhs:
# this needs to happen whether t is present or not
lhs = expand_log(log(lhs), force=True)
rhs = log(rhs)
if lhs.has(t) and lhs.is_Add:
# it expanded from Mul to Add
eq = lhs - rhs
return _solve_even_degree_expr(eq, t, symbol)
# restore symbol in lhs
lhs = lhs.xreplace({t: symbol})
lhs = powsimp(factor(lhs, deep=True))
# make sure we have inverted as completely as possible
r = Dummy()
i, lhs = _invert(lhs - r, symbol)
rhs = i.xreplace({r: rhs})
# For the first forms:
#
# 1a1) B**B = R will arrive here as B*log(B) = log(R)
# lhs is Mul so take log of both sides:
# log(B) + log(log(B)) = log(log(R))
# 1a2) B*(b*log(B) + c)**a = R will arrive unchanged so
# lhs is Mul, so take log of both sides:
# log(B) + a*log(b*log(B) + c) = log(R)
# 1b) d*log(a*B + b) + c*B = R will arrive unchanged so
# lhs is Add, so isolate c*B and expand log of both sides:
# log(c) + log(B) = log(R - d*log(a*B + b))
soln = []
if not soln:
mainlog = _mostfunc(lhs, log, symbol)
if mainlog:
if lhs.is_Mul and rhs != 0:
soln = _lambert(log(lhs) - log(rhs), symbol)
elif lhs.is_Add:
other = lhs.subs(mainlog, 0)
if other and not other.is_Add and [
tmp for tmp in other.atoms(Pow)
if symbol in tmp.free_symbols]:
if not rhs:
diff = log(other) - log(other - lhs)
else:
diff = log(lhs - other) - log(rhs - other)
soln = _lambert(expand_log(diff), symbol)
else:
#it's ready to go
soln = _lambert(lhs - rhs, symbol)
# For the next forms,
#
# collect on main exp
# 2a) (b*B + c)*exp(d*B + g) = R
# lhs is mul, so take log of both sides:
# log(b*B + c) + d*B = log(R) - g
# 2b) g*exp(d*B + h) - b*B = R
# lhs is add, so add b*B to both sides,
# take the log of both sides and rearrange to give
# log(R + b*B) - d*B = log(g) + h
if not soln:
mainexp = _mostfunc(lhs, exp, symbol)
if mainexp:
lhs = collect(lhs, mainexp)
if lhs.is_Mul and rhs != 0:
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainexp-containing term to rhs
other = lhs.subs(mainexp, 0)
mainterm = lhs - other
rhs = rhs - other
if (mainterm.could_extract_minus_sign() and
rhs.could_extract_minus_sign()):
mainterm *= -1
rhs *= -1
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
# For the last form:
#
# 3) d*p**(a*B + g) - b*B = c
# collect on main pow, add b*B to both sides,
# take log of both sides and rearrange to give
# a*B*log(p) - log(b*B + c) = -log(d) - g*log(p)
if not soln:
mainpow = _mostfunc(lhs, Pow, symbol)
if mainpow and symbol in mainpow.exp.free_symbols:
lhs = collect(lhs, mainpow)
if lhs.is_Mul and rhs != 0:
# b*B = 0
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainpow-containing term to rhs
other = lhs.subs(mainpow, 0)
mainterm = lhs - other
rhs = rhs - other
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
if not soln:
raise NotImplementedError('%s does not appear to have a solution in '
'terms of LambertW' % f)
return list(ordered(soln))
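# Hedged usage sketch (hypothetical helper, never called): through solve(),
# the machinery above ultimately expresses solutions via LambertW, e.g.
# x*exp(x) = 2 gives x = LambertW(2).
def _demo_lambert_via_solve():
    from sympy import Symbol, exp, solve
    x = Symbol('x')
    return solve(x*exp(x) - 2, x)  # [LambertW(2)]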
def bivariate_type(f, x, y, *, first=True):
"""Given an expression, f, 3 tests will be done to see what type
of composite bivariate it might be, options for u(x, y) are::
x*y
x+y
x*y+x
x*y+y
If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy
variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and
equating the solutions to ``u(x, y)`` and then solving for ``x`` or
``y`` is equivalent to solving the original expression for ``x`` or
``y``. If ``x`` and ``y`` represent two functions in the same
variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``
can be solved for ``t`` then these represent the solutions to
``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.
Only positive values of ``u`` are considered.
Examples
========
>>> from sympy import solve
>>> from sympy.solvers.bivariate import bivariate_type
>>> from sympy.abc import x, y
>>> eq = (x**2 - 3).subs(x, x + y)
>>> bivariate_type(eq, x, y)
(x + y, _u**2 - 3, _u)
>>> uxy, pu, u = _
>>> usol = solve(pu, u); usol
[sqrt(3)]
>>> [solve(uxy - s) for s in solve(pu, u)]
[[{x: -y + sqrt(3)}]]
>>> all(eq.subs(s).equals(0) for sol in _ for s in sol)
True
"""
u = Dummy('u', positive=True)
if first:
p = Poly(f, x, y)
f = p.as_expr()
_x = Dummy()
_y = Dummy()
rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False)
if rv:
reps = {_x: x, _y: y}
return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2]
return
p = f
f = p.as_expr()
# f(x*y)
args = Add.make_args(p.as_expr())
new = []
for a in args:
a = _mexpand(a.subs(x, u/y))
free = a.free_symbols
if x in free or y in free:
break
new.append(a)
else:
return x*y, Add(*new), u
def ok(f, v, c):
new = _mexpand(f.subs(v, c))
free = new.free_symbols
return None if (x in free or y in free) else new
# f(a*x + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
a = root(p.coeff_monomial(x**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a)
if new is not None:
return a*x + b*y, new, u
# f(a*x*y + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
for itry in range(2):
a = root(p.coeff_monomial(x**d*y**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a/y)
if new is not None:
return a*x*y + b*y, new, u
x, y = y, x
|
0ac6d81d4cc98f8aa75f8e4a4f49916f012dda76c208337cc427671a8e6feb59 | """
This module contains functions to:
- solve a single equation for a single variable, in any domain, either real or complex.
- solve a single transcendental equation for a single variable in any domain, either real or complex.
(currently supports solving in the real domain only)
- solve a system of linear equations with N variables and M equations.
- solve a system of nonlinear equations with N variables and M equations.
"""
from sympy.core.sympify import sympify
from sympy.core import (S, Pow, Dummy, pi, Expr, Wild, Mul, Equality,
Add, Basic)
from sympy.core.containers import Tuple
from sympy.core.function import (Lambda, expand_complex, AppliedUndef,
expand_log, _mexpand, expand_trig, nfloat)
from sympy.core.mod import Mod
from sympy.core.numbers import igcd, I, Number, Rational, oo, ilcm
from sympy.core.power import integer_log
from sympy.core.relational import Eq, Ne, Relational
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import Symbol, _uniquely_named_symbol
from sympy.core.sympify import _sympify
from sympy.polys.matrices.linsolve import _linear_eq_to_dict
from sympy.polys.polyroots import UnsolvableFactorError
from sympy.simplify.simplify import simplify, fraction, trigsimp, nsimplify
from sympy.simplify import powdenest, logcombine
from sympy.functions import (log, tan, cot, sin, cos, sec, csc, exp,
acos, asin, acsc, asec,
piecewise_fold, Piecewise)
from sympy.functions.elementary.complexes import Abs, arg, re, im
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.miscellaneous import real_root
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.logic.boolalg import And, BooleanTrue
from sympy.sets import (FiniteSet, imageset, Interval, Intersection,
Union, ConditionSet, ImageSet, Complement, Contains)
from sympy.sets.sets import Set, ProductSet
from sympy.matrices import zeros, Matrix, MatrixBase
from sympy.ntheory import totient
from sympy.ntheory.factor_ import divisors
from sympy.ntheory.residue_ntheory import discrete_log, nthroot_mod
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf, factor, lcm, gcd)
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polytools import invert, groebner, poly
from sympy.polys.solvers import (sympy_eqs_to_ring, solve_lin_sys,
PolyNonlinearError)
from sympy.polys.matrices.linsolve import _linsolve
from sympy.solvers.solvers import (checksol, denoms, unrad,
_simple_dens, recast_to_symbols)
from sympy.solvers.polysys import solve_poly_system
from sympy.utilities import filldedent
from sympy.utilities.iterables import (numbered_symbols, has_dups,
is_sequence, iterable)
from sympy.calculus.util import periodicity, continuous_domain, function_range
from types import GeneratorType
class NonlinearError(ValueError):
"""Raised when unexpectedly encountering nonlinear equations"""
pass
_rc = Dummy("R", real=True), Dummy("C", complex=True)
def _masked(f, *atoms):
"""Return ``f``, with all objects given by ``atoms`` replaced with
Dummy symbols, ``d``, and the list of replacements, ``(d, e)``,
where ``e`` is an object of type given by ``atoms`` in which
any other instances of atoms have been recursively replaced with
Dummy symbols, too. The tuples are ordered so that if they are
    applied in sequence, the original ``f`` will be restored.
Examples
========
>>> from sympy import cos
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import _masked
>>> f = cos(cos(x) + 1)
>>> f, reps = _masked(cos(1 + cos(x)), cos)
>>> f
_a1
>>> reps
[(_a1, cos(_a0 + 1)), (_a0, cos(x))]
>>> for d, e in reps:
... f = f.xreplace({d: e})
>>> f
cos(cos(x) + 1)
"""
sym = numbered_symbols('a', cls=Dummy, real=True)
mask = []
for a in ordered(f.atoms(*atoms)):
for i in mask:
a = a.replace(*i)
mask.append((a, next(sym)))
for i, (o, n) in enumerate(mask):
f = f.replace(o, n)
mask[i] = (n, o)
mask = list(reversed(mask))
return f, mask
def _invert(f_x, y, x, domain=S.Complexes):
r"""
Reduce the complex valued equation $f(x) = y$ to a set of equations
$$\left\{g(x) = h_1(y),\ g(x) = h_2(y),\ \dots,\ g(x) = h_n(y) \right\}$$
where $g(x)$ is a simpler function than $f(x)$. The return value is a tuple
$(g(x), \mathrm{set}_h)$, where $g(x)$ is a function of $x$ and $\mathrm{set}_h$ is
the set of function $\left\{h_1(y), h_2(y), \dots, h_n(y)\right\}$.
Here, $y$ is not necessarily a symbol.
$\mathrm{set}_h$ contains the functions, along with the information
about the domain in which they are valid, through set
operations. For instance, if :math:`y = |x| - n` is inverted
in the real domain, then $\mathrm{set}_h$ is not simply
$\{-n, n\}$ as the nature of `n` is unknown; rather, it is:
$$ \left(\left[0, \infty\right) \cap \left\{n\right\}\right) \cup
\left(\left(-\infty, 0\right] \cap \left\{- n\right\}\right)$$
By default, the complex domain is used which means that inverting even
seemingly simple functions like $\exp(x)$ will give very different
results from those obtained in the real domain.
(In the case of $\exp(x)$, the inversion via $\log$ is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably set the domain to
``S.Reals`` (or use ``invert_real`` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers))
>>> invert_real(exp(x), y, x)
(x, Intersection({log(y)}, Reals))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers))
>>> invert_real(exp(x), 1, x)
(x, {0})
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if x not in f_x.free_symbols:
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if x in y.free_symbols:
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x1, s = _invert_real(f_x, FiniteSet(y), x)
else:
x1, s = _invert_complex(f_x, FiniteSet(y), x)
if not isinstance(s, FiniteSet) or x1 != x:
return x1, s
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled by the respective inverters.
if domain is S.Complexes:
return x1, s
else:
return x1, s.intersection(domain)
invert_complex = _invert
def invert_real(f_x, y, x):
"""
Inverts a real-valued function. Same as :func:`invert_complex`, but sets
the domain to ``S.Reals`` before inverting.
"""
return _invert(f_x, y, x, S.Reals)
def _invert_real(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol or g_ys is S.EmptySet:
return (f, g_ys)
n = Dummy('n', real=True)
if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
return _invert_real(f.exp,
imageset(Lambda(n, log(n)), g_ys),
symbol)
if hasattr(f, 'inverse') and f.inverse() is not None and not isinstance(f, (
TrigonometricFunction,
HyperbolicFunction,
)):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys),
symbol)
if isinstance(f, Abs):
return _invert_abs(f.args[0], g_ys, symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
if expo.is_rational:
num, den = expo.as_numer_denom()
if den % 2 == 0 and num % 2 == 1 and den.is_zero is False:
# Here we have f(x)**(num/den) = y
# where den is nonzero and even and y is an element
# of the set g_ys.
# den is even, so we are only interested in the cases
# where both f(x) and y are positive.
# Restricting y to be positive (using the set g_ys_pos)
# means that y**(den/num) is always positive.
# Therefore it isn't necessary to also constrain f(x)
# to be positive because we are only going to
                    # find solutions of f(x) = y**(den/num)
# where the rhs is already required to be positive.
root = Lambda(n, real_root(n, expo))
g_ys_pos = g_ys & Interval(0, oo)
res = imageset(root, g_ys_pos)
_inv, _set = _invert_real(base, res, symbol)
return (_inv, _set)
if den % 2 == 1:
root = Lambda(n, real_root(n, expo))
res = imageset(root, g_ys)
if num % 2 == 0:
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
if num % 2 == 1:
return _invert_real(base, res, symbol)
elif expo.is_irrational:
root = Lambda(n, real_root(n, expo))
g_ys_pos = g_ys & Interval(0, oo)
res = imageset(root, g_ys_pos)
return _invert_real(base, res, symbol)
else:
# indeterminate exponent, e.g. Float or parity of
# num, den of rational could not be determined
pass # use default return
if not base_has_sym:
rhs = g_ys.args[0]
if base.is_positive:
return _invert_real(expo,
imageset(Lambda(n, log(n, base, evaluate=False)), g_ys), symbol)
elif base.is_negative:
s, b = integer_log(rhs, base)
if b:
return _invert_real(expo, FiniteSet(s), symbol)
else:
return (expo, S.EmptySet)
elif base.is_zero:
one = Eq(rhs, 1)
if one == S.true:
# special case: 0**x - 1
return _invert_real(expo, FiniteSet(0), symbol)
elif one == S.false:
return (expo, S.EmptySet)
if isinstance(f, TrigonometricFunction):
if isinstance(g_ys, FiniteSet):
def inv(trig):
if isinstance(trig, (sin, csc)):
F = asin if isinstance(trig, sin) else acsc
return (lambda a: n*pi + S.NegativeOne**n*F(a),)
if isinstance(trig, (cos, sec)):
F = acos if isinstance(trig, cos) else asec
return (
lambda a: 2*n*pi + F(a),
lambda a: 2*n*pi - F(a),)
if isinstance(trig, (tan, cot)):
return (lambda a: n*pi + trig.inverse()(a),)
n = Dummy('n', integer=True)
invs = S.EmptySet
for L in inv(f):
invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
return _invert_real(f.args[0], invs, symbol)
return (f, g_ys)
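# Hedged illustration (hypothetical helper, never called): the Pow branch
# above returns both square roots when the exponent is even, so inverting
# x**2 = 4 over the reals yields both candidates.
def _demo_invert_even_power():
    from sympy import Symbol
    x = Symbol('x')
    return invert_real(x**2, 4, x)  # (x, {-2, 2})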
def _invert_complex(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol or g_ys is S.EmptySet:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
if g in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
return (h, S.EmptySet)
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
# special case: g**r = 0
# Could be improved like `_invert_real` to handle more general cases.
if expo.is_Rational and g_ys == FiniteSet(0):
if expo.is_positive:
return _invert_complex(base, g_ys, symbol)
if hasattr(f, 'inverse') and f.inverse() is not None and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, HyperbolicFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
if isinstance(g_ys, ImageSet):
            # can solve up to `d*exp(exp(...(exp(a*x + b))...)) + c` format.
            # Further, this could be improved to `d*exp(exp(...(exp(a*x**n + b*x**(n-1) + ... + f))...)) + c`.
g_ys_expr = g_ys.lamda.expr
g_ys_vars = g_ys.lamda.variables
k = Dummy('k{}'.format(len(g_ys_vars)))
g_ys_vars_1 = (k,) + g_ys_vars
exp_invs = Union(*[imageset(Lambda((g_ys_vars_1,), (I*(2*k*pi + arg(g_ys_expr))
+ log(Abs(g_ys_expr)))), S.Integers**(len(g_ys_vars_1)))])
return _invert_complex(f.exp, exp_invs, symbol)
elif isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.exp, exp_invs, symbol)
return (f, g_ys)
def _invert_abs(f, g_ys, symbol):
"""Helper function for inverting absolute value functions.
Returns the complete result of inverting an absolute value
function along with the conditions which must also be satisfied.
If it is certain that all these conditions are met, a :class:`~.FiniteSet`
of all possible solutions is returned. If any condition cannot be
satisfied, an :class:`~.EmptySet` is returned. Otherwise, a
:class:`~.ConditionSet` of the solutions, with all the required conditions
specified, is returned.
"""
if not g_ys.is_FiniteSet:
# this could be used for FiniteSet, but the
# results are more compact if they aren't, e.g.
# ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}) vs
# Union(Intersection(Interval(0, oo), {n}), Intersection(Interval(-oo, 0), {-n}))
# for the solution of abs(x) - n
pos = Intersection(g_ys, Interval(0, S.Infinity))
parg = _invert_real(f, pos, symbol)
narg = _invert_real(-f, pos, symbol)
if parg[0] != narg[0]:
raise NotImplementedError
return parg[0], Union(narg[1], parg[1])
# check conditions: all these must be true. If any are unknown
# then return them as conditions which must be satisfied
unknown = []
for a in g_ys.args:
ok = a.is_nonnegative if a.is_Number else a.is_positive
if ok is None:
unknown.append(a)
elif not ok:
return symbol, S.EmptySet
if unknown:
conditions = And(*[Contains(i, Interval(0, oo))
for i in unknown])
else:
conditions = True
n = Dummy('n', real=True)
# this is slightly different than above: instead of solving
# +/-f on positive values, here we solve for f on +/- g_ys
g_x, values = _invert_real(f, Union(
imageset(Lambda(n, n), g_ys),
imageset(Lambda(n, -n), g_ys)), symbol)
return g_x, ConditionSet(g_x, conditions, values)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
elif isinstance(f, Piecewise):
# Check the cases of the Piecewise in turn. There might be invalid
# expressions in later cases that don't apply e.g.
# solveset(Piecewise((0, Eq(x, 0)), (1/x, True)), x)
for expr, cond in f.args:
condsubs = cond.subs(symbol, p)
if condsubs is S.false:
continue
elif condsubs is S.true:
return _domain_check(expr, symbol, p)
else:
# We don't know which case of the Piecewise holds. On this
# basis we cannot decide whether any solution is in or out of
# the domain. Ideally this function would allow returning a
# symbolic condition for the validity of the solution that
                # could be handled in the calling code. In the meantime we'll
# give this particular solution the benefit of the doubt and
# let it pass.
return True
else:
# TODO : We should not blindly recurse through all args of arbitrary expressions like this
return all(_domain_check(g, symbol, p)
for g in f.args)
def _is_finite_with_finite_vars(f, domain=S.Complexes):
"""
Return True if the given expression is finite. For symbols that
    have no assigned value for `complex` and/or `real`, the domain will
    be used to assign one; symbols that have no assigned value
for `finite` will be made finite. All other assumptions are
left unmodified.
"""
def assumptions(s):
A = s.assumptions0
A.setdefault('finite', A.get('finite', True))
if domain.is_subset(S.Reals):
# if this gets set it will make complex=True, too
A.setdefault('real', True)
else:
# don't change 'real' because being complex implies
# nothing about being real
A.setdefault('complex', True)
return A
reps = {s: Dummy(**assumptions(s)) for s in f.free_symbols}
return f.xreplace(reps).is_finite
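# Hedged illustration (hypothetical helper, never called): replacing a bare
# Symbol with a finite, real Dummy lets the assumptions system conclude
# finiteness, which is exactly what _is_finite_with_finite_vars() does.
def _demo_finite_with_finite_vars():
    from sympy import Dummy, Symbol
    x = Symbol('x')
    expr = x + 1
    finite_x = Dummy('x', real=True, finite=True)
    return expr.is_finite, expr.xreplace({x: finite_x}).is_finite  # (None, True)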
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import TrigonometricFunction
>>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def _solve_as_rational(f, symbol, domain):
""" solve rational functions"""
f = together(_mexpand(f, recursive=True), deep=True)
g, h = fraction(f)
if not h.has(symbol):
try:
return _solve_as_poly(g, symbol, domain)
except NotImplementedError:
# The polynomial formed from g could end up having
# coefficients in a ring over which finding roots
# isn't implemented yet, e.g. ZZ[a] for some symbol a
return ConditionSet(symbol, Eq(f, 0), domain)
except CoercionFailed:
# contained oo, zoo or nan
return S.EmptySet
else:
valid_solns = _solveset(g, symbol, domain)
invalid_solns = _solveset(h, symbol, domain)
return valid_solns - invalid_solns
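# Hedged illustration (hypothetical helper, never called): solutions of the
# numerator that also annihilate the denominator are discarded above, e.g.
# (x**2 - 1)/(x - 1) = 0 keeps only x = -1.
def _demo_rational_solution_filtering():
    from sympy import S, Symbol
    x = Symbol('x')
    return solveset((x**2 - 1)/(x - 1), x, S.Reals)  # {-1}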
class _SolveTrig1Error(Exception):
"""Raised when _solve_trig1 heuristics do not apply"""
def _solve_trig(f, symbol, domain):
"""Function to call other helpers to solve trigonometric equations """
sol = None
try:
sol = _solve_trig1(f, symbol, domain)
except _SolveTrig1Error:
try:
sol = _solve_trig2(f, symbol, domain)
except ValueError:
raise NotImplementedError(filldedent('''
Solution to this kind of trigonometric equations
is yet to be implemented'''))
return sol
def _solve_trig1(f, symbol, domain):
"""Primary solver for trigonometric and hyperbolic equations
Returns either the solution set as a ConditionSet (auto-evaluated to a
union of ImageSets if no variables besides 'symbol' are involved) or
raises _SolveTrig1Error if f == 0 cannot be solved.
Notes
=====
Algorithm:
1. Do a change of variable x -> mu*x in arguments to trigonometric and
hyperbolic functions, in order to reduce them to small integers. (This
step is crucial to keep the degrees of the polynomials of step 4 low.)
2. Rewrite trigonometric/hyperbolic functions as exponentials.
3. Proceed to a 2nd change of variable, replacing exp(I*x) or exp(x) by y.
4. Solve the resulting rational equation.
5. Use invert_complex or invert_real to return to the original variable.
6. If the coefficients of 'symbol' were symbolic in nature, add the
necessary consistency conditions in a ConditionSet.
"""
# Prepare change of variable
x = Dummy('x')
if _is_function_class_equation(HyperbolicFunction, f, symbol):
cov = exp(x)
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
else:
cov = exp(I*x)
inverter = invert_complex
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(TrigonometricFunction, HyperbolicFunction)
trig_arguments = [e.args[0] for e in trig_functions]
# trigsimp may have reduced the equation to an expression
# that is independent of 'symbol' (e.g. cos**2+sin**2)
if not any(a.has(symbol) for a in trig_arguments):
return solveset(f_original, symbol, domain)
denominators = []
numerators = []
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise _SolveTrig1Error("trig argument is not a polynomial")
if poly_ar.degree() > 1: # degree >1 still bad
raise _SolveTrig1Error("degree of variable must not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
numerators.append(fraction(c)[0])
denominators.append(fraction(c)[1])
mu = lcm(denominators)/gcd(numerators)
f = f.subs(symbol, mu*x)
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(cov, y), h.subs(cov, y)
if g.has(x) or h.has(x):
raise _SolveTrig1Error("change of variable not possible")
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, ConditionSet):
raise _SolveTrig1Error("polynomial has ConditionSet solution")
if isinstance(solns, FiniteSet):
if any(isinstance(s, RootOf) for s in solns):
raise _SolveTrig1Error("polynomial results in RootOf object")
# revert the change of variable
cov = cov.subs(x, symbol/mu)
result = Union(*[inverter(cov, s, symbol)[1] for s in solns])
# In case of symbolic coefficients, the solution set is only valid
# if numerator and denominator of mu are non-zero.
if mu.has(Symbol):
syms = (mu).atoms(Symbol)
munum, muden = fraction(mu)
condnum = munum.as_independent(*syms, as_Add=False)[1]
condden = muden.as_independent(*syms, as_Add=False)[1]
cond = And(Ne(condnum, 0), Ne(condden, 0))
else:
cond = True
# Actual conditions are returned as part of the ConditionSet. Adding an
# intersection with C would only complicate some solution sets due to
# current limitations of intersection code. (e.g. #19154)
if domain is S.Complexes:
# This is a slight abuse of ConditionSet. Ideally this should
# be some kind of "PiecewiseSet". (See #19507 discussion)
return ConditionSet(symbol, cond, result)
else:
return ConditionSet(symbol, cond, Intersection(result, domain))
elif solns is S.EmptySet:
return S.EmptySet
else:
raise _SolveTrig1Error("polynomial solutions must form FiniteSet")
def _solve_trig2(f, symbol, domain):
"""Secondary helper to solve trigonometric equations,
called when first helper fails """
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(sin, cos, tan, sec, cot, csc)
trig_arguments = [e.args[0] for e in trig_functions]
denominators = []
numerators = []
# todo: This solver can be extended to hyperbolics if the
# analogous change of variable to tanh (instead of tan)
# is used.
if not trig_functions:
return ConditionSet(symbol, Eq(f_original, 0), domain)
# todo: The pre-processing below (extraction of numerators, denominators,
# gcd, lcm, mu, etc.) should be updated to the enhanced version in
# _solve_trig1. (See #19507)
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise ValueError("give up, we cannot solve if this is not a polynomial in x")
if poly_ar.degree() > 1: # degree >1 still bad
raise ValueError("degree of variable inside polynomial should not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
try:
numerators.append(Rational(c).p)
denominators.append(Rational(c).q)
except TypeError:
return ConditionSet(symbol, Eq(f_original, 0), domain)
x = Dummy('x')
# ilcm() and igcd() require more than one argument
if len(numerators) > 1:
mu = Rational(2)*ilcm(*denominators)/igcd(*numerators)
else:
assert len(numerators) == 1
mu = Rational(2)*denominators[0]/numerators[0]
f = f.subs(symbol, mu*x)
f = f.rewrite(tan)
f = expand_trig(f)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(tan(x), y), h.subs(tan(x), y)
if g.has(x) or h.has(x):
return ConditionSet(symbol, Eq(f_original, 0), domain)
solns = solveset(g, y, S.Reals) - solveset(h, y, S.Reals)
if isinstance(solns, FiniteSet):
result = Union(*[invert_real(tan(symbol/mu), s, symbol)[1]
for s in solns])
dsol = invert_real(tan(symbol/mu), oo, symbol)[1]
        if degree(h) > degree(g):  # if degree(denom) > degree(num), f -> 0 as
            result = Union(result, dsol)  # y -> oo, so tan's poles are also solutions
return Intersection(result, domain)
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f_original, 0), S.Reals)
def _solve_as_poly(f, symbol, domain=S.Complexes):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), domain)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), domain)
if gen != symbol:
y = Dummy('y')
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
lhs, rhs_s = inverter(gen, y, symbol)
if lhs == symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
if isinstance(result, FiniteSet) and isinstance(gen, Pow
) and gen.base.is_Rational:
result = FiniteSet(*[expand_log(i) for i in result])
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if result is not None:
if isinstance(result, FiniteSet):
            # simplify solutions like -sqrt(-I) to sqrt(2)/2 - sqrt(2)*I/2;
            # solutions containing symbols or undefined functions are not
            # expanded because that would make them more complicated, e.g.
            # expand_complex(a) returns re(a) + I*im(a)
if all(s.atoms(Symbol, AppliedUndef) == set() and not isinstance(s, RootOf)
for s in result):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
if isinstance(result, FiniteSet) and domain != S.Complexes:
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled elsewhere.
result = result.intersection(domain)
return result
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def _solve_radical(f, unradf, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
res = unradf
eq, cov = res if res else (f, [])
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, symbol)])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
if not isinstance(result, FiniteSet):
solution_set = result
else:
f_set = [] # solutions for FiniteSet
c_set = [] # solutions for ConditionSet
for s in result:
if checksol(f, symbol, s):
f_set.append(s)
else:
c_set.append(s)
solution_set = FiniteSet(*f_set) + ConditionSet(symbol, Eq(f, 0), FiniteSet(*c_set))
return solution_set
def _solve_abs(f, symbol, domain):
""" Helper function to solve equation involving absolute value function """
if not domain.is_subset(S.Reals):
raise ValueError(filldedent('''
Absolute values cannot be inverted in the
complex domain.'''))
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
f_p, f_q, f_r = [pattern_match.get(i, S.Zero) for i in (p, q, r)]
if not (f_p.is_zero or f_q.is_zero):
domain = continuous_domain(f_q, symbol, domain)
from .inequalities import solve_univariate_inequality
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False, domain=domain, continuous=True)
q_neg_cond = q_pos_cond.complement(domain)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def solve_decomposition(f, symbol, domain):
"""
Function to solve equations via the principle of "Decomposition
and Rewriting".
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solve_decomposition as sd
>>> x = Symbol('x')
>>> f1 = exp(2*x) - 3*exp(x) + 2
>>> sd(f1, x, S.Reals)
{0, log(2)}
>>> f2 = sin(x)**2 + 2*sin(x) + 1
>>> pprint(sd(f2, x, S.Reals), use_unicode=False)
3*pi
{2*n*pi + ---- | n in Integers}
2
>>> f3 = sin(x + 2)
>>> pprint(sd(f3, x, S.Reals), use_unicode=False)
{2*n*pi - 2 | n in Integers} U {2*n*pi - 2 + pi | n in Integers}
"""
from sympy.solvers.decompogen import decompogen
# decompose the given function
g_s = decompogen(f, symbol)
# `y_s` represents the set of values for which the function `g` is to be
# solved.
# `solutions` represent the solutions of the equations `g = y_s` or
# `g = 0` depending on the type of `y_s`.
# As we are interested in solving the equation: f = 0
y_s = FiniteSet(0)
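    # Worked sketch for f1 = exp(2*x) - 3*exp(x) + 2 from the docstring:
    # decompogen returns [x**2 - 3*x + 2, exp(x)].  The first pass solves
    # the quadratic against y_s = {0}, giving {1, 2}; the second pass then
    # solves exp(x) = 1 and exp(x) = 2 over the Reals, producing the final
    # answer {0, log(2)}.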
for g in g_s:
frange = function_range(g, symbol, domain)
y_s = Intersection(frange, y_s)
result = S.EmptySet
if isinstance(y_s, FiniteSet):
for y in y_s:
solutions = solveset(Eq(g, y), symbol, domain)
if not isinstance(solutions, ConditionSet):
result += solutions
else:
if isinstance(y_s, ImageSet):
iter_iset = (y_s,)
elif isinstance(y_s, Union):
iter_iset = y_s.args
elif y_s is S.EmptySet:
                # y_s is not in the range of g in g_s, so no solution exists
                # in the given domain
return S.EmptySet
for iset in iter_iset:
new_solutions = solveset(Eq(iset.lamda.expr, g), symbol, domain)
dummy_var = tuple(iset.lamda.expr.free_symbols)[0]
(base_set,) = iset.base_sets
if isinstance(new_solutions, FiniteSet):
new_exprs = new_solutions
elif isinstance(new_solutions, Intersection):
if isinstance(new_solutions.args[1], FiniteSet):
new_exprs = new_solutions.args[1]
for new_expr in new_exprs:
result += ImageSet(Lambda(dummy_var, new_expr), base_set)
if result is S.EmptySet:
return ConditionSet(symbol, Eq(f, 0), domain)
y_s = result
return y_s
def _solveset(f, symbol, domain, _check=False):
"""Helper for solveset to return a result from an expression
that has already been sympify'ed and is known to contain the
given symbol."""
# _check controls whether the answer is checked or not
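    # Rough dispatch order used below (descriptive note): trivial cases
    # (f identically zero -> domain, f free of symbol -> EmptySet), a Mul
    # whose factors are all finite, trigonometric/hyperbolic equations,
    # arg(), Piecewise, Eq and other Relationals, modular equations, and
    # finally inversion of f = 0 followed by the radical/Abs/rational/
    # transcendental helpers.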
from sympy.simplify.simplify import signsimp
if isinstance(f, BooleanTrue):
return domain
orig_f = f
if f.is_Mul:
coeff, f = f.as_independent(symbol, as_Add=False)
if coeff in {S.ComplexInfinity, S.NegativeInfinity, S.Infinity}:
f = together(orig_f)
elif f.is_Add:
a, h = f.as_independent(symbol)
m, h = h.as_independent(symbol, as_Add=False)
if m not in {S.ComplexInfinity, S.Zero, S.Infinity,
S.NegativeInfinity}:
f = a/m + h # XXX condition `m != 0` should be added to soln
# assign the solvers to use
solver = lambda f, x, domain=domain: _solveset(f, x, domain)
inverter = lambda f, rhs, symbol: _invert(f, rhs, symbol, domain)
result = S.EmptySet
if f.expand().is_zero:
return domain
elif not f.has(symbol):
return S.EmptySet
elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain)
for m in f.args):
        # In general, the solution of f(x)*g(x) == 0 is NOT the same as
        # Union(f(x) == 0, g(x) == 0): g(x) can become infinitely large at
        # the values where f(x) == 0.  To be sure that no wrong solutions
        # are silently admitted, the factor-by-factor approach is used only
        # when every factor is finite for a finite input.
result = Union(*[solver(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_trig(f, symbol, domain)
elif isinstance(f, arg):
a = f.args[0]
result = Intersection(_solveset(re(a) > 0, symbol, domain),
_solveset(im(a), symbol, domain))
elif f.is_Piecewise:
expr_set_pairs = f.as_expr_set_pairs(domain)
for (expr, in_set) in expr_set_pairs:
if in_set.is_Relational:
in_set = in_set.as_set()
solns = solver(expr, symbol, in_set)
result += solns
elif isinstance(f, Eq):
result = solver(Add(f.lhs, - f.rhs, evaluate=False), symbol, domain)
elif f.is_Relational:
from .inequalities import solve_univariate_inequality
try:
result = solve_univariate_inequality(
f, symbol, domain=domain, relational=False)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
elif _is_modular(f, symbol):
result = _solve_modular(f, symbol, domain)
else:
lhs, rhs_s = inverter(f, 0, symbol)
if lhs == symbol:
# do some very minimal simplification since
# repeated inversion may have left the result
# in a state that other solvers (e.g. poly)
# would have simplified; this is done here
# rather than in the inverter since here it
# is only done once whereas there it would
# be repeated for each step of the inversion
if isinstance(rhs_s, FiniteSet):
rhs_s = FiniteSet(*[Mul(*
signsimp(i).as_content_primitive())
for i in rhs_s])
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
for equation in [lhs - rhs for rhs in rhs_s]:
if equation == f:
u = unrad(f, symbol)
if u:
result += _solve_radical(equation, u,
symbol,
solver)
elif equation.has(Abs):
result += _solve_abs(f, symbol, domain)
else:
result_rational = _solve_as_rational(equation, symbol, domain)
if not isinstance(result_rational, ConditionSet):
result += result_rational
else:
# may be a transcendental type equation
t_result = _transolve(equation, symbol, domain)
if isinstance(t_result, ConditionSet):
# might need factoring; this is expensive so we
# have delayed until now. To avoid recursion
# errors look for a non-trivial factoring into
# a product of symbol dependent terms; I think
# that something that factors as a Pow would
# have already been recognized by now.
factored = equation.factor()
if factored.is_Mul and equation != factored:
_, dep = factored.as_independent(symbol)
if not dep.is_Add:
# non-trivial factoring of equation
# but use form with constants
# in case they need special handling
t_results = []
for fac in Mul.make_args(factored):
if fac.has(symbol):
t_results.append(solver(fac, symbol))
t_result = Union(*t_results)
result += t_result
else:
result += solver(equation, symbol)
elif rhs_s is not S.EmptySet:
result = ConditionSet(symbol, Eq(f, 0), domain)
if isinstance(result, ConditionSet):
if isinstance(f, Expr):
num, den = f.as_numer_denom()
if den.has(symbol):
_result = _solveset(num, symbol, domain)
if not isinstance(_result, ConditionSet):
singularities = _solveset(den, symbol, domain)
result = _result - singularities
if _check:
if isinstance(result, ConditionSet):
# it wasn't solved or has enumerated all conditions
# -- leave it alone
return result
# whittle away all but the symbol-containing core
# to use this for testing
if isinstance(orig_f, Expr):
fx = orig_f.as_independent(symbol, as_Add=True)[1]
fx = fx.as_independent(symbol, as_Add=False)[1]
else:
fx = orig_f
if isinstance(result, FiniteSet):
# check the result for invalid solutions
result = FiniteSet(*[s for s in result
if isinstance(s, RootOf)
or domain_check(fx, symbol, s)])
return result
def _is_modular(f, symbol):
"""
    Helper function to check for modular equations of the type
    ``A - Mod(B, C) = 0``
    A -> may or may not be a function of symbol.
    B -> is a function of symbol.
    C -> is an integer.
Parameters
==========
f : Expr
The equation to be checked.
symbol : Symbol
The concerned variable for which the equation is to be checked.
Examples
========
>>> from sympy import symbols, exp, Mod
>>> from sympy.solvers.solveset import _is_modular as check
>>> x, y = symbols('x y')
>>> check(Mod(x, 3) - 1, x)
True
>>> check(Mod(x, 3) - 1, y)
False
>>> check(Mod(x, 3)**2 - 5, x)
False
>>> check(Mod(x, 3)**2 - y, x)
False
>>> check(exp(Mod(x, 3)) - 1, x)
False
>>> check(Mod(3, y) - 1, y)
False
"""
if not f.has(Mod):
return False
# extract modterms from f.
modterms = list(f.atoms(Mod))
return (len(modterms) == 1 and # only one Mod should be present
modterms[0].args[0].has(symbol) and # B-> function of symbol
modterms[0].args[1].is_integer and # C-> to be an integer.
any(isinstance(term, Mod)
for term in list(_term_factors(f))) # free from other funcs
)
def _invert_modular(modterm, rhs, n, symbol):
"""
Helper function to invert modular equation.
``Mod(a, m) - rhs = 0``
Generally it is inverted as (a, ImageSet(Lambda(n, m*n + rhs), S.Integers)).
    A more simplified form is returned if possible.
    If it is not invertible then (modterm, rhs) is returned.
    The following cases arise while inverting the equation ``Mod(a, m) - rhs = 0``:
    1. If a is the symbol itself, then m*n + rhs is the required solution.
    2. If a is an instance of ``Add``, its symbol-independent part is moved
       to the other side and ``_invert_modular`` is called again on the
       symbol-dependent part.
    3. If a is an instance of ``Mul``, then, as for ``Add``, the
       symbol-independent factor is transferred to the rhs (using modular
       ``invert``) and ``_invert_modular`` is called again on the
       symbol-dependent factor.
    4. If a is an instance of ``Pow``, two cases arise:
       - If a is of the form (symbol_indep)**(symbol_dep), the remainder is
         evaluated with the ``discrete_log`` function and the least period
         of the exponent is found with the help of ``totient``;
         period*n + remainder is the required solution in this case.
         For reference: (https://en.wikipedia.org/wiki/Euler's_theorem)
       - If a is of the form (symbol_dep)**(symbol_indep), the primitive
         solutions are found with the ``nthroot_mod`` function;
         m*n + rem is the general solution, where rem runs over the list of
         solutions returned by ``nthroot_mod``.
Parameters
==========
modterm, rhs : Expr
The modular equation to be inverted, ``modterm - rhs = 0``
symbol : Symbol
The variable in the equation to be inverted.
n : Dummy
Dummy variable for output g_n.
Returns
=======
    A tuple (f_x, g_n) is returned, where f_x is a Mod-free function of
    symbol and g_n is the set of values that f_x can take.
Examples
========
>>> from sympy import symbols, exp, Mod, Dummy, S
>>> from sympy.solvers.solveset import _invert_modular as invert_modular
>>> x, y = symbols('x y')
>>> n = Dummy('n')
>>> invert_modular(Mod(exp(x), 7), S(5), n, x)
(Mod(exp(x), 7), 5)
>>> invert_modular(Mod(x, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 5), Integers))
>>> invert_modular(Mod(3*x + 8, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 6), Integers))
>>> invert_modular(Mod(x**4, 7), S(5), n, x)
(x, EmptySet)
>>> invert_modular(Mod(2**(x**2 + x + 1), 7), S(2), n, x)
(x**2 + x + 1, ImageSet(Lambda(_n, 3*_n + 1), Naturals0))
"""
a, m = modterm.args
if rhs.is_real is False or any(term.is_real is False
for term in list(_term_factors(a))):
# Check for complex arguments
return modterm, rhs
if abs(rhs) >= abs(m):
        # no residue can equal or exceed the modulus in absolute value
return symbol, S.EmptySet
if a == symbol:
return symbol, ImageSet(Lambda(n, m*n + rhs), S.Integers)
if a.is_Add:
# g + h = a
g, h = a.as_independent(symbol)
if g is not S.Zero:
x_indep_term = rhs - Mod(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Mul:
# g*h = a
g, h = a.as_independent(symbol)
if g is not S.One:
x_indep_term = rhs*invert(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Pow:
# base**expo = a
base, expo = a.args
if expo.has(symbol) and not base.has(symbol):
            # remainder -> the n-independent part of the solution.
            # m and rhs are made coprime by dividing out igcd(m, rhs)
try:
remainder = discrete_log(m / igcd(m, rhs), rhs, a.base)
except ValueError: # log does not exist
return modterm, rhs
            # period -> the coefficient of n in the solution, i.e. the least
            # period with which expo repeats modulo m.  By Euler's theorem,
            # a**totient(m) == 1 (mod m) when gcd(a, m) == 1; see
            # (https://en.wikipedia.org/wiki/Euler's_theorem)
period = totient(m)
for p in divisors(period):
                # a period smaller than totient(m) might exist
if pow(a.base, p, m / igcd(m, a.base)) == 1:
period = p
break
            # recursion is not applied here since _invert_modular is currently
            # not smart enough to handle an infinite rhs; here expo would get
            # rhs = ImageSet(Lambda(n, period*n + remainder), S.Naturals0).
return expo, ImageSet(Lambda(n, period*n + remainder), S.Naturals0)
elif base.has(symbol) and not expo.has(symbol):
try:
remainder_list = nthroot_mod(rhs, expo, m, all_roots=True)
if remainder_list == []:
return symbol, S.EmptySet
except (ValueError, NotImplementedError):
return modterm, rhs
g_n = S.EmptySet
for rem in remainder_list:
g_n += ImageSet(Lambda(n, m*n + rem), S.Integers)
return base, g_n
return modterm, rhs
def _solve_modular(f, symbol, domain):
r"""
Helper function for solving modular equations of type ``A - Mod(B, C) = 0``,
    where A may or may not be a function of symbol, B is a function of
    symbol and C is an integer.
Currently ``_solve_modular`` is only able to solve cases
where A is not a function of symbol.
Parameters
==========
f : Expr
The modular equation to be solved, ``f = 0``
symbol : Symbol
The variable in the equation to be solved.
domain : Set
A set over which the equation is solved. It has to be a subset of
Integers.
Returns
=======
A set of integer solutions satisfying the given modular equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy.solvers.solveset import _solve_modular as solve_modulo
>>> from sympy import S, Symbol, sin, Intersection, Interval, Mod
>>> x = Symbol('x')
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Integers)
ImageSet(Lambda(_n, 7*_n + 5), Integers)
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Reals) # domain should be subset of integers.
ConditionSet(x, Eq(Mod(5*x + 6, 7) - 3, 0), Reals)
>>> solve_modulo(-7 + Mod(x, 5), x, S.Integers)
EmptySet
>>> solve_modulo(Mod(12**x, 21) - 18, x, S.Integers)
ImageSet(Lambda(_n, 6*_n + 2), Naturals0)
>>> solve_modulo(Mod(sin(x), 7) - 3, x, S.Integers) # not solvable
ConditionSet(x, Eq(Mod(sin(x), 7) - 3, 0), Integers)
>>> solve_modulo(3 - Mod(x, 5), x, Intersection(S.Integers, Interval(0, 100)))
Intersection(ImageSet(Lambda(_n, 5*_n + 3), Integers), Range(0, 101, 1))
"""
# extract modterm and g_y from f
unsolved_result = ConditionSet(symbol, Eq(f, 0), domain)
modterm = list(f.atoms(Mod))[0]
rhs = -S.One*(f.subs(modterm, S.Zero))
if f.as_coefficients_dict()[modterm].is_negative:
# checks if coefficient of modterm is negative in main equation.
rhs *= -S.One
if not domain.is_subset(S.Integers):
return unsolved_result
if rhs.has(symbol):
# TODO Case: A-> function of symbol, can be extended here
# in future.
return unsolved_result
n = Dummy('n', integer=True)
f_x, g_n = _invert_modular(modterm, rhs, n, symbol)
if f_x == modterm and g_n == rhs:
return unsolved_result
if f_x == symbol:
if domain is not S.Integers:
return domain.intersect(g_n)
return g_n
if isinstance(g_n, ImageSet):
lamda_expr = g_n.lamda.expr
lamda_vars = g_n.lamda.variables
base_sets = g_n.base_sets
sol_set = _solveset(f_x - lamda_expr, symbol, S.Integers)
if isinstance(sol_set, FiniteSet):
tmp_sol = S.EmptySet
for sol in sol_set:
tmp_sol += ImageSet(Lambda(lamda_vars, sol), *base_sets)
sol_set = tmp_sol
else:
sol_set = ImageSet(Lambda(lamda_vars, sol_set), *base_sets)
return domain.intersect(sol_set)
return unsolved_result
def _term_factors(f):
"""
Iterator to get the factors of all terms present
in the given equation.
Parameters
==========
f : Expr
Equation that needs to be addressed
Returns
=======
Factors of all terms present in the equation.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.solveset import _term_factors
>>> x = symbols('x')
>>> list(_term_factors(-2 - x**2 + x*(x + 1)))
[-2, -1, x**2, x, x + 1]
"""
for add_arg in Add.make_args(f):
yield from Mul.make_args(add_arg)
def _solve_exponential(lhs, rhs, symbol, domain):
r"""
Helper function for solving (supported) exponential equations.
Exponential equations are the sum of (currently) at most
two terms with one or both of them having a power with a
symbol-dependent exponent.
For example
.. math:: 5^{2x + 3} - 5^{3x - 1}
.. math:: 4^{5 - 9x} - e^{2 - x}
Parameters
==========
lhs, rhs : Expr
The exponential equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
    A ``ConditionSet`` if the equation is unsolvable; if the assumptions on
    the symbols are insufficient, a different style of ``ConditionSet`` is
    returned whose condition carries the required assumptions and whose base
    set holds the solution(s) valid under them.
Examples
========
>>> from sympy.solvers.solveset import _solve_exponential as solve_expo
>>> from sympy import symbols, S
>>> x = symbols('x', real=True)
>>> a, b = symbols('a b')
>>> solve_expo(2**x + 3**x - 5**x, 0, x, S.Reals) # not solvable
ConditionSet(x, Eq(2**x + 3**x - 5**x, 0), Reals)
>>> solve_expo(a**x - b**x, 0, x, S.Reals) # solvable but incorrect assumptions
ConditionSet(x, (a > 0) & (b > 0), {0})
>>> solve_expo(3**(2*x) - 2**(x + 3), 0, x, S.Reals)
{-3*log(2)/(-2*log(3) + log(2))}
>>> solve_expo(2**x - 4**x, 0, x, S.Reals)
{0}
* Proof of correctness of the method
The logarithm function is the inverse of the exponential function.
The defining relation between exponentiation and logarithm is:
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
Therefore if we are given an equation with exponent terms, we can
convert every term to its corresponding logarithmic form. This is
achieved by taking logarithms and expanding the equation using
logarithmic identities so that it can easily be handled by ``solveset``.
For example:
.. math:: 3^{2x} = 2^{x + 3}
    Taking the logarithm of both sides reduces the equation to
    .. math:: (2x)\log(3) = (x + 3)\log(2)
    This form can easily be handled by ``solveset``.
"""
unsolved_result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
newlhs = powdenest(lhs)
if lhs != newlhs:
# it may also be advantageous to factor the new expr
neweq = factor(newlhs - rhs)
if neweq != (lhs - rhs):
return _solveset(neweq, symbol, domain) # try again with _solveset
if not (isinstance(lhs, Add) and len(lhs.args) == 2):
# solving for the sum of more than two powers is possible
# but not yet implemented
return unsolved_result
if rhs != 0:
return unsolved_result
a, b = list(ordered(lhs.args))
a_term = a.as_independent(symbol)[1]
b_term = b.as_independent(symbol)[1]
a_base, a_exp = a_term.as_base_exp()
b_base, b_exp = b_term.as_base_exp()
if domain.is_subset(S.Reals):
conditions = And(
a_base > 0,
b_base > 0,
Eq(im(a_exp), 0),
Eq(im(b_exp), 0))
else:
conditions = And(
Ne(a_base, 0),
Ne(b_base, 0))
L, R = map(lambda i: expand_log(log(i), force=True), (a, -b))
solutions = _solveset(L - R, symbol, domain)
return ConditionSet(symbol, conditions, solutions)
def _is_exponential(f, symbol):
r"""
Return ``True`` if one or more terms contain ``symbol`` only in
exponents, else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Examples
========
>>> from sympy import symbols, cos, exp
>>> from sympy.solvers.solveset import _is_exponential as check
>>> x, y = symbols('x y')
>>> check(y, y)
False
>>> check(x**y - 1, y)
True
>>> check(x**y*2**y - 1, y)
True
>>> check(exp(x + 3) + 3**x, x)
True
>>> check(cos(2**x), x)
False
* Philosophy behind the helper
The function extracts each term of the equation and checks if it is
of exponential form w.r.t ``symbol``.
"""
rv = False
for expr_arg in _term_factors(f):
if symbol not in expr_arg.free_symbols:
continue
if (isinstance(expr_arg, Pow) and
symbol not in expr_arg.base.free_symbols or
isinstance(expr_arg, exp)):
rv = True # symbol in exponent
else:
return False # dependent on symbol in non-exponential way
return rv
def _solve_logarithm(lhs, rhs, symbol, domain):
r"""
Helper to solve logarithmic equations which are reducible
to a single instance of `\log`.
Logarithmic equations are (currently) the equations that contains
`\log` terms which can be reduced to a single `\log` term or
a constant using various logarithmic identities.
For example:
.. math:: \log(x) + \log(x - 4)
can be reduced to:
.. math:: \log(x(x - 4))
Parameters
==========
lhs, rhs : Expr
The logarithmic equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy import symbols, log, S
>>> from sympy.solvers.solveset import _solve_logarithm as solve_log
>>> x = symbols('x')
>>> f = log(x - 3) + log(x + 3)
>>> solve_log(f, 0, x, S.Reals)
{-sqrt(10), sqrt(10)}
* Proof of correctness
    A logarithm is another way of writing an exponent and is defined by
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
When one side of the equation contains a single logarithm, the
equation can be solved by rewriting the equation as an equivalent
exponential equation as defined above. But if one side contains
more than one logarithm, we need to use the properties of logarithm
to condense it into a single logarithm.
    Take for example
    .. math:: \log(2x) - 15 = 0
    which contains a single logarithm and can therefore be rewritten
    directly in exponential form as
.. math:: x = \frac{e^{15}}{2}
But if the equation has more than one logarithm as
.. math:: \log(x - 3) + \log(x + 3) = 0
we use logarithmic identities to convert it into a reduced form
Using,
.. math:: \log(a) + \log(b) = \log(ab)
the equation becomes,
.. math:: \log((x - 3)(x + 3))
This equation contains one logarithm and can be solved by rewriting
to exponents.
"""
new_lhs = logcombine(lhs, force=True)
new_f = new_lhs - rhs
return _solveset(new_f, symbol, domain)
def _is_logarithmic(f, symbol):
r"""
Return ``True`` if the equation is in the form
`a\log(f(x)) + b\log(g(x)) + ... + c` else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Returns
=======
``True`` if the equation is logarithmic otherwise ``False``.
Examples
========
>>> from sympy import symbols, tan, log
>>> from sympy.solvers.solveset import _is_logarithmic as check
>>> x, y = symbols('x y')
>>> check(log(x + 2) - log(x + 3), x)
True
>>> check(tan(log(2*x)), x)
False
>>> check(x*log(x), x)
False
>>> check(x + log(x), x)
False
>>> check(y + log(x), x)
True
* Philosophy behind the helper
The function extracts each term and checks whether it is
logarithmic w.r.t ``symbol``.
"""
rv = False
for term in Add.make_args(f):
saw_log = False
for term_arg in Mul.make_args(term):
if symbol not in term_arg.free_symbols:
continue
if isinstance(term_arg, log):
if saw_log:
return False # more than one log in term
saw_log = True
else:
return False # dependent on symbol in non-log way
if saw_log:
rv = True
return rv
def _is_lambert(f, symbol):
r"""
If this returns ``False`` then the Lambert solver (``_solve_lambert``) will not be called.
Explanation
===========
Quick check for cases that the Lambert solver might be able to handle.
    1. Equations containing more than two operands and ``symbol``-dependent
       terms involving any of ``Pow``, ``exp``, ``HyperbolicFunction``,
       ``TrigonometricFunction`` or ``log``.
    2. In ``Pow`` and ``exp`` the exponent should contain ``symbol``, whereas
       ``HyperbolicFunction``, ``TrigonometricFunction`` and ``log`` should
       contain ``symbol`` in their argument.
    3. For ``HyperbolicFunction`` and ``TrigonometricFunction`` the number of
       trigonometric terms in the equation should be less than the number of
       symbol-dependent terms (since ``A*cos(x) + B*sin(x) - c`` is not of
       Lambert type).
    Some forms of Lambert-type equations are:
1. X**X = C
2. X*(B*log(X) + D)**A = C
3. A*log(B*X + A) + d*X = C
4. (B*X + A)*exp(d*X + g) = C
5. g*exp(B*X + h) - B*X = C
6. A*D**(E*X + g) - B*X = C
7. A*cos(X) + B*sin(X) - D*X = C
8. A*cosh(X) + B*sinh(X) - D*X = C
Where X is any variable,
A, B, C, D, E are any constants,
g, h are linear functions or log terms.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Returns
=======
If this returns ``False`` then the Lambert solver (``_solve_lambert``) will not be called.
Examples
========
>>> from sympy.solvers.solveset import _is_lambert
>>> from sympy import symbols, cosh, sinh, log
>>> x = symbols('x')
>>> _is_lambert(3*log(x) - x*log(3), x)
True
>>> _is_lambert(log(log(x - 3)) + log(x-3), x)
True
>>> _is_lambert(cosh(x) - sinh(x), x)
False
>>> _is_lambert((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x)
True
See Also
========
_solve_lambert
"""
term_factors = list(_term_factors(f.expand()))
    # total number of symbol-dependent factors in the equation
no_of_symbols = len([arg for arg in term_factors if arg.has(symbol)])
# total number of trigonometric terms in equation
no_of_trig = len([arg for arg in term_factors \
if arg.has(HyperbolicFunction, TrigonometricFunction)])
if f.is_Add and no_of_symbols >= 2:
# `log`, `HyperbolicFunction`, `TrigonometricFunction` should have symbols
# and no_of_trig < no_of_symbols
lambert_funcs = (log, HyperbolicFunction, TrigonometricFunction)
if any(isinstance(arg, lambert_funcs)\
for arg in term_factors if arg.has(symbol)):
if no_of_trig < no_of_symbols:
return True
# here, `Pow`, `exp` exponent should have symbols
elif any(isinstance(arg, (Pow, exp)) \
for arg in term_factors if (arg.as_base_exp()[1]).has(symbol)):
return True
return False
def _transolve(f, symbol, domain):
r"""
Function to solve transcendental equations. It is a helper to
``solveset`` and should be used internally. ``_transolve``
currently supports the following class of equations:
- Exponential equations
- Logarithmic equations
Parameters
==========
f : Any transcendental equation that needs to be solved.
This needs to be an expression, which is assumed
to be equal to ``0``.
symbol : The variable for which the equation is solved.
This needs to be of class ``Symbol``.
domain : A set over which the equation is solved.
This needs to be of class ``Set``.
Returns
=======
Set
A set of values for ``symbol`` for which ``f`` is equal to
zero. An ``EmptySet`` is returned if ``f`` does not have solutions
        in the respective domain. A ``ConditionSet`` is returned as an
        unsolved object if algorithms to evaluate the complete solution
        are not yet implemented.
How to use ``_transolve``
=========================
``_transolve`` should not be used as an independent function, because
    it assumes that the equation (``f``) and the ``symbol`` come from
    ``solveset`` and might already have undergone a few modifications.
To use ``_transolve`` as an independent function the equation (``f``)
and the ``symbol`` should be passed as they would have been by
``solveset``.
Examples
========
>>> from sympy.solvers.solveset import _transolve as transolve
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy import symbols, S, pprint
>>> x = symbols('x', real=True) # assumption added
>>> transolve(5**(x - 3) - 3**(2*x + 1), x, S.Reals)
{-(log(3) + 3*log(5))/(-log(5) + 2*log(3))}
How ``_transolve`` works
========================
``_transolve`` uses two types of helper functions to solve equations
of a particular class:
Identifying helpers: To determine whether a given equation
belongs to a certain class of equation or not. Returns either
``True`` or ``False``.
Solving helpers: Once an equation is identified, a corresponding
helper either solves the equation or returns a form of the equation
that ``solveset`` might better be able to handle.
* Philosophy behind the module
The purpose of ``_transolve`` is to take equations which are not
already polynomial in their generator(s) and to either recast them
as such through a valid transformation or to solve them outright.
A pair of helper functions for each class of supported
transcendental functions are employed for this purpose. One
identifies the transcendental form of an equation and the other
either solves it or recasts it into a tractable form that can be
solved by ``solveset``.
For example, an equation in the form `ab^{f(x)} - cd^{g(x)} = 0`
can be transformed to
`\log(a) + f(x)\log(b) - \log(c) - g(x)\log(d) = 0`
(under certain assumptions) and this can be solved with ``solveset``
if `f(x)` and `g(x)` are in polynomial form.
How ``_transolve`` is better than ``_tsolve``
=============================================
1) Better output
``_transolve`` provides expressions in a more simplified form.
Consider a simple exponential equation
>>> f = 3**(2*x) - 2**(x + 3)
>>> pprint(transolve(f, x, S.Reals), use_unicode=False)
-3*log(2)
{------------------}
-2*log(3) + log(2)
>>> pprint(tsolve(f, x), use_unicode=False)
/ 3 \
| --------|
| log(2/9)|
[-log\2 /]
2) Extensible
The API of ``_transolve`` is designed such that it is easily
extensible, i.e. the code that solves a given class of
equations is encapsulated in a helper and not mixed in with
the code of ``_transolve`` itself.
3) Modular
    ``_transolve`` is designed to be modular, i.e., for every class of
    equation a separate helper for identification and solving is
    implemented. This makes it easy to change or modify any of the
    methods implemented in the helpers without interfering with the
    actual structure of the API.
    4) Faster Computation
    Solving an equation via ``_transolve`` is much faster than via
    ``_tsolve``. In ``solve``, every possibility is attempted in order to
    obtain the solutions, and this series of attempts slows things down.
    In ``_transolve``, computation begins only after a particular type of
    equation has been identified.
How to add new class of equations
=================================
Adding a new class of equation solver is a three-step procedure:
- Identify the type of the equations
Determine the type of the class of equations to which they belong:
it could be of ``Add``, ``Pow``, etc. types. Separate internal functions
are used for each type. Write identification and solving helpers
and use them from within the routine for the given type of equation
(after adding it, if necessary). Something like:
.. code-block:: python
def add_type(lhs, rhs, x):
....
if _is_exponential(lhs, x):
new_eq = _solve_exponential(lhs, rhs, x)
....
rhs, lhs = eq.as_independent(x)
if lhs.is_Add:
result = add_type(lhs, rhs, x)
- Define the identification helper.
- Define the solving helper.
    Apart from this, a few other things need to be taken care of when
    adding an equation solver:
    - Naming conventions:
      The identification helper should be named ``_is_class``, where
      ``class`` is the name or abbreviation of the class of equation,
      and the solving helper should be named ``_solve_class``.
      For example, for exponential equations these become
      ``_is_exponential`` and ``_solve_exponential``.
- The identifying helpers should take two input parameters,
the equation to be checked and the variable for which a solution
is being sought, while solving helpers would require an additional
domain parameter.
- Be sure to consider corner cases.
- Add tests for each helper.
- Add a docstring to your helper that describes the method
implemented.
The documentation of the helpers should identify:
- the purpose of the helper,
- the method used to identify and solve the equation,
- a proof of correctness
- the return values of the helpers
"""
def add_type(lhs, rhs, symbol, domain):
"""
Helper for ``_transolve`` to handle equations of
        ``Add`` type, i.e. equations of the form
``a*f(x) + b*g(x) + .... = c``.
For example: 4**x + 8**x = 0
"""
result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
# check if it is exponential type equation
if _is_exponential(lhs, symbol):
result = _solve_exponential(lhs, rhs, symbol, domain)
# check if it is logarithmic type equation
elif _is_logarithmic(lhs, symbol):
result = _solve_logarithm(lhs, rhs, symbol, domain)
return result
result = ConditionSet(symbol, Eq(f, 0), domain)
# invert_complex handles the call to the desired inverter based
# on the domain specified.
lhs, rhs_s = invert_complex(f, 0, symbol, domain)
if isinstance(rhs_s, FiniteSet):
assert (len(rhs_s.args)) == 1
rhs = rhs_s.args[0]
if lhs.is_Add:
result = add_type(lhs, rhs, symbol, domain)
else:
result = rhs_s
return result
def solveset(f, symbol=None, domain=S.Complexes):
r"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An :class:`~.EmptySet` is returned if `f` is False or nonzero.
        A :class:`~.ConditionSet` is returned as an unsolved object if
        algorithms to evaluate the complete solution are not yet implemented.
``solveset`` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
Notes
=====
Python interprets 0 and 1 as False and True, respectively, but
in this function they refer to solutions of an expression. So 0 and 1
return the domain and EmptySet, respectively, while True and False
return the opposite (as they are assumed to be solutions of relational
expressions).
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S, Eq
>>> from sympy.solvers.solveset import solveset, solveset_real
* The default domain is complex. Not specifying a domain will lead
to the solving of the equation in the complex domain (and this
is not affected by the assumptions on the symbol):
>>> x = Symbol('x')
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
>>> x = Symbol('x', real=True)
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
* If you want to use ``solveset`` to solve the equation in the
real domain, provide a real domain. (Using ``solveset_real``
does this automatically.)
>>> R = S.Reals
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, R)
{0}
>>> solveset_real(exp(x) - 1, x)
{0}
The solution is unaffected by assumptions on the symbol:
>>> p = Symbol('p', positive=True)
>>> pprint(solveset(p**2 - 4))
{-2, 2}
When a :class:`~.ConditionSet` is returned, symbols with assumptions that
would alter the set are replaced with more generic symbols:
>>> i = Symbol('i', imaginary=True)
>>> solveset(Eq(i**2 + i*sin(i), 1), i, domain=S.Reals)
ConditionSet(_R, Eq(_R**2 + _R*sin(_R) - 1, 0), Reals)
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, R)
Interval.open(0, oo)
"""
f = sympify(f)
symbol = sympify(symbol)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
if not isinstance(f, (Expr, Relational, Number)):
raise ValueError("%s is not a valid SymPy expression" % f)
if not isinstance(symbol, (Expr, Relational)) and symbol is not None:
raise ValueError("%s is not a valid SymPy symbol" % (symbol,))
if not isinstance(domain, Set):
raise ValueError("%s is not a valid domain" %(domain))
free_symbols = f.free_symbols
if f.has(Piecewise):
f = piecewise_fold(f)
if symbol is None and not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
elif free_symbols:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not isinstance(symbol, Symbol):
f, s, swap = recast_to_symbols([f], [symbol])
# the xreplace will be needed if a ConditionSet is returned
return solveset(f[0], s[0], domain).xreplace(swap)
# solveset should ignore assumptions on symbols
if symbol not in _rc:
x = _rc[0] if domain.is_subset(S.Reals) else _rc[1]
rv = solveset(f.xreplace({symbol: x}), x, domain)
# try to use the original symbol if possible
try:
_rv = rv.xreplace({x: symbol})
except TypeError:
_rv = rv
if rv.dummy_eq(_rv):
rv = _rv
return rv
    # Abs has its own handling method (_solve_abs) which avoids the
    # Piecewise rewrite in which the first piece of abs(x) is for x >= 0
    # and the 2nd piece for x < 0 -- solutions can look better if the 2nd
    # condition is x <= 0.  Since the solution is a set, duplication of
    # results is not an issue, e.g. {y, -y} when y is 0 will be {0}
f, mask = _masked(f, Abs)
f = f.rewrite(Piecewise) # everything that's not an Abs
for d, e in mask:
# everything *in* an Abs
e = e.func(e.args[0].rewrite(Piecewise))
f = f.xreplace({d: e})
f = piecewise_fold(f)
return _solveset(f, symbol, domain, _check=True)
def solveset_real(f, symbol):
return solveset(f, symbol, S.Reals)
def solveset_complex(f, symbol):
return solveset(f, symbol, S.Complexes)
def _solveset_multi(eqs, syms, domains):
'''Basic implementation of a multivariate solveset.
For internal use (not ready for public consumption)'''
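    # Illustrative sketch (internal helper; the exact set wrapper of the
    # output may vary): _solveset_multi([x + y - 1, x - y], [x, y],
    # [S.Reals, S.Reals]) solves one equation for one symbol at a time,
    # substitutes each solution into the remaining equations and recurses,
    # ultimately producing the single point (1/2, 1/2).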
rep = {}
for sym, dom in zip(syms, domains):
if dom is S.Reals:
rep[sym] = Symbol(sym.name, real=True)
eqs = [eq.subs(rep) for eq in eqs]
syms = [sym.subs(rep) for sym in syms]
syms = tuple(syms)
if len(eqs) == 0:
return ProductSet(*domains)
if len(syms) == 1:
sym = syms[0]
domain = domains[0]
solsets = [solveset(eq, sym, domain) for eq in eqs]
solset = Intersection(*solsets)
return ImageSet(Lambda((sym,), (sym,)), solset).doit()
eqs = sorted(eqs, key=lambda eq: len(eq.free_symbols & set(syms)))
for n, eq in enumerate(eqs):
sols = []
all_handled = True
for sym in syms:
if sym not in eq.free_symbols:
continue
sol = solveset(eq, sym, domains[syms.index(sym)])
if isinstance(sol, FiniteSet):
i = syms.index(sym)
symsp = syms[:i] + syms[i+1:]
domainsp = domains[:i] + domains[i+1:]
eqsp = eqs[:n] + eqs[n+1:]
for s in sol:
eqsp_sub = [eq.subs(sym, s) for eq in eqsp]
sol_others = _solveset_multi(eqsp_sub, symsp, domainsp)
fun = Lambda((symsp,), symsp[:i] + (s,) + symsp[i:])
sols.append(ImageSet(fun, sol_others).doit())
else:
all_handled = False
if all_handled:
return Union(*sols)
def solvify(f, symbol, domain):
"""Solves an equation using solveset and returns the solution in accordance
with the `solve` output API.
Returns
=======
We classify the output based on the type of solution returned by `solveset`.
    Solution    | Output
    ----------------------------------------
    FiniteSet   | list
    ImageSet,   | list (if `f` is periodic)
    Union       |
    Union       | list (with FiniteSet)
    EmptySet    | empty list
    Others      | None
Raises
======
NotImplementedError
A ConditionSet is the input.
Examples
========
>>> from sympy.solvers.solveset import solvify
>>> from sympy.abc import x
>>> from sympy import S, tan, sin, exp
>>> solvify(x**2 - 9, x, S.Reals)
[-3, 3]
>>> solvify(sin(x) - 1, x, S.Reals)
[pi/2]
>>> solvify(tan(x), x, S.Reals)
[0]
>>> solvify(exp(x) - 1, x, S.Complexes)
>>> solvify(exp(x) - 1, x, S.Reals)
[0]
"""
solution_set = solveset(f, symbol, domain)
result = None
if solution_set is S.EmptySet:
result = []
elif isinstance(solution_set, ConditionSet):
raise NotImplementedError('solveset is unable to solve this equation.')
elif isinstance(solution_set, FiniteSet):
result = list(solution_set)
else:
period = periodicity(f, symbol)
if period is not None:
solutions = S.EmptySet
iter_solutions = ()
if isinstance(solution_set, ImageSet):
iter_solutions = (solution_set,)
elif isinstance(solution_set, Union):
if all(isinstance(i, ImageSet) for i in solution_set.args):
iter_solutions = solution_set.args
for solution in iter_solutions:
solutions += solution.intersect(Interval(0, period, False, True))
if isinstance(solutions, FiniteSet):
result = list(solutions)
else:
solution = solution_set.intersect(domain)
if isinstance(solution, Union):
            # only FiniteSet members of the Union are collected here, not
            # ImageSet members; this could be extended if required
if any(isinstance(i, FiniteSet) for i in solution.args):
result = [sol for soln in solution.args \
for sol in soln.args if isinstance(soln,FiniteSet)]
else:
return None
elif isinstance(solution, FiniteSet):
result += solution
return result
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_coeffs(eq, *syms, dict=False):
"""Return a list whose elements are the coefficients of the
corresponding symbols in the sum of terms in ``eq``.
The additive constant is returned as the last element of the
list.
Raises
======
NonlinearError
The equation contains a nonlinear term
ValueError
duplicate or unordered symbols are passed
Parameters
==========
dict - (default False) when True, return coefficients as a
dictionary with coefficients keyed to syms that were present;
key 1 gives the constant term
Examples
========
>>> from sympy.solvers.solveset import linear_coeffs
>>> from sympy.abc import x, y, z
>>> linear_coeffs(3*x + 2*y - 1, x, y)
[3, 2, -1]
It is not necessary to expand the expression:
>>> linear_coeffs(x + y*(z*(x*3 + 2) + 3), x)
[3*y*z + 1, y*(2*z + 3)]
When nonlinear is detected, an error will be raised:
* even if they would cancel after expansion (so the
situation does not pass silently past the caller's
attention)
>>> eq = 1/x*(x - 1) + 1/x
>>> linear_coeffs(eq.expand(), x)
[0, 1]
>>> linear_coeffs(eq, x)
Traceback (most recent call last):
...
NonlinearError:
nonlinear in given generators
* when there are cross terms
>>> linear_coeffs(x*(y + 1), x, y)
Traceback (most recent call last):
...
NonlinearError:
symbol-dependent cross-terms encountered
* when there are terms that contain an expression
dependent on the symbols that is not linear
>>> linear_coeffs(x**2, x)
Traceback (most recent call last):
...
NonlinearError:
nonlinear in given generators
"""
eq = _sympify(eq)
if len(syms) == 1 and iterable(syms[0]) and not isinstance(syms[0], Basic):
raise ValueError('expecting unpacked symbols, *syms')
symset = set(syms)
if len(symset) != len(syms):
raise ValueError('duplicate symbols given')
try:
d, c = _linear_eq_to_dict([eq], symset)
d = d[0]
c = c[0]
except PolyNonlinearError as err:
raise NonlinearError(str(err))
if dict:
if c:
d[S.One] = c
return d
rv = [S.Zero]*(len(syms) + 1)
rv[-1] = c
for i, k in enumerate(syms):
if k not in d:
continue
rv[i] = d[k]
return rv
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. Element ``M[i, j]`` corresponds to the coefficient
of the jth symbol in the ith equation.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system will return $A$ and $b$ as:
$$ A = \left[\begin{array}{ccc}
4 & 2 & 3 \\
3 & 1 & 1 \\
2 & 4 & 9
\end{array}\right] \ \ b = \left[\begin{array}{c}
1 \\ -6 \\ 2
\end{array}\right] $$
The only simplification performed is to convert
``Eq(a, b)`` $\Rightarrow a - b$.
Raises
======
NonlinearError
The equations contain a nonlinear term.
ValueError
The symbols are not given or are not unique.
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> c, x, y, z = symbols('c, x, y, z')
The coefficients (numerical or symbolic) of the symbols will
be returned as matrices:
>>> eqns = [c*x + z - 1 - c, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[c, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[c + 1],
[ 0],
[ 0]])
This routine does not simplify expressions and will raise an error
if nonlinearity is encountered:
>>> eqns = [
... (x**2 - 3*x)/(x - 3) - 3,
... y**2 - 3*y - y*(y - 4) + x - 4]
>>> linear_eq_to_matrix(eqns, [x, y])
Traceback (most recent call last):
...
NonlinearError:
symbol-dependent term can be ignored using `strict=False`
Simplifying these equations will discard the removable singularity
in the first and reveal the linear structure of the second:
>>> [e.simplify() for e in eqns]
[x - 3, x + y - 4]
Any such simplification needed to eliminate nonlinear terms must
be done *before* calling this routine.
"""
if not symbols:
raise ValueError(filldedent('''
Symbols must be given, for which coefficients
are to be found.
'''))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
if has_dups(symbols):
raise ValueError('Symbols must be unique')
equations = sympify(equations)
if isinstance(equations, MatrixBase):
equations = list(equations)
elif isinstance(equations, (Expr, Eq)):
equations = [equations]
elif not is_sequence(equations):
raise ValueError(filldedent('''
Equation(s) must be given as a sequence, Expr,
Eq or Matrix.
'''))
# construct the dictionaries
try:
eq, c = _linear_eq_to_dict(equations, symbols)
except PolyNonlinearError as err:
raise NonlinearError(str(err))
# prepare output matrices
n, m = shape = len(eq), len(symbols)
ix = dict(zip(symbols, range(m)))
A = zeros(*shape)
for row, d in enumerate(eq):
for k in d:
col = ix[k]
A[row, col] = d[k]
b = Matrix(n, 1, [-i for i in c])
return A, b
def linsolve(system, *symbols):
r"""
Solve system of $N$ linear equations with $M$ variables; both
underdetermined and overdetermined systems are supported.
    The possible number of solutions is zero, one or infinite. If the
    system is inconsistent (zero solutions), EmptySet is returned,
    whereas infinite solutions are represented parametrically in terms
    of the given symbols. For a unique solution a :class:`~.FiniteSet`
    of ordered tuples is returned.
All standard input formats are supported:
For the given set of equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented matrix form, ``system`` given below:
    $$ \text{system} = \left[\begin{array}{cccc}
    3 & 2 & -1 & 1\\
    2 & -2 & 4 & -2\\
    2 & -1 & 2 & 0
    \end{array}\right] $$
::
system = Matrix([[3, 2, -1, 1], [2, -2, 4, -2], [2, -1, 2, 0]])
* List of equations form
::
        system = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, 2*x - y + 2*z]
* Input $A$ and $b$ in matrix form (from $Ax = b$) are given as:
$$ A = \left[\begin{array}{ccc}
3 & 2 & -1 \\
2 & -2 & 4 \\
2 & -1 & 2
\end{array}\right] \ \ b = \left[\begin{array}{c}
1 \\ -2 \\ 0
\end{array}\right] $$
::
A = Matrix([[3, 2, -1], [2, -2, 4], [2, -1, 2]])
b = Matrix([[1], [-2], [0]])
system = (A, b)
Symbols can always be passed but are actually only needed
when 1) a system of equations is being passed and 2) the
system is passed as an underdetermined matrix and one wants
to control the name of the free variables in the result.
An error is raised if no symbols are used for case 1, but if
no symbols are provided for case 2, internally generated symbols
will be provided. When providing symbols for case 2, there should
    be at least as many symbols as there are columns in matrix A.
    The algorithm used here is Gauss-Jordan elimination, which
    results, after elimination, in a reduced row echelon form matrix.
Returns
=======
A FiniteSet containing an ordered tuple of values for the
unknowns for which the `system` has a solution. (Wrapping
the tuple in FiniteSet is used to maintain a consistent
output format throughout solveset.)
Returns EmptySet, if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
* Parametric Solution: In case the system is underdetermined, the
function will return a parametric solution in terms of the given
symbols. Those that are free will be returned unchanged. e.g. in
the system below, `z` is returned as the solution for variable z;
it can take on any value.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), x, y, z)
{(z - 1, 2 - 2*z, z)}
If no symbols are given, internally generated symbols will be used.
The ``tau0`` in the third position indicates (as before) that the third
variable -- whatever it is named -- can take on any value:
>>> linsolve((A, b))
{(tau0 - 1, 2 - 2*tau0, tau0)}
* List of equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + y/2 - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d))}
* A degenerate system returns solution as set of given
symbols.
>>> system = Matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]))
>>> linsolve(system, x, y)
{(x, y)}
* For an empty system linsolve returns empty set
>>> linsolve([], x)
EmptySet
* An error is raised if any nonlinearity is detected, even
if it could be removed with expansion
>>> linsolve([x*(1/x - 1)], x)
Traceback (most recent call last):
...
NonlinearError: nonlinear term: 1/x
>>> linsolve([x*(y + 1)], x, y)
Traceback (most recent call last):
...
NonlinearError: nonlinear cross-term: x*(y + 1)
>>> linsolve([x**2 - 1], x)
Traceback (most recent call last):
...
NonlinearError: nonlinear term: x**2
"""
if not system:
return S.EmptySet
# If second argument is an iterable
if symbols and hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
sym_gen = isinstance(symbols, GeneratorType)
dup_msg = 'duplicate symbols given'
b = None # if we don't get b the input was bad
# unpack system
if hasattr(system, '__iter__'):
# 1). (A, b)
if len(system) == 2 and isinstance(system[0], MatrixBase):
A, b = system
# 2). (eq1, eq2, ...)
if not isinstance(system[0], MatrixBase):
if sym_gen or not symbols:
raise ValueError(filldedent('''
When passing a system of equations, the explicit
symbols for which a solution is being sought must
be given as a sequence, too.
'''))
if len(set(symbols)) != len(symbols):
raise ValueError(dup_msg)
#
# Pass to the sparse solver implemented in polys. It is important
# that we do not attempt to convert the equations to a matrix
# because that would be very inefficient for large sparse systems
# of equations.
#
eqs = system
eqs = [sympify(eq) for eq in eqs]
try:
sol = _linsolve(eqs, symbols)
except PolyNonlinearError as exc:
# e.g. cos(x) contains an element of the set of generators
raise NonlinearError(str(exc))
if sol is None:
return S.EmptySet
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
return sol
elif isinstance(system, MatrixBase) and not (
symbols and not isinstance(symbols, GeneratorType) and
isinstance(symbols[0], MatrixBase)):
# 3). A augmented with b
A, b = system[:, :-1], system[:, -1:]
if b is None:
raise ValueError("Invalid arguments")
if sym_gen:
symbols = [next(symbols) for i in range(A.cols)]
symset = set(symbols)
if any(symset & (A.free_symbols | b.free_symbols)):
raise ValueError(filldedent('''
At least one of the symbols provided
already appears in the system to be solved.
One way to avoid this is to use Dummy symbols in
the generator, e.g. numbered_symbols('%s', cls=Dummy)
''' % symbols[0].name.rstrip('1234567890')))
elif len(symset) != len(symbols):
raise ValueError(dup_msg)
if not symbols:
symbols = [Dummy() for _ in range(A.cols)]
name = _uniquely_named_symbol('tau', (A, b),
compare=lambda i: str(i).rstrip('1234567890')).name
gen = numbered_symbols(name)
else:
gen = None
# This is just a wrapper for solve_lin_sys
eqs = []
rows = A.tolist()
for rowi, bi in zip(rows, b):
terms = [elem * sym for elem, sym in zip(rowi, symbols) if elem]
terms.append(-bi)
eqs.append(Add(*terms))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is None:
return S.EmptySet
#sol = {sym:val for sym, val in sol.items() if sym != val}
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
if gen is not None:
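# Dummy placeholders that remain free in the solution are renamed to
# numbered symbols (tau0, tau1, ... by default) so the output is readable.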
solsym = sol.free_symbols
rep = {sym: next(gen) for sym in symbols if sym in solsym}
sol = sol.subs(rep)
return sol
##############################################################################
# ------------------------------nonlinsolve ---------------------------------#
##############################################################################
def _return_conditionset(eqs, symbols):
# return conditionset
eqs = (Eq(lhs, 0) for lhs in eqs)
condition_set = ConditionSet(
Tuple(*symbols), And(*eqs), S.Complexes**len(symbols))
return condition_set
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
Solves the `system` using the substitution method. It is used in
:func:`~.nonlinsolve`, which calls it when any of the
equations is a non-polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved for.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dict
If no symbol values are known then it is an empty list, otherwise
it is a list of dicts with symbols as keys and the corresponding
known values.
exclude : Set of expressions.
Mostly denominator expression(s) of the equations of the system.
The final solution should not make any of these expressions zero.
all_symbols : known_symbols + symbols (unsolved).
Returns
=======
A FiniteSet of ordered tuples of values of `all_symbols` for which the
`system` has a solution. The order of values in the tuple is the same as the
symbols present in the parameter `all_symbols`. If the parameter `all_symbols`
is None then it is the same as the symbols present in the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not :class:`~.Symbol` type.
Examples
========
>>> from sympy import symbols, substitution
>>> x, y = symbols('x, y', real=True)
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
{(-1, 1)}
* When you want a soln not satisfying $x + 1 = 0$
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
{(1, -1)}
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
{(-3, 4), (2, -1)}
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
{(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers))}
"""
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
msg = ('symbols should be given as a sequence, e.g. a list. '
'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
if not getattr(symbols[0], 'is_Symbol', False):
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
# when total_solveset_call equals total_conditionset
# it means that solveset failed to solve all eqs.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
# sort the system so that the equation with the fewest unsolved symbols
# comes first in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
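# e.g. for [x + y - 1, x - 3] the second equation (one unsolved symbol) is
# attempted before the first (two unsolved symbols).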
def add_intersection_complement(result, intersection_dict, complement_dict):
# If solveset has returned some intersection/complement
# for any symbol, it will be added in the final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
intersect_set, complement_set = None, None
for key_sym, value_sym in intersection_dict.items():
if key_sym == key_res:
intersect_set = value_sym
for key_sym, value_sym in complement_dict.items():
if key_sym == key_res:
complement_set = value_sym
if intersect_set or complement_set:
new_value = FiniteSet(value_res)
if intersect_set and intersect_set != S.Complexes:
new_value = Intersection(new_value, intersect_set)
if complement_set:
new_value = Complement(new_value, complement_set)
if new_value is S.EmptySet:
res_copy = None
break
elif new_value.is_FiniteSet and len(new_value) == 1:
res_copy[key_res] = set(new_value).pop()
else:
res_copy[key_res] = new_value
if res_copy is not None:
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sym, sol, soln_imageset):
"""Separate the Complements, Intersections, ImageSet lambda expr and
its base_set. This function returns the unmasks sol from different classes
of sets and also returns the appended ImageSet elements in a
soln_imageset (dict: where key as unmasked element and value as ImageSet).
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, ConditionSet):
# extracts any solution in ConditionSet
sol = sol.base_set
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
# We need them in sequence, so append the FiniteSet elements
# first and then the imageset or other sets.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] not in (S.Reals, S.Complexes):
# Sometimes solveset returns soln with intersection
# S.Reals or S.Complexes. We don't consider that
# intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imgset = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imgset
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
# replace all dummy variables (Imageset lambda variables)
# with zero before `checksol`. Considering fundamental soln
# for `checksol`.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude is True if the soln satisfies (zeros out) any expr of the `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
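# helper for _append_new_soln: substitute any imageset dummy with 0 and use
# checksol to test `res` against the remaining equation `eq`; `res` is
# appended to `result` unless the test is definitely False, in which case it
# is flagged for deletion.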
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = eq if eq in (True, False) else checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
`imgset_yes` is (base, dummy_var) if there was imageset in previously
calculated result(otherwise empty tuple). `original_imageset` is dict
of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
# when `sym` and `sol` are `None` there is no new soln; in that
# case rnew is appended directly after substituting the original
# imagesets into the rnew values, if present
# (second-to-last line of this function, using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif sol in soln_imageset.keys():
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
# separate into first and second priority: a `res` that makes `eq`
# evaluate to zero should be used first, then the other results
# (second priority). If this is not done we may miss some soln.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is :func:`~.solveset_complex` or :func:`~.solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
# iterate over the equations, fewest unsolved symbols first
# (eqs_in_better_order is already sorted that way)
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
(base,) = value_res.base_sets
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res).expand()
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
# `delete_res` is True: substituting `res` into eq2 did not
# give zero, or `res` satisfies an expr of the `exclude`
# list, so drop this soln.
result.remove(res)
continue # skip as it's independent of desired symbols
depen1, depen2 = (eq2.rewrite(Add)).as_independent(*unsolved_syms)
if (depen1.has(Abs) or depen2.has(Abs)) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection S.Reals, to confirm that
# soln is in domain=S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol's real soln, another symbol may have
# corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym) # might give ValueError with Abs
except (NotImplementedError, ValueError):
# solveset was not able to solve equation `eq2` for `sym`;
# we may still get a soln from one of the remaining equations
continue
if isinstance(soln, ConditionSet):
if soln.base_set in (S.Reals, S.Complexes):
soln = S.EmptySet
# don't do `continue` we may get soln
# in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
else:
soln = soln.base_set
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
sym, soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sym, sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any(
ss in free for ss in got_symbol
):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
# put each solution in res and append the new result
# in the new result list (solution for symbol `s`)
# along with old results.
for k, v in res.items():
if isinstance(v, Expr) and isinstance(sol, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if sol in soln_imageset.keys():
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
# deleting the `res` (a soln) since it satisfies
# an expr of the `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
# end of def _solve_using_known_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
# If total_solveset_call is equal to total_conditionset
# then solveset failed to solve all of the equations.
# In this case we return a ConditionSet here.
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# don't keep duplicate solutions
filtered_complex = []
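# a complex result is kept only when no real result with the same keys has
# (dummy-)equal values; the for/else below appends it when no match is found.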
for i in list(new_result_complex):
for j in list(new_result_real):
if i.keys() != j.keys():
continue
if all(a.dummy_eq(b) for a, b in zip(i.values(), j.values()) \
if not (isinstance(a, int) and isinstance(b, int))):
break
else:
filtered_complex.append(i)
# overall result
result = new_result_real + filtered_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
# res is an empty dict: nothing was solved, skip it
continue
# If len(res) < len(all_symbols) the soln is infinite:
# some or all of the soln depends on at least one symbol.
# e.g. {x: y+2} becomes the final soln {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y, y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections or complements:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, complements)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
def _solveset_work(system, symbols):
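# fast path used by nonlinsolve for a system of exactly one equation in one
# symbol: solve it with solveset and wrap each solution in a 1-tuple so the
# output format matches the multi-symbol case.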
soln = solveset(system[0], symbols[0])
if isinstance(soln, FiniteSet):
_soln = FiniteSet(*[tuple((s,)) for s in soln])
return _soln
else:
return FiniteSet(tuple(FiniteSet(soln)))
def _handle_positive_dimensional(polys, symbols, denominators):
from sympy.polys.polytools import groebner
# substitution method where new system is groebner basis of the system
_symbols = list(symbols)
_symbols.sort(key=default_sort_key)
basis = groebner(polys, _symbols, polys=True)
new_system = []
for poly_eq in basis:
new_system.append(poly_eq.as_expr())
result = [{}]
result = substitution(
new_system, symbols, result, [],
denominators)
return result
# end of def _handle_positive_dimensional()
def _handle_zero_dimensional(polys, symbols, system):
# solve 0 dimensional poly system using `solve_poly_system`
result = solve_poly_system(polys, *symbols)
# Some spurious soln may have been introduced because `unrad` was
# used in `_separate_poly_nonpoly`, so check each one and remove it
# if it does not actually solve the original system.
result_update = S.EmptySet
for res in result:
dict_sym_value = dict(list(zip(symbols, res)))
if all(checksol(eq, dict_sym_value) for eq in system):
result_update += FiniteSet(res)
return result_update
# end of def _handle_zero_dimensional()
def _separate_poly_nonpoly(system, symbols):
polys = []
polys_expr = []
nonpolys = []
# unrad_changed stores a list of expressions containing
# radicals that were processed using unrad
# this is useful if solutions need to be checked later.
unrad_changed = []
denominators = set()
poly = None
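# eq.as_poly() below returns None when eq cannot be expressed as a polynomial
# in the given symbols; such equations are collected in `nonpolys` instead.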
for eq in system:
# Store denom expressions that contain symbols
denominators.update(_simple_dens(eq, symbols))
# Convert equality to expression
if isinstance(eq, Equality):
eq = eq.rewrite(Add)
# try to remove sqrt and rational power
without_radicals = unrad(simplify(eq), *symbols)
if without_radicals:
unrad_changed.append(eq)
eq_unrad, cov = without_radicals
if not cov:
eq = eq_unrad
if isinstance(eq, Expr):
eq = eq.as_numer_denom()[0]
poly = eq.as_poly(*symbols, extension=True)
elif simplify(eq).is_number:
continue
if poly is not None:
polys.append(poly)
polys_expr.append(poly.as_expr())
else:
nonpolys.append(eq)
return polys, polys_expr, nonpolys, denominators, unrad_changed
# end of def _separate_poly_nonpoly()
def _handle_poly(polys, symbols):
# _handle_poly(polys, symbols) -> (poly_sol, poly_eqs)
#
# We will return possible solution information to nonlinsolve as well as a
# new system of polynomial equations to be solved if we cannot solve
# everything directly here. The new system of polynomial equations will be
# a lex-order Groebner basis for the original system. The lex basis
# hopefully separates some of the variables and equations and gives something
# easier for substitution to work with.
# The format for representing solution sets in nonlinsolve and substitution
# is a list of dicts. These are the special cases:
no_information = [{}] # No equations solved yet
no_solutions = [] # The system is inconsistent and has no solutions.
# If there is no need to attempt further solution of these equations then
# we return no equations:
no_equations = []
inexact = any(not p.domain.is_Exact for p in polys)
if inexact:
# The use of Groebner over RR is likely to result incorrectly in an
# inconsistent Groebner basis. So, convert any float coefficients to
# Rational before computing the Groebner basis.
polys = [poly(nsimplify(p, rational=True)) for p in polys]
# Compute a Groebner basis in grevlex order wrt the ordering given. We will
# try to convert this to lex order later. Usually it seems to be more
# efficient to compute a lex order basis by computing a grevlex basis and
# converting to lex with fglm.
basis = groebner(polys, symbols, order='grevlex', polys=False)
#
# No solutions (inconsistent equations)?
#
if 1 in basis:
# No solutions:
poly_sol = no_solutions
poly_eqs = no_equations
#
# Finite number of solutions (zero-dimensional case)
#
elif basis.is_zero_dimensional:
# Convert Groebner basis to lex ordering
basis = basis.fglm('lex')
# Convert polynomial coefficients back to float before calling
# solve_poly_system
if inexact:
basis = [nfloat(p) for p in basis]
# Solve the zero-dimensional case using solve_poly_system if possible.
# If some polynomials have factors that cannot be solved in radicals
# then this will fail. Using solve_poly_system(..., strict=True)
# ensures that we either get a complete solution set in radicals or
# UnsolvableFactorError will be raised.
try:
result = solve_poly_system(basis, *symbols, strict=True)
except UnsolvableFactorError:
# Failure... not fully solvable in radicals. Return the lex-order
# basis for substitution to handle.
poly_sol = no_information
poly_eqs = list(basis)
else:
# Success! We have a finite solution set and solve_poly_system has
# succeeded in finding all solutions. Return the solutions and also
# an empty list of remaining equations to be solved.
poly_sol = [dict(zip(symbols, res)) for res in result]
poly_eqs = no_equations
#
# Infinite families of solutions (positive-dimensional case)
#
else:
# In this case the grevlex basis cannot be converted to lex using the
# fglm method and also solve_poly_system cannot solve the equations. We
# would like to return a lex basis but since we can't use fglm we
# compute the lex basis directly here. The time required to recompute
# the basis is generally significantly less than the time required by
# substitution to solve the new system.
poly_sol = no_information
poly_eqs = list(groebner(polys, symbols, order='lex', polys=False))
if inexact:
poly_eqs = [nfloat(p) for p in poly_eqs]
return poly_sol, poly_eqs
def nonlinsolve(system, *symbols):
r"""
Solve system of $N$ nonlinear equations with $M$ variables, which means both
under and overdetermined systems are supported. Positive dimensional
system is also supported (A system with infinitely many solutions is said
to be positive-dimensional). In a positive dimensional system the solution will
be dependent on at least one symbol. Returns both real and complex
solutions (if they exist).
Parameters
==========
system : list of equations
The target system of equations
symbols : list of Symbols
symbols should be given as a sequence, e.g. a list
Returns
=======
A :class:`~.FiniteSet` of ordered tuples of values of `symbols` for which the
`system` has a solution. The order of values in the tuple is the same as the
symbols present in the parameter `symbols`.
Please note that general :class:`~.FiniteSet` is unordered, the solution
returned here is not simply a :class:`~.FiniteSet` of solutions, rather it
is a :class:`~.FiniteSet` of ordered tuple, i.e. the first and only
argument to :class:`~.FiniteSet` is a tuple of solutions, which is
ordered, and, hence, the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper ``{}`` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
For the given set of equations, the respective input types
are given below:
.. math:: xy - 1 = 0
.. math:: 4x^2 + y^2 - 5 = 0
::
system = [x*y - 1, 4*x**2 + y**2 - 5]
symbols = [x, y]
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy import symbols, nonlinsolve
>>> x, y, z = symbols('x, y, z', real=True)
>>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
{(-1, -1), (-1/2, -2), (1/2, 2), (1, 1)}
1. Positive dimensional system and complements:
>>> from sympy import pprint
>>> from sympy.polys.polytools import is_zero_dimensional
>>> a, b, c, d = symbols('a, b, c, d', extended_real=True)
>>> eq1 = a + b + c + d
>>> eq2 = a*b + b*c + c*d + d*a
>>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
>>> eq4 = a*b*c*d - 1
>>> system = [eq1, eq2, eq3, eq4]
>>> is_zero_dimensional(system)
False
>>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
-1 1 1 -1
{(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
d d d d
>>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
{(2 - y, y)}
2. If some of the equations are non-polynomial then `nonlinsolve`
will call the ``substitution`` function and return real and complex solutions,
if present.
>>> from sympy import exp, sin
>>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
3. If the system is a non-linear polynomial system and is zero-dimensional
then it returns all solutions (real and complex, if present) using
:func:`~.solve_poly_system`:
>>> from sympy import sqrt
>>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
{(-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I)}
4. ``nonlinsolve`` can solve some linear (zero or positive dimensional)
systems (because it uses the :func:`sympy.polys.polytools.groebner` function
to get the Groebner basis and then passes that basis to the ``substitution``
function as the new `system`). But it is not recommended to solve a linear
system using ``nonlinsolve``, because :func:`~.linsolve` is better for
general linear systems.
>>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9, y + z - 4], [x, y, z])
{(3*z - 5, 4 - z, z)}
5. A system of polynomial equations having only real solutions is
solved using :func:`~.solve_poly_system`:
>>> e1 = sqrt(x**2 + y**2) - 10
>>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
>>> nonlinsolve((e1, e2), (x, y))
{(191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20)}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
{(1, 2), (1 - sqrt(5), 2 + sqrt(5)), (1 + sqrt(5), 2 - sqrt(5))}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
{(2, 1), (2 - sqrt(5), 1 + sqrt(5)), (2 + sqrt(5), 1 - sqrt(5))}
6. It is better to use symbols instead of trigonometric functions or
:class:`~.Function`. For example, replace $\sin(x)$ with a symbol, replace
$f(x)$ with a symbol and so on. Get a solution from ``nonlinsolve`` and then
use :func:`~.solveset` to get the value of $x$.
How nonlinsolve is better than the old solver ``_solve_system``:
=============================================================
1. A positive dimensional system solver: nonlinsolve can return
solutions for positive dimensional systems. It finds the
Groebner basis of the positive dimensional system (calling it the
basis), then starts solving the equation with the fewest
variables first using solveset and substitutes those
solved values into the other equations of the basis to get the solution in
terms of a minimum number of variables. The important point here is which
known values are substituted and into which equations.
2. Real and complex solutions: nonlinsolve returns both real
and complex solutions. If all the equations in the system are polynomial
then both real and complex solutions are returned using :func:`~.solve_poly_system`.
If not all the equations in the system are polynomial, the polynomial and
non-polynomial equation(s) are passed to the ``substitution`` method
to solve for the unsolved variables. There, to solve for a particular variable,
solveset_real and solveset_complex are used. For both real and complex
solutions ``_solve_using_known_values`` is used inside ``substitution``
(``substitution`` will be called when any non-polynomial equation is present).
If a solution is valid its general solution is added to the final result.
3. :class:`~.Complement` and :class:`~.Intersection` will be added:
nonlinsolve maintains dicts for complements and intersections. If solveset
finds complements and/or intersections with any interval or set during the
execution of the ``substitution`` function, then the complement and/or
intersection for that variable is added before returning the final solution.
"""
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
if not is_sequence(symbols) or not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise IndexError(filldedent(msg))
symbols = list(map(_sympify, symbols))
system, symbols, swap = recast_to_symbols(system, symbols)
if swap:
soln = nonlinsolve(system, symbols)
return FiniteSet(*[tuple(i.xreplace(swap) for i in s) for s in soln])
if len(system) == 1 and len(symbols) == 1:
return _solveset_work(system, symbols)
# main code of def nonlinsolve() starts from here
polys, polys_expr, nonpolys, denominators, unrad_changed = \
_separate_poly_nonpoly(system, symbols)
poly_eqs = []
poly_sol = [{}]
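# [{}] follows the convention used by _handle_poly: "no information yet";
# an empty list would instead mean the polynomial part is inconsistent.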
if polys:
poly_sol, poly_eqs = _handle_poly(polys, symbols)
if poly_sol and poly_sol[0]:
poly_syms = set().union(*(eq.free_symbols for eq in polys))
unrad_syms = set().union(*(eq.free_symbols for eq in unrad_changed))
if unrad_syms == poly_syms and unrad_changed:
# if all the symbols have been solved by _handle_poly
# and unrad has been used then check solutions
poly_sol = [sol for sol in poly_sol if checksol(unrad_changed, sol)]
# Collect together the unsolved polynomials with the non-polynomial
# equations.
remaining = poly_eqs + nonpolys
# to_tuple converts a solution dictionary to a tuple containing the
# value for each symbol
to_tuple = lambda sol: tuple(sol[s] for s in symbols)
if not remaining:
# If there is nothing left to solve then return the solution from
# solve_poly_system directly.
return FiniteSet(*map(to_tuple, poly_sol))
else:
# Here we handle:
#
# 1. The Groebner basis if solve_poly_system failed.
# 2. The Groebner basis in the positive-dimensional case.
# 3. Any non-polynomial equations
#
# If solve_poly_system did succeed then we pass those solutions in as
# preliminary results.
subs_res = substitution(remaining, symbols, result=poly_sol, exclude=denominators)
if not isinstance(subs_res, FiniteSet):
return subs_res
# check solutions produced by substitution. Currently, checking is done for
# only those solutions which have non-Set variable values.
if unrad_changed:
result = [dict(zip(symbols, sol)) for sol in subs_res.args]
correct_sols = [sol for sol in result if any(isinstance(v, Set) for v in sol.values())
or checksol(unrad_changed, sol) != False]
return FiniteSet(*map(to_tuple, correct_sols))
else:
return subs_res
1aec47736fc2cafc07445237869c25a532241acc96a21d378af82abd8304afd3
"""
This module contain solvers for all kinds of equations:
- algebraic or transcendental, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core import (S, Add, Symbol, Dummy, Expr, Mul)
from sympy.core.assumptions import check_assumptions
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand_mul, expand_log, Derivative,
AppliedUndef, UndefinedFunction, nfloat,
Function, expand_power_exp, _mexpand, expand,
expand_func)
from sympy.core.logic import fuzzy_not
from sympy.core.numbers import ilcm, Float, Rational, _illegal
from sympy.core.power import integer_log, Pow
from sympy.core.relational import Eq, Ne
from sympy.core.sorting import ordered, default_sort_key
from sympy.core.sympify import sympify, _sympify
from sympy.core.traversal import preorder_traversal
from sympy.logic.boolalg import And, BooleanAtom
from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan,
Abs, re, im, arg, sqrt, atan2)
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.integrals.integrals import Integral
from sympy.ntheory.factor_ import divisors
from sympy.simplify import (simplify, collect, powsimp, posify, # type: ignore
powdenest, nsimplify, denom, logcombine, sqrtdenest, fraction,
separatevars)
from sympy.simplify.sqrtdenest import sqrt_depth
from sympy.simplify.fu import TR1, TR2i
from sympy.matrices.common import NonInvertibleMatrixError
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel, factor, Poly
from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError
from sympy.polys.solvers import sympy_eqs_to_ring, solve_lin_sys
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import filldedent, debug
from sympy.utilities.iterables import (connected_components,
generate_bell, uniq, iterable, is_sequence, subsets, flatten)
from sympy.utilities.decorator import conserve_mpmath_dps
from mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from types import GeneratorType
from collections import defaultdict
from itertools import combinations, product
import warnings
def recast_to_symbols(eqs, symbols):
"""
Return (e, s, d) where e and s are versions of *eqs* and
*symbols* in which any non-Symbol objects in *symbols* have
been replaced with generic Dummy symbols and d is a dictionary
that can be used to restore the original expressions.
Examples
========
>>> from sympy.solvers.solvers import recast_to_symbols
>>> from sympy import symbols, Function
>>> x, y = symbols('x y')
>>> fx = Function('f')(x)
>>> eqs, syms = [fx + 1, x, y], [fx, y]
>>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d)
([_X0 + 1, x, y], [_X0, y], {_X0: f(x)})
The original equations and symbols can be restored using d:
>>> assert [i.xreplace(d) for i in eqs] == eqs
>>> assert [d.get(i, i) for i in s] == syms
"""
if not iterable(eqs) and iterable(symbols):
raise ValueError('Both eqs and symbols must be iterable')
orig = list(symbols)
symbols = list(ordered(symbols))
swap_sym = {}
i = 0
for j, s in enumerate(symbols):
if not isinstance(s, Symbol) and s not in swap_sym:
swap_sym[s] = Dummy('X%d' % i)
i += 1
new_f = []
for i in eqs:
isubs = getattr(i, 'subs', None)
if isubs is not None:
new_f.append(isubs(swap_sym))
else:
new_f.append(i)
restore = {v: k for k, v in swap_sym.items()}
return new_f, [swap_sym.get(i, i) for i in orig], restore
def _ispow(e):
"""Return True if e is a Pow or is exp."""
return isinstance(e, Expr) and (e.is_Pow or isinstance(e, exp))
def _simple_dens(f, symbols):
# when checking if a denominator is zero, we can just check the
# base of powers with nonzero exponents since if the base is zero
# the power will be zero, too. To keep it simple and fast, we
# limit simplification to exponents that are Numbers
dens = set()
for d in denoms(f, symbols):
if d.is_Pow and d.exp.is_Number:
if d.exp.is_zero:
continue # foo**0 is never 0
d = d.base
dens.add(d)
return dens
def denoms(eq, *symbols):
"""
Return (recursively) set of all denominators that appear in *eq*
that contain any symbol in *symbols*; if *symbols* are not
provided then all denominators will be returned.
Examples
========
>>> from sympy.solvers.solvers import denoms
>>> from sympy.abc import x, y, z
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
If *symbols* are provided then only denominators containing
those symbols will be returned:
>>> denoms(1/x + 1/y + 1/z, y, z)
{y, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
# Here p might be Tuple or Relational
# Expr subtrees (e.g. lhs and rhs) will be traversed after by pot
if not isinstance(p, Expr):
continue
den = denom(p)
if den is S.One:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
elif len(symbols) == 1:
if iterable(symbols[0]):
symbols = symbols[0]
return {d for d in dens if any(s in d.free_symbols for s in symbols)}
def checksol(f, symbol, sol=None, **flags):
"""
Checks whether sol is a solution of equation f == 0.
Explanation
===========
Input can be either a single symbol and corresponding value
or a dictionary of symbols and values. When given as a dictionary
and flag ``simplify=True``, the values in the dictionary will be
simplified. *f* can be a single equation or an iterable of equations.
A solution must satisfy all equations in *f* to be considered valid;
if a solution does not satisfy any equation, False is returned; if one or
more checks are inconclusive (and none are False) then None is returned.
Examples
========
>>> from sympy import checksol, symbols
>>> x, y = symbols('x,y')
>>> checksol(x**4 - 1, x, 1)
True
>>> checksol(x**4 - 1, x, 0)
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
To check if an expression is zero using ``checksol()``, pass it
as *f* and send an empty dictionary for *symbol*:
>>> checksol(x**2 + x - x*(x + 1), {})
True
None is returned if ``checksol()`` could not conclude.
flags:
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify solution before substituting into function and
simplify the function before trying specific simplifications
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
"""
from sympy.physics.units import Unit
minimal = flags.get('minimal', False)
if sol is not None:
sol = {symbol: sol}
elif isinstance(symbol, dict):
sol = symbol
else:
msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
raise ValueError(msg % (symbol, sol))
if iterable(f):
if not f:
raise ValueError('no functions to check')
rv = True
for fi in f:
check = checksol(fi, sol, **flags)
if check:
continue
if check is False:
return False
rv = None # don't return, wait to see if there's a False
return rv
f = _sympify(f)
if f.is_number:
return f.is_zero
if isinstance(f, Poly):
f = f.as_expr()
elif isinstance(f, (Eq, Ne)):
if f.rhs in (S.true, S.false):
f = f.reversed
B, E = f.args
if isinstance(B, BooleanAtom):
f = f.subs(sol)
if not f.is_Boolean:
return
else:
f = f.rewrite(Add, evaluate=False)
if isinstance(f, BooleanAtom):
return bool(f)
elif not f.is_Relational and not f:
return True
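# candidate values containing nan or infinities can never satisfy the equation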
illegal = set(_illegal)
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
attempt = -1
numerical = flags.get('numerical', True)
while 1:
attempt += 1
if attempt == 0:
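# attempt 0: plain substitution (any overall Unit factor is stripped below
# so it does not prevent recognising zero)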
val = f.subs(sol)
if isinstance(val, Mul):
val = val.as_independent(Unit)[0]
if val.atoms() & illegal:
return False
elif attempt == 1:
if not val.is_number:
if not val.is_constant(*list(sol.keys()), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = _mexpand(val.as_numer_denom()[0], recursive=True)
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = simplify(f.subs(sol))
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = _mexpand(val, recursive=True)
if exval.is_number:
# we can decide now
val = exval
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
elif isinstance(p, UndefinedFunction):
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
nz = fuzzy_not(val.is_zero)
if nz is not None:
# issue 5673: nz may be True even when it should be False,
# so these are just hacks to keep a false positive
# from being returned
# HACK 1: LambertW (issue 5673)
if val.is_number and val.has(LambertW):
# don't eval this to verify solution since if we got here,
# numerical must be False
return None
# add other HACKs here if necessary, otherwise we assume
# the nz value is correct
return not nz
break
if val.is_Rational:
return val == 0
if numerical and val.is_number:
return (abs(val.n(18).n(12, chop=True)) < 1e-9) is S.true
if flags.get('warn', False):
warnings.warn("\n\tWarning: could not verify solution %s." % sol)
# returns None if it can't conclude
# TODO: improve solution testing
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
- systems implied by undetermined coefficients
Examples
========
The default output varies according to the input and might
be a list (possibly empty), a dictionary, a list of
dictionaries or tuples, or an expression involving relationals.
For specifics regarding different forms of output that may appear, see :ref:`solve_output`.
Let it suffice here to say that to obtain a uniform output from
`solve` use ``dict=True`` or ``set=True`` (see below).
>>> from sympy import solve, Poly, Eq, Matrix, Symbol
>>> from sympy.abc import x, y, z, a, b
The expressions that are passed can be Expr, Equality, or Poly
classes (or lists of the same); a Matrix is considered to be a
list of all the elements of the matrix:
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(Matrix([[x, x + y]]), x, y) == solve([x, x + y], x, y)
True
If no symbols are indicated to be of interest and the equation is
univariate, a list of values is returned; otherwise, the keys in
a dictionary will indicate which (of all the variables used in
the expression(s)) variables and solutions were found:
>>> solve(x**2 - 4)
[-2, 2]
>>> solve((x - a)*(y - b))
[{a: x}, {b: y}]
>>> solve([x - 3, y - 1])
{x: 3, y: 1}
>>> solve([x - 3, y**2 - 1])
[{x: 3, y: -1}, {x: 3, y: 1}]
If you pass symbols for which solutions are sought, the output will vary
depending on the number of symbols you passed, whether you are passing
a list of expressions or not, and whether a linear system was solved.
Uniform output is attained by using ``dict=True`` or ``set=True``.
>>> #### *** feel free to skip to the stars below *** ####
>>> from sympy import TableForm
>>> h = [None, ';|;'.join(['e', 's', 'solve(e, s)', 'solve(e, s, dict=True)',
... 'solve(e, s, set=True)']).split(';')]
>>> t = []
>>> for e, s in [
... (x - y, y),
... (x - y, [x, y]),
... (x**2 - y, [x, y]),
... ([x - 3, y -1], [x, y]),
... ]:
... how = [{}, dict(dict=True), dict(set=True)]
... res = [solve(e, s, **f) for f in how]
... t.append([e, '|', s, '|'] + [res[0], '|', res[1], '|', res[2]])
...
>>> # ******************************************************* #
>>> TableForm(t, headings=h, alignments="<")
e | s | solve(e, s) | solve(e, s, dict=True) | solve(e, s, set=True)
---------------------------------------------------------------------------------------
x - y | y | [x] | [{y: x}] | ([y], {(x,)})
x - y | [x, y] | [(y, y)] | [{x: y}] | ([x, y], {(y, y)})
x**2 - y | [x, y] | [(x, x**2)] | [{y: x**2}] | ([x, y], {(x, x**2)})
[x - 3, y - 1] | [x, y] | {x: 3, y: 1} | [{x: 3, y: 1}] | ([x, y], {(3, 1)})
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
When you pass all but one of the free symbols, an attempt
is made to find a single solution based on the method of
undetermined coefficients. If it succeeds, a dictionary of values
is returned. If you want algebraic solutions for one
or more of the symbols, pass the expression to be solved in a list:
>>> e = a*x + b - 2*x - 3
>>> solve(e, [a, b])
{a: 2, b: 3}
>>> solve([e], [a, b])
{a: -b/x + (2*x + 3)/x}
When there is no solution for any given symbol which will make all
expressions zero, the empty list is returned (or an empty set in
the tuple when ``set=True``):
>>> from sympy import sqrt
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
>>> solve(sqrt(x) + 1, x, set=True)
([x], set())
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> from sympy import exp, Function
>>> f = Function('f')
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a function within a derivative, use :func:`~.dsolve`.
To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
It is possible to solve for anything in an expression that can be
replaced with a symbol using :obj:`~sympy.core.basic.Basic.subs`:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number, remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By setting the ``positive`` flag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If ``check=False``, then a solution to the numerator being zero is found
but the value of $x = 0$ is a spurious solution since $\sin(x)/x$ has the
well-known limit (without discontinuity) of 1 at $x = 0$:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of $x = 0$ that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Solving Relationships**
When one or more expressions passed to ``solve`` is a relational,
a relational result is returned (and the ``dict`` and ``set`` flags
are ignored):
>>> solve(x < 3)
(-oo < x) & (x < 3)
>>> solve([x < 3, x**2 > 4], x)
((-oo < x) & (x < -2)) | ((2 < x) & (x < 3))
>>> solve([x + y - 3, x > 3], x)
(3 < x) & (x < oo) & Eq(x, 3 - y)
Although checking of assumptions on symbols in relationals
is not done, setting assumptions will affect how certain
relationals might automatically simplify:
>>> solve(x**2 > 4)
((-oo < x) & (x < -2)) | ((2 < x) & (x < oo))
>>> r = Symbol('r', real=True)
>>> solve(r**2 > 4)
(2 < r) | (r < -2)
There is currently no algorithm in SymPy that allows you to use
relationships to resolve more than one variable. So the following
does not determine that ``q < 0`` (and trying to solve for ``r``
and ``q`` will raise an error):
>>> from sympy import symbols
>>> r, q = symbols('r, q', real=True)
>>> solve([r + q - 3, r > 3], r)
(3 < r) & Eq(r, 3 - q)
You can directly call the routine that ``solve`` calls
when it encounters a relational: :func:`~.reduce_inequalities`.
It treats Expr like Equality.
>>> from sympy import reduce_inequalities
>>> reduce_inequalities([x**2 - 4])
Eq(x, -2) | Eq(x, 2)
If each relationship contains only one symbol of interest,
the expressions can be processed for multiple symbols:
>>> reduce_inequalities([0 <= x - 1, y < 3], [x, y])
(-oo < y) & (1 <= x) & (x < oo) & (y < 3)
But an error is raised if any relationship has more than one
symbol of interest:
>>> reduce_inequalities([0 <= x*y - 1, y < 3], [x, y])
Traceback (most recent call last):
...
NotImplementedError:
inequality has more than one symbol of interest.
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = 3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
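For instance, the ``dict`` and ``set`` flags described above change only
how the solutions are packaged (a brief illustration):
>>> from sympy import solve
>>> from sympy.abc import x
>>> solve(x**2 - 4)
[-2, 2]
>>> solve(x**2 - 4, dict=True)
[{x: -2}, {x: 2}]
With ``set=True`` the same solutions are returned as a list of the symbols
together with a set of solution tuples, here ``([x], {(-2,), (2,)})``.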
See Also
========
rsolve: For solving recurrence relations
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# checking/recording flags
###########################################################################
# set solver types explicitly; as soon as one is False
# all the rest will be False
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# allow solution to contain symbol if True:
implicit = flags.get('implicit', False)
# record desire to see warnings
warn = flags.get('warn', False)
# this flag will be needed for quick exits below, so record
# now -- but don't record `dict` yet since it might change
as_set = flags.get('set', False)
# keeping track of how f was passed
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
# sympify everything, creating list of expressions and list of symbols
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
f, symbols = (_sympified_list(w) for w in [f, symbols])
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
if not as_set:
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# check for duplicates
if len(symbols) != len(set(symbols)):
raise ValueError('duplicate symbols given')
# remove those not of interest
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
# automatically ignore True values
if isinstance(f, list):
f = [s for s in f if s is not S.true]
# handle canonicalization of equation types
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
# *** dispatch and handle as a system of relationals
# **************************************************
if fi.is_Relational:
if len(symbols) != 1:
raise ValueError("can only solve for one symbol at a time")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
return reduce_inequalities(f, symbols=symbols)
# convert Poly to expression
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if as_set:
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
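# illustrative: for a real symbol x, Abs(x) rewrites to
# Piecewise((x, x >= 0), (-x, True)), which the Piecewise branch of
# _solve can then handle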
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
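# illustrative: for solve([re(z) - 2, im(z) - 3], z) with an unassumed z,
# this block rebuilds f as [re(z) - 2, im(z) - 3, z - re(z) - I*im(z)]
# and adds re(z) and im(z) to the unknowns, so z is reported as 2 + 3*I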
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this set of symbols (perhaps recast) is needed below
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
if as_set:
return symbols, set()
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
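# illustrative: solve(x - Derivative(f(y), y), x) returns
# [Derivative(f(y), y)] because the unevaluated Derivative is masked
# with a Dummy and treated as an opaque quantity rather than inverted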
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = None
if len(symbols) != 1:
solution = _solve_undetermined(f[0], symbols, flags)
if not solution:
solution = _solve(f[0], *symbols, **flags)
else:
linear, solution = _solve_system(f, symbols, **flags)
assert type(solution) is list
assert not solution or type(solution[0]) is dict, solution
#
# postprocessing
###########################################################################
# capture as_dict flag now (as_set already captured)
as_dict = flags.get('dict', False)
# define how solution will get unpacked
tuple_format = lambda s: [tuple([i.get(x, x) for x in symbols]) for i in s]
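# e.g. with symbols [x, y] and s = [{x: 1}], tuple_format gives [(1, y)]:
# a symbol with no entry in a solution dict appears as itself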
if as_dict or as_set:
unpack = None
elif bare_f:
if len(symbols) == 1:
unpack = lambda s: [i[symbols[0]] for i in s]
elif len(solution) == 1 and len(solution[0]) == len(symbols):
# undetermined linear coeffs solution
unpack = lambda s: s[0]
elif ordered_symbols:
unpack = tuple_format
else:
unpack = lambda s: s
else:
if solution:
if linear and len(solution) == 1:
# if you want the tuple solution for the linear
# case, use `set=True`
unpack = lambda s: s[0]
elif ordered_symbols:
unpack = tuple_format
else:
unpack = lambda s: s
else:
unpack = None
# Restore masked-off objects
if non_inverts and type(solution) is list:
solution = [{k: v.subs(non_inverts) for k, v in s.items()}
for s in solution]
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
if not solution:
if as_set:
return symbols, set()
return []
# make orderings canonical for list of dictionaries
if not as_set: # for set, no point in ordering
solution = [{k: s[k] for k in ordered(s)} for s in solution]
solution.sort(key=default_sort_key)
if not (as_set or as_dict):
return unpack(solution)
if as_dict:
return solution
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in solution))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in solution}
def _solve_undetermined(g, symbols, flags):
"""solve helper to return a list with one dict (solution) else None
A direct call to solve_undetermined_coeffs is more flexible and
can return both multiple solutions and handle more than one independent
variable. Here, we have to be more cautious to keep from solving
something that does not look like an undetermined coeffs system --
to minimize the surprise factor since singularities that cancel are not
prohibited in solve_undetermined_coeffs.
"""
if g.free_symbols - set(symbols):
sol = solve_undetermined_coeffs(g, symbols, **dict(flags, dict=True, set=None))
if len(sol) == 1:
return sol
def _solve(f, *symbols, **flags):
"""Return a checked solution for *f* in terms of one or more of the
symbols in the form of a list of dictionaries.
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised.
"""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
# look for solutions for desired symbols that are independent
# of symbols already solved for, e.g. if we solve for x = y
# then no symbol having x in its solution will be returned.
# First solve for linear symbols (since that is easier and limits
# solution size) and then proceed with symbols appearing
# in a non-linear fashion. Ideally, if one is solving a single
# expression for several symbols, they would have to
# appear in factors of an expression, but we do not
# attempt factorization here. XXX perhaps handling a Mul
# should come first in this routine whether there is
# one or several symbols.
nonlin_s = []
got_s = set()
rhs_s = set()
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
if vfree & got_s:
# was linear, but gives a redundant relationship;
# e.g. for x - y = 0 the solution y == x is redundant
# once x == y has been recorded, so ignore it
continue
rhs_s |= vfree
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
nonlin_s.append(s)
if not nonlin_s:
return result
for s in nonlin_s:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if sol[s].free_symbols & got_s:
# depends on previously solved symbols: ignore
continue
got_s.add(s)
result.append(sol)
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
# solve f for a single variable
symbol = symbols[0]
# expand binomials only if it has the unknown symbol
f = f.replace(lambda e: isinstance(e, binomial) and e.has(symbol),
lambda e: expand_func(e))
# checking will be done unless it is turned off before making a
# recursive call; the variables `checkdens` and `check` are
# captured here (for reference below) in case flag value changes
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
if m in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
result = set()
break
soln = _vsolve(m, symbol, **flags)
result.update(set(soln))
result = [{symbol: v} for v in result]
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = flags.get('_denominators', _simple_dens(f, symbols))
result = [s for s in result if
not any(checksol(den, s, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for i, (expr, cond) in enumerate(f.args):
if expr.is_zero:
raise NotImplementedError(
'solve cannot represent interval solutions')
candidates = _vsolve(expr, symbol, **flags)
# the explicit condition for this expr is the current cond
# and none of the previous conditions
args = [~c for _, c in f.args[:i]] + [cond]
cond = And(*args)
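# illustrative: for Piecewise((x - 1, x < 0), (x - 2, True)) the
# candidate 2 from the second piece gets the explicit condition
# ~(x < 0); since 2 >= 0 is True, 2 is kept unconditionally, while
# the candidate 1 from the first piece fails its condition 1 < 0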
for candidate in candidates:
if candidate in result:
# an unconditional value was already there
continue
try:
v = cond.subs(symbol, candidate)
_eval_simplify = getattr(v, '_eval_simplify', None)
if _eval_simplify is not None:
# unconditionally take the simplification of v
v = _eval_simplify(ratio=2, measure=lambda x: 1)
except TypeError:
# incompatible type with condition(s)
continue
if v == False:
continue
if v == True:
result.add(candidate)
else:
result.add(Piecewise(
(candidate, v),
(S.NaN, True)))
# solutions already checked and simplified
# ****************************************
return [{symbol: r} for r in result]
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num.is_zero or sol is S.NaN:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [{f_num: sol}]
poly = None
# check for a single Add generator
if not f_num.is_Add:
add_args = [i for i in f_num.atoms(Add)
if symbol in i.free_symbols]
if len(add_args) == 1:
gen = add_args[0]
spart = gen.as_independent(symbol)[1].as_base_exp()[0]
if spart == symbol:
try:
poly = Poly(f_num, spart)
except PolynomialError:
pass
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
if poly is None:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = {b for b in bases if b.is_Function}
trig = {_ for _ in funcs if
isinstance(_, TrigonometricFunction)}
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = None
if f_num.is_Add and len(f_num.args) == 2:
# check for sin(x)**p = cos(x)**p
_args = f_num.args
t = a, b = [i.atoms(Function).intersection(
trig) for i in _args]
if all(len(i) == 1 for i in t):
a, b = [i.pop() for i in t]
if isinstance(a, cos):
a, b = b, a
_args = _args[::-1]
if isinstance(a, sin) and isinstance(b, cos
) and a.args[0] == b.args[0]:
# sin(x) + cos(x) = 0 -> tan(x) + 1 = 0
newf, _d = (TR2i(_args[0]/_args[1]) + 1
).as_numer_denom()
if not _d.is_Number:
newf = None
if newf is None:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form --check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = list(ordered(_vsolve(t - f1, symbol, **flags)))[0]
result = [{symbol: cv_inv.subs(sol)} for sol in cv_sols]
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _vsolve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
result = [{symbol: i.subs(s)} for i in inv for s in soln]
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
hints = ('cubics', 'quartics', 'quintics')
solvers = {h: flags.get(h) for h in hints}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the polynomial is multivariate
# or is over a high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
inv = _vsolve(u - t, symbol, **flags)
soln = {i.subs(t, s) for i in inv for s in soln}
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
if soln is not None:
result = [{symbol: v} for v in soln]
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _vsolve(ieq, symbol, **flags)[0]
rv = {inv.subs(xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_vsolve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = [{symbol: v} for v in rv]
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = [{symbol: v} for v in soln]
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = [{k: d[k].simplify() for k in d} for d in result]
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [r for r in result if
not any(checksol(d, r, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, r, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
"""return ``(linear, solution)`` where ``linear`` is True
if the system was linear, else False; ``solution``
is a list of dictionaries giving solutions for the symbols
"""
if not exprs:
return False, []
if flags.pop('_split', True):
# Split the system into connected components
V = exprs
symsset = set(symbols)
exprsyms = {e: e.free_symbols & symsset for e in exprs}
E = []
sym_indices = {sym: i for i, sym in enumerate(symbols)}
for n, e1 in enumerate(exprs):
for e2 in exprs[:n]:
# Equations are connected if they share a symbol
if exprsyms[e1] & exprsyms[e2]:
E.append((e1, e2))
G = V, E
subexprs = connected_components(G)
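# illustrative: for exprs [x - 1, y - z, z - 2] the first equation
# shares no symbol with the other two, so the graph splits into the
# components [x - 1] and [y - z, z - 2]; each component is solved
# separately and the results are recombined below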
if len(subexprs) > 1:
subsols = []
linear = True
for subexpr in subexprs:
subsyms = set()
for e in subexpr:
subsyms |= exprsyms[e]
subsyms = list(sorted(subsyms, key = lambda x: sym_indices[x]))
flags['_split'] = False # skip split step
_linear, subsol = _solve_system(subexpr, subsyms, **flags)
if linear:
linear = linear and _linear
if not isinstance(subsol, list):
subsol = [subsol]
subsols.append(subsol)
# Full solution is the Cartesian product of the subsystem solutions
sols = []
for soldicts in product(*subsols):
sols.append(dict(item for sd in soldicts
for item in sd.items()))
return linear, sols
polys = []
dens = set()
failed = []
result = []
solved_syms = []
linear = True
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
if d in symbols:
if linear:
linear = solve_linear(g, 0, [d])[0] == d
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if polys:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
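# note: monom.index(1) locates which symbol a linear monomial belongs
# to; a constant term (all-zero monomial) raises ValueError and is
# moved, negated, into the last (right-hand side) column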
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
result = [result] if result else []
if failed:
if result:
solved_syms = list(result[0].keys()) # there is only one result dict
else:
solved_syms = []
# linear doesn't change
else:
linear = False
if len(symbols) > len(polys):
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [], None or list of tuples
res = solve_poly_system(polys, *syms)
if res:
for r in set(res):
skip = False
for r1 in r:
if got_s and any(ss in r1.free_symbols
for ss in got_s):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.append(dict(list(zip(syms, r))))
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
if result:
solved_syms = symbols
result = [dict(list(zip(solved_syms, r))) for r in set(result)]
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
# convert None or [] to [{}]
result = result or [{}]
if failed:
linear = False
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = e.free_symbols & legal
# Solve first for symbols that have lower degree in the equation.
# Ideally we want to solve first for symbols that appear linearly
# with rational coefficients, e.g. if e = x*y + z then we should
# solve for z first.
def key(sym):
ep = e.as_poly(sym)
if ep is None:
complexity = (S.Infinity, S.Infinity, S.Infinity)
else:
coeff_syms = ep.LC().free_symbols
complexity = (ep.degree(), len(coeff_syms & rv), len(coeff_syms))
return complexity + (default_sort_key(sym),)
if sort:
rv = sorted(rv, key=key)
return rv
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
u = Dummy() # used in solution checking
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
newresult = []
bad_results = []
hit = False
for r in result:
got_s = set()
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _vsolve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
# solution for s is being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
# check that it is independent of previous solutions
iset = set(rnew.items())
for i in newresult:
if len(i) < len(iset) and not set(i.items()) - iset:
# this is a superset of a known solution that
# is smaller
break
else:
# keep it
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
if not result:
return False, []
# rely on linear/polynomial system solvers to simplify
# XXX the following tests show that the expressions
# returned are not the same as they would be if simplify
# were applied to this:
# sympy/solvers/ode/tests/test_systems/test__classify_linear_system
# sympy/solvers/tests/test_solvers/test_issue_4886
# so the docs should be updated to reflect that or else
# the following should be `bool(failed) or not linear`
default_simplify = bool(failed)
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
return linear, result
def solve_linear(lhs, rhs=0, symbols=[], exclude=[]):
r"""
Return a tuple derived from ``f = lhs - rhs`` that is one of
the following: ``(0, 1)``, ``(0, 0)``, ``(symbol, solution)``, ``(n, d)``.
Explanation
===========
``(0, 1)`` meaning that ``f`` is independent of the symbols in *symbols*
that are not in *exclude*.
``(0, 0)`` meaning that there is no solution to the equation amongst the
symbols given. If the first element of the tuple is not zero, then the
function is guaranteed to be dependent on a symbol in *symbols*.
``(symbol, solution)`` where symbol appears linearly in the numerator of
``f``, is in *symbols* (if given), and is not in *exclude* (if given). No
simplification is done to ``f`` other than a ``mul=True`` expansion, so the
solution will correspond strictly to a unique solution.
``(n, d)`` where ``n`` and ``d`` are the numerator and denominator of ``f``
when the numerator was not linear in any symbol of interest; ``n`` will
never be a symbol unless a solution for that symbol was found (in which case
the second element is the solution, not the denominator).
Examples
========
>>> from sympy import cancel, Pow
``f`` is independent of the symbols in *symbols* that are not in
*exclude*:
>>> from sympy import cos, sin, solve_linear
>>> from sympy.abc import x, y, z
>>> eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0
>>> solve_linear(eq)
(0, 1)
>>> eq = cos(x)**2 + sin(x)**2 # = 1
>>> solve_linear(eq)
(0, 1)
>>> solve_linear(x, exclude=[x])
(0, 1)
The variable ``x`` appears as a linear variable in each of the
following:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
When not linear in ``x`` or ``y`` then the numerator and denominator are
returned:
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If the numerator of the expression is a symbol, then ``(0, 0)`` is
returned if the solution for that symbol would have set any
denominator to 0:
>>> eq = 1/(1/x - 2)
>>> eq.as_numer_denom()
(x, 1 - 2*x)
>>> solve_linear(eq)
(0, 0)
But automatic rewriting may cause a symbol in the denominator to
appear in the numerator so a solution will be returned:
>>> (1/x)**-1
x
>>> solve_linear((1/x)**-1)
(x, 0)
Use an unevaluated expression to avoid this:
>>> solve_linear(Pow(1/x, -1, evaluate=False))
(0, 0)
If ``x`` is allowed to cancel in the following expression, then it
appears to be linear in ``x``, but this sort of cancellation is not
done by ``solve_linear`` so the solution will always satisfy the
original expression without causing a division by zero error.
>>> eq = x**2*(1/x - z**2/x)
>>> solve_linear(cancel(eq))
(x, 0)
>>> solve_linear(eq)
(x**2*(1 - z**2), x)
A list of symbols for which a solution is desired may be given:
>>> solve_linear(x + y + z, symbols=[y])
(y, -x - z)
A list of symbols to ignore may also be given:
>>> solve_linear(x + y + z, exclude=[x])
(y, -x - z)
(A solution for ``y`` is obtained because it is the first variable
from the canonically sorted list of symbols that had a linear
solution.)
"""
if isinstance(lhs, Eq):
if rhs:
raise ValueError(filldedent('''
If lhs is an Equality, rhs must be 0 but was %s''' % rhs))
rhs = lhs.rhs
lhs = lhs.lhs
dens = None
eq = lhs - rhs
n, d = eq.as_numer_denom()
if not n:
return S.Zero, S.One
free = n.free_symbols
if not symbols:
symbols = free
else:
bad = [s for s in symbols if not s.is_Symbol]
if bad:
if len(bad) == 1:
bad = bad[0]
if len(symbols) == 1:
eg = 'solve(%s, %s)' % (eq, symbols[0])
else:
eg = 'solve(%s, *%s)' % (eq, list(symbols))
raise ValueError(filldedent('''
solve_linear only handles symbols, not %s. To isolate
non-symbols use solve, e.g. >>> %s <<<.
''' % (bad, eg)))
symbols = free.intersection(symbols)
symbols = symbols.difference(exclude)
if not symbols:
return S.Zero, S.One
# derivatives are easy to do but tricky to analyze to see if they
# are going to disallow a linear solution, so for simplicity we
# just evaluate the ones that have the symbols of interest
derivs = defaultdict(list)
for der in n.atoms(Derivative):
csym = der.free_symbols & symbols
for c in csym:
derivs[c].append(der)
all_zero = True
for xi in sorted(symbols, key=default_sort_key): # canonical order
# if there are derivatives in this var, calculate them now
if isinstance(derivs[xi], list):
derivs[xi] = {der: der.doit() for der in derivs[xi]}
newn = n.subs(derivs[xi])
dnewn_dxi = newn.diff(xi)
# dnewn_dxi can be nonzero if it survives differentiation by any
# of its free symbols
free = dnewn_dxi.free_symbols
if dnewn_dxi and (not free or any(dnewn_dxi.diff(s) for s in free) or free == symbols):
all_zero = False
if dnewn_dxi is S.NaN:
break
if xi not in dnewn_dxi.free_symbols:
vi = -1/dnewn_dxi*(newn.subs(xi, 0))
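# since dnewn_dxi is free of xi here, newn is affine in xi, i.e.
# newn == dnewn_dxi*xi + newn.subs(xi, 0), so the linear root is
# -constant/slope, as computed above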
if dens is None:
dens = _simple_dens(eq, symbols)
if not any(checksol(di, {xi: vi}, minimal=True) is True
for di in dens):
# simplify any trivial integral
irep = [(i, i.doit()) for i in vi.atoms(Integral) if
i.function.is_number]
# do a slight bit of simplification
vi = expand_mul(vi.subs(irep))
return xi, vi
if all_zero:
return S.Zero, S.One
if n.is_Symbol: # no solution for this symbol was found
return S.Zero, S.Zero
return n, d
def minsolve_linear_system(system, *symbols, **flags):
r"""
Find a particular solution to a linear system.
Explanation
===========
In particular, try to find a solution with the minimal possible number
of non-zero variables using a naive algorithm with exponential complexity.
If ``quick=True``, a heuristic is used.
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
kfree = k.free_symbols
x = next(reversed(list(ordered(kfree))))
if len(kfree) != 1:
determined[x] = S.Zero
else:
val = _vsolve(k, x, check=False)[0]
if not val and not any(v.subs(x, val) for v in s.values()):
determined[x] = S.One
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug('minsolve: %s' % n)
thissol = None
for nonzeros in combinations(range(N), n):
subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], S.One) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = S.One
else:
s[sym] = S.Zero
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
def solve_linear_system(system, *symbols, **flags):
r"""
Solve system of $N$ linear equations with $M$ variables, which means
both under- and overdetermined systems are supported.
Explanation
===========
The possible number of solutions is zero, one, or infinite. If there is no
solution, this procedure returns None; otherwise it returns a dictionary
with the solutions. In the case of underdetermined systems, all arbitrary
parameters are skipped. This may cause a situation in which an empty
dictionary is returned.
In that case, all symbols can be assigned arbitrary values.
Input to this function is an $N \times (M + 1)$ matrix, which means it has
to be in augmented form. If you prefer to enter $N$ equations and $M$
unknowns then use ``solve(Neqs, *Msymbols)`` instead. Note: a local
copy of the matrix is made by this routine so the matrix that is
passed will not be modified.
The algorithm used here is fraction-free Gaussian elimination,
which results, after elimination, in an upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
Examples
========
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system::
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
A degenerate system returns an empty dictionary:
>>> system = Matrix(( (0,0,0), (0,0,0) ))
>>> solve_linear_system(system, x, y)
{}
"""
assert system.shape[1] == len(symbols) + 1
# This is just a wrapper for solve_lin_sys
eqs = list(system * Matrix(symbols + (-1,)))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is not None:
sol = {sym:val for sym, val in sol.items() if sym != val}
return sol
def solve_undetermined_coeffs(equ, coeffs, *syms, **flags):
r"""
Solve a system of equations in $k$ parameters that is formed by
matching coefficients in variables ``coeffs`` that appear on
factors dependent on the remaining variables (or on those given
explicitly by ``syms``).
Explanation
===========
The result of this function is a dictionary with symbolic values of those
parameters -- empty if there is no solution or if the coefficients do not
appear in the equation -- else None (if the system was not recognized).
If there is more than one solution, the solutions are passed as a list.
The output can be modified using the same semantics as for ``solve`` since
the flags that are passed are sent directly to ``solve``; so, for example,
the flag ``dict=True`` will always return a list of solutions as
dictionaries.
This function accepts both Equality and Expr class instances.
The solving process is most efficient when symbols are specified
in addition to parameters to be determined, but an attempt to
determine them (if absent) will be made. If an expected solution is not
obtained (and symbols were not specified) try specifying them.
Examples
========
>>> from sympy import Eq, solve_undetermined_coeffs
>>> from sympy.abc import a, b, c, h, p, k, x, y
>>> solve_undetermined_coeffs(Eq(a*x + a + b, x/2), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(a - 2, [a])
{a: 2}
The equation can be nonlinear in the symbols:
>>> X, Y, Z = y, x**y, y*x**y
>>> eq = a*X + b*Y + c*Z - X - 2*Y - 3*Z
>>> coeffs = a, b, c
>>> syms = x, y
>>> solve_undetermined_coeffs(eq, coeffs, syms)
{a: 1, b: 2, c: 3}
And the system can be nonlinear in coefficients, too, but if
there is only a single solution, it will be returned as a
dictionary:
>>> eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p
>>> solve_undetermined_coeffs(eq, (h, p, k), x)
{h: -b/(2*a), k: (4*a*c - b**2)/(4*a), p: 1/(4*a)}
Multiple solutions are always returned in a list:
>>> solve_undetermined_coeffs(a**2*x + b - x, [a, b], x)
[{a: -1, b: 0}, {a: 1, b: 0}]
Using flag ``dict=True`` (in keeping with semantics in :func:`~.solve`)
will force the result to always be a list with any solutions
as elements in that list.
>>> solve_undetermined_coeffs(a*x - 2*x, [a], dict=True)
[{a: 2}]
"""
if not (coeffs and all(i.is_Symbol for i in coeffs)):
raise ValueError('must provide symbols for coeffs')
if isinstance(equ, Eq):
eq = equ.lhs - equ.rhs
else:
eq = equ
ceq = cancel(eq)
xeq = _mexpand(ceq.as_numer_denom()[0], recursive=True)
free = xeq.free_symbols
coeffs = free & set(coeffs)
if not coeffs:
return ([], {}) if flags.get('set', None) else [] # solve(0, x) -> []
if not syms:
# e.g. A*exp(x) + B - (exp(x) + y) separated into parts that
# don't/do depend on coeffs gives
# -(exp(x) + y), A*exp(x) + B
# then see what symbols are common to both
# {x} = {x, A, B} - {x, y}
ind, dep = xeq.as_independent(*coeffs, as_Add=True)
dfree = dep.free_symbols
syms = dfree & ind.free_symbols
if not syms:
# but if the system looks like (a + b)*x + b - c
# then {} = {a, b, x} - c
# so calculate {x} = {a, b, x} - {a, b}
syms = dfree - set(coeffs)
if not syms:
syms = [Dummy()]
else:
if len(syms) == 1 and iterable(syms[0]):
syms = syms[0]
e, s, _ = recast_to_symbols([xeq], syms)
xeq = e[0]
syms = s
# find the functional forms in which symbols appear
gens = set(xeq.as_coefficients_dict(*syms).keys()) - {1}
cset = set(coeffs)
if any(g.has_xfree(cset) for g in gens):
return # a generator contained a coefficient symbol
# make sure we are working with symbols for generators
e, gens, _ = recast_to_symbols([xeq], list(gens))
xeq = e[0]
# collect coefficients in front of generators
system = list(collect(xeq, gens, evaluate=False).values())
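# illustrative (first docstring example): a*x + a + b - x/2 collects
# on the generator x to {x: a - 1/2, 1: a + b}, so the system passed
# to solve below is [a - 1/2, a + b]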
# get a solution
soln = solve(system, coeffs, **flags)
# unpack unless told otherwise if length is 1
settings = flags.get('dict', None) or flags.get('set', None)
if type(soln) is dict or settings or len(soln) != 1:
return soln
return soln[0]
def solve_linear_system_LU(matrix, syms):
"""
Solves the augmented matrix system using ``LUsolve`` and returns a
dictionary in which solutions are keyed to the symbols of *syms* as ordered.
Explanation
===========
The matrix must be invertible.
Examples
========
>>> from sympy import Matrix, solve_linear_system_LU
>>> from sympy.abc import x, y, z
>>> solve_linear_system_LU(Matrix([
... [1, 2, 0, 1],
... [3, 2, 2, 1],
... [2, 0, 0, 1]]), [x, y, z])
{x: 1/2, y: 1/4, z: -1/2}
See Also
========
LUsolve
"""
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
def det_perm(M):
"""
Return the determinant of *M* by using permutations to select factors.
Explanation
===========
For sizes larger than 8 the number of permutations becomes prohibitively
large; in that case, or if there are no symbols in the matrix, it is
better to use the standard determinant routines (e.g., ``M.det()``).
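Examples
========
A minimal illustration with a symbolic 2x2 matrix, reproducing the familiar
cofactor formula:
>>> from sympy import Matrix
>>> from sympy.abc import a, b, c, d
>>> from sympy.solvers.solvers import det_perm
>>> det_perm(Matrix([[a, b], [c, d]]))
a*d - b*c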
See Also
========
det_minor
det_quick
"""
args = []
s = True
n = M.rows
list_ = M.flat()
for perm in generate_bell(n):
fac = []
idx = 0
for j in perm:
fac.append(list_[idx + j])
idx += n
term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7
args.append(term if s else -term)
s = not s
return Add(*args)
def det_minor(M):
"""
Return the ``det(M)`` computed from minors without
introducing new nesting in products.
See Also
========
det_perm
det_quick
"""
n = M.rows
if n == 2:
return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1]
else:
return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in
Add.make_args(det_minor(M.minor_submatrix(0, i)))])
if M[0, i] else S.Zero for i in range(n)])
def det_quick(M, method=None):
"""
Return ``det(M)`` assuming that either
there are lots of zeros or the size of the matrix
is small. If this assumption is not met, then the normal
Matrix.det function will be used with method = ``method``.
See Also
========
det_minor
det_perm
"""
if any(i.has(Symbol) for i in M):
if M.rows < 8 and all(i.has(Symbol) for i in M):
return det_perm(M)
return det_minor(M)
else:
return M.det(method=method) if method else M.det()
def inv_quick(M):
"""Return the inverse of ``M``, assuming that either
there are lots of zeros or the size of the matrix
is small.
"""
if not all(i.is_Number for i in M):
if not any(i.is_Number for i in M):
det = lambda _: det_perm(_)
else:
det = lambda _: det_minor(_)
else:
return M.inv()
n = M.rows
d = det(M)
if d == S.Zero:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible")
ret = zeros(n)
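# fill in the adjugate formula: entry (j, i) of the inverse is the
# signed (cofactor) minor of entry (i, j) divided by det(M)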
s1 = -1
for i in range(n):
s = s1 = -s1
for j in range(n):
di = det(M.minor_submatrix(i, j))
ret[j, i] = s*di/d
s = -s
return ret
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), S.Pi - asin(x)),
cos: lambda x: (acos(x), 2*S.Pi - acos(x)),
}
def _vsolve(e, s, **flags):
"""return list of scalar values for the solution of e for symbol s"""
return [i[s] for i in _solve(e, s, **flags)]
def _tsolve(eq, sym, **flags):
"""
Helper for ``_solve`` that solves a transcendental equation with respect
to the given symbol. Various equations containing powers and logarithms
can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> from sympy import log, ordered
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy.abc import x
>>> list(ordered(tsolve(3**(2*x + 5) - 4, x)))
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return None
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try to get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _vsolve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if isinstance(f, log):
return _vsolve(f.args[0] - exp(rhs), sym, **flags)
return _tsolve(f - rhs, sym, **flags)
elif lhs.is_Pow:
if lhs.exp.is_Integer:
if lhs - rhs != eq:
return _vsolve(lhs - rhs, sym, **flags)
if sym not in lhs.exp.free_symbols:
return _vsolve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
# _tsolve calls this with Dummy before passing the actual number in.
if any(t.is_Dummy for t in rhs.free_symbols):
raise NotImplementedError # _tsolve will call here again...
# a ** g(x) == 0
if not rhs:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _vsolve(lhs.base, sym, **flags)
return [s for s in sol_base if lhs.exp.subs(sym, s) != 0] # XXX use checksol here?
# a ** g(x) == b
if not lhs.base.has(sym):
if lhs.base == 0:
return _vsolve(lhs.exp, sym, **flags) if rhs != 0 else []
# Gets most solutions...
if lhs.base == rhs.as_base_exp()[0]:
# handles case when bases are equal
sol = _vsolve(lhs.exp - rhs.as_base_exp()[1], sym, **flags)
else:
# handles cases when bases are not equal and exp
# may or may not be equal
f = exp(log(lhs.base)*lhs.exp) - exp(log(rhs))
sol = _vsolve(f, sym, **flags)
# Check for duplicate solutions
def equal(expr1, expr2):
_ = Dummy()
eq = checksol(expr1 - _, _, expr2)
if eq is None:
if nsimplify(expr1) != nsimplify(expr2):
return False
# they might be coincidentally the same
# so check more rigorously
eq = expr1.equals(expr2) # XXX expensive but necessary?
return eq
# Guess a rational exponent
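# e.g. (illustrative) for base 4 and rhs 8, e_rat = log(8)/log(4) = 3/2
# and 4**3 == 8**2, so solutions of lhs.exp - 3/2 are added as well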
e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base)))
e_rat = simplify(posify(e_rat)[0])
n, d = fraction(e_rat)
if expand(lhs.base**n - rhs**d) == 0:
sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)]
sol.extend(_vsolve(lhs.exp - e_rat, sym, **flags))
return list(set(sol))
# f(x) ** g(x) == c
else:
sol = []
logform = lhs.exp*log(lhs.base) - log(rhs)
if logform != lhs - rhs:
try:
sol.extend(_vsolve(logform, sym, **flags))
except NotImplementedError:
pass
# Collect possible solutions and check with substitution later.
check = []
if rhs == 1:
# f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1
check.extend(_vsolve(lhs.exp, sym, **flags))
check.extend(_vsolve(lhs.base - 1, sym, **flags))
check.extend(_vsolve(lhs.base + 1, sym, **flags))
elif rhs.is_Rational:
for d in (i for i in divisors(abs(rhs.p)) if i != 1):
e, t = integer_log(rhs.p, d)
if not t:
continue # rhs.p != d**b
for s in divisors(abs(rhs.q)):
if s**e == rhs.q:
r = Rational(d, s)
check.extend(_vsolve(lhs.base - r, sym, **flags))
check.extend(_vsolve(lhs.base + r, sym, **flags))
check.extend(_vsolve(lhs.exp - e, sym, **flags))
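# illustrative: for rhs = 8/27, d = 2 with e = 3 and s = 3 gives
# r = 2/3, so base = 2/3, base = -2/3 and exp = 3 are all collected
# as candidates and verified by substitution below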
elif rhs.is_irrational:
b_l, e_l = lhs.base.as_base_exp()
n, d = (e_l*lhs.exp).as_numer_denom()
b, e = sqrtdenest(rhs).as_base_exp()
check = [sqrtdenest(i) for i in (_vsolve(lhs.base - b, sym, **flags))]
check.extend([sqrtdenest(i) for i in (_vsolve(lhs.exp - e, sym, **flags))])
if e_l*d != 1:
check.extend(_vsolve(b_l**n - rhs**(e_l*d), sym, **flags))
for s in check:
ok = checksol(eq, sym, s)
if ok is None:
ok = eq.subs(sym, s).equals(0)
if ok:
sol.append(s)
return list(set(sol))
elif lhs.is_Function and len(lhs.args) == 1:
if lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[type(lhs)](rhs):
soln.extend(_vsolve(lhs.args[0] - i, sym, **flags))
return list(set(soln))
elif lhs.func == LambertW:
return _vsolve(lhs.args[0] - rhs*exp(rhs), sym, **flags)
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _vsolve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
# make generator in log have exponent of 1
logs = eq.atoms(log)
spow = min(
{i.exp for j in logs for i in j.atoms(Pow)
if i.base == sym} or {1})
if spow != 1:
p = sym**spow
u = Dummy('bivariate-cov')
ueq = eq.subs(p, u)
if not ueq.has_free(sym):
sol = _vsolve(ueq, u, **flags)
inv = _vsolve(p - u, sym)
return [i.subs(u, s) for i in inv for s in sol]
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if isinstance(gi, (exp, log)) or (gi.is_Pow and gi.base == S.Exp1):
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(list(zip(up_or_log, [0]*len(up_or_log)))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
_eq = lhs - rhs
sols = _solve_lambert(_eq, sym, g)
# use a simplified form if it satisfies eq
# and has fewer operations
for n, s in enumerate(sols):
ns = nsimplify(s)
if ns != s and ns.count_ops() <= s.count_ops():
ok = checksol(_eq, sym, ns)
if ok is None:
ok = _eq.subs(sym, ns).equals(0)
if ok:
sols[n] = ns
return sols
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _vsolve(p, u, **flags)
return list({i.subs(u, s)
for i in inversion for s in sol})
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
if rhs == S.ComplexInfinity:
return []
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _vsolve(pos, u, **flags)
return [s.subs(reps) for s in soln]
except NotImplementedError:
pass
else:
pass # here for coverage
return # here for coverage
# TODO: option for calculating J numerically
@conserve_mpmath_dps
def nsolve(*args, dict=False, **kwargs):
r"""
Solve a nonlinear equation system numerically: ``nsolve(f, [args,] x0,
modules=['mpmath'], **kwargs)``.
Explanation
===========
``f`` is a vector function of symbolic expressions representing the system.
*args* are the variables. If there is only one variable, this argument can
be omitted. ``x0`` is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to
evaluate the function and the Jacobian matrix. Make sure to use a module
that supports matrices. For more information on the syntax, please see the
docstring of ``lambdify``.
If the keyword arguments contain ``dict=True`` (default is False) ``nsolve``
will return a list (perhaps empty) of solution mappings. This might be
especially useful if you want to use ``nsolve`` as a fallback to ``solve``, since
using the ``dict`` argument for both methods produces return values with a
consistent type structure. Please note: to keep this consistent with
``solve``, the solution will be returned in a list even though ``nsolve``
(currently at least) only finds one solution at a time.
Overdetermined systems are supported.
Examples
========
>>> from sympy import Symbol, nsolve
>>> import mpmath
>>> mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print(nsolve((f1, f2), (x1, x2), (-1, 1)))
Matrix([[-1.19287309935246], [1.27844411169911]])
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
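``dict=True`` wraps the result in a list of solution mappings (a small
illustration using the same root as above):
>>> nsolve(sin(x), 2, dict=True)
[{x: 3.14159265358979}]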
To solve with higher precision than the default, use the prec argument:
>>> from sympy import cos
>>> nsolve(cos(x) - x, 1)
0.739085133215161
>>> nsolve(cos(x) - x, 1, prec=50)
0.73908513321516064165531208767387340401341175890076
>>> cos(_)
0.73908513321516064165531208767387340401341175890076
To solve for complex roots of real functions, a nonreal initial point
must be specified:
>>> from sympy import I
>>> nsolve(x**2 + 2, I)
1.4142135623731*I
``mpmath.findroot`` is used and you can find its more extensive
documentation, especially concerning keyword parameters and
available solvers. Note, however, that for functions which are very
steep near the root, the verification of the solution may fail. In
this case you should use the flag ``verify=False`` and
independently verify the solution.
>>> from sympy import cos, cosh
>>> f = cos(x)*cosh(x) - 1
>>> nsolve(f, 3.14*100)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19)
>>> ans = nsolve(f, 3.14*100, verify=False); ans
312.588469032184
>>> f.subs(x, ans).n(2)
2.1e+121
>>> (f/f.diff(x)).subs(x, ans).n(2)
7.4e-15
One might safely skip the verification if bounds of the root are known
and a bisection method is used:
>>> bounds = lambda i: (3.14*i, 3.14*(i + 1))
>>> nsolve(f, bounds(100), solver='bisect', verify=False)
315.730061685774
Alternatively, a function may be better behaved when the
denominator is ignored. Since this is not always the case, however,
the decision of what function to use is left to the discretion of
the user.
>>> eq = x**2/(1 - x)/(1 - 2*x)**2 - 100
>>> nsolve(eq, 0.46)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (10000 > 2.1684e-19)
Try another starting point or tweak arguments.
>>> nsolve(eq.as_numer_denom()[0], 0.46)
0.46792545969349058
"""
# there are several other SymPy functions that use method= so
# guard against that here
if 'method' in kwargs:
raise ValueError(filldedent('''
Keyword "method" should not be used in this context. When using
some mpmath solvers directly, the keyword "method" is
used, but when using nsolve (and findroot) the keyword to use is
"solver".'''))
if 'prec' in kwargs:
import mpmath
mpmath.mp.dps = kwargs.pop('prec')
# keyword argument to return result as a dictionary
as_dict = dict
from builtins import dict # to unhide the builtin
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
if iterable(fargs) and iterable(x0):
if len(x0) != len(fargs):
raise TypeError('nsolve expected exactly %i guess vectors, got %i'
% (len(fargs), len(x0)))
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
if iterable(f):
raise TypeError('nsolve expected 3 arguments, got 2')
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if iterable(f):
f = list(f)
for i, fi in enumerate(f):
if isinstance(fi, Eq):
f[i] = fi.lhs - fi.rhs
f = Matrix(f).T
if iterable(x0):
x0 = list(x0)
if not isinstance(f, Matrix):
# assume it's a SymPy expression
if isinstance(f, Eq):
f = f.lhs - f.rhs
syms = f.free_symbols
if fargs is None:
fargs = syms.copy().pop()
if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)):
raise ValueError(filldedent('''
expected a one-dimensional and numerical function'''))
# the function is much better behaved if there is no denominator
# but sending the numerator is left to the user since sometimes
# the function is better behaved when the denominator is present
# e.g., issue 11768
f = lambdify(fargs, f, modules)
x = sympify(findroot(f, x0, **kwargs))
if as_dict:
return [{fargs: x}]
return x
if len(fargs) > f.cols:
raise NotImplementedError(filldedent('''
need at least as many equations as variables'''))
verbose = kwargs.get('verbose', False)
if verbose:
print('f(x):')
print(f)
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print('J(x):')
print(J)
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
if as_dict:
return [dict(zip(fargs, [sympify(xi) for xi in x]))]
return Matrix(x)
def _invert(eq, *symbols, **kwargs):
"""
Return tuple (i, d) where ``i`` is independent of *symbols* and ``d``
contains symbols.
Explanation
===========
``i`` and ``d`` are obtained after recursively using algebraic inversion
until an uninvertible ``d`` remains. If there are no free symbols then
``d`` will be zero. Some (but not necessarily all) solutions to the
expression ``i - d`` will be related to the solutions of the original
expression.
Examples
========
>>> from sympy.solvers.solvers import _invert as invert
>>> from sympy import sqrt, cos
>>> from sympy.abc import x, y
>>> invert(x - 3)
(3, x)
>>> invert(3)
(3, 0)
>>> invert(2*cos(x) - 1)
(1/2, cos(x))
>>> invert(sqrt(x) - 3)
(3, sqrt(x))
>>> invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> invert(sqrt(x + y) - 2)
(4, x + y)
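By default an Integer power is not inverted:
>>> invert(x**2 - 4)
(4, x**2)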
If the exponent is an Integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
if eq.args:
# make sure we are working with flat eq
eq = eq.func(*eq.args)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, S.Zero
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = S.Zero
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep.is_zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = {}
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms.setdefault(d, []).append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(_ispow(i) for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
elif ai == -bi:
if isinstance(ad, Function) and ad.func == bd.func:
if len(ad.args) == len(bd.args) == 1:
lhs = ad.args[0] - bd.args[0]
elif len(ad.args) == len(bd.args):
# should be able to solve
# f(x, y) - f(2 - x, 0) == 0 -> x == 1
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
elif lhs.is_Mul and any(_ispow(a) for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and lhs.inverse() is not None and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif isinstance(lhs, atan2):
y, x = lhs.args
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
elif lhs.func == rhs.func:
if len(lhs.args) == len(rhs.args) == 1:
lhs = lhs.args[0]
rhs = rhs.args[0]
elif len(lhs.args) == len(rhs.args):
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
# The rationale for this is that right now the multi-system solvers
# don't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
def unrad(eq, *syms, **flags):
"""
Remove radicals with symbolic arguments and return (eq, cov),
None, or raise an error.
Explanation
===========
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where:
*eq*, ``cov``
*eq* is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. *eq* might be rewritten in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
solutions to the original equation (if there are any).
*syms*
An iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if *syms* is not
set.
*flags* are used internally for communication during recursive calls.
One option is also recognized:
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if:
* All bases of the radicals are the same; a change of variables is
done in this case.
* All radicals appear in one term of the expression.
* There are exactly four terms with sqrt() factors (and no other terms), or
fewer than four terms have sqrt() factors.
* There are only two terms with radicals.
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(-x**3 + x**2 + 2*x + 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _vsolve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow:
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
margs = list(Mul.make_args(eq))
changed = False
for i, m in enumerate(margs):
if m.could_extract_minus_sign():
margs[i] = -m
changed = True
if changed:
eq = Mul(*margs, evaluate=False)
return eq, cov
def _Q(pow):
# return leading Rational of denominator of Pow's exponent
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not pow.is_Pow:
continue
if _Q(pow) == 1:
continue
if pow.free_symbols & syms:
return True
return False
_take = flags.setdefault('_take', _take)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs # XXX legacy Eq as Eqn support
elif not isinstance(eq, Expr):
return
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
eq = eq.as_numer_denom()[0]
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
# see if there are radicals in symbols of interest
syms = set(syms) or eq.free_symbols # _take uses this
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g)]
if not gens:
return
# recast poly in terms of eigen-gens
poly = eq.as_poly(*gens)
# not a polynomial e.g. 1 + sqrt(x)*exp(sqrt(x)) with gen sqrt(x)
if poly is None:
return
# - an exponent has a symbol of interest (don't handle)
if any(g.exp.has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
# get terms together that have common generators
drad = dict(zip(rads, range(len(rads))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
x = b.free_symbols
else:
x = syms
x = list(ordered(x))[0]
try:
inv = _vsolve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _vsolve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_vsolve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
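# A sketch of how the cover returned above can be used, based on the doctest
# example in the docstring: for eq = sqrt(x) + root(x, 3) - 2,
#     unrad(eq) -> (_p**3 + _p**2 - 2, [_p, _p**6 - x])
# The real root of the polynomial is _p = 1, and the cover relation
# _p**6 - x = 0 then gives x = 1, which indeed satisfies the original eq.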
# delayed imports
from sympy.solvers.bivariate import (
bivariate_type, _solve_lambert, _filtered_gens)
|
077f7f02ad8e794e8a722d854e096f09b67b098fe85fbc0d6c85281b8a474db3 | """
module for generating C, C++, Fortran77, Fortran90, Julia, Rust
and Octave/Matlab routines that evaluate SymPy expressions.
This module is work in progress.
Only the milestones with a '+' character in the list below have been completed.
--- How is sympy.utilities.codegen different from sympy.printing.ccode? ---
We considered the idea of extending the printing routines for SymPy functions in
such a way that they print complete compilable code, but this leads to a few
insurmountable issues that can only be tackled with a dedicated code generator:
- For C, one needs both a code and a header file, while the printing routines
generate just one string. This code generator can be extended to support
.pyf files for f2py.
- SymPy functions are not concerned with programming-technical issues, such
as input, output and input-output arguments. Other examples are contiguous
or non-contiguous arrays, including headers of other libraries such as gsl
or others.
- It is highly interesting to evaluate several SymPy functions in one C
routine, possibly sharing common intermediate results with the help
of the cse routine. This is more than just printing.
- From the programming perspective, expressions with constants should be
evaluated in the code generator as much as possible. This is different
for printing.
--- Basic assumptions ---
* A generic Routine data structure describes the routine that must be
translated into C/Fortran/... code. This data structure covers all
features present in one or more of the supported languages.
* Descendants from the CodeGen class transform multiple Routine instances
into compilable code. Each derived class translates into a specific
language.
* In many cases, one wants a simple workflow. The friendly functions in the
last part are a simple API on top of the Routine/CodeGen machinery. They are
easier to use, but less powerful.
--- Milestones ---
+ First working version with scalar input arguments, generating C code,
tests
+ Friendly functions that are easier to use than the rigorous
Routine/CodeGen workflow.
+ Integer and Real numbers as input and output
+ Output arguments
+ InputOutput arguments
+ Sort input/output arguments properly
+ Contiguous array arguments (numpy matrices)
+ Also generate .pyf code for f2py (in autowrap module)
+ Isolate constants and evaluate them beforehand in double precision
+ Fortran 90
+ Octave/Matlab
- Common Subexpression Elimination
- User defined comments in the generated code
- Optional extra include lines for libraries/objects that can eval special
functions
- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ...
- Contiguous array arguments (SymPy matrices)
- Non-contiguous array arguments (SymPy matrices)
- ccode must raise an error when it encounters something that cannot be
translated into c. ccode(integrate(sin(x)/x, x)) does not make sense.
- Complex numbers as input and output
- A default complex datatype
- Include extra information in the header: date, user, hostname, sha1
hash, ...
- Fortran 77
- C++
- Python
- Julia
- Rust
- ...
"""
import os
import textwrap
from io import StringIO
from sympy import __version__ as sympy_version
from sympy.core import Symbol, S, Tuple, Equality, Function, Basic
from sympy.printing.c import c_code_printers
from sympy.printing.codeprinter import AssignmentError
from sympy.printing.fortran import FCodePrinter
from sympy.printing.julia import JuliaCodePrinter
from sympy.printing.octave import OctaveCodePrinter
from sympy.printing.rust import RustCodePrinter
from sympy.tensor import Idx, Indexed, IndexedBase
from sympy.matrices import (MatrixSymbol, ImmutableMatrix, MatrixBase,
MatrixExpr, MatrixSlice)
from sympy.utilities.iterables import is_sequence
__all__ = [
# description of routines
"Routine", "DataType", "default_datatypes", "get_default_datatype",
"Argument", "InputArgument", "OutputArgument", "Result",
# routines -> code
"CodeGen", "CCodeGen", "FCodeGen", "JuliaCodeGen", "OctaveCodeGen",
"RustCodeGen",
# friendly functions
"codegen", "make_routine",
]
#
# Description of routines
#
class Routine:
"""Generic description of evaluation routine for set of expressions.
A CodeGen class can translate instances of this class into code in a
particular language. The routine specification covers all the features
present in these languages. The CodeGen part must raise an exception
when certain features are not present in the target language. For
example, multiple return values are possible in Python, but not in C or
Fortran. Another example: Fortran and Python support complex numbers,
while C does not.
"""
def __init__(self, name, arguments, results, local_vars, global_vars):
"""Initialize a Routine instance.
Parameters
==========
name : string
Name of the routine.
arguments : list of Arguments
These are things that appear in arguments of a routine, often
appearing on the right-hand side of a function call. These are
commonly InputArguments but in some languages, they can also be
OutputArguments or InOutArguments (e.g., pass-by-reference in C
code).
results : list of Results
These are the return values of the routine, often appearing on
the left-hand side of a function call. The difference between
Results and OutputArguments and when you should use each is
language-specific.
local_vars : list of Results
These are variables that will be defined at the beginning of the
function.
global_vars : list of Symbols
Variables which will not be passed into the function.
"""
# extract all input symbols and all symbols appearing in an expression
input_symbols = set()
symbols = set()
for arg in arguments:
if isinstance(arg, OutputArgument):
symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed))
elif isinstance(arg, InputArgument):
input_symbols.add(arg.name)
elif isinstance(arg, InOutArgument):
input_symbols.add(arg.name)
symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed))
else:
raise ValueError("Unknown Routine argument: %s" % arg)
for r in results:
if not isinstance(r, Result):
raise ValueError("Unknown Routine result: %s" % r)
symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed))
local_symbols = set()
for r in local_vars:
if isinstance(r, Result):
symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed))
local_symbols.add(r.name)
else:
local_symbols.add(r)
symbols = {s.label if isinstance(s, Idx) else s for s in symbols}
# Check that all symbols in the expressions are covered by
# InputArguments/InOutArguments---subset because user could
# specify additional (unused) InputArguments or local_vars.
notcovered = symbols.difference(
input_symbols.union(local_symbols).union(global_vars))
if notcovered != set():
raise ValueError("Symbols needed for output are not in input " +
", ".join([str(x) for x in notcovered]))
self.name = name
self.arguments = arguments
self.results = results
self.local_vars = local_vars
self.global_vars = global_vars
def __str__(self):
return self.__class__.__name__ + "({name!r}, {arguments}, {results}, {local_vars}, {global_vars})".format(**self.__dict__)
__repr__ = __str__
@property
def variables(self):
"""Returns a set of all variables possibly used in the routine.
For routines with unnamed return values, the dummies that may or
may not be used will be included in the set.
"""
v = set(self.local_vars)
for arg in self.arguments:
v.add(arg.name)
for res in self.results:
v.add(res.result_var)
return v
@property
def result_variables(self):
"""Returns a list of OutputArgument, InOutArgument and Result.
If return values are present, they are at the end of the list.
"""
args = [arg for arg in self.arguments if isinstance(
arg, (OutputArgument, InOutArgument))]
args.extend(self.results)
return args
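# A minimal usage sketch (``make_routine`` is the friendly constructor listed
# in ``__all__`` and defined later in this module):
#     from sympy.utilities.codegen import make_routine
#     from sympy.abc import x, y
#     r = make_routine('f', x*y)
#     # r.arguments -> two InputArgument instances (for x and y)
#     # r.results   -> one Result holding the expression x*y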
class DataType:
"""Holds strings for a certain datatype in different languages."""
def __init__(self, cname, fname, pyname, jlname, octname, rsname):
self.cname = cname
self.fname = fname
self.pyname = pyname
self.jlname = jlname
self.octname = octname
self.rsname = rsname
default_datatypes = {
"int": DataType("int", "INTEGER*4", "int", "", "", "i32"),
"float": DataType("double", "REAL*8", "float", "", "", "f64"),
"complex": DataType("double", "COMPLEX*16", "complex", "", "", "float") #FIXME:
# complex is only supported in fortran, python, julia, and octave.
# So to not break c or rust code generation, we stick with double or
# float, respectively (but actually should raise an exception for
# explicitly complex variables (x.is_complex==True))
}
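# A DataType just records the type name to emit for each target language, in
# the order (cname, fname, pyname, jlname, octname, rsname). For illustration
# only, a hypothetical extended-precision entry might look like:
#     default_datatypes["longfloat"] = DataType(
#         "long double", "REAL*16", "float", "", "", "f64")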
COMPLEX_ALLOWED = False
def get_default_datatype(expr, complex_allowed=None):
"""Derives an appropriate datatype based on the expression."""
if complex_allowed is None:
complex_allowed = COMPLEX_ALLOWED
if complex_allowed:
final_dtype = "complex"
else:
final_dtype = "float"
if expr.is_integer:
return default_datatypes["int"]
elif expr.is_real:
return default_datatypes["float"]
elif isinstance(expr, MatrixBase):
#check all entries
dt = "int"
for element in expr:
if dt == "int" and not element.is_integer:
dt = "float"
if dt == "float" and not element.is_real:
return default_datatypes[final_dtype]
return default_datatypes[dt]
else:
return default_datatypes[final_dtype]
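# For example (sketch), with COMPLEX_ALLOWED left at False:
#     get_default_datatype(Symbol('n', integer=True)).cname -> 'int'
#     get_default_datatype(Symbol('x')).cname               -> 'double'
#     get_default_datatype(Symbol('x')).fname               -> 'REAL*8'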
class Variable:
"""Represents a typed variable."""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol or MatrixSymbol
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence of tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
if not isinstance(name, (Symbol, MatrixSymbol)):
raise TypeError("The first argument must be a SymPy symbol.")
if datatype is None:
datatype = get_default_datatype(name)
elif not isinstance(datatype, DataType):
raise TypeError("The (optional) `datatype' argument must be an "
"instance of the DataType class.")
if dimensions and not isinstance(dimensions, (tuple, list)):
raise TypeError(
"The dimension argument must be a sequence of tuples")
self._name = name
self._datatype = {
'C': datatype.cname,
'FORTRAN': datatype.fname,
'JULIA': datatype.jlname,
'OCTAVE': datatype.octname,
'PYTHON': datatype.pyname,
'RUST': datatype.rsname,
}
self.dimensions = dimensions
self.precision = precision
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
__repr__ = __str__
@property
def name(self):
return self._name
def get_datatype(self, language):
"""Returns the datatype string for the requested language.
Examples
========
>>> from sympy import Symbol
>>> from sympy.utilities.codegen import Variable
>>> x = Variable(Symbol('x'))
>>> x.get_datatype('c')
'double'
>>> x.get_datatype('fortran')
'REAL*8'
"""
try:
return self._datatype[language.upper()]
except KeyError:
raise CodeGenError("Has datatypes for languages: %s" %
", ".join(self._datatype))
class Argument(Variable):
"""An abstract Argument data structure: a name and a data type.
This structure is refined in the descendants below.
"""
pass
class InputArgument(Argument):
pass
class ResultBase:
"""Base class for all "outgoing" information from a routine.
Objects of this class store a SymPy expression and a SymPy object
representing a result variable that will be used in the generated code
only if necessary.
"""
def __init__(self, expr, result_var):
self.expr = expr
self.result_var = result_var
def __str__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.expr,
self.result_var)
__repr__ = __str__
class OutputArgument(Argument, ResultBase):
"""OutputArgument are always initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol, MatrixSymbol
The name of this variable. When used for code generation, this
might appear, for example, in the prototype of the function in the
argument list.
result_var : Symbol, Indexed
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y".
expr : object
The expression that should be output, typically a SymPy
expression.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence of tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.result_var, self.expr)
__repr__ = __str__
class InOutArgument(Argument, ResultBase):
"""InOutArgument are never initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
if not datatype:
datatype = get_default_datatype(expr)
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
__init__.__doc__ = OutputArgument.__init__.__doc__
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.expr,
self.result_var)
__repr__ = __str__
class Result(Variable, ResultBase):
"""An expression for a return value.
The name result is used to avoid conflicts with the reserved word
"return" in the Python language. It is also shorter than ReturnValue.
These may or may not need a name in the destination (e.g., "return(x*y)"
might return a value without ever naming it).
"""
def __init__(self, expr, name=None, result_var=None, datatype=None,
dimensions=None, precision=None):
"""Initialize a return value.
Parameters
==========
expr : SymPy expression
name : Symbol, MatrixSymbol, optional
The name of this return variable. When used for code generation,
this might appear, for example, in the prototype of the function in a
list of return values. A dummy name is generated if omitted.
result_var : Symbol, Indexed, optional
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y". Defaults to
`name` if omitted.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the expr argument.
dimensions : sequence of tuples, optional
If present, this variable is interpreted as an array,
where this sequence of tuples specifies (lower, upper)
bounds for each index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
# Basic because it is the base class for all types of expressions
if not isinstance(expr, (Basic, MatrixBase)):
raise TypeError("The first argument must be a SymPy expression.")
if name is None:
name = 'result_%d' % abs(hash(expr))
if datatype is None:
#try to infer data type from the expression
datatype = get_default_datatype(expr)
if isinstance(name, str):
if isinstance(expr, (MatrixBase, MatrixExpr)):
name = MatrixSymbol(name, *expr.shape)
else:
name = Symbol(name)
if result_var is None:
result_var = name
Variable.__init__(self, name, datatype=datatype,
dimensions=dimensions, precision=precision)
ResultBase.__init__(self, expr, result_var)
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.expr, self.name,
self.result_var)
__repr__ = __str__
#
# Transformation of routine objects into code
#
class CodeGen:
"""Abstract class for the code generators."""
printer = None # will be set to an instance of a CodePrinter subclass
def _indent_code(self, codelines):
return self.printer.indent_code(codelines)
def _printer_method_with_settings(self, method, settings=None, *args, **kwargs):
settings = settings or {}
ori = {k: self.printer._settings[k] for k in settings}
for k, v in settings.items():
self.printer._settings[k] = v
result = getattr(self.printer, method)(*args, **kwargs)
for k, v in ori.items():
self.printer._settings[k] = v
return result
def _get_symbol(self, s):
"""Returns the symbol as fcode prints it."""
if self.printer._settings['human']:
expr_str = self.printer.doprint(s)
else:
constants, not_supported, expr_str = self.printer.doprint(s)
if constants or not_supported:
raise ValueError("Failed to print %s" % str(s))
return expr_str.strip()
def __init__(self, project="project", cse=False):
"""Initialize a code generator.
Derived classes will offer more options that affect the generated
code.
"""
self.project = project
self.cse = cse
def routine(self, name, expr, argument_sequence=None, global_vars=None):
"""Creates an Routine object that is appropriate for this language.
This implementation is appropriate for at least C/Fortran. Subclasses
can override this if necessary.
Here, we assume at most one return value (the l-value) which must be
scalar. Additional outputs are OutputArguments (e.g., pointers on
right-hand-side or pass-by-reference). Matrices are always returned
via OutputArguments. If ``argument_sequence`` is None, arguments will
be ordered alphabetically, but with all InputArguments first, and then
OutputArgument and InOutArguments.
"""
if self.cse:
from sympy.simplify.cse_main import cse
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
for e in expr:
if not e.is_Equality:
raise CodeGenError("Lists of expressions must all be Equalities. {} is not.".format(e))
# create a list of right hand sides and simplify them
rhs = [e.rhs for e in expr]
common, simplified = cse(rhs)
# pack the simplified expressions back up with their left hand sides
expr = [Equality(e.lhs, rhs) for e, rhs in zip(expr, simplified)]
else:
if isinstance(expr, Equality):
common, simplified = cse(expr.rhs) #, ignore=in_out_args)
expr = Equality(expr.lhs, simplified[0])
else:
common, simplified = cse(expr)
expr = simplified
local_vars = [Result(b,a) for a,b in common]
local_symbols = {a for a,_ in common}
local_expressions = Tuple(*[b for _,b in common])
else:
local_expressions = Tuple()
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
if self.cse:
if {i.label for i in expressions.atoms(Idx)} != set():
raise CodeGenError("CSE and Indexed expressions do not play well together yet")
else:
# local variables for indexed expressions
local_vars = {i.label for i in expressions.atoms(Idx)}
local_symbols = local_vars
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = (expressions.free_symbols | local_expressions.free_symbols) - local_symbols - global_vars
new_symbols = set()
new_symbols.update(symbols)
for symbol in symbols:
if isinstance(symbol, Idx):
new_symbols.remove(symbol)
new_symbols.update(symbol.args[1].free_symbols)
if isinstance(symbol, Indexed):
new_symbols.remove(symbol)
symbols = new_symbols
# Decide whether to use output argument or return value
return_val = []
output_args = []
for expr in expressions:
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
if isinstance(out_arg, Indexed):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg.base.label
elif isinstance(out_arg, Symbol):
dims = []
symbol = out_arg
elif isinstance(out_arg, MatrixSymbol):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg
else:
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
if expr.has(symbol):
output_args.append(
InOutArgument(symbol, out_arg, expr, dimensions=dims))
else:
output_args.append(
OutputArgument(symbol, out_arg, expr, dimensions=dims))
# remove duplicate arguments when they are not local variables
if symbol not in local_vars:
# avoid duplicate arguments
symbols.remove(symbol)
elif isinstance(expr, (ImmutableMatrix, MatrixSlice)):
# Create a "dummy" MatrixSymbol to use as the Output arg
out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape)
dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape])
output_args.append(
OutputArgument(out_arg, out_arg, expr, dimensions=dims))
else:
return_val.append(Result(expr))
arg_list = []
# setup input argument list
# helper to get dimensions for data for array-like args
def dimensions(s):
return [(S.Zero, dim - 1) for dim in s.shape]
array_symbols = {}
for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol) | local_expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
if symbol in array_symbols:
array = array_symbols[symbol]
metadata = {'dimensions': dimensions(array)}
else:
metadata = {}
arg_list.append(InputArgument(symbol, **metadata))
output_args.sort(key=lambda x: str(x.name))
arg_list.extend(output_args)
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
if isinstance(symbol, (IndexedBase, MatrixSymbol)):
metadata = {'dimensions': dimensions(symbol)}
else:
metadata = {}
new_args.append(InputArgument(symbol, **metadata))
arg_list = new_args
return Routine(name, arg_list, return_val, local_vars, global_vars)
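# A sketch of the resulting argument ordering, assuming gen = CCodeGen() and
# plain symbols x, y, z:
#     r = gen.routine('f', Equality(y, x*z))
#     [a.name for a in r.arguments] -> [x, z, y]  # inputs (alphabetical), then outputs
#     r.results                     -> []         # y became an OutputArgument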
def write(self, routines, prefix, to_files=False, header=True, empty=True):
"""Writes all the source code files for the given routines.
The generated source is returned as a list of (filename, contents)
tuples, or is written to files (see below). Each filename consists
of the given prefix, appended with an appropriate extension.
Parameters
==========
routines : list
A list of Routine instances to be written
prefix : string
The prefix for the output files
to_files : bool, optional
When True, the output is written to files. Otherwise, a list
of (filename, contents) tuples is returned. [default: False]
header : bool, optional
When True, a header comment is included on top of each source
file. [default: True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default: True]
"""
if to_files:
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
with open(filename, "w") as f:
dump_fn(self, routines, f, prefix, header, empty)
else:
result = []
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
contents = StringIO()
dump_fn(self, routines, contents, prefix, header, empty)
result.append((filename, contents.getvalue()))
return result
def dump_code(self, routines, f, prefix, header=True, empty=True):
"""Write the code by calling language specific methods.
The generated file contains all the definitions of the routines in
low-level code and refers to the header file if appropriate.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
code_lines = self._preprocessor_statements(prefix)
for routine in routines:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if header:
code_lines = ''.join(self._get_header() + [code_lines])
if code_lines:
f.write(code_lines)
class CodeGenError(Exception):
pass
class CodeGenArgumentListError(Exception):
@property
def missing_args(self):
return self.args[1]
header_comment = """Code generated with SymPy %(version)s
See http://www.sympy.org/ for more information.
This file is part of '%(project)s'
"""
class CCodeGen(CodeGen):
"""Generator for C code.
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.c and <prefix>.h respectively.
"""
code_extension = "c"
interface_extension = "h"
standard = 'c99'
def __init__(self, project="project", printer=None,
preprocessor_statements=None, cse=False):
super().__init__(project=project, cse=cse)
self.printer = printer or c_code_printers[self.standard.lower()]()
self.preprocessor_statements = preprocessor_statements
if preprocessor_statements is None:
self.preprocessor_statements = ['#include <math.h>']
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append(" *%s*\n" % line.center(76))
code_lines.append(" " + "*"*78 + "/\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype of the routine.
If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
"""
if len(routine.results) > 1:
raise CodeGenError("C only supports a single or no return value.")
elif len(routine.results) == 1:
ctype = routine.results[0].get_datatype('C')
else:
ctype = "void"
type_args = []
for arg in routine.arguments:
name = self.printer.doprint(arg.name)
if arg.dimensions or isinstance(arg, ResultBase):
type_args.append((arg.get_datatype('C'), "*%s" % name))
else:
type_args.append((arg.get_datatype('C'), name))
arguments = ", ".join([ "%s %s" % t for t in type_args])
return "%s %s(%s)" % (ctype, routine.name, arguments)
def _preprocessor_statements(self, prefix):
code_lines = []
code_lines.append('#include "{}.h"'.format(os.path.basename(prefix)))
code_lines.extend(self.preprocessor_statements)
code_lines = ['{}\n'.format(l) for l in code_lines]
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_globals(self, routine):
# global variables are not explicitly declared within C functions
return []
def _declare_locals(self, routine):
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
code_lines = []
for result in routine.local_vars:
# local variables that are simple symbols such as those used as indices into
# for loops are declared elsewhere.
if not isinstance(result, Result):
continue
if result.name != result.result_var:
raise CodeGenError("Result variable and name should match: {}".format(result))
assign_to = result.name
t = result.get_datatype('c')
if isinstance(result.expr, (MatrixBase, MatrixExpr)):
dims = result.expr.shape
code_lines.append("{} {}[{}];\n".format(t, str(assign_to), dims[0]*dims[1]))
prefix = ""
else:
prefix = "const {} ".format(t)
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
code_lines.append("{}{}\n".format(prefix, c_expr))
return code_lines
def _call_printer(self, routine):
code_lines = []
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
return_val = None
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name + "_result"
t = result.get_datatype('c')
code_lines.append("{} {};\n".format(t, str(assign_to)))
return_val = assign_to
else:
assign_to = result.result_var
try:
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
except AssignmentError:
assign_to = result.result_var
code_lines.append(
"%s %s;\n" % (result.get_datatype('c'), str(assign_to)))
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
code_lines.append("%s\n" % c_expr)
if return_val:
code_lines.append(" return %s;\n" % return_val)
return code_lines
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_c(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_c.extension = code_extension # type: ignore
dump_c.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the C header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to construct the include guards.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
guard_name = "%s__%s__H" % (self.project.replace(
" ", "_").upper(), prefix.replace("/", "_").upper())
# include guards
if empty:
print(file=f)
print("#ifndef %s" % guard_name, file=f)
print("#define %s" % guard_name, file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_prototype(routine)
print("%s;" % prototype, file=f)
# end of include guards
if empty:
print(file=f)
print("#endif", file=f)
if empty:
print(file=f)
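# The generated header is laid out roughly as follows (sketch for
# project='project' and prefix='file'):
#     #ifndef PROJECT__FILE__H
#     #define PROJECT__FILE__H
#     void f(double x, double z, double *y);
#     #endif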
dump_h.extension = interface_extension # type: ignore
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_c, dump_h]
class C89CodeGen(CCodeGen):
standard = 'C89'
class C99CodeGen(CCodeGen):
standard = 'C99'
class FCodeGen(CodeGen):
"""Generator for Fortran 95 code
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.f90 and <prefix>.h respectively.
"""
code_extension = "f90"
interface_extension = "h"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or FCodePrinter()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("!" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append("!*%s*\n" % line.center(76))
code_lines.append("!" + "*"*78 + '\n')
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the fortran routine."""
code_list = []
if len(routine.results) > 1:
raise CodeGenError(
"Fortran only supports a single or no return value.")
elif len(routine.results) == 1:
result = routine.results[0]
code_list.append(result.get_datatype('fortran'))
code_list.append("function")
else:
code_list.append("subroutine")
args = ", ".join("%s" % self._get_symbol(arg.name)
for arg in routine.arguments)
call_sig = "{}({})\n".format(routine.name, args)
# Fortran 95 requires all lines be less than 132 characters, so wrap
# this line before appending.
call_sig = ' &\n'.join(textwrap.wrap(call_sig,
width=60,
break_long_words=False)) + '\n'
code_list.append(call_sig)
code_list = [' '.join(code_list)]
code_list.append('implicit none\n')
return code_list
def _declare_arguments(self, routine):
# argument type declarations
code_list = []
array_list = []
scalar_list = []
for arg in routine.arguments:
if isinstance(arg, InputArgument):
typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
elif isinstance(arg, InOutArgument):
typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
elif isinstance(arg, OutputArgument):
typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
else:
raise CodeGenError("Unknown Argument type: %s" % type(arg))
fprint = self._get_symbol
if arg.dimensions:
# fortran arrays start at 1
dimstr = ", ".join(["%s:%s" % (
fprint(dim[0] + 1), fprint(dim[1] + 1))
for dim in arg.dimensions])
typeinfo += ", dimension(%s)" % dimstr
array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
else:
scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
# scalars first, because they can be used in array declarations
code_list.extend(scalar_list)
code_list.extend(array_list)
return code_list
def _declare_globals(self, routine):
# Global variables not explicitly declared within Fortran 90 functions.
# Note: a future F77 mode may need to generate "common" blocks.
return []
def _declare_locals(self, routine):
code_list = []
for var in sorted(routine.local_vars, key=str):
typeinfo = get_default_datatype(var)
code_list.append("%s :: %s\n" % (
typeinfo.fname, self._get_symbol(var)))
return code_list
def _get_routine_ending(self, routine):
"""Returns the closing statements of the fortran routine."""
if len(routine.results) == 1:
return ["end function\n"]
else:
return ["end subroutine\n"]
def get_interface(self, routine):
"""Returns a string for the function interface.
The routine should have a single result object, which can be None.
If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
"""
prototype = [ "interface\n" ]
prototype.extend(self._get_routine_opening(routine))
prototype.extend(self._declare_arguments(routine))
prototype.extend(self._get_routine_ending(routine))
prototype.append("end interface\n")
return "".join(prototype)
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
constants, not_fortran, f_expr = self._printer_method_with_settings(
'doprint', dict(human=False, source_format='free', standard=95),
result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
t = get_default_datatype(obj)
declarations.append(
"%s, parameter :: %s = %s\n" % (t.fname, obj, v))
for obj in sorted(not_fortran, key=str):
t = get_default_datatype(obj)
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("%s :: %s\n" % (t.fname, name))
code_lines.append("%s\n" % f_expr)
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', dict(human=False, source_format='free'), codelines)
def dump_f95(self, routines, f, prefix, header=True, empty=True):
# check that symbols are unique with ignorecase
for r in routines:
lowercase = {str(x).lower() for x in r.variables}
orig_case = {str(x) for x in r.variables}
if len(lowercase) < len(orig_case):
raise CodeGenError("Fortran ignores case. Got symbols: %s" %
(", ".join([str(var) for var in r.variables])))
self.dump_code(routines, f, prefix, header, empty)
dump_f95.extension = code_extension # type: ignore
dump_f95.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the interface to a header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_interface(routine)
f.write(prototype)
if empty:
print(file=f)
dump_h.extension = interface_extension # type: ignore
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_f95, dump_h]
class JuliaCodeGen(CodeGen):
"""Generator for Julia code.
The .write() method inherited from CodeGen will output a code file
<prefix>.jl.
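
    A minimal sketch of driving this generator through ``codegen`` (the
    expression and names are illustrative):

    >>> from sympy.utilities.codegen import codegen
    >>> from sympy.abc import x
    >>> [(jl_name, jl_code)] = codegen(("f", x**2), "Julia", header=False)  # doctest: +SKIP
    >>> print(jl_name)  # doctest: +SKIP
    f.jl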
"""
code_extension = "jl"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or JuliaCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Julia."""
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
old_symbols = expressions.free_symbols - local_vars - global_vars
symbols = set()
for s in old_symbols:
if isinstance(s, Idx):
symbols.update(s.args[1].free_symbols)
elif not isinstance(s, Indexed):
symbols.add(s)
# Julia supports multiple return values
return_vals = []
output_args = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
dims = tuple([ (S.One, dim) for dim in out_arg.shape])
symbol = out_arg.base.label
output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
output_args.sort(key=lambda x: str(x.name))
arg_list = list(output_args)
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("#\n")
else:
code_lines.append("# %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Inputs
args = []
for i, arg in enumerate(routine.arguments):
if isinstance(arg, OutputArgument):
raise CodeGenError("Julia: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, (InputArgument, InOutArgument)):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_globals(self, routine):
return []
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
outs = []
for result in routine.results:
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y[i]` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
return ["return " + ", ".join(outs) + "\nend\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, jl_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
declarations.append(
"%s = %s\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
"# unsupported: %s\n" % (name))
code_lines.append("%s\n" % (jl_expr))
return declarations + code_lines
def _indent_code(self, codelines):
# Note that indenting seems to happen twice, first
# statement-by-statement by JuliaPrinter then again here.
p = JuliaCodePrinter({'human': False})
return p.indent_code(codelines)
def dump_jl(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_jl.extension = code_extension # type: ignore
dump_jl.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_jl]
class OctaveCodeGen(CodeGen):
"""Generator for Octave code.
The .write() method inherited from CodeGen will output a code file
<prefix>.m.
Octave .m files usually contain one function. That function name should
match the filename (``prefix``). If you pass multiple ``name_expr`` pairs,
the latter ones are presumed to be private functions accessed by the
primary function.
You should only pass inputs to ``argument_sequence``: outputs are ordered
according to their order in ``name_expr``.
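
    A minimal sketch via ``codegen`` (illustrative; the prefix defaults to
    the first routine name, here "f"):

    >>> from sympy.utilities.codegen import codegen
    >>> from sympy.abc import x
    >>> [(m_name, m_code)] = codegen(("f", x**2), "Octave", header=False)  # doctest: +SKIP
    >>> print(m_name)  # doctest: +SKIP
    f.m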
"""
code_extension = "m"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or OctaveCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Octave."""
        # FIXME: this is probably general enough for other high-level
        # languages; perhaps it's the C/Fortran one that is specialized!
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
old_symbols = expressions.free_symbols - local_vars - global_vars
symbols = set()
for s in old_symbols:
if isinstance(s, Idx):
symbols.update(s.args[1].free_symbols)
elif not isinstance(s, Indexed):
symbols.add(s)
# Octave supports multiple return values
return_vals = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
symbol = out_arg.base.label
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
arg_list = []
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("%\n")
else:
code_lines.append("%% %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Outputs
outs = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y(i)` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
if len(outs) > 1:
code_list.append("[" + (", ".join(outs)) + "]")
else:
code_list.append("".join(outs))
code_list.append(" = ")
# Inputs
args = []
for i, arg in enumerate(routine.arguments):
if isinstance(arg, (OutputArgument, InOutArgument)):
raise CodeGenError("Octave: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, InputArgument):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_globals(self, routine):
if not routine.global_vars:
return []
s = " ".join(sorted([self._get_symbol(g) for g in routine.global_vars]))
return ["global " + s + "\n"]
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
return ["end\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, oct_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
declarations.append(
" %s = %s; %% constant\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
" %% unsupported: %s\n" % (name))
code_lines.append("%s\n" % (oct_expr))
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', dict(human=False), codelines)
def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True):
        # Note: this used to call self.dump_code(), but we need more control
        # over the header.
code_lines = self._preprocessor_statements(prefix)
for i, routine in enumerate(routines):
if i > 0:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
if i == 0:
if routine.name != prefix:
raise ValueError('Octave function name should match prefix')
if header:
code_lines.append("%" + prefix.upper() +
" Autogenerated by SymPy\n")
code_lines.append(''.join(self._get_header()))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if code_lines:
f.write(code_lines)
dump_m.extension = code_extension # type: ignore
dump_m.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_m]
class RustCodeGen(CodeGen):
"""Generator for Rust code.
The .write() method inherited from CodeGen will output a code file
<prefix>.rs
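
    A minimal sketch via ``codegen`` (illustrative):

    >>> from sympy.utilities.codegen import codegen
    >>> from sympy.abc import x
    >>> [(rs_name, rs_code)] = codegen(("f", x**2), "Rust", header=False)  # doctest: +SKIP
    >>> print(rs_name)  # doctest: +SKIP
    f.rs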
"""
code_extension = "rs"
def __init__(self, project="project", printer=None):
super().__init__(project=project)
self.printer = printer or RustCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Rust."""
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = expressions.free_symbols - local_vars - global_vars - expressions.atoms(Indexed)
# Rust supports multiple return values
return_vals = []
output_args = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
dims = tuple([ (S.One, dim) for dim in out_arg.shape])
symbol = out_arg.base.label
output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
output_args.sort(key=lambda x: str(x.name))
arg_list = list(output_args)
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/*\n")
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append((" *%s" % line.center(76)).rstrip() + "\n")
code_lines.append(" */\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype of the routine.
        If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
"""
results = [i.get_datatype('Rust') for i in routine.results]
if len(results) == 1:
rstype = " -> " + results[0]
elif len(routine.results) > 1:
rstype = " -> (" + ", ".join(results) + ")"
else:
rstype = ""
type_args = []
for arg in routine.arguments:
name = self.printer.doprint(arg.name)
if arg.dimensions or isinstance(arg, ResultBase):
type_args.append(("*%s" % name, arg.get_datatype('Rust')))
else:
type_args.append((name, arg.get_datatype('Rust')))
arguments = ", ".join([ "%s: %s" % t for t in type_args])
return "fn %s(%s)%s" % (routine.name, arguments, rstype)
def _preprocessor_statements(self, prefix):
code_lines = []
# code_lines.append("use std::f64::consts::*;\n")
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_globals(self, routine):
# global variables are not explicitly declared within C functions
return []
def _declare_locals(self, routine):
# loop variables are declared in loop statement
return []
def _call_printer(self, routine):
code_lines = []
declarations = []
returns = []
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
returns.append(str(result.result_var))
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, rs_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
declarations.append("const %s: f64 = %s;\n" % (name, value))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("// unsupported: %s\n" % (name))
            code_lines.append("let %s\n" % rs_expr)
if len(returns) > 1:
returns = ['(' + ', '.join(returns) + ')']
returns.append('\n')
return declarations + code_lines + returns
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_rs(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_rs.extension = code_extension # type: ignore
dump_rs.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_rs]
def get_code_generator(language, project=None, standard=None, printer = None):
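    """Return a ``CodeGen`` subclass instance for the given language name.

    The lookup is case-insensitive; ``standard`` is only consulted to pick a
    specific C standard ('c89' or 'c99'). A minimal sketch:

    >>> from sympy.utilities.codegen import get_code_generator
    >>> type(get_code_generator("C", standard="c99")).__name__
    'C99CodeGen'
    """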
if language == 'C':
if standard is None:
pass
elif standard.lower() == 'c89':
language = 'C89'
elif standard.lower() == 'c99':
language = 'C99'
CodeGenClass = {"C": CCodeGen, "C89": C89CodeGen, "C99": C99CodeGen,
"F95": FCodeGen, "JULIA": JuliaCodeGen,
"OCTAVE": OctaveCodeGen,
"RUST": RustCodeGen}.get(language.upper())
if CodeGenClass is None:
raise ValueError("Language '%s' is not supported." % language)
return CodeGenClass(project, printer)
#
# Friendly functions
#
def codegen(name_expr, language=None, prefix=None, project="project",
to_files=False, header=True, empty=True, argument_sequence=None,
global_vars=None, standard=None, code_gen=None, printer = None):
"""Generate source code for expressions in a given language.
Parameters
==========
name_expr : tuple, or list of tuples
A single (name, expression) tuple or a list of (name, expression)
tuples. Each tuple corresponds to a routine. If the expression is
an equality (an instance of class Equality) the left hand side is
considered an output argument. If expression is an iterable, then
the routine will have multiple outputs.
    language : string, optional
A string that indicates the source code language. This is case
insensitive. Currently, 'C', 'F95' and 'Octave' are supported.
'Octave' generates code compatible with both Octave and Matlab.
prefix : string, optional
A prefix for the names of the files that contain the source code.
Language-dependent suffixes will be appended. If omitted, the name
of the first name_expr tuple is used.
project : string, optional
A project name, used for making unique preprocessor instructions.
[default: "project"]
to_files : bool, optional
When True, the code will be written to one or more files with the
given prefix, otherwise strings with the names and contents of
these files are returned. [default: False]
header : bool, optional
When True, a header is written on top of each source file.
[default: True]
empty : bool, optional
When True, empty lines are used to structure the code.
[default: True]
argument_sequence : iterable, optional
Sequence of arguments for the routine in a preferred order. A
CodeGenError is raised if required arguments are missing.
Redundant arguments are used without warning. If omitted,
arguments will be ordered alphabetically, but with all input
arguments first, and then output or in-out arguments.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
    standard : string, optional
        The language standard, if applicable. Currently this is only used to
        select between the C89 and C99 code generators when ``language`` is
        'C'.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
Examples
========
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... ("f", x+y*z), "C89", "test", header=False, empty=False)
>>> print(c_name)
test.c
>>> print(c_code)
#include "test.h"
#include <math.h>
double f(double x, double y, double z) {
double f_result;
f_result = x + y*z;
return f_result;
}
<BLANKLINE>
>>> print(h_name)
test.h
>>> print(c_header)
#ifndef PROJECT__TEST__H
#define PROJECT__TEST__H
double f(double x, double y, double z);
#endif
<BLANKLINE>
Another example using Equality objects to give named outputs. Here the
filename (prefix) is taken from the first (name, expr) pair.
>>> from sympy.abc import f, g
>>> from sympy import Eq
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])],
... "C99", header=False, empty=False)
>>> print(c_name)
myfcn.c
>>> print(c_code)
#include "myfcn.h"
#include <math.h>
double myfcn(double x, double y) {
double myfcn_result;
myfcn_result = x + y;
return myfcn_result;
}
void fcn2(double x, double y, double *f, double *g) {
(*f) = 2*x;
(*g) = y;
}
<BLANKLINE>
If the generated function(s) will be part of a larger project where various
global variables have been defined, the 'global_vars' option can be used
to remove the specified variables from the function signature
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(f_name, f_code), header] = codegen(
... ("f", x+y*z), "F95", header=False, empty=False,
... argument_sequence=(x, y), global_vars=(z,))
>>> print(f_code)
REAL*8 function f(x, y)
implicit none
REAL*8, intent(in) :: x
REAL*8, intent(in) :: y
f = x + y*z
end function
<BLANKLINE>
"""
# Initialize the code generator.
if language is None:
if code_gen is None:
raise ValueError("Need either language or code_gen")
else:
if code_gen is not None:
raise ValueError("You cannot specify both language and code_gen.")
code_gen = get_code_generator(language, project, standard, printer)
if isinstance(name_expr[0], str):
# single tuple is given, turn it into a singleton list with a tuple.
name_expr = [name_expr]
if prefix is None:
prefix = name_expr[0][0]
# Construct Routines appropriate for this code_gen from (name, expr) pairs.
routines = []
for name, expr in name_expr:
routines.append(code_gen.routine(name, expr, argument_sequence,
global_vars))
# Write the code.
return code_gen.write(routines, prefix, to_files, header, empty)
def make_routine(name, expr, argument_sequence=None,
global_vars=None, language="F95"):
"""A factory that makes an appropriate Routine from an expression.
Parameters
==========
name : string
The name of this routine in the generated code.
expr : expression or list/tuple of expressions
A SymPy expression that the Routine instance will represent. If
given a list or tuple of expressions, the routine will be
considered to have multiple return values and/or output arguments.
argument_sequence : list or tuple, optional
List arguments for the routine in a preferred order. If omitted,
the results are language dependent, for example, alphabetical order
or in the same order as the given expressions.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
language : string, optional
Specify a target language. The Routine itself should be
language-agnostic but the precise way one is created, error
checking, etc depend on the language. [default: "F95"].
Notes
=====
A decision about whether to use output arguments or return values is made
depending on both the language and the particular mathematical expressions.
For an expression of type Equality, the left hand side is typically made
into an OutputArgument (or perhaps an InOutArgument if appropriate).
    Otherwise, typically, the calculated expression is made a return value of
the routine.
Examples
========
>>> from sympy.utilities.codegen import make_routine
>>> from sympy.abc import x, y, f, g
>>> from sympy import Eq
>>> r = make_routine('test', [Eq(f, 2*x), Eq(g, x + y)])
>>> [arg.result_var for arg in r.results]
[]
>>> [arg.name for arg in r.arguments]
[x, y, f, g]
>>> [arg.name for arg in r.result_variables]
[f, g]
>>> r.local_vars
set()
Another more complicated example with a mixture of specified and
automatically-assigned names. Also has Matrix output.
>>> from sympy import Matrix
>>> r = make_routine('fcn', [x*y, Eq(f, 1), Eq(g, x + g), Matrix([[x, 2]])])
>>> [arg.result_var for arg in r.results] # doctest: +SKIP
[result_5397460570204848505]
>>> [arg.expr for arg in r.results]
[x*y]
>>> [arg.name for arg in r.arguments] # doctest: +SKIP
[x, y, f, g, out_8598435338387848786]
We can examine the various arguments more closely:
>>> from sympy.utilities.codegen import (InputArgument, OutputArgument,
... InOutArgument)
>>> [a.name for a in r.arguments if isinstance(a, InputArgument)]
[x, y]
>>> [a.name for a in r.arguments if isinstance(a, OutputArgument)] # doctest: +SKIP
[f, out_8598435338387848786]
>>> [a.expr for a in r.arguments if isinstance(a, OutputArgument)]
[1, Matrix([[x, 2]])]
>>> [a.name for a in r.arguments if isinstance(a, InOutArgument)]
[g]
>>> [a.expr for a in r.arguments if isinstance(a, InOutArgument)]
[g + x]
"""
# initialize a new code generator
code_gen = get_code_generator(language)
return code_gen.routine(name, expr, argument_sequence, global_vars)
|
c70769c3da556d36f5992fa2b052a7375e0b7bf0d142ddb5394e18767f5a4080 | """Module for compiling codegen output, and wrap the binary for use in
Python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, e.g.,
>>> from sympy.abc import x,y
>>> expr = (x - y)**25
>>> flat = expr.expand()
>>> binary_callable = autowrap(flat)
>>> binary_callable(2, 3)
-1.0
Although a SymPy user might primarily be interested in working with
mathematical expressions and not in the details of wrapping tools
needed to evaluate such expressions efficiently in numerical form,
the user cannot do so without some understanding of the
limits in the target language. For example, the expanded expression
contains large coefficients which result in loss of precision when
computing the expression:
>>> binary_callable(3, 2)
0.0
>>> binary_callable(4, 5), binary_callable(5, 4)
(-22925376.0, 25165824.0)
Wrapping the unexpanded expression gives the expected behavior:
>>> e = autowrap(expr)
>>> e(4, 5), e(5, 4)
(-1.0, 1.0)
The callable returned from autowrap() is a binary Python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic Python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
do not need the binaries for another project.
"""
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError, check_output
from string import Template
from warnings import warn
from sympy.core.cache import cacheit
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument,
InputArgument, CodeGenArgumentListError,
Result, ResultBase, C99CodeGen)
from sympy.utilities.iterables import iterable
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'),
'modules': ('numpy',)}
class CodeWrapError(Exception):
pass
class CodeWrapper:
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=None):
helpers = helpers or []
if self.filepath:
workdir = os.path.abspath(self.filepath)
else:
workdir = tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode('utf-8')))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = """\
from setuptools import setup
from setuptools import Extension
from Cython.Build import cythonize
cy_opts = {cythonize_options}
{np_import}
ext_mods = [Extension(
{ext_args},
include_dirs={include_dirs},
library_dirs={library_dirs},
libraries={libraries},
extra_compile_args={extra_compile_args},
extra_link_args={extra_link_args}
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
"""
_cythonize_options = {'compiler_directives':{'language_level' : "3"}}
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
std_compile_flag = '-std=c99'
def __init__(self, *args, **kwargs):
"""Instantiates a Cython code wrapper.
The following optional parameters get passed to ``setuptools.Extension``
for building the Python extension module. Read its documentation to
learn more.
Parameters
==========
include_dirs : [list of strings]
A list of directories to search for C/C++ header files (in Unix
form for portability).
library_dirs : [list of strings]
A list of directories to search for C/C++ libraries at link time.
libraries : [list of strings]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [list of strings]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could be
anything. Note that the attribute ``std_compile_flag`` will be
appended to this list.
extra_link_args : [list of strings]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create
a new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
cythonize_options : [dictionary]
Keyword arguments passed on to cythonize.
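
        Examples
        ========

        A minimal sketch (the flag value is illustrative; actually building
        the extension additionally requires Cython and a C compiler):

        >>> from sympy.utilities.codegen import C99CodeGen
        >>> from sympy.utilities.autowrap import CythonCodeWrapper
        >>> wrapper = CythonCodeWrapper(C99CodeGen(),
        ...     extra_compile_args=['-ffast-math'])  # doctest: +SKIP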
"""
self._include_dirs = kwargs.pop('include_dirs', [])
self._library_dirs = kwargs.pop('library_dirs', [])
self._libraries = kwargs.pop('libraries', [])
self._extra_compile_args = kwargs.pop('extra_compile_args', [])
self._extra_compile_args.append(self.std_compile_flag)
self._extra_link_args = kwargs.pop('extra_link_args', [])
self._cythonize_options = kwargs.pop('cythonize_options', self._cythonize_options)
self._need_numpy = False
super().__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine, build_dir=os.curdir):
# NOTE : build_dir is used for testing purposes.
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(os.path.join(build_dir, pyxfilename), 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
self._include_dirs.append('np.get_include()')
else:
np_import = ''
with open(os.path.join(build_dir, 'setup.py'), 'w') as f:
includes = str(self._include_dirs).replace("'np.get_include()'",
'np.get_include()')
f.write(self.setup_template.format(
ext_args=", ".join(ext_args),
np_import=np_import,
include_dirs=includes,
library_dirs=self._library_dirs,
libraries=self._libraries,
extra_compile_args=self._extra_compile_args,
extra_link_args=self._extra_link_args,
cythonize_options=self._cythonize_options
))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with Python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = [self._string_var(v) for v in val]
local_decs.append(" cdef {} = {}.shape[{}]".format(proto, mat, ind))
local_decs.extend([" cdef {}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([self._string_var(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if
isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=self._string_var(arg.name))
else:
return "%s %s" % (t, self._string_var(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(self._string_var(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{}*> {}.data".format(t, self._string_var(arg.name))
elif isinstance(arg, ResultBase):
return "&{}".format(self._string_var(arg.name))
else:
return self._string_var(arg.name)
def _string_var(self, var):
printer = self.generator.printer.doprint
return printer(var)
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the f2py '
'backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super().__init__(*args, **kwargs)
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
# Here we define a lookup of backends -> tuples of languages. If a backend
# supports more than one language, the most preferable language is listed
# first.
_lang_lookup = {'CYTHON': ('C99', 'C89', 'C'),
'F2PY': ('F95',),
'NUMPY': ('C99', 'C89', 'C'),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
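# For example (illustrative), _infer_language('cython') returns 'C99' and
# _infer_language('f2py') returns 'F95', per the lookup table above.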
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {} and language {} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(expr, language=None, backend='f2py', tempdir=None, args=None,
flags=None, verbose=False, helpers=None, code_gen=None, **kwargs):
"""Generates Python callable binaries based on the math expression.
Parameters
==========
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An ordered iterable of symbols. Specifies the argument sequence for the
function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : 3-tuple or iterable of 3-tuples, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be
passed in via ``helpers``. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be 3-tuples with (<function_name>, <sympy_expression>,
<argument_tuple>). It is mandatory to supply an argument sequence to
helper routines.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
include_dirs : [string]
A list of directories to search for C/C++ header files (in Unix form
for portability).
library_dirs : [string]
A list of directories to search for C/C++ libraries at link time.
libraries : [string]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and compilers
where "command line" makes sense, this is typically a list of
command-line arguments, but for other platforms it could be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create a
new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
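
    A sketch of supplying a helper routine (names are illustrative). The
    helper expression found inside ``expr`` is replaced by a call to the
    compiled helper:

    >>> from sympy import cos
    >>> expr = 2*cos(x + y)**2 + cos(x + y)
    >>> g = autowrap(expr, helpers=[('c_xy', cos(x + y), (x, y))])  # doctest: +SKIP
    >>> g(0.0, 0.0)  # doctest: +SKIP
    3.0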
"""
if language:
if not isinstance(language, type):
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
    # Two cases: 1) helpers is an iterable of 3-tuples, or 2) helpers is a
    # single 3-tuple.
if iterable(helpers) and len(helpers) != 0 and iterable(helpers[0]):
helpers = helpers if helpers else ()
else:
helpers = [helpers] if helpers else ()
args = list(args) if iterable(args, exclude=set) else args
if code_gen is None:
code_gen = get_code_generator(language, "autowrap")
CodeWrapperClass = {
'F2PY': F2PyCodeWrapper,
'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper
}[backend.upper()]
code_wrapper = CodeWrapperClass(code_gen, tempdir, flags if flags else (),
verbose, **kwargs)
helps = []
for name_h, expr_h, args_h in helpers:
helps.append(code_gen.routine(name_h, expr_h, args_h))
for name_h, expr_h, args_h in helpers:
if expr.has(expr_h):
name_h = binary_function(name_h, expr_h, backend='dummy')
expr = expr.subs(expr_h, name_h(*args_h))
try:
routine = code_gen.routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = code_gen.routine('autofunc', expr, args + new_args)
return code_wrapper.wrap_code(routine, helpers=helps)
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a SymPy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
Parameters
==========
symfunc : SymPy Function
The function to bind the callable to.
expr : SymPy Expression
The expression used to generate the function.
kwargs : dict
Any kwargs accepted by autowrap.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
${outcalls}
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
from setuptools.extension import Extension
from setuptools import setup
from numpy import get_include
if __name__ == "__main__":
setup(ext_modules=[
Extension('${module}',
sources=['${module}.c', '${filename}.c'],
include_dirs=[get_include()])])
""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the numpy'
' backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super().__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def wrap_code(self, routines, helpers=None):
# This routine overrides CodeWrapper because we can't assume funcname == routines[0].name
# Therefore we have to break the CodeWrapper private API.
# There isn't an obvious way to extend multi-expr support to
# the other autowrap backends, so we limit this change to ufuncify.
helpers = helpers if helpers is not None else []
# We just need a consistent name
funcname = 'wrapped_' + str(id(routines) + id(helpers))
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routines, helpers)
self._prepare_files(routines, funcname)
self._process_files(routines)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, funcname)
def _generate_code(self, main_routines, helper_routines):
all_routines = main_routines + helper_routines
self.generator.write(
all_routines, self.filename, True, self.include_header,
self.include_empty)
def _prepare_files(self, routines, funcname):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c(routines, f, self.filename, funcname=funcname)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix, funcname=None):
"""Write a C file with Python wrappers
This file contains all the definitions of the routines in c code.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
funcname
Name of the main function to be returned.
"""
if funcname is None:
if len(routines) == 1:
funcname = routines[0].name
else:
msg = 'funcname must be specified for multiple output routines'
raise ValueError(msg)
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
name = funcname
# Partition the C function arguments into categories
# Here we assume all routines accept the same arguments
r_index = 0
py_in, _ = self._partition_args(routines[0].arguments)
n_in = len(py_in)
n_out = len(routines)
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
step_incs.extend([form.format('out', i, i) for i in range(n_out)])
step_increments = '\n '.join(step_incs)
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
outcalls = [_ufunc_outcalls.substitute(
outnum=i, call_args=call_args, funcname=routines[i].name) for i in
range(n_out)]
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types,
outcalls='\n '.join(outcalls))
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None, **kwargs):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
==========
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element-wise operation.
language : string, optional
If supplied (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in
the specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can
be helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If
the main expression needs to call a specialized function it should
be put in the ``helpers`` iterable. Autowrap will then make sure
that the compiled main expression can link to the helper routine.
Items should be tuples with (<function_name>, <sympy_expression>,
<arguments>). It is mandatory to supply an argument sequence to
helper routines.
kwargs : dict
These kwargs will be passed to autowrap if the `f2py` or `cython`
backend is used and ignored if the `numpy` backend is used.
Notes
=====
The default backend ('numpy') will create actual instances of
``numpy.ufunc``. These support n-dimensional broadcasting and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
==========
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
<class 'numpy.ufunc'>
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the 'f2py' and 'cython' backends, inputs are required to be equal length
1-dimensional arrays. The 'f2py' backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py')
>>> f_fortran(1, 2)
array([ 3.])
>>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([ 2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
>>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
# maxargs is set by numpy compile-time constant NPY_MAXARGS
# If a future version of numpy modifies or removes this restriction
# this variable should be changed or removed
maxargs = 32
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir,
flags, verbose)
if not isinstance(expr, (list, tuple)):
expr = [expr]
if len(expr) == 0:
raise ValueError('Expression iterable has zero length')
if len(expr) + len(args) > maxargs:
msg = ('Cannot create ufunc with more than {0} total arguments: '
'got {1} in, {2} out')
raise ValueError(msg.format(maxargs, len(args), len(expr)))
routines = [make_routine('autofunc{}'.format(idx), exprx, args) for
idx, exprx in enumerate(expr)]
return code_wrapper.wrap_code(routines, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy('y'))
m = Dummy('m', integer=True)
i = Idx(Dummy('i', integer=True), m)
f_dummy = Dummy('f')
f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers, **kwargs)
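# Rough sketch of the non-numpy path above (names are generated dummies, so
# this is only approximate): ufuncify((x,), sin(x), backend='f2py') ends up
# calling roughly autowrap(Eq(y[i], f(x_[i])), ...), where f is an
# implemented_function wrapping Lambda((x,), sin(x)) and y, x_, i are the
# IndexedBase/Idx objects created just before the call.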
|
23c82de7e34d442a9ed152d5fca3e7369a91d1ebc52a3e191e1d9068c0bdd550 | """
This module provides convenient functions to transform SymPy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import annotations
from typing import Any
import builtins
import inspect
import keyword
import textwrap
import linecache
# Required despite static analysis claiming it is not used
from sympy.external import import_module # noqa:F401
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import (is_sequence, iterable,
NotIterable, flatten)
from sympy.utilities.misc import filldedent
__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
MATH_DEFAULT: dict[str, Any] = {}
MPMATH_DEFAULT: dict[str, Any] = {}
NUMPY_DEFAULT: dict[str, Any] = {"I": 1j}
SCIPY_DEFAULT: dict[str, Any] = {"I": 1j}
CUPY_DEFAULT: dict[str, Any] = {"I": 1j}
JAX_DEFAULT: dict[str, Any] = {"I": 1j}
TENSORFLOW_DEFAULT: dict[str, Any] = {}
SYMPY_DEFAULT: dict[str, Any] = {}
NUMEXPR_DEFAULT: dict[str, Any] = {}
# These are the namespaces the lambda functions will use.
# These are separate from the names above because they are modified
# throughout this file, whereas the defaults should remain unmodified.
MATH = MATH_DEFAULT.copy()
MPMATH = MPMATH_DEFAULT.copy()
NUMPY = NUMPY_DEFAULT.copy()
SCIPY = SCIPY_DEFAULT.copy()
CUPY = CUPY_DEFAULT.copy()
JAX = JAX_DEFAULT.copy()
TENSORFLOW = TENSORFLOW_DEFAULT.copy()
SYMPY = SYMPY_DEFAULT.copy()
NUMEXPR = NUMEXPR_DEFAULT.copy()
# Mappings between SymPy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses
# of Function to automatically evalf.
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableDenseMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci",
"RisingFactorial": "rf",
"FallingFactorial": "ff",
"betainc_regularized": "betainc",
}
NUMPY_TRANSLATIONS: dict[str, str] = {
"Heaviside": "heaviside",
}
SCIPY_TRANSLATIONS: dict[str, str] = {}
CUPY_TRANSLATIONS: dict[str, str] = {}
JAX_TRANSLATIONS: dict[str, str] = {}
TENSORFLOW_TRANSLATIONS: dict[str, str] = {}
NUMEXPR_TRANSLATIONS: dict[str, str] = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)),
"scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import scipy; import numpy; from scipy.special import *",)),
"cupy": (CUPY, CUPY_DEFAULT, CUPY_TRANSLATIONS, ("import cupy",)),
"jax": (JAX, JAX_DEFAULT, JAX_TRANSLATIONS, ("import jax",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload=False):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "scipy", "cupy", "jax", "tensorflow", "sympy", or
"numexpr".
These dictionaries map names of SymPy functions to their equivalents in
other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module cannot be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"Cannot import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a SymPy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
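# A minimal usage sketch, assuming only the standard-library 'math' module:
#   >>> _import('math')
#   >>> import math
#   >>> MATH['ceiling'] is math.ceil   # translated via MATH_TRANSLATIONS
#   True
#   >>> MATH['Abs'] is abs             # falls back to the builtin abs
#   True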
# Used for dynamically generated filenames that are inserted into the
# linecache.
_lambdify_generated_counter = 1
@doctest_depends_on(modules=('numpy', 'scipy', 'tensorflow',), python_version=(3,))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=False, cse=False):
"""Convert a SymPy expression into a function that allows for fast
numeric evaluation.
.. warning::
This function uses ``exec``, and thus should not be used on
unsanitized input.
.. deprecated:: 1.7
Passing a set for the *args* parameter is deprecated as sets are
unordered. Use an ordered iterable such as a list or tuple.
Explanation
===========
For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
equivalent NumPy function that numerically evaluates it:
>>> from sympy import sin, cos, symbols, lambdify
>>> import numpy as np
>>> x = symbols('x')
>>> expr = sin(x) + cos(x)
>>> expr
sin(x) + cos(x)
>>> f = lambdify(x, expr, 'numpy')
>>> a = np.array([1, 2])
>>> f(a)
[1.38177329 0.49315059]
The primary purpose of this function is to provide a bridge from SymPy
expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
and tensorflow. In general, SymPy functions do not work with objects from
other libraries, such as NumPy arrays, and functions from numeric
libraries like NumPy or mpmath do not work on SymPy expressions.
``lambdify`` bridges the two by converting a SymPy expression to an
equivalent numeric function.
The basic workflow with ``lambdify`` is to first create a SymPy expression
representing whatever mathematical function you wish to evaluate. This
should be done using only SymPy functions and expressions. Then, use
``lambdify`` to convert this to an equivalent function for numerical
evaluation. For instance, above we created ``expr`` using the SymPy symbol
``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
Parameters
==========
args : List[Symbol]
A variable or a list of variables whose nesting represents the
nesting of the arguments that will be passed to the function.
Variables can be symbols, undefined functions, or matrix symbols.
>>> from sympy import Eq
>>> from sympy.abc import x, y, z
The list of variables should match the structure of how the
arguments will be passed to the function. Simply enclose the
parameters as they will be passed in a list.
To call a function like ``f(x)`` then ``[x]``
should be the first argument to ``lambdify``; for this
case a single ``x`` can also be used:
>>> f = lambdify(x, x + 1)
>>> f(1)
2
>>> f = lambdify([x], x + 1)
>>> f(1)
2
To call a function like ``f(x, y)`` then ``[x, y]`` will
be the first argument of the ``lambdify``:
>>> f = lambdify([x, y], x + y)
>>> f(1, 1)
2
To call a function with a single 3-element tuple like
``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
argument of the ``lambdify``:
>>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
>>> f((3, 4, 5))
True
If two args will be passed and the first is a scalar but
the second is a tuple with two arguments then the items
in the list should match that structure:
>>> f = lambdify([x, (y, z)], x + y + z)
>>> f(1, (2, 3))
6
expr : Expr
An expression, list of expressions, or matrix to be evaluated.
Lists may be nested.
If the expression is a list, the output will also be a list.
>>> f = lambdify(x, [x, [x + 1, x + 2]])
>>> f(1)
[1, [2, 3]]
If it is a matrix, an array will be returned (for the NumPy module).
>>> from sympy import Matrix
>>> f = lambdify(x, Matrix([x, x + 1]))
>>> f(1)
[[1]
[2]]
Note that the argument order here (variables then expression) is used
to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
(roughly) like ``lambda x: expr``
(see :ref:`lambdify-how-it-works` below).
modules : str, optional
Specifies the numeric library to use.
If not specified, *modules* defaults to:
- ``["scipy", "numpy"]`` if SciPy is installed
- ``["numpy"]`` if only NumPy is installed
- ``["math", "mpmath", "sympy"]`` if neither is installed.
That is, SymPy functions are replaced as far as possible by
either ``scipy`` or ``numpy`` functions if available, and Python's
standard library ``math``, or ``mpmath`` functions otherwise.
*modules* can be one of the following types:
- The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the
corresponding printer and namespace mapping for that module.
- A module (e.g., ``math``). This uses the global namespace of the
module. If the module is one of the above known modules, it will
also use the corresponding printer and namespace mapping
(i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
- A dictionary that maps names of SymPy functions to arbitrary
functions
(e.g., ``{'sin': custom_sin}``).
- A list that contains a mix of the arguments above, with higher
priority given to entries appearing first
(e.g., to use the NumPy module but override the ``sin`` function
with a custom version, you can use
``[{'sin': custom_sin}, 'numpy']``).
dummify : bool, optional
Whether or not the variables in the provided expression that are not
valid Python identifiers are substituted with dummy symbols.
This allows for undefined functions like ``Function('f')(t)`` to be
supplied as arguments. By default, the variables are only dummified
if they are not valid Python identifiers.
Set ``dummify=True`` to replace all arguments with dummy symbols
(if ``args`` is not a string) - for example, to ensure that the
arguments do not redefine any built-in names.
cse : bool, or callable, optional
Large expressions can be computed more efficiently when
common subexpressions are identified and precomputed before
being used multiple times. Finding the subexpressions will make
creation of the 'lambdify' function slower, however.
When ``True``, ``sympy.simplify.cse`` is used; the default is ``False``.
Alternatively, a callable matching the ``cse`` signature may be passed.
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
``lambdify`` can be used to translate SymPy expressions into mpmath
functions. This may be preferable to using ``evalf`` (which uses mpmath on
the backend) in some cases.
>>> f = lambdify(x, sin(x), 'mpmath')
>>> f(1)
0.8414709848078965
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
The ``flatten`` function can be used to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in ``expr`` can also carry their own numerical
implementations, in a callable attached to the ``_imp_`` attribute. This
can be used with undefined functions using the ``implemented_function``
factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
Usage with Tensorflow:
>>> import tensorflow as tf
>>> from sympy import Max, sin, lambdify
>>> from sympy.abc import x
>>> f = Max(x, sin(x))
>>> func = lambdify(x, f, 'tensorflow')
After tensorflow v2, eager execution is enabled by default.
If you want results that are consistent across tensorflow v1 and v2,
as in this example, run this line.
>>> tf.compat.v1.enable_eager_execution()
If you have eager execution enabled, you can get the result out
immediately as you can use numpy.
If you pass tensorflow objects, you may get an ``EagerTensor``
object instead of value.
>>> result = func(tf.constant(1.0))
>>> print(result)
tf.Tensor(1.0, shape=(), dtype=float32)
>>> print(result.__class__)
<class 'tensorflow.python.framework.ops.EagerTensor'>
You can use ``.numpy()`` to get the numpy value of the tensor.
>>> result.numpy()
1.0
>>> var = tf.Variable(2.0)
>>> result = func(var) # also works for tf.Variable and tf.Placeholder
>>> result.numpy()
2.0
And it works with any shape array.
>>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> result = func(tensor)
>>> result.numpy()
[[1. 2.]
[3. 4.]]
Notes
=====
- For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
``implemented_function`` and user defined subclasses of Function. If
specified, numexpr may be the only option in modules. The official list
of numexpr functions can be found at:
https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
- In the above examples, the generated functions can accept scalar
values or numpy arrays as arguments. However, in some cases
the generated function relies on the input being a numpy array:
>>> import numpy
>>> from sympy import Piecewise
>>> from sympy.testing.pytest import ignore_warnings
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
>>> with ignore_warnings(RuntimeWarning):
... f(numpy.array([-1, 0, 1, 2]))
[-1. 0. 1. 0.5]
>>> f(0)
Traceback (most recent call last):
...
ZeroDivisionError: division by zero
In such cases, the input should be wrapped in a numpy array:
>>> with ignore_warnings(RuntimeWarning):
... float(f(numpy.array([0])))
0.0
Or if numpy functionality is not required another module can be used:
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
>>> f(0)
0
.. _lambdify-how-it-works:
How it works
============
When using this function, it helps a great deal to have an idea of what it
is doing. At its core, lambdify is nothing more than a namespace
translation, on top of a special printer that makes some corner cases work
properly.
To understand lambdify, first we must properly understand how Python
namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
with
.. code:: python
# sin_cos_sympy.py
from sympy.functions.elementary.trigonometric import (cos, sin)
def sin_cos(x):
return sin(x) + cos(x)
and one called ``sin_cos_numpy.py`` with
.. code:: python
# sin_cos_numpy.py
from numpy import sin, cos
def sin_cos(x):
return sin(x) + cos(x)
The two files define an identical function ``sin_cos``. However, in the
first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
``cos``. In the second, they are defined as the NumPy versions.
If we were to import the first file and use the ``sin_cos`` function, we
would get something like
>>> from sin_cos_sympy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
cos(1) + sin(1)
On the other hand, if we imported ``sin_cos`` from the second file, we
would get
>>> from sin_cos_numpy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
1.38177329068
In the first case we got a symbolic output, because it used the symbolic
``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
used was not inherent to the ``sin_cos`` function definition. Both
``sin_cos`` definitions are exactly the same. Rather, it was based on the
names defined at the module where the ``sin_cos`` function was defined.
The key point here is that when a function in Python references a name that
is not defined in the function, that name is looked up in the "global"
namespace of the module where that function is defined.
Now, in Python, we can emulate this behavior without actually writing a
file to disk using the ``exec`` function. ``exec`` takes a string
containing a block of Python code, and a dictionary that should contain
the global variables of the module. It then executes the code "in" that
dictionary, as if it were the module globals. The following is equivalent
to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
>>> import sympy
>>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
cos(1) + sin(1)
and similarly with ``sin_cos_numpy``:
>>> import numpy
>>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
1.38177329068
So now we can get an idea of how ``lambdify`` works. The name "lambdify"
comes from the fact that we can think of something like ``lambdify(x,
sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
the symbols argument is first in ``lambdify``, as opposed to most SymPy
functions where it comes after the expression: to better mimic the
``lambda`` keyword.
``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
1. Converts it to a string
2. Creates a module globals dictionary based on the modules that are
passed in (by default, it uses the NumPy module)
3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
list of variables separated by commas, and ``{expr}`` is the string
created in step 1., then ``exec``s that string with the module globals
namespace and returns ``func``.
In fact, functions returned by ``lambdify`` support inspection. So you can
see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
are using IPython or the Jupyter notebook.
>>> f = lambdify(x, sin(x) + cos(x))
>>> import inspect
>>> print(inspect.getsource(f))
def _lambdifygenerated(x):
return sin(x) + cos(x)
This shows us the source code of the function, but not the namespace it
was defined in. We can inspect that by looking at the ``__globals__``
attribute of ``f``:
>>> f.__globals__['sin']
<ufunc 'sin'>
>>> f.__globals__['cos']
<ufunc 'cos'>
>>> f.__globals__['sin'] is numpy.sin
True
This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
``numpy.sin`` and ``numpy.cos``.
Note that there are some convenience layers in each of these steps, but at
the core, this is how ``lambdify`` works. Step 1 is done using the
``LambdaPrinter`` printers defined in the printing module (see
:mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
to define how they should be converted to a string for different modules.
You can change which printer ``lambdify`` uses by passing a custom printer
in to the ``printer`` argument.
Step 2 is augmented by certain translations. There are default
translations for each module, but you can provide your own by passing a
list to the ``modules`` argument. For instance,
>>> def mysin(x):
... print('taking the sin of', x)
... return numpy.sin(x)
...
>>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
>>> f(1)
taking the sin of 1
0.8414709848078965
The globals dictionary is generated from the list by merging the
dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
merging is done so that earlier items take precedence, which is why
``mysin`` is used above instead of ``numpy.sin``.
If you want to modify the way ``lambdify`` works for a given function, it
is usually easiest to do so by modifying the globals dictionary as such.
In more complicated cases, it may be necessary to create and pass in a
custom printer.
Finally, step 3 is augmented with certain convenience operations, such as
the addition of a docstring.
Understanding how ``lambdify`` works can make it easier to avoid certain
gotchas when using it. For instance, a common mistake is to create a
lambdified function for one module (say, NumPy), and pass it objects from
another (say, a SymPy expression).
For instance, say we create
>>> from sympy.abc import x
>>> f = lambdify(x, x + 1, 'numpy')
Now if we pass in a NumPy array, we get that array plus 1
>>> import numpy
>>> a = numpy.array([1, 2])
>>> f(a)
[2 3]
But what happens if you make the mistake of passing in a SymPy expression
instead of a NumPy array:
>>> f(x + 1)
x + 2
This worked, but it was only by accident. Now take a different lambdified
function:
>>> from sympy import sin
>>> g = lambdify(x, x + sin(x), 'numpy')
This works as expected on NumPy arrays:
>>> g(a)
[1.84147098 2.90929743]
But if we try to pass in a SymPy expression, it fails
>>> try:
... g(x + 1)
... # NumPy release after 1.17 raises TypeError instead of
... # AttributeError
... except (AttributeError, TypeError):
... raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError:
Now, let's look at what happened. The reason this fails is that ``g``
calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
know how to operate on a SymPy object. **As a general rule, NumPy
functions do not know how to operate on SymPy expressions, and SymPy
functions do not know how to operate on NumPy arrays. This is why lambdify
exists: to provide a bridge between SymPy and NumPy.**
However, why is it that ``f`` did work? That's because ``f`` does not call
any functions, it only adds 1. So the resulting function that is created,
``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
namespace it is defined in. Thus it works, but only by accident. A future
version of ``lambdify`` may remove this behavior.
Be aware that certain implementation details described here may change in
future versions of SymPy. The API of passing in custom modules and
printers will not change, but the details of how a lambda function is
created may change. However, the basic idea will remain the same, and
understanding it will be helpful to understanding the behavior of
lambdify.
**In general: you should create lambdified functions for one module (say,
NumPy), and only pass it input types that are compatible with that module
(say, NumPy arrays).** Remember that by default, if the ``module``
argument is not provided, ``lambdify`` creates functions using the NumPy
and SciPy namespaces.
"""
from sympy.core.symbol import Symbol
from sympy.core.expr import Expr
# If the user hasn't specified any modules, use what is available.
if modules is None:
try:
_import("scipy")
except ImportError:
try:
_import("numpy")
except ImportError:
# Neither scipy nor numpy is available; fall back to math/mpmath/sympy.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
else:
modules = ["numpy", "scipy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
# Try to extract symbols from the expression.
# Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if printer is None:
if _module_present('mpmath', namespaces):
from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore
elif _module_present('scipy', namespaces):
from sympy.printing.numpy import SciPyPrinter as Printer # type: ignore
elif _module_present('numpy', namespaces):
from sympy.printing.numpy import NumPyPrinter as Printer # type: ignore
elif _module_present('cupy', namespaces):
from sympy.printing.numpy import CuPyPrinter as Printer # type: ignore
elif _module_present('jax', namespaces):
from sympy.printing.numpy import JaxPrinter as Printer # type: ignore
elif _module_present('numexpr', namespaces):
from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore
elif _module_present('tensorflow', namespaces):
from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore
elif _module_present('sympy', namespaces):
from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore
else:
from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore
user_functions = {}
for m in namespaces[::-1]:
if isinstance(m, dict):
for k in m:
user_functions[k] = k
printer = Printer({'fully_qualified_modules': False, 'inline': True,
'allow_unknown_functions': True,
'user_functions': user_functions})
if isinstance(args, set):
sympy_deprecation_warning(
"""
Passing the function arguments to lambdify() as a set is deprecated. This
leads to unpredictable results since sets are unordered. Instead, use a list
or tuple for the function arguments.
""",
deprecated_since_version="1.6.3",
active_deprecations_target="deprecated-lambdify-arguments-set",
)
# Get the names of the args, for creating a docstring
iterable_args = (args,) if isinstance(args, Expr) else args
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore
for n, var in enumerate(iterable_args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create the function definition code and execute it
funcname = '_lambdifygenerated'
if _module_present('tensorflow', namespaces):
funcprinter = _TensorflowEvaluatorPrinter(printer, dummify)
else:
funcprinter = _EvaluatorPrinter(printer, dummify)
if cse == True:
from sympy.simplify.cse_main import cse as _cse
cses, _expr = _cse(expr, list=False)
elif callable(cse):
cses, _expr = cse(expr)
else:
cses, _expr = (), expr
funcstr = funcprinter.doprint(funcname, iterable_args, _expr, cses=cses)
# Collect the module imports from the code printers.
imp_mod_lines = []
for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
for k in keys:
if k not in namespace:
ln = "from %s import %s" % (mod, k)
try:
exec(ln, {}, namespace)
except ImportError:
# Tensorflow 2.0 has issues with importing a specific
# function from its submodule.
# https://github.com/tensorflow/tensorflow/issues/33022
ln = "%s = %s.%s" % (k, mod, k)
exec(ln, {}, namespace)
imp_mod_lines.append(ln)
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
funclocals = {}
global _lambdify_generated_counter
filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
_lambdify_generated_counter += 1
c = compile(funcstr, filename, 'exec')
exec(c, namespace, funclocals)
# mtime has to be None or else linecache.checkcache will remove it
linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
func = funclocals[funcname]
# Apply the docstring
sig = "func({})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = (
"Created with lambdify. Signature:\n\n"
"{sig}\n\n"
"Expression:\n\n"
"{expr}\n\n"
"Source code:\n\n"
"{src}\n\n"
"Imported modules:\n\n"
"{imp_mods}"
).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
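# For example, _module_present('numpy', ['numpy', 'scipy']) is True, and a
# module object whose __name__ is 'numpy' in the list would match as well.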
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def _recursive_to_string(doprint, arg):
"""Functions in lambdify accept both SymPy types and non-SymPy types such as python
lists and tuples. This method ensures that we only call the doprint method of the
printer with SymPy types (so that the printer safely can use SymPy-methods)."""
from sympy.matrices.common import MatrixOperations
from sympy.core.basic import Basic
if isinstance(arg, (Basic, MatrixOperations)):
return doprint(arg)
elif iterable(arg):
if isinstance(arg, list):
left, right = "[", "]"
elif isinstance(arg, tuple):
left, right = "(", ",)"
else:
raise NotImplementedError("unhandled type: %s, %s" % (type(arg), arg))
return left +', '.join(_recursive_to_string(doprint, e) for e in arg) + right
elif isinstance(arg, str):
return arg
else:
return doprint(arg)
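# For example, _recursive_to_string(str, [1, (2,)]) returns '[1, (2,)]':
# lists keep '[...]' delimiters, tuples keep a trailing comma, and the
# printer is only called on the leaves.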
def lambdastr(args, expr, printer=None, dummify=None):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy.core.basic import Basic
from sympy.core.function import (Derivative, Function)
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
# replace these with Dummy symbols
if isinstance(args, (Function, Symbol, Derivative)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
expr = sympify(expr)
# dict/tuple are sympified to Basic
if isinstance(expr, Basic):
expr = expr.xreplace(dummies_dict)
# list is not sympified to Basic
elif isinstance(expr, list):
expr = [sub_expr(a, dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
def flat_indexes(iterable):
n = 0
for el in iterable:
if isiter(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
if dummify is None:
dummify = any(isinstance(a, Basic) and
a.atoms(Function, Derivative) for a in (
args if isiter(args) else [args]))
if isiter(args) and any(isiter(i) for i in args):
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
indexed_args = ','.join([
dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]])
for ind in flat_indexes(args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args)
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = _recursive_to_string(lambdarepr, expr)
return "lambda %s: (%s)" % (args, expr)
class _EvaluatorPrinter:
def __init__(self, printer=None, dummify=False):
self._dummify = dummify
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import LambdaPrinter
if printer is None:
printer = LambdaPrinter()
if inspect.isfunction(printer):
self._exprrepr = printer
else:
if inspect.isclass(printer):
printer = printer()
self._exprrepr = printer.doprint
#if hasattr(printer, '_print_Symbol'):
# symbolrepr = printer._print_Symbol
#if hasattr(printer, '_print_Dummy'):
# dummyrepr = printer._print_Dummy
# Used to print the generated function arguments in a standard way
self._argrepr = LambdaPrinter().doprint
def doprint(self, funcname, args, expr, *, cses=()):
"""
Returns the function definition code as a string.
"""
from sympy.core.symbol import Dummy
funcbody = []
if not iterable(args):
args = [args]
if cses:
subvars, subexprs = zip(*cses)
exprs = [expr] + list(subexprs)
argstrs, exprs = self._preprocess(args, exprs)
expr, subexprs = exprs[0], exprs[1:]
cses = zip(subvars, subexprs)
else:
argstrs, expr = self._preprocess(args, expr)
# Generate argument unpacking and final argument list
funcargs = []
unpackings = []
for argstr in argstrs:
if iterable(argstr):
funcargs.append(self._argrepr(Dummy()))
unpackings.extend(self._print_unpacking(argstr, funcargs[-1]))
else:
funcargs.append(argstr)
funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs))
# Wrap input arguments before unpacking
funcbody.extend(self._print_funcargwrapping(funcargs))
funcbody.extend(unpackings)
for s, e in cses:
if e is None:
funcbody.append('del {}'.format(s))
else:
funcbody.append('{} = {}'.format(s, self._exprrepr(e)))
str_expr = _recursive_to_string(self._exprrepr, expr)
if '\n' in str_expr:
str_expr = '({})'.format(str_expr)
funcbody.append('return {}'.format(str_expr))
funclines = [funcsig]
funclines.extend([' ' + line for line in funcbody])
return '\n'.join(funclines) + '\n'
@classmethod
def _is_safe_ident(cls, ident):
return isinstance(ident, str) and ident.isidentifier() \
and not keyword.iskeyword(ident)
def _preprocess(self, args, expr):
"""Preprocess args, expr to replace arguments that do not map
to valid Python identifiers.
Returns string form of args, and updated expr.
"""
from sympy.core.basic import Basic
from sympy.core.sorting import ordered
from sympy.core.function import (Derivative, Function)
from sympy.core.symbol import Dummy, uniquely_named_symbol
from sympy.matrices import DeferredVector
from sympy.core.expr import Expr
# Args of type Dummy can cause name collisions with args
# of type Symbol. Force dummify of everything in this
# situation.
dummify = self._dummify or any(
isinstance(arg, Dummy) for arg in flatten(args))
argstrs = [None]*len(args)
for arg, i in reversed(list(ordered(zip(args, range(len(args)))))):
if iterable(arg):
s, expr = self._preprocess(arg, expr)
elif isinstance(arg, DeferredVector):
s = str(arg)
elif isinstance(arg, Basic) and arg.is_symbol:
s = self._argrepr(arg)
if dummify or not self._is_safe_ident(s):
dummy = Dummy()
if isinstance(expr, Expr):
dummy = uniquely_named_symbol(
dummy.name, expr, modify=lambda s: '_' + s)
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
elif dummify or isinstance(arg, (Function, Derivative)):
dummy = Dummy()
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
else:
s = str(arg)
argstrs[i] = s
return argstrs, expr
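# Rough illustration (hypothetical session): preprocessing args such as
# (Function('f')(t),) replaces the function call with a Dummy, so the
# generated signature uses a safe identifier (e.g. '_Dummy_123') while the
# expression is rewritten consistently via _subexpr below.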
def _subexpr(self, expr, dummies_dict):
from sympy.matrices import DeferredVector
from sympy.core.sympify import sympify
expr = sympify(expr)
xreplace = getattr(expr, 'xreplace', None)
if xreplace is not None:
expr = xreplace(dummies_dict)
else:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()]
v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [self._subexpr(sympify(a), dummies_dict) for a in expr]
return expr
def _print_funcargwrapping(self, args):
"""Generate argument wrapping code.
args is the argument list of the generated function (strings).
Return value is a list of lines of code that will be inserted at
the beginning of the function definition.
"""
return []
def _print_unpacking(self, unpackto, arg):
"""Generate argument unpacking code.
arg is the function argument to be unpacked (a string), and
unpackto is a list or nested lists of the variable names (strings) to
unpack to.
"""
def unpack_lhs(lvalues):
return '[{}]'.format(', '.join(
unpack_lhs(val) if iterable(val) else val for val in lvalues))
return ['{} = {}'.format(unpack_lhs(unpackto), arg)]
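# For example, with unpackto=['x', ['y', 'z']] and arg='arg_1' (placeholder
# names) this yields ['[x, [y, z]] = arg_1'], i.e. ordinary nested sequence
# unpacking.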
class _TensorflowEvaluatorPrinter(_EvaluatorPrinter):
def _print_unpacking(self, lvalues, rvalue):
"""Generate argument unpacking code.
This method is used when the input value is not iterable,
but can be indexed (see issue #14655).
"""
def flat_indexes(elems):
n = 0
for el in elems:
if iterable(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind)))
for ind in flat_indexes(lvalues))
return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)]
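# For example, with lvalues=['x', ['y', 'z']] and rvalue='arg_1' (placeholder
# names) this produces ['[x, y, z] = [arg_1[0], arg_1[1][0], arg_1[1][1]]'],
# which also works for objects that are indexable but not iterable.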
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as ``expr``. Examples
include SymPy expressions, as well as tuples, lists and dicts that may
contain SymPy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within ``expr`` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# SymPy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If ``symfunc`` is an Undefined function, create a new function
with the same name and the implemented function attached.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import lambdify
>>> f = implemented_function('f', lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
kwargs = {}
if isinstance(symfunc, UndefinedFunction):
kwargs = symfunc._kwargs
symfunc = symfunc.__name__
if isinstance(symfunc, str):
# Keyword arguments to UndefinedFunction are added as attributes to
# the created class.
symfunc = UndefinedFunction(
symfunc, _imp_=staticmethod(implementation), **kwargs)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError(filldedent('''
symfunc should be either a string or
an UndefinedFunction instance.'''))
return symfunc
|
15d37b5390d849d0312bd6e22d4fb5585d5f862f7be389ba2abd5bf3a0bcf96e | from collections import defaultdict, OrderedDict
from itertools import (
chain, combinations, combinations_with_replacement, cycle, islice,
permutations, product
)
# For backwards compatibility
from itertools import product as cartes # noqa: F401
from operator import gt
# this is the logical location of these functions
from sympy.utilities.enumerative import (
multiset_partitions_taocp, list_visitor, MultisetPartitionTraverser)
from sympy.utilities.misc import as_int
from sympy.utilities.decorator import deprecated
def is_palindromic(s, i=0, j=None):
"""
Return True if the sequence is the same from left to right as it
is from right to left in the whole sequence (default) or in the
Python slice ``s[i: j]``; else False.
Examples
========
>>> from sympy.utilities.iterables import is_palindromic
>>> is_palindromic([1, 0, 1])
True
>>> is_palindromic('abcbb')
False
>>> is_palindromic('abcbb', 1)
False
Normal Python slicing is performed in place so there is no need to
create a slice of the sequence for testing:
>>> is_palindromic('abcbb', 1, -1)
True
>>> is_palindromic('abcbb', -4, -1)
True
See Also
========
sympy.ntheory.digits.is_palindromic: tests integers
"""
i, j, _ = slice(i, j).indices(len(s))
m = (j - i)//2
# if length is odd, middle element will be ignored
return all(s[i + k] == s[j - 1 - k] for k in range(m))
def flatten(iterable, levels=None, cls=None): # noqa: F811
"""
Recursively denest iterable containers.
>>> from sympy import flatten
>>> flatten([1, 2, 3])
[1, 2, 3]
>>> flatten([1, 2, [3]])
[1, 2, 3]
>>> flatten([1, [2, 3], [4, 5]])
[1, 2, 3, 4, 5]
>>> flatten([1.0, 2, (1, None)])
[1.0, 2, 1, None]
If you want to denest only a specified number of levels of
nested containers, then set ``levels`` flag to the desired
number of levels::
>>> ls = [[(-2, -1), (1, 2)], [(0, 0)]]
>>> flatten(ls, levels=1)
[(-2, -1), (1, 2), (0, 0)]
If cls argument is specified, it will only flatten instances of that
class, for example:
>>> from sympy import Basic, S
>>> class MyOp(Basic):
... pass
...
>>> flatten([MyOp(S(1), MyOp(S(2), S(3)))], cls=MyOp)
[1, 2, 3]
adapted from https://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
"""
from sympy.tensor.array import NDimArray
if levels is not None:
if not levels:
return iterable
elif levels > 0:
levels -= 1
else:
raise ValueError(
"expected non-negative number of levels, got %s" % levels)
if cls is None:
reducible = lambda x: is_sequence(x, set)
else:
reducible = lambda x: isinstance(x, cls)
result = []
for el in iterable:
if reducible(el):
if hasattr(el, 'args') and not isinstance(el, NDimArray):
el = el.args
result.extend(flatten(el, levels=levels, cls=cls))
else:
result.append(el)
return result
def unflatten(iter, n=2):
"""Group ``iter`` into tuples of length ``n``. Raise an error if
the length of ``iter`` is not a multiple of ``n``.
"""
if n < 1 or len(iter) % n:
raise ValueError('iter length is not a multiple of %i' % n)
return list(zip(*(iter[i::n] for i in range(n))))
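# For example, unflatten([1, 2, 3, 4]) == [(1, 2), (3, 4)] and
# unflatten([1, 2, 3, 4, 5, 6], 3) == [(1, 2, 3), (4, 5, 6)].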
def reshape(seq, how):
"""Reshape the sequence according to the template in ``how``.
Examples
========
>>> from sympy.utilities import reshape
>>> seq = list(range(1, 9))
>>> reshape(seq, [4]) # lists of 4
[[1, 2, 3, 4], [5, 6, 7, 8]]
>>> reshape(seq, (4,)) # tuples of 4
[(1, 2, 3, 4), (5, 6, 7, 8)]
>>> reshape(seq, (2, 2)) # tuples of 4
[(1, 2, 3, 4), (5, 6, 7, 8)]
>>> reshape(seq, (2, [2])) # (i, i, [i, i])
[(1, 2, [3, 4]), (5, 6, [7, 8])]
>>> reshape(seq, ((2,), [2])) # etc....
[((1, 2), [3, 4]), ((5, 6), [7, 8])]
>>> reshape(seq, (1, [2], 1))
[(1, [2, 3], 4), (5, [6, 7], 8)]
>>> reshape(tuple(seq), ([[1], 1, (2,)],))
(([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
>>> reshape(tuple(seq), ([1], 1, (2,)))
(([1], 2, (3, 4)), ([5], 6, (7, 8)))
>>> reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)])
[[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]]
"""
m = sum(flatten(how))
n, rem = divmod(len(seq), m)
if m < 0 or rem:
raise ValueError('template must sum to positive number '
'that divides the length of the sequence')
i = 0
container = type(how)
rv = [None]*n
for k in range(len(rv)):
_rv = []
for hi in how:
if isinstance(hi, int):
_rv.extend(seq[i: i + hi])
i += hi
else:
n = sum(flatten(hi))
hi_type = type(hi)
_rv.append(hi_type(reshape(seq[i: i + n], hi)[0]))
i += n
rv[k] = container(_rv)
return type(seq)(rv)
def group(seq, multiple=True):
"""
Splits a sequence into a list of lists of equal, adjacent elements.
Examples
========
>>> from sympy import group
>>> group([1, 1, 1, 2, 2, 3])
[[1, 1, 1], [2, 2], [3]]
>>> group([1, 1, 1, 2, 2, 3], multiple=False)
[(1, 3), (2, 2), (3, 1)]
>>> group([1, 1, 3, 2, 2, 1], multiple=False)
[(1, 2), (3, 1), (2, 2), (1, 1)]
See Also
========
multiset
"""
if not seq:
return []
current, groups = [seq[0]], []
for elem in seq[1:]:
if elem == current[-1]:
current.append(elem)
else:
groups.append(current)
current = [elem]
groups.append(current)
if multiple:
return groups
for i, current in enumerate(groups):
groups[i] = (current[0], len(current))
return groups
def _iproduct2(iterable1, iterable2):
'''Cartesian product of two possibly infinite iterables'''
it1 = iter(iterable1)
it2 = iter(iterable2)
elems1 = []
elems2 = []
sentinel = object()
def append(it, elems):
e = next(it, sentinel)
if e is not sentinel:
elems.append(e)
n = 0
append(it1, elems1)
append(it2, elems2)
while n <= len(elems1) + len(elems2):
for m in range(n-len(elems1)+1, len(elems2)):
yield (elems1[n-m], elems2[m])
n += 1
append(it1, elems1)
append(it2, elems2)
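# The pairs are emitted along anti-diagonals, so for two copies of
# itertools.count() the first few results come out roughly as
# (0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2), ...; the exact order is an
# implementation detail, but every pair is eventually produced.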
def iproduct(*iterables):
'''
Cartesian product of iterables.
Generator of the Cartesian product of iterables. This is analogous to
itertools.product except that it works with infinite iterables and will
yield any item from the infinite product eventually.
Examples
========
>>> from sympy.utilities.iterables import iproduct
>>> sorted(iproduct([1,2], [3,4]))
[(1, 3), (1, 4), (2, 3), (2, 4)]
With an infinite iterator:
>>> from sympy import S
>>> (3,) in iproduct(S.Integers)
True
>>> (3, 4) in iproduct(S.Integers, S.Integers)
True
.. seealso::
`itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_
'''
if len(iterables) == 0:
yield ()
return
elif len(iterables) == 1:
for e in iterables[0]:
yield (e,)
elif len(iterables) == 2:
yield from _iproduct2(*iterables)
else:
first, others = iterables[0], iterables[1:]
for ef, eo in _iproduct2(first, iproduct(*others)):
yield (ef,) + eo
def multiset(seq):
"""Return the hashable sequence in multiset form with values being the
multiplicity of the item in the sequence.
Examples
========
>>> from sympy.utilities.iterables import multiset
>>> multiset('mississippi')
{'i': 4, 'm': 1, 'p': 2, 's': 4}
See Also
========
group
"""
rv = defaultdict(int)
for s in seq:
rv[s] += 1
return dict(rv)
def ibin(n, bits=None, str=False):
"""Return a list of length ``bits`` corresponding to the binary value
of ``n`` with small bits to the right (last). If bits is omitted, the
length will be the number required to represent ``n``. If the bits are
desired in reversed order, use the ``[::-1]`` slice of the returned list.
If a sequence of all bits-length lists starting from ``[0, 0,..., 0]``
through ``[1, 1, ..., 1]`` are desired, pass a non-integer for bits, e.g.
``'all'``.
If the bit *string* is desired pass ``str=True``.
Examples
========
>>> from sympy.utilities.iterables import ibin
>>> ibin(2)
[1, 0]
>>> ibin(2, 4)
[0, 0, 1, 0]
If all lists corresponding to 0 to 2**n - 1 are desired, pass a non-integer
for bits:
>>> bits = 2
>>> for i in ibin(2, 'all'):
... print(i)
(0, 0)
(0, 1)
(1, 0)
(1, 1)
If a bit string is desired of a given length, use str=True:
>>> n = 123
>>> bits = 10
>>> ibin(n, bits, str=True)
'0001111011'
>>> ibin(n, bits, str=True)[::-1] # small bits left
'1101111000'
>>> list(ibin(3, 'all', str=True))
['000', '001', '010', '011', '100', '101', '110', '111']
"""
if n < 0:
raise ValueError("negative numbers are not allowed")
n = as_int(n)
if bits is None:
bits = 0
else:
try:
bits = as_int(bits)
except ValueError:
bits = -1
else:
if n.bit_length() > bits:
raise ValueError(
"`bits` must be >= {}".format(n.bit_length()))
if not str:
if bits >= 0:
return [1 if i == "1" else 0 for i in bin(n)[2:].rjust(bits, "0")]
else:
return variations(range(2), n, repetition=True)
else:
if bits >= 0:
return bin(n)[2:].rjust(bits, "0")
else:
return (bin(i)[2:].rjust(n, "0") for i in range(2**n))
def variations(seq, n, repetition=False):
r"""Returns an iterator over the n-sized variations of ``seq`` (size N).
``repetition`` controls whether items in ``seq`` can appear more than once;
Examples
========
``variations(seq, n)`` will return `\frac{N!}{(N - n)!}` permutations without
repetition of ``seq``'s elements:
>>> from sympy import variations
>>> list(variations([1, 2], 2))
[(1, 2), (2, 1)]
``variations(seq, n, True)`` will return the `N^n` permutations obtained
by allowing repetition of elements:
>>> list(variations([1, 2], 2, repetition=True))
[(1, 1), (1, 2), (2, 1), (2, 2)]
If you ask for more items than are in the set you get the empty set unless
you allow repetitions:
>>> list(variations([0, 1], 3, repetition=False))
[]
>>> list(variations([0, 1], 3, repetition=True))[:4]
[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]
.. seealso::
`itertools.permutations <https://docs.python.org/3/library/itertools.html#itertools.permutations>`_,
`itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_
"""
if not repetition:
seq = tuple(seq)
if len(seq) < n:
return iter(()) # 0 length iterator
return permutations(seq, n)
else:
if n == 0:
return iter(((),)) # yields 1 empty tuple
else:
return product(seq, repeat=n)
def subsets(seq, k=None, repetition=False):
r"""Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``.
A `k`-subset of an `n`-element set is any subset of length exactly `k`. The
number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``,
whereas there are `2^n` subsets all together. If `k` is ``None`` then all
`2^n` subsets will be returned from shortest to longest.
Examples
========
>>> from sympy import subsets
``subsets(seq, k)`` will return the `\frac{n!}{k!(n - k)!}` `k`-subsets (combinations)
without repetition, i.e. once an item has been removed, it can no
longer be "taken":
>>> list(subsets([1, 2], 2))
[(1, 2)]
>>> list(subsets([1, 2]))
[(), (1,), (2,), (1, 2)]
>>> list(subsets([1, 2, 3], 2))
[(1, 2), (1, 3), (2, 3)]
``subsets(seq, k, repetition=True)`` will return the `\frac{(n - 1 + k)!}{k!(n - 1)!}`
combinations *with* repetition:
>>> list(subsets([1, 2], 2, repetition=True))
[(1, 1), (1, 2), (2, 2)]
If you ask for more items than are in the set you get the empty set unless
you allow repetitions:
>>> list(subsets([0, 1], 3, repetition=False))
[]
>>> list(subsets([0, 1], 3, repetition=True))
[(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)]
"""
if k is None:
if not repetition:
return chain.from_iterable((combinations(seq, k)
for k in range(len(seq) + 1)))
else:
return chain.from_iterable((combinations_with_replacement(seq, k)
for k in range(len(seq) + 1)))
else:
if not repetition:
return combinations(seq, k)
else:
return combinations_with_replacement(seq, k)
def filter_symbols(iterator, exclude):
"""
Only yield elements from ``iterator`` that do not occur in ``exclude``.
Parameters
==========
iterator : iterable
iterator to take elements from
exclude : iterable
elements to exclude
Returns
=======
iterator : iterator
filtered iterator
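Examples
========
A brief illustration (the symbol names here are arbitrary):
>>> from sympy.utilities.iterables import filter_symbols
>>> from sympy import symbols
>>> x0, x1, x2 = symbols('x0 x1 x2')
>>> list(filter_symbols([x0, x1, x2], [x1]))
[x0, x2]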
"""
exclude = set(exclude)
for s in iterator:
if s not in exclude:
yield s
def numbered_symbols(prefix='x', cls=None, start=0, exclude=(), *args, **assumptions):
"""
Generate an infinite stream of Symbols consisting of a prefix and
increasing subscripts provided that they do not occur in ``exclude``.
Parameters
==========
prefix : str, optional
The prefix to use. By default, this function will generate symbols of
the form "x0", "x1", etc.
cls : class, optional
The class to use. By default, it uses ``Symbol``, but you can also use ``Wild`` or ``Dummy``.
start : int, optional
The start number. By default, it is 0.
Returns
=======
sym : Symbol
The subscripted symbols, yielded one at a time.
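Examples
========
A short sketch of typical use (``take`` is the helper defined in this
module; the prefixes are arbitrary):
>>> from sympy.utilities.iterables import numbered_symbols, take
>>> take(numbered_symbols('a'), 3)
[a0, a1, a2]
Names listed in ``exclude`` are skipped:
>>> from sympy import Symbol
>>> take(numbered_symbols('x', exclude=[Symbol('x0')]), 2)
[x1, x2]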
"""
exclude = set(exclude or [])
if cls is None:
# We can't just make the default cls=Symbol because it isn't
# imported yet.
from sympy.core import Symbol
cls = Symbol
while True:
name = '%s%s' % (prefix, start)
s = cls(name, *args, **assumptions)
if s not in exclude:
yield s
start += 1
def capture(func):
"""Return the printed output of func().
``func`` should be a function without arguments that produces output with
print statements.
>>> from sympy.utilities.iterables import capture
>>> from sympy import pprint
>>> from sympy.abc import x
>>> def foo():
... print('hello world!')
...
>>> 'hello' in capture(foo) # foo, not foo()
True
>>> capture(lambda: pprint(2/x))
'2\\n-\\nx\\n'
"""
from io import StringIO
import sys
stdout = sys.stdout
sys.stdout = file = StringIO()
try:
func()
finally:
sys.stdout = stdout
return file.getvalue()
def sift(seq, keyfunc, binary=False):
"""
Sift the sequence, ``seq`` according to ``keyfunc``.
Returns
=======
When ``binary`` is ``False`` (default), the output is a dictionary
where elements of ``seq`` are stored in a list keyed to the value
of ``keyfunc`` for that element. If ``binary`` is True then a tuple
with lists ``T`` and ``F`` is returned, where ``T`` is a list
containing elements of ``seq`` for which ``keyfunc`` was ``True`` and
``F`` contains those elements for which ``keyfunc`` was ``False``;
a ValueError is raised if the ``keyfunc`` is not binary.
Examples
========
>>> from sympy.utilities import sift
>>> from sympy.abc import x, y
>>> from sympy import sqrt, exp, pi, Tuple
>>> sift(range(5), lambda x: x % 2)
{0: [0, 2, 4], 1: [1, 3]}
sift() returns a defaultdict() object, so any key that has no matches will
give [].
>>> sift([x], lambda x: x.is_commutative)
{True: [x]}
>>> _[False]
[]
Sometimes you will not know how many keys you will get:
>>> sift([sqrt(x), exp(x), (y**x)**2],
... lambda x: x.as_base_exp()[0])
{E: [exp(x)], x: [sqrt(x)], y: [y**(2*x)]}
Sometimes you expect the results to be binary; the
results can be unpacked by setting ``binary`` to True:
>>> sift(range(4), lambda x: x % 2, binary=True)
([1, 3], [0, 2])
>>> sift(Tuple(1, pi), lambda x: x.is_rational, binary=True)
([1], [pi])
A ValueError is raised if the predicate was not actually binary
(which is a good test for the logic where sifting is used and
binary results were expected):
>>> unknown = exp(1) - pi # the rationality of this is unknown
>>> args = Tuple(1, pi, unknown)
>>> sift(args, lambda x: x.is_rational, binary=True)
Traceback (most recent call last):
...
ValueError: keyfunc gave non-binary output
The non-binary sifting shows that there were 3 keys generated:
>>> set(sift(args, lambda x: x.is_rational).keys())
{None, False, True}
If you need to sort the sifted items it might be better to use
``ordered`` which can economically apply multiple sort keys
to a sequence while sorting.
See Also
========
ordered
"""
if not binary:
m = defaultdict(list)
for i in seq:
m[keyfunc(i)].append(i)
return m
# the boolean keyfunc(i) is used as an index into this pair: False -> 0 -> F, True -> 1 -> T
sift = F, T = [], []
for i in seq:
try:
sift[keyfunc(i)].append(i)
except (IndexError, TypeError):
raise ValueError('keyfunc gave non-binary output')
return T, F
def take(iter, n):
"""Return ``n`` items from ``iter`` iterator. """
return [ value for _, value in zip(range(n), iter) ]
def dict_merge(*dicts):
"""Merge dictionaries into a single dictionary. """
merged = {}
for dict in dicts:
merged.update(dict)
return merged
def common_prefix(*seqs):
"""Return the subsequence that is a common start of sequences in ``seqs``.
>>> from sympy.utilities.iterables import common_prefix
>>> common_prefix(list(range(3)))
[0, 1, 2]
>>> common_prefix(list(range(3)), list(range(4)))
[0, 1, 2]
>>> common_prefix([1, 2, 3], [1, 2, 5])
[1, 2]
>>> common_prefix([1, 2, 3], [1, 3, 5])
[1]
"""
if not all(seqs):
return []
elif len(seqs) == 1:
return seqs[0]
i = 0
for i in range(min(len(s) for s in seqs)):
if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))):
break
else:
i += 1
return seqs[0][:i]
def common_suffix(*seqs):
"""Return the subsequence that is a common ending of sequences in ``seqs``.
>>> from sympy.utilities.iterables import common_suffix
>>> common_suffix(list(range(3)))
[0, 1, 2]
>>> common_suffix(list(range(3)), list(range(4)))
[]
>>> common_suffix([1, 2, 3], [9, 2, 3])
[2, 3]
>>> common_suffix([1, 2, 3], [9, 7, 3])
[3]
"""
if not all(seqs):
return []
elif len(seqs) == 1:
return seqs[0]
i = 0
for i in range(-1, -min(len(s) for s in seqs) - 1, -1):
if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))):
break
else:
i -= 1
if i == -1:
return []
else:
return seqs[0][i + 1:]
def prefixes(seq):
"""
Generate all prefixes of a sequence.
Examples
========
>>> from sympy.utilities.iterables import prefixes
>>> list(prefixes([1,2,3,4]))
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4]]
"""
n = len(seq)
for i in range(n):
yield seq[:i + 1]
def postfixes(seq):
"""
Generate all postfixes of a sequence.
Examples
========
>>> from sympy.utilities.iterables import postfixes
>>> list(postfixes([1,2,3,4]))
[[4], [3, 4], [2, 3, 4], [1, 2, 3, 4]]
"""
n = len(seq)
for i in range(n):
yield seq[n - i - 1:]
def topological_sort(graph, key=None):
r"""
Topological sort of graph's vertices.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph to be sorted topologically.
key : callable[T] (optional)
Ordering key for vertices on the same level. By default the natural
(e.g. lexicographic) ordering is used (in this case the base type
must implement ordering relations).
Examples
========
Consider a graph::
+---+ +---+ +---+
| 7 |\ | 5 | | 3 |
+---+ \ +---+ +---+
| _\___/ ____ _/ |
| / \___/ \ / |
V V V V |
+----+ +---+ |
| 11 | | 8 | |
+----+ +---+ |
| | \____ ___/ _ |
| \ \ / / \ |
V \ V V / V V
+---+ \ +---+ | +----+
| 2 | | | 9 | | | 10 |
+---+ | +---+ | +----+
\________/
where vertices are integers. This graph can be encoded using
elementary Python's data structures as follows::
>>> V = [2, 3, 5, 7, 8, 9, 10, 11]
>>> E = [(7, 11), (7, 8), (5, 11), (3, 8), (3, 10),
... (11, 2), (11, 9), (11, 10), (8, 9)]
To compute a topological sort for graph ``(V, E)`` issue::
>>> from sympy.utilities.iterables import topological_sort
>>> topological_sort((V, E))
[3, 5, 7, 8, 11, 2, 9, 10]
If specific tie breaking approach is needed, use ``key`` parameter::
>>> topological_sort((V, E), key=lambda v: -v)
[7, 5, 11, 3, 10, 8, 9, 2]
Only acyclic graphs can be sorted. If the input graph has a cycle,
then ``ValueError`` will be raised::
>>> topological_sort((V, E + [(10, 7)]))
Traceback (most recent call last):
...
ValueError: cycle detected
References
==========
.. [1] https://en.wikipedia.org/wiki/Topological_sorting
"""
V, E = graph
L = []
S = set(V)
E = list(E)
for v, u in E:
S.discard(u)
if key is None:
key = lambda value: value
S = sorted(S, key=key, reverse=True)
while S:
node = S.pop()
L.append(node)
for u, v in list(E):
if u == node:
E.remove((u, v))
for _u, _v in E:
if v == _v:
break
else:
kv = key(v)
for i, s in enumerate(S):
ks = key(s)
if kv > ks:
S.insert(i, v)
break
else:
S.append(v)
if E:
raise ValueError("cycle detected")
else:
return L
def strongly_connected_components(G):
r"""
Strongly connected components of a directed graph in reverse topological
order.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph whose strongly connected components are to be found.
Examples
========
Consider a directed graph (in dot notation)::
digraph {
A -> B
A -> C
B -> C
C -> B
B -> D
}
.. graphviz::
digraph {
A -> B
A -> C
B -> C
C -> B
B -> D
}
where vertices are the letters A, B, C and D. This graph can be encoded
using Python's elementary data structures as follows::
>>> V = ['A', 'B', 'C', 'D']
>>> E = [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B'), ('B', 'D')]
The strongly connected components of this graph can be computed as
>>> from sympy.utilities.iterables import strongly_connected_components
>>> strongly_connected_components((V, E))
[['D'], ['B', 'C'], ['A']]
This also gives the components in reverse topological order.
Since the subgraph containing B and C has a cycle they must be together in
a strongly connected component. A and D are connected to the rest of the
graph but not in a cyclic manner so they appear as their own strongly
connected components.
Notes
=====
The vertices of the graph must be hashable for the data structures used.
If the vertices are unhashable replace them with integer indices.
This function uses Tarjan's algorithm to compute the strongly connected
components in `O(|V|+|E|)` (linear) time.
References
==========
.. [1] https://en.wikipedia.org/wiki/Strongly_connected_component
.. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
See Also
========
sympy.utilities.iterables.connected_components
"""
# Map from a vertex to its neighbours
V, E = G
Gmap = {vi: [] for vi in V}
for v1, v2 in E:
Gmap[v1].append(v2)
return _strongly_connected_components(V, Gmap)
def _strongly_connected_components(V, Gmap):
"""More efficient internal routine for strongly_connected_components"""
#
# Here V is an iterable of vertices and Gmap is a dict mapping each vertex
# to a list of neighbours e.g.:
#
# V = [0, 1, 2, 3]
# Gmap = {0: [2, 3], 1: [0]}
#
# For a large graph these data structures can often be created more
# efficiently than those expected by strongly_connected_components() which
# in this case would be
#
# V = [0, 1, 2, 3]
# E = [(0, 2), (0, 3), (1, 0)]
#
# XXX: Maybe this should be the recommended function to use instead...
#
# Non-recursive Tarjan's algorithm:
lowlink = {}
indices = {}
stack = OrderedDict()
callstack = []
components = []
nomore = object()
def start(v):
index = len(stack)
indices[v] = lowlink[v] = index
stack[v] = None
callstack.append((v, iter(Gmap[v])))
def finish(v1):
# Finished a component?
if lowlink[v1] == indices[v1]:
component = [stack.popitem()[0]]
while component[-1] is not v1:
component.append(stack.popitem()[0])
components.append(component[::-1])
v2, _ = callstack.pop()
if callstack:
v1, _ = callstack[-1]
lowlink[v1] = min(lowlink[v1], lowlink[v2])
for v in V:
if v in indices:
continue
start(v)
while callstack:
v1, it1 = callstack[-1]
v2 = next(it1, nomore)
# Finished children of v1?
if v2 is nomore:
finish(v1)
# Recurse on v2
elif v2 not in indices:
start(v2)
elif v2 in stack:
lowlink[v1] = min(lowlink[v1], indices[v2])
# Reverse topological sort order:
return components
def connected_components(G):
r"""
Connected components of an undirected graph or weakly connected components
of a directed graph.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph whose connected components are to be found.
Examples
========
Given an undirected graph::
graph {
A -- B
C -- D
}
.. graphviz::
graph {
A -- B
C -- D
}
We can find the connected components using this function if we include
each edge in both directions::
>>> from sympy.utilities.iterables import connected_components
>>> V = ['A', 'B', 'C', 'D']
>>> E = [('A', 'B'), ('B', 'A'), ('C', 'D'), ('D', 'C')]
>>> connected_components((V, E))
[['A', 'B'], ['C', 'D']]
The weakly connected components of a directed graph can be found the same
way.
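Since the routine itself adds the reversed edges, a directed edge list
can also be passed as-is (a minimal sketch reusing ``V`` from above):
>>> E_directed = [('A', 'B'), ('C', 'D')]
>>> connected_components((V, E_directed))
[['A', 'B'], ['C', 'D']]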
Notes
=====
The vertices of the graph must be hashable for the data structures used.
If the vertices are unhashable replace them with integer indices.
This function uses Tarjan's algorithm to compute the connected components
in `O(|V|+|E|)` (linear) time.
References
==========
.. [1] https://en.wikipedia.org/wiki/Connected_component_(graph_theory)
.. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
See Also
========
sympy.utilities.iterables.strongly_connected_components
"""
# Duplicate edges both ways so that the graph is effectively undirected
# and return the strongly connected components:
V, E = G
E_undirected = []
for v1, v2 in E:
E_undirected.extend([(v1, v2), (v2, v1)])
return strongly_connected_components((V, E_undirected))
def rotate_left(x, y):
"""
Left rotates a list x by the number of steps specified
in y.
Examples
========
>>> from sympy.utilities.iterables import rotate_left
>>> a = [0, 1, 2]
>>> rotate_left(a, 1)
[1, 2, 0]
"""
if len(x) == 0:
return []
y = y % len(x)
return x[y:] + x[:y]
def rotate_right(x, y):
"""
Right rotates a list x by the number of steps specified
in y.
Examples
========
>>> from sympy.utilities.iterables import rotate_right
>>> a = [0, 1, 2]
>>> rotate_right(a, 1)
[2, 0, 1]
"""
if len(x) == 0:
return []
y = len(x) - y % len(x)
return x[y:] + x[:y]
def least_rotation(x, key=None):
'''
Returns the number of steps of left rotation required to
obtain lexicographically minimal string/list/tuple, etc.
Examples
========
>>> from sympy.utilities.iterables import least_rotation, rotate_left
>>> a = [3, 1, 5, 1, 2]
>>> least_rotation(a)
3
>>> rotate_left(a, _)
[1, 2, 3, 1, 5]
References
==========
.. [1] https://en.wikipedia.org/wiki/Lexicographically_minimal_string_rotation
'''
from sympy.functions.elementary.miscellaneous import Id
if key is None: key = Id
S = x + x # Concatenate string to itself to avoid modular arithmetic
f = [-1] * len(S) # Failure function
k = 0 # Least rotation of string found so far
for j in range(1,len(S)):
sj = S[j]
i = f[j-k-1]
while i != -1 and sj != S[k+i+1]:
if key(sj) < key(S[k+i+1]):
k = j-i-1
i = f[i]
if sj != S[k+i+1]:
if key(sj) < key(S[k]):
k = j
f[j-k] = -1
else:
f[j-k] = i+1
return k
def multiset_combinations(m, n, g=None):
"""
Return the unique combinations of size ``n`` from multiset ``m``.
Examples
========
>>> from sympy.utilities.iterables import multiset_combinations
>>> from itertools import combinations
>>> [''.join(i) for i in multiset_combinations('baby', 3)]
['abb', 'aby', 'bby']
>>> def count(f, s): return len(list(f(s, 3)))
The number of combinations depends on the number of letters; the
number of unique combinations depends on how the letters are
repeated.
>>> s1 = 'abracadabra'
>>> s2 = 'banana tree'
>>> count(combinations, s1), count(multiset_combinations, s1)
(165, 23)
>>> count(combinations, s2), count(multiset_combinations, s2)
(165, 54)
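The multiset can also be given directly as a dictionary of counts
(a small sketch; the counts here are arbitrary):
>>> [''.join(i) for i in multiset_combinations({'a': 2, 'b': 1}, 2)]
['aa', 'ab']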
"""
from sympy.core.sorting import ordered
if g is None:
if isinstance(m, dict):
if any(as_int(v) < 0 for v in m.values()):
raise ValueError('counts cannot be negative')
N = sum(m.values())
if n > N:
return
g = [[k, m[k]] for k in ordered(m)]
else:
m = list(m)
N = len(m)
if n > N:
return
try:
m = multiset(m)
g = [(k, m[k]) for k in ordered(m)]
except TypeError:
m = list(ordered(m))
g = [list(i) for i in group(m, multiple=False)]
del m
else:
# not checking counts since g is intended for internal use
N = sum(v for k, v in g)
if n > N or not n:
yield []
else:
for i, (k, v) in enumerate(g):
if v >= n:
yield [k]*n
v = n - 1
for v in range(min(n, v), 0, -1):
for j in multiset_combinations(None, n - v, g[i + 1:]):
rv = [k]*v + j
if len(rv) == n:
yield rv
def multiset_permutations(m, size=None, g=None):
"""
Return the unique permutations of multiset ``m``.
Examples
========
>>> from sympy.utilities.iterables import multiset_permutations
>>> from sympy import factorial
>>> [''.join(i) for i in multiset_permutations('aab')]
['aab', 'aba', 'baa']
>>> factorial(len('banana'))
720
>>> len(list(multiset_permutations('banana')))
60
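The optional ``size`` argument restricts the length of the permutations
(a brief illustration):
>>> [''.join(i) for i in multiset_permutations('aab', 2)]
['aa', 'ab', 'ba']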
"""
from sympy.core.sorting import ordered
if g is None:
if isinstance(m, dict):
if any(as_int(v) < 0 for v in m.values()):
raise ValueError('counts cannot be negative')
g = [[k, m[k]] for k in ordered(m)]
else:
m = list(ordered(m))
g = [list(i) for i in group(m, multiple=False)]
del m
do = [gi for gi in g if gi[1] > 0]
SUM = sum([gi[1] for gi in do])
if not do or size is not None and (size > SUM or size < 1):
if not do and size is None or size == 0:
yield []
return
elif size == 1:
for k, v in do:
yield [k]
elif len(do) == 1:
k, v = do[0]
v = v if size is None else (size if size <= v else 0)
yield [k for i in range(v)]
elif all(v == 1 for k, v in do):
for p in permutations([k for k, v in do], size):
yield list(p)
else:
size = size if size is not None else SUM
for i, (k, v) in enumerate(do):
do[i][1] -= 1
for j in multiset_permutations(None, size - 1, do):
if j:
yield [k] + j
do[i][1] += 1
def _partition(seq, vector, m=None):
"""
Return the partition of seq as specified by the partition vector.
Examples
========
>>> from sympy.utilities.iterables import _partition
>>> _partition('abcde', [1, 0, 1, 2, 0])
[['b', 'e'], ['a', 'c'], ['d']]
Specifying the number of bins in the partition is optional:
>>> _partition('abcde', [1, 0, 1, 2, 0], 3)
[['b', 'e'], ['a', 'c'], ['d']]
The output of _set_partitions can be passed as follows:
>>> output = (3, [1, 0, 1, 2, 0])
>>> _partition('abcde', *output)
[['b', 'e'], ['a', 'c'], ['d']]
See Also
========
combinatorics.partitions.Partition.from_rgs
"""
if m is None:
m = max(vector) + 1
elif isinstance(vector, int): # entered as m, vector
vector, m = m, vector
p = [[] for i in range(m)]
for i, v in enumerate(vector):
p[v].append(seq[i])
return p
def _set_partitions(n):
"""Cycle through all partitions of n elements, yielding the
current number of partitions, ``m``, and a mutable list, ``q``
such that ``element[i]`` is in part ``q[i]`` of the partition.
NOTE: ``q`` is modified in place and generally should not be changed
between function calls.
Examples
========
>>> from sympy.utilities.iterables import _set_partitions, _partition
>>> for m, q in _set_partitions(3):
... print('%s %s %s' % (m, q, _partition('abc', q, m)))
1 [0, 0, 0] [['a', 'b', 'c']]
2 [0, 0, 1] [['a', 'b'], ['c']]
2 [0, 1, 0] [['a', 'c'], ['b']]
2 [0, 1, 1] [['a'], ['b', 'c']]
3 [0, 1, 2] [['a'], ['b'], ['c']]
Notes
=====
This algorithm is similar to, and solves the same problem as,
Algorithm 7.2.1.5H, from volume 4A of Knuth's The Art of Computer
Programming. Knuth uses the term "restricted growth string" where
this code refers to a "partition vector". In each case, the meaning is
the same: the value in the ith element of the vector specifies to
which part the ith set element is to be assigned.
At the lowest level, this code implements an n-digit big-endian
counter (stored in the array q) which is incremented (with carries) to
get the next partition in the sequence. A special twist is that a
digit is constrained to be at most one greater than the maximum of all
the digits to the left of it. The array p maintains this maximum, so
that the code can efficiently decide when a digit can be incremented
in place or whether it needs to be reset to 0 and trigger a carry to
the next digit. The enumeration starts with all the digits 0 (which
corresponds to all the set elements being assigned to the same 0th
part), and ends with 0123...n, which corresponds to each set element
being assigned to a different, singleton, part.
This routine was rewritten to use 0-based lists while trying to
preserve the beauty and efficiency of the original algorithm.
References
==========
.. [1] Nijenhuis, Albert and Wilf, Herbert. (1978) Combinatorial Algorithms,
2nd Ed, p 91, algorithm "nexequ". Available online from
https://www.math.upenn.edu/~wilf/website/CombAlgDownld.html (viewed
November 17, 2012).
"""
p = [0]*n
q = [0]*n
nc = 1
yield nc, q
while nc != n:
m = n
while 1:
m -= 1
i = q[m]
if p[i] != 1:
break
q[m] = 0
i += 1
q[m] = i
m += 1
nc += m - n
p[0] += n - m
if i == nc:
p[nc] = 0
nc += 1
p[i - 1] -= 1
p[i] += 1
yield nc, q
def multiset_partitions(multiset, m=None):
"""
Return unique partitions of the given multiset (in list form).
If ``m`` is None, all multisets will be returned, otherwise only
partitions with ``m`` parts will be returned.
If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1]
will be supplied.
Examples
========
>>> from sympy.utilities.iterables import multiset_partitions
>>> list(multiset_partitions([1, 2, 3, 4], 2))
[[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
>>> list(multiset_partitions([1, 2, 3, 4], 1))
[[[1, 2, 3, 4]]]
Only unique partitions are returned and these will be returned in a
canonical order regardless of the order of the input:
>>> a = [1, 2, 2, 1]
>>> ans = list(multiset_partitions(a, 2))
>>> a.sort()
>>> list(multiset_partitions(a, 2)) == ans
True
>>> a = range(3, 1, -1)
>>> (list(multiset_partitions(a)) ==
... list(multiset_partitions(sorted(a))))
True
If m is omitted then all partitions will be returned:
>>> list(multiset_partitions([1, 1, 2]))
[[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]]
>>> list(multiset_partitions([1]*3))
[[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
Counting
========
The number of partitions of a set is given by the bell number:
>>> from sympy import bell
>>> len(list(multiset_partitions(5))) == bell(5) == 52
True
The number of partitions of length k from a set of size n is given by the
Stirling Number of the 2nd kind:
>>> from sympy.functions.combinatorial.numbers import stirling
>>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15
True
These comments on counting apply to *sets*, not multisets.
Notes
=====
When all the elements are the same in the multiset, the order
of the returned partitions is determined by the ``partitions``
routine. If one is counting partitions then it is better to use
the ``nT`` function.
See Also
========
partitions
sympy.combinatorics.partitions.Partition
sympy.combinatorics.partitions.IntegerPartition
sympy.functions.combinatorial.numbers.nT
"""
# This function looks at the supplied input and dispatches to
# several special-case routines as they apply.
if isinstance(multiset, int):
n = multiset
if m and m > n:
return
multiset = list(range(n))
if m == 1:
yield [multiset[:]]
return
# If m is not None, it can sometimes be faster to use
# MultisetPartitionTraverser.enum_range() even for inputs
# which are sets. Since the _set_partitions code is quite
# fast, this is only advantageous when the overall set
# partitions outnumber those with the desired number of parts
# by a large factor. (At least 60.) Such a switch is not
# currently implemented.
for nc, q in _set_partitions(n):
if m is None or nc == m:
rv = [[] for i in range(nc)]
for i in range(n):
rv[q[i]].append(multiset[i])
yield rv
return
if len(multiset) == 1 and isinstance(multiset, str):
multiset = [multiset]
if not has_variety(multiset):
# Only one component, repeated n times. The resulting
# partitions correspond to partitions of integer n.
n = len(multiset)
if m and m > n:
return
if m == 1:
yield [multiset[:]]
return
x = multiset[:1]
for size, p in partitions(n, m, size=True):
if m is None or size == m:
rv = []
for k in sorted(p):
rv.extend([x*k]*p[k])
yield rv
else:
from sympy.core.sorting import ordered
multiset = list(ordered(multiset))
n = len(multiset)
if m and m > n:
return
if m == 1:
yield [multiset[:]]
return
# Split the information of the multiset into two lists -
# one of the elements themselves, and one (of the same length)
# giving the number of repeats for the corresponding element.
elements, multiplicities = zip(*group(multiset, False))
if len(elements) < len(multiset):
# General case - multiset with more than one distinct element
# and at least one element repeated more than once.
if m:
mpt = MultisetPartitionTraverser()
for state in mpt.enum_range(multiplicities, m-1, m):
yield list_visitor(state, elements)
else:
for state in multiset_partitions_taocp(multiplicities):
yield list_visitor(state, elements)
else:
# Set partitions case - no repeated elements. Pretty much
# same as int argument case above, with same possible, but
# currently unimplemented optimization for some cases when
# m is not None
for nc, q in _set_partitions(n):
if m is None or nc == m:
rv = [[] for i in range(nc)]
for i in range(n):
rv[q[i]].append(i)
yield [[multiset[j] for j in i] for i in rv]
def partitions(n, m=None, k=None, size=False):
"""Generate all partitions of positive integer, n.
Parameters
==========
m : integer (default gives partitions of all sizes)
limits number of parts in partition (mnemonic: m, maximum parts)
k : integer (default gives partitions number from 1 through n)
limits the numbers that are kept in the partition (mnemonic: k, keys)
size : bool (default False, only partition is returned)
when ``True`` then (M, P) is returned where M is the sum of the
multiplicities and P is the generated partition.
Each partition is represented as a dictionary, mapping an integer
to the number of copies of that integer in the partition. For example,
the first partition of 4 returned is {4: 1}, "4: one of them".
Examples
========
>>> from sympy.utilities.iterables import partitions
The numbers appearing in the partition (the key of the returned dict)
are limited with k:
>>> for p in partitions(6, k=2): # doctest: +SKIP
... print(p)
{2: 3}
{1: 2, 2: 2}
{1: 4, 2: 1}
{1: 6}
The maximum number of parts in the partition (the sum of the values in
the returned dict) is limited with m (default value, None, gives
partitions from 1 through n):
>>> for p in partitions(6, m=2): # doctest: +SKIP
... print(p)
...
{6: 1}
{1: 1, 5: 1}
{2: 1, 4: 1}
{3: 2}
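With ``size=True`` each partition is preceded by the number of its
parts; for instance, the first partition of 4 has a single part
(a minimal illustration):
>>> next(partitions(4, size=True))
(1, {4: 1})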
References
==========
.. [1] modified from Tim Peter's version to allow for k and m values:
http://code.activestate.com/recipes/218332-generator-for-integer-partitions/
See Also
========
sympy.combinatorics.partitions.Partition
sympy.combinatorics.partitions.IntegerPartition
"""
if (n <= 0 or
m is not None and m < 1 or
k is not None and k < 1 or
m and k and m*k < n):
# the empty set is the only way to handle these inputs
# and returning {} to represent it is consistent with
# the counting convention, e.g. nT(0) == 1.
if size:
yield 0, {}
else:
yield {}
return
if m is None:
m = n
else:
m = min(m, n)
k = min(k or n, n)
n, m, k = as_int(n), as_int(m), as_int(k)
q, r = divmod(n, k)
ms = {k: q}
keys = [k] # ms.keys(), from largest to smallest
if r:
ms[r] = 1
keys.append(r)
room = m - q - bool(r)
if size:
yield sum(ms.values()), ms.copy()
else:
yield ms.copy()
while keys != [1]:
# Reuse any 1's.
if keys[-1] == 1:
del keys[-1]
reuse = ms.pop(1)
room += reuse
else:
reuse = 0
while 1:
# Let i be the smallest key larger than 1. Reuse one
# instance of i.
i = keys[-1]
newcount = ms[i] = ms[i] - 1
reuse += i
if newcount == 0:
del keys[-1], ms[i]
room += 1
# Break the remainder into pieces of size i-1.
i -= 1
q, r = divmod(reuse, i)
need = q + bool(r)
if need > room:
if not keys:
return
continue
ms[i] = q
keys.append(i)
if r:
ms[r] = 1
keys.append(r)
break
room -= need
if size:
yield sum(ms.values()), ms.copy()
else:
yield ms.copy()
def ordered_partitions(n, m=None, sort=True):
"""Generates ordered partitions of integer ``n``.
Parameters
==========
m : integer (default None)
The default value gives partitions of all sizes else only
those with size m. In addition, if ``m`` is not None then
partitions are generated *in place* (see examples).
sort : bool (default True)
Controls whether partitions are
returned in sorted order when ``m`` is not None; when False,
the partitions are returned as fast as possible with elements
sorted, but when m|n the partitions will not be in
ascending lexicographical order.
Examples
========
>>> from sympy.utilities.iterables import ordered_partitions
All partitions of 5 in ascending lexicographical order:
>>> for p in ordered_partitions(5):
... print(p)
[1, 1, 1, 1, 1]
[1, 1, 1, 2]
[1, 1, 3]
[1, 2, 2]
[1, 4]
[2, 3]
[5]
Only partitions of 5 with two parts:
>>> for p in ordered_partitions(5, 2):
... print(p)
[1, 4]
[2, 3]
When ``m`` is given, the same list object will be used more than
once for speed reasons, so you will not see the correct partitions
unless you make a copy of each as it is generated:
>>> [p for p in ordered_partitions(7, 3)]
[[1, 1, 1], [1, 1, 1], [1, 1, 1], [2, 2, 2]]
>>> [list(p) for p in ordered_partitions(7, 3)]
[[1, 1, 5], [1, 2, 4], [1, 3, 3], [2, 2, 3]]
When ``n`` is a multiple of ``m``, the elements are still sorted
but the partitions themselves will be *unordered* if sort is False;
the default is to return them in ascending lexicographical order.
>>> for p in ordered_partitions(6, 2):
... print(p)
[1, 5]
[2, 4]
[3, 3]
But if speed is more important than ordering, sort can be set to
False:
>>> for p in ordered_partitions(6, 2, sort=False):
... print(p)
[1, 5]
[3, 3]
[2, 4]
References
==========
.. [1] Generating Integer Partitions, [online],
Available: https://jeromekelleher.net/generating-integer-partitions.html
.. [2] Jerome Kelleher and Barry O'Sullivan, "Generating All
Partitions: A Comparison Of Two Encodings", [online],
Available: https://arxiv.org/pdf/0909.2331v2.pdf
"""
if n < 1 or m is not None and m < 1:
# the empty partition is the only way to handle these inputs
# and yielding [] to represent it is consistent with
# the counting convention, e.g. nT(0) == 1.
yield []
return
if m is None:
# The list `a`'s leading elements contain the partition in which
# y is the biggest element and x is either the same as y or the
# 2nd largest element; v and w are adjacent element indices
# to which x and y are being assigned, respectively.
a = [1]*n
y = -1
v = n
while v > 0:
v -= 1
x = a[v] + 1
while y >= 2 * x:
a[v] = x
y -= x
v += 1
w = v + 1
while x <= y:
a[v] = x
a[w] = y
yield a[:w + 1]
x += 1
y -= 1
a[v] = x + y
y = a[v] - 1
yield a[:w]
elif m == 1:
yield [n]
elif n == m:
yield [1]*n
else:
# recursively generate partitions of size m
for b in range(1, n//m + 1):
a = [b]*m
x = n - b*m
if not x:
if sort:
yield a
elif not sort and x <= m:
for ax in ordered_partitions(x, sort=False):
mi = len(ax)
a[-mi:] = [i + b for i in ax]
yield a
a[-mi:] = [b]*mi
else:
for mi in range(1, m):
for ax in ordered_partitions(x, mi, sort=True):
a[-mi:] = [i + b for i in ax]
yield a
a[-mi:] = [b]*mi
def binary_partitions(n):
"""
Generates the binary partitions of n.
A binary partition consists only of numbers that are
powers of two. Each step reduces a `2^{k+1}` to `2^k` and
`2^k`. Thus 16 is converted to 8 and 8.
Examples
========
>>> from sympy.utilities.iterables import binary_partitions
>>> for i in binary_partitions(5):
... print(i)
...
[4, 1]
[2, 2, 1]
[2, 1, 1, 1]
[1, 1, 1, 1, 1]
References
==========
.. [1] TAOCP 4, section 7.2.1.5, problem 64
"""
from math import ceil, log
power = int(2**(ceil(log(n, 2))))
acc = 0
partition = []
while power:
if acc + power <= n:
partition.append(power)
acc += power
power >>= 1
last_num = len(partition) - 1 - (n & 1)
while last_num >= 0:
yield partition
if partition[last_num] == 2:
partition[last_num] = 1
partition.append(1)
last_num -= 1
continue
partition.append(1)
partition[last_num] >>= 1
x = partition[last_num + 1] = partition[last_num]
last_num += 1
while x > 1:
if x <= len(partition) - last_num - 1:
del partition[-x + 1:]
last_num += 1
partition[last_num] = x
else:
x >>= 1
yield [1]*n
def has_dups(seq):
"""Return True if there are any duplicate elements in ``seq``.
Examples
========
>>> from sympy import has_dups, Dict, Set
>>> has_dups((1, 2, 1))
True
>>> has_dups(range(3))
False
>>> all(has_dups(c) is False for c in (set(), Set(), dict(), Dict()))
True
"""
from sympy.core.containers import Dict
from sympy.sets.sets import Set
if isinstance(seq, (dict, set, Dict, Set)):
return False
unique = set()
try:
return any(True for s in seq if s in unique or unique.add(s))
except TypeError:
return len(seq) != len(list(uniq(seq)))
def has_variety(seq):
"""Return True if there are any different elements in ``seq``.
Examples
========
>>> from sympy import has_variety
>>> has_variety((1, 2, 1))
True
>>> has_variety((1, 1, 1))
False
"""
for i, s in enumerate(seq):
if i == 0:
sentinel = s
else:
if s != sentinel:
return True
return False
def uniq(seq, result=None):
"""
Yield unique elements from ``seq`` as an iterator. The second
parameter ``result`` is used internally; it is not necessary
to pass anything for this.
Note: changing the sequence during iteration will raise a
RuntimeError if the size of the sequence is known; if you pass
an iterator and advance the iterator you will change the
output of this routine but there will be no warning.
Examples
========
>>> from sympy.utilities.iterables import uniq
>>> dat = [1, 4, 1, 5, 4, 2, 1, 2]
>>> type(uniq(dat)) in (list, tuple)
False
>>> list(uniq(dat))
[1, 4, 5, 2]
>>> list(uniq(x for x in dat))
[1, 4, 5, 2]
>>> list(uniq([[1], [2, 1], [1]]))
[[1], [2, 1]]
"""
try:
n = len(seq)
except TypeError:
n = None
def check():
# check that size of seq did not change during iteration;
# if n == None the object won't support size changing, e.g.
# an iterator can't be changed
if n is not None and len(seq) != n:
raise RuntimeError('sequence changed size during iteration')
try:
seen = set()
result = result or []
for i, s in enumerate(seq):
if not (s in seen or seen.add(s)):
yield s
check()
except TypeError:
if s not in result:
yield s
check()
result.append(s)
if hasattr(seq, '__getitem__'):
yield from uniq(seq[i + 1:], result)
else:
yield from uniq(seq, result)
def generate_bell(n):
"""Return permutations of [0, 1, ..., n - 1] such that each permutation
differs from the last by the exchange of a single pair of neighbors.
The ``n!`` permutations are returned as an iterator. In order to obtain
the next permutation from a random starting permutation, use the
``next_trotterjohnson`` method of the Permutation class (which generates
the same sequence in a different manner).
Examples
========
>>> from itertools import permutations
>>> from sympy.utilities.iterables import generate_bell
>>> from sympy import zeros, Matrix
This is the sort of permutation used in the ringing of physical bells,
and does not produce permutations in lexicographical order. Rather, the
permutations differ from each other by exactly one inversion, and the
position at which the swapping occurs varies periodically in a simple
fashion. Consider the first few permutations of 4 elements generated
by ``permutations`` and ``generate_bell``:
>>> list(permutations(range(4)))[:5]
[(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)]
>>> list(generate_bell(4))[:5]
[(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)]
Notice how the 2nd and 3rd lexicographical permutations have 3 elements
out of place whereas each "bell" permutation always has only two
elements out of place relative to the previous permutation (and so the
signature (+/-1) of a permutation is opposite of the signature of the
previous permutation).
How the position of inversion varies across the elements can be seen
by tracing out where the largest number appears in the permutations:
>>> m = zeros(4, 24)
>>> for i, p in enumerate(generate_bell(4)):
... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero
>>> m.print_nonzero('X')
[XXX XXXXXX XXXXXX XXX]
[XX XX XXXX XX XXXX XX XX]
[X XXXX XX XXXX XX XXXX X]
[ XXXXXX XXXXXX XXXXXX ]
See Also
========
sympy.combinatorics.permutations.Permutation.next_trotterjohnson
References
==========
.. [1] https://en.wikipedia.org/wiki/Method_ringing
.. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018
.. [3] http://programminggeeks.com/bell-algorithm-for-permutation/
.. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm
.. [5] Generating involutions, derangements, and relatives by ECO
Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010
"""
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
if n == 1:
yield (0,)
elif n == 2:
yield (0, 1)
yield (1, 0)
elif n == 3:
yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)]
else:
m = n - 1
op = [0] + [-1]*m
l = list(range(n))
while True:
yield tuple(l)
# find biggest element with op
big = None, -1 # idx, value
for i in range(n):
if op[i] and l[i] > big[1]:
big = i, l[i]
i, _ = big
if i is None:
break # there are no ops left
# swap it with neighbor in the indicated direction
j = i + op[i]
l[i], l[j] = l[j], l[i]
op[i], op[j] = op[j], op[i]
# if it landed at the end or if the neighbor in the same
# direction is bigger then turn off op
if j == 0 or j == m or l[j + op[j]] > l[j]:
op[j] = 0
# any element bigger to the left gets +1 op
for i in range(j):
if l[i] > l[j]:
op[i] = 1
# any element bigger to the right gets -1 op
for i in range(j + 1, n):
if l[i] > l[j]:
op[i] = -1
def generate_involutions(n):
"""
Generates involutions.
An involution is a permutation that when multiplied
by itself equals the identity permutation. In this
implementation the involutions are generated using
Fixed Points.
Alternatively, an involution can be considered as
a permutation that does not contain any cycles with
a length that is greater than two.
Examples
========
>>> from sympy.utilities.iterables import generate_involutions
>>> list(generate_involutions(3))
[(0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 1, 0)]
>>> len(list(generate_involutions(4)))
10
References
==========
.. [1] http://mathworld.wolfram.com/PermutationInvolution.html
"""
idx = list(range(n))
for p in permutations(idx):
for i in idx:
if p[p[i]] != i:
break
else:
yield p
def multiset_derangements(s):
"""Generate derangements of the elements of s *in place*.
Examples
========
>>> from sympy.utilities.iterables import multiset_derangements, uniq
Because the derangements of multisets (not sets) are generated
in place, copies of the return value must be made if a collection
of derangements is desired or else all values will be the same:
>>> list(uniq([i for i in multiset_derangements('1233')]))
[[None, None, None, None]]
>>> [i.copy() for i in multiset_derangements('1233')]
[['3', '3', '1', '2'], ['3', '3', '2', '1']]
>>> [''.join(i) for i in multiset_derangements('1233')]
['3312', '3321']
"""
from sympy.core.sorting import ordered
# create multiset dictionary of hashable elements or else
# remap elements to integers
try:
ms = multiset(s)
except TypeError:
# give each element a canonical integer value
key = dict(enumerate(ordered(uniq(s))))
h = []
for si in s:
for k in key:
if key[k] == si:
h.append(k)
break
for i in multiset_derangements(h):
yield [key[j] for j in i]
return
mx = max(ms.values()) # max repetition of any element
n = len(s) # the number of elements
## special cases
# 1) one element has more than half the total cardinality of s: no
# derangements are possible.
if mx*2 > n:
return
# 2) all elements appear once: singletons
if len(ms) == n:
yield from _set_derangements(s)
return
# find the first element that is repeated the most to place
# in the following two special cases where the selection
# is unambiguous: either there are two elements with multiplicity
# of mx or else there is only one with multiplicity mx
for M in ms:
if ms[M] == mx:
break
inonM = [i for i in range(n) if s[i] != M] # location of non-M
iM = [i for i in range(n) if s[i] == M] # locations of M
rv = [None]*n
# 3) half are the same
if 2*mx == n:
# M goes into non-M locations
for i in inonM:
rv[i] = M
# permutations of non-M go to M locations
for p in multiset_permutations([s[i] for i in inonM]):
for i, pi in zip(iM, p):
rv[i] = pi
yield rv
# clean-up (and encourages proper use of routine)
rv[:] = [None]*n
return
# 4) single repeat covers all but 1 of the non-repeats:
# if there is one repeat then the multiset of the values
# of ms would be {mx: 1, 1: n - mx}, i.e. there would
# be n - mx + 1 values with the condition that n - 2*mx = 1
if n - 2*mx == 1 and len(ms.values()) == n - mx + 1:
for i, i1 in enumerate(inonM):
ifill = inonM[:i] + inonM[i+1:]
for j in ifill:
rv[j] = M
for p in permutations([s[j] for j in ifill]):
rv[i1] = s[i1]
for j, pi in zip(iM, p):
rv[j] = pi
k = i1
for j in iM:
rv[j], rv[k] = rv[k], rv[j]
yield rv
k = j
# clean-up (and encourages proper use of routine)
rv[:] = [None]*n
return
## general case is handled with 3 helpers:
# 1) `finish_derangements` will place the last two elements
# which have arbitrary multiplicities, e.g. for multiset
# {c: 3, a: 2, b: 2}, the last two elements are a and b
# 2) `iopen` will tell where a given element can be placed
# 3) `do` will recursively place elements into subsets of
# valid locations
def finish_derangements():
"""Place the last two elements into the partially completed
derangement, and yield the results.
"""
a = take[1][0] # penultimate element
a_ct = take[1][1]
b = take[0][0] # last element to be placed
b_ct = take[0][1]
# split the indexes of the not-already-assigned elements of rv into
# three categories
forced_a = [] # positions which must have an a
forced_b = [] # positions which must have a b
open_free = [] # positions which could take either
for i in range(len(s)):
if rv[i] is None:
if s[i] == a:
forced_b.append(i)
elif s[i] == b:
forced_a.append(i)
else:
open_free.append(i)
if len(forced_a) > a_ct or len(forced_b) > b_ct:
# No derangement possible
return
for i in forced_a:
rv[i] = a
for i in forced_b:
rv[i] = b
for a_place in combinations(open_free, a_ct - len(forced_a)):
for a_pos in a_place:
rv[a_pos] = a
for i in open_free:
if rv[i] is None: # anything not in the subset is set to b
rv[i] = b
yield rv
# Clean up/undo the final placements
for i in open_free:
rv[i] = None
# additional cleanup - clear forced_a, forced_b
for i in forced_a:
rv[i] = None
for i in forced_b:
rv[i] = None
def iopen(v):
# return indices at which element v can be placed in rv:
# locations which are not already occupied if that location
# does not already contain v in the same location of s
return [i for i in range(n) if rv[i] is None and s[i] != v]
def do(j):
if j == 1:
# handle the last two elements (regardless of multiplicity)
# with a special method
yield from finish_derangements()
else:
# place the mx elements of M into a subset of places
# into which it can be replaced
M, mx = take[j]
for i in combinations(iopen(M), mx):
# place M
for ii in i:
rv[ii] = M
# recursively place the next element
yield from do(j - 1)
# mark positions where M was placed as once again
# open for placement of other elements
for ii in i:
rv[ii] = None
# process elements in order of canonically decreasing multiplicity
take = sorted(ms.items(), key=lambda x:(x[1], x[0]))
yield from do(len(take) - 1)
rv[:] = [None]*n
def random_derangement(t, choice=None, strict=True):
"""Return a list of elements in which none are in the same positions
as they were originally. If an element fills more than half of the positions
then an error will be raised since no derangement is possible. To obtain
a derangement of as many items as possible--with some of the most numerous
remaining in their original positions--pass `strict=False`. To produce a
pseudorandom derangement, pass a pseudorandom selector like `choice` (see
below).
Examples
========
>>> from sympy.utilities.iterables import random_derangement
>>> t = 'SymPy: a CAS in pure Python'
>>> d = random_derangement(t)
>>> all(i != j for i, j in zip(d, t))
True
A predictable result can be obtained by using a pseudorandom
generator for the choice:
>>> from sympy.core.random import seed, choice as c
>>> seed(1)
>>> d = [''.join(random_derangement(t, c)) for i in range(5)]
>>> assert len(set(d)) != 1 # we got different values
By reseeding, the same sequence can be obtained:
>>> seed(1)
>>> d2 = [''.join(random_derangement(t, c)) for i in range(5)]
>>> assert d == d2
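When `strict=False` a best-effort derangement is returned if a full
one is impossible; in this sketch exactly two of the ``a``s must keep
their original positions:
>>> d = random_derangement('aaab', strict=False)
>>> sum(i == j for i, j in zip(d, 'aaab'))
2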
"""
if choice is None:
import secrets
choice = secrets.choice
def shuffle(rv):
'''Knuth shuffle'''
for i in range(len(rv) - 1, 0, -1):
x = choice(rv[:i + 1])
j = rv.index(x)
rv[i], rv[j] = rv[j], rv[i]
def pick(rv, n):
'''shuffle rv and return the first n values
'''
shuffle(rv)
return rv[:n]
ms = multiset(t)
tot = len(t)
ms = sorted(ms.items(), key=lambda x: x[1])
# if there are not enough spaces for the most
# plentiful element to move to then some of them
# will have to stay in place
M, mx = ms[-1]
n = len(t)
xs = 2*mx - tot
if xs > 0:
if strict:
raise ValueError('no derangement possible')
opts = [i for (i, c) in enumerate(t) if c == ms[-1][0]]
pick(opts, xs)
stay = sorted(opts[:xs])
rv = list(t)
for i in reversed(stay):
rv.pop(i)
rv = random_derangement(rv, choice)
for i in stay:
rv.insert(i, ms[-1][0])
return ''.join(rv) if type(t) is str else rv
# the normal derangement calculated from here
if n == len(ms):
# approx 1/3 will succeed
rv = list(t)
while True:
shuffle(rv)
if all(i != j for i,j in zip(rv, t)):
break
else:
# general case
rv = [None]*n
while True:
j = 0
while j > -len(ms): # do most numerous first
j -= 1
e, c = ms[j]
opts = [i for i in range(n) if rv[i] is None and t[i] != e]
if len(opts) < c:
for i in range(n):
rv[i] = None
break # try again
pick(opts, c)
for i in range(c):
rv[opts[i]] = e
else:
return rv
return rv
def _set_derangements(s):
"""
yield derangements of items in ``s`` which are assumed to contain
no repeated elements
"""
if len(s) < 2:
return
if len(s) == 2:
yield [s[1], s[0]]
return
if len(s) == 3:
yield [s[1], s[2], s[0]]
yield [s[2], s[0], s[1]]
return
for p in permutations(s):
if not any(i == j for i, j in zip(p, s)):
yield list(p)
def generate_derangements(s):
"""
Return unique derangements of the elements of iterable ``s``.
Examples
========
>>> from sympy.utilities.iterables import generate_derangements
>>> list(generate_derangements([0, 1, 2]))
[[1, 2, 0], [2, 0, 1]]
>>> list(generate_derangements([0, 1, 2, 2]))
[[2, 2, 0, 1], [2, 2, 1, 0]]
>>> list(generate_derangements([0, 1, 1]))
[]
See Also
========
sympy.functions.combinatorial.factorials.subfactorial
"""
if not has_dups(s):
yield from _set_derangements(s)
else:
for p in multiset_derangements(s):
yield list(p)
def necklaces(n, k, free=False):
"""
A routine to generate necklaces that may (free=True) or may not
(free=False) be turned over to be viewed. The "necklaces" returned
are comprised of ``n`` integers (beads) with ``k`` different
values (colors). Only unique necklaces are returned.
Examples
========
>>> from sympy.utilities.iterables import necklaces, bracelets
>>> def show(s, i):
... return ''.join(s[j] for j in i)
The "unrestricted necklace" is sometimes also referred to as a
"bracelet" (an object that can be turned over, a sequence that can
be reversed) and the term "necklace" is used to imply a sequence
that cannot be reversed. So ACB == ABC for a bracelet (rotate and
reverse) while the two are different for a necklace since rotation
alone cannot make the two sequences the same.
(mnemonic: Bracelets can be viewed Backwards, but Not Necklaces.)
>>> B = [show('ABC', i) for i in bracelets(3, 3)]
>>> N = [show('ABC', i) for i in necklaces(3, 3)]
>>> set(N) - set(B)
{'ACB'}
>>> list(necklaces(4, 2))
[(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 1),
(0, 1, 0, 1), (0, 1, 1, 1), (1, 1, 1, 1)]
>>> [show('.o', i) for i in bracelets(4, 2)]
['....', '...o', '..oo', '.o.o', '.ooo', 'oooo']
References
==========
.. [1] http://mathworld.wolfram.com/Necklace.html
.. [2] Frank Ruskey, Carla Savage, and Terry Min Yih Wang,
Generating necklaces, Journal of Algorithms 13 (1992), 414-430;
https://doi.org/10.1016/0196-6774(92)90047-G
"""
# The FKM algorithm
if k == 0 and n > 0:
return
a = [0]*n
yield tuple(a)
if n == 0:
return
while True:
i = n - 1
while a[i] == k - 1:
i -= 1
if i == -1:
return
a[i] += 1
for j in range(n - i - 1):
a[j + i + 1] = a[j]
if n % (i + 1) == 0 and (not free or all(a <= a[j::-1] + a[-1:j:-1] for j in range(n - 1))):
# No need to test j = n - 1.
yield tuple(a)
def bracelets(n, k):
"""Wrapper to necklaces to return a free (unrestricted) necklace."""
return necklaces(n, k, free=True)
def generate_oriented_forest(n):
"""
This algorithm generates oriented forests.
An oriented graph is a directed graph having no symmetric pair of directed
edges. A forest is an acyclic graph, i.e., it has no cycles. A forest can
also be described as a disjoint union of trees, which are graphs in which
any two vertices are connected by exactly one simple path.
Examples
========
>>> from sympy.utilities.iterables import generate_oriented_forest
>>> list(generate_oriented_forest(4))
[[0, 1, 2, 3], [0, 1, 2, 2], [0, 1, 2, 1], [0, 1, 2, 0], \
[0, 1, 1, 1], [0, 1, 1, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 0, 0, 0]]
References
==========
.. [1] T. Beyer and S.M. Hedetniemi: constant time generation of
rooted trees, SIAM J. Computing Vol. 9, No. 4, November 1980
.. [2] https://stackoverflow.com/questions/1633833/oriented-forest-taocp-algorithm-in-python
"""
P = list(range(-1, n))
while True:
yield P[1:]
if P[n] > 0:
P[n] = P[P[n]]
else:
for p in range(n - 1, 0, -1):
if P[p] != 0:
target = P[p] - 1
for q in range(p - 1, 0, -1):
if P[q] == target:
break
offset = p - q
for i in range(p, n + 1):
P[i] = P[i - offset]
break
else:
break
def minlex(seq, directed=True, key=None):
r"""
Return the rotation of the sequence in which the lexically smallest
elements appear first, e.g. `cba \rightarrow acb`.
The sequence returned is a tuple, unless the input sequence is a string
in which case a string is returned.
If ``directed`` is False then the smaller of the sequence and the
reversed sequence is returned, e.g. `cba \rightarrow abc`.
If ``key`` is not None then it is used to extract a comparison key from each element in iterable.
Examples
========
>>> from sympy.combinatorics.polyhedron import minlex
>>> minlex((1, 2, 0))
(0, 1, 2)
>>> minlex((1, 0, 2))
(0, 2, 1)
>>> minlex((1, 0, 2), directed=False)
(0, 1, 2)
>>> minlex('11010011000', directed=True)
'00011010011'
>>> minlex('11010011000', directed=False)
'00011001011'
>>> minlex(('bb', 'aaa', 'c', 'a'))
('a', 'bb', 'aaa', 'c')
>>> minlex(('bb', 'aaa', 'c', 'a'), key=len)
('c', 'a', 'bb', 'aaa')
"""
from sympy.functions.elementary.miscellaneous import Id
if key is None: key = Id
best = rotate_left(seq, least_rotation(seq, key=key))
if not directed:
rseq = seq[::-1]
rbest = rotate_left(rseq, least_rotation(rseq, key=key))
best = min(best, rbest, key=key)
# Convert to tuple, unless we started with a string.
return tuple(best) if not isinstance(seq, str) else best
def runs(seq, op=gt):
"""Group the sequence into lists in which successive elements
all compare the same with the comparison operator, ``op``:
op(seq[i + 1], seq[i]) is True for all elements in a run.
Examples
========
>>> from sympy.utilities.iterables import runs
>>> from operator import ge
>>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2])
[[0, 1, 2], [2], [1, 4], [3], [2], [2]]
>>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2], op=ge)
[[0, 1, 2, 2], [1, 4], [3], [2, 2]]
"""
cycles = []
seq = iter(seq)
try:
run = [next(seq)]
except StopIteration:
return []
while True:
try:
ei = next(seq)
except StopIteration:
break
if op(ei, run[-1]):
run.append(ei)
continue
else:
cycles.append(run)
run = [ei]
if run:
cycles.append(run)
return cycles
def kbins(l, k, ordered=None):
"""
Return sequence ``l`` partitioned into ``k`` bins.
Examples
========
The default is to give the items in the same order, but grouped
into k partitions without any reordering:
>>> from sympy.utilities.iterables import kbins
>>> for p in kbins(list(range(5)), 2):
... print(p)
...
[[0], [1, 2, 3, 4]]
[[0, 1], [2, 3, 4]]
[[0, 1, 2], [3, 4]]
[[0, 1, 2, 3], [4]]
The ``ordered`` flag is either None (to give the simple partition
of the elements) or is a 2 digit integer indicating whether the order of
the bins and the order of the items in the bins matters. Given::
A = [[0], [1, 2]]
B = [[1, 2], [0]]
C = [[2, 1], [0]]
D = [[0], [2, 1]]
the following values for ``ordered`` have the shown meanings::
00 means A == B == C == D
01 means A == B
10 means A == D
11 means A == A
>>> for ordered_flag in [None, 0, 1, 10, 11]:
... print('ordered = %s' % ordered_flag)
... for p in kbins(list(range(3)), 2, ordered=ordered_flag):
... print(' %s' % p)
...
ordered = None
[[0], [1, 2]]
[[0, 1], [2]]
ordered = 0
[[0, 1], [2]]
[[0, 2], [1]]
[[0], [1, 2]]
ordered = 1
[[0], [1, 2]]
[[0], [2, 1]]
[[1], [0, 2]]
[[1], [2, 0]]
[[2], [0, 1]]
[[2], [1, 0]]
ordered = 10
[[0, 1], [2]]
[[2], [0, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[0], [1, 2]]
[[1, 2], [0]]
ordered = 11
[[0], [1, 2]]
[[0, 1], [2]]
[[0], [2, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[1, 0], [2]]
[[1], [2, 0]]
[[1, 2], [0]]
[[2], [0, 1]]
[[2, 0], [1]]
[[2], [1, 0]]
[[2, 1], [0]]
See Also
========
partitions, multiset_partitions
"""
def partition(lista, bins):
# EnricoGiampieri's partition generator from
# https://stackoverflow.com/questions/13131491/
# partition-n-items-into-k-bins-in-python-lazily
if len(lista) == 1 or bins == 1:
yield [lista]
elif len(lista) > 1 and bins > 1:
for i in range(1, len(lista)):
for part in partition(lista[i:], bins - 1):
if len([lista[:i]] + part) == bins:
yield [lista[:i]] + part
if ordered is None:
yield from partition(l, k)
elif ordered == 11:
for pl in multiset_permutations(l):
pl = list(pl)
yield from partition(pl, k)
elif ordered == 00:
yield from multiset_partitions(l, k)
elif ordered == 10:
for p in multiset_partitions(l, k):
for perm in permutations(p):
yield list(perm)
elif ordered == 1:
for kgot, p in partitions(len(l), k, size=True):
if kgot != k:
continue
for li in multiset_permutations(l):
rv = []
i = j = 0
li = list(li)
for size, multiplicity in sorted(p.items()):
for m in range(multiplicity):
j = i + size
rv.append(li[i: j])
i = j
yield rv
else:
raise ValueError(
'ordered must be one of 00, 01, 10 or 11, not %s' % ordered)
def permute_signs(t):
"""Return iterator in which the signs of non-zero elements
of t are permuted.
Examples
========
>>> from sympy.utilities.iterables import permute_signs
>>> list(permute_signs((0, 1, 2)))
[(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2)]
"""
for signs in product(*[(1, -1)]*(len(t) - t.count(0))):
signs = list(signs)
yield type(t)([i*signs.pop() if i else i for i in t])
def signed_permutations(t):
"""Return iterator in which the signs of non-zero elements
of t and the order of the elements are permuted.
Examples
========
>>> from sympy.utilities.iterables import signed_permutations
>>> list(signed_permutations((0, 1, 2)))
[(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2), (0, 2, 1),
(0, -2, 1), (0, 2, -1), (0, -2, -1), (1, 0, 2), (-1, 0, 2),
(1, 0, -2), (-1, 0, -2), (1, 2, 0), (-1, 2, 0), (1, -2, 0),
(-1, -2, 0), (2, 0, 1), (-2, 0, 1), (2, 0, -1), (-2, 0, -1),
(2, 1, 0), (-2, 1, 0), (2, -1, 0), (-2, -1, 0)]
"""
return (type(t)(i) for j in permutations(t)
for i in permute_signs(j))
def rotations(s, dir=1):
"""Return a generator giving the items in s as list where
each subsequent list has the items rotated to the left (default)
or right (``dir=-1``) relative to the previous list.
Examples
========
>>> from sympy import rotations
>>> list(rotations([1,2,3]))
[[1, 2, 3], [2, 3, 1], [3, 1, 2]]
>>> list(rotations([1,2,3], -1))
[[1, 2, 3], [3, 1, 2], [2, 3, 1]]
"""
seq = list(s)
for i in range(len(seq)):
yield seq
seq = rotate_left(seq, dir)
def roundrobin(*iterables):
"""roundrobin recipe taken from itertools documentation:
https://docs.python.org/3/library/itertools.html#recipes
roundrobin('ABC', 'D', 'EF') --> A D E B F C
Recipe credited to George Sakkis
"""
nexts = cycle(iter(it).__next__ for it in iterables)
pending = len(iterables)
while pending:
try:
for nxt in nexts:
yield nxt()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
class NotIterable:
"""
Use this as mixin when creating a class which is not supposed to
return true when iterable() is called on its instances because
calling list() on the instance, for example, would result in
an infinite loop.
"""
pass
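# Editor's note (illustrative sketch, not part of the original module): a class
# whose instances should not be treated as iterable by SymPy utilities can use
# NotIterable as a mixin; ``iterable`` excludes it through its default ``exclude``:
#
#     class Endless(NotIterable):
#         def __iter__(self):
#             while True:
#                 yield 1
#
#     iterable(Endless())   # -> False, since NotIterable is in the default exclude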
def iterable(i, exclude=(str, dict, NotIterable)):
"""
Return a boolean indicating whether ``i`` is SymPy iterable.
True also indicates that the iterator is finite, e.g. you can
call list(...) on the instance.
When SymPy is working with iterables, it is almost always assuming
that the iterable is not a string or a mapping, so those are excluded
by default. If you want a pure Python definition, make exclude=None. To
exclude multiple items, pass them as a tuple.
You can also set the _iterable attribute to True or False on your class,
which will override the checks here, including the exclude test.
As a rule of thumb, some SymPy functions use this to check if they should
recursively map over an object. If an object is technically iterable in
the Python sense but does not desire this behavior (e.g., because its
iteration is not finite, or because iteration might induce an unwanted
computation), it should disable it by setting the _iterable attribute to False.
See also: is_sequence
Examples
========
>>> from sympy.utilities.iterables import iterable
>>> from sympy import Tuple
>>> things = [[1], (1,), set([1]), Tuple(1), (j for j in [1, 2]), {1:2}, '1', 1]
>>> for i in things:
... print('%s %s' % (iterable(i), type(i)))
True <... 'list'>
True <... 'tuple'>
True <... 'set'>
True <class 'sympy.core.containers.Tuple'>
True <... 'generator'>
False <... 'dict'>
False <... 'str'>
False <... 'int'>
>>> iterable({}, exclude=None)
True
>>> iterable({}, exclude=str)
True
>>> iterable("no", exclude=str)
False
"""
if hasattr(i, '_iterable'):
return i._iterable
try:
iter(i)
except TypeError:
return False
if exclude:
return not isinstance(i, exclude)
return True
def is_sequence(i, include=None):
"""
Return a boolean indicating whether ``i`` is a sequence in the SymPy
sense. If anything that fails the test below should be included as
being a sequence for your application, set 'include' to that object's
type; multiple types should be passed as a tuple of types.
Note: although generators can generate a sequence, they often need special
handling to make sure their elements are captured before the generator is
exhausted, so these are not included by default in the definition of a
sequence.
See also: iterable
Examples
========
>>> from sympy.utilities.iterables import is_sequence
>>> from types import GeneratorType
>>> is_sequence([])
True
>>> is_sequence(set())
False
>>> is_sequence('abc')
False
>>> is_sequence('abc', include=str)
True
>>> generator = (c for c in 'abc')
>>> is_sequence(generator)
False
>>> is_sequence(generator, include=(str, GeneratorType))
True
"""
return (hasattr(i, '__getitem__') and
iterable(i) or
bool(include) and
isinstance(i, include))
@deprecated(
"""
Using postorder_traversal from the sympy.utilities.iterables submodule is
deprecated.
Instead, use postorder_traversal from the top-level sympy namespace, like
sympy.postorder_traversal
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-traversal-functions-moved")
def postorder_traversal(node, keys=None):
from sympy.core.traversal import postorder_traversal as _postorder_traversal
return _postorder_traversal(node, keys=keys)
@deprecated(
"""
Using interactive_traversal from the sympy.utilities.iterables submodule
is deprecated.
Instead, use interactive_traversal from the top-level sympy namespace,
like
sympy.interactive_traversal
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-traversal-functions-moved")
def interactive_traversal(expr):
from sympy.interactive.traversal import interactive_traversal as _interactive_traversal
return _interactive_traversal(expr)
@deprecated(
"""
Importing default_sort_key from sympy.utilities.iterables is deprecated.
Use from sympy import default_sort_key instead.
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-sympy-core-compatibility",
)
def default_sort_key(*args, **kwargs):
from sympy import default_sort_key as _default_sort_key
return _default_sort_key(*args, **kwargs)
@deprecated(
"""
Importing default_sort_key from sympy.utilities.iterables is deprecated.
Use from sympy import default_sort_key instead.
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-sympy-core-compatibility",
)
def ordered(*args, **kwargs):
from sympy import ordered as _ordered
return _ordered(*args, **kwargs)
|
bceed77adfda8432cf491fe3e3afc0164ff5c8258d57a38d912bf20c3049c8bf | """Miscellaneous stuff that does not really fit anywhere else."""
from __future__ import annotations
import operator
import sys
import os
import re as _re
import struct
from textwrap import fill, dedent
class Undecidable(ValueError):
# an error to be raised when a decision cannot be made definitively
# where a definitive answer is needed
pass
def filldedent(s, w=70, **kwargs):
"""
Strips leading and trailing empty lines from a copy of ``s``, then dedents,
fills and returns it.
Empty line stripping serves to deal with docstrings like this one that
start with a newline after the initial triple quote, inserting an empty
line at the beginning of the string.
Additional keyword arguments will be passed to ``textwrap.fill()``.
See Also
========
strlines, rawlines
"""
return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs)
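# Editor's note (illustrative sketch, not part of the original module): the
# stripping, dedenting and filling combine so that an indented multi-line
# literal becomes one wrapped paragraph preceded by a newline, e.g.
#
#     filldedent('''
#         first line
#         second line''')     # -> '\nfirst line second line'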
def strlines(s, c=64, short=False):
"""Return a cut-and-pastable string that, when printed, is
equivalent to the input. The lines will be surrounded by
parentheses and no line will be longer than c (default 64)
    characters. If the line contains newline characters, the
`rawlines` result will be returned. If ``short`` is True
(default is False) then if there is one line it will be
returned without bounding parentheses.
Examples
========
>>> from sympy.utilities.misc import strlines
>>> q = 'this is a long string that should be broken into shorter lines'
>>> print(strlines(q, 40))
(
'this is a long string that should be b'
'roken into shorter lines'
)
>>> q == (
... 'this is a long string that should be b'
... 'roken into shorter lines'
... )
True
See Also
========
filldedent, rawlines
"""
if not isinstance(s, str):
raise ValueError('expecting string input')
if '\n' in s:
return rawlines(s)
q = '"' if repr(s).startswith('"') else "'"
q = (q,)*2
if '\\' in s: # use r-string
m = '(\nr%s%%s%s\n)' % q
j = '%s\nr%s' % q
c -= 3
else:
m = '(\n%s%%s%s\n)' % q
j = '%s\n%s' % q
c -= 2
out = []
while s:
out.append(s[:c])
        s = s[c:]
if short and len(out) == 1:
return (m % out[0]).splitlines()[1] # strip bounding (\n...\n)
return m % j.join(out)
def rawlines(s):
"""Return a cut-and-pastable string that, when printed, is equivalent
to the input. Use this when there is more than one line in the
string. The string returned is formatted so it can be indented
nicely within tests; in some cases it is wrapped in the dedent
function which has to be imported from textwrap.
Examples
========
Note: because there are characters in the examples below that need
to be escaped because they are themselves within a triple quoted
docstring, expressions below look more complicated than they would
be if they were printed in an interpreter window.
>>> from sympy.utilities.misc import rawlines
>>> from sympy import TableForm
>>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee'])))
>>> print(rawlines(s))
(
'a bee\\n'
'-----\\n'
'1 10 '
)
>>> print(rawlines('''this
... that'''))
dedent('''\\
this
that''')
>>> print(rawlines('''this
... that
... '''))
dedent('''\\
this
that
''')
>>> s = \"\"\"this
... is a triple '''
... \"\"\"
>>> print(rawlines(s))
dedent(\"\"\"\\
this
is a triple '''
\"\"\")
>>> print(rawlines('''this
... that
... '''))
(
'this\\n'
'that\\n'
' '
)
See Also
========
filldedent, strlines
"""
lines = s.split('\n')
if len(lines) == 1:
return repr(lines[0])
triple = ["'''" in s, '"""' in s]
if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple):
rv = []
# add on the newlines
trailing = s.endswith('\n')
last = len(lines) - 1
for i, li in enumerate(lines):
if i != last or trailing:
rv.append(repr(li + '\n'))
else:
rv.append(repr(li))
return '(\n %s\n)' % '\n '.join(rv)
else:
rv = '\n '.join(lines)
if triple[0]:
return 'dedent("""\\\n %s""")' % rv
else:
return "dedent('''\\\n %s''')" % rv
ARCH = str(struct.calcsize('P') * 8) + "-bit"
# XXX: PyPy does not support hash randomization
HASH_RANDOMIZATION = getattr(sys.flags, 'hash_randomization', False)
_debug_tmp: list[str] = []
_debug_iter = 0
def debug_decorator(func):
"""If SYMPY_DEBUG is True, it will print a nice execution tree with
arguments and results of all decorated functions, else do nothing.
"""
from sympy import SYMPY_DEBUG
if not SYMPY_DEBUG:
return func
def maketree(f, *args, **kw):
global _debug_tmp
global _debug_iter
oldtmp = _debug_tmp
_debug_tmp = []
_debug_iter += 1
def tree(subtrees):
def indent(s, variant=1):
x = s.split("\n")
r = "+-%s\n" % x[0]
for a in x[1:]:
if a == "":
continue
if variant == 1:
r += "| %s\n" % a
else:
r += " %s\n" % a
return r
if len(subtrees) == 0:
return ""
f = []
for a in subtrees[:-1]:
f.append(indent(a))
f.append(indent(subtrees[-1], 2))
return ''.join(f)
# If there is a bug and the algorithm enters an infinite loop, enable the
# following lines. It will print the names and parameters of all major functions
# that are called, *before* they are called
#from functools import reduce
#print("%s%s %s%s" % (_debug_iter, reduce(lambda x, y: x + y, \
# map(lambda x: '-', range(1, 2 + _debug_iter))), f.__name__, args))
r = f(*args, **kw)
_debug_iter -= 1
s = "%s%s = %s\n" % (f.__name__, args, r)
if _debug_tmp != []:
s += tree(_debug_tmp)
_debug_tmp = oldtmp
_debug_tmp.append(s)
if _debug_iter == 0:
print(_debug_tmp[0])
_debug_tmp = []
return r
def decorated(*args, **kwargs):
return maketree(func, *args, **kwargs)
return decorated
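# Editor's note (illustrative sketch, not part of the original module): with the
# SYMPY_DEBUG flag enabled, a decorated function prints a call tree, roughly like
#
#     @debug_decorator
#     def f(x):
#         return x + 1
#
#     f(2)    # prints something like "f(2,) = 3" plus any nested decorated calls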
def debug(*args):
"""
Print ``*args`` if SYMPY_DEBUG is True, else do nothing.
"""
from sympy import SYMPY_DEBUG
if SYMPY_DEBUG:
print(*args, file=sys.stderr)
def find_executable(executable, path=None):
"""Try to find 'executable' in the directories listed in 'path' (a
string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']). Returns the complete filename or None if not
found
"""
from .exceptions import sympy_deprecation_warning
sympy_deprecation_warning(
"""
sympy.utilities.misc.find_executable() is deprecated. Use the standard
library shutil.which() function instead.
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-find-executable",
)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extlist = ['']
if os.name == 'os2':
(base, ext) = os.path.splitext(executable)
# executable files on OS/2 can have an arbitrary extension, but
# .exe is automatically appended if no dot is present in the name
if not ext:
executable = executable + ".exe"
elif sys.platform == 'win32':
pathext = os.environ['PATHEXT'].lower().split(os.pathsep)
(base, ext) = os.path.splitext(executable)
if ext.lower() not in pathext:
extlist = pathext
for ext in extlist:
execname = executable + ext
if os.path.isfile(execname):
return execname
else:
for p in paths:
f = os.path.join(p, execname)
if os.path.isfile(f):
return f
return None
def func_name(x, short=False):
"""Return function name of `x` (if defined) else the `type(x)`.
If short is True and there is a shorter alias for the result,
return the alias.
Examples
========
>>> from sympy.utilities.misc import func_name
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> func_name(Matrix.eye(3))
'MutableDenseMatrix'
>>> func_name(x < 1)
'StrictLessThan'
>>> func_name(x < 1, short=True)
'Lt'
"""
alias = {
'GreaterThan': 'Ge',
'StrictGreaterThan': 'Gt',
'LessThan': 'Le',
'StrictLessThan': 'Lt',
'Equality': 'Eq',
'Unequality': 'Ne',
}
typ = type(x)
if str(typ).startswith("<type '"):
typ = str(typ).split("'")[1].split("'")[0]
elif str(typ).startswith("<class '"):
typ = str(typ).split("'")[1].split("'")[0]
rv = getattr(getattr(x, 'func', x), '__name__', typ)
if '.' in rv:
rv = rv.split('.')[-1]
if short:
rv = alias.get(rv, rv)
return rv
def _replace(reps):
"""Return a function that can make the replacements, given in
``reps``, on a string. The replacements should be given as mapping.
Examples
========
>>> from sympy.utilities.misc import _replace
>>> f = _replace(dict(foo='bar', d='t'))
>>> f('food')
'bart'
>>> f = _replace({})
>>> f('food')
'food'
"""
if not reps:
return lambda x: x
D = lambda match: reps[match.group(0)]
pattern = _re.compile("|".join(
[_re.escape(k) for k, v in reps.items()]), _re.M)
return lambda string: pattern.sub(D, string)
def replace(string, *reps):
"""Return ``string`` with all keys in ``reps`` replaced with
their corresponding values, longer strings first, irrespective
of the order they are given. ``reps`` may be passed as tuples
or a single mapping.
Examples
========
>>> from sympy.utilities.misc import replace
>>> replace('foo', {'oo': 'ar', 'f': 'b'})
'bar'
>>> replace("spamham sha", ("spam", "eggs"), ("sha","md5"))
'eggsham md5'
There is no guarantee that a unique answer will be
obtained if keys in a mapping overlap (i.e. are the same
length and have some identical sequence at the
beginning/end):
>>> reps = [
... ('ab', 'x'),
... ('bc', 'y')]
>>> replace('abc', *reps) in ('xc', 'ay')
True
References
==========
.. [1] https://stackoverflow.com/questions/6116978/python-replace-multiple-strings
"""
if len(reps) == 1:
kv = reps[0]
if isinstance(kv, dict):
reps = kv
else:
return string.replace(*kv)
else:
reps = dict(reps)
return _replace(reps)(string)
def translate(s, a, b=None, c=None):
"""Return ``s`` where characters have been replaced or deleted.
SYNTAX
======
translate(s, None, deletechars):
all characters in ``deletechars`` are deleted
translate(s, map [,deletechars]):
all characters in ``deletechars`` (if provided) are deleted
then the replacements defined by map are made; if the keys
of map are strings then the longer ones are handled first.
Multicharacter deletions should have a value of ''.
translate(s, oldchars, newchars, deletechars)
all characters in ``deletechars`` are deleted
then each character in ``oldchars`` is replaced with the
corresponding character in ``newchars``
Examples
========
>>> from sympy.utilities.misc import translate
>>> abc = 'abc'
>>> translate(abc, None, 'a')
'bc'
>>> translate(abc, {'a': 'x'}, 'c')
'xb'
>>> translate(abc, {'abc': 'x', 'a': 'y'})
'x'
>>> translate('abcd', 'ac', 'AC', 'd')
'AbC'
    There is no guarantee that a unique answer will be
    obtained if keys in a mapping overlap (i.e. are the same
    length and have some identical sequences at the
    beginning/end):
>>> translate(abc, {'ab': 'x', 'bc': 'y'}) in ('xc', 'ay')
True
"""
mr = {}
if a is None:
if c is not None:
raise ValueError('c should be None when a=None is passed, instead got %s' % c)
if b is None:
return s
c = b
a = b = ''
else:
if isinstance(a, dict):
short = {}
for k in list(a.keys()):
if len(k) == 1 and len(a[k]) == 1:
short[k] = a.pop(k)
mr = a
c = b
if short:
a, b = [''.join(i) for i in list(zip(*short.items()))]
else:
a = b = ''
elif len(a) != len(b):
raise ValueError('oldchars and newchars have different lengths')
if c:
val = str.maketrans('', '', c)
s = s.translate(val)
s = replace(s, mr)
n = str.maketrans(a, b)
return s.translate(n)
def ordinal(num):
"""Return ordinal number string of num, e.g. 1 becomes 1st.
"""
# modified from https://codereview.stackexchange.com/questions/41298/producing-ordinal-numbers
n = as_int(num)
k = abs(n) % 100
if 11 <= k <= 13:
suffix = 'th'
elif k % 10 == 1:
suffix = 'st'
elif k % 10 == 2:
suffix = 'nd'
elif k % 10 == 3:
suffix = 'rd'
else:
suffix = 'th'
return str(n) + suffix
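# Editor's note (illustrative examples, not part of the original module):
#
#     ordinal(1)     # -> '1st'
#     ordinal(11)    # -> '11th'   (11-13 always take 'th')
#     ordinal(23)    # -> '23rd'
#     ordinal(-2)    # -> '-2nd'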
def as_int(n, strict=True):
"""
Convert the argument to a builtin integer.
The return value is guaranteed to be equal to the input. ValueError is
raised if the input has a non-integral value. When ``strict`` is True, this
uses `__index__ <https://docs.python.org/3/reference/datamodel.html#object.__index__>`_
and when it is False it uses ``int``.
Examples
========
>>> from sympy.utilities.misc import as_int
>>> from sympy import sqrt, S
The function is primarily concerned with sanitizing input for
functions that need to work with builtin integers, so anything that
is unambiguously an integer should be returned as an int:
>>> as_int(S(3))
3
Floats, being of limited precision, are not assumed to be exact and
will raise an error unless the ``strict`` flag is False. This
precision issue becomes apparent for large floating point numbers:
>>> big = 1e23
>>> type(big) is float
True
>>> big == int(big)
True
>>> as_int(big)
Traceback (most recent call last):
...
ValueError: ... is not an integer
>>> as_int(big, strict=False)
99999999999999991611392
Input that might be a complex representation of an integer value is
also rejected by default:
>>> one = sqrt(3 + 2*sqrt(2)) - sqrt(2)
>>> int(one) == 1
True
>>> as_int(one)
Traceback (most recent call last):
...
ValueError: ... is not an integer
"""
if strict:
try:
if isinstance(n, bool):
raise TypeError
return operator.index(n)
except TypeError:
raise ValueError('%s is not an integer' % (n,))
else:
try:
result = int(n)
except TypeError:
raise ValueError('%s is not an integer' % (n,))
if n != result:
raise ValueError('%s is not an integer' % (n,))
return result
|
785887ce724025c22a7e34314d25aae67404a509b1f9f4c8ab3289f3869c4fe7 | from sympy.core import S
from sympy.core.function import Lambda
from sympy.core.power import Pow
from .pycode import PythonCodePrinter, _known_functions_math, _print_known_const, _print_known_func, _unpack_integral_limits, ArrayPrinter
from .codeprinter import CodePrinter
_not_in_numpy = 'erf erfc factorial gamma loggamma'.split()
_in_numpy = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_numpy]
_known_functions_numpy = dict(_in_numpy, **{
'acos': 'arccos',
'acosh': 'arccosh',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'exp2': 'exp2',
'sign': 'sign',
'logaddexp': 'logaddexp',
'logaddexp2': 'logaddexp2',
})
_known_constants_numpy = {
'Exp1': 'e',
'Pi': 'pi',
'EulerGamma': 'euler_gamma',
'NaN': 'nan',
'Infinity': 'PINF',
'NegativeInfinity': 'NINF'
}
_numpy_known_functions = {k: 'numpy.' + v for k, v in _known_functions_numpy.items()}
_numpy_known_constants = {k: 'numpy.' + v for k, v in _known_constants_numpy.items()}
class NumPyPrinter(ArrayPrinter, PythonCodePrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_module = 'numpy'
_kf = _numpy_known_functions
_kc = _numpy_known_constants
def __init__(self, settings=None):
"""
`settings` is passed to CodePrinter.__init__()
`module` specifies the array module to use, currently 'NumPy', 'CuPy'
or 'JAX'.
"""
self.language = "Python with {}".format(self._module)
self.printmethod = "_{}code".format(self._module)
self._kf = {**PythonCodePrinter._kf, **self._kf}
super().__init__(settings=settings)
def _print_seq(self, seq):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
delimiter=', '
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
if expr.as_coeff_matrices()[0] is not S.One:
expr_list = expr.as_coeff_matrices()[1]+[(expr.as_coeff_matrices()[0])]
return '({})'.format(').dot('.join(self._print(i) for i in expr_list))
return '({})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_MatPow(self, expr):
"Matrix power printer"
return '{}({}, {})'.format(self._module_format(self._module + '.linalg.matrix_power'),
self._print(expr.args[0]), self._print(expr.args[1]))
def _print_Inverse(self, expr):
"Matrix inverse printer"
return '{}({})'.format(self._module_format(self._module + '.linalg.inv'),
self._print(expr.args[0]))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "%s(%s, %s)" % (self._module_format(self._module + '.dot'),
self._print(arg1),
self._print(arg2))
def _print_MatrixSolve(self, expr):
return "%s(%s, %s)" % (self._module_format(self._module + '.linalg.solve'),
self._print(expr.matrix),
self._print(expr.vector))
def _print_ZeroMatrix(self, expr):
return '{}({})'.format(self._module_format(self._module + '.zeros'),
self._print(expr.shape))
def _print_OneMatrix(self, expr):
return '{}({})'.format(self._module_format(self._module + '.ones'),
self._print(expr.shape))
def _print_FunctionMatrix(self, expr):
from sympy.abc import i, j
lamda = expr.lamda
if not isinstance(lamda, Lambda):
lamda = Lambda((i, j), lamda(i, j))
return '{}(lambda {}: {}, {})'.format(self._module_format(self._module + '.fromfunction'),
', '.join(self._print(arg) for arg in lamda.args[0]),
self._print(lamda.args[1]), self._print(expr.shape))
def _print_HadamardProduct(self, expr):
func = self._module_format(self._module + '.multiply')
return ''.join('{}({}, '.format(func, self._print(arg)) \
for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
')' * (len(expr.args) - 1))
def _print_KroneckerProduct(self, expr):
func = self._module_format(self._module + '.kron')
return ''.join('{}({}, '.format(func, self._print(arg)) \
for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
')' * (len(expr.args) - 1))
def _print_Adjoint(self, expr):
return '{}({}({}))'.format(
self._module_format(self._module + '.conjugate'),
self._module_format(self._module + '.transpose'),
self._print(expr.args[0]))
def _print_DiagonalOf(self, expr):
vect = '{}({})'.format(
self._module_format(self._module + '.diag'),
self._print(expr.arg))
return '{}({}, (-1, 1))'.format(
self._module_format(self._module + '.reshape'), vect)
def _print_DiagMatrix(self, expr):
return '{}({})'.format(self._module_format(self._module + '.diagflat'),
self._print(expr.args[0]))
def _print_DiagonalMatrix(self, expr):
return '{}({}, {}({}, {}))'.format(self._module_format(self._module + '.multiply'),
self._print(expr.arg), self._module_format(self._module + '.eye'),
self._print(expr.shape[0]), self._print(expr.shape[1]))
def _print_Piecewise(self, expr):
"Piecewise function printer"
from sympy.logic.boolalg import ITE, simplify_logic
def print_cond(cond):
""" Problem having an ITE in the cond. """
if cond.has(ITE):
return self._print(simplify_logic(cond))
else:
return self._print(cond)
exprs = '[{}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{}]'.format(','.join(print_cond(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
return '{}({}, {}, default={})'.format(
self._module_format(self._module + '.select'), conds, exprs,
self._print(S.NaN))
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=self._module_format(self._module + '.'+op[expr.rel_op]),
lhs=lhs, rhs=rhs)
return super()._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{}.reduce(({}))'.format(self._module_format(self._module + '.logical_and'), ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{}.reduce(({}))'.format(self._module_format(self._module + '.logical_or'), ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{}({})'.format(self._module_format(self._module + '.logical_not'), ','.join(self._print(i) for i in expr.args))
def _print_Pow(self, expr, rational=False):
# XXX Workaround for negative integer power error
if expr.exp.is_integer and expr.exp.is_negative:
expr = Pow(expr.base, expr.exp.evalf(), evaluate=False)
return self._hprint_Pow(expr, rational=rational, sqrt=self._module + '.sqrt')
def _print_Min(self, expr):
return '{}(({}), axis=0)'.format(self._module_format(self._module + '.amin'), ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{}(({}), axis=0)'.format(self._module_format(self._module + '.amax'), ','.join(self._print(i) for i in expr.args))
def _print_arg(self, expr):
return "%s(%s)" % (self._module_format(self._module + '.angle'), self._print(expr.args[0]))
def _print_im(self, expr):
return "%s(%s)" % (self._module_format(self._module + '.imag'), self._print(expr.args[0]))
def _print_Mod(self, expr):
return "%s(%s)" % (self._module_format(self._module + '.mod'), ', '.join(
map(lambda arg: self._print(arg), expr.args)))
def _print_re(self, expr):
return "%s(%s)" % (self._module_format(self._module + '.real'), self._print(expr.args[0]))
def _print_sinc(self, expr):
return "%s(%s)" % (self._module_format(self._module + '.sinc'), self._print(expr.args[0]/S.Pi))
def _print_MatrixBase(self, expr):
func = self.known_functions.get(expr.__class__.__name__, None)
if func is None:
func = self._module_format(self._module + '.array')
return "%s(%s)" % (func, self._print(expr.tolist()))
def _print_Identity(self, expr):
shape = expr.shape
if all(dim.is_Integer for dim in shape):
return "%s(%s)" % (self._module_format(self._module + '.eye'), self._print(expr.shape[0]))
else:
raise NotImplementedError("Symbolic matrix dimensions are not yet supported for identity matrices")
def _print_BlockMatrix(self, expr):
return '{}({})'.format(self._module_format(self._module + '.block'),
self._print(expr.args[0].tolist()))
def _print_NDimArray(self, expr):
if len(expr.shape) == 1:
return self._module + '.array(' + self._print(expr.args[0]) + ')'
if len(expr.shape) == 2:
return self._print(expr.tomatrix())
# Should be possible to extend to more dimensions
return CodePrinter._print_not_supported(self, expr)
_add = "add"
_einsum = "einsum"
_transpose = "transpose"
_ones = "ones"
_zeros = "zeros"
_print_lowergamma = CodePrinter._print_not_supported
_print_uppergamma = CodePrinter._print_not_supported
_print_fresnelc = CodePrinter._print_not_supported
_print_fresnels = CodePrinter._print_not_supported
for func in _numpy_known_functions:
setattr(NumPyPrinter, f'_print_{func}', _print_known_func)
for const in _numpy_known_constants:
setattr(NumPyPrinter, f'_print_{const}', _print_known_const)
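# Editor's note (illustrative usage sketch, not part of the original module;
# the exact output string is indicative only):
#
#     from sympy import Piecewise, Symbol
#     x = Symbol('x')
#     NumPyPrinter().doprint(Piecewise((x, x > 0), (0, True)))
#     # -> something like
#     # 'numpy.select([numpy.greater(x, 0),True], [x,0], default=numpy.nan)'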
_known_functions_scipy_special = {
'Ei': 'expi',
'erf': 'erf',
'erfc': 'erfc',
'besselj': 'jv',
'bessely': 'yv',
'besseli': 'iv',
'besselk': 'kv',
'cosm1': 'cosm1',
'powm1': 'powm1',
'factorial': 'factorial',
'gamma': 'gamma',
'loggamma': 'gammaln',
'digamma': 'psi',
'polygamma': 'polygamma',
'RisingFactorial': 'poch',
'jacobi': 'eval_jacobi',
'gegenbauer': 'eval_gegenbauer',
'chebyshevt': 'eval_chebyt',
'chebyshevu': 'eval_chebyu',
'legendre': 'eval_legendre',
'hermite': 'eval_hermite',
'laguerre': 'eval_laguerre',
'assoc_laguerre': 'eval_genlaguerre',
'beta': 'beta',
'LambertW' : 'lambertw',
}
_known_constants_scipy_constants = {
'GoldenRatio': 'golden_ratio',
'Pi': 'pi',
}
_scipy_known_functions = {k : "scipy.special." + v for k, v in _known_functions_scipy_special.items()}
_scipy_known_constants = {k : "scipy.constants." + v for k, v in _known_constants_scipy_constants.items()}
class SciPyPrinter(NumPyPrinter):
_kf = {**NumPyPrinter._kf, **_scipy_known_functions}
_kc = {**NumPyPrinter._kc, **_scipy_known_constants}
def __init__(self, settings=None):
super().__init__(settings=settings)
self.language = "Python with SciPy and NumPy"
def _print_SparseRepMatrix(self, expr):
i, j, data = [], [], []
for (r, c), v in expr.todok().items():
i.append(r)
j.append(c)
data.append(v)
return "{name}(({data}, ({i}, {j})), shape={shape})".format(
name=self._module_format('scipy.sparse.coo_matrix'),
data=data, i=i, j=j, shape=expr.shape
)
_print_ImmutableSparseMatrix = _print_SparseRepMatrix
# SciPy's lpmv has a different order of arguments from assoc_legendre
def _print_assoc_legendre(self, expr):
return "{0}({2}, {1}, {3})".format(
self._module_format('scipy.special.lpmv'),
self._print(expr.args[0]),
self._print(expr.args[1]),
self._print(expr.args[2]))
def _print_lowergamma(self, expr):
return "{0}({2})*{1}({2}, {3})".format(
self._module_format('scipy.special.gamma'),
self._module_format('scipy.special.gammainc'),
self._print(expr.args[0]),
self._print(expr.args[1]))
def _print_uppergamma(self, expr):
return "{0}({2})*{1}({2}, {3})".format(
self._module_format('scipy.special.gamma'),
self._module_format('scipy.special.gammaincc'),
self._print(expr.args[0]),
self._print(expr.args[1]))
def _print_betainc(self, expr):
betainc = self._module_format('scipy.special.betainc')
beta = self._module_format('scipy.special.beta')
args = [self._print(arg) for arg in expr.args]
return f"({betainc}({args[0]}, {args[1]}, {args[3]}) - {betainc}({args[0]}, {args[1]}, {args[2]})) \
* {beta}({args[0]}, {args[1]})"
def _print_betainc_regularized(self, expr):
return "{0}({1}, {2}, {4}) - {0}({1}, {2}, {3})".format(
self._module_format('scipy.special.betainc'),
self._print(expr.args[0]),
self._print(expr.args[1]),
self._print(expr.args[2]),
self._print(expr.args[3]))
def _print_fresnels(self, expr):
return "{}({})[0]".format(
self._module_format("scipy.special.fresnel"),
self._print(expr.args[0]))
def _print_fresnelc(self, expr):
return "{}({})[1]".format(
self._module_format("scipy.special.fresnel"),
self._print(expr.args[0]))
def _print_airyai(self, expr):
return "{}({})[0]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airyaiprime(self, expr):
return "{}({})[1]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airybi(self, expr):
return "{}({})[2]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airybiprime(self, expr):
return "{}({})[3]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_bernoulli(self, expr):
# scipy's bernoulli is inconsistent with SymPy's so rewrite
return self._print(expr._eval_rewrite_as_zeta(*expr.args))
def _print_harmonic(self, expr):
return self._print(expr._eval_rewrite_as_zeta(*expr.args))
def _print_Integral(self, e):
integration_vars, limits = _unpack_integral_limits(e)
if len(limits) == 1:
# nicer (but not necessary) to prefer quad over nquad for 1D case
module_str = self._module_format("scipy.integrate.quad")
limit_str = "%s, %s" % tuple(map(self._print, limits[0]))
else:
module_str = self._module_format("scipy.integrate.nquad")
limit_str = "({})".format(", ".join(
"(%s, %s)" % tuple(map(self._print, l)) for l in limits))
return "{}(lambda {}: {}, {})[0]".format(
module_str,
", ".join(map(self._print, integration_vars)),
self._print(e.args[0]),
limit_str)
for func in _scipy_known_functions:
setattr(SciPyPrinter, f'_print_{func}', _print_known_func)
for const in _scipy_known_constants:
setattr(SciPyPrinter, f'_print_{const}', _print_known_const)
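# Editor's note (illustrative usage sketch, not part of the original module):
# lowergamma has no single SciPy function, so it is printed as a product of
# scipy.special.gamma and the regularized scipy.special.gammainc, e.g.
#
#     from sympy import lowergamma, symbols
#     a, x = symbols('a x')
#     SciPyPrinter().doprint(lowergamma(a, x))
#     # -> 'scipy.special.gamma(a)*scipy.special.gammainc(a, x)'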
_cupy_known_functions = {k : "cupy." + v for k, v in _known_functions_numpy.items()}
_cupy_known_constants = {k : "cupy." + v for k, v in _known_constants_numpy.items()}
class CuPyPrinter(NumPyPrinter):
"""
CuPy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_module = 'cupy'
_kf = _cupy_known_functions
_kc = _cupy_known_constants
def __init__(self, settings=None):
super().__init__(settings=settings)
for func in _cupy_known_functions:
setattr(CuPyPrinter, f'_print_{func}', _print_known_func)
for const in _cupy_known_constants:
setattr(CuPyPrinter, f'_print_{const}', _print_known_const)
_jax_known_functions = {k: 'jax.numpy.' + v for k, v in _known_functions_numpy.items()}
_jax_known_constants = {k: 'jax.numpy.' + v for k, v in _known_constants_numpy.items()}
class JaxPrinter(NumPyPrinter):
"""
JAX printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_module = "jax.numpy"
_kf = _jax_known_functions
_kc = _jax_known_constants
def __init__(self, settings=None):
super().__init__(settings=settings)
# These need specific override to allow for the lack of "jax.numpy.reduce"
def _print_And(self, expr):
"Logical And printer"
return "{}({}.asarray([{}]), axis=0)".format(
self._module_format(self._module + ".all"),
self._module_format(self._module),
",".join(self._print(i) for i in expr.args),
)
def _print_Or(self, expr):
"Logical Or printer"
return "{}({}.asarray([{}]), axis=0)".format(
self._module_format(self._module + ".any"),
self._module_format(self._module),
",".join(self._print(i) for i in expr.args),
)
for func in _jax_known_functions:
setattr(JaxPrinter, f'_print_{func}', _print_known_func)
for const in _jax_known_constants:
setattr(JaxPrinter, f'_print_{const}', _print_known_const)
|
ff029f9c3ab36c6a9dec55f825b61c58f6a834d2e92fc791885fa21a68bca18d | """
Python code printers
This module contains Python code printers for plain Python as well as NumPy & SciPy enabled code.
"""
from collections import defaultdict
from itertools import chain
from sympy.core import S
from sympy.core.mod import Mod
from .precedence import precedence
from .codeprinter import CodePrinter
_kw = {
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while',
'with', 'yield', 'None', 'False', 'nonlocal', 'True'
}
_known_functions = {
'Abs': 'abs',
'Min': 'min',
'Max': 'max',
}
_known_functions_math = {
'acos': 'acos',
'acosh': 'acosh',
'asin': 'asin',
'asinh': 'asinh',
'atan': 'atan',
'atan2': 'atan2',
'atanh': 'atanh',
'ceiling': 'ceil',
'cos': 'cos',
'cosh': 'cosh',
'erf': 'erf',
'erfc': 'erfc',
'exp': 'exp',
'expm1': 'expm1',
'factorial': 'factorial',
'floor': 'floor',
'gamma': 'gamma',
'hypot': 'hypot',
'loggamma': 'lgamma',
'log': 'log',
'ln': 'log',
'log10': 'log10',
'log1p': 'log1p',
'log2': 'log2',
'sin': 'sin',
'sinh': 'sinh',
'Sqrt': 'sqrt',
'tan': 'tan',
'tanh': 'tanh'
} # Not used from ``math``: [copysign isclose isfinite isinf isnan ldexp frexp pow modf
# radians trunc fmod fsum gcd degrees fabs]
_known_constants_math = {
'Exp1': 'e',
'Pi': 'pi',
'E': 'e',
'Infinity': 'inf',
'NaN': 'nan',
'ComplexInfinity': 'nan'
}
def _print_known_func(self, expr):
known = self.known_functions[expr.__class__.__name__]
return '{name}({args})'.format(name=self._module_format(known),
args=', '.join(map(lambda arg: self._print(arg), expr.args)))
def _print_known_const(self, expr):
known = self.known_constants[expr.__class__.__name__]
return self._module_format(known)
class AbstractPythonCodePrinter(CodePrinter):
printmethod = "_pythoncode"
language = "Python"
reserved_words = _kw
modules = None # initialized to a set in __init__
tab = ' '
_kf = dict(chain(
_known_functions.items(),
[(k, 'math.' + v) for k, v in _known_functions_math.items()]
))
_kc = {k: 'math.'+v for k, v in _known_constants_math.items()}
_operators = {'and': 'and', 'or': 'or', 'not': 'not'}
_default_settings = dict(
CodePrinter._default_settings,
user_functions={},
precision=17,
inline=True,
fully_qualified_modules=True,
contract=False,
standard='python3',
)
def __init__(self, settings=None):
super().__init__(settings)
# Python standard handler
std = self._settings['standard']
if std is None:
import sys
std = 'python{}'.format(sys.version_info.major)
if std != 'python3':
raise ValueError('Only Python 3 is supported.')
self.standard = std
self.module_imports = defaultdict(set)
# Known functions and constants handler
self.known_functions = dict(self._kf, **(settings or {}).get(
'user_functions', {}))
self.known_constants = dict(self._kc, **(settings or {}).get(
'user_constants', {}))
def _declare_number_const(self, name, value):
return "%s = %s" % (name, value)
def _module_format(self, fqn, register=True):
parts = fqn.split('.')
if register and len(parts) > 1:
self.module_imports['.'.join(parts[:-1])].add(parts[-1])
if self._settings['fully_qualified_modules']:
return fqn
else:
return fqn.split('(')[0].split('[')[0].split('.')[-1]
def _format_code(self, lines):
return lines
def _get_statement(self, codestring):
return "{}".format(codestring)
def _get_comment(self, text):
return " # {}".format(text)
def _expand_fold_binary_op(self, op, args):
"""
This method expands a fold on binary operations.
``functools.reduce`` is an example of a folded operation.
For example, the expression
`A + B + C + D`
is folded into
`((A + B) + C) + D`
"""
if len(args) == 1:
return self._print(args[0])
else:
return "%s(%s, %s)" % (
self._module_format(op),
self._expand_fold_binary_op(op, args[:-1]),
self._print(args[-1]),
)
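    # Editor's note (illustrative sketch, not part of the original class): for a
    # hypothetical operation name 'numpy.add' and four printed arguments a, b, c, d
    # the folded form nests to the left, producing roughly
    #     numpy.add(numpy.add(numpy.add(a, b), c), d)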
def _expand_reduce_binary_op(self, op, args):
"""
        This method expands a reduction on binary operations.
Notice: this is NOT the same as ``functools.reduce``.
For example, the expression
`A + B + C + D`
is reduced into:
`(A + B) + (C + D)`
"""
if len(args) == 1:
return self._print(args[0])
else:
N = len(args)
Nhalf = N // 2
return "%s(%s, %s)" % (
self._module_format(op),
                self._expand_reduce_binary_op(op, args[:Nhalf]),
                self._expand_reduce_binary_op(op, args[Nhalf:]),
)
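    # Editor's note (illustrative sketch, not part of the original class): with the
    # same hypothetical 'numpy.add' and arguments a, b, c, d the reduced form splits
    # the argument list in half, producing roughly
    #     numpy.add(numpy.add(a, b), numpy.add(c, d))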
def _print_NaN(self, expr):
return "float('nan')"
def _print_Infinity(self, expr):
return "float('inf')"
def _print_NegativeInfinity(self, expr):
return "float('-inf')"
def _print_ComplexInfinity(self, expr):
return self._print_NaN(expr)
def _print_Mod(self, expr):
PREC = precedence(expr)
return ('{} % {}'.format(*map(lambda x: self.parenthesize(x, PREC), expr.args)))
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
if i == 0:
result.append('(')
result.append('(')
result.append(self._print(e))
result.append(')')
result.append(' if ')
result.append(self._print(c))
result.append(' else ')
i += 1
result = result[:-1]
if result[-1] == 'True':
result = result[:-2]
result.append(')')
else:
result.append(' else None)')
return ''.join(result)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '({lhs} {op} {rhs})'.format(op=expr.rel_op, lhs=lhs, rhs=rhs)
return super()._print_Relational(expr)
def _print_ITE(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
return self._print(expr.rewrite(Piecewise))
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_KroneckerDelta(self, expr):
a, b = expr.args
return '(1 if {a} == {b} else 0)'.format(
a = self._print(a),
b = self._print(b)
)
def _print_MatrixBase(self, expr):
name = expr.__class__.__name__
func = self.known_functions.get(name, name)
return "%s(%s)" % (func, self._print(expr.tolist()))
_print_SparseRepMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
lambda self, expr: self._print_MatrixBase(expr)
def _indent_codestring(self, codestring):
return '\n'.join([self.tab + line for line in codestring.split('\n')])
def _print_FunctionDefinition(self, fd):
body = '\n'.join(map(lambda arg: self._print(arg), fd.body))
return "def {name}({parameters}):\n{body}".format(
name=self._print(fd.name),
parameters=', '.join([self._print(var.symbol) for var in fd.parameters]),
body=self._indent_codestring(body)
)
def _print_While(self, whl):
body = '\n'.join(map(lambda arg: self._print(arg), whl.body))
return "while {cond}:\n{body}".format(
cond=self._print(whl.condition),
body=self._indent_codestring(body)
)
def _print_Declaration(self, decl):
return '%s = %s' % (
self._print(decl.variable.symbol),
self._print(decl.variable.value)
)
def _print_Return(self, ret):
arg, = ret.args
return 'return %s' % self._print(arg)
def _print_Print(self, prnt):
print_args = ', '.join(map(lambda arg: self._print(arg), prnt.print_args))
if prnt.format_string != None: # Must be '!= None', cannot be 'is not None'
print_args = '{} % ({})'.format(
self._print(prnt.format_string), print_args)
if prnt.file != None: # Must be '!= None', cannot be 'is not None'
print_args += ', file=%s' % self._print(prnt.file)
return 'print(%s)' % print_args
def _print_Stream(self, strm):
if str(strm.name) == 'stdout':
return self._module_format('sys.stdout')
elif str(strm.name) == 'stderr':
return self._module_format('sys.stderr')
else:
return self._print(strm.name)
def _print_NoneToken(self, arg):
return 'None'
def _hprint_Pow(self, expr, rational=False, sqrt='math.sqrt'):
"""Printing helper function for ``Pow``
Notes
=====
This preprocesses the ``sqrt`` as math formatter and prints division
Examples
========
>>> from sympy import sqrt
>>> from sympy.printing.pycode import PythonCodePrinter
>>> from sympy.abc import x
Python code printer automatically looks up ``math.sqrt``.
>>> printer = PythonCodePrinter()
>>> printer._hprint_Pow(sqrt(x), rational=True)
'x**(1/2)'
>>> printer._hprint_Pow(sqrt(x), rational=False)
'math.sqrt(x)'
>>> printer._hprint_Pow(1/sqrt(x), rational=True)
'x**(-1/2)'
>>> printer._hprint_Pow(1/sqrt(x), rational=False)
'1/math.sqrt(x)'
>>> printer._hprint_Pow(1/x, rational=False)
'1/x'
>>> printer._hprint_Pow(1/x, rational=True)
'x**(-1)'
Using sqrt from numpy or mpmath
>>> printer._hprint_Pow(sqrt(x), sqrt='numpy.sqrt')
'numpy.sqrt(x)'
>>> printer._hprint_Pow(sqrt(x), sqrt='mpmath.sqrt')
'mpmath.sqrt(x)'
See Also
========
sympy.printing.str.StrPrinter._print_Pow
"""
PREC = precedence(expr)
if expr.exp == S.Half and not rational:
func = self._module_format(sqrt)
arg = self._print(expr.base)
return '{func}({arg})'.format(func=func, arg=arg)
if expr.is_commutative and not rational:
if -expr.exp is S.Half:
func = self._module_format(sqrt)
num = self._print(S.One)
arg = self._print(expr.base)
return f"{num}/{func}({arg})"
if expr.exp is S.NegativeOne:
num = self._print(S.One)
arg = self.parenthesize(expr.base, PREC, strict=False)
return f"{num}/{arg}"
base_str = self.parenthesize(expr.base, PREC, strict=False)
exp_str = self.parenthesize(expr.exp, PREC, strict=False)
return "{}**{}".format(base_str, exp_str)
class ArrayPrinter:
def _arrayify(self, indexed):
from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array
try:
return convert_indexed_to_array(indexed)
except Exception:
return indexed
def _get_einsum_string(self, subranks, contraction_indices):
letters = self._get_letter_generator_for_einsum()
contraction_string = ""
counter = 0
d = {j: min(i) for i in contraction_indices for j in i}
indices = []
for rank_arg in subranks:
lindices = []
for i in range(rank_arg):
if counter in d:
lindices.append(d[counter])
else:
lindices.append(counter)
counter += 1
indices.append(lindices)
mapping = {}
letters_free = []
letters_dum = []
for i in indices:
for j in i:
if j not in mapping:
l = next(letters)
mapping[j] = l
else:
l = mapping[j]
contraction_string += l
if j in d:
if l not in letters_dum:
letters_dum.append(l)
else:
letters_free.append(l)
contraction_string += ","
contraction_string = contraction_string[:-1]
return contraction_string, letters_free, letters_dum
def _get_letter_generator_for_einsum(self):
for i in range(97, 123):
yield chr(i)
for i in range(65, 91):
yield chr(i)
raise ValueError("out of letters")
def _print_ArrayTensorProduct(self, expr):
letters = self._get_letter_generator_for_einsum()
contraction_string = ",".join(["".join([next(letters) for j in range(i)]) for i in expr.subranks])
return '%s("%s", %s)' % (
self._module_format(self._module + "." + self._einsum),
contraction_string,
", ".join([self._print(arg) for arg in expr.args])
)
def _print_ArrayContraction(self, expr):
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct
base = expr.expr
contraction_indices = expr.contraction_indices
if isinstance(base, ArrayTensorProduct):
elems = ",".join(["%s" % (self._print(arg)) for arg in base.args])
ranks = base.subranks
else:
elems = self._print(base)
ranks = [len(base.shape)]
contraction_string, letters_free, letters_dum = self._get_einsum_string(ranks, contraction_indices)
if not contraction_indices:
return self._print(base)
if isinstance(base, ArrayTensorProduct):
elems = ",".join(["%s" % (self._print(arg)) for arg in base.args])
else:
elems = self._print(base)
return "%s(\"%s\", %s)" % (
self._module_format(self._module + "." + self._einsum),
"{}->{}".format(contraction_string, "".join(sorted(letters_free))),
elems,
)
def _print_ArrayDiagonal(self, expr):
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct
diagonal_indices = list(expr.diagonal_indices)
if isinstance(expr.expr, ArrayTensorProduct):
subranks = expr.expr.subranks
elems = expr.expr.args
else:
subranks = expr.subranks
elems = [expr.expr]
diagonal_string, letters_free, letters_dum = self._get_einsum_string(subranks, diagonal_indices)
elems = [self._print(i) for i in elems]
return '%s("%s", %s)' % (
self._module_format(self._module + "." + self._einsum),
"{}->{}".format(diagonal_string, "".join(letters_free+letters_dum)),
", ".join(elems)
)
def _print_PermuteDims(self, expr):
return "%s(%s, %s)" % (
self._module_format(self._module + "." + self._transpose),
self._print(expr.expr),
self._print(expr.permutation.array_form),
)
def _print_ArrayAdd(self, expr):
return self._expand_fold_binary_op(self._module + "." + self._add, expr.args)
def _print_OneArray(self, expr):
return "%s((%s,))" % (
self._module_format(self._module+ "." + self._ones),
','.join(map(self._print,expr.args))
)
def _print_ZeroArray(self, expr):
return "%s((%s,))" % (
self._module_format(self._module+ "." + self._zeros),
','.join(map(self._print,expr.args))
)
def _print_Assignment(self, expr):
#XXX: maybe this needs to happen at a higher level e.g. at _print or
#doprint?
lhs = self._print(self._arrayify(expr.lhs))
rhs = self._print(self._arrayify(expr.rhs))
return "%s = %s" % ( lhs, rhs )
def _print_IndexedBase(self, expr):
return self._print_ArraySymbol(expr)
class PythonCodePrinter(AbstractPythonCodePrinter):
def _print_sign(self, e):
return '(0.0 if {e} == 0 else {f}(1, {e}))'.format(
f=self._module_format('math.copysign'), e=self._print(e.args[0]))
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Indexed(self, expr):
base = expr.args[0]
index = expr.args[1:]
return "{}[{}]".format(str(base), ", ".join([self._print(ind) for ind in index]))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational)
def _print_Rational(self, expr):
return '{}/{}'.format(expr.p, expr.q)
def _print_Half(self, expr):
return self._print_Rational(expr)
def _print_frac(self, expr):
return self._print_Mod(Mod(expr.args[0], 1))
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if name in self.reserved_words:
if self._settings['error_on_reserved']:
msg = ('This expression includes the symbol "{}" which is a '
'reserved keyword in this language.')
raise ValueError(msg.format(name))
return name + self._settings['reserved_word_suffix']
elif '{' in name: # Remove curly braces from subscripted variables
return name.replace('{', '').replace('}', '')
else:
return name
_print_lowergamma = CodePrinter._print_not_supported
_print_uppergamma = CodePrinter._print_not_supported
_print_fresnelc = CodePrinter._print_not_supported
_print_fresnels = CodePrinter._print_not_supported
for k in PythonCodePrinter._kf:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
def pycode(expr, **settings):
""" Converts an expr to a string of Python code
Parameters
==========
expr : Expr
A SymPy expression.
fully_qualified_modules : bool
Whether or not to write out full module names of functions
(``math.sin`` vs. ``sin``). default: ``True``.
standard : str or None, optional
Only 'python3' (default) is supported.
This parameter may be removed in the future.
Examples
========
>>> from sympy import pycode, tan, Symbol
>>> pycode(tan(Symbol('x')) + 1)
'math.tan(x) + 1'
"""
return PythonCodePrinter(settings).doprint(expr)
_not_in_mpmath = 'log1p log2'.split()
_in_mpmath = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_mpmath]
_known_functions_mpmath = dict(_in_mpmath, **{
'beta': 'beta',
'frac': 'frac',
'fresnelc': 'fresnelc',
'fresnels': 'fresnels',
'sign': 'sign',
'loggamma': 'loggamma',
'hyper': 'hyper',
'meijerg': 'meijerg',
'besselj': 'besselj',
'bessely': 'bessely',
'besseli': 'besseli',
'besselk': 'besselk',
})
_known_constants_mpmath = {
'Exp1': 'e',
'Pi': 'pi',
'GoldenRatio': 'phi',
'EulerGamma': 'euler',
'Catalan': 'catalan',
'NaN': 'nan',
'Infinity': 'inf',
'NegativeInfinity': 'ninf'
}
def _unpack_integral_limits(integral_expr):
""" helper function for _print_Integral that
- accepts an Integral expression
- returns a tuple of
- a list variables of integration
- a list of tuples of the upper and lower limits of integration
"""
integration_vars = []
limits = []
for integration_range in integral_expr.limits:
if len(integration_range) == 3:
integration_var, lower_limit, upper_limit = integration_range
else:
raise NotImplementedError("Only definite integrals are supported")
integration_vars.append(integration_var)
limits.append((lower_limit, upper_limit))
return integration_vars, limits
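# Editor's note (illustrative sketch, not part of the original module):
#
#     from sympy import Integral, Symbol
#     x = Symbol('x')
#     _unpack_integral_limits(Integral(x**2, (x, 0, 1)))
#     # -> ([x], [(0, 1)])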
class MpmathPrinter(PythonCodePrinter):
"""
Lambda printer for mpmath which maintains precision for floats
"""
printmethod = "_mpmathcode"
language = "Python with mpmath"
_kf = dict(chain(
_known_functions.items(),
[(k, 'mpmath.' + v) for k, v in _known_functions_mpmath.items()]
))
_kc = {k: 'mpmath.'+v for k, v in _known_constants_mpmath.items()}
def _print_Float(self, e):
# XXX: This does not handle setting mpmath.mp.dps. It is assumed that
# the caller of the lambdified function will have set it to sufficient
# precision to match the Floats in the expression.
# Remove 'mpz' if gmpy is installed.
args = str(tuple(map(int, e._mpf_)))
return '{func}({args})'.format(func=self._module_format('mpmath.mpf'), args=args)
def _print_Rational(self, e):
return "{func}({p})/{func}({q})".format(
func=self._module_format('mpmath.mpf'),
q=self._print(e.q),
p=self._print(e.p)
)
def _print_Half(self, e):
return self._print_Rational(e)
def _print_uppergamma(self, e):
return "{}({}, {}, {})".format(
self._module_format('mpmath.gammainc'),
self._print(e.args[0]),
self._print(e.args[1]),
self._module_format('mpmath.inf'))
def _print_lowergamma(self, e):
return "{}({}, 0, {})".format(
self._module_format('mpmath.gammainc'),
self._print(e.args[0]),
self._print(e.args[1]))
def _print_log2(self, e):
return '{0}({1})/{0}(2)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
def _print_log1p(self, e):
return '{}({})'.format(
self._module_format('mpmath.log1p'), self._print(e.args[0]))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational, sqrt='mpmath.sqrt')
def _print_Integral(self, e):
integration_vars, limits = _unpack_integral_limits(e)
return "{}(lambda {}: {}, {})".format(
self._module_format("mpmath.quad"),
", ".join(map(self._print, integration_vars)),
self._print(e.args[0]),
", ".join("(%s, %s)" % tuple(map(self._print, l)) for l in limits))
for k in MpmathPrinter._kf:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_mpmath:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_const)
class SymPyPrinter(AbstractPythonCodePrinter):
language = "Python with SymPy"
def _print_Function(self, expr):
mod = expr.func.__module__ or ''
return '%s(%s)' % (self._module_format(mod + ('.' if mod else '') + expr.func.__name__),
', '.join(map(lambda arg: self._print(arg), expr.args)))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational, sqrt='sympy.sqrt')
|
0049d91a37708ac3bc7eb0b8dcf7044aa080cf7b9eea3c351f51ce76cdc06377 | """
A Printer for generating readable representation of most SymPy classes.
"""
from __future__ import annotations
from typing import Any
from sympy.core import S, Rational, Pow, Basic, Mul, Number
from sympy.core.mul import _keep_coeff
from sympy.core.relational import Relational
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import SympifyError
from sympy.utilities.iterables import sift
from .precedence import precedence, PRECEDENCE
from .printer import Printer, print_function
from mpmath.libmp import prec_to_dps, to_str as mlib_to_str
class StrPrinter(Printer):
printmethod = "_sympystr"
_default_settings: dict[str, Any] = {
"order": None,
"full_prec": "auto",
"sympy_integers": False,
"abbrev": False,
"perm_cyclic": True,
"min": None,
"max": None,
}
_relationals: dict[str, str] = {}
def parenthesize(self, item, level, strict=False):
if (precedence(item) < level) or ((not strict) and precedence(item) <= level):
return "(%s)" % self._print(item)
else:
return self._print(item)
def stringify(self, args, sep, level=0):
return sep.join([self.parenthesize(item, level) for item in args])
def emptyPrinter(self, expr):
if isinstance(expr, str):
return expr
elif isinstance(expr, Basic):
return repr(expr)
else:
return str(expr)
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
prec = precedence(expr)
l = []
for term in terms:
t = self._print(term)
if t.startswith('-') and not term.is_Add:
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < prec or term.is_Add:
l.extend([sign, "(%s)" % t])
else:
l.extend([sign, t])
sign = l.pop(0)
if sign == '+':
sign = ""
return sign + ' '.join(l)
def _print_BooleanTrue(self, expr):
return "True"
def _print_BooleanFalse(self, expr):
return "False"
def _print_Not(self, expr):
return '~%s' %(self.parenthesize(expr.args[0],PRECEDENCE["Not"]))
def _print_And(self, expr):
args = list(expr.args)
for j, i in enumerate(args):
if isinstance(i, Relational) and (
i.canonical.rhs is S.NegativeInfinity):
args.insert(0, args.pop(j))
return self.stringify(args, " & ", PRECEDENCE["BitwiseAnd"])
def _print_Or(self, expr):
return self.stringify(expr.args, " | ", PRECEDENCE["BitwiseOr"])
def _print_Xor(self, expr):
return self.stringify(expr.args, " ^ ", PRECEDENCE["BitwiseXor"])
def _print_AppliedPredicate(self, expr):
return '%s(%s)' % (
self._print(expr.function), self.stringify(expr.arguments, ", "))
def _print_Basic(self, expr):
l = [self._print(o) for o in expr.args]
return expr.__class__.__name__ + "(%s)" % ", ".join(l)
def _print_BlockMatrix(self, B):
if B.blocks.shape == (1, 1):
            return self._print(B.blocks[0, 0])
return self._print(B.blocks)
def _print_Catalan(self, expr):
return 'Catalan'
def _print_ComplexInfinity(self, expr):
return 'zoo'
def _print_ConditionSet(self, s):
args = tuple([self._print(i) for i in (s.sym, s.condition)])
if s.base_set is S.UniversalSet:
return 'ConditionSet(%s, %s)' % args
args += (self._print(s.base_set),)
return 'ConditionSet(%s, %s, %s)' % args
def _print_Derivative(self, expr):
dexpr = expr.expr
dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count]
return 'Derivative(%s)' % ", ".join(map(lambda arg: self._print(arg), [dexpr] + dvars))
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
item = "%s: %s" % (self._print(key), self._print(d[key]))
items.append(item)
return "{%s}" % ", ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return 'Domain: ' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
return ('Domain: ' + self._print(d.symbols) + ' in ' +
self._print(d.set))
else:
return 'Domain on ' + self._print(d.symbols)
def _print_Dummy(self, expr):
return '_' + expr.name
def _print_EulerGamma(self, expr):
return 'EulerGamma'
def _print_Exp1(self, expr):
return 'E'
def _print_ExprCondPair(self, expr):
return '(%s, %s)' % (self._print(expr.expr), self._print(expr.cond))
def _print_Function(self, expr):
return expr.func.__name__ + "(%s)" % self.stringify(expr.args, ", ")
def _print_GoldenRatio(self, expr):
return 'GoldenRatio'
def _print_Heaviside(self, expr):
        # Same as _print_Function but uses pargs to suppress printing the
        # default 1/2 for the second argument
return expr.func.__name__ + "(%s)" % self.stringify(expr.pargs, ", ")
def _print_TribonacciConstant(self, expr):
return 'TribonacciConstant'
def _print_ImaginaryUnit(self, expr):
return 'I'
def _print_Infinity(self, expr):
return 'oo'
def _print_Integral(self, expr):
def _xab_tostr(xab):
if len(xab) == 1:
return self._print(xab[0])
else:
return self._print((xab[0],) + tuple(xab[1:]))
L = ', '.join([_xab_tostr(l) for l in expr.limits])
return 'Integral(%s, %s)' % (self._print(expr.function), L)
def _print_Interval(self, i):
fin = 'Interval{m}({a}, {b})'
a, b, l, r = i.args
if a.is_infinite and b.is_infinite:
m = ''
elif a.is_infinite and not r:
m = ''
elif b.is_infinite and not l:
m = ''
elif not l and not r:
m = ''
elif l and r:
m = '.open'
elif l:
m = '.Lopen'
else:
m = '.Ropen'
return fin.format(**{'a': a, 'b': b, 'm': m})
def _print_AccumulationBounds(self, i):
return "AccumBounds(%s, %s)" % (self._print(i.min),
self._print(i.max))
def _print_Inverse(self, I):
return "%s**(-1)" % self.parenthesize(I.arg, PRECEDENCE["Pow"])
def _print_Lambda(self, obj):
expr = obj.expr
sig = obj.signature
if len(sig) == 1 and sig[0].is_symbol:
sig = sig[0]
return "Lambda(%s, %s)" % (self._print(sig), self._print(expr))
def _print_LatticeOp(self, expr):
args = sorted(expr.args, key=default_sort_key)
return expr.func.__name__ + "(%s)" % ", ".join(self._print(arg) for arg in args)
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
return "Limit(%s, %s, %s, dir='%s')" % tuple(map(self._print, (e, z, z0, dir)))
def _print_list(self, expr):
return "[%s]" % self.stringify(expr, ", ")
def _print_List(self, expr):
return self._print_list(expr)
def _print_MatrixBase(self, expr):
return expr._format_str(self)
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \
+ '[%s, %s]' % (self._print(expr.i), self._print(expr.j))
def _print_MatrixSlice(self, expr):
def strslice(x, dim):
x = list(x)
if x[2] == 1:
del x[2]
if x[0] == 0:
x[0] = ''
if x[1] == dim:
x[1] = ''
return ':'.join(map(lambda arg: self._print(arg), x))
return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + '[' +
strslice(expr.rowslice, expr.parent.rows) + ', ' +
strslice(expr.colslice, expr.parent.cols) + ']')
def _print_DeferredVector(self, expr):
return expr.name
def _print_Mul(self, expr):
prec = precedence(expr)
# Check for unevaluated Mul. In this case we need to make sure the
# identities are visible, multiple Rational factors are not combined
# etc so we display in a straight-forward form that fully preserves all
# args and their order.
args = expr.args
if args[0] is S.One or any(
isinstance(a, Number) or
a.is_Pow and all(ai.is_Integer for ai in a.args)
for a in args[1:]):
d, n = sift(args, lambda x:
isinstance(x, Pow) and bool(x.exp.as_coeff_Mul()[0] < 0),
binary=True)
for i, di in enumerate(d):
if di.exp.is_Number:
e = -di.exp
else:
dargs = list(di.exp.args)
dargs[0] = -dargs[0]
e = Mul._from_args(dargs)
d[i] = Pow(di.base, e, evaluate=False) if e - 1 else di.base
pre = []
# don't parenthesize first factor if negative
if n and not n[0].is_Add and n[0].could_extract_minus_sign():
pre = [self._print(n.pop(0))]
nfactors = pre + [self.parenthesize(a, prec, strict=False)
for a in n]
if not nfactors:
nfactors = ['1']
# don't parenthesize first of denominator unless singleton
if len(d) > 1 and d[0].could_extract_minus_sign():
pre = [self._print(d.pop(0))]
else:
pre = []
dfactors = pre + [self.parenthesize(a, prec, strict=False)
for a in d]
n = '*'.join(nfactors)
d = '*'.join(dfactors)
if len(dfactors) > 1:
return '%s/(%s)' % (n, d)
elif dfactors:
return '%s/%s' % (n, d)
return n
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
pow_paren = [] # Will collect all pow with more than one base element and exp = -1
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
def apow(i):
b, e = i.as_base_exp()
eargs = list(Mul.make_args(e))
if eargs[0] is S.NegativeOne:
eargs = eargs[1:]
else:
eargs[0] = -eargs[0]
e = Mul._from_args(eargs)
if isinstance(i, Pow):
return i.func(b, e, evaluate=False)
return i.func(e, evaluate=False)
for item in args:
if (item.is_commutative and
isinstance(item, Pow) and
bool(item.exp.as_coeff_Mul()[0] < 0)):
if item.exp is not S.NegativeOne:
b.append(apow(item))
else:
if (len(item.args[0].args) != 1 and
isinstance(item.base, (Mul, Pow))):
# To avoid situations like #14160
pow_paren.append(item)
b.append(item.base)
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec, strict=False) for x in a]
b_str = [self.parenthesize(x, prec, strict=False) for x in b]
# To parenthesize Pow with exp = -1 and having more than one Symbol
for item in pow_paren:
if item.base in b:
b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)]
if not b:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + "/" + b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
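    # Rough sketch of the numerator/denominator split performed above (added
    # for exposition; outputs are assumptions, not upstream doctests):
    #
    #     >>> from sympy import Rational
    #     >>> from sympy.abc import x, y, z
    #     >>> print(x*y/(2*z**2))      # negative powers and Rational q go to b
    #     x*y/(2*z**2)
    #     >>> print(Rational(3, 4)*x)  # p joins the numerator, q the denominator
    #     3*x/4
    #     >>> print(-x/(y + 1))        # the sign is pulled out via _keep_coeff
    #     -x/(y + 1)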
def _print_MatMul(self, expr):
c, m = expr.as_coeff_mmul()
sign = ""
if c.is_number:
re, im = c.as_real_imag()
if im.is_zero and re.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
elif re.is_zero and im.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
return sign + '*'.join(
[self.parenthesize(arg, precedence(expr)) for arg in expr.args]
)
def _print_ElementwiseApplyFunction(self, expr):
return "{}.({})".format(
expr.function,
self._print(expr.expr),
)
def _print_NaN(self, expr):
return 'nan'
def _print_NegativeInfinity(self, expr):
return '-oo'
def _print_Order(self, expr):
if not expr.variables or all(p is S.Zero for p in expr.point):
if len(expr.variables) <= 1:
return 'O(%s)' % self._print(expr.expr)
else:
return 'O(%s)' % self.stringify((expr.expr,) + expr.variables, ', ', 0)
else:
return 'O(%s)' % self.stringify(expr.args, ', ', 0)
def _print_Ordinal(self, expr):
return expr.__str__()
def _print_Cycle(self, expr):
return expr.__str__()
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
from sympy.utilities.exceptions import sympy_deprecation_warning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=7,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
if not expr.size:
return '()'
# before taking Cycle notation, see if the last element is
# a singleton and move it to the head of the string
s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):]
last = s.rfind('(')
if not last == 0 and ',' not in s[last:]:
s = s[last:] + s[:last]
s = s.replace(',', '')
return s
else:
s = expr.support()
if not s:
if expr.size < 5:
return 'Permutation(%s)' % self._print(expr.array_form)
return 'Permutation([], size=%s)' % self._print(expr.size)
trim = self._print(expr.array_form[:s[-1] + 1]) + ', size=%s' % self._print(expr.size)
use = full = self._print(expr.array_form)
if len(trim) < len(full):
use = trim
return 'Permutation(%s)' % use
def _print_Subs(self, obj):
expr, old, new = obj.args
if len(obj.point) == 1:
old = old[0]
new = new[0]
return "Subs(%s, %s, %s)" % (
self._print(expr), self._print(old), self._print(new))
def _print_TensorIndex(self, expr):
return expr._print()
def _print_TensorHead(self, expr):
return expr._print()
def _print_Tensor(self, expr):
return expr._print()
def _print_TensMul(self, expr):
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
sign, args = expr._get_args_for_traditional_printer()
return sign + "*".join(
[self.parenthesize(arg, precedence(expr)) for arg in args]
)
def _print_TensAdd(self, expr):
return expr._print()
def _print_ArraySymbol(self, expr):
return self._print(expr.name)
def _print_ArrayElement(self, expr):
return "%s[%s]" % (
self.parenthesize(expr.name, PRECEDENCE["Func"], True), ", ".join([self._print(i) for i in expr.indices]))
def _print_PermutationGroup(self, expr):
p = [' %s' % self._print(a) for a in expr.args]
return 'PermutationGroup([\n%s])' % ',\n'.join(p)
def _print_Pi(self, expr):
return 'pi'
def _print_PolyRing(self, ring):
return "Polynomial ring in %s over %s with %s order" % \
(", ".join(map(lambda rs: self._print(rs), ring.symbols)),
self._print(ring.domain), self._print(ring.order))
def _print_FracField(self, field):
return "Rational function field in %s over %s with %s order" % \
(", ".join(map(lambda fs: self._print(fs), field.symbols)),
self._print(field.domain), self._print(field.order))
def _print_FreeGroupElement(self, elm):
return elm.__str__()
def _print_GaussianElement(self, poly):
return "(%s + %s*I)" % (poly.x, poly.y)
def _print_PolyElement(self, poly):
return poly.str(self, PRECEDENCE, "%s**%s", "*")
def _print_FracElement(self, frac):
if frac.denom == 1:
return self._print(frac.numer)
else:
numer = self.parenthesize(frac.numer, PRECEDENCE["Mul"], strict=True)
denom = self.parenthesize(frac.denom, PRECEDENCE["Atom"], strict=True)
return numer + "/" + denom
def _print_Poly(self, expr):
ATOM_PREC = PRECEDENCE["Atom"] - 1
terms, gens = [], [ self.parenthesize(s, ATOM_PREC) for s in expr.gens ]
for monom, coeff in expr.terms():
s_monom = []
for i, e in enumerate(monom):
if e > 0:
if e == 1:
s_monom.append(gens[i])
else:
s_monom.append(gens[i] + "**%d" % e)
s_monom = "*".join(s_monom)
if coeff.is_Add:
if s_monom:
s_coeff = "(" + self._print(coeff) + ")"
else:
s_coeff = self._print(coeff)
else:
if s_monom:
if coeff is S.One:
terms.extend(['+', s_monom])
continue
if coeff is S.NegativeOne:
terms.extend(['-', s_monom])
continue
s_coeff = self._print(coeff)
if not s_monom:
s_term = s_coeff
else:
s_term = s_coeff + "*" + s_monom
if s_term.startswith('-'):
terms.extend(['-', s_term[1:]])
else:
terms.extend(['+', s_term])
if terms[0] in ('-', '+'):
modifier = terms.pop(0)
if modifier == '-':
terms[0] = '-' + terms[0]
format = expr.__class__.__name__ + "(%s, %s"
from sympy.polys.polyerrors import PolynomialError
try:
format += ", modulus=%s" % expr.get_modulus()
except PolynomialError:
format += ", domain='%s'" % expr.get_domain()
format += ")"
for index, item in enumerate(gens):
if len(item) > 2 and (item[:1] == "(" and item[len(item) - 1:] == ")"):
gens[index] = item[1:len(item) - 1]
return format % (' '.join(terms), ', '.join(gens))
def _print_UniversalSet(self, p):
return 'UniversalSet'
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_Pow(self, expr, rational=False):
"""Printing helper function for ``Pow``
Parameters
==========
rational : bool, optional
If ``True``, it will not attempt printing ``sqrt(x)`` or
``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)``
instead.
See examples for additional details
Examples
========
>>> from sympy import sqrt, StrPrinter
>>> from sympy.abc import x
How ``rational`` keyword works with ``sqrt``:
>>> printer = StrPrinter()
>>> printer._print_Pow(sqrt(x), rational=True)
'x**(1/2)'
>>> printer._print_Pow(sqrt(x), rational=False)
'sqrt(x)'
>>> printer._print_Pow(1/sqrt(x), rational=True)
'x**(-1/2)'
>>> printer._print_Pow(1/sqrt(x), rational=False)
'1/sqrt(x)'
Notes
=====
``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy,
so there is no need of defining a separate printer for ``sqrt``.
Instead, it should be handled here as well.
"""
PREC = precedence(expr)
if expr.exp is S.Half and not rational:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if -expr.exp is S.Half and not rational:
# Note: Don't test "expr.exp == -S.Half" here, because that will
# match -0.5, which we don't want.
return "%s/sqrt(%s)" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base)))
if expr.exp is -S.One:
# Similarly to the S.Half case, don't test with "==" here.
return '%s/%s' % (self._print(S.One),
self.parenthesize(expr.base, PREC, strict=False))
e = self.parenthesize(expr.exp, PREC, strict=False)
if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1:
# the parenthesized exp should be '(Rational(a, b))' so strip parens,
# but just check to be sure.
if e.startswith('(Rational'):
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1])
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e)
def _print_UnevaluatedExpr(self, expr):
return self._print(expr.args[0])
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False),
self.parenthesize(expr.exp, PREC, strict=False))
def _print_Integer(self, expr):
if self._settings.get("sympy_integers", False):
return "S(%s)" % (expr)
return str(expr.p)
def _print_Integers(self, expr):
return 'Integers'
def _print_Naturals(self, expr):
return 'Naturals'
def _print_Naturals0(self, expr):
return 'Naturals0'
def _print_Rationals(self, expr):
return 'Rationals'
def _print_Reals(self, expr):
return 'Reals'
def _print_Complexes(self, expr):
return 'Complexes'
def _print_EmptySet(self, expr):
return 'EmptySet'
def _print_EmptySequence(self, expr):
return 'EmptySequence'
def _print_int(self, expr):
return str(expr)
def _print_mpz(self, expr):
return str(expr)
def _print_Rational(self, expr):
if expr.q == 1:
return str(expr.p)
else:
if self._settings.get("sympy_integers", False):
return "S(%s)/%s" % (expr.p, expr.q)
return "%s/%s" % (expr.p, expr.q)
def _print_PythonRational(self, expr):
if expr.q == 1:
return str(expr.p)
else:
return "%d/%d" % (expr.p, expr.q)
def _print_Fraction(self, expr):
if expr.denominator == 1:
return str(expr.numerator)
else:
return "%s/%s" % (expr.numerator, expr.denominator)
def _print_mpq(self, expr):
if expr.denominator == 1:
return str(expr.numerator)
else:
return "%s/%s" % (expr.numerator, expr.denominator)
def _print_Float(self, expr):
prec = expr._prec
if prec < 5:
dps = 0
else:
dps = prec_to_dps(expr._prec)
if self._settings["full_prec"] is True:
strip = False
elif self._settings["full_prec"] is False:
strip = True
elif self._settings["full_prec"] == "auto":
strip = self._print_level > 1
low = self._settings["min"] if "min" in self._settings else None
high = self._settings["max"] if "max" in self._settings else None
rv = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high)
if rv.startswith('-.0'):
rv = '-0.' + rv[3:]
elif rv.startswith('.0'):
rv = '0.' + rv[2:]
if rv.startswith('+'):
# e.g., +inf -> inf
rv = rv[1:]
return rv
def _print_Relational(self, expr):
charmap = {
"==": "Eq",
"!=": "Ne",
":=": "Assignment",
'+=': "AddAugmentedAssignment",
"-=": "SubAugmentedAssignment",
"*=": "MulAugmentedAssignment",
"/=": "DivAugmentedAssignment",
"%=": "ModAugmentedAssignment",
}
if expr.rel_op in charmap:
return '%s(%s, %s)' % (charmap[expr.rel_op], self._print(expr.lhs),
self._print(expr.rhs))
return '%s %s %s' % (self.parenthesize(expr.lhs, precedence(expr)),
self._relationals.get(expr.rel_op) or expr.rel_op,
self.parenthesize(expr.rhs, precedence(expr)))
def _print_ComplexRootOf(self, expr):
return "CRootOf(%s, %d)" % (self._print_Add(expr.expr, order='lex'),
expr.index)
def _print_RootSum(self, expr):
args = [self._print_Add(expr.expr, order='lex')]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
return "RootSum(%s)" % ", ".join(args)
def _print_GroebnerBasis(self, basis):
cls = basis.__class__.__name__
exprs = [self._print_Add(arg, order=basis.order) for arg in basis.exprs]
exprs = "[%s]" % ", ".join(exprs)
gens = [ self._print(gen) for gen in basis.gens ]
domain = "domain='%s'" % self._print(basis.domain)
order = "order='%s'" % self._print(basis.order)
args = [exprs] + gens + [domain, order]
return "%s(%s)" % (cls, ", ".join(args))
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
args = ', '.join(self._print(item) for item in items)
if not args:
return "set()"
return '{%s}' % args
def _print_FiniteSet(self, s):
from sympy.sets.sets import FiniteSet
items = sorted(s, key=default_sort_key)
args = ', '.join(self._print(item) for item in items)
if any(item.has(FiniteSet) for item in items):
return 'FiniteSet({})'.format(args)
return '{{{}}}'.format(args)
def _print_Partition(self, s):
items = sorted(s, key=default_sort_key)
args = ', '.join(self._print(arg) for arg in items)
return 'Partition({})'.format(args)
def _print_frozenset(self, s):
if not s:
return "frozenset()"
return "frozenset(%s)" % self._print_set(s)
def _print_Sum(self, expr):
def _xab_tostr(xab):
if len(xab) == 1:
return self._print(xab[0])
else:
return self._print((xab[0],) + tuple(xab[1:]))
L = ', '.join([_xab_tostr(l) for l in expr.limits])
return 'Sum(%s, %s)' % (self._print(expr.function), L)
def _print_Symbol(self, expr):
return expr.name
_print_MatrixSymbol = _print_Symbol
_print_RandomSymbol = _print_Symbol
def _print_Identity(self, expr):
return "I"
def _print_ZeroMatrix(self, expr):
return "0"
def _print_OneMatrix(self, expr):
return "1"
def _print_Predicate(self, expr):
return "Q.%s" % expr.name
def _print_str(self, expr):
return str(expr)
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.stringify(expr, ", ")
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_Transpose(self, T):
return "%s.T" % self.parenthesize(T.arg, PRECEDENCE["Pow"])
def _print_Uniform(self, expr):
return "Uniform(%s, %s)" % (self._print(expr.a), self._print(expr.b))
def _print_Quantity(self, expr):
if self._settings.get("abbrev", False):
return "%s" % expr.abbrev
return "%s" % expr.name
def _print_Quaternion(self, expr):
s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True) for i in expr.args]
a = [s[0]] + [i+"*"+j for i, j in zip(s[1:], "ijk")]
return " + ".join(a)
def _print_Dimension(self, expr):
return str(expr)
def _print_Wild(self, expr):
return expr.name + '_'
def _print_WildFunction(self, expr):
return expr.name + '_'
def _print_WildDot(self, expr):
return expr.name
def _print_WildPlus(self, expr):
return expr.name
def _print_WildStar(self, expr):
return expr.name
def _print_Zero(self, expr):
if self._settings.get("sympy_integers", False):
return "S(0)"
return "0"
def _print_DMP(self, p):
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
cls = p.__class__.__name__
rep = self._print(p.rep)
dom = self._print(p.dom)
ring = self._print(p.ring)
return "%s(%s, %s, %s)" % (cls, rep, dom, ring)
def _print_DMF(self, expr):
return self._print_DMP(expr)
def _print_Object(self, obj):
return 'Object("%s")' % obj.name
def _print_IdentityMorphism(self, morphism):
return 'IdentityMorphism(%s)' % morphism.domain
def _print_NamedMorphism(self, morphism):
return 'NamedMorphism(%s, %s, "%s")' % \
(morphism.domain, morphism.codomain, morphism.name)
def _print_Category(self, category):
return 'Category("%s")' % category.name
def _print_Manifold(self, manifold):
return manifold.name.name
def _print_Patch(self, patch):
return patch.name.name
def _print_CoordSystem(self, coords):
return coords.name.name
def _print_BaseScalarField(self, field):
return field._coord_sys.symbols[field._index].name
def _print_BaseVectorField(self, field):
return 'e_%s' % field._coord_sys.symbols[field._index].name
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
return 'd%s' % field._coord_sys.symbols[field._index].name
else:
return 'd(%s)' % self._print(field)
def _print_Tr(self, expr):
        # TODO: Handle indices
return "%s(%s)" % ("Tr", self._print(expr.args[0]))
def _print_Str(self, s):
return self._print(s.name)
def _print_AppliedBinaryRelation(self, expr):
rel = expr.function
return '%s(%s, %s)' % (self._print(rel),
self._print(expr.lhs),
self._print(expr.rhs))
@print_function(StrPrinter)
def sstr(expr, **settings):
"""Returns the expression as a string.
    For large expressions where speed is a concern, use the setting
    order='none'. If the abbrev=True setting is used, units are printed in
    abbreviated form.
Examples
========
>>> from sympy import symbols, Eq, sstr
>>> a, b = symbols('a b')
>>> sstr(Eq(a + b, 0))
'Eq(a + b, 0)'
"""
p = StrPrinter(settings)
s = p.doprint(expr)
return s
class StrReprPrinter(StrPrinter):
"""(internal) -- see sstrrepr"""
def _print_str(self, s):
return repr(s)
def _print_Str(self, s):
        # Str does not need to be printed the same as str here
return "%s(%s)" % (s.__class__.__name__, self._print(s.name))
@print_function(StrReprPrinter)
def sstrrepr(expr, **settings):
"""return expr in mixed str/repr form
i.e. strings are returned in repr form with quotes, and everything else
is returned in str form.
This function could be useful for hooking into sys.displayhook
"""
p = StrReprPrinter(settings)
s = p.doprint(expr)
return s
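# Quick illustration of the str/repr difference (added for exposition; outputs
# are assumptions that follow from _print_str above, not upstream doctests):
#
#     >>> from sympy.abc import x
#     >>> sstr(('a', x + 1))      # plain strings are printed without quotes
#     '(a, x + 1)'
#     >>> sstrrepr(('a', x + 1))  # plain strings are printed in repr form
#     "('a', x + 1)"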
|
befc00393fa7c27b3efba3d5c69be82fd0ff251abb306bc956c918700f857ef9 | """
Rust code printer
The `RustCodePrinter` converts SymPy expressions into Rust expressions.
A complete code generator, which uses `rust_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
# Possible Improvements
#
# * make sure we follow Rust Style Guidelines_
# * make use of pattern matching
# * better support for references
# * generate generic code and use traits to make sure types have the required methods
# * use crates_ to get more math support
# - num_
# + BigInt_, BigUint_
# + Complex_
# + Rational64_, Rational32_, BigRational_
#
# .. _crates: https://crates.io/
# .. _Guidelines: https://github.com/rust-lang/rust/tree/master/src/doc/style
# .. _num: http://rust-num.github.io/num/num/
# .. _BigInt: http://rust-num.github.io/num/num/bigint/struct.BigInt.html
# .. _BigUint: http://rust-num.github.io/num/num/bigint/struct.BigUint.html
# .. _Complex: http://rust-num.github.io/num/num/complex/struct.Complex.html
# .. _Rational32: http://rust-num.github.io/num/num/rational/type.Rational32.html
# .. _Rational64: http://rust-num.github.io/num/num/rational/type.Rational64.html
# .. _BigRational: http://rust-num.github.io/num/num/rational/type.BigRational.html
from __future__ import annotations
from typing import Any
from sympy.core import S, Rational, Float, Lambda
from sympy.printing.codeprinter import CodePrinter
# Rust's methods for integers and floats can be found here:
#
# * `Rust - Primitive Type f64 <https://doc.rust-lang.org/std/primitive.f64.html>`_
# * `Rust - Primitive Type i64 <https://doc.rust-lang.org/std/primitive.i64.html>`_
#
# Function Style :
#
# 1. args[0].func(args[1:]), method with arguments
# 2. args[0].func(), method without arguments
# 3. args[1].func(), method without arguments (e.g. (e, x) => x.exp())
# 4. func(args), function with arguments
# dictionary mapping SymPy function to (argument_conditions, Rust_function).
# Used in RustCodePrinter._print_Function(self)
# f64 method in Rust
known_functions = {
# "": "is_nan",
# "": "is_infinite",
# "": "is_finite",
# "": "is_normal",
# "": "classify",
"floor": "floor",
"ceiling": "ceil",
# "": "round",
# "": "trunc",
# "": "fract",
"Abs": "abs",
"sign": "signum",
# "": "is_sign_positive",
# "": "is_sign_negative",
# "": "mul_add",
"Pow": [(lambda base, exp: exp == -S.One, "recip", 2), # 1.0/x
(lambda base, exp: exp == S.Half, "sqrt", 2), # x ** 0.5
(lambda base, exp: exp == -S.Half, "sqrt().recip", 2), # 1/(x ** 0.5)
(lambda base, exp: exp == Rational(1, 3), "cbrt", 2), # x ** (1/3)
(lambda base, exp: base == S.One*2, "exp2", 3), # 2 ** x
(lambda base, exp: exp.is_integer, "powi", 1), # x ** y, for i32
(lambda base, exp: not exp.is_integer, "powf", 1)], # x ** y, for f64
"exp": [(lambda exp: True, "exp", 2)], # e ** x
"log": "ln",
# "": "log", # number.log(base)
# "": "log2",
# "": "log10",
# "": "to_degrees",
# "": "to_radians",
"Max": "max",
"Min": "min",
# "": "hypot", # (x**2 + y**2) ** 0.5
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
# "": "sin_cos",
# "": "exp_m1", # e ** x - 1
# "": "ln_1p", # ln(1 + x)
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
"sqrt": "sqrt", # To enable automatic rewrites
}
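# Rough sketch of how the (condition, method, style) entries above drive the
# generated Rust (added for exposition; outputs are assumptions, not upstream
# doctests):
#
#     >>> from sympy import symbols, sin, sqrt, rust_code
#     >>> x = symbols('x')
#     >>> rust_code(sin(x))   # plain string entry: method call on the first arg
#     'x.sin()'
#     >>> rust_code(sqrt(x))  # Pow(x, 1/2) matches the "sqrt" condition (style 2)
#     'x.sqrt()'
#     >>> rust_code(2**x)     # base == 2 matches "exp2" (style 3: method on the exponent)
#     'x.exp2()'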
# i64 method in Rust
# known_functions_i64 = {
# "": "min_value",
# "": "max_value",
# "": "from_str_radix",
# "": "count_ones",
# "": "count_zeros",
# "": "leading_zeros",
# "": "trainling_zeros",
# "": "rotate_left",
# "": "rotate_right",
# "": "swap_bytes",
# "": "from_be",
# "": "from_le",
# "": "to_be", # to big endian
# "": "to_le", # to little endian
# "": "checked_add",
# "": "checked_sub",
# "": "checked_mul",
# "": "checked_div",
# "": "checked_rem",
# "": "checked_neg",
# "": "checked_shl",
# "": "checked_shr",
# "": "checked_abs",
# "": "saturating_add",
# "": "saturating_sub",
# "": "saturating_mul",
# "": "wrapping_add",
# "": "wrapping_sub",
# "": "wrapping_mul",
# "": "wrapping_div",
# "": "wrapping_rem",
# "": "wrapping_neg",
# "": "wrapping_shl",
# "": "wrapping_shr",
# "": "wrapping_abs",
# "": "overflowing_add",
# "": "overflowing_sub",
# "": "overflowing_mul",
# "": "overflowing_div",
# "": "overflowing_rem",
# "": "overflowing_neg",
# "": "overflowing_shl",
# "": "overflowing_shr",
# "": "overflowing_abs",
# "Pow": "pow",
# "Abs": "abs",
# "sign": "signum",
# "": "is_positive",
# "": "is_negnative",
# }
# These are the core reserved words in the Rust language. Taken from:
# http://doc.rust-lang.org/grammar.html#keywords
reserved_words = ['abstract',
'alignof',
'as',
'become',
'box',
'break',
'const',
'continue',
'crate',
'do',
'else',
'enum',
'extern',
'false',
'final',
'fn',
'for',
'if',
'impl',
'in',
'let',
'loop',
'macro',
'match',
'mod',
'move',
'mut',
'offsetof',
'override',
'priv',
'proc',
'pub',
'pure',
'ref',
'return',
'Self',
'self',
'sizeof',
'static',
'struct',
'super',
'trait',
'true',
'type',
'typeof',
'unsafe',
'unsized',
'use',
'virtual',
'where',
'while',
'yield']
class RustCodePrinter(CodePrinter):
"""A printer to convert SymPy expressions to strings of Rust code"""
printmethod = "_rust_code"
language = "Rust"
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
'inline': False,
}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
self._dereference = set(settings.get('dereference', []))
self.reserved_words = set(reserved_words)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// %s" % text
def _declare_number_const(self, name, value):
return "const %s: f64 = %s;" % (name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for %(var)s in %(start)s..%(end)s {"
for i in indices:
# Rust arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'var': self._print(i),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_caller_var(self, expr):
if len(expr.args) > 1:
# for something like `sin(x + y + z)`,
# make sure we can get '(x + y + z).sin()'
# instead of 'x + y + z.sin()'
return '(' + self._print(expr) + ')'
elif expr.is_number:
return self._print(expr, _type=True)
else:
return self._print(expr)
def _print_Function(self, expr):
"""
basic function for printing `Function`
Function Style :
1. args[0].func(args[1:]), method with arguments
2. args[0].func(), method without arguments
3. args[1].func(), method without arguments (e.g. (e, x) => x.exp())
4. func(args), function with arguments
"""
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
style = 1
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func, style in cond_func:
if cond(*expr.args):
break
if func is not None:
if style == 1:
ret = "%(var)s.%(method)s(%(args)s)" % {
'var': self._print_caller_var(expr.args[0]),
'method': func,
'args': self.stringify(expr.args[1:], ", ") if len(expr.args) > 1 else ''
}
elif style == 2:
ret = "%(var)s.%(method)s()" % {
'var': self._print_caller_var(expr.args[0]),
'method': func,
}
elif style == 3:
ret = "%(var)s.%(method)s()" % {
'var': self._print_caller_var(expr.args[1]),
'method': func,
}
else:
ret = "%(func)s(%(args)s)" % {
'func': func,
'args': self.stringify(expr.args, ", "),
}
return ret
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
elif expr.func.__name__ in self._rewriteable_functions:
# Simple rewrite to supported function possible
target_f, required_fs = self._rewriteable_functions[expr.func.__name__]
if self._can_print(target_f) and all(self._can_print(f) for f in required_fs):
return self._print(expr.rewrite(target_f))
else:
return self._print_not_supported(expr)
def _print_Pow(self, expr):
if expr.base.is_integer and not expr.exp.is_integer:
expr = type(expr)(Float(expr.base), expr.exp)
return self._print(expr)
return self._print_Function(expr)
def _print_Float(self, expr, _type=False):
ret = super()._print_Float(expr)
if _type:
return ret + '_f64'
else:
return ret
def _print_Integer(self, expr, _type=False):
ret = super()._print_Integer(expr)
if _type:
return ret + '_i32'
else:
return ret
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d_f64/%d.0' % (p, q)
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
def _print_Idx(self, expr):
return expr.label.name
def _print_Dummy(self, expr):
return expr.name
def _print_Exp1(self, expr, _type=False):
return "E"
def _print_Pi(self, expr, _type=False):
return 'PI'
def _print_Infinity(self, expr, _type=False):
return 'INFINITY'
def _print_NegativeInfinity(self, expr, _type=False):
return 'NEG_INFINITY'
def _print_BooleanTrue(self, expr, _type=False):
return "true"
def _print_BooleanFalse(self, expr, _type=False):
return "false"
def _print_bool(self, expr, _type=False):
return str(expr).lower()
def _print_NaN(self, expr, _type=False):
return "NAN"
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines[-1] += " else {"
else:
lines[-1] += " else if (%s) {" % self._print(c)
code0 = self._print(e)
lines.append(code0)
lines.append("}")
if self._settings['inline']:
return " ".join(lines)
else:
return "\n".join(lines)
def _print_ITE(self, expr):
from sympy.functions import Piecewise
return self._print(expr.rewrite(Piecewise, deep=False))
def _print_MatrixBase(self, A):
if A.cols == 1:
return "[%s]" % ", ".join(self._print(a) for a in A)
else:
raise ValueError("Full Matrix Support in Rust need Crates (https://crates.io/keywords/matrix).")
def _print_SparseRepMatrix(self, mat):
# do not allow sparse matrices to be made dense
return self._print_not_supported(mat)
def _print_MatrixElement(self, expr):
return "%s[%s]" % (expr.parent,
expr.j + expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if expr in self._dereference:
return '(*%s)' % name
else:
return name
def _print_Assignment(self, expr):
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def rust_code(expr, assign_to=None, **settings):
"""Converts an expr to a string of Rust code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
        are their desired Rust string representations. Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        rust_function_string)]. See below for examples.
dereference : iterable, optional
An iterable of symbols that should be dereferenced in the printed code
expression. These would be values passed by address to the function.
For example, if ``dereference=[a]``, the resulting code would print
``(*a)`` instead of ``a``.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import rust_code, symbols, Rational, sin, ceiling, Abs, Function
>>> x, tau = symbols("x, tau")
>>> rust_code((2*tau)**Rational(7, 2))
'8*1.4142135623731*tau.powf(7_f64/2.0)'
>>> rust_code(sin(x), assign_to="s")
's = x.sin();'
Simple custom printing can be defined for certain types by passing a
dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
Alternatively, the dictionary value can be a list of tuples i.e.
    [(argument_test, rust_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs", 4),
... (lambda x: x.is_integer, "ABS", 4)],
... "func": "f"
... }
>>> func = Function('func')
>>> rust_code(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
'(fabs(x) + x.CEIL()).f()'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(rust_code(expr, tau))
tau = if (x > 0) {
x + 1
} else {
x
};
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> rust_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(rust_code(mat, A))
A = [x.powi(2), if (x > 0) {
x + 1
} else {
x
}, x.sin()];
"""
return RustCodePrinter(settings).doprint(expr, assign_to)
def print_rust_code(expr, **settings):
"""Prints Rust representation of the given expression."""
print(rust_code(expr, **settings))
|
e9ce68389834f3145fe91b4a20c0b11873b85a7461472757e0009f70e7c800f1 | """
A Printer which converts an expression into its LaTeX equivalent.
"""
from __future__ import annotations
from typing import Any, Callable, TYPE_CHECKING
import itertools
from sympy.core import Add, Float, Mod, Mul, Number, S, Symbol, Expr
from sympy.core.alphabets import greeks
from sympy.core.containers import Tuple
from sympy.core.function import Function, AppliedUndef, Derivative
from sympy.core.operations import AssocOp
from sympy.core.power import Pow
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import SympifyError
from sympy.logic.boolalg import true, BooleanTrue, BooleanFalse
from sympy.tensor.array import NDimArray
# sympy.printing imports
from sympy.printing.precedence import precedence_traditional
from sympy.printing.printer import Printer, print_function
from sympy.printing.conventions import split_super_sub, requires_partial
from sympy.printing.precedence import precedence, PRECEDENCE
from mpmath.libmp.libmpf import prec_to_dps, to_str as mlib_to_str
from sympy.utilities.iterables import has_variety, sift
import re
if TYPE_CHECKING:
from sympy.vector.basisdependent import BasisDependent
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at
# https://docs.mathjax.org/en/latest/tex.html#supported-latex-commands
# This variable only contains those functions which SymPy uses.
accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan',
'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec',
'csc', 'cot', 'coth', 're', 'im', 'frac', 'root',
'arg',
]
tex_greek_dictionary = {
'Alpha': r'\mathrm{A}',
'Beta': r'\mathrm{B}',
'Gamma': r'\Gamma',
'Delta': r'\Delta',
'Epsilon': r'\mathrm{E}',
'Zeta': r'\mathrm{Z}',
'Eta': r'\mathrm{H}',
'Theta': r'\Theta',
'Iota': r'\mathrm{I}',
'Kappa': r'\mathrm{K}',
'Lambda': r'\Lambda',
'Mu': r'\mathrm{M}',
'Nu': r'\mathrm{N}',
'Xi': r'\Xi',
'omicron': 'o',
'Omicron': r'\mathrm{O}',
'Pi': r'\Pi',
'Rho': r'\mathrm{P}',
'Sigma': r'\Sigma',
'Tau': r'\mathrm{T}',
'Upsilon': r'\Upsilon',
'Phi': r'\Phi',
'Chi': r'\mathrm{X}',
'Psi': r'\Psi',
'Omega': r'\Omega',
'lamda': r'\lambda',
'Lamda': r'\Lambda',
'khi': r'\chi',
'Khi': r'\mathrm{X}',
'varepsilon': r'\varepsilon',
'varkappa': r'\varkappa',
'varphi': r'\varphi',
'varpi': r'\varpi',
'varrho': r'\varrho',
'varsigma': r'\varsigma',
'vartheta': r'\vartheta',
}
other_symbols = {'aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar',
'hslash', 'mho', 'wp'}
# Variable name modifiers
modifier_dict: dict[str, Callable[[str], str]] = {
# Accents
'mathring': lambda s: r'\mathring{'+s+r'}',
'ddddot': lambda s: r'\ddddot{'+s+r'}',
'dddot': lambda s: r'\dddot{'+s+r'}',
'ddot': lambda s: r'\ddot{'+s+r'}',
'dot': lambda s: r'\dot{'+s+r'}',
'check': lambda s: r'\check{'+s+r'}',
'breve': lambda s: r'\breve{'+s+r'}',
'acute': lambda s: r'\acute{'+s+r'}',
'grave': lambda s: r'\grave{'+s+r'}',
'tilde': lambda s: r'\tilde{'+s+r'}',
'hat': lambda s: r'\hat{'+s+r'}',
'bar': lambda s: r'\bar{'+s+r'}',
'vec': lambda s: r'\vec{'+s+r'}',
'prime': lambda s: "{"+s+"}'",
'prm': lambda s: "{"+s+"}'",
# Faces
'bold': lambda s: r'\boldsymbol{'+s+r'}',
'bm': lambda s: r'\boldsymbol{'+s+r'}',
'cal': lambda s: r'\mathcal{'+s+r'}',
'scr': lambda s: r'\mathscr{'+s+r'}',
'frak': lambda s: r'\mathfrak{'+s+r'}',
# Brackets
'norm': lambda s: r'\left\|{'+s+r'}\right\|',
'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle',
'abs': lambda s: r'\left|{'+s+r'}\right|',
'mag': lambda s: r'\left|{'+s+r'}\right|',
}
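# Illustrative sketch of the name-modifier convention (added for exposition;
# outputs are assumptions, not upstream doctests): a trailing modifier in a
# Symbol name is turned into the corresponding accent or face.
#
#     >>> from sympy import Symbol, latex
#     >>> latex(Symbol('xdot'))
#     '\\dot{x}'
#     >>> latex(Symbol('Fvec'))
#     '\\vec{F}'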
greek_letters_set = frozenset(greeks)
_between_two_numbers_p = (
re.compile(r'[0-9][} ]*$'), # search
re.compile(r'[0-9]'), # match
)
def latex_escape(s: str) -> str:
"""
Escape a string such that latex interprets it as plaintext.
We cannot use verbatim easily with mathjax, so escaping is easier.
Rules from https://tex.stackexchange.com/a/34586/41112.
"""
s = s.replace('\\', r'\textbackslash')
for c in '&%$#_{}':
s = s.replace(c, '\\' + c)
s = s.replace('~', r'\textasciitilde')
s = s.replace('^', r'\textasciicircum')
return s
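# Example of the escaping rules above (added for exposition; the output is an
# assumption obtained by applying the replacements in order):
#
#     >>> latex_escape('50% of x_1')
#     '50\\% of x\\_1'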
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings: dict[str, Any] = {
"full_prec": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"inv_trig_style": "abbreviated",
"itex": False,
"ln_notation": False,
"long_frac_ratio": None,
"mat_delim": "[",
"mat_str": None,
"mode": "plain",
"mul_symbol": None,
"order": None,
"symbol_names": {},
"root_notation": True,
"mat_symbol_style": "plain",
"imaginary_unit": "i",
"gothic_re_im": False,
"decimal_separator": "period",
"perm_cyclic": True,
"parenthesize_super": True,
"min": None,
"max": None,
"diff_operator": "d",
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation',
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError("'mode' must be one of 'inline', 'plain', "
"'equation' or 'equation*'")
if self._settings['fold_short_frac'] is None and \
self._settings['mode'] == 'inline':
self._settings['fold_short_frac'] = True
mul_symbol_table = {
None: r" ",
"ldot": r" \,.\, ",
"dot": r" \cdot ",
"times": r" \times "
}
try:
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
except KeyError:
self._settings['mul_symbol_latex'] = \
self._settings['mul_symbol']
try:
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table[self._settings['mul_symbol'] or 'dot']
except KeyError:
if (self._settings['mul_symbol'].strip() in
['', ' ', '\\', '\\,', '\\:', '\\;', '\\quad']):
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table['dot']
else:
self._settings['mul_symbol_latex_numbers'] = \
self._settings['mul_symbol']
self._delim_dict = {'(': ')', '[': ']'}
imaginary_unit_table = {
None: r"i",
"i": r"i",
"ri": r"\mathrm{i}",
"ti": r"\text{i}",
"j": r"j",
"rj": r"\mathrm{j}",
"tj": r"\text{j}",
}
imag_unit = self._settings['imaginary_unit']
self._settings['imaginary_unit_latex'] = imaginary_unit_table.get(imag_unit, imag_unit)
diff_operator_table = {
None: r"d",
"d": r"d",
"rd": r"\mathrm{d}",
"td": r"\text{d}",
}
diff_operator = self._settings['diff_operator']
self._settings["diff_operator_latex"] = diff_operator_table.get(diff_operator, diff_operator)
def _add_parens(self, s) -> str:
return r"\left({}\right)".format(s)
# TODO: merge this with the above, which requires a lot of test changes
def _add_parens_lspace(self, s) -> str:
return r"\left( {}\right)".format(s)
def parenthesize(self, item, level, is_neg=False, strict=False) -> str:
prec_val = precedence_traditional(item)
if is_neg and strict:
return self._add_parens(self._print(item))
if (prec_val < level) or ((not strict) and prec_val <= level):
return self._add_parens(self._print(item))
else:
return self._print(item)
def parenthesize_super(self, s):
"""
Protect superscripts in s
If the parenthesize_super option is set, protect with parentheses, else
wrap in braces.
"""
if "^" in s:
if self._settings['parenthesize_super']:
return self._add_parens(s)
else:
return "{{{}}}".format(s)
return s
def doprint(self, expr) -> str:
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and (expr is not S.NegativeOne
and expr.is_Rational is False)))
def _needs_function_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _needs_mul_brackets(self, expr, first=False, last=False) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of a Mul, False otherwise. This is True for Add,
but also for some container objects that would not need brackets
when appearing last in a Mul, e.g. an Integral. ``last=True``
specifies that this expr is the last to appear in a Mul.
``first=True`` specifies that this expr is the first to appear in
a Mul.
"""
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
if expr.is_Mul:
if not first and expr.could_extract_minus_sign():
return True
elif precedence_traditional(expr) < PRECEDENCE["Mul"]:
return True
elif expr.is_Relational:
return True
if expr.is_Piecewise:
return True
if any(expr.has(x) for x in (Mod,)):
return True
if (not last and
any(expr.has(x) for x in (Integral, Product, Sum))):
return True
return False
def _needs_add_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
if any(expr.has(x) for x in (Mod,)):
return True
if expr.is_Add:
return True
return False
def _mul_is_clean(self, expr) -> bool:
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr) -> bool:
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr: str, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_Basic(self, expr):
name = self._deal_with_super_sub(expr.__class__.__name__)
if expr.args:
ls = [self._print(o) for o in expr.args]
s = r"\operatorname{{{}}}\left({}\right)"
return s.format(name, ", ".join(ls))
else:
return r"\text{{{}}}".format(name)
def _print_bool(self, e: bool | BooleanTrue | BooleanFalse):
return r"\text{%s}" % e
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
return r"\text{%s}" % e
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
tex = ""
for i, term in enumerate(terms):
if i == 0:
pass
elif term.could_extract_minus_sign():
tex += " - "
term = -term
else:
tex += " + "
term_tex = self._print(term)
if self._needs_add_brackets(term):
term_tex = r"\left(%s\right)" % term_tex
tex += term_tex
return tex
def _print_Cycle(self, expr):
from sympy.combinatorics.permutations import Permutation
if expr.size == 0:
return r"\left( \right)"
expr = Permutation(expr)
expr_perm = expr.cyclic_form
siz = expr.size
if expr.array_form[-1] == siz - 1:
expr_perm = expr_perm + [[siz - 1]]
term_tex = ''
for i in expr_perm:
term_tex += str(i).replace(',', r"\;")
term_tex = term_tex.replace('[', r"\left( ")
term_tex = term_tex.replace(']', r"\right)")
return term_tex
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.exceptions import sympy_deprecation_warning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=8,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
return self._print_Cycle(expr)
if expr.size == 0:
return r"\left( \right)"
lower = [self._print(arg) for arg in expr.array_form]
upper = [self._print(arg) for arg in range(len(lower))]
row1 = " & ".join(upper)
row2 = " & ".join(lower)
mat = r" \\ ".join((row1, row2))
return r"\begin{pmatrix} %s \end{pmatrix}" % mat
def _print_AppliedPermutation(self, expr):
perm, var = expr.args
return r"\sigma_{%s}(%s)" % (self._print(perm), self._print(var))
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
strip = False if self._settings['full_prec'] else True
low = self._settings["min"] if "min" in self._settings else None
high = self._settings["max"] if "max" in self._settings else None
str_real = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_latex_numbers']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
if self._settings['decimal_separator'] == 'comma':
mant = mant.replace('.','{,}')
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
if self._settings['decimal_separator'] == 'comma':
str_real = str_real.replace('.','{,}')
return str_real
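    # Sketch of the scientific-notation handling above (added for exposition;
    # the output is an assumption using the default number separator):
    #
    #     >>> from sympy import Float, latex
    #     >>> latex(Float('1.0e-100'))
    #     '1.0 \\cdot 10^{-100}'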
def _print_Cross(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \times %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Curl(self, expr):
vec = expr._expr
return r"\nabla\times %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Divergence(self, expr):
vec = expr._expr
return r"\nabla\cdot %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Dot(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \cdot %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Gradient(self, expr):
func = expr._expr
return r"\nabla %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Laplacian(self, expr):
func = expr._expr
return r"\Delta %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Mul(self, expr: Expr):
from sympy.simplify import fraction
separator: str = self._settings['mul_symbol_latex']
numbersep: str = self._settings['mul_symbol_latex_numbers']
def convert(expr) -> str:
if not expr.is_Mul:
return str(self._print(expr))
else:
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
args = list(expr.args)
# If there are quantities or prefixes, append them at the back.
units, nonunits = sift(args, lambda x: (hasattr(x, "_scale_factor") or hasattr(x, "is_physical_constant")) or
(isinstance(x, Pow) and
hasattr(x.base, "is_physical_constant")), binary=True)
prefixes, units = sift(units, lambda x: hasattr(x, "_scale_factor"), binary=True)
return convert_args(nonunits + prefixes + units)
def convert_args(args) -> str:
_tex = last_term_tex = ""
for i, term in enumerate(args):
term_tex = self._print(term)
if not (hasattr(term, "_scale_factor") or hasattr(term, "is_physical_constant")):
if self._needs_mul_brackets(term, first=(i == 0),
last=(i == len(args) - 1)):
term_tex = r"\left(%s\right)" % term_tex
if _between_two_numbers_p[0].search(last_term_tex) and \
_between_two_numbers_p[1].match(str(term)):
# between two numbers
_tex += numbersep
elif _tex:
_tex += separator
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
# Check for unevaluated Mul. In this case we need to make sure the
# identities are visible, multiple Rational factors are not combined
# etc so we display in a straight-forward form that fully preserves all
# args and their order.
# XXX: _print_Pow calls this routine with instances of Pow...
if isinstance(expr, Mul):
args = expr.args
if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]):
return convert_args(args)
include_parens = False
if expr.could_extract_minus_sign():
expr = -expr
tex = "- "
if expr.is_Add:
tex += "("
include_parens = True
else:
tex = ""
numer, denom = fraction(expr, exact=True)
if denom is S.One and Pow(1, -1, evaluate=False) not in expr.args:
# use the original expression here, since fraction() may have
# altered it when producing numer and denom
tex += convert(expr)
else:
snumer = convert(numer)
sdenom = convert(denom)
ldenom = len(sdenom.split())
ratio = self._settings['long_frac_ratio']
if self._settings['fold_short_frac'] and ldenom <= 2 and \
"^" not in sdenom:
# handle short fractions
if self._needs_mul_brackets(numer, last=False):
tex += r"\left(%s\right) / %s" % (snumer, sdenom)
else:
tex += r"%s / %s" % (snumer, sdenom)
elif ratio is not None and \
len(snumer.split()) > ratio*ldenom:
# handle long fractions
if self._needs_mul_brackets(numer, last=True):
tex += r"\frac{1}{%s}%s\left(%s\right)" \
% (sdenom, separator, snumer)
elif numer.is_Mul:
# split a long numerator
a = S.One
b = S.One
for x in numer.args:
if self._needs_mul_brackets(x, last=False) or \
len(convert(a*x).split()) > ratio*ldenom or \
(b.is_commutative is x.is_commutative is False):
b *= x
else:
a *= x
if self._needs_mul_brackets(b, last=True):
tex += r"\frac{%s}{%s}%s\left(%s\right)" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{%s}{%s}%s%s" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
else:
tex += r"\frac{%s}{%s}" % (snumer, sdenom)
if include_parens:
tex += ")"
return tex
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_PrimeIdeal(self, expr):
p = self._print(expr.p)
if expr.is_inert:
return rf'\left({p}\right)'
alpha = self._print(expr.alpha.as_expr())
return rf'\left({p}, {alpha}\right)'
def _print_Pow(self, expr: Pow):
# Treat x**Rational(1,n) as special case
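        # Illustrative outputs under the default root_notation setting:
        #   x**Rational(1, 2)  -> \sqrt{x}
        #   x**Rational(1, 3)  -> \sqrt[3]{x}
        #   x**Rational(-1, 2) -> \frac{1}{\sqrt{x}}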
if expr.exp.is_Rational:
p: int = expr.exp.p # type: ignore
q: int = expr.exp.q # type: ignore
if abs(p) == 1 and q != 1 and self._settings['root_notation']:
base = self._print(expr.base)
if q == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (q, base)
else:
tex = r"\sqrt[%d]{%s}" % (q, base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] and q != 1:
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
# issue #12886: add parentheses for superscripts raised to powers
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
if expr.base.is_Function:
return self._print(expr.base, exp="%s/%s" % (p, q))
return r"%s^{%s/%s}" % (base, p, q)
elif expr.exp.is_negative and expr.base.is_commutative:
# special case for 1^(-x), issue 9216
if expr.base == 1:
return r"%s^{%s}" % (expr.base, expr.exp)
# special case for (1/x)^(-y) and (-1/-x)^(-y), issue 20252
if expr.base.is_Rational:
base_p: int = expr.base.p # type: ignore
base_q: int = expr.base.q # type: ignore
if base_p * base_q == abs(base_q):
if expr.exp == -1:
return r"\frac{1}{\frac{%s}{%s}}" % (base_p, base_q)
else:
return r"\frac{1}{(\frac{%s}{%s})^{%s}}" % (base_p, base_q, abs(expr.exp))
# things like 1/x
return self._print_Mul(expr)
if expr.base.is_Function:
return self._print(expr.base, exp=self._print(expr.exp))
tex = r"%s^{%s}"
return self._helper_print_standard_power(expr, tex)
def _helper_print_standard_power(self, expr, template: str) -> str:
exp = self._print(expr.exp)
# issue #12886: add parentheses around superscripts raised
# to powers
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
elif (isinstance(expr.base, Derivative)
and base.startswith(r'\left(')
and re.match(r'\\left\(\\d?d?dot', base)
and base.endswith(r'\right)')):
# don't use parentheses around dotted derivative
base = base[6: -7] # remove outermost added parens
return template % (base, exp)
def _print_UnevaluatedExpr(self, expr):
return self._print(expr.args[0])
def _print_Sum(self, expr):
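        # e.g. (illustrative): Sum(x**2, (x, 1, n)) renders roughly as
        # \sum_{x=1}^{n} x^{2}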
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_BasisDependent(self, expr: 'BasisDependent'):
from sympy.vector import Vector
o1: list[str] = []
if expr == expr.zero:
return expr.zero._latex_form
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
for system, vect in items:
inneritems = list(vect.components.items())
inneritems.sort(key=lambda x: x[0].__str__())
for k, v in inneritems:
if v == 1:
o1.append(' + ' + k._latex_form)
elif v == -1:
o1.append(' - ' + k._latex_form)
else:
arg_str = r'\left(' + self._print(v) + r'\right)'
o1.append(' + ' + arg_str + k._latex_form)
outstr = (''.join(o1))
if outstr[1] != '-':
outstr = outstr[3:]
else:
outstr = outstr[1:]
return outstr
def _print_Indexed(self, expr):
tex_base = self._print(expr.base)
tex = '{'+tex_base+'}'+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
def _print_IndexedBase(self, expr):
return self._print(expr.label)
def _print_Idx(self, expr):
label = self._print(expr.label)
if expr.upper is not None:
upper = self._print(expr.upper)
if expr.lower is not None:
lower = self._print(expr.lower)
else:
lower = self._print(S.Zero)
interval = '{lower}\\mathrel{{..}}\\nobreak {upper}'.format(
lower = lower, upper = upper)
return '{{{label}}}_{{{interval}}}'.format(
label = label, interval = interval)
        # if no bounds are defined, this just prints the label
return label
def _print_Derivative(self, expr):
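        # Builds the \frac{d}{d x} / \frac{\partial^{n}}{...} prefix from the
        # variable counts.  Illustrative (approximate) outputs:
        #   Derivative(f(x), x)    -> \frac{d}{d x} f{\left(x \right)}
        #   Derivative(f(x, y), x) -> \frac{\partial}{\partial x} f{\left(x,y \right)}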
if requires_partial(expr.expr):
diff_symbol = r'\partial'
else:
diff_symbol = self._settings["diff_operator_latex"]
tex = ""
dim = 0
for x, num in reversed(expr.variable_count):
dim += num
if num == 1:
tex += r"%s %s" % (diff_symbol, self._print(x))
else:
tex += r"%s %s^{%s}" % (diff_symbol,
self.parenthesize_super(self._print(x)),
self._print(num))
if dim == 1:
tex = r"\frac{%s}{%s}" % (diff_symbol, tex)
else:
tex = r"\frac{%s^{%s}}{%s}" % (diff_symbol, self._print(dim), tex)
if any(i.could_extract_minus_sign() for i in expr.args):
return r"%s %s" % (tex, self.parenthesize(expr.expr,
PRECEDENCE["Mul"],
is_neg=True,
strict=True))
return r"%s %s" % (tex, self.parenthesize(expr.expr,
PRECEDENCE["Mul"],
is_neg=False,
strict=True))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr,
latex_subs)
def _print_Integral(self, expr):
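        # Illustrative (approximate) outputs:
        #   Integral(f(x), x)         -> \int f{\left(x \right)}\, dx
        #   Integral(f(x), (x, 0, 1)) -> \int\limits_{0}^{1} f{\left(x \right)}\, dx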
tex, symbols = "", []
diff_symbol = self._settings["diff_operator_latex"]
# Only up to \iiiint exists
if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
# Use len(expr.limits)-1 so that syntax highlighters don't think
# \" is an escaped quote
tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
symbols = [r"\, %s%s" % (diff_symbol, self._print(symbol[0]))
for symbol in expr.limits]
else:
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] != 'inline' \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, r"\, %s%s" % (diff_symbol, self._print(symbol)))
return r"%s %s%s" % (tex, self.parenthesize(expr.function,
PRECEDENCE["Mul"],
is_neg=any(i.could_extract_minus_sign() for i in expr.args),
strict=True),
"".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to " % self._print(z)
if str(dir) == '+-' or z0 in (S.Infinity, S.NegativeInfinity):
tex += r"%s}" % self._print(z0)
else:
tex += r"%s^%s}" % (self._print(z0), self._print(dir))
if isinstance(e, AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
def _hprint_Function(self, func: str) -> str:
r'''
Logic to decide how to render a function to latex
- if it is a recognized latex name, use the appropriate latex command
- if it is a single letter, excluding sub- and superscripts, just use that letter
        - if it is a longer name, then put \operatorname{} around it and be
          mindful of underscores in the name
'''
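        # Illustrative mapping (approximate, depends on accepted_latex_functions):
        #   "sin"   -> \sin                   (recognized LaTeX name)
        #   "f"     -> f                      (single letter)
        #   "total" -> \operatorname{total}   (longer, unrecognized name)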
func = self._deal_with_super_sub(func)
superscriptidx = func.find("^")
subscriptidx = func.find("_")
if func in accepted_latex_functions:
name = r"\%s" % func
elif len(func) == 1 or func.startswith('\\') or subscriptidx == 1 or superscriptidx == 1:
name = func
else:
if superscriptidx > 0 and subscriptidx > 0:
name = r"\operatorname{%s}%s" %(
func[:min(subscriptidx,superscriptidx)],
func[min(subscriptidx,superscriptidx):])
elif superscriptidx > 0:
name = r"\operatorname{%s}%s" %(
func[:superscriptidx],
func[superscriptidx:])
elif subscriptidx > 0:
name = r"\operatorname{%s}%s" %(
func[:subscriptidx],
func[subscriptidx:])
else:
name = r"\operatorname{%s}" % func
return name
def _print_Function(self, expr: Function, exp=None) -> str:
r'''
Render functions to LaTeX, handling functions that LaTeX knows about
e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
For single-letter function names, render them as regular LaTeX math
symbols. For multi-letter function names that LaTeX does not know
about, (e.g., Li, sech) use \operatorname{} so that the function name
is rendered in Roman font and LaTeX handles spacing properly.
expr is the expression involving the function
exp is an exponent
'''
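        # Illustrative (approximate) outputs with default settings:
        #   sin(x)             -> \sin{\left(x \right)}
        #   Function('hi')(x)  -> \operatorname{hi}{\left(x \right)}
        #   asin(x) with inv_trig_style="power" -> \sin^{-1}{\left(x \right)}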
func = expr.func.__name__
if hasattr(self, '_print_' + func) and \
not isinstance(expr, AppliedUndef):
return getattr(self, '_print_' + func)(expr, exp)
else:
args = [str(self._print(arg)) for arg in expr.args]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings['inv_trig_style']
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = self._settings['fold_func_brackets'] and \
len(args) == 1 and \
not self._needs_function_brackets(expr.args[0])
inv_trig_table = [
"asin", "acos", "atan",
"acsc", "asec", "acot",
"asinh", "acosh", "atanh",
"acsch", "asech", "acoth",
]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
pass
elif inv_trig_style == "full":
func = ("ar" if func[-1] == "h" else "arc") + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
if func in accepted_latex_functions:
name = r"\%s^{-1}" % func
else:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
func_tex = self._hprint_Function(func)
func_tex = self.parenthesize_super(func_tex)
name = r'%s^{%s}' % (func_tex, exp)
else:
name = self._hprint_Function(func)
if can_fold_brackets:
if func in accepted_latex_functions:
# Wrap argument safely to avoid parse-time conflicts
# with the function name itself
name += r" {%s}"
else:
name += r"%s"
else:
name += r"{\left(%s \right)}"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
def _print_UndefinedFunction(self, expr):
return self._hprint_Function(str(expr))
def _print_ElementwiseApplyFunction(self, expr):
return r"{%s}_{\circ}\left({%s}\right)" % (
self._print(expr.function),
self._print(expr.expr),
)
@property
def _special_function_classes(self):
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.functions.special.gamma_functions import gamma, lowergamma
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.delta_functions import DiracDelta
from sympy.functions.special.error_functions import Chi
return {KroneckerDelta: r'\delta',
gamma: r'\Gamma',
lowergamma: r'\gamma',
beta: r'\operatorname{B}',
DiracDelta: r'\delta',
Chi: r'\operatorname{Chi}'}
def _print_FunctionClass(self, expr):
for cls in self._special_function_classes:
if issubclass(expr, cls) and expr.__name__ == cls.__name__:
return self._special_function_classes[cls]
return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
def _print_IdentityFunction(self, expr):
return r"\left( x \mapsto x \right)"
def _hprint_variadic_function(self, expr, exp=None) -> str:
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\%s\left(%s\right)" % (str(expr.func).lower(),
", ".join(texargs))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Min = _print_Max = _hprint_variadic_function
def _print_floor(self, expr, exp=None):
tex = r"\left\lfloor{%s}\right\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\left\lceil{%s}\right\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_log(self, expr, exp=None):
if not self._settings["ln_notation"]:
tex = r"\log{\left(%s \right)}" % self._print(expr.args[0])
else:
tex = r"\ln{\left(%s \right)}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_re(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Re{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{re}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Im{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{im}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
def _print_Not(self, e):
from sympy.logic.boolalg import (Equivalent, Implies)
if isinstance(e.args[0], Equivalent):
return self._print_Equivalent(e.args[0], r"\not\Leftrightarrow")
if isinstance(e.args[0], Implies):
return self._print_Implies(e.args[0], r"\not\Rightarrow")
if (e.args[0].is_Boolean):
return r"\neg \left(%s\right)" % self._print(e.args[0])
else:
return r"\neg %s" % self._print(e.args[0])
def _print_LogOp(self, args, char):
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" %s \left(%s\right)" % (char, self._print(arg))
else:
tex += r" %s %s" % (char, self._print(arg))
return tex
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\wedge")
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\vee")
def _print_Xor(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\veebar")
def _print_Implies(self, e, altchar=None):
return self._print_LogOp(e.args, altchar or r"\Rightarrow")
def _print_Equivalent(self, e, altchar=None):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, altchar or r"\Leftrightarrow")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left(%s \right)}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
def _print_ExpBase(self, expr, exp=None):
# TODO should exp_polar be printed differently?
# what about exp_polar(0), exp_polar(1)?
tex = r"e^{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_Exp1(self, expr, exp=None):
return "e"
def _print_elliptic_k(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"K^{%s}%s" % (exp, tex)
else:
return r"K%s" % tex
def _print_elliptic_f(self, expr, exp=None):
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"F^{%s}%s" % (exp, tex)
else:
return r"F%s" % tex
def _print_elliptic_e(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"E^{%s}%s" % (exp, tex)
else:
return r"E%s" % tex
def _print_elliptic_pi(self, expr, exp=None):
if len(expr.args) == 3:
tex = r"\left(%s; %s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]),
self._print(expr.args[2]))
else:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"\Pi^{%s}%s" % (exp, tex)
else:
return r"\Pi%s" % tex
def _print_beta(self, expr, exp=None):
        x = self._print(expr.args[0])
        # Deal with unevaluated single argument beta
        y = self._print(expr.args[0] if len(expr.args) == 1 else expr.args[1])
tex = rf"\left({x}, {y}\right)"
if exp is not None:
return r"\operatorname{B}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{B}%s" % tex
def _print_betainc(self, expr, exp=None, operator='B'):
largs = [self._print(arg) for arg in expr.args]
tex = r"\left(%s, %s\right)" % (largs[0], largs[1])
if exp is not None:
return r"\operatorname{%s}_{(%s, %s)}^{%s}%s" % (operator, largs[2], largs[3], exp, tex)
else:
return r"\operatorname{%s}_{(%s, %s)}%s" % (operator, largs[2], largs[3], tex)
def _print_betainc_regularized(self, expr, exp=None):
return self._print_betainc(expr, exp, operator='I')
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _hprint_one_arg_func(self, expr, exp=None) -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (self._print(expr.func), exp, tex)
else:
return r"%s%s" % (self._print(expr.func), tex)
_print_gamma = _hprint_one_arg_func
def _print_Chi(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\operatorname{Chi}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{Chi}%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_fresnels(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"S^{%s}%s" % (exp, tex)
else:
return r"S%s" % tex
def _print_fresnelc(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"C^{%s}%s" % (exp, tex)
else:
return r"C%s" % tex
def _print_subfactorial(self, expr, exp=None):
tex = r"!%s" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"\left(%s\right)^{%s}" % (tex, exp)
else:
return tex
def _print_factorial(self, expr, exp=None):
tex = r"%s!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
tex = r"%s!!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
n, k = expr.args
base = r"%s" % self.parenthesize(n, PRECEDENCE['Func'])
tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
n, k = expr.args
sub = r"%s" % self.parenthesize(k, PRECEDENCE['Func'])
tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
return self._do_exponent(tex, exp)
def _hprint_BesselBase(self, expr, exp, sym: str) -> str:
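        # e.g. (illustrative): besselj(n, x) -> J_{n}\left(x\right), while
        # hankel1(n, x)**2 -> \left(H^{(1)}_{n}\left(x\right)\right)^{2}
        # because the '^' already present in the symbol forces the outer exponent.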
tex = r"%s" % (sym)
need_exp = False
if exp is not None:
if tex.find('^') == -1:
tex = r"%s^{%s}" % (tex, exp)
else:
need_exp = True
tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
self._print(expr.argument))
if need_exp:
tex = self._do_exponent(tex, exp)
return tex
def _hprint_vec(self, vec) -> str:
if not vec:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
def _print_besselj(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'J')
def _print_besseli(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'I')
def _print_besselk(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'K')
def _print_bessely(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'Y')
def _print_yn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'y')
def _print_jn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'j')
def _print_hankel1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
def _print_hankel2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
def _print_hn1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'h^{(1)}')
def _print_hn2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'h^{(2)}')
def _hprint_airy(self, expr, exp=None, notation="") -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (notation, exp, tex)
else:
return r"%s%s" % (notation, tex)
def _hprint_airy_prime(self, expr, exp=None, notation="") -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
else:
return r"%s^\prime%s" % (notation, tex)
def _print_airyai(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Ai')
def _print_airybi(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Bi')
def _print_airyaiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Ai')
def _print_airybiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Bi')
def _print_hyper(self, expr, exp=None):
tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
r"\middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, exp)
return tex
def _print_meijerg(self, expr, exp=None):
tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._print(len(expr.bm)), self._print(len(expr.an)),
self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, exp)
return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (exp, tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (exp, tex)
return r"\zeta%s" % tex
def _print_stieltjes(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_{%s}\left(%s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"_{%s}" % self._print(expr.args[0])
if exp is not None:
return r"\gamma%s^{%s}" % (tex, exp)
return r"\gamma%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (exp, tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, exp, tex)
def _print_jacobi(self, expr, exp=None):
n, a, b, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_gegenbauer(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevt(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"T_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevu(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"U_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_legendre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"P_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_legendre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_hermite(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"H_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_laguerre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"L_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_laguerre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Ynm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Znm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def __print_mathieu_functions(self, character, args, prime=False, exp=None):
a, q, z = map(self._print, args)
sup = r"^{\prime}" if prime else ""
exp = "" if not exp else "^{%s}" % exp
return r"%s%s\left(%s, %s, %s\right)%s" % (character, sup, a, q, z, exp)
def _print_mathieuc(self, expr, exp=None):
return self.__print_mathieu_functions("C", expr.args, exp=exp)
def _print_mathieus(self, expr, exp=None):
return self.__print_mathieu_functions("S", expr.args, exp=exp)
def _print_mathieucprime(self, expr, exp=None):
return self.__print_mathieu_functions("C", expr.args, prime=True, exp=exp)
def _print_mathieusprime(self, expr, exp=None):
return self.__print_mathieu_functions("S", expr.args, prime=True, exp=exp)
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
if self._settings['fold_short_frac']:
return r"%s%d / %d" % (sign, p, expr.q)
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
def _print_Order(self, expr):
s = self._print(expr.expr)
if expr.point and any(p != S.Zero for p in expr.point) or \
len(expr.variables) > 1:
s += '; '
if len(expr.variables) > 1:
s += self._print(expr.variables)
elif expr.variables:
s += self._print(expr.variables[0])
s += r'\rightarrow '
if len(expr.point) > 1:
s += self._print(expr.point)
else:
s += self._print(expr.point[0])
return r"O\left(%s\right)" % s
def _print_Symbol(self, expr: Symbol, style='plain'):
name: str = self._settings['symbol_names'].get(expr)
if name is not None:
return name
return self._deal_with_super_sub(expr.name, style=style)
_print_RandomSymbol = _print_Symbol
def _deal_with_super_sub(self, string: str, style='plain') -> str:
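        # e.g. (illustrative): "x_1" -> "x_{1}", "alpha_2" -> "\alpha_{2}"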
if '{' in string:
name, supers, subs = string, [], []
else:
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# apply the style only to the name
if style == 'bold':
name = "\\mathbf{{{}}}".format(name)
# glue all items together:
if supers:
name += "^{%s}" % " ".join(supers)
if subs:
name += "_{%s}" % " ".join(subs)
return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
def _print_Piecewise(self, expr):
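        # e.g. (illustrative): Piecewise((x, x > 0), (0, True)) renders roughly as
        # \begin{cases} x & \text{for}\: x > 0 \\0 & \text{otherwise} \end{cases}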
ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
for e, c in expr.args[:-1]]
if expr.args[-1].cond == true:
ecpairs.append(r"%s & \text{otherwise}" %
self._print(expr.args[-1].expr))
else:
ecpairs.append(r"%s & \text{for}\: %s" %
(self._print(expr.args[-1].expr),
self._print(expr.args[-1].cond)))
tex = r"\begin{cases} %s \end{cases}"
return tex % r" \\".join(ecpairs)
def _print_matrix_contents(self, expr):
lines = []
        for line in range(expr.rows):  # 'line' here indexes the matrix rows
lines.append(" & ".join([self._print(i) for i in expr[line, :]]))
mat_str = self._settings['mat_str']
if mat_str is None:
if self._settings['mode'] == 'inline':
mat_str = 'smallmatrix'
else:
if (expr.cols <= 10) is True:
mat_str = 'matrix'
else:
mat_str = 'array'
out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
out_str = out_str.replace('%MATSTR%', mat_str)
if mat_str == 'array':
out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
return out_str % r"\\".join(lines)
def _print_MatrixBase(self, expr):
out_str = self._print_matrix_contents(expr)
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True)\
+ '_{%s, %s}' % (self._print(expr.i), self._print(expr.j))
def _print_MatrixSlice(self, expr):
def latexslice(x, dim):
x = list(x)
if x[2] == 1:
del x[2]
if x[0] == 0:
x[0] = None
if x[1] == dim:
x[1] = None
return ':'.join(self._print(xi) if xi is not None else '' for xi in x)
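        # e.g. (illustrative): for a 4x4 MatrixSymbol X, X[0:2, 1:3] renders
        # roughly as X\left[:2, 1:3\right]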
return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + r'\left[' +
latexslice(expr.rowslice, expr.parent.rows) + ', ' +
latexslice(expr.colslice, expr.parent.cols) + r'\right]')
def _print_BlockMatrix(self, expr):
return self._print(expr.blocks)
def _print_Transpose(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol, BlockMatrix
if (not isinstance(mat, MatrixSymbol) and
not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
return r"\left(%s\right)^{T}" % self._print(mat)
else:
s = self.parenthesize(mat, precedence_traditional(expr), True)
if '^' in s:
return r"\left(%s\right)^{T}" % s
else:
return "%s^{T}" % s
def _print_Trace(self, expr):
mat = expr.arg
return r"\operatorname{tr}\left(%s \right)" % self._print(mat)
def _print_Adjoint(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol, BlockMatrix
if (not isinstance(mat, MatrixSymbol) and
not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
return r"\left(%s\right)^{\dagger}" % self._print(mat)
else:
s = self.parenthesize(mat, precedence_traditional(expr), True)
if '^' in s:
return r"\left(%s\right)^{\dagger}" % s
else:
return r"%s^{\dagger}" % s
def _print_MatMul(self, expr):
from sympy import MatMul
# Parenthesize nested MatMul but not other types of Mul objects:
parens = lambda x: self._print(x) if isinstance(x, Mul) and not isinstance(x, MatMul) else \
self.parenthesize(x, precedence_traditional(expr), False)
args = list(expr.args)
if expr.could_extract_minus_sign():
if args[0] == -1:
args = args[1:]
else:
args[0] = -args[0]
return '- ' + ' '.join(map(parens, args))
else:
return ' '.join(map(parens, args))
def _print_Determinant(self, expr):
mat = expr.arg
if mat.is_MatrixExpr:
from sympy.matrices.expressions.blockmatrix import BlockMatrix
if isinstance(mat, BlockMatrix):
return r"\left|{%s}\right|" % self._print_matrix_contents(mat.blocks)
return r"\left|{%s}\right|" % self._print(mat)
return r"\left|{%s}\right|" % self._print_matrix_contents(mat)
def _print_Mod(self, expr, exp=None):
if exp is not None:
return r'\left(%s \bmod %s\right)^{%s}' % \
(self.parenthesize(expr.args[0], PRECEDENCE['Mul'],
strict=True),
self.parenthesize(expr.args[1], PRECEDENCE['Mul'],
strict=True),
exp)
return r'%s \bmod %s' % (self.parenthesize(expr.args[0],
PRECEDENCE['Mul'],
strict=True),
self.parenthesize(expr.args[1],
PRECEDENCE['Mul'],
strict=True))
def _print_HadamardProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \circ '.join(
map(lambda arg: parens(arg, prec, strict=True), args))
def _print_HadamardPower(self, expr):
if precedence_traditional(expr.exp) < PRECEDENCE["Mul"]:
template = r"%s^{\circ \left({%s}\right)}"
else:
template = r"%s^{\circ {%s}}"
return self._helper_print_standard_power(expr, template)
def _print_KroneckerProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \otimes '.join(
map(lambda arg: parens(arg, prec, strict=True), args))
def _print_MatPow(self, expr):
base, exp = expr.base, expr.exp
from sympy.matrices import MatrixSymbol
if not isinstance(base, MatrixSymbol) and base.is_MatrixExpr:
return "\\left(%s\\right)^{%s}" % (self._print(base),
self._print(exp))
else:
base_str = self._print(base)
if '^' in base_str:
return r"\left(%s\right)^{%s}" % (base_str, self._print(exp))
else:
return "%s^{%s}" % (base_str, self._print(exp))
def _print_MatrixSymbol(self, expr):
return self._print_Symbol(expr, style=self._settings[
'mat_symbol_style'])
def _print_ZeroMatrix(self, Z):
return "0" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{0}"
def _print_OneMatrix(self, O):
return "1" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{1}"
def _print_Identity(self, I):
return r"\mathbb{I}" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{I}"
def _print_PermutationMatrix(self, P):
perm_str = self._print(P.args[0])
return "P_{%s}" % perm_str
def _print_NDimArray(self, expr: NDimArray):
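        # Rank-n arrays are rendered as nested matrix blocks: the innermost
        # level is joined with " & ", the next level with "\\" inside a new
        # block, and so on, alternating outwards (descriptive note on the
        # loops below).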
if expr.rank() == 0:
return self._print(expr[()])
mat_str = self._settings['mat_str']
if mat_str is None:
if self._settings['mode'] == 'inline':
mat_str = 'smallmatrix'
else:
if (expr.rank() == 0) or (expr.shape[-1] <= 10):
mat_str = 'matrix'
else:
mat_str = 'array'
block_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
block_str = block_str.replace('%MATSTR%', mat_str)
if mat_str == 'array':
            block_str = block_str.replace('%s', '{}%s')
if self._settings['mat_delim']:
left_delim: str = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
block_str = r'\left' + left_delim + block_str + \
r'\right' + right_delim
if expr.rank() == 0:
return block_str % ""
level_str: list[list[str]] = [[] for i in range(expr.rank() + 1)]
shape_ranges = [list(range(i)) for i in expr.shape]
for outer_i in itertools.product(*shape_ranges):
level_str[-1].append(self._print(expr[outer_i]))
even = True
for back_outer_i in range(expr.rank()-1, -1, -1):
if len(level_str[back_outer_i+1]) < expr.shape[back_outer_i]:
break
if even:
level_str[back_outer_i].append(
r" & ".join(level_str[back_outer_i+1]))
else:
level_str[back_outer_i].append(
block_str % (r"\\".join(level_str[back_outer_i+1])))
if len(level_str[back_outer_i+1]) == 1:
level_str[back_outer_i][-1] = r"\left[" + \
level_str[back_outer_i][-1] + r"\right]"
even = not even
level_str[back_outer_i+1] = []
out_str = level_str[0][0]
if expr.rank() % 2 == 1:
out_str = block_str % out_str
return out_str
def _printer_tensor_indices(self, name, indices, index_map: dict):
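        # Builds strings such as "A{}^{i}{}_{j}", opening a new "{}^{...}" or
        # "{}_{...}" group whenever the index valence flips and inserting
        # commas between consecutive indices of the same valence
        # (descriptive note).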
out_str = self._print(name)
last_valence = None
prev_map = None
for index in indices:
new_valence = index.is_up
if ((index in index_map) or prev_map) and \
last_valence == new_valence:
out_str += ","
if last_valence != new_valence:
if last_valence is not None:
out_str += "}"
if index.is_up:
out_str += "{}^{"
else:
out_str += "{}_{"
out_str += self._print(index.args[0])
if index in index_map:
out_str += "="
out_str += self._print(index_map[index])
prev_map = True
else:
prev_map = False
last_valence = new_valence
if last_valence is not None:
out_str += "}"
return out_str
def _print_Tensor(self, expr):
name = expr.args[0].args[0]
indices = expr.get_indices()
return self._printer_tensor_indices(name, indices, {})
def _print_TensorElement(self, expr):
name = expr.expr.args[0].args[0]
indices = expr.expr.get_indices()
index_map = expr.index_map
return self._printer_tensor_indices(name, indices, index_map)
def _print_TensMul(self, expr):
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
sign, args = expr._get_args_for_traditional_printer()
return sign + "".join(
[self.parenthesize(arg, precedence(expr)) for arg in args]
)
def _print_TensAdd(self, expr):
a = []
args = expr.args
for x in args:
a.append(self.parenthesize(x, precedence(expr)))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _print_TensorIndex(self, expr):
return "{}%s{%s}" % (
"^" if expr.is_up else "_",
self._print(expr.args[0])
)
def _print_PartialDerivative(self, expr):
if len(expr.variables) == 1:
return r"\frac{\partial}{\partial {%s}}{%s}" % (
self._print(expr.variables[0]),
self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
)
else:
return r"\frac{\partial^{%s}}{%s}{%s}" % (
len(expr.variables),
" ".join([r"\partial {%s}" % self._print(i) for i in expr.variables]),
self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
)
def _print_ArraySymbol(self, expr):
return self._print(expr.name)
def _print_ArrayElement(self, expr):
return "{{%s}_{%s}}" % (
self.parenthesize(expr.name, PRECEDENCE["Func"], True),
", ".join([f"{self._print(i)}" for i in expr.indices]))
def _print_UniversalSet(self, expr):
return r"\mathbb{U}"
def _print_frac(self, expr, exp=None):
if exp is None:
return r"\operatorname{frac}{\left(%s\right)}" % self._print(expr.args[0])
else:
return r"\operatorname{frac}{\left(%s\right)}^{%s}" % (
self._print(expr.args[0]), exp)
def _print_tuple(self, expr):
if self._settings['decimal_separator'] == 'comma':
sep = ";"
elif self._settings['decimal_separator'] == 'period':
sep = ","
else:
raise ValueError('Unknown Decimal Separator')
if len(expr) == 1:
# 1-tuple needs a trailing separator
return self._add_parens_lspace(self._print(expr[0]) + sep)
else:
return self._add_parens_lspace(
(sep + r" \ ").join([self._print(i) for i in expr]))
def _print_TensorProduct(self, expr):
elements = [self._print(a) for a in expr.args]
return r' \otimes '.join(elements)
def _print_WedgeProduct(self, expr):
elements = [self._print(a) for a in expr.args]
return r' \wedge '.join(elements)
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_list(self, expr):
if self._settings['decimal_separator'] == 'comma':
return r"\left[ %s\right]" % \
r"; \ ".join([self._print(i) for i in expr])
elif self._settings['decimal_separator'] == 'period':
return r"\left[ %s\right]" % \
r", \ ".join([self._print(i) for i in expr])
else:
raise ValueError('Unknown Decimal Separator')
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
val = d[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\left\{ %s\right\}" % r", \ ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
self._print(expr.args[1]), self._print(expr.args[0]))
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_SingularityFunction(self, expr, exp=None):
shift = self._print(expr.args[0] - expr.args[1])
power = self._print(expr.args[2])
tex = r"{\left\langle %s \right\rangle}^{%s}" % (shift, power)
if exp is not None:
tex = r"{\left({\langle %s \rangle}^{%s}\right)}^{%s}" % (shift, power, exp)
return tex
def _print_Heaviside(self, expr, exp=None):
pargs = ', '.join(self._print(arg) for arg in expr.pargs)
tex = r"\theta\left(%s\right)" % pargs
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_KroneckerDelta(self, expr, exp=None):
i = self._print(expr.args[0])
j = self._print(expr.args[1])
if expr.args[0].is_Atom and expr.args[1].is_Atom:
tex = r'\delta_{%s %s}' % (i, j)
else:
tex = r'\delta_{%s, %s}' % (i, j)
if exp is not None:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_LeviCivita(self, expr, exp=None):
indices = map(self._print, expr.args)
if all(x.is_Atom for x in expr.args):
tex = r'\varepsilon_{%s}' % " ".join(indices)
else:
tex = r'\varepsilon_{%s}' % ", ".join(indices)
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return '\\text{Domain: }' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
return ('\\text{Domain: }' + self._print(d.symbols) + ' \\in ' +
self._print(d.set))
elif hasattr(d, 'symbols'):
return '\\text{Domain on }' + self._print(d.symbols)
else:
return self._print(None)
def _print_FiniteSet(self, s):
items = sorted(s.args, key=default_sort_key)
return self._print_set(items)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
if self._settings['decimal_separator'] == 'comma':
items = "; ".join(map(self._print, items))
elif self._settings['decimal_separator'] == 'period':
items = ", ".join(map(self._print, items))
else:
raise ValueError('Unknown Decimal Separator')
return r"\left\{%s\right\}" % items
_print_frozenset = _print_set
def _print_Range(self, s):
def _print_symbolic_range():
# Symbolic Range that cannot be resolved
if s.args[0] == 0:
if s.args[2] == 1:
cont = self._print(s.args[1])
else:
cont = ", ".join(self._print(arg) for arg in s.args)
else:
if s.args[2] == 1:
cont = ", ".join(self._print(arg) for arg in s.args[:2])
else:
cont = ", ".join(self._print(arg) for arg in s.args)
            return f"\\text{{Range}}\\left({cont}\\right)"
dots = object()
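        # Illustrative (approximate) renderings:
        #   Range(0, 5)   -> \left\{0, 1, \ldots, 4\right\}
        #   Range(0, oo)  -> \left\{0, 1, \ldots\right\}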
if s.start.is_infinite and s.stop.is_infinite:
if s.step.is_positive:
printset = dots, -1, 0, 1, dots
else:
printset = dots, 1, 0, -1, dots
elif s.start.is_infinite:
printset = dots, s[-1] - s.step, s[-1]
elif s.stop.is_infinite:
it = iter(s)
printset = next(it), next(it), dots
elif s.is_empty is not None:
if (s.size < 4) == True:
printset = tuple(s)
elif s.is_iterable:
it = iter(s)
printset = next(it), next(it), dots, s[-1]
else:
return _print_symbolic_range()
else:
return _print_symbolic_range()
return (r"\left\{" +
r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
r"\right\}")
def __print_number_polynomial(self, expr, letter, exp=None):
if len(expr.args) == 2:
if exp is not None:
return r"%s_{%s}^{%s}\left(%s\right)" % (letter,
self._print(expr.args[0]), exp,
self._print(expr.args[1]))
return r"%s_{%s}\left(%s\right)" % (letter,
self._print(expr.args[0]), self._print(expr.args[1]))
tex = r"%s_{%s}" % (letter, self._print(expr.args[0]))
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_bernoulli(self, expr, exp=None):
return self.__print_number_polynomial(expr, "B", exp)
def _print_genocchi(self, expr, exp=None):
return self.__print_number_polynomial(expr, "G", exp)
def _print_bell(self, expr, exp=None):
if len(expr.args) == 3:
tex1 = r"B_{%s, %s}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
tex2 = r"\left(%s\right)" % r", ".join(self._print(el) for
el in expr.args[2])
if exp is not None:
tex = r"%s^{%s}%s" % (tex1, exp, tex2)
else:
tex = tex1 + tex2
return tex
return self.__print_number_polynomial(expr, "B", exp)
def _print_fibonacci(self, expr, exp=None):
return self.__print_number_polynomial(expr, "F", exp)
def _print_lucas(self, expr, exp=None):
tex = r"L_{%s}" % self._print(expr.args[0])
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_tribonacci(self, expr, exp=None):
return self.__print_number_polynomial(expr, "T", exp)
def _print_SeqFormula(self, s):
dots = object()
if len(s.start.free_symbols) > 0 or len(s.stop.free_symbols) > 0:
return r"\left\{%s\right\}_{%s=%s}^{%s}" % (
self._print(s.formula),
self._print(s.variables[0]),
self._print(s.start),
self._print(s.stop)
)
if s.start is S.NegativeInfinity:
stop = s.stop
printset = (dots, s.coeff(stop - 3), s.coeff(stop - 2),
s.coeff(stop - 1), s.coeff(stop))
elif s.stop is S.Infinity or s.length > 4:
printset = s[:4]
printset.append(dots)
else:
printset = tuple(s)
return (r"\left[" +
r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
r"\right]")
_print_SeqPer = _print_SeqFormula
_print_SeqAdd = _print_SeqFormula
_print_SeqMul = _print_SeqFormula
def _print_Interval(self, i):
if i.start == i.end:
return r"\left\{%s\right\}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
def _print_AccumulationBounds(self, i):
return r"\left\langle %s, %s\right\rangle" % \
(self._print(i.min), self._print(i.max))
def _print_Union(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \cup ".join(args_str)
def _print_Complement(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \setminus ".join(args_str)
def _print_Intersection(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \cap ".join(args_str)
def _print_SymmetricDifference(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \triangle ".join(args_str)
def _print_ProductSet(self, p):
prec = precedence_traditional(p)
if len(p.sets) >= 1 and not has_variety(p.sets):
return self.parenthesize(p.sets[0], prec) + "^{%d}" % len(p.sets)
return r" \times ".join(
self.parenthesize(set, prec) for set in p.sets)
def _print_EmptySet(self, e):
return r"\emptyset"
def _print_Naturals(self, n):
return r"\mathbb{N}"
def _print_Naturals0(self, n):
return r"\mathbb{N}_0"
def _print_Integers(self, i):
return r"\mathbb{Z}"
def _print_Rationals(self, i):
return r"\mathbb{Q}"
def _print_Reals(self, i):
return r"\mathbb{R}"
def _print_Complexes(self, i):
return r"\mathbb{C}"
def _print_ImageSet(self, s):
expr = s.lamda.expr
sig = s.lamda.signature
xys = ((self._print(x), self._print(y)) for x, y in zip(sig, s.base_sets))
xinys = r", ".join(r"%s \in %s" % xy for xy in xys)
return r"\left\{%s\; \middle|\; %s\right\}" % (self._print(expr), xinys)
def _print_ConditionSet(self, s):
vars_print = ', '.join([self._print(var) for var in Tuple(s.sym)])
if s.base_set is S.UniversalSet:
return r"\left\{%s\; \middle|\; %s \right\}" % \
(vars_print, self._print(s.condition))
return r"\left\{%s\; \middle|\; %s \in %s \wedge %s \right\}" % (
vars_print,
vars_print,
self._print(s.base_set),
self._print(s.condition))
def _print_PowerSet(self, expr):
arg_print = self._print(expr.args[0])
return r"\mathcal{{P}}\left({}\right)".format(arg_print)
def _print_ComplexRegion(self, s):
vars_print = ', '.join([self._print(var) for var in s.variables])
return r"\left\{%s\; \middle|\; %s \in %s \right\}" % (
self._print(s.expr),
vars_print,
self._print(s.sets))
def _print_Contains(self, e):
return r"%s \in %s" % tuple(self._print(a) for a in e.args)
def _print_FourierSeries(self, s):
if s.an.formula is S.Zero and s.bn.formula is S.Zero:
return self._print(s.a0)
return self._print_Add(s.truncate()) + r' + \ldots'
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.infinite)
def _print_FiniteField(self, expr):
return r"\mathbb{F}_{%s}" % expr.mod
def _print_IntegerRing(self, expr):
return r"\mathbb{Z}"
def _print_RationalField(self, expr):
return r"\mathbb{Q}"
def _print_RealField(self, expr):
return r"\mathbb{R}"
def _print_ComplexField(self, expr):
return r"\mathbb{C}"
def _print_PolynomialRing(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left[%s\right]" % (domain, symbols)
def _print_FractionField(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left(%s\right)" % (domain, symbols)
def _print_PolynomialRingBase(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
inv = ""
if not expr.is_Poly:
inv = r"S_<^{-1}"
return r"%s%s\left[%s\right]" % (inv, domain, symbols)
def _print_Poly(self, poly):
cls = poly.__class__.__name__
terms = []
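        # Terms are collected as alternating sign/term tokens so signs merge
        # cleanly; e.g. (illustrative) Poly(x**2 + 2*x - 1, x) prints roughly as
        # \operatorname{Poly}{\left( x^{2} + 2 x - 1, x, domain=\mathbb{Z} \right)}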
for monom, coeff in poly.terms():
s_monom = ''
for i, exp in enumerate(monom):
if exp > 0:
if exp == 1:
s_monom += self._print(poly.gens[i])
else:
s_monom += self._print(pow(poly.gens[i], exp))
if coeff.is_Add:
if s_monom:
s_coeff = r"\left(%s\right)" % self._print(coeff)
else:
s_coeff = self._print(coeff)
else:
if s_monom:
if coeff is S.One:
terms.extend(['+', s_monom])
continue
if coeff is S.NegativeOne:
terms.extend(['-', s_monom])
continue
s_coeff = self._print(coeff)
if not s_monom:
s_term = s_coeff
else:
s_term = s_coeff + " " + s_monom
if s_term.startswith('-'):
terms.extend(['-', s_term[1:]])
else:
terms.extend(['+', s_term])
if terms[0] in ('-', '+'):
modifier = terms.pop(0)
if modifier == '-':
terms[0] = '-' + terms[0]
expr = ' '.join(terms)
gens = list(map(self._print, poly.gens))
domain = "domain=%s" % self._print(poly.get_domain())
args = ", ".join([expr] + gens + [domain])
if cls in accepted_latex_functions:
tex = r"\%s {\left(%s \right)}" % (cls, args)
else:
tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
return tex
def _print_ComplexRootOf(self, root):
cls = root.__class__.__name__
if cls == "ComplexRootOf":
cls = "CRootOf"
expr = self._print(root.expr)
index = root.index
if cls in accepted_latex_functions:
return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
else:
return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr,
index)
def _print_RootSum(self, expr):
cls = expr.__class__.__name__
args = [self._print(expr.expr)]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
if cls in accepted_latex_functions:
return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
else:
return r"\operatorname{%s} {\left(%s\right)}" % (cls,
", ".join(args))
def _print_OrdinalOmega(self, expr):
return r"\omega"
def _print_OmegaPower(self, expr):
exp, mul = expr.args
if mul != 1:
if exp != 1:
return r"{} \omega^{{{}}}".format(mul, exp)
else:
return r"{} \omega".format(mul)
else:
if exp != 1:
return r"\omega^{{{}}}".format(exp)
else:
return r"\omega"
def _print_Ordinal(self, expr):
return " + ".join([self._print(arg) for arg in expr.args])
def _print_PolyElement(self, poly):
mul_symbol = self._settings['mul_symbol_latex']
return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)
def _print_FracElement(self, frac):
if frac.denom == 1:
return self._print(frac.numer)
else:
numer = self._print(frac.numer)
denom = self._print(frac.denom)
return r"\frac{%s}{%s}" % (numer, denom)
def _print_euler(self, expr, exp=None):
m, x = (expr.args[0], None) if len(expr.args) == 1 else expr.args
tex = r"E_{%s}" % self._print(m)
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
if x is not None:
tex = r"%s\left(%s\right)" % (tex, self._print(x))
return tex
def _print_catalan(self, expr, exp=None):
tex = r"C_{%s}" % self._print(expr.args[0])
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_UnifiedTransform(self, expr, s, inverse=False):
return r"\mathcal{{{}}}{}_{{{}}}\left[{}\right]\left({}\right)".format(s, '^{-1}' if inverse else '', self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_MellinTransform(self, expr):
return self._print_UnifiedTransform(expr, 'M')
def _print_InverseMellinTransform(self, expr):
return self._print_UnifiedTransform(expr, 'M', True)
def _print_LaplaceTransform(self, expr):
return self._print_UnifiedTransform(expr, 'L')
def _print_InverseLaplaceTransform(self, expr):
return self._print_UnifiedTransform(expr, 'L', True)
def _print_FourierTransform(self, expr):
return self._print_UnifiedTransform(expr, 'F')
def _print_InverseFourierTransform(self, expr):
return self._print_UnifiedTransform(expr, 'F', True)
def _print_SineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'SIN')
def _print_InverseSineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'SIN', True)
def _print_CosineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'COS')
def _print_InverseCosineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'COS', True)
def _print_DMP(self, p):
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
return self._print(repr(p))
def _print_DMF(self, p):
return self._print_DMP(p)
def _print_Object(self, object):
return self._print(Symbol(object.name))
def _print_LambertW(self, expr, exp=None):
arg0 = self._print(expr.args[0])
exp = r"^{%s}" % (exp,) if exp is not None else ""
if len(expr.args) == 1:
result = r"W%s\left(%s\right)" % (exp, arg0)
else:
arg1 = self._print(expr.args[1])
result = "W{0}_{{{1}}}\\left({2}\\right)".format(exp, arg1, arg0)
return result
def _print_Expectation(self, expr):
return r"\operatorname{{E}}\left[{}\right]".format(self._print(expr.args[0]))
def _print_Variance(self, expr):
return r"\operatorname{{Var}}\left({}\right)".format(self._print(expr.args[0]))
def _print_Covariance(self, expr):
return r"\operatorname{{Cov}}\left({}\right)".format(", ".join(self._print(arg) for arg in expr.args))
def _print_Probability(self, expr):
return r"\operatorname{{P}}\left({}\right)".format(self._print(expr.args[0]))
def _print_Morphism(self, morphism):
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
return "%s\\rightarrow %s" % (domain, codomain)
def _print_TransferFunction(self, expr):
num, den = self._print(expr.num), self._print(expr.den)
return r"\frac{%s}{%s}" % (num, den)
def _print_Series(self, expr):
args = list(expr.args)
parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
False)
return ' '.join(map(parens, args))
def _print_MIMOSeries(self, expr):
from sympy.physics.control.lti import MIMOParallel
args = list(expr.args)[::-1]
parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
False) if isinstance(x, MIMOParallel) else self._print(x)
return r"\cdot".join(map(parens, args))
def _print_Parallel(self, expr):
return ' + '.join(map(self._print, expr.args))
def _print_MIMOParallel(self, expr):
return ' + '.join(map(self._print, expr.args))
def _print_Feedback(self, expr):
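        # Descriptive note: the closed loop is printed as a fraction whose
        # denominator combines the unity transfer function with the flattened
        # open-loop Series product; expr.sign == -1 (the default) renders
        # with a "+" in the denominator.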
from sympy.physics.control import TransferFunction, Series
num, tf = expr.sys1, TransferFunction(1, 1, expr.var)
num_arg_list = list(num.args) if isinstance(num, Series) else [num]
den_arg_list = list(expr.sys2.args) if \
isinstance(expr.sys2, Series) else [expr.sys2]
den_term_1 = tf
if isinstance(num, Series) and isinstance(expr.sys2, Series):
den_term_2 = Series(*num_arg_list, *den_arg_list)
elif isinstance(num, Series) and isinstance(expr.sys2, TransferFunction):
if expr.sys2 == tf:
den_term_2 = Series(*num_arg_list)
else:
den_term_2 = tf, Series(*num_arg_list, expr.sys2)
elif isinstance(num, TransferFunction) and isinstance(expr.sys2, Series):
if num == tf:
den_term_2 = Series(*den_arg_list)
else:
den_term_2 = Series(num, *den_arg_list)
else:
if num == tf:
den_term_2 = Series(*den_arg_list)
elif expr.sys2 == tf:
den_term_2 = Series(*num_arg_list)
else:
den_term_2 = Series(*num_arg_list, *den_arg_list)
numer = self._print(num)
denom_1 = self._print(den_term_1)
denom_2 = self._print(den_term_2)
_sign = "+" if expr.sign == -1 else "-"
return r"\frac{%s}{%s %s %s}" % (numer, denom_1, _sign, denom_2)
def _print_MIMOFeedback(self, expr):
from sympy.physics.control import MIMOSeries
inv_mat = self._print(MIMOSeries(expr.sys2, expr.sys1))
sys1 = self._print(expr.sys1)
_sign = "+" if expr.sign == -1 else "-"
return r"\left(I_{\tau} %s %s\right)^{-1} \cdot %s" % (_sign, inv_mat, sys1)
def _print_TransferFunctionMatrix(self, expr):
mat = self._print(expr._expr_mat)
return r"%s_\tau" % mat
def _print_DFT(self, expr):
return r"\text{{{}}}_{{{}}}".format(expr.__class__.__name__, expr.n)
_print_IDFT = _print_DFT
def _print_NamedMorphism(self, morphism):
pretty_name = self._print(Symbol(morphism.name))
pretty_morphism = self._print_Morphism(morphism)
return "%s:%s" % (pretty_name, pretty_morphism)
def _print_IdentityMorphism(self, morphism):
from sympy.categories import NamedMorphism
return self._print_NamedMorphism(NamedMorphism(
morphism.domain, morphism.codomain, "id"))
def _print_CompositeMorphism(self, morphism):
# All components of the morphism have names and it is thus
# possible to build the name of the composite.
component_names_list = [self._print(Symbol(component.name)) for
component in morphism.components]
component_names_list.reverse()
component_names = "\\circ ".join(component_names_list) + ":"
pretty_morphism = self._print_Morphism(morphism)
return component_names + pretty_morphism
def _print_Category(self, morphism):
return r"\mathbf{{{}}}".format(self._print(Symbol(morphism.name)))
def _print_Diagram(self, diagram):
if not diagram.premises:
# This is an empty diagram.
return self._print(S.EmptySet)
latex_result = self._print(diagram.premises)
if diagram.conclusions:
latex_result += "\\Longrightarrow %s" % \
self._print(diagram.conclusions)
return latex_result
def _print_DiagramGrid(self, grid):
latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width)
for i in range(grid.height):
for j in range(grid.width):
if grid[i, j]:
latex_result += latex(grid[i, j])
latex_result += " "
if j != grid.width - 1:
latex_result += "& "
if i != grid.height - 1:
latex_result += "\\\\"
latex_result += "\n"
latex_result += "\\end{array}\n"
return latex_result
def _print_FreeModule(self, M):
return '{{{}}}^{{{}}}'.format(self._print(M.ring), self._print(M.rank))
def _print_FreeModuleElement(self, m):
# Print as row vector for convenience, for now.
return r"\left[ {} \right]".format(",".join(
'{' + self._print(x) + '}' for x in m))
def _print_SubModule(self, m):
return r"\left\langle {} \right\rangle".format(",".join(
'{' + self._print(x) + '}' for x in m.gens))
def _print_ModuleImplementedIdeal(self, m):
return r"\left\langle {} \right\rangle".format(",".join(
'{' + self._print(x) + '}' for [x] in m._module.gens))
def _print_Quaternion(self, expr):
# TODO: This expression is potentially confusing,
# shall we print it as `Quaternion( ... )`?
s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True)
for i in expr.args]
a = [s[0]] + [i+" "+j for i, j in zip(s[1:], "ijk")]
return " + ".join(a)
def _print_QuotientRing(self, R):
# TODO nicer fractions for few generators...
return r"\frac{{{}}}{{{}}}".format(self._print(R.ring),
self._print(R.base_ideal))
def _print_QuotientRingElement(self, x):
return r"{{{}}} + {{{}}}".format(self._print(x.data),
self._print(x.ring.base_ideal))
def _print_QuotientModuleElement(self, m):
return r"{{{}}} + {{{}}}".format(self._print(m.data),
self._print(m.module.killed_module))
def _print_QuotientModule(self, M):
# TODO nicer fractions for few generators...
return r"\frac{{{}}}{{{}}}".format(self._print(M.base),
self._print(M.killed_module))
def _print_MatrixHomomorphism(self, h):
return r"{{{}}} : {{{}}} \to {{{}}}".format(self._print(h._sympy_matrix()),
self._print(h.domain), self._print(h.codomain))
def _print_Manifold(self, manifold):
string = manifold.name.name
if '{' in string:
name, supers, subs = string, [], []
else:
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
name = r'\text{%s}' % name
if supers:
name += "^{%s}" % " ".join(supers)
if subs:
name += "_{%s}" % " ".join(subs)
return name
def _print_Patch(self, patch):
return r'\text{%s}_{%s}' % (self._print(patch.name), self._print(patch.manifold))
def _print_CoordSystem(self, coordsys):
return r'\text{%s}^{\text{%s}}_{%s}' % (
self._print(coordsys.name), self._print(coordsys.patch.name), self._print(coordsys.manifold)
)
def _print_CovarDerivativeOp(self, cvd):
return r'\mathbb{\nabla}_{%s}' % self._print(cvd._wrt)
def _print_BaseScalarField(self, field):
string = field._coord_sys.symbols[field._index].name
return r'\mathbf{{{}}}'.format(self._print(Symbol(string)))
def _print_BaseVectorField(self, field):
string = field._coord_sys.symbols[field._index].name
return r'\partial_{{{}}}'.format(self._print(Symbol(string)))
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys.symbols[field._index].name
return r'\operatorname{{d}}{}'.format(self._print(Symbol(string)))
else:
string = self._print(field)
return r'\operatorname{{d}}\left({}\right)'.format(string)
def _print_Tr(self, p):
# TODO: Handle indices
contents = self._print(p.args[0])
return r'\operatorname{{tr}}\left({}\right)'.format(contents)
def _print_totient(self, expr, exp=None):
if exp is not None:
return r'\left(\phi\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\phi\left(%s\right)' % self._print(expr.args[0])
def _print_reduced_totient(self, expr, exp=None):
if exp is not None:
return r'\left(\lambda\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\lambda\left(%s\right)' % self._print(expr.args[0])
def _print_divisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^{%s}%s" % (exp, tex)
return r"\sigma%s" % tex
def _print_udivisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^*^{%s}%s" % (exp, tex)
return r"\sigma^*%s" % tex
def _print_primenu(self, expr, exp=None):
if exp is not None:
return r'\left(\nu\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\nu\left(%s\right)' % self._print(expr.args[0])
def _print_primeomega(self, expr, exp=None):
if exp is not None:
return r'\left(\Omega\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\Omega\left(%s\right)' % self._print(expr.args[0])
def _print_Str(self, s):
return str(s.name)
def _print_float(self, expr):
return self._print(Float(expr))
def _print_int(self, expr):
return str(expr)
def _print_mpz(self, expr):
return str(expr)
def _print_mpq(self, expr):
return str(expr)
def _print_Predicate(self, expr):
return r"\operatorname{{Q}}_{{\text{{{}}}}}".format(latex_escape(str(expr.name)))
def _print_AppliedPredicate(self, expr):
pred = expr.function
args = expr.arguments
pred_latex = self._print(pred)
args_latex = ', '.join([self._print(a) for a in args])
return '%s(%s)' % (pred_latex, args_latex)
def emptyPrinter(self, expr):
# default to just printing as monospace, as it would normally be shown
s = super().emptyPrinter(expr)
return r"\mathtt{\text{%s}}" % latex_escape(s)
def translate(s: str) -> str:
r'''
Check for a modifier ending the string. If present, convert the
modifier to latex and translate the rest recursively.
Given a description of a Greek letter or other special character,
return the appropriate latex.
Let everything else pass as given.
>>> from sympy.printing.latex import translate
>>> translate('alphahatdotprime')
"{\\dot{\\hat{\\alpha}}}'"
'''
# Process the rest
tex = tex_greek_dictionary.get(s)
if tex:
return tex
elif s.lower() in greek_letters_set:
return "\\" + s.lower()
elif s in other_symbols:
return "\\" + s
else:
# Process modifiers, if any, and recurse
for key in sorted(modifier_dict.keys(), key=len, reverse=True):
if s.lower().endswith(key) and len(s) > len(key):
return modifier_dict[key](translate(s[:-len(key)]))
return s
@print_function(LatexPrinter)
def latex(expr, **settings):
r"""Convert the given expression to LaTeX string representation.
Parameters
==========
full_prec: boolean, optional
If set to True, a floating point number is printed with full precision.
fold_frac_powers : boolean, optional
Emit ``^{p/q}`` instead of ``^{\frac{p}{q}}`` for fractional powers.
fold_func_brackets : boolean, optional
Fold function brackets where applicable.
fold_short_frac : boolean, optional
Emit ``p / q`` instead of ``\frac{p}{q}`` when the denominator is
simple enough (at most two terms and no powers). The default value is
``True`` for inline mode, ``False`` otherwise.
inv_trig_style : string, optional
How inverse trig functions should be displayed. Can be one of
``'abbreviated'``, ``'full'``, or ``'power'``. Defaults to
``'abbreviated'``.
itex : boolean, optional
Specifies if itex-specific syntax is used, including emitting
``$$...$$``.
ln_notation : boolean, optional
If set to ``True``, ``\ln`` is used instead of default ``\log``.
long_frac_ratio : float or None, optional
The allowed ratio of the width of the numerator to the width of the
denominator before the printer breaks off long fractions. If ``None``
(the default value), long fractions are not broken up.
mat_delim : string, optional
The delimiter to wrap around matrices. Can be one of ``'['``, ``'('``,
or the empty string ``''``. Defaults to ``'['``.
mat_str : string, optional
Which matrix environment string to emit. ``'smallmatrix'``,
``'matrix'``, ``'array'``, etc. Defaults to ``'smallmatrix'`` for
inline mode, ``'matrix'`` for matrices of no more than 10 columns, and
``'array'`` otherwise.
mode: string, optional
Specifies how the generated code will be delimited. ``mode`` can be one
of ``'plain'``, ``'inline'``, ``'equation'`` or ``'equation*'``. If
``mode`` is set to ``'plain'``, then the resulting code will not be
delimited at all (this is the default). If ``mode`` is set to
``'inline'`` then inline LaTeX ``$...$`` will be used. If ``mode`` is
set to ``'equation'`` or ``'equation*'``, the resulting code will be
enclosed in the ``equation`` or ``equation*`` environment (remember to
import ``amsmath`` for ``equation*``), unless the ``itex`` option is
set. In the latter case, the ``$$...$$`` syntax is used.
mul_symbol : string or None, optional
The symbol to use for multiplication. Can be one of ``None``,
``'ldot'``, ``'dot'``, or ``'times'``.
order: string, optional
Any of the supported monomial orderings (currently ``'lex'``,
``'grlex'``, or ``'grevlex'``), ``'old'``, and ``'none'``. This
parameter does nothing for `~.Mul` objects. Setting order to ``'old'``
uses the compatibility ordering for ``~.Add`` defined in Printer. For
very large expressions, set the ``order`` keyword to ``'none'`` if
speed is a concern.
symbol_names : dictionary of strings mapped to symbols, optional
Dictionary of symbols and the custom strings they should be emitted as.
root_notation : boolean, optional
If set to ``False``, exponents of the form 1/n are printed in fractional
form. Default is ``True``, which prints the exponent in root form.
mat_symbol_style : string, optional
Can be either ``'plain'`` (default) or ``'bold'``. If set to
``'bold'``, a `~.MatrixSymbol` A will be printed as ``\mathbf{A}``,
otherwise as ``A``.
imaginary_unit : string, optional
String to use for the imaginary unit. Defined options are ``'i'``
(default) and ``'j'``. Adding ``r`` or ``t`` in front gives ``\mathrm``
or ``\text``, so ``'ri'`` leads to ``\mathrm{i}`` which gives
`\mathrm{i}`.
gothic_re_im : boolean, optional
If set to ``True``, `\Re` and `\Im` are used for ``re`` and ``im``, respectively.
The default is ``False`` leading to `\operatorname{re}` and `\operatorname{im}`.
decimal_separator : string, optional
Specifies the separator between the whole and fractional parts of a floating
point number: ``period`` (the default) prints `2.5`, while ``comma`` prints
`2{,}5`. When ``comma`` is chosen, lists, sets, and tuples are printed with a
semicolon separating the elements, e.g. [1; 2; 3] instead of [1, 2, 3].
parenthesize_super : boolean, optional
If set to ``False``, superscripted expressions will not be parenthesized when
powered. Default is ``True``, which parenthesizes the expression when powered.
min: Integer or None, optional
Sets the lower bound for the exponent to print floating point numbers in
fixed-point format.
max: Integer or None, optional
Sets the upper bound for the exponent to print floating point numbers in
fixed-point format.
diff_operator: string, optional
String to use for differential operator. Default is ``'d'``, to print in italic
form. ``'rd'``, ``'td'`` are shortcuts for ``\mathrm{d}`` and ``\text{d}``.
Notes
=====
Not using a print statement for printing results in double backslashes for
LaTeX commands, since that is how Python escapes backslashes in strings.
>>> from sympy import latex, Rational
>>> from sympy.abc import tau
>>> latex((2*tau)**Rational(7,2))
'8 \\sqrt{2} \\tau^{\\frac{7}{2}}'
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
Examples
========
>>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational, log
>>> from sympy.abc import x, y, mu, r, tau
Basic usage:
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
``mode`` and ``itex`` options:
>>> print(latex((2*mu)**Rational(7,2), mode='plain'))
8 \sqrt{2} \mu^{\frac{7}{2}}
>>> print(latex((2*tau)**Rational(7,2), mode='inline'))
$8 \sqrt{2} \tau^{7 / 2}$
>>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
>>> print(latex((2*mu)**Rational(7,2), mode='equation'))
\begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
>>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
$$8 \sqrt{2} \mu^{\frac{7}{2}}$$
Fraction options:
>>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
8 \sqrt{2} \tau^{7/2}
>>> print(latex((2*tau)**sin(Rational(7,2))))
\left(2 \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
>>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets=True))
\left(2 \tau\right)^{\sin {\frac{7}{2}}}
>>> print(latex(3*x**2/y))
\frac{3 x^{2}}{y}
>>> print(latex(3*x**2/y, fold_short_frac=True))
3 x^{2} / y
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
\frac{\int r\, dr}{2 \pi}
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
\frac{1}{2 \pi} \int r\, dr
Multiplication options:
>>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
\left(2 \times \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
Trig options:
>>> print(latex(asin(Rational(7,2))))
\operatorname{asin}{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
\arcsin{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
\sin^{-1}{\left(\frac{7}{2} \right)}
Matrix options:
>>> print(latex(Matrix(2, 1, [x, y])))
\left[\begin{matrix}x\\y\end{matrix}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
\left[\begin{array}{c}x\\y\end{array}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
\left(\begin{matrix}x\\y\end{matrix}\right)
Custom printing of symbols:
>>> print(latex(x**2, symbol_names={x: 'x_i'}))
x_i^{2}
Logarithms:
>>> print(latex(log(10)))
\log{\left(10 \right)}
>>> print(latex(log(10), ln_notation=True))
\ln{\left(10 \right)}
``latex()`` also supports the builtin container types :class:`list`,
:class:`tuple`, and :class:`dict`:
>>> print(latex([2/x, y], mode='inline'))
$\left[ 2 / x, \ y\right]$
Unsupported types are rendered as monospaced plaintext:
>>> print(latex(int))
\mathtt{\text{<class 'int'>}}
>>> print(latex("plain % text"))
\mathtt{\text{plain \% text}}
See :ref:`printer_method_example` for an example of how to override
this behavior for your own types by implementing ``_latex``.
.. versionchanged:: 1.7.0
Unsupported types no longer have their ``str`` representation treated as valid latex.
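A few settings that are not exercised above, sketched for illustration (the
exact output strings may differ slightly between versions; the comments show
the intended effect rather than verbatim output)::

    from sympy import latex, I, Float, Rational, Symbol

    x = Symbol('x')
    latex(I, imaginary_unit='ri')                    # renders as \mathrm{i}
    latex(Float(2.5), decimal_separator='comma')     # renders as 2{,}5
    latex(x**Rational(1, 3), root_notation=False)    # exponent kept as 1/3, not a root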
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
"""Prints LaTeX representation of the given expression. Takes the same
settings as ``latex()``."""
print(latex(expr, **settings))
def multiline_latex(lhs, rhs, terms_per_line=1, environment="align*", use_dots=False, **settings):
r"""
This function generates a LaTeX equation with a multiline right-hand side
in an ``align*``, ``eqnarray`` or ``IEEEeqnarray`` environment.
Parameters
==========
lhs : Expr
Left-hand side of equation
rhs : Expr
Right-hand side of equation
terms_per_line : integer, optional
Number of terms per line to print. Default is 1.
environment : "string", optional
Which LaTeX wnvironment to use for the output. Options are "align*"
(default), "eqnarray", and "IEEEeqnarray".
use_dots : boolean, optional
If ``True``, ``\\dots`` is added to the end of each line. Default is ``False``.
Examples
========
>>> from sympy import multiline_latex, symbols, sin, cos, exp, log, I
>>> x, y, alpha = symbols('x y alpha')
>>> expr = sin(alpha*y) + exp(I*alpha) - cos(log(y))
>>> print(multiline_latex(x, expr))
\begin{align*}
x = & e^{i \alpha} \\
& + \sin{\left(\alpha y \right)} \\
& - \cos{\left(\log{\left(y \right)} \right)}
\end{align*}
Using at most two terms per line:
>>> print(multiline_latex(x, expr, 2))
\begin{align*}
x = & e^{i \alpha} + \sin{\left(\alpha y \right)} \\
& - \cos{\left(\log{\left(y \right)} \right)}
\end{align*}
Using ``eqnarray`` and dots:
>>> print(multiline_latex(x, expr, terms_per_line=2, environment="eqnarray", use_dots=True))
\begin{eqnarray}
x & = & e^{i \alpha} + \sin{\left(\alpha y \right)} \dots\nonumber\\
& & - \cos{\left(\log{\left(y \right)} \right)}
\end{eqnarray}
Using ``IEEEeqnarray``:
>>> print(multiline_latex(x, expr, environment="IEEEeqnarray"))
\begin{IEEEeqnarray}{rCl}
x & = & e^{i \alpha} \nonumber\\
& & + \sin{\left(\alpha y \right)} \nonumber\\
& & - \cos{\left(\log{\left(y \right)} \right)}
\end{IEEEeqnarray}
Notes
=====
All optional parameters from ``latex`` can also be used.
"""
# Based on code from https://github.com/sympy/sympy/issues/3001
l = LatexPrinter(**settings)
if environment == "eqnarray":
result = r'\begin{eqnarray}' + '\n'
first_term = '& = &'
nonumber = r'\nonumber'
end_term = '\n\\end{eqnarray}'
doubleet = True
elif environment == "IEEEeqnarray":
result = r'\begin{IEEEeqnarray}{rCl}' + '\n'
first_term = '& = &'
nonumber = r'\nonumber'
end_term = '\n\\end{IEEEeqnarray}'
doubleet = True
elif environment == "align*":
result = r'\begin{align*}' + '\n'
first_term = '= &'
nonumber = ''
end_term = '\n\\end{align*}'
doubleet = False
else:
raise ValueError("Unknown environment: {}".format(environment))
dots = ''
if use_dots:
dots=r'\dots'
terms = rhs.as_ordered_terms()
n_terms = len(terms)
term_count = 1
for i in range(n_terms):
term = terms[i]
term_start = ''
term_end = ''
sign = '+'
if term_count > terms_per_line:
if doubleet:
term_start = '& & '
else:
term_start = '& '
term_count = 1
if term_count == terms_per_line:
# End of line
if i < n_terms-1:
# There are terms remaining
term_end = dots + nonumber + r'\\' + '\n'
else:
term_end = ''
if term.as_ordered_factors()[0] == -1:
term = -1*term
sign = r'-'
if i == 0: # beginning
if sign == '+':
sign = ''
result += r'{:s} {:s}{:s} {:s} {:s}'.format(l.doprint(lhs),
first_term, sign, l.doprint(term), term_end)
else:
result += r'{:s}{:s} {:s} {:s}'.format(term_start, sign,
l.doprint(term), term_end)
term_count += 1
result += end_term
return result
|
f512357cee0f2f8bc70b5363e43230bc1df12914a69527ee749c1ba3d368fdf1 | """Printing subsystem driver
SymPy's printing system works the following way: Any expression can be
passed to a designated Printer, which is then responsible for returning an
adequate representation of that expression.
**The basic concept is the following:**
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-back use the emptyPrinter method for the printer.
Which Method is Responsible for Printing?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The whole printing process is started by calling ``.doprint(expr)`` on the printer
which you want to use. This method looks for an appropriate method which can
print the given expression in the given style that the printer defines.
While looking for the method, it follows these steps:
1. **Let the object print itself if it knows how.**
The printer looks for a specific method in every object. The name of that method
depends on the specific printer and is defined under ``Printer.printmethod``.
For example, StrPrinter calls ``_sympystr`` and LatexPrinter calls ``_latex``.
Look at the documentation of the printer that you want to use.
The name of the method is specified there.
This was the original way of doing printing in SymPy. Every class had
its own latex, mathml, str and repr methods, but it turned out to be
hard to produce a high-quality printer with the methods spread out that
far. Therefore all printing code was combined into the different
printers, which works great for built-in SymPy objects but not as well
for user-defined classes, where it is inconvenient to patch the
printers.
2. **Take the best fitting method defined in the printer.**
The printer loops through expr classes (class + its bases), and tries
to dispatch the work to ``_print_<EXPR_CLASS>``
e.g., suppose we have the following class hierarchy::
Basic
|
Atom
|
Number
|
Rational
then, for ``expr=Rational(...)``, the Printer will try
to call printer methods in the order as shown in the figure below::
p._print(expr)
|
|-- p._print_Rational(expr)
|
|-- p._print_Number(expr)
|
|-- p._print_Atom(expr)
|
`-- p._print_Basic(expr)
if a ``._print_Rational`` method exists in the printer, then it is called
and the result is returned. Otherwise, the printer tries to call
``._print_Number`` and so on (see the short sketch after this list).
3. **As a fall-back use the emptyPrinter method for the printer.**
As fall-back ``self.emptyPrinter`` will be called with the expression. If
not defined in the Printer subclass this will be the same as ``str(expr)``.
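To make step 2 above concrete, here is an illustrative sketch (not part of the
printing API) of how the candidate method names follow an expression's MRO:

.. code-block:: python

    from sympy import Rational

    names = ['_print_' + cls.__name__ for cls in type(Rational(3, 7)).__mro__]
    # names starts with '_print_Rational', '_print_Number', ... ; the printer
    # calls the first of these that the Printer subclass actually defines,
    # falling back to emptyPrinter if none is found.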
.. _printer_example:
Example of Custom Printer
^^^^^^^^^^^^^^^^^^^^^^^^^
In the example below, we have a printer which prints the derivative of a function
in a shorter form.
.. code-block:: python
from sympy.core.symbol import Symbol
from sympy.printing.latex import LatexPrinter, print_latex
from sympy.core.function import UndefinedFunction, Function
class MyLatexPrinter(LatexPrinter):
\"\"\"Print derivative of a function of symbols in a shorter form.
\"\"\"
def _print_Derivative(self, expr):
function, *vars = expr.args
if not isinstance(type(function), UndefinedFunction) or \\
not all(isinstance(i, Symbol) for i in vars):
return super()._print_Derivative(expr)
# If you want the printer to work correctly for nested
# expressions then use self._print() instead of str() or latex().
# See the example of nested modulo below in the custom printing
# method section.
return "{}_{{{}}}".format(
self._print(Symbol(function.func.__name__)),
''.join(self._print(i) for i in vars))
def print_my_latex(expr):
\"\"\" Most of the printers define their own wrappers for print().
These wrappers usually take printer settings. Our printer does not have
any settings.
\"\"\"
print(MyLatexPrinter().doprint(expr))
y = Symbol("y")
x = Symbol("x")
f = Function("f")
expr = f(x, y).diff(x, y)
# Print the expression using the normal latex printer and our custom
# printer.
print_latex(expr)
print_my_latex(expr)
The output of the code above is::
\\frac{\\partial^{2}}{\\partial x\\partial y} f{\\left(x,y \\right)}
f_{xy}
.. _printer_method_example:
Example of Custom Printing Method
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the example below, the latex printing of the modulo operator is modified.
This is done by overriding the method ``_latex`` of ``Mod``.
>>> from sympy import Symbol, Mod, Integer, print_latex
>>> # Always use printer._print()
>>> class ModOp(Mod):
... def _latex(self, printer):
... a, b = [printer._print(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left(%s, %s\\right)}" % (a, b)
Comparing the output of our custom operator to the builtin one:
>>> x = Symbol('x')
>>> m = Symbol('m')
>>> print_latex(Mod(x, m))
x \\bmod m
>>> print_latex(ModOp(x, m))
\\operatorname{Mod}{\\left(x, m\\right)}
Common mistakes
~~~~~~~~~~~~~~~
It's important to always use ``self._print(obj)`` to print subcomponents of
an expression when customizing a printer. Mistakes include:
1. Using ``self.doprint(obj)`` instead:
>>> # This example does not work properly, as only the outermost call may use
>>> # doprint.
>>> class ModOpModeWrong(Mod):
... def _latex(self, printer):
... a, b = [printer.doprint(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left(%s, %s\\right)}" % (a, b)
This fails when the ``mode`` argument is passed to the printer:
>>> print_latex(ModOp(x, m), mode='inline') # ok
$\\operatorname{Mod}{\\left(x, m\\right)}$
>>> print_latex(ModOpModeWrong(x, m), mode='inline') # bad
$\\operatorname{Mod}{\\left($x$, $m$\\right)}$
2. Using ``str(obj)`` instead:
>>> class ModOpNestedWrong(Mod):
... def _latex(self, printer):
... a, b = [str(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left(%s, %s\\right)}" % (a, b)
This fails on nested objects:
>>> # Nested modulo.
>>> print_latex(ModOp(ModOp(x, m), Integer(7))) # ok
\\operatorname{Mod}{\\left(\\operatorname{Mod}{\\left(x, m\\right)}, 7\\right)}
>>> print_latex(ModOpNestedWrong(ModOpNestedWrong(x, m), Integer(7))) # bad
\\operatorname{Mod}{\\left(ModOpNestedWrong(x, m), 7\\right)}
3. Using ``LatexPrinter()._print(obj)`` instead.
>>> from sympy.printing.latex import LatexPrinter
>>> class ModOpSettingsWrong(Mod):
... def _latex(self, printer):
... a, b = [LatexPrinter()._print(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left(%s, %s\\right)}" % (a, b)
This causes all the settings to be discarded in the subobjects. As an
example, the ``full_prec`` setting which shows floats to full precision is
ignored:
>>> from sympy import Float
>>> print_latex(ModOp(Float(1) * x, m), full_prec=True) # ok
\\operatorname{Mod}{\\left(1.00000000000000 x, m\\right)}
>>> print_latex(ModOpSettingsWrong(Float(1) * x, m), full_prec=True) # bad
\\operatorname{Mod}{\\left(1.0 x, m\\right)}
"""
from __future__ import annotations
import sys
from typing import Any, Type
import inspect
from contextlib import contextmanager
from functools import cmp_to_key, update_wrapper
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.core import BasicMeta
from sympy.core.function import AppliedUndef, UndefinedFunction, Function
@contextmanager
def printer_context(printer, **kwargs):
original = printer._context.copy()
try:
printer._context.update(kwargs)
yield
finally:
printer._context = original
class Printer:
""" Generic printer
Its job is to provide infrastructure for implementing new printers easily.
If you want to define your custom Printer or your custom printing method
for your custom class then see the example above: printer_example_ .
"""
_global_settings: dict[str, Any] = {}
_default_settings: dict[str, Any] = {}
printmethod = None # type: str
@classmethod
def _get_initial_settings(cls):
settings = cls._default_settings.copy()
for key, val in cls._global_settings.items():
if key in cls._default_settings:
settings[key] = val
return settings
def __init__(self, settings=None):
self._str = str
self._settings = self._get_initial_settings()
self._context = {} # mutable during printing
if settings is not None:
self._settings.update(settings)
if len(self._settings) > len(self._default_settings):
for key in self._settings:
if key not in self._default_settings:
raise TypeError("Unknown setting '%s'." % key)
# _print_level is the number of times self._print() was recursively
# called. See StrPrinter._print_Float() for an example of usage
self._print_level = 0
@classmethod
def set_global_settings(cls, **settings):
"""Set system-wide printing settings. """
for key, val in settings.items():
if val is not None:
cls._global_settings[key] = val
@property
def order(self):
if 'order' in self._settings:
return self._settings['order']
else:
raise AttributeError("No order defined.")
def doprint(self, expr):
"""Returns printer's representation for expr (as a string)"""
return self._str(self._print(expr))
def _print(self, expr, **kwargs) -> str:
"""Internal dispatcher
Tries the following concepts to print an expression:
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-back use the emptyPrinter method for the printer.
"""
self._print_level += 1
try:
# If the printer defines a name for a printing method
# (Printer.printmethod) and the object knows for itself how it
# should be printed, use that method.
if (self.printmethod and hasattr(expr, self.printmethod)
and not isinstance(expr, BasicMeta)):
return getattr(expr, self.printmethod)(self, **kwargs)
# See if the class of expr is known, or if one of its super
# classes is known, and use that print function
# Exception: ignore the subclasses of Undefined, so that, e.g.,
# Function('gamma') does not get dispatched to _print_gamma
classes = type(expr).__mro__
if AppliedUndef in classes:
classes = classes[classes.index(AppliedUndef):]
if UndefinedFunction in classes:
classes = classes[classes.index(UndefinedFunction):]
# Another exception: if someone subclasses a known function, e.g.,
# gamma, and changes the name, then ignore _print_gamma
if Function in classes:
i = classes.index(Function)
classes = tuple(c for c in classes[:i] if \
c.__name__ == classes[0].__name__ or \
c.__name__.endswith("Base")) + classes[i:]
for cls in classes:
printmethodname = '_print_' + cls.__name__
printmethod = getattr(self, printmethodname, None)
if printmethod is not None:
return printmethod(expr, **kwargs)
# Unknown object, fall back to the emptyPrinter.
return self.emptyPrinter(expr)
finally:
self._print_level -= 1
def emptyPrinter(self, expr):
return str(expr)
def _as_ordered_terms(self, expr, order=None):
"""A compatibility function for ordering terms in Add. """
order = order or self.order
if order == 'old':
return sorted(Add.make_args(expr), key=cmp_to_key(Basic._compare_pretty))
elif order == 'none':
return list(expr.args)
else:
return expr.as_ordered_terms(order=order)
class _PrintFunction:
"""
Function wrapper to replace ``**settings`` in the signature with printer defaults
"""
def __init__(self, f, print_cls: Type[Printer]):
# find all the non-setting arguments
params = list(inspect.signature(f).parameters.values())
assert params.pop(-1).kind == inspect.Parameter.VAR_KEYWORD
self.__other_params = params
self.__print_cls = print_cls
update_wrapper(self, f)
def __reduce__(self):
# Since this is used as a decorator, it replaces the original function.
# The default pickling will try to pickle self.__wrapped__ and fail
# because the wrapped function can't be retrieved by name.
return self.__wrapped__.__qualname__
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
@property
def __signature__(self) -> inspect.Signature:
settings = self.__print_cls._get_initial_settings()
return inspect.Signature(
parameters=self.__other_params + [
inspect.Parameter(k, inspect.Parameter.KEYWORD_ONLY, default=v)
for k, v in settings.items()
],
return_annotation=self.__wrapped__.__annotations__.get('return', inspect.Signature.empty) # type:ignore
)
def print_function(print_cls):
""" A decorator to replace kwargs with the printer settings in __signature__ """
def decorator(f):
if sys.version_info < (3, 9):
# We have to create a subclass so that `help` actually shows the docstring in older Python versions.
# IPython and Sphinx do not need this, only a raw Python console.
cls = type(f'{f.__qualname__}_PrintFunction', (_PrintFunction,), dict(__doc__=f.__doc__))
else:
cls = _PrintFunction
return cls(f, print_cls)
return decorator
|
711a66b445f05fa0a650236656ed71bf717416b0182916c299a243b4dce046fb | """
C code printer
The C89CodePrinter & C99CodePrinter convert single SymPy expressions into
single C expressions, using the functions defined in math.h where possible.
A complete code generator, which uses ccode extensively, can be found in
sympy.utilities.codegen. The codegen module can be used to generate complete
source code files that are compilable without further modifications.
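A minimal usage sketch (illustrative only; it uses the ``ccode`` helper
re-exported from this module and one of the printer classes defined below)::

    from sympy import symbols, sqrt
    from sympy.printing.c import ccode, C99CodePrinter

    x = symbols('x')
    ccode(x**2 + sqrt(x))          # e.g. 'pow(x, 2) + sqrt(x)'
    C99CodePrinter().doprint(1/x)  # e.g. '1.0/x'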
"""
from __future__ import annotations
from typing import Any
from functools import wraps
from itertools import chain
from sympy.core import S
from sympy.codegen.ast import (
Assignment, Pointer, Variable, Declaration, Type,
real, complex_, integer, bool_, float32, float64, float80,
complex64, complex128, intc, value_const, pointer_const,
int8, int16, int32, int64, uint8, uint16, uint32, uint64, untyped,
none
)
from sympy.printing.codeprinter import CodePrinter, requires
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.sets.fancysets import Range
# These are defined in the other file so we can avoid importing sympy.codegen
# from the top-level 'import sympy'. Export them here as well.
from sympy.printing.codeprinter import ccode, print_ccode # noqa:F401
# dictionary mapping SymPy function to (argument_conditions, C_function).
# Used in C89CodePrinter._print_Function(self)
known_functions_C89 = {
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"exp": "exp",
"log": "log",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"floor": "floor",
"ceiling": "ceil",
"sqrt": "sqrt", # To enable automatic rewrites
}
known_functions_C99 = dict(known_functions_C89, **{
'exp2': 'exp2',
'expm1': 'expm1',
'log10': 'log10',
'log2': 'log2',
'log1p': 'log1p',
'Cbrt': 'cbrt',
'hypot': 'hypot',
'fma': 'fma',
'loggamma': 'lgamma',
'erfc': 'erfc',
'Max': 'fmax',
'Min': 'fmin',
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
"erf": "erf",
"gamma": "tgamma",
})
# These are the core reserved words in the C language. Taken from:
# http://en.cppreference.com/w/c/keyword
reserved_words = [
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'int',
'long', 'register', 'return', 'short', 'signed', 'sizeof', 'static',
'struct', 'entry', # never standardized, we'll leave it here anyway
'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'
]
reserved_words_c99 = ['inline', 'restrict']
def get_math_macros():
""" Returns a dictionary with math-related macros from math.h/cmath
Note that these macros are not strictly required by the C/C++-standard.
For MSVC they are enabled by defining "_USE_MATH_DEFINES" (preferably
via a compilation flag).
Returns
=======
Dictionary mapping SymPy expressions to strings (macro names)
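A quick, illustrative way to inspect the mapping::

    from sympy import pi
    from sympy.printing.c import get_math_macros

    get_math_macros()[pi]   # -> 'M_PI'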
"""
from sympy.codegen.cfunctions import log2, Sqrt
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.miscellaneous import sqrt
return {
S.Exp1: 'M_E',
log2(S.Exp1): 'M_LOG2E',
1/log(2): 'M_LOG2E',
log(2): 'M_LN2',
log(10): 'M_LN10',
S.Pi: 'M_PI',
S.Pi/2: 'M_PI_2',
S.Pi/4: 'M_PI_4',
1/S.Pi: 'M_1_PI',
2/S.Pi: 'M_2_PI',
2/sqrt(S.Pi): 'M_2_SQRTPI',
2/Sqrt(S.Pi): 'M_2_SQRTPI',
sqrt(2): 'M_SQRT2',
Sqrt(2): 'M_SQRT2',
1/sqrt(2): 'M_SQRT1_2',
1/Sqrt(2): 'M_SQRT1_2'
}
def _as_macro_if_defined(meth):
""" Decorator for printer methods
When a Printer's method is decorated using this decorator the expressions printed
will first be looked for in the attribute ``math_macros``, and if present it will
print the macro name in ``math_macros`` followed by a type suffix for the type
``real``. e.g. printing ``sympy.pi`` would print ``M_PIl`` if real is mapped to float80.
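An illustrative sketch of the effect (assuming the default ``math_macros``)::

    from sympy import pi
    from sympy.codegen.ast import real, float80
    from sympy.printing.c import C89CodePrinter

    C89CodePrinter().doprint(pi)                                   # 'M_PI'
    C89CodePrinter({'type_aliases': {real: float80}}).doprint(pi)  # 'M_PIl'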
"""
@wraps(meth)
def _meth_wrapper(self, expr, **kwargs):
if expr in self.math_macros:
return '%s%s' % (self.math_macros[expr], self._get_math_macro_suffix(real))
else:
return meth(self, expr, **kwargs)
return _meth_wrapper
class C89CodePrinter(CodePrinter):
"""A printer to convert Python expressions to strings of C code"""
printmethod = "_ccode"
language = "C"
standard = "C89"
reserved_words = set(reserved_words)
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
}
type_aliases = {
real: float64,
complex_: complex128,
integer: intc
}
type_mappings: dict[Type, Any] = {
real: 'double',
intc: 'int',
float32: 'float',
float64: 'double',
integer: 'int',
bool_: 'bool',
int8: 'int8_t',
int16: 'int16_t',
int32: 'int32_t',
int64: 'int64_t',
uint8: 'uint8_t',
uint16: 'uint16_t',
uint32: 'uint32_t',
uint64: 'uint64_t',
}
type_headers = {
bool_: {'stdbool.h'},
int8: {'stdint.h'},
int16: {'stdint.h'},
int32: {'stdint.h'},
int64: {'stdint.h'},
uint8: {'stdint.h'},
uint16: {'stdint.h'},
uint32: {'stdint.h'},
uint64: {'stdint.h'},
}
# Macros needed to be defined when using a Type
type_macros: dict[Type, tuple[str, ...]] = {}
type_func_suffixes = {
float32: 'f',
float64: '',
float80: 'l'
}
type_literal_suffixes = {
float32: 'F',
float64: '',
float80: 'L'
}
type_math_macro_suffixes = {
float80: 'l'
}
math_macros = None
_ns = '' # namespace, C++ uses 'std::'
# known_functions-dict to copy
_kf: dict[str, Any] = known_functions_C89
def __init__(self, settings=None):
settings = settings or {}
if self.math_macros is None:
self.math_macros = settings.pop('math_macros', get_math_macros())
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
self.type_headers = dict(chain(self.type_headers.items(),
settings.pop('type_headers', {}).items()))
self.type_macros = dict(chain(self.type_macros.items(),
settings.pop('type_macros', {}).items()))
self.type_func_suffixes = dict(chain(self.type_func_suffixes.items(),
settings.pop('type_func_suffixes', {}).items()))
self.type_literal_suffixes = dict(chain(self.type_literal_suffixes.items(),
settings.pop('type_literal_suffixes', {}).items()))
self.type_math_macro_suffixes = dict(chain(self.type_math_macro_suffixes.items(),
settings.pop('type_math_macro_suffixes', {}).items()))
super().__init__(settings)
self.known_functions = dict(self._kf, **settings.get('user_functions', {}))
self._dereference = set(settings.get('dereference', []))
self.headers = set()
self.libraries = set()
self.macros = set()
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
""" Get code string as a statement - i.e. ending with a semicolon. """
return codestring if codestring.endswith(';') else codestring + ';'
def _get_comment(self, text):
return "/* {} */".format(text)
def _declare_number_const(self, name, value):
type_ = self.type_aliases[real]
var = Variable(name, type=type_, value=value.evalf(type_.decimal_dig), attrs={value_const})
decl = Declaration(var)
return self._get_statement(self._print(decl))
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
@_as_macro_if_defined
def _print_Mul(self, expr, **kwargs):
return super()._print_Mul(expr, **kwargs)
@_as_macro_if_defined
def _print_Pow(self, expr):
if "Pow" in self.known_functions:
return self._print_Function(expr)
PREC = precedence(expr)
suffix = self._get_func_suffix(real)
if expr.exp == -1:
literal_suffix = self._get_literal_suffix(real)
return '1.0%s/%s' % (literal_suffix, self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return '%ssqrt%s(%s)' % (self._ns, suffix, self._print(expr.base))
elif expr.exp == S.One/3 and self.standard != 'C89':
return '%scbrt%s(%s)' % (self._ns, suffix, self._print(expr.base))
else:
return '%spow%s(%s, %s)' % (self._ns, suffix, self._print(expr.base),
self._print(expr.exp))
def _print_Mod(self, expr):
num, den = expr.args
if num.is_integer and den.is_integer:
PREC = precedence(expr)
snum, sden = [self.parenthesize(arg, PREC) for arg in expr.args]
# % is remainder (same sign as numerator), not modulo (same sign as
# denominator), in C. Hence, % only works as modulo if both numbers
# have the same sign
if (num.is_nonnegative and den.is_nonnegative or
num.is_nonpositive and den.is_nonpositive):
return f"{snum} % {sden}"
return f"(({snum} % {sden}) + {sden}) % {sden}"
# Not guaranteed integer
return self._print_math_func(expr, known='fmod')
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
suffix = self._get_literal_suffix(real)
return '%d.0%s/%d.0%s' % (p, suffix, q, suffix)
def _print_Indexed(self, expr):
# calculate index for 1d array
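# Row-major ('C', the default when no strides are given) reverses the index
# order so the last index varies fastest; column-major ('F') keeps the given
# order.  The strides are then the accumulated products of the dimensions.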
offset = getattr(expr.base, 'offset', S.Zero)
strides = getattr(expr.base, 'strides', None)
indices = expr.indices
if strides is None or isinstance(strides, str):
dims = expr.shape
shift = S.One
temp = tuple()
if strides == 'C' or strides is None:
traversal = reversed(range(expr.rank))
indices = indices[::-1]
elif strides == 'F':
traversal = range(expr.rank)
for i in traversal:
temp += (shift,)
shift *= dims[i]
strides = temp
flat_index = sum([x[0]*x[1] for x in zip(indices, strides)]) + offset
return "%s[%s]" % (self._print(expr.base.label),
self._print(flat_index))
def _print_Idx(self, expr):
return self._print(expr.label)
@_as_macro_if_defined
def _print_NumberSymbol(self, expr):
return super()._print_NumberSymbol(expr)
def _print_Infinity(self, expr):
return 'HUGE_VAL'
def _print_NegativeInfinity(self, expr):
return '-HUGE_VAL'
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
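# The result is a chain of nested C ternaries, roughly
#   ((cond1) ? (expr1) : ((cond2) ? (expr2) : (default)))
# with newlines placed around each branch expression.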
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c),
self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_ITE(self, expr):
from sympy.functions import Piecewise
return self._print(expr.rewrite(Piecewise, deep=False))
def _print_MatrixElement(self, expr):
return "{}[{}]".format(self.parenthesize(expr.parent, PRECEDENCE["Atom"],
strict=True), expr.j + expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if expr in self._settings['dereference']:
return '(*{})'.format(name)
else:
return name
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('for ({target} = {start}; {target} < {stop}; {target} += '
'{step}) {{\n{body}\n}}').format(target=target, start=start,
stop=stop, step=step, body=body)
def _print_sign(self, func):
return '((({0}) > 0) - (({0}) < 0))'.format(self._print(func.args[0]))
def _print_Max(self, expr):
if "Max" in self.known_functions:
return self._print_Function(expr)
def inner_print_max(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Max objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s > %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_max(args[:half]),
'b': inner_print_max(args[half:])
}
return inner_print_max(expr.args)
def _print_Min(self, expr):
if "Min" in self.known_functions:
return self._print_Function(expr)
def inner_print_min(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Min objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s < %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_min(args[:half]),
'b': inner_print_min(args[half:])
}
return inner_print_min(expr.args)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [line.lstrip(' \t') for line in code]
increase = [int(any(map(line.endswith, inc_token))) for line in code]
decrease = [int(any(map(line.startswith, dec_token))) for line in code]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def _get_func_suffix(self, type_):
return self.type_func_suffixes[self.type_aliases.get(type_, type_)]
def _get_literal_suffix(self, type_):
return self.type_literal_suffixes[self.type_aliases.get(type_, type_)]
def _get_math_macro_suffix(self, type_):
alias = self.type_aliases.get(type_, type_)
dflt = self.type_math_macro_suffixes.get(alias, '')
return self.type_math_macro_suffixes.get(type_, dflt)
def _print_Tuple(self, expr):
return '{'+', '.join(self._print(e) for e in expr)+'}'
_print_List = _print_Tuple
def _print_Type(self, type_):
self.headers.update(self.type_headers.get(type_, set()))
self.macros.update(self.type_macros.get(type_, set()))
return self._print(self.type_mappings.get(type_, type_.name))
def _print_Declaration(self, decl):
from sympy.codegen.cnodes import restrict
var = decl.variable
val = var.value
if var.type == untyped:
raise ValueError("C does not support untyped variables")
if isinstance(var, Pointer):
result = '{vc}{t} *{pc} {r}{s}'.format(
vc='const ' if value_const in var.attrs else '',
t=self._print(var.type),
pc=' const' if pointer_const in var.attrs else '',
r='restrict ' if restrict in var.attrs else '',
s=self._print(var.symbol)
)
elif isinstance(var, Variable):
result = '{vc}{t} {s}'.format(
vc='const ' if value_const in var.attrs else '',
t=self._print(var.type),
s=self._print(var.symbol)
)
else:
raise NotImplementedError("Unknown type of var: %s" % type(var))
if val != None: # Must be "!= None", cannot be "is not None"
result += ' = %s' % self._print(val)
return result
def _print_Float(self, flt):
type_ = self.type_aliases.get(real, real)
self.macros.update(self.type_macros.get(type_, set()))
suffix = self._get_literal_suffix(type_)
num = str(flt.evalf(type_.decimal_dig))
if 'e' not in num and '.' not in num:
num += '.0'
num_parts = num.split('e')
num_parts[0] = num_parts[0].rstrip('0')
if num_parts[0].endswith('.'):
num_parts[0] += '0'
return 'e'.join(num_parts) + suffix
@requires(headers={'stdbool.h'})
def _print_BooleanTrue(self, expr):
return 'true'
@requires(headers={'stdbool.h'})
def _print_BooleanFalse(self, expr):
return 'false'
def _print_Element(self, elem):
if elem.strides == None: # Must be "== None", cannot be "is None"
if elem.offset != None: # Must be "!= None", cannot be "is not None"
raise ValueError("Expected strides when offset is given")
idxs = ']['.join(map(lambda arg: self._print(arg),
elem.indices))
else:
global_idx = sum([i*s for i, s in zip(elem.indices, elem.strides)])
if elem.offset != None: # Must be "!= None", cannot be "is not None"
global_idx += elem.offset
idxs = self._print(global_idx)
return "{symb}[{idxs}]".format(
symb=self._print(elem.symbol),
idxs=idxs
)
def _print_CodeBlock(self, expr):
""" Elements of code blocks printed as statements. """
return '\n'.join([self._get_statement(self._print(i)) for i in expr.args])
def _print_While(self, expr):
return 'while ({condition}) {{\n{body}\n}}'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_Scope(self, expr):
return '{\n%s\n}' % self._print_CodeBlock(expr.body)
@requires(headers={'stdio.h'})
def _print_Print(self, expr):
return 'printf({fmt}, {pargs})'.format(
fmt=self._print(expr.format_string),
pargs=', '.join(map(lambda arg: self._print(arg), expr.print_args))
)
def _print_FunctionPrototype(self, expr):
pars = ', '.join(map(lambda arg: self._print(Declaration(arg)),
expr.parameters))
return "%s %s(%s)" % (
tuple(map(lambda arg: self._print(arg),
(expr.return_type, expr.name))) + (pars,)
)
def _print_FunctionDefinition(self, expr):
return "%s%s" % (self._print_FunctionPrototype(expr),
self._print_Scope(expr))
def _print_Return(self, expr):
arg, = expr.args
return 'return %s' % self._print(arg)
def _print_CommaOperator(self, expr):
return '(%s)' % ', '.join(map(lambda arg: self._print(arg), expr.args))
def _print_Label(self, expr):
if expr.body == none:
return '%s:' % str(expr.name)
if len(expr.body.args) == 1:
return '%s:\n%s' % (str(expr.name), self._print_CodeBlock(expr.body))
return '%s:\n{\n%s\n}' % (str(expr.name), self._print_CodeBlock(expr.body))
def _print_goto(self, expr):
return 'goto %s' % expr.label.name
def _print_PreIncrement(self, expr):
arg, = expr.args
return '++(%s)' % self._print(arg)
def _print_PostIncrement(self, expr):
arg, = expr.args
return '(%s)++' % self._print(arg)
def _print_PreDecrement(self, expr):
arg, = expr.args
return '--(%s)' % self._print(arg)
def _print_PostDecrement(self, expr):
arg, = expr.args
return '(%s)--' % self._print(arg)
def _print_struct(self, expr):
return "%(keyword)s %(name)s {\n%(lines)s}" % dict(
keyword=expr.__class__.__name__, name=expr.name, lines=';\n'.join(
[self._print(decl) for decl in expr.declarations] + [''])
)
def _print_BreakToken(self, _):
return 'break'
def _print_ContinueToken(self, _):
return 'continue'
_print_union = _print_struct
class C99CodePrinter(C89CodePrinter):
standard = 'C99'
reserved_words = set(reserved_words + reserved_words_c99)
type_mappings=dict(chain(C89CodePrinter.type_mappings.items(), {
complex64: 'float complex',
complex128: 'double complex',
}.items()))
type_headers = dict(chain(C89CodePrinter.type_headers.items(), {
complex64: {'complex.h'},
complex128: {'complex.h'}
}.items()))
# known_functions-dict to copy
_kf: dict[str, Any] = known_functions_C99
# functions with versions with 'f' and 'l' suffixes:
_prec_funcs = ('fabs fmod remainder remquo fma fmax fmin fdim nan exp exp2'
' expm1 log log10 log2 log1p pow sqrt cbrt hypot sin cos tan'
' asin acos atan atan2 sinh cosh tanh asinh acosh atanh erf'
' erfc tgamma lgamma ceil floor trunc round nearbyint rint'
' frexp ldexp modf scalbn ilogb logb nextafter copysign').split()
def _print_Infinity(self, expr):
return 'INFINITY'
def _print_NegativeInfinity(self, expr):
return '-INFINITY'
def _print_NaN(self, expr):
return 'NAN'
# tgamma was already covered by 'known_functions' dict
@requires(headers={'math.h'}, libraries={'m'})
@_as_macro_if_defined
def _print_math_func(self, expr, nest=False, known=None):
if known is None:
known = self.known_functions[expr.__class__.__name__]
if not isinstance(known, str):
for cb, name in known:
if cb(*expr.args):
known = name
break
else:
raise ValueError("No matching printer")
try:
return known(self, *expr.args)
except TypeError:
suffix = self._get_func_suffix(real) if self._ns + known in self._prec_funcs else ''
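# nest=True is used for variadic SymPy functions whose C counterparts are
# binary only (e.g. Max/Min -> fmax/fmin); the arguments are emitted as
# nested calls such as fmax(a, fmax(b, c)).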
if nest:
args = self._print(expr.args[0])
if len(expr.args) > 1:
paren_pile = ''
for curr_arg in expr.args[1:-1]:
paren_pile += ')'
args += ', {ns}{name}{suffix}({next}'.format(
ns=self._ns,
name=known,
suffix=suffix,
next = self._print(curr_arg)
)
args += ', %s%s' % (
self._print(expr.func(expr.args[-1])),
paren_pile
)
else:
args = ', '.join(map(lambda arg: self._print(arg), expr.args))
return '{ns}{name}{suffix}({args})'.format(
ns=self._ns,
name=known,
suffix=suffix,
args=args
)
def _print_Max(self, expr):
return self._print_math_func(expr, nest=True)
def _print_Min(self, expr):
return self._print_math_func(expr, nest=True)
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){" # C99
for i in indices:
# C arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'var': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
for k in ('Abs Sqrt exp exp2 expm1 log log10 log2 log1p Cbrt hypot fma'
' loggamma sin cos tan asin acos atan atan2 sinh cosh tanh asinh acosh '
'atanh erf erfc loggamma gamma ceiling floor').split():
setattr(C99CodePrinter, '_print_%s' % k, C99CodePrinter._print_math_func)
class C11CodePrinter(C99CodePrinter):
@requires(headers={'stdalign.h'})
def _print_alignof(self, expr):
arg, = expr.args
return 'alignof(%s)' % self._print(arg)
c_code_printers = {
'c89': C89CodePrinter,
'c99': C99CodePrinter,
'c11': C11CodePrinter
}
|
f51fb05eed5247b469e610c51ddad103408fa400c8e7ca9e1a84b16c032960fd | """
Mathematica code printer
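A short usage sketch (illustrative; ``mathematica_code`` is the public helper
provided by this module)::

    from sympy import symbols, sin
    from sympy.printing.mathematica import mathematica_code

    x = symbols('x')
    mathematica_code(sin(x)**2)   # e.g. 'Sin[x]^2'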
"""
from __future__ import annotations
from typing import Any
from sympy.core import Basic, Expr, Float
from sympy.core.sorting import default_sort_key
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
# Used in MCodePrinter._print_Function(self)
known_functions = {
"exp": [(lambda x: True, "Exp")],
"log": [(lambda x: True, "Log")],
"sin": [(lambda x: True, "Sin")],
"cos": [(lambda x: True, "Cos")],
"tan": [(lambda x: True, "Tan")],
"cot": [(lambda x: True, "Cot")],
"sec": [(lambda x: True, "Sec")],
"csc": [(lambda x: True, "Csc")],
"asin": [(lambda x: True, "ArcSin")],
"acos": [(lambda x: True, "ArcCos")],
"atan": [(lambda x: True, "ArcTan")],
"acot": [(lambda x: True, "ArcCot")],
"asec": [(lambda x: True, "ArcSec")],
"acsc": [(lambda x: True, "ArcCsc")],
"atan2": [(lambda *x: True, "ArcTan")],
"sinh": [(lambda x: True, "Sinh")],
"cosh": [(lambda x: True, "Cosh")],
"tanh": [(lambda x: True, "Tanh")],
"coth": [(lambda x: True, "Coth")],
"sech": [(lambda x: True, "Sech")],
"csch": [(lambda x: True, "Csch")],
"asinh": [(lambda x: True, "ArcSinh")],
"acosh": [(lambda x: True, "ArcCosh")],
"atanh": [(lambda x: True, "ArcTanh")],
"acoth": [(lambda x: True, "ArcCoth")],
"asech": [(lambda x: True, "ArcSech")],
"acsch": [(lambda x: True, "ArcCsch")],
"sinc": [(lambda x: True, "Sinc")],
"conjugate": [(lambda x: True, "Conjugate")],
"Max": [(lambda *x: True, "Max")],
"Min": [(lambda *x: True, "Min")],
"erf": [(lambda x: True, "Erf")],
"erf2": [(lambda *x: True, "Erf")],
"erfc": [(lambda x: True, "Erfc")],
"erfi": [(lambda x: True, "Erfi")],
"erfinv": [(lambda x: True, "InverseErf")],
"erfcinv": [(lambda x: True, "InverseErfc")],
"erf2inv": [(lambda *x: True, "InverseErf")],
"expint": [(lambda *x: True, "ExpIntegralE")],
"Ei": [(lambda x: True, "ExpIntegralEi")],
"fresnelc": [(lambda x: True, "FresnelC")],
"fresnels": [(lambda x: True, "FresnelS")],
"gamma": [(lambda x: True, "Gamma")],
"uppergamma": [(lambda *x: True, "Gamma")],
"polygamma": [(lambda *x: True, "PolyGamma")],
"loggamma": [(lambda x: True, "LogGamma")],
"beta": [(lambda *x: True, "Beta")],
"Ci": [(lambda x: True, "CosIntegral")],
"Si": [(lambda x: True, "SinIntegral")],
"Chi": [(lambda x: True, "CoshIntegral")],
"Shi": [(lambda x: True, "SinhIntegral")],
"li": [(lambda x: True, "LogIntegral")],
"factorial": [(lambda x: True, "Factorial")],
"factorial2": [(lambda x: True, "Factorial2")],
"subfactorial": [(lambda x: True, "Subfactorial")],
"catalan": [(lambda x: True, "CatalanNumber")],
"harmonic": [(lambda *x: True, "HarmonicNumber")],
"lucas": [(lambda x: True, "LucasL")],
"RisingFactorial": [(lambda *x: True, "Pochhammer")],
"FallingFactorial": [(lambda *x: True, "FactorialPower")],
"laguerre": [(lambda *x: True, "LaguerreL")],
"assoc_laguerre": [(lambda *x: True, "LaguerreL")],
"hermite": [(lambda *x: True, "HermiteH")],
"jacobi": [(lambda *x: True, "JacobiP")],
"gegenbauer": [(lambda *x: True, "GegenbauerC")],
"chebyshevt": [(lambda *x: True, "ChebyshevT")],
"chebyshevu": [(lambda *x: True, "ChebyshevU")],
"legendre": [(lambda *x: True, "LegendreP")],
"assoc_legendre": [(lambda *x: True, "LegendreP")],
"mathieuc": [(lambda *x: True, "MathieuC")],
"mathieus": [(lambda *x: True, "MathieuS")],
"mathieucprime": [(lambda *x: True, "MathieuCPrime")],
"mathieusprime": [(lambda *x: True, "MathieuSPrime")],
"stieltjes": [(lambda x: True, "StieltjesGamma")],
"elliptic_e": [(lambda *x: True, "EllipticE")],
"elliptic_f": [(lambda *x: True, "EllipticE")],
"elliptic_k": [(lambda x: True, "EllipticK")],
"elliptic_pi": [(lambda *x: True, "EllipticPi")],
"zeta": [(lambda *x: True, "Zeta")],
"dirichlet_eta": [(lambda x: True, "DirichletEta")],
"riemann_xi": [(lambda x: True, "RiemannXi")],
"besseli": [(lambda *x: True, "BesselI")],
"besselj": [(lambda *x: True, "BesselJ")],
"besselk": [(lambda *x: True, "BesselK")],
"bessely": [(lambda *x: True, "BesselY")],
"hankel1": [(lambda *x: True, "HankelH1")],
"hankel2": [(lambda *x: True, "HankelH2")],
"airyai": [(lambda x: True, "AiryAi")],
"airybi": [(lambda x: True, "AiryBi")],
"airyaiprime": [(lambda x: True, "AiryAiPrime")],
"airybiprime": [(lambda x: True, "AiryBiPrime")],
"polylog": [(lambda *x: True, "PolyLog")],
"lerchphi": [(lambda *x: True, "LerchPhi")],
"gcd": [(lambda *x: True, "GCD")],
"lcm": [(lambda *x: True, "LCM")],
"jn": [(lambda *x: True, "SphericalBesselJ")],
"yn": [(lambda *x: True, "SphericalBesselY")],
"hyper": [(lambda *x: True, "HypergeometricPFQ")],
"meijerg": [(lambda *x: True, "MeijerG")],
"appellf1": [(lambda *x: True, "AppellF1")],
"DiracDelta": [(lambda x: True, "DiracDelta")],
"Heaviside": [(lambda x: True, "HeavisideTheta")],
"KroneckerDelta": [(lambda *x: True, "KroneckerDelta")],
"sqrt": [(lambda x: True, "Sqrt")], # For automatic rewrites
}
class MCodePrinter(CodePrinter):
"""A printer to convert Python expressions to
strings of the Wolfram's Mathematica code
"""
printmethod = "_mcode"
language = "Wolfram Language"
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
}
_number_symbols: set[tuple[Expr, Float]] = set()
_not_supported: set[Basic] = set()
def __init__(self, settings={}):
"""Register function mappings supplied by user"""
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {}).copy()
for k, v in userfuncs.items():
if not isinstance(v, list):
userfuncs[k] = [(lambda *x: True, v)]
self.known_functions.update(userfuncs)
def _format_code(self, lines):
return lines
def _print_Pow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Mul(self, expr):
PREC = precedence(expr)
c, nc = expr.args_cnc()
res = super()._print_Mul(expr.func(*c))
if nc:
res += '*'
res += '**'.join(self.parenthesize(a, PREC) for a in nc)
return res
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
# Primitive numbers
def _print_Zero(self, expr):
return '0'
def _print_One(self, expr):
return '1'
def _print_NegativeOne(self, expr):
return '-1'
def _print_Half(self, expr):
return '1/2'
def _print_ImaginaryUnit(self, expr):
return 'I'
# Infinity and invalid numbers
def _print_Infinity(self, expr):
return 'Infinity'
def _print_NegativeInfinity(self, expr):
return '-Infinity'
def _print_ComplexInfinity(self, expr):
return 'ComplexInfinity'
def _print_NaN(self, expr):
return 'Indeterminate'
# Mathematical constants
def _print_Exp1(self, expr):
return 'E'
def _print_Pi(self, expr):
return 'Pi'
def _print_GoldenRatio(self, expr):
return 'GoldenRatio'
def _print_TribonacciConstant(self, expr):
expanded = expr.expand(func=True)
PREC = precedence(expr)
return self.parenthesize(expanded, PREC)
def _print_EulerGamma(self, expr):
return 'EulerGamma'
def _print_Catalan(self, expr):
return 'Catalan'
def _print_list(self, expr):
return '{' + ', '.join(self.doprint(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_ImmutableDenseMatrix(self, expr):
return self.doprint(expr.tolist())
def _print_ImmutableSparseMatrix(self, expr):
def print_rule(pos, val):
return '{} -> {}'.format(
self.doprint((pos[0]+1, pos[1]+1)), self.doprint(val))
def print_data():
items = sorted(expr.todok().items(), key=default_sort_key)
return '{' + \
', '.join(print_rule(k, v) for k, v in items) + \
'}'
def print_dims():
return self.doprint(expr.shape)
return 'SparseArray[{}, {}]'.format(print_data(), print_dims())
def _print_ImmutableDenseNDimArray(self, expr):
return self.doprint(expr.tolist())
def _print_ImmutableSparseNDimArray(self, expr):
def print_string_list(string_list):
return '{' + ', '.join(a for a in string_list) + '}'
def to_mathematica_index(*args):
"""Helper function to change Python style indexing to
            Mathematica indexing.
Python indexing (0, 1 ... n-1)
-> Mathematica indexing (1, 2 ... n)
"""
return tuple(i + 1 for i in args)
def print_rule(pos, val):
"""Helper function to print a rule of Mathematica"""
return '{} -> {}'.format(self.doprint(pos), self.doprint(val))
def print_data():
"""Helper function to print data part of Mathematica
sparse array.
It uses the fourth notation ``SparseArray[data,{d1,d2,...}]``
from
https://reference.wolfram.com/language/ref/SparseArray.html
``data`` must be formatted with rule.
"""
return print_string_list(
[print_rule(
to_mathematica_index(*(expr._get_tuple_index(key))),
value)
for key, value in sorted(expr._sparse_array.items())]
)
def print_dims():
"""Helper function to print dimensions part of Mathematica
sparse array.
It uses the fourth notation ``SparseArray[data,{d1,d2,...}]``
from
https://reference.wolfram.com/language/ref/SparseArray.html
"""
return self.doprint(expr.shape)
return 'SparseArray[{}, {}]'.format(print_data(), print_dims())
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_mfunc = self.known_functions[expr.func.__name__]
for cond, mfunc in cond_mfunc:
if cond(*expr.args):
return "%s[%s]" % (mfunc, self.stringify(expr.args, ", "))
elif expr.func.__name__ in self._rewriteable_functions:
# Simple rewrite to supported function possible
target_f, required_fs = self._rewriteable_functions[expr.func.__name__]
if self._can_print(target_f) and all(self._can_print(f) for f in required_fs):
return self._print(expr.rewrite(target_f))
return expr.func.__name__ + "[%s]" % self.stringify(expr.args, ", ")
_print_MinMaxBase = _print_Function
def _print_LambertW(self, expr):
if len(expr.args) == 1:
return "ProductLog[{}]".format(self._print(expr.args[0]))
return "ProductLog[{}, {}]".format(
self._print(expr.args[1]), self._print(expr.args[0]))
def _print_Integral(self, expr):
if len(expr.variables) == 1 and not expr.limits[0][1:]:
args = [expr.args[0], expr.variables[0]]
else:
args = expr.args
return "Hold[Integrate[" + ', '.join(self.doprint(a) for a in args) + "]]"
def _print_Sum(self, expr):
return "Hold[Sum[" + ', '.join(self.doprint(a) for a in expr.args) + "]]"
def _print_Derivative(self, expr):
dexpr = expr.expr
dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count]
return "Hold[D[" + ', '.join(self.doprint(a) for a in [dexpr] + dvars) + "]]"
def _get_comment(self, text):
return "(* {} *)".format(text)
def mathematica_code(expr, **settings):
r"""Converts an expr to a string of the Wolfram Mathematica code
Examples
========
>>> from sympy import mathematica_code as mcode, symbols, sin
>>> x = symbols('x')
>>> mcode(sin(x).series(x).removeO())
'(1/120)*x^5 - 1/6*x^3 + x'
"""
return MCodePrinter(settings).doprint(expr)
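# A hedged usage sketch (comment only, not part of the module above): the
# ``user_functions`` setting maps an otherwise-unknown SymPy function to a
# Mathematica head; a bare string target is wrapped in
# ``[(lambda *x: True, name)]`` by ``MCodePrinter.__init__``.  The names ``f``
# and ``MyF`` below are purely illustrative.
#
#     >>> from sympy import Function, symbols
#     >>> from sympy.printing.mathematica import mathematica_code
#     >>> x = symbols('x')
#     >>> f = Function('f')
#     >>> mathematica_code(f(x), user_functions={"f": "MyF"})
#     'MyF[x]'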
|
2fe0a8a4f477119151fa9a3fb7c88998deb94c1f7b7aa55d55f159d943a2524d | """
Fortran code printer
The FCodePrinter converts single SymPy expressions into single Fortran
expressions, using the functions defined in the Fortran 77 standard where
possible. Some useful pointers to Fortran can be found on wikipedia:
https://en.wikipedia.org/wiki/Fortran
Most of the code below is based on the "Professional Programmer\'s Guide to
Fortran77" by Clive G. Page:
http://www.star.le.ac.uk/~cgp/prof77.html
Fortran is a case-insensitive language. This might cause trouble because
SymPy is case sensitive. So, fcode adds underscores to variable names when
it is necessary to make them different for Fortran.
"""
from __future__ import annotations
from typing import Any
from collections import defaultdict
from itertools import chain
import string
from sympy.codegen.ast import (
Assignment, Declaration, Pointer, value_const,
float32, float64, float80, complex64, complex128, int8, int16, int32,
int64, intc, real, integer, bool_, complex_
)
from sympy.codegen.fnodes import (
allocatable, isign, dsign, cmplx, merge, literal_dp, elemental, pure,
intent_in, intent_out, intent_inout
)
from sympy.core import S, Add, N, Float, Symbol
from sympy.core.function import Function
from sympy.core.relational import Eq
from sympy.sets import Range
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.printing.printer import printer_context
# These are defined in the other file so we can avoid importing sympy.codegen
# from the top-level 'import sympy'. Export them here as well.
from sympy.printing.codeprinter import fcode, print_fcode # noqa:F401
known_functions = {
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"log": "log",
"exp": "exp",
"erf": "erf",
"Abs": "abs",
"conjugate": "conjg",
"Max": "max",
"Min": "min",
}
class FCodePrinter(CodePrinter):
"""A printer to convert SymPy expressions to strings of Fortran code"""
printmethod = "_fcode"
language = "Fortran"
type_aliases = {
integer: int32,
real: float64,
complex_: complex128,
}
type_mappings = {
intc: 'integer(c_int)',
float32: 'real*4', # real(kind(0.e0))
float64: 'real*8', # real(kind(0.d0))
float80: 'real*10', # real(kind(????))
complex64: 'complex*8',
complex128: 'complex*16',
int8: 'integer*1',
int16: 'integer*2',
int32: 'integer*4',
int64: 'integer*8',
bool_: 'logical'
}
type_modules = {
intc: {'iso_c_binding': 'c_int'}
}
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'source_format': 'fixed',
'contract': True,
'standard': 77,
'name_mangling': True,
}
_operators = {
'and': '.and.',
'or': '.or.',
'xor': '.neqv.',
'equivalent': '.eqv.',
'not': '.not. ',
}
_relationals = {
'!=': '/=',
}
def __init__(self, settings=None):
if not settings:
settings = {}
self.mangled_symbols = {} # Dict showing mapping of all words
self.used_name = []
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
super().__init__(settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
# leading columns depend on fixed or free format
standards = {66, 77, 90, 95, 2003, 2008}
if self._settings['standard'] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings[
'standard'])
self.module_uses = defaultdict(set) # e.g.: use iso_c_binding, only: c_int
@property
def _lead(self):
if self._settings['source_format'] == 'fixed':
            return {'code': "      ", 'cont': "     @ ", 'comment': "C     "}
elif self._settings['source_format'] == 'free':
return {'code': "", 'cont': " ", 'comment': "! "}
else:
raise ValueError("Unknown source format: %s" % self._settings['source_format'])
def _print_Symbol(self, expr):
if self._settings['name_mangling'] == True:
if expr not in self.mangled_symbols:
name = expr.name
while name.lower() in self.used_name:
name += '_'
self.used_name.append(name.lower())
if name == expr.name:
self.mangled_symbols[expr] = expr
else:
self.mangled_symbols[expr] = Symbol(name)
expr = expr.xreplace(self.mangled_symbols)
name = super()._print_Symbol(expr)
return name
def _rate_index_position(self, p):
return -p*5
def _get_statement(self, codestring):
return codestring
def _get_comment(self, text):
return "! {}".format(text)
def _declare_number_const(self, name, value):
return "parameter ({} = {})".format(name, self._print(value))
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr, Float(expr.evalf(self._settings['precision']))))
return str(expr)
def _format_code(self, lines):
return self._wrap_fortran(self.indent_code(lines))
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# fortran arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("do %s = %s, %s" % (var, start, stop))
close_lines.append("end do")
return open_lines, close_lines
def _print_sign(self, expr):
from sympy.functions.elementary.complexes import Abs
arg, = expr.args
if arg.is_integer:
new_expr = merge(0, isign(1, arg), Eq(arg, 0))
elif (arg.is_complex or arg.is_infinite):
new_expr = merge(cmplx(literal_dp(0), literal_dp(0)), arg/Abs(arg), Eq(Abs(arg), literal_dp(0)))
else:
new_expr = merge(literal_dp(0), dsign(literal_dp(1), arg), Eq(arg, literal_dp(0)))
return self._print(new_expr)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) then" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("else if (%s) then" % self._print(c))
lines.append(self._print(e))
lines.append("end if")
return "\n".join(lines)
elif self._settings["standard"] >= 95:
# Only supported in F95 and newer:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
pattern = "merge({T}, {F}, {COND})"
code = self._print(expr.args[-1].expr)
terms = list(expr.args[:-1])
while terms:
e, c = terms.pop()
expr = self._print(e)
cond = self._print(c)
code = pattern.format(T=expr, F=code, COND=cond)
return code
else:
# `merge` is not supported prior to F95
raise NotImplementedError("Using Piecewise as an expression using "
"inline operators is not supported in "
"standards earlier than Fortran95.")
def _print_MatrixElement(self, expr):
return "{}({}, {})".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True), expr.i + 1, expr.j + 1)
def _print_Add(self, expr):
# purpose: print complex numbers nicely in Fortran.
# collect the purely real and purely imaginary parts:
pure_real = []
pure_imaginary = []
mixed = []
for arg in expr.args:
if arg.is_number and arg.is_real:
pure_real.append(arg)
elif arg.is_number and arg.is_imaginary:
pure_imaginary.append(arg)
else:
mixed.append(arg)
if pure_imaginary:
if mixed:
PREC = precedence(expr)
term = Add(*mixed)
t = self._print(term)
if t.startswith('-'):
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < PREC:
t = "(%s)" % t
return "cmplx(%s,%s) %s %s" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
sign, t,
)
else:
return "cmplx(%s,%s)" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
)
else:
return CodePrinter._print_Add(self, expr)
def _print_Function(self, expr):
# All constant function args are evaluated as floats
prec = self._settings['precision']
args = [N(a, prec) for a in expr.args]
eval_expr = expr.func(*args)
if not isinstance(eval_expr, Function):
return self._print(eval_expr)
else:
return CodePrinter._print_Function(self, expr.func(*args))
def _print_Mod(self, expr):
# NOTE : Fortran has the functions mod() and modulo(). modulo() behaves
# the same wrt to the sign of the arguments as Python and SymPy's
# modulus computations (% and Mod()) but is not available in Fortran 66
# or Fortran 77, thus we raise an error.
if self._settings['standard'] in [66, 77]:
msg = ("Python % operator and SymPy's Mod() function are not "
"supported by Fortran 66 or 77 standards.")
raise NotImplementedError(msg)
else:
x, y = expr.args
return " modulo({}, {})".format(self._print(x), self._print(y))
def _print_ImaginaryUnit(self, expr):
# purpose: print complex numbers nicely in Fortran.
return "cmplx(0,1)"
def _print_int(self, expr):
return str(expr)
def _print_Mul(self, expr):
# purpose: print complex numbers nicely in Fortran.
if expr.is_number and expr.is_imaginary:
return "cmplx(0,%s)" % (
self._print(-S.ImaginaryUnit*expr)
)
else:
return CodePrinter._print_Mul(self, expr)
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '%s/%s' % (
self._print(literal_dp(1)),
self.parenthesize(expr.base, PREC)
)
elif expr.exp == 0.5:
if expr.base.is_integer:
# Fortran intrinsic sqrt() does not accept integer argument
if expr.base.is_Number:
return 'sqrt(%s.0d0)' % self._print(expr.base)
else:
return 'sqrt(dble(%s))' % self._print(expr.base)
else:
return 'sqrt(%s)' % self._print(expr.base)
else:
return CodePrinter._print_Pow(self, expr)
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return "%d.0d0/%d.0d0" % (p, q)
def _print_Float(self, expr):
printed = CodePrinter._print_Float(self, expr)
e = printed.find('e')
if e > -1:
return "%sd%s" % (printed[:e], printed[e + 1:])
return "%sd0" % printed
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
op = op if op not in self._relationals else self._relationals[op]
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
return self._get_statement("{0} = {0} {1} {2}".format(
*map(lambda arg: self._print(arg),
[lhs_code, expr.binop, rhs_code])))
def _print_sum_(self, sm):
params = self._print(sm.array)
if sm.dim != None: # Must use '!= None', cannot use 'is not None'
params += ', ' + self._print(sm.dim)
if sm.mask != None: # Must use '!= None', cannot use 'is not None'
params += ', mask=' + self._print(sm.mask)
return '%s(%s)' % (sm.__class__.__name__.rstrip('_'), params)
def _print_product_(self, prod):
return self._print_sum_(prod)
def _print_Do(self, do):
excl = ['concurrent']
if do.step == 1:
excl.append('step')
step = ''
else:
step = ', {step}'
return (
'do {concurrent}{counter} = {first}, {last}'+step+'\n'
'{body}\n'
'end do\n'
).format(
concurrent='concurrent ' if do.concurrent else '',
**do.kwargs(apply=lambda arg: self._print(arg), exclude=excl)
)
def _print_ImpliedDoLoop(self, idl):
step = '' if idl.step == 1 else ', {step}'
return ('({expr}, {counter} = {first}, {last}'+step+')').format(
**idl.kwargs(apply=lambda arg: self._print(arg))
)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('do {target} = {start}, {stop}, {step}\n'
'{body}\n'
'end do').format(target=target, start=start, stop=stop - 1,
step=step, body=body)
def _print_Type(self, type_):
type_ = self.type_aliases.get(type_, type_)
type_str = self.type_mappings.get(type_, type_.name)
module_uses = self.type_modules.get(type_)
if module_uses:
for k, v in module_uses:
self.module_uses[k].add(v)
return type_str
def _print_Element(self, elem):
return '{symbol}({idxs})'.format(
symbol=self._print(elem.symbol),
idxs=', '.join(map(lambda arg: self._print(arg), elem.indices))
)
def _print_Extent(self, ext):
return str(ext)
def _print_Declaration(self, expr):
var = expr.variable
val = var.value
dim = var.attr_params('dimension')
intents = [intent in var.attrs for intent in (intent_in, intent_out, intent_inout)]
if intents.count(True) == 0:
intent = ''
elif intents.count(True) == 1:
intent = ', intent(%s)' % ['in', 'out', 'inout'][intents.index(True)]
else:
raise ValueError("Multiple intents specified for %s" % self)
if isinstance(var, Pointer):
raise NotImplementedError("Pointers are not available by default in Fortran.")
if self._settings["standard"] >= 90:
result = '{t}{vc}{dim}{intent}{alloc} :: {s}'.format(
t=self._print(var.type),
vc=', parameter' if value_const in var.attrs else '',
dim=', dimension(%s)' % ', '.join(map(lambda arg: self._print(arg), dim)) if dim else '',
intent=intent,
alloc=', allocatable' if allocatable in var.attrs else '',
s=self._print(var.symbol)
)
if val != None: # Must be "!= None", cannot be "is not None"
result += ' = %s' % self._print(val)
else:
if value_const in var.attrs or val:
raise NotImplementedError("F77 init./parameter statem. req. multiple lines.")
result = ' '.join(map(lambda arg: self._print(arg), [var.type, var.symbol]))
return result
def _print_Infinity(self, expr):
return '(huge(%s) + 1)' % self._print(literal_dp(0))
def _print_While(self, expr):
return 'do while ({condition})\n{body}\nend do'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_BooleanTrue(self, expr):
return '.true.'
def _print_BooleanFalse(self, expr):
return '.false.'
def _pad_leading_columns(self, lines):
result = []
for line in lines:
if line.startswith('!'):
result.append(self._lead['comment'] + line[1:].lstrip())
else:
result.append(self._lead['code'] + line)
return result
def _wrap_fortran(self, lines):
"""Wrap long Fortran lines
Argument:
lines -- a list of lines (without \\n character)
A comment line is split at white space. Code lines are split with a more
complex rule to give nice results.
"""
# routine to find split point in a code line
my_alnum = set("_+-." + string.digits + string.ascii_letters)
my_white = set(" \t()")
def split_pos_code(line, endpos):
if len(line) <= endpos:
return len(line)
pos = endpos
split = lambda pos: \
(line[pos] in my_alnum and line[pos - 1] not in my_alnum) or \
(line[pos] not in my_alnum and line[pos - 1] in my_alnum) or \
(line[pos] in my_white and line[pos - 1] not in my_white) or \
(line[pos] not in my_white and line[pos - 1] in my_white)
while not split(pos):
pos -= 1
if pos == 0:
return endpos
return pos
# split line by line and add the split lines to result
result = []
if self._settings['source_format'] == 'free':
trailing = ' &'
else:
trailing = ''
for line in lines:
if line.startswith(self._lead['comment']):
# comment line
if len(line) > 72:
pos = line.rfind(" ", 6, 72)
if pos == -1:
pos = 72
hunk = line[:pos]
line = line[pos:].lstrip()
result.append(hunk)
while line:
pos = line.rfind(" ", 0, 66)
if pos == -1 or len(line) < 66:
pos = 66
hunk = line[:pos]
line = line[pos:].lstrip()
result.append("%s%s" % (self._lead['comment'], hunk))
else:
result.append(line)
elif line.startswith(self._lead['code']):
# code line
pos = split_pos_code(line, 72)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append(hunk)
while line:
pos = split_pos_code(line, 65)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append("%s%s" % (self._lead['cont'], hunk))
else:
result.append(line)
return result
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
free = self._settings['source_format'] == 'free'
code = [ line.lstrip(' \t') for line in code ]
inc_keyword = ('do ', 'if(', 'if ', 'do\n', 'else', 'program', 'interface')
dec_keyword = ('end do', 'enddo', 'end if', 'endif', 'else', 'end program', 'end interface')
increase = [ int(any(map(line.startswith, inc_keyword)))
for line in code ]
decrease = [ int(any(map(line.startswith, dec_keyword)))
for line in code ]
continuation = [ int(any(map(line.endswith, ['&', '&\n'])))
for line in code ]
level = 0
cont_padding = 0
tabwidth = 3
new_code = []
for i, line in enumerate(code):
if line in ('', '\n'):
new_code.append(line)
continue
level -= decrease[i]
if free:
padding = " "*(level*tabwidth + cont_padding)
else:
padding = " "*level*tabwidth
line = "%s%s" % (padding, line)
if not free:
line = self._pad_leading_columns([line])[0]
new_code.append(line)
if continuation[i]:
cont_padding = 2*tabwidth
else:
cont_padding = 0
level += increase[i]
if not free:
return self._wrap_fortran(new_code)
return new_code
def _print_GoTo(self, goto):
if goto.expr: # computed goto
return "go to ({labels}), {expr}".format(
labels=', '.join(map(lambda arg: self._print(arg), goto.labels)),
expr=self._print(goto.expr)
)
else:
lbl, = goto.labels
return "go to %s" % self._print(lbl)
def _print_Program(self, prog):
return (
"program {name}\n"
"{body}\n"
"end program\n"
).format(**prog.kwargs(apply=lambda arg: self._print(arg)))
def _print_Module(self, mod):
return (
"module {name}\n"
"{declarations}\n"
"\ncontains\n\n"
"{definitions}\n"
"end module\n"
).format(**mod.kwargs(apply=lambda arg: self._print(arg)))
def _print_Stream(self, strm):
if strm.name == 'stdout' and self._settings["standard"] >= 2003:
self.module_uses['iso_c_binding'].add('stdint=>input_unit')
return 'input_unit'
elif strm.name == 'stderr' and self._settings["standard"] >= 2003:
self.module_uses['iso_c_binding'].add('stdint=>error_unit')
return 'error_unit'
else:
if strm.name == 'stdout':
return '*'
else:
return strm.name
def _print_Print(self, ps):
if ps.format_string != None: # Must be '!= None', cannot be 'is not None'
fmt = self._print(ps.format_string)
else:
fmt = "*"
return "print {fmt}, {iolist}".format(fmt=fmt, iolist=', '.join(
map(lambda arg: self._print(arg), ps.print_args)))
def _print_Return(self, rs):
arg, = rs.args
return "{result_name} = {arg}".format(
result_name=self._context.get('result_name', 'sympy_result'),
arg=self._print(arg)
)
def _print_FortranReturn(self, frs):
arg, = frs.args
if arg:
return 'return %s' % self._print(arg)
else:
return 'return'
def _head(self, entity, fp, **kwargs):
bind_C_params = fp.attr_params('bind_C')
if bind_C_params is None:
bind = ''
else:
bind = ' bind(C, name="%s")' % bind_C_params[0] if bind_C_params else ' bind(C)'
result_name = self._settings.get('result_name', None)
return (
"{entity}{name}({arg_names}){result}{bind}\n"
"{arg_declarations}"
).format(
entity=entity,
name=self._print(fp.name),
arg_names=', '.join([self._print(arg.symbol) for arg in fp.parameters]),
result=(' result(%s)' % result_name) if result_name else '',
bind=bind,
arg_declarations='\n'.join(map(lambda arg: self._print(Declaration(arg)), fp.parameters))
)
def _print_FunctionPrototype(self, fp):
entity = "{} function ".format(self._print(fp.return_type))
return (
"interface\n"
"{function_head}\n"
"end function\n"
"end interface"
).format(function_head=self._head(entity, fp))
def _print_FunctionDefinition(self, fd):
if elemental in fd.attrs:
prefix = 'elemental '
elif pure in fd.attrs:
prefix = 'pure '
else:
prefix = ''
entity = "{} function ".format(self._print(fd.return_type))
with printer_context(self, result_name=fd.name):
return (
"{prefix}{function_head}\n"
"{body}\n"
"end function\n"
).format(
prefix=prefix,
function_head=self._head(entity, fd),
body=self._print(fd.body)
)
def _print_Subroutine(self, sub):
return (
'{subroutine_head}\n'
'{body}\n'
'end subroutine\n'
).format(
subroutine_head=self._head('subroutine ', sub),
body=self._print(sub.body)
)
def _print_SubroutineCall(self, scall):
return 'call {name}({args})'.format(
name=self._print(scall.name),
args=', '.join(map(lambda arg: self._print(arg), scall.subroutine_args))
)
def _print_use_rename(self, rnm):
return "%s => %s" % tuple(map(lambda arg: self._print(arg), rnm.args))
def _print_use(self, use):
result = 'use %s' % self._print(use.namespace)
if use.rename != None: # Must be '!= None', cannot be 'is not None'
result += ', ' + ', '.join([self._print(rnm) for rnm in use.rename])
if use.only != None: # Must be '!= None', cannot be 'is not None'
result += ', only: ' + ', '.join([self._print(nly) for nly in use.only])
return result
def _print_BreakToken(self, _):
return 'exit'
def _print_ContinueToken(self, _):
return 'cycle'
def _print_ArrayConstructor(self, ac):
fmtstr = "[%s]" if self._settings["standard"] >= 2003 else '(/%s/)'
return fmtstr % ', '.join(map(lambda arg: self._print(arg), ac.elements))
def _print_ArrayElement(self, elem):
return '{symbol}({idxs})'.format(
symbol=self._print(elem.name),
idxs=', '.join(map(lambda arg: self._print(arg), elem.indices))
)
|
af7f497e320d5c97e44bab30cbe5165f10b2d3c3ae6832508a9d4b7adca08af8 | """
A MathML printer.
"""
from __future__ import annotations
from typing import Any
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import sympify
from sympy.printing.conventions import split_super_sub, requires_partial
from sympy.printing.precedence import \
precedence_traditional, PRECEDENCE, PRECEDENCE_TRADITIONAL
from sympy.printing.pretty.pretty_symbology import greek_unicode
from sympy.printing.printer import Printer, print_function
from mpmath.libmp import prec_to_dps, repr_dps, to_str as mlib_to_str
class MathMLPrinterBase(Printer):
"""Contains common code required for MathMLContentPrinter and
MathMLPresentationPrinter.
"""
_default_settings: dict[str, Any] = {
"order": None,
"encoding": "utf-8",
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"inv_trig_style": "abbreviated",
"ln_notation": False,
"long_frac_ratio": None,
"mat_delim": "[",
"mat_symbol_style": "plain",
"mul_symbol": None,
"root_notation": True,
"symbol_names": {},
"mul_symbol_mathml_numbers": '·',
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
from xml.dom.minidom import Document, Text
self.dom = Document()
# Workaround to allow strings to remain unescaped
# Based on
# https://stackoverflow.com/questions/38015864/python-xml-dom-minidom-\
# please-dont-escape-my-strings/38041194
class RawText(Text):
def writexml(self, writer, indent='', addindent='', newl=''):
if self.data:
writer.write('{}{}{}'.format(indent, self.data, newl))
def createRawTextNode(data):
r = RawText()
r.data = data
r.ownerDocument = self.dom
return r
self.dom.createTextNode = createRawTextNode
def doprint(self, expr):
"""
Prints the expression as MathML.
"""
mathML = Printer._print(self, expr)
unistr = mathML.toxml()
xmlbstr = unistr.encode('ascii', 'xmlcharrefreplace')
res = xmlbstr.decode()
return res
def apply_patch(self):
# Applying the patch of xml.dom.minidom bug
# Date: 2011-11-18
# Description: http://ronrothman.com/public/leftbraned/xml-dom-minidom\
# -toprettyxml-and-silly-whitespace/#best-solution
# Issue: http://bugs.python.org/issue4147
# Patch: http://hg.python.org/cpython/rev/7262f8f276ff/
from xml.dom.minidom import Element, Text, Node, _write_data
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = list(attrs.keys())
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(
writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
self._Element_writexml_old = Element.writexml
Element.writexml = writexml
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
self._Text_writexml_old = Text.writexml
Text.writexml = writexml
def restore_patch(self):
from xml.dom.minidom import Element, Text
Element.writexml = self._Element_writexml_old
Text.writexml = self._Text_writexml_old
class MathMLContentPrinter(MathMLPrinterBase):
"""Prints an expression to the Content MathML markup language.
References: https://www.w3.org/TR/MathML2/chapter4.html
"""
printmethod = "_mathml_content"
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Add': 'plus',
'Mul': 'times',
'Derivative': 'diff',
'Number': 'cn',
'int': 'cn',
'Pow': 'power',
'Max': 'max',
'Min': 'min',
'Abs': 'abs',
'And': 'and',
'Or': 'or',
'Xor': 'xor',
'Not': 'not',
'Implies': 'implies',
'Symbol': 'ci',
'MatrixSymbol': 'ci',
'RandomSymbol': 'ci',
'Integral': 'int',
'Sum': 'sum',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'csc': 'csc',
'sec': 'sec',
'sinh': 'sinh',
'cosh': 'cosh',
'tanh': 'tanh',
'coth': 'coth',
'csch': 'csch',
'sech': 'sech',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'atan2': 'arctan',
'acot': 'arccot',
'acoth': 'arccoth',
'asec': 'arcsec',
'asech': 'arcsech',
'acsc': 'arccsc',
'acsch': 'arccsch',
'log': 'ln',
'Equality': 'eq',
'Unequality': 'neq',
'GreaterThan': 'geq',
'LessThan': 'leq',
'StrictGreaterThan': 'gt',
'StrictLessThan': 'lt',
'Union': 'union',
'Intersection': 'intersect',
}
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
n = e.__class__.__name__
return n.lower()
def _print_Mul(self, expr):
if expr.could_extract_minus_sign():
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self._print_Mul(-expr))
return x
from sympy.simplify import fraction
numer, denom = fraction(expr)
if denom is not S.One:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
x.appendChild(self._print(numer))
x.appendChild(self._print(denom))
return x
coeff, terms = expr.as_coeff_mul()
if coeff is S.One and len(terms) == 1:
# XXX since the negative coefficient has been handled, I don't
# think a coeff of 1 can remain
return self._print(terms[0])
if self.order != 'old':
terms = Mul._from_args(terms).as_ordered_factors()
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('times'))
if coeff != 1:
x.appendChild(self._print(coeff))
for term in terms:
x.appendChild(self._print(term))
return x
def _print_Add(self, expr, order=None):
args = self._as_ordered_terms(expr, order=order)
lastProcessed = self._print(args[0])
plusNodes = []
for arg in args[1:]:
if arg.could_extract_minus_sign():
# use minus
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(lastProcessed)
x.appendChild(self._print(-arg))
# invert expression since this is now minused
lastProcessed = x
if arg == args[-1]:
plusNodes.append(lastProcessed)
else:
plusNodes.append(lastProcessed)
lastProcessed = self._print(arg)
if arg == args[-1]:
plusNodes.append(self._print(arg))
if len(plusNodes) == 1:
return lastProcessed
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('plus'))
while plusNodes:
x.appendChild(plusNodes.pop(0))
return x
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
root = self.dom.createElement('piecewise')
for i, (e, c) in enumerate(expr.args):
if i == len(expr.args) - 1 and c == True:
piece = self.dom.createElement('otherwise')
piece.appendChild(self._print(e))
else:
piece = self.dom.createElement('piece')
piece.appendChild(self._print(e))
piece.appendChild(self._print(c))
root.appendChild(piece)
return root
def _print_MatrixBase(self, m):
x = self.dom.createElement('matrix')
for i in range(m.rows):
x_r = self.dom.createElement('matrixrow')
for j in range(m.cols):
x_r.appendChild(self._print(m[i, j]))
x.appendChild(x_r)
return x
def _print_Rational(self, e):
if e.q == 1:
# don't divide
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(str(e.p)))
return x
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
# numerator
xnum = self.dom.createElement('cn')
xnum.appendChild(self.dom.createTextNode(str(e.p)))
# denominator
xdenom = self.dom.createElement('cn')
xdenom.appendChild(self.dom.createTextNode(str(e.q)))
x.appendChild(xnum)
x.appendChild(xdenom)
return x
def _print_Limit(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x_1 = self.dom.createElement('bvar')
x_2 = self.dom.createElement('lowlimit')
x_1.appendChild(self._print(e.args[1]))
x_2.appendChild(self._print(e.args[2]))
x.appendChild(x_1)
x.appendChild(x_2)
x.appendChild(self._print(e.args[0]))
return x
def _print_ImaginaryUnit(self, e):
return self.dom.createElement('imaginaryi')
def _print_EulerGamma(self, e):
return self.dom.createElement('eulergamma')
def _print_GoldenRatio(self, e):
"""We use unicode #x3c6 for Greek letter phi as defined here
http://www.w3.org/2003/entities/2007doc/isogrk1.html"""
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode("\N{GREEK SMALL LETTER PHI}"))
return x
def _print_Exp1(self, e):
return self.dom.createElement('exponentiale')
def _print_Pi(self, e):
return self.dom.createElement('pi')
def _print_Infinity(self, e):
return self.dom.createElement('infinity')
def _print_NaN(self, e):
return self.dom.createElement('notanumber')
def _print_EmptySet(self, e):
return self.dom.createElement('emptyset')
def _print_BooleanTrue(self, e):
return self.dom.createElement('true')
def _print_BooleanFalse(self, e):
return self.dom.createElement('false')
def _print_NegativeInfinity(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self.dom.createElement('infinity'))
return x
def _print_Integral(self, e):
def lime_recur(limits):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
bvar_elem = self.dom.createElement('bvar')
bvar_elem.appendChild(self._print(limits[0][0]))
x.appendChild(bvar_elem)
if len(limits[0]) == 3:
low_elem = self.dom.createElement('lowlimit')
low_elem.appendChild(self._print(limits[0][1]))
x.appendChild(low_elem)
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][2]))
x.appendChild(up_elem)
if len(limits[0]) == 2:
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][1]))
x.appendChild(up_elem)
if len(limits) == 1:
x.appendChild(self._print(e.function))
else:
x.appendChild(lime_recur(limits[1:]))
return x
limits = list(e.limits)
limits.reverse()
return lime_recur(limits)
def _print_Sum(self, e):
# Printer can be shared because Sum and Integral have the
# same internal representation.
return self._print_Integral(e)
def _print_Symbol(self, sym):
ci = self.dom.createElement(self.mathml_tag(sym))
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mml:mrow')
for i, item in enumerate(items):
if i > 0:
mo = self.dom.createElement('mml:mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
# translate name, supers and subs to unicode characters
def translate(s):
if s in greek_unicode:
return greek_unicode.get(s)
else:
return s
name, supers, subs = split_super_sub(sym.name)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
mname = self.dom.createElement('mml:mi')
mname.appendChild(self.dom.createTextNode(name))
if not supers:
if not subs:
ci.appendChild(self.dom.createTextNode(name))
else:
msub = self.dom.createElement('mml:msub')
msub.appendChild(mname)
msub.appendChild(join(subs))
ci.appendChild(msub)
else:
if not subs:
msup = self.dom.createElement('mml:msup')
msup.appendChild(mname)
msup.appendChild(join(supers))
ci.appendChild(msup)
else:
msubsup = self.dom.createElement('mml:msubsup')
msubsup.appendChild(mname)
msubsup.appendChild(join(subs))
msubsup.appendChild(join(supers))
ci.appendChild(msubsup)
return ci
_print_MatrixSymbol = _print_Symbol
_print_RandomSymbol = _print_Symbol
def _print_Pow(self, e):
# Here we use root instead of power if the exponent is the reciprocal
# of an integer
if (self._settings['root_notation'] and e.exp.is_Rational
and e.exp.p == 1):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('root'))
if e.exp.q != 2:
xmldeg = self.dom.createElement('degree')
xmlcn = self.dom.createElement('cn')
xmlcn.appendChild(self.dom.createTextNode(str(e.exp.q)))
xmldeg.appendChild(xmlcn)
x.appendChild(xmldeg)
x.appendChild(self._print(e.base))
return x
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp))
return x
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_Float(self, e):
x = self.dom.createElement(self.mathml_tag(e))
repr_e = mlib_to_str(e._mpf_, repr_dps(e._prec))
x.appendChild(self.dom.createTextNode(repr_e))
return x
def _print_Derivative(self, e):
x = self.dom.createElement('apply')
diff_symbol = self.mathml_tag(e)
if requires_partial(e.expr):
diff_symbol = 'partialdiff'
x.appendChild(self.dom.createElement(diff_symbol))
x_1 = self.dom.createElement('bvar')
for sym, times in reversed(e.variable_count):
x_1.appendChild(self._print(sym))
if times > 1:
degree = self.dom.createElement('degree')
degree.appendChild(self._print(sympify(times)))
x_1.appendChild(degree)
x.appendChild(x_1)
x.appendChild(self._print(e.expr))
return x
def _print_Function(self, e):
x = self.dom.createElement("apply")
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Basic(self, e):
x = self.dom.createElement(self.mathml_tag(e))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_AssocOp(self, e):
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Relational(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x.appendChild(self._print(e.lhs))
x.appendChild(self._print(e.rhs))
return x
def _print_list(self, seq):
"""MathML reference for the <list> element:
http://www.w3.org/TR/MathML2/chapter4.html#contm.list"""
dom_element = self.dom.createElement('list')
for item in seq:
dom_element.appendChild(self._print(item))
return dom_element
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
_print_Implies = _print_AssocOp
_print_Not = _print_AssocOp
_print_Xor = _print_AssocOp
def _print_FiniteSet(self, e):
x = self.dom.createElement('set')
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Complement(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('setdiff'))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_ProductSet(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('cartesianproduct'))
for arg in e.args:
x.appendChild(self._print(arg))
return x
# XXX Symmetric difference is not supported for MathML content printers.
class MathMLPresentationPrinter(MathMLPrinterBase):
"""Prints an expression to the Presentation MathML markup language.
References: https://www.w3.org/TR/MathML2/chapter3.html
"""
printmethod = "_mathml_presentation"
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Number': 'mn',
'Limit': '→',
'Derivative': 'ⅆ',
'int': 'mn',
'Symbol': 'mi',
'Integral': '∫',
'Sum': '∑',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'acot': 'arccot',
'atan2': 'arctan',
'Equality': '=',
'Unequality': '≠',
'GreaterThan': '≥',
'LessThan': '≤',
'StrictGreaterThan': '>',
'StrictLessThan': '<',
'lerchphi': 'Φ',
'zeta': 'ζ',
'dirichlet_eta': 'η',
'elliptic_k': 'Κ',
'lowergamma': 'γ',
'uppergamma': 'Γ',
'gamma': 'Γ',
'totient': 'ϕ',
'reduced_totient': 'λ',
'primenu': 'ν',
'primeomega': 'Ω',
'fresnels': 'S',
'fresnelc': 'C',
'LambertW': 'W',
'Heaviside': 'Θ',
'BooleanTrue': 'True',
'BooleanFalse': 'False',
'NoneType': 'None',
'mathieus': 'S',
'mathieuc': 'C',
'mathieusprime': 'S′',
'mathieucprime': 'C′',
}
def mul_symbol_selection():
if (self._settings["mul_symbol"] is None or
self._settings["mul_symbol"] == 'None'):
return '⁢'
elif self._settings["mul_symbol"] == 'times':
return '×'
elif self._settings["mul_symbol"] == 'dot':
return '·'
elif self._settings["mul_symbol"] == 'ldot':
return '․'
elif not isinstance(self._settings["mul_symbol"], str):
raise TypeError
else:
return self._settings["mul_symbol"]
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
if e.__class__.__name__ == "Mul":
return mul_symbol_selection()
n = e.__class__.__name__
return n.lower()
def parenthesize(self, item, level, strict=False):
prec_val = precedence_traditional(item)
if (prec_val < level) or ((not strict) and prec_val <= level):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(item))
return brac
else:
return self._print(item)
def _print_Mul(self, expr):
def multiply(expr, mrow):
from sympy.simplify import fraction
numer, denom = fraction(expr)
if denom is not S.One:
frac = self.dom.createElement('mfrac')
if self._settings["fold_short_frac"] and len(str(expr)) < 7:
frac.setAttribute('bevelled', 'true')
xnum = self._print(numer)
xden = self._print(denom)
frac.appendChild(xnum)
frac.appendChild(xden)
mrow.appendChild(frac)
return mrow
coeff, terms = expr.as_coeff_mul()
if coeff is S.One and len(terms) == 1:
mrow.appendChild(self._print(terms[0]))
return mrow
if self.order != 'old':
terms = Mul._from_args(terms).as_ordered_factors()
if coeff != 1:
x = self._print(coeff)
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode(self.mathml_tag(expr)))
mrow.appendChild(x)
mrow.appendChild(y)
for term in terms:
mrow.appendChild(self.parenthesize(term, PRECEDENCE['Mul']))
if not term == terms[-1]:
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode(self.mathml_tag(expr)))
mrow.appendChild(y)
return mrow
mrow = self.dom.createElement('mrow')
if expr.could_extract_minus_sign():
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(x)
mrow = multiply(-expr, mrow)
else:
mrow = multiply(expr, mrow)
return mrow
def _print_Add(self, expr, order=None):
mrow = self.dom.createElement('mrow')
args = self._as_ordered_terms(expr, order=order)
mrow.appendChild(self._print(args[0]))
for arg in args[1:]:
if arg.could_extract_minus_sign():
# use minus
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('-'))
y = self._print(-arg)
# invert expression since this is now minused
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('+'))
y = self._print(arg)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_MatrixBase(self, m):
table = self.dom.createElement('mtable')
for i in range(m.rows):
x = self.dom.createElement('mtr')
for j in range(m.cols):
y = self.dom.createElement('mtd')
y.appendChild(self._print(m[i, j]))
x.appendChild(y)
table.appendChild(x)
if self._settings["mat_delim"] == '':
return table
brac = self.dom.createElement('mfenced')
if self._settings["mat_delim"] == "[":
brac.setAttribute('close', ']')
brac.setAttribute('open', '[')
brac.appendChild(table)
return brac
def _get_printed_Rational(self, e, folded=None):
if e.p < 0:
p = -e.p
else:
p = e.p
x = self.dom.createElement('mfrac')
if folded or self._settings["fold_short_frac"]:
x.setAttribute('bevelled', 'true')
x.appendChild(self._print(p))
x.appendChild(self._print(e.q))
if e.p < 0:
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(mo)
mrow.appendChild(x)
return mrow
else:
return x
def _print_Rational(self, e):
if e.q == 1:
# don't divide
return self._print(e.p)
return self._get_printed_Rational(e, self._settings["fold_short_frac"])
def _print_Limit(self, e):
mrow = self.dom.createElement('mrow')
munder = self.dom.createElement('munder')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('lim'))
x = self.dom.createElement('mrow')
x_1 = self._print(e.args[1])
arrow = self.dom.createElement('mo')
arrow.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
x_2 = self._print(e.args[2])
x.appendChild(x_1)
x.appendChild(arrow)
x.appendChild(x_2)
munder.appendChild(mi)
munder.appendChild(x)
mrow.appendChild(munder)
mrow.appendChild(self._print(e.args[0]))
return mrow
def _print_ImaginaryUnit(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ⅈ'))
return x
def _print_GoldenRatio(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('Φ'))
return x
def _print_Exp1(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ⅇ'))
return x
def _print_Pi(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('π'))
return x
def _print_Infinity(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('∞'))
return x
def _print_NegativeInfinity(self, e):
mrow = self.dom.createElement('mrow')
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode('-'))
x = self._print_Infinity(e)
mrow.appendChild(y)
mrow.appendChild(x)
return mrow
def _print_HBar(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℏ'))
return x
def _print_EulerGamma(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('γ'))
return x
def _print_TribonacciConstant(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('TribonacciConstant'))
return x
def _print_Dagger(self, e):
msup = self.dom.createElement('msup')
msup.appendChild(self._print(e.args[0]))
msup.appendChild(self.dom.createTextNode('†'))
return msup
def _print_Contains(self, e):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self._print(e.args[0]))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∈'))
mrow.appendChild(mo)
mrow.appendChild(self._print(e.args[1]))
return mrow
def _print_HilbertSpace(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℋ'))
return x
def _print_ComplexSpace(self, e):
msup = self.dom.createElement('msup')
msup.appendChild(self.dom.createTextNode('𝒞'))
msup.appendChild(self._print(e.args[0]))
return msup
def _print_FockSpace(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℱ'))
return x
def _print_Integral(self, expr):
intsymbols = {1: "∫", 2: "∬", 3: "∭"}
mrow = self.dom.createElement('mrow')
if len(expr.limits) <= 3 and all(len(lim) == 1 for lim in expr.limits):
            # Only up to three integral signs exist
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(intsymbols[len(expr.limits)]))
mrow.appendChild(mo)
else:
            # Either more than three integrals or explicit limits were provided
for lim in reversed(expr.limits):
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(intsymbols[1]))
if len(lim) == 1:
mrow.appendChild(mo)
if len(lim) == 2:
msup = self.dom.createElement('msup')
msup.appendChild(mo)
msup.appendChild(self._print(lim[1]))
mrow.appendChild(msup)
if len(lim) == 3:
msubsup = self.dom.createElement('msubsup')
msubsup.appendChild(mo)
msubsup.appendChild(self._print(lim[1]))
msubsup.appendChild(self._print(lim[2]))
mrow.appendChild(msubsup)
# print function
mrow.appendChild(self.parenthesize(expr.function, PRECEDENCE["Mul"],
strict=True))
# print integration variables
for lim in reversed(expr.limits):
d = self.dom.createElement('mo')
d.appendChild(self.dom.createTextNode('ⅆ'))
mrow.appendChild(d)
mrow.appendChild(self._print(lim[0]))
return mrow
def _print_Sum(self, e):
limits = list(e.limits)
subsup = self.dom.createElement('munderover')
low_elem = self._print(limits[0][1])
up_elem = self._print(limits[0][2])
summand = self.dom.createElement('mo')
summand.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
low = self.dom.createElement('mrow')
var = self._print(limits[0][0])
equal = self.dom.createElement('mo')
equal.appendChild(self.dom.createTextNode('='))
low.appendChild(var)
low.appendChild(equal)
low.appendChild(low_elem)
subsup.appendChild(summand)
subsup.appendChild(low)
subsup.appendChild(up_elem)
mrow = self.dom.createElement('mrow')
mrow.appendChild(subsup)
if len(str(e.function)) == 1:
mrow.appendChild(self._print(e.function))
else:
fence = self.dom.createElement('mfenced')
fence.appendChild(self._print(e.function))
mrow.appendChild(fence)
return mrow
def _print_Symbol(self, sym, style='plain'):
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mrow')
for i, item in enumerate(items):
if i > 0:
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
# translate name, supers and subs to unicode characters
def translate(s):
if s in greek_unicode:
return greek_unicode.get(s)
else:
return s
name, supers, subs = split_super_sub(sym.name)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
mname = self.dom.createElement('mi')
mname.appendChild(self.dom.createTextNode(name))
if len(supers) == 0:
if len(subs) == 0:
x = mname
else:
x = self.dom.createElement('msub')
x.appendChild(mname)
x.appendChild(join(subs))
else:
if len(subs) == 0:
x = self.dom.createElement('msup')
x.appendChild(mname)
x.appendChild(join(supers))
else:
x = self.dom.createElement('msubsup')
x.appendChild(mname)
x.appendChild(join(subs))
x.appendChild(join(supers))
# Set bold font?
if style == 'bold':
x.setAttribute('mathvariant', 'bold')
return x
def _print_MatrixSymbol(self, sym):
return self._print_Symbol(sym,
style=self._settings['mat_symbol_style'])
_print_RandomSymbol = _print_Symbol
def _print_conjugate(self, expr):
enc = self.dom.createElement('menclose')
enc.setAttribute('notation', 'top')
enc.appendChild(self._print(expr.args[0]))
return enc
def _print_operator_after(self, op, expr):
row = self.dom.createElement('mrow')
row.appendChild(self.parenthesize(expr, PRECEDENCE["Func"]))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(op))
row.appendChild(mo)
return row
def _print_factorial(self, expr):
return self._print_operator_after('!', expr.args[0])
def _print_factorial2(self, expr):
return self._print_operator_after('!!', expr.args[0])
def _print_binomial(self, expr):
brac = self.dom.createElement('mfenced')
frac = self.dom.createElement('mfrac')
frac.setAttribute('linethickness', '0')
frac.appendChild(self._print(expr.args[0]))
frac.appendChild(self._print(expr.args[1]))
brac.appendChild(frac)
return brac
def _print_Pow(self, e):
# Here we use root instead of power if the exponent is the
# reciprocal of an integer
if (e.exp.is_Rational and abs(e.exp.p) == 1 and e.exp.q != 1 and
self._settings['root_notation']):
if e.exp.q == 2:
x = self.dom.createElement('msqrt')
x.appendChild(self._print(e.base))
if e.exp.q != 2:
x = self.dom.createElement('mroot')
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp.q))
if e.exp.p == -1:
frac = self.dom.createElement('mfrac')
frac.appendChild(self._print(1))
frac.appendChild(x)
return frac
else:
return x
if e.exp.is_Rational and e.exp.q != 1:
if e.exp.is_negative:
top = self.dom.createElement('mfrac')
top.appendChild(self._print(1))
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._get_printed_Rational(-e.exp,
self._settings['fold_frac_powers']))
top.appendChild(x)
return top
else:
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._get_printed_Rational(e.exp,
self._settings['fold_frac_powers']))
return x
if e.exp.is_negative:
top = self.dom.createElement('mfrac')
top.appendChild(self._print(1))
if e.exp == -1:
top.appendChild(self._print(e.base))
else:
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._print(-e.exp))
top.appendChild(x)
return top
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._print(e.exp))
return x
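    # Illustrative (non-normative) sketch of the branches above, assuming the
    # default root_notation setting:
    #   x**Rational(1, 2) -> <msqrt><mi>x</mi></msqrt>
    #   x**Rational(1, 3) -> <mroot><mi>x</mi><mn>3</mn></mroot>
    #   x**(-2)           -> <mfrac><mn>1</mn><msup><mi>x</mi><mn>2</mn></msup></mfrac>
    #   x**2              -> <msup><mi>x</mi><mn>2</mn></msup>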
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_AccumulationBounds(self, i):
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '\u27e9')
brac.setAttribute('open', '\u27e8')
brac.appendChild(self._print(i.min))
brac.appendChild(self._print(i.max))
return brac
def _print_Derivative(self, e):
if requires_partial(e.expr):
d = '∂'
else:
d = self.mathml_tag(e)
# Determine denominator
m = self.dom.createElement('mrow')
dim = 0 # Total diff dimension, for numerator
for sym, num in reversed(e.variable_count):
dim += num
if num >= 2:
x = self.dom.createElement('msup')
xx = self.dom.createElement('mo')
xx.appendChild(self.dom.createTextNode(d))
x.appendChild(xx)
x.appendChild(self._print(num))
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(d))
m.appendChild(x)
y = self._print(sym)
m.appendChild(y)
mnum = self.dom.createElement('mrow')
if dim >= 2:
x = self.dom.createElement('msup')
xx = self.dom.createElement('mo')
xx.appendChild(self.dom.createTextNode(d))
x.appendChild(xx)
x.appendChild(self._print(dim))
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(d))
mnum.appendChild(x)
mrow = self.dom.createElement('mrow')
frac = self.dom.createElement('mfrac')
frac.appendChild(mnum)
frac.appendChild(m)
mrow.appendChild(frac)
# Print function
mrow.appendChild(self._print(e.expr))
return mrow
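    # Rough sketch of the markup built above: an <mfrac> whose numerator holds
    # the derivative symbol (raised to the total order when it exceeds one) and
    # whose denominator lists one d<var> factor per differentiation variable,
    # followed by the printed expression itself.  The partial symbol is used
    # whenever requires_partial() reports that it is needed (roughly, when the
    # expression depends on more than one variable).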
def _print_Function(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mi')
if self.mathml_tag(e) == 'log' and self._settings["ln_notation"]:
x.appendChild(self.dom.createTextNode('ln'))
else:
x.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
y = self.dom.createElement('mfenced')
for arg in e.args:
y.appendChild(self._print(arg))
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib_to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_mathml_numbers']
mrow = self.dom.createElement('mrow')
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(mant))
mrow.appendChild(mn)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(separator))
mrow.appendChild(mo)
msup = self.dom.createElement('msup')
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode("10"))
msup.appendChild(mn)
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(exp))
msup.appendChild(mn)
mrow.appendChild(msup)
return mrow
elif str_real == "+inf":
return self._print_Infinity(None)
elif str_real == "-inf":
return self._print_NegativeInfinity(None)
else:
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(str_real))
return mn
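    # Rough sketch, assuming the default number separator: Float('0.5') prints
    # as a single <mn>0.5</mn>, while a value in scientific notation such as
    # Float('1e20') becomes <mn>mantissa</mn>, a separator <mo>, and
    # <msup><mn>10</mn><mn>exponent</mn></msup>.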
def _print_polylog(self, expr):
mrow = self.dom.createElement('mrow')
m = self.dom.createElement('msub')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('Li'))
m.appendChild(mi)
m.appendChild(self._print(expr.args[0]))
mrow.appendChild(m)
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(expr.args[1]))
mrow.appendChild(brac)
return mrow
def _print_Basic(self, e):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(mi)
brac = self.dom.createElement('mfenced')
for arg in e.args:
brac.appendChild(self._print(arg))
mrow.appendChild(brac)
return mrow
def _print_Tuple(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
for arg in e.args:
x.appendChild(self._print(arg))
mrow.appendChild(x)
return mrow
def _print_Interval(self, i):
mrow = self.dom.createElement('mrow')
brac = self.dom.createElement('mfenced')
if i.start == i.end:
# Most often, this type of Interval is converted to a FiniteSet
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
brac.appendChild(self._print(i.start))
else:
if i.right_open:
brac.setAttribute('close', ')')
else:
brac.setAttribute('close', ']')
if i.left_open:
brac.setAttribute('open', '(')
else:
brac.setAttribute('open', '[')
brac.appendChild(self._print(i.start))
brac.appendChild(self._print(i.end))
mrow.appendChild(brac)
return mrow
def _print_Abs(self, expr, exp=None):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '|')
x.setAttribute('open', '|')
x.appendChild(self._print(expr.args[0]))
mrow.appendChild(x)
return mrow
_print_Determinant = _print_Abs
def _print_re_im(self, c, expr):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'fraktur')
mi.appendChild(self.dom.createTextNode(c))
mrow.appendChild(mi)
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(expr))
mrow.appendChild(brac)
return mrow
def _print_re(self, expr, exp=None):
return self._print_re_im('R', expr.args[0])
def _print_im(self, expr, exp=None):
return self._print_re_im('I', expr.args[0])
def _print_AssocOp(self, e):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(mi)
for arg in e.args:
mrow.appendChild(self._print(arg))
return mrow
def _print_SetOp(self, expr, symbol, prec):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self.parenthesize(expr.args[0], prec))
for arg in expr.args[1:]:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(symbol))
y = self.parenthesize(arg, prec)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_Union(self, expr):
prec = PRECEDENCE_TRADITIONAL['Union']
return self._print_SetOp(expr, '∪', prec)
def _print_Intersection(self, expr):
prec = PRECEDENCE_TRADITIONAL['Intersection']
return self._print_SetOp(expr, '∩', prec)
def _print_Complement(self, expr):
prec = PRECEDENCE_TRADITIONAL['Complement']
return self._print_SetOp(expr, '∖', prec)
def _print_SymmetricDifference(self, expr):
prec = PRECEDENCE_TRADITIONAL['SymmetricDifference']
return self._print_SetOp(expr, '∆', prec)
def _print_ProductSet(self, expr):
prec = PRECEDENCE_TRADITIONAL['ProductSet']
return self._print_SetOp(expr, '×', prec)
def _print_FiniteSet(self, s):
return self._print_set(s.args)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
for item in items:
brac.appendChild(self._print(item))
return brac
_print_frozenset = _print_set
def _print_LogOp(self, args, symbol):
mrow = self.dom.createElement('mrow')
if args[0].is_Boolean and not args[0].is_Not:
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(args[0]))
mrow.appendChild(brac)
else:
mrow.appendChild(self._print(args[0]))
for arg in args[1:]:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(symbol))
if arg.is_Boolean and not arg.is_Not:
y = self.dom.createElement('mfenced')
y.appendChild(self._print(arg))
else:
y = self._print(arg)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_BasisDependent(self, expr):
from sympy.vector import Vector
if expr == expr.zero:
# Not clear if this is ever called
return self._print(expr.zero)
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
mrow = self.dom.createElement('mrow')
for system, vect in items:
inneritems = list(vect.components.items())
inneritems.sort(key = lambda x:x[0].__str__())
for i, (k, v) in enumerate(inneritems):
if v == 1:
if i: # No + for first item
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('+'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
elif v == -1:
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
else:
if i: # No + for first item
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('+'))
mrow.appendChild(mo)
mbrac = self.dom.createElement('mfenced')
mbrac.appendChild(self._print(v))
mrow.appendChild(mbrac)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('⁢'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
return mrow
def _print_And(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '∧')
def _print_Or(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '∨')
def _print_Xor(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '⊻')
def _print_Implies(self, expr):
return self._print_LogOp(expr.args, '⇒')
def _print_Equivalent(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '⇔')
def _print_Not(self, e):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('¬'))
mrow.appendChild(mo)
if (e.args[0].is_Boolean):
x = self.dom.createElement('mfenced')
x.appendChild(self._print(e.args[0]))
else:
x = self._print(e.args[0])
mrow.appendChild(x)
return mrow
def _print_bool(self, e):
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
return mi
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
return mi
def _print_Range(self, s):
dots = "\u2026"
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
if s.start.is_infinite and s.stop.is_infinite:
if s.step.is_positive:
printset = dots, -1, 0, 1, dots
else:
printset = dots, 1, 0, -1, dots
elif s.start.is_infinite:
printset = dots, s[-1] - s.step, s[-1]
elif s.stop.is_infinite:
it = iter(s)
printset = next(it), next(it), dots
elif len(s) > 4:
it = iter(s)
printset = next(it), next(it), dots, s[-1]
else:
printset = tuple(s)
for el in printset:
if el == dots:
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(dots))
brac.appendChild(mi)
else:
brac.appendChild(self._print(el))
return brac
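    # For illustration, the printsets chosen above render roughly as:
    #   Range(10)        -> {0, 1, ..., 9}
    #   Range(0, oo)     -> {0, 1, ...}
    #   Range(-oo, oo)   -> {..., -1, 0, 1, ...}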
def _hprint_variadic_function(self, expr):
args = sorted(expr.args, key=default_sort_key)
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode((str(expr.func)).lower()))
mrow.appendChild(mo)
brac = self.dom.createElement('mfenced')
for symbol in args:
brac.appendChild(self._print(symbol))
mrow.appendChild(brac)
return mrow
_print_Min = _print_Max = _hprint_variadic_function
def _print_exp(self, expr):
msup = self.dom.createElement('msup')
msup.appendChild(self._print_Exp1(None))
msup.appendChild(self._print(expr.args[0]))
return msup
def _print_Relational(self, e):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self._print(e.lhs))
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(x)
mrow.appendChild(self._print(e.rhs))
return mrow
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
def _print_BaseScalar(self, e):
msub = self.dom.createElement('msub')
index, system = e._id
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._variable_names[index]))
msub.appendChild(mi)
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._name))
msub.appendChild(mi)
return msub
def _print_BaseVector(self, e):
msub = self.dom.createElement('msub')
index, system = e._id
mover = self.dom.createElement('mover')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._vector_names[index]))
mover.appendChild(mi)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('^'))
mover.appendChild(mo)
msub.appendChild(mover)
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._name))
msub.appendChild(mi)
return msub
def _print_VectorZero(self, e):
mover = self.dom.createElement('mover')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode("0"))
mover.appendChild(mi)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('^'))
mover.appendChild(mo)
return mover
def _print_Cross(self, expr):
mrow = self.dom.createElement('mrow')
vec1 = expr._expr1
vec2 = expr._expr2
mrow.appendChild(self.parenthesize(vec1, PRECEDENCE['Mul']))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('×'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(vec2, PRECEDENCE['Mul']))
return mrow
def _print_Curl(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('×'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Divergence(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('·'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Dot(self, expr):
mrow = self.dom.createElement('mrow')
vec1 = expr._expr1
vec2 = expr._expr2
mrow.appendChild(self.parenthesize(vec1, PRECEDENCE['Mul']))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('·'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(vec2, PRECEDENCE['Mul']))
return mrow
def _print_Gradient(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Laplacian(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∆'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Integers(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℤ'))
return x
def _print_Complexes(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℂ'))
return x
def _print_Reals(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℝ'))
return x
def _print_Naturals(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℕ'))
return x
def _print_Naturals0(self, e):
sub = self.dom.createElement('msub')
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℕ'))
sub.appendChild(x)
sub.appendChild(self._print(S.Zero))
return sub
def _print_SingularityFunction(self, expr):
shift = expr.args[0] - expr.args[1]
power = expr.args[2]
sup = self.dom.createElement('msup')
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '\u27e9')
brac.setAttribute('open', '\u27e8')
brac.appendChild(self._print(shift))
sup.appendChild(brac)
sup.appendChild(self._print(power))
return sup
def _print_NaN(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('NaN'))
return x
def _print_number_function(self, e, name):
        # Print the function as its name with args[0] as a subscript, i.e.
        # name_{args[0]} for a single argument, or name_{args[0]}(args[1], ...)
        # when there is more than one argument.
sub = self.dom.createElement('msub')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(name))
sub.appendChild(mi)
sub.appendChild(self._print(e.args[0]))
if len(e.args) == 1:
return sub
# TODO: copy-pasted from _print_Function: can we do better?
mrow = self.dom.createElement('mrow')
y = self.dom.createElement('mfenced')
for arg in e.args[1:]:
y.appendChild(self._print(arg))
mrow.appendChild(sub)
mrow.appendChild(y)
return mrow
def _print_bernoulli(self, e):
return self._print_number_function(e, 'B')
_print_bell = _print_bernoulli
def _print_catalan(self, e):
return self._print_number_function(e, 'C')
def _print_euler(self, e):
return self._print_number_function(e, 'E')
def _print_fibonacci(self, e):
return self._print_number_function(e, 'F')
def _print_lucas(self, e):
return self._print_number_function(e, 'L')
def _print_stieltjes(self, e):
return self._print_number_function(e, 'γ')
def _print_tribonacci(self, e):
return self._print_number_function(e, 'T')
def _print_ComplexInfinity(self, e):
x = self.dom.createElement('mover')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∞'))
x.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('~'))
x.appendChild(mo)
return x
def _print_EmptySet(self, e):
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('∅'))
return x
def _print_UniversalSet(self, e):
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('𝕌'))
return x
def _print_Adjoint(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('†'))
sup.appendChild(mo)
return sup
def _print_Transpose(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('T'))
sup.appendChild(mo)
return sup
def _print_Inverse(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
sup.appendChild(self._print(-1))
return sup
def _print_MatMul(self, expr):
from sympy.matrices.expressions.matmul import MatMul
x = self.dom.createElement('mrow')
args = expr.args
if isinstance(args[0], Mul):
args = args[0].as_ordered_factors() + list(args[1:])
else:
args = list(args)
if isinstance(expr, MatMul) and expr.could_extract_minus_sign():
if args[0] == -1:
args = args[1:]
else:
args[0] = -args[0]
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
x.appendChild(mo)
for arg in args[:-1]:
x.appendChild(self.parenthesize(arg, precedence_traditional(expr),
False))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('⁢'))
x.appendChild(mo)
x.appendChild(self.parenthesize(args[-1], precedence_traditional(expr),
False))
return x
def _print_MatPow(self, expr):
from sympy.matrices import MatrixSymbol
base, exp = expr.base, expr.exp
sup = self.dom.createElement('msup')
if not isinstance(base, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(base))
sup.appendChild(brac)
else:
sup.appendChild(self._print(base))
sup.appendChild(self._print(exp))
return sup
def _print_HadamardProduct(self, expr):
x = self.dom.createElement('mrow')
args = expr.args
for arg in args[:-1]:
x.appendChild(
self.parenthesize(arg, precedence_traditional(expr), False))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∘'))
x.appendChild(mo)
x.appendChild(
self.parenthesize(args[-1], precedence_traditional(expr), False))
return x
def _print_ZeroMatrix(self, Z):
x = self.dom.createElement('mn')
x.appendChild(self.dom.createTextNode('𝟘'))
return x
def _print_OneMatrix(self, Z):
x = self.dom.createElement('mn')
x.appendChild(self.dom.createTextNode('𝟙'))
return x
def _print_Identity(self, I):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('𝕀'))
return x
def _print_floor(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '\u230B')
x.setAttribute('open', '\u230A')
x.appendChild(self._print(e.args[0]))
mrow.appendChild(x)
return mrow
def _print_ceiling(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '\u2309')
x.setAttribute('open', '\u2308')
x.appendChild(self._print(e.args[0]))
mrow.appendChild(x)
return mrow
def _print_Lambda(self, e):
x = self.dom.createElement('mfenced')
mrow = self.dom.createElement('mrow')
symbols = e.args[0]
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(symbols)
mrow.appendChild(symbols)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('↦'))
mrow.appendChild(mo)
mrow.appendChild(self._print(e.args[1]))
x.appendChild(mrow)
return x
def _print_tuple(self, e):
x = self.dom.createElement('mfenced')
for i in e:
x.appendChild(self._print(i))
return x
def _print_IndexedBase(self, e):
return self._print(e.label)
def _print_Indexed(self, e):
x = self.dom.createElement('msub')
x.appendChild(self._print(e.base))
if len(e.indices) == 1:
x.appendChild(self._print(e.indices[0]))
return x
x.appendChild(self._print(e.indices))
return x
def _print_MatrixElement(self, e):
x = self.dom.createElement('msub')
x.appendChild(self.parenthesize(e.parent, PRECEDENCE["Atom"], strict = True))
brac = self.dom.createElement('mfenced')
brac.setAttribute("close", "")
brac.setAttribute("open", "")
for i in e.indices:
brac.appendChild(self._print(i))
x.appendChild(brac)
return x
def _print_elliptic_f(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝖥'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
y.setAttribute("separators", "|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_elliptic_e(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝖤'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
y.setAttribute("separators", "|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_elliptic_pi(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝛱'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
if len(e.args) == 2:
y.setAttribute("separators", "|")
else:
y.setAttribute("separators", ";|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_Ei(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('Ei'))
x.appendChild(mi)
x.appendChild(self._print(e.args))
return x
def _print_expint(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('E'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_jacobi(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:3]))
x.appendChild(y)
x.appendChild(self._print(e.args[3:]))
return x
def _print_gegenbauer(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('C'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_chebyshevt(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('T'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_chebyshevu(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('U'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_legendre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_assoc_legendre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_laguerre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('L'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_assoc_laguerre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('L'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_hermite(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('H'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
@print_function(MathMLPrinterBase)
def mathml(expr, printer='content', **settings):
"""Returns the MathML representation of expr. If printer is presentation
then prints Presentation MathML else prints content MathML.
"""
if printer == 'presentation':
return MathMLPresentationPrinter(settings).doprint(expr)
else:
return MathMLContentPrinter(settings).doprint(expr)
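# A minimal usage sketch (illustrative, not a doctest):
#     from sympy.abc import x
#     mathml(x + 1)                          # Content markup, e.g. '<apply><plus/>...'
#     mathml(x + 1, printer='presentation')  # Presentation markup, e.g. '<mrow><mi>x</mi>...'
# Both calls return the markup as a string; print_mathml() below pretty-prints
# the same markup instead of returning it.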
def print_mathml(expr, printer='content', **settings):
"""
Prints a pretty representation of the MathML code for expr. If printer is
presentation then prints Presentation MathML else prints content MathML.
Examples
========
>>> from sympy import print_mathml
>>> from sympy.abc import x
>>> print_mathml(x+1) #doctest: +NORMALIZE_WHITESPACE
<apply>
<plus/>
<ci>x</ci>
<cn>1</cn>
</apply>
>>> print_mathml(x+1, printer='presentation')
<mrow>
<mi>x</mi>
<mo>+</mo>
<mn>1</mn>
</mrow>
"""
if printer == 'presentation':
s = MathMLPresentationPrinter(settings)
else:
s = MathMLContentPrinter(settings)
xml = s._print(sympify(expr))
s.apply_patch()
pretty_xml = xml.toprettyxml()
s.restore_patch()
print(pretty_xml)
# For backward compatibility
MathMLPrinter = MathMLContentPrinter
|
7cd3cf18dfb63b941031e2887380ade0d68e9839012c4b944755dfcd49c87be5 | """
R code printer
The RCodePrinter converts single SymPy expressions into single R expressions,
using the functions defined in math.h where possible.
"""
from __future__ import annotations
from typing import Any
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.sets.fancysets import Range
# dictionary mapping SymPy function to (argument_conditions, C_function).
# Used in RCodePrinter._print_Function(self)
known_functions = {
#"Abs": [(lambda x: not x.is_integer, "fabs")],
"Abs": "abs",
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"exp": "exp",
"log": "log",
"erf": "erf",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
"floor": "floor",
"ceiling": "ceiling",
"sign": "sign",
"Max": "max",
"Min": "min",
"factorial": "factorial",
"gamma": "gamma",
"digamma": "digamma",
"trigamma": "trigamma",
"beta": "beta",
"sqrt": "sqrt", # To enable automatic rewrite
}
# These are the core reserved words in the R language. Taken from:
# https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Reserved-words
reserved_words = ['if',
'else',
'repeat',
'while',
'function',
'for',
'in',
'next',
'break',
'TRUE',
'FALSE',
'NULL',
'Inf',
'NaN',
'NA',
'NA_integer_',
'NA_real_',
'NA_complex_',
                  'NA_character_']
class RCodePrinter(CodePrinter):
"""A printer to convert SymPy expressions to strings of R code"""
printmethod = "_rcode"
language = "R"
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
}
_operators = {
'and': '&',
'or': '|',
'not': '!',
}
_relationals: dict[str, str] = {}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
self._dereference = set(settings.get('dereference', []))
self.reserved_words = set(reserved_words)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
return "{} = {};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists of codelines
"""
open_lines = []
close_lines = []
loopstart = "for (%(var)s in %(start)s:%(end)s){"
for i in indices:
# R arrays start at 1 and end at dimension
open_lines.append(loopstart % {
'var': self._print(i.label),
'start': self._print(i.lower+1),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
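    # For example, looping over an Idx('i', n) (which runs over 0..n-1 on the
    # SymPy side) opens the loop as "for (i in 1:n){" and closes it with "}".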
def _print_Pow(self, expr):
if "Pow" in self.known_functions:
return self._print_Function(expr)
PREC = precedence(expr)
if expr.exp == -1:
return '1.0/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return 'sqrt(%s)' % self._print(expr.base)
else:
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
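    # Illustrative outputs of the branches above:
    #   x**(-1)    -> '1.0/x'
    #   sqrt(x)    -> 'sqrt(x)'
    #   (x + y)**2 -> '(x + y)^2'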
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d.0/%d.0' % (p, q)
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s[%s]" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Exp1(self, expr):
return "exp(1)"
def _print_Pi(self, expr):
return 'pi'
def _print_Infinity(self, expr):
return 'Inf'
def _print_NegativeInfinity(self, expr):
return '-Inf'
def _print_Assignment(self, expr):
from sympy.codegen.ast import Assignment
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
#if isinstance(expr.rhs, Piecewise):
# from sympy.functions.elementary.piecewise import Piecewise
# # Here we modify Piecewise so each expression is now
# # an Assignment, and then continue on the print.
# expressions = []
# conditions = []
# for (e, c) in rhs.args:
# expressions.append(Assignment(lhs, e))
# conditions.append(c)
# temp = Piecewise(*zip(expressions, conditions))
# return self._print(temp)
#elif isinstance(lhs, MatrixSymbol):
if isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return "\n".join(lines)
elif self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Piecewise(self, expr):
# This method is called only for inline if constructs
# Top level piecewise is handled in doprint()
if expr.args[-1].cond == True:
last_line = "%s" % self._print(expr.args[-1].expr)
else:
last_line = "ifelse(%s,%s,NA)" % (self._print(expr.args[-1].cond), self._print(expr.args[-1].expr))
code=last_line
for e, c in reversed(expr.args[:-1]):
code= "ifelse(%s,%s," % (self._print(c), self._print(e))+code+")"
return(code)
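    # For example, Piecewise((a, x < 1), (b, x < 2), (c, True)) is rendered
    # roughly as 'ifelse(x < 1,a,ifelse(x < 2,b,c))'; without a trailing True
    # condition the innermost fallback is NA.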
def _print_ITE(self, expr):
from sympy.functions import Piecewise
return self._print(expr.rewrite(Piecewise))
def _print_MatrixElement(self, expr):
return "{}[{}]".format(self.parenthesize(expr.parent, PRECEDENCE["Atom"],
strict=True), expr.j + expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if expr in self._dereference:
return '(*{})'.format(name)
else:
return name
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
op = expr.op
rhs_code = self._print(expr.rhs)
return "{} {} {};".format(lhs_code, op, rhs_code)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return 'for({target} in seq(from={start}, to={stop}, by={step}){{\n{body}\n}}'.format(target=target, start=start,
stop=stop-1, step=step, body=body)
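    # Illustrative: with expr.iterable = Range(0, 10) the generated header is
    # roughly 'for(i in seq(from=0, to=9, by=1){', followed by the printed
    # body and a closing '}'.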
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def rcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of r code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
are their desired R string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
    rfunction_string)] or [(argument_test, rfunction_formatter)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function
>>> x, tau = symbols("x, tau")
>>> rcode((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau^(7.0/2.0)'
>>> rcode(sin(x), assign_to="s")
's = sin(x);'
Simple custom printing can be defined for certain types by passing a
dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
Alternatively, the dictionary value can be a list of tuples i.e.
[(argument_test, cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")],
... "func": "f"
... }
>>> func = Function('func')
>>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
'f(fabs(x) + CEIL(x))'
or if the R-function takes a subset of the original arguments:
>>> rcode(2**x + 3**x, user_functions={'Pow': [
... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e),
... (lambda b, e: b != 2, 'pow')]})
'exp2(x) + pow(3, x)'
    ``Piecewise`` expressions are converted into nested ``ifelse`` calls, as
    shown in the example below. If the ``Piecewise`` lacks a default term,
    represented by ``(expr, True)``, the innermost ``ifelse`` falls back to
    ``NA``, so it is good practice to always supply a default ``(expr, True)``
    branch to make sure the expression evaluates to something.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(rcode(expr, assign_to=tau))
tau = ifelse(x > 0,x + 1,x);
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> rcode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(rcode(mat, A))
A[0] = x^2;
A[1] = ifelse(x > 0,x + 1,x);
A[2] = sin(x);
"""
return RCodePrinter(settings).doprint(expr, assign_to)
def print_rcode(expr, **settings):
"""Prints R representation of the given expression."""
print(rcode(expr, **settings))
|
526e4876860a165c7a7d1f1c633498b01f7a8dc04e1c03da9ba5825afed2bace | """
Octave (and Matlab) code printer
The `OctaveCodePrinter` converts SymPy expressions into Octave expressions.
It uses a subset of the Octave language for Matlab compatibility.
A complete code generator, which uses `octave_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import annotations
from typing import Any
from sympy.core import Mul, Pow, S, Rational
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Octave. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "cot", "sec", "csc",
"asin", "acos", "acot", "atan", "atan2", "asec", "acsc",
"sinh", "cosh", "tanh", "coth", "csch", "sech",
"asinh", "acosh", "atanh", "acoth", "asech", "acsch",
"erfc", "erfi", "erf", "erfinv", "erfcinv",
"besseli", "besselj", "besselk", "bessely",
"bernoulli", "beta", "euler", "exp", "factorial", "floor",
"fresnelc", "fresnels", "gamma", "harmonic", "log",
"polylog", "sign", "zeta", "legendre"]
# These functions have different names ("SymPy": "Octave"), more
# generally a mapping to (argument_conditions, octave_function).
known_fcns_src2 = {
"Abs": "abs",
"arg": "angle", # arg/angle ok in Octave but only angle in Matlab
"binomial": "bincoeff",
"ceiling": "ceil",
"chebyshevu": "chebyshevU",
"chebyshevt": "chebyshevT",
"Chi": "coshint",
"Ci": "cosint",
"conjugate": "conj",
"DiracDelta": "dirac",
"Heaviside": "heaviside",
"im": "imag",
"laguerre": "laguerreL",
"LambertW": "lambertw",
"li": "logint",
"loggamma": "gammaln",
"Max": "max",
"Min": "min",
"Mod": "mod",
"polygamma": "psi",
"re": "real",
"RisingFactorial": "pochhammer",
"Shi": "sinhint",
"Si": "sinint",
}
class OctaveCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Octave/Matlab code.
"""
printmethod = "_octave"
language = "Octave"
_operators = {
'and': '&',
'or': '|',
'not': '~',
}
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
    # assignment (if False). FIXME: this should be looked at more carefully
# for Octave.
def __init__(self, settings={}):
super().__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "% {}".format(text)
def _declare_number_const(self, name, value):
return "{} = {};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Octave uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Octave arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Octave
if (expr.is_number and expr.is_imaginary and
(S.ImaginaryUnit*expr).is_Integer):
return "%si" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
pow_paren = [] # Will collect all pow with more than one base element and exp = -1
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
if len(item.args[0].args) != 1 and isinstance(item.base, Mul): # To avoid situations like #14160
pow_paren.append(item)
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# To parenthesize Pow with exp = -1 and having more than one Symbol
for item in pow_paren:
if item.base in b:
b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = r + mulsym + a_str[i]
return r
if not b:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return sign + multjoin(a, a_str) + divsym + b_str[0]
else:
divsym = '/' if all(bi.is_number for bi in b) else './'
return (sign + multjoin(a, a_str) +
divsym + "(%s)" % multjoin(b, b_str))
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Pow(self, expr):
powsymbol = '^' if all(x.is_number for x in expr.args) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "sqrt(%s)" % self._print(expr.base)
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
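    # Illustrative outputs:
    #   sqrt(x) -> 'sqrt(x)'
    #   x**(-1) -> '1./x'
    #   x**2    -> 'x.^2'   (symbolic operands fall back to elementwise '.^')
    #   pi**2   -> 'pi^2'   (purely numeric operands keep '^')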
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_MatrixSolve(self, expr):
PREC = precedence(expr)
return "%s \\ %s" % (self.parenthesize(expr.matrix, PREC),
self.parenthesize(expr.vector, PREC))
def _print_Pi(self, expr):
return 'pi'
def _print_ImaginaryUnit(self, expr):
return "1i"
def _print_Exp1(self, expr):
return "exp(1)"
def _print_GoldenRatio(self, expr):
# FIXME: how to do better, e.g., for octave_code(2*GoldenRatio)?
#return self._print((1+sqrt(S(5)))/2)
return "(1+sqrt(5))/2"
def _print_Assignment(self, expr):
from sympy.codegen.ast import Assignment
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'inf'
def _print_NegativeInfinity(self, expr):
return '-inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return '{' + ', '.join(self._print(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
_print_List = _print_list
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if (A.rows, A.cols) == (0, 0):
return '[]'
elif S.Zero in A.shape:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
# Octave does not distinguish between scalars and 1x1 matrices
return self._print(A[0, 0])
return "[%s]" % "; ".join(" ".join([self._print(a) for a in A[r, :]])
for r in range(A.rows))
def _print_SparseRepMatrix(self, A):
from sympy.matrices import Matrix
        L = A.col_list()
# make row vectors of the indices and entries
I = Matrix([[k[0] + 1 for k in L]])
J = Matrix([[k[1] + 1 for k in L]])
AIJ = Matrix([[k[2] for k in L]])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \
+ '(%s, %s)' % (expr.i + 1, expr.j + 1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '(' +
strslice(expr.rowslice, expr.parent.shape[0]) + ', ' +
strslice(expr.colslice, expr.parent.shape[1]) + ')')
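    # Illustrative: for a MatrixSymbol A with shape (5, 5),
    #   A[0:2, 0:2] -> 'A(1:2, 1:2)'
    #   A[1:5, 0:5] -> 'A(2:end, :)'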
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_KroneckerDelta(self, expr):
prec = PRECEDENCE["Pow"]
return "double(%s == %s)" % tuple(self.parenthesize(x, prec)
for x in expr.args)
def _print_HadamardProduct(self, expr):
return '.*'.join([self.parenthesize(arg, precedence(expr))
for arg in expr.args])
def _print_HadamardPower(self, expr):
PREC = precedence(expr)
return '.**'.join([
self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC)
])
def _print_Identity(self, expr):
shape = expr.shape
if len(shape) == 2 and shape[0] == shape[1]:
shape = [shape[0]]
s = ", ".join(self._print(n) for n in shape)
return "eye(" + s + ")"
def _print_lowergamma(self, expr):
# Octave implements regularized incomplete gamma function
return "(gammainc({1}, {0}).*gamma({0}))".format(
self._print(expr.args[0]), self._print(expr.args[1]))
def _print_uppergamma(self, expr):
return "(gammainc({1}, {0}, 'upper').*gamma({0}))".format(
self._print(expr.args[0]), self._print(expr.args[1]))
def _print_sinc(self, expr):
        # Note: divide by pi because Octave implements the normalized sinc function.
return "sinc(%s)" % self._print(expr.args[0]/S.Pi)
def _print_hankel1(self, expr):
return "besselh(%s, 1, %s)" % (self._print(expr.order),
self._print(expr.argument))
def _print_hankel2(self, expr):
return "besselh(%s, 2, %s)" % (self._print(expr.order),
self._print(expr.argument))
# Note: as of 2015, Octave doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_airyai(self, expr):
return "airy(0, %s)" % self._print(expr.args[0])
def _print_airyaiprime(self, expr):
return "airy(1, %s)" % self._print(expr.args[0])
def _print_airybi(self, expr):
return "airy(2, %s)" % self._print(expr.args[0])
def _print_airybiprime(self, expr):
return "airy(3, %s)" % self._print(expr.args[0])
def _print_expint(self, expr):
mu, x = expr.args
if mu != 1:
return self._print_not_supported(expr)
return "expint(%s)" % self._print(x)
def _one_or_two_reversed_args(self, expr):
assert len(expr.args) <= 2
return '{name}({args})'.format(
name=self.known_functions[expr.__class__.__name__],
args=", ".join([self._print(x) for x in reversed(expr.args)])
)
_print_DiracDelta = _print_LambertW = _one_or_two_reversed_args
def _nested_binary_math_func(self, expr):
return '{name}({arg1}, {arg2})'.format(
name=self.known_functions[expr.__class__.__name__],
arg1=self._print(expr.args[0]),
arg2=self._print(expr.func(*expr.args[1:]))
)
_print_Max = _print_Min = _nested_binary_math_func
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
# Express each (cond, expr) pair in a nested Horner form:
# (condition) .* (expr) + (not cond) .* (<others>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({0}).*({1}) + (~({0})).*(".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = "%s" % self._print(expr.args[-1].expr)
pw = " ...\n".join(ecpairs) + elast + ")"*len(ecpairs)
            # Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
def _print_zeta(self, expr):
if len(expr.args) == 1:
return "zeta(%s)" % self._print(expr.args[0])
else:
# Matlab two argument zeta is not equivalent to SymPy's
return self._print_not_supported(expr)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(search(re, line) for re in inc_regex))
for line in code ]
decrease = [ int(any(search(re, line) for re in dec_regex))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def octave_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Octave (or Matlab) code.
The string uses a subset of the Octave language for Matlab compatibility.
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
        The precision for numbers such as pi [default=17].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import octave_code, symbols, sin, pi
>>> x = symbols('x')
>>> octave_code(sin(x).series(x).removeO())
'x.^5/120 - x.^3/6 + x'
>>> from sympy import Rational, ceiling
>>> x, y, tau = symbols("x, y, tau")
>>> octave_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau.^(7/2)'
Note that element-wise (Hadamard) operations are used by default between
symbols. This is because it's very common in Octave to write "vectorized"
code. It is harmless if the values are scalars.
>>> octave_code(sin(pi*x*y), assign_to="s")
's = sin(pi*x.*y);'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> octave_code(3*pi*A**3)
'(3*pi)*A^3'
This class uses several rules to decide which symbol to use for a product.
Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
A HadamardProduct can be used to specify componentwise multiplication ".*"
of two MatrixSymbols. There is currently no easy way to specify
scalar symbols, so sometimes the code might have some minor cosmetic
issues. For example, suppose x and y are scalars and A is a Matrix, then
while a human programmer might write "(x^2*y)*A^3", we generate:
>>> octave_code(x**2*y*A**3)
'(x.^2.*y)*A^3'
Matrices are supported using Octave inline notation. When using
``assign_to`` with matrices, the name can be specified either as a string
or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 sin(x) ceil(x)];'
``Piecewise`` expressions are implemented with logical masking by default.
Alternatively, you can pass "inline=False" to use if-else conditionals.
Note that if the ``Piecewise`` lacks a default term, represented by
``(expr, True)`` then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> octave_code(pw, assign_to=tau)
'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 ((x > 0).*(x + 1) + (~(x > 0)).*(x)) sin(x)];'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Octave function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_octave_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> octave_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_octave_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> octave_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy(i) = (y(i + 1) - y(i))./(t(i + 1) - t(i));'
"""
return OctaveCodePrinter(settings).doprint(expr, assign_to)
def print_octave_code(expr, **settings):
"""Prints the Octave (or Matlab) representation of the given expression.
See `octave_code` for the meaning of the optional arguments.
"""
print(octave_code(expr, **settings))
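# Illustrative sketch (not part of the original module): how the ``inline``
# setting changes Piecewise printing.  Run in a separate session, not on
# import of this file:
#
#     from sympy import symbols, Piecewise, octave_code
#     x = symbols('x')
#     pw = Piecewise((x + 1, x > 0), (x, True))
#     octave_code(pw, assign_to='r')                # logical masking (default)
#     octave_code(pw, assign_to='r', inline=False)  # if/elseif/else block
#
# The first call gives the masked one-liner shown in the docstring above; the
# second emits an if/else block via _print_Piecewise with inline disabled.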
|
39826c935b3dc11118f4cec21582ef9c09b9b19da155bc80d83189d407489c45 | from __future__ import annotations
from typing import Any
from functools import wraps
from sympy.core import Add, Mul, Pow, S, sympify, Float
from sympy.core.basic import Basic
from sympy.core.expr import UnevaluatedExpr
from sympy.core.function import Lambda
from sympy.core.mul import _keep_coeff
from sympy.core.sorting import default_sort_key
from sympy.core.symbol import Symbol
from sympy.functions.elementary.complexes import re
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import precedence, PRECEDENCE
class requires:
""" Decorator for registering requirements on print methods. """
def __init__(self, **kwargs):
self._req = kwargs
def __call__(self, method):
def _method_wrapper(self_, *args, **kwargs):
for k, v in self._req.items():
getattr(self_, k).update(v)
return method(self_, *args, **kwargs)
return wraps(method)(_method_wrapper)
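# Illustrative sketch (not in the original source): ``requires`` assumes the
# printer instance owns a set-valued attribute for each keyword and updates
# it every time the decorated method runs.  A hypothetical printer tracking
# needed C headers could look like:
#
#     class MyPrinter(CodePrinter):
#         def __init__(self, settings=None):
#             super().__init__(settings)
#             self.headers = set()
#
#         @requires(headers={'math.h'})
#         def _print_sin(self, expr):
#             return "sin(%s)" % self._print(expr.args[0])
#
# After printing an expression containing sin, printer.headers would contain
# {'math.h'}.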
class AssignmentError(Exception):
"""
Raised if an assignment variable for a loop is missing.
"""
pass
def _convert_python_lists(arg):
if isinstance(arg, list):
from sympy.codegen.abstract_nodes import List
return List(*(_convert_python_lists(e) for e in arg))
elif isinstance(arg, tuple):
return tuple(_convert_python_lists(e) for e in arg)
else:
return arg
class CodePrinter(StrPrinter):
"""
The base class for code-printing subclasses.
"""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'error_on_reserved': False,
'reserved_word_suffix': '_',
'human': True,
'inline': False,
'allow_unknown_functions': False,
}
# Functions which are "simple" to rewrite to other functions that
# may be supported
# function_to_rewrite : (function_to_rewrite_to, iterable_with_other_functions_required)
_rewriteable_functions = {
'cot': ('tan', []),
'csc': ('sin', []),
'sec': ('cos', []),
'acot': ('atan', []),
'acsc': ('asin', []),
'asec': ('acos', []),
'coth': ('exp', []),
'csch': ('exp', []),
'sech': ('exp', []),
'acoth': ('log', []),
'acsch': ('log', []),
'asech': ('log', []),
'catalan': ('gamma', []),
'fibonacci': ('sqrt', []),
'lucas': ('sqrt', []),
'beta': ('gamma', []),
'sinc': ('sin', ['Piecewise']),
'Mod': ('floor', []),
'factorial': ('gamma', []),
'factorial2': ('gamma', ['Piecewise']),
'subfactorial': ('uppergamma', []),
'RisingFactorial': ('gamma', ['Piecewise']),
'FallingFactorial': ('gamma', ['Piecewise']),
'binomial': ('gamma', []),
'frac': ('floor', []),
'Max': ('Piecewise', []),
'Min': ('Piecewise', []),
'Heaviside': ('Piecewise', []),
'erf2': ('erf', []),
'erfc': ('erf', []),
'Li': ('li', []),
'Ei': ('li', []),
'dirichlet_eta': ('zeta', []),
'riemann_xi': ('zeta', ['gamma']),
}
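# Illustrative sketch (assumption, not from the original source): if a target
# printer knows ``cos`` but has no printing rule for ``sec``, the entry
# ``'sec': ('cos', [])`` above lets _print_Function fall back to
# ``expr.rewrite(cos)``, so that e.g. sec(x) is emitted as the equivalent of
# 1/cos(x) in the target language (the exact spelling depends on the
# subclass printer).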
def __init__(self, settings=None):
super().__init__(settings=settings)
if not hasattr(self, 'reserved_words'):
self.reserved_words = set()
def _handle_UnevaluatedExpr(self, expr):
return expr.replace(re, lambda arg: arg if isinstance(
arg, UnevaluatedExpr) and arg.args[0].is_real else re(arg))
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
----------
expr : Expression
The expression to be printed.
assign_to : Symbol, string, MatrixSymbol, list of strings or Symbols (optional)
If provided, the printed code will set the expression to a variable or multiple variables
with the name or names given in ``assign_to``.
"""
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.codegen.ast import CodeBlock, Assignment
def _handle_assign_to(expr, assign_to):
if assign_to is None:
return sympify(expr)
if isinstance(assign_to, (list, tuple)):
if len(expr) != len(assign_to):
raise ValueError('Failed to assign an expression of length {} to {} variables'.format(len(expr), len(assign_to)))
return CodeBlock(*[_handle_assign_to(lhs, rhs) for lhs, rhs in zip(expr, assign_to)])
if isinstance(assign_to, str):
if expr.is_Matrix:
assign_to = MatrixSymbol(assign_to, *expr.shape)
else:
assign_to = Symbol(assign_to)
elif not isinstance(assign_to, Basic):
raise TypeError("{} cannot assign to object of type {}".format(
type(self).__name__, type(assign_to)))
return Assignment(assign_to, expr)
expr = _convert_python_lists(expr)
expr = _handle_assign_to(expr, assign_to)
# Remove re(...) nodes because UnevaluatedExpr.is_real is always None:
expr = self._handle_UnevaluatedExpr(expr)
# keep a set of expressions that are not strictly translatable to Code
# and number constants that must be declared and initialized
self._not_supported = set()
self._number_symbols = set()
lines = self._print(expr).splitlines()
# format the output
if self._settings["human"]:
frontlines = []
if self._not_supported:
frontlines.append(self._get_comment(
"Not supported in {}:".format(self.language)))
for expr in sorted(self._not_supported, key=str):
frontlines.append(self._get_comment(type(expr).__name__))
for name, value in sorted(self._number_symbols, key=str):
frontlines.append(self._declare_number_const(name, value))
lines = frontlines + lines
lines = self._format_code(lines)
result = "\n".join(lines)
else:
lines = self._format_code(lines)
num_syms = {(k, self._print(v)) for k, v in self._number_symbols}
result = (num_syms, self._not_supported, "\n".join(lines))
self._not_supported = set()
self._number_symbols = set()
return result
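# Illustrative sketch (summarising the code above, not part of the original
# source): with human=True the unsupported expressions and numeric constants
# become comment/declaration lines prepended to the code; with human=False
# the same information is returned structurally, e.g. for a concrete
# subclass (such as the Octave printer) constructed with {'human': False}:
#
#     number_symbols, not_supported, code = printer.doprint(expr)
#
# where ``number_symbols`` is a set of (NumberSymbol, printed value) pairs
# and ``not_supported`` is a set of untranslatable subexpressions.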
def _doprint_loops(self, expr, assign_to=None):
# Here we print an expression that contains Indexed objects; they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements, possibly storing results in temporary
# variables or accumulating them in the assign_to object.
if self._settings.get('contract', True):
from sympy.tensor import get_contraction_structure
# Setup loops over non-dummy indices -- all terms need these
indices = self._get_expression_indices(expr, assign_to)
# Setup loops over dummy indices -- each term needs separate treatment
dummies = get_contraction_structure(expr)
else:
indices = []
dummies = {None: (expr,)}
openloop, closeloop = self._get_loop_opening_ending(indices)
# terms with no summations first
if None in dummies:
text = StrPrinter.doprint(self, Add(*dummies[None]))
else:
# If all terms have summations we must initialize array to Zero
text = StrPrinter.doprint(self, 0)
# skip redundant assignments (where lhs == rhs)
lhs_printed = self._print(assign_to)
lines = []
if text != lhs_printed:
lines.extend(openloop)
if assign_to is not None:
text = self._get_statement("%s = %s" % (lhs_printed, text))
lines.append(text)
lines.extend(closeloop)
# then terms with summations
for d in dummies:
if isinstance(d, tuple):
indices = self._sort_optimized(d, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in dummies[d]:
if term in dummies and not ([list(f.keys()) for f in dummies[term]]
== [[None] for f in dummies[term]]):
# If one factor in the term has its own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
"FIXME: no support for contractions in factor yet")
else:
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
if assign_to is None:
raise AssignmentError(
"need assignment variable for loops")
if term.has(assign_to):
raise ValueError("FIXME: lhs present in rhs,\
this is undefined in CodePrinter")
lines.extend(openloop)
lines.extend(openloop_d)
text = "%s = %s" % (lhs_printed, StrPrinter.doprint(
self, assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return "\n".join(lines)
def _get_expression_indices(self, expr, assign_to):
from sympy.tensor import get_indices
rinds, junk = get_indices(expr)
linds, junk = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError("lhs indices must match non-dummy"
" rhs indices in %s" % expr)
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
from sympy.tensor.indexed import Indexed
if not indices:
return []
# determine optimized loop order by giving a score to each index
# the index with the highest score is put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _rate_index_position(self, p):
"""function to calculate score based on position among indices
This method is used to sort loops in an optimized order, see
CodePrinter._sort_optimized()
"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc..."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists
of codelines"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _print_Dummy(self, expr):
if expr.name.startswith('Dummy_'):
return '_' + expr.name
else:
return '%s_%d' % (expr.name, expr.dummy_index)
def _print_CodeBlock(self, expr):
return '\n'.join([self._print(i) for i in expr.args])
def _print_String(self, string):
return str(string)
def _print_QuotedString(self, arg):
return '"%s"' % arg.text
def _print_Comment(self, string):
return self._get_comment(str(string))
def _print_Assignment(self, expr):
from sympy.codegen.ast import Assignment
from sympy.functions.elementary.piecewise import Piecewise
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue with the printing.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
elif isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return "\n".join(lines)
elif self._settings.get("contract", False) and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
return self._get_statement("{} {} {}".format(
*map(lambda arg: self._print(arg),
[lhs_code, expr.op, rhs_code])))
def _print_FunctionCall(self, expr):
return '%s(%s)' % (
expr.name,
', '.join(map(lambda arg: self._print(arg),
expr.function_args)))
def _print_Variable(self, expr):
return self._print(expr.symbol)
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if name in self.reserved_words:
if self._settings['error_on_reserved']:
msg = ('This expression includes the symbol "{}" which is a '
'reserved keyword in this language.')
raise ValueError(msg.format(name))
return name + self._settings['reserved_word_suffix']
else:
return name
def _can_print(self, name):
""" Check if function ``name`` is either a known function or has its own
printing method. Used to check if rewriting is possible."""
return name in self.known_functions or getattr(self, '_print_{}'.format(name), False)
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
if isinstance(cond_func, str):
return "%s(%s)" % (cond_func, self.stringify(expr.args, ", "))
else:
for cond, func in cond_func:
if cond(*expr.args):
break
if func is not None:
try:
return func(*[self.parenthesize(item, 0) for item in expr.args])
except TypeError:
return "%s(%s)" % (func, self.stringify(expr.args, ", "))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
elif expr.func.__name__ in self._rewriteable_functions:
# Simple rewrite to supported function possible
target_f, required_fs = self._rewriteable_functions[expr.func.__name__]
if self._can_print(target_f) and all(self._can_print(f) for f in required_fs):
return self._print(expr.rewrite(target_f))
if expr.is_Function and self._settings.get('allow_unknown_functions', False):
return '%s(%s)' % (self._print(expr.func), ', '.join(map(self._print, expr.args)))
else:
return self._print_not_supported(expr)
_print_Expr = _print_Function
# Don't inherit the str-printer method for Heaviside to the code printers
_print_Heaviside = None
def _print_NumberSymbol(self, expr):
if self._settings.get("inline", False):
return self._print(Float(expr.evalf(self._settings["precision"])))
else:
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
Float(expr.evalf(self._settings["precision"]))))
return str(expr)
def _print_Catalan(self, expr):
return self._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
return self._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
return self._print_NumberSymbol(expr)
def _print_TribonacciConstant(self, expr):
return self._print_NumberSymbol(expr)
def _print_Exp1(self, expr):
return self._print_NumberSymbol(expr)
def _print_Pi(self, expr):
return self._print_NumberSymbol(expr)
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print(expr.to_nnf())
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print(expr.to_nnf())
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_BooleanFunction(self, expr):
return self._print(expr.to_nnf())
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
pow_paren = [] # Will collect all pow with more than one base element and exp = -1
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
if len(item.args[0].args) != 1 and isinstance(item.base, Mul): # To avoid situations like #14160
pow_paren.append(item)
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
if len(a) == 1 and sign == "-":
# Unary minus does not have a SymPy class, and hence there's no
# precedence weight associated with it. Python's unary minus has
# an operator precedence between multiplication and exponentiation,
# so we use this to compute a weight.
a_str = [self.parenthesize(a[0], 0.5*(PRECEDENCE["Pow"]+PRECEDENCE["Mul"]))]
else:
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# To parenthesize Pow with exp = -1 and having more than one Symbol
for item in pow_paren:
if item.base in b:
b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)]
if not b:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + "/" + b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
try:
self._not_supported.add(expr)
except TypeError:
# not hashable
pass
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_AccumulationBounds = _print_not_supported
_print_Limit = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_DeferredVector = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Order = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_Uniform = _print_not_supported
_print_Unit = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
_print_Relational = _print_not_supported
# Code printer functions. These are included in this file so that they can be
# imported in the top-level __init__.py without importing the sympy.codegen
# module.
def ccode(expr, assign_to=None, standard='c99', **settings):
"""Converts an expr to a string of c code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
standard : str, optional
String specifying the C standard to follow. With 'c99' the printer may
use the larger set of C99 math functions; pass 'c89' if your compiler
only supports the older standard. [default='c99'].
precision : integer, optional
The precision for numbers such as pi [default=17].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
are their desired C string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)] or [(argument_test, cfunction_formatter)]. See below
for examples.
dereference : iterable, optional
An iterable of symbols that should be dereferenced in the printed code
expression. These would be values passed by address to the function.
For example, if ``dereference=[a]``, the resulting code would print
``(*a)`` instead of ``a``.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs, Function
>>> x, tau = symbols("x, tau")
>>> expr = (2*tau)**Rational(7, 2)
>>> ccode(expr)
'8*M_SQRT2*pow(tau, 7.0/2.0)'
>>> ccode(expr, math_macros={})
'8*sqrt(2)*pow(tau, 7.0/2.0)'
>>> ccode(sin(x), assign_to="s")
's = sin(x);'
>>> from sympy.codegen.ast import real, float80
>>> ccode(expr, type_aliases={real: float80})
'8*M_SQRT2l*powl(tau, 7.0L/2.0L)'
Simple custom printing can be defined for certain types by passing a
dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
Alternatively, the dictionary value can be a list of tuples i.e.
[(argument_test, cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")],
... "func": "f"
... }
>>> func = Function('func')
>>> ccode(func(Abs(x) + ceiling(x)), standard='C89', user_functions=custom_functions)
'f(fabs(x) + CEIL(x))'
or if the C-function takes a subset of the original arguments:
>>> ccode(2**x + 3**x, standard='C99', user_functions={'Pow': [
... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e),
... (lambda b, e: b != 2, 'pow')]})
'exp2(x) + pow(3, x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(ccode(expr, tau, standard='C89'))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> ccode(e.rhs, assign_to=e.lhs, contract=False, standard='C89')
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(ccode(mat, A, standard='C89'))
A[0] = pow(x, 2);
if (x > 0) {
A[1] = x + 1;
}
else {
A[1] = x;
}
A[2] = sin(x);
"""
from sympy.printing.c import c_code_printers
return c_code_printers[standard.lower()](settings).doprint(expr, assign_to)
def print_ccode(expr, **settings):
"""Prints C representation of the given expression."""
print(ccode(expr, **settings))
def fcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of fortran code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
DEPRECATED. Use type_mappings instead. The precision for numbers such
as pi [default=17].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
source_format : optional
The source format can be either 'fixed' or 'free'. [default='fixed']
standard : integer, optional
The Fortran standard to be followed. This is specified as an integer.
Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is 77.
Note that currently the only distinction internally is between
standards before 95, and those 95 and after. This may change later as
more features are added.
name_mangling : bool, optional
If True, then variables that would become identical in
case-insensitive Fortran are mangled by appending a different number
of ``_`` characters at the end. If False, SymPy will not interfere with the
naming of variables. [default=True]
Examples
========
>>> from sympy import fcode, symbols, Rational, sin, ceiling, floor
>>> x, tau = symbols("x, tau")
>>> fcode((2*tau)**Rational(7, 2))
' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)'
>>> fcode(sin(x), assign_to="s")
' s = sin(x)'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "floor": [(lambda x: not x.is_integer, "FLOOR1"),
... (lambda x: x.is_integer, "FLOOR2")]
... }
>>> fcode(floor(x) + ceiling(x), user_functions=custom_functions)
' CEIL(x) + FLOOR1(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(fcode(expr, tau))
if (x > 0) then
tau = x + 1
else
tau = x
end if
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> fcode(e.rhs, assign_to=e.lhs, contract=False)
' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(fcode(mat, A))
A(1, 1) = x**2
if (x > 0) then
A(2, 1) = x + 1
else
A(2, 1) = x
end if
A(3, 1) = sin(x)
"""
from sympy.printing.fortran import FCodePrinter
return FCodePrinter(settings).doprint(expr, assign_to)
def print_fcode(expr, **settings):
"""Prints the Fortran representation of the given expression.
See fcode for the meaning of the optional arguments.
"""
print(fcode(expr, **settings))
def cxxcode(expr, assign_to=None, standard='c++11', **settings):
""" C++ equivalent of :func:`~.ccode`. """
from sympy.printing.cxx import cxx_code_printers
return cxx_code_printers[standard.lower()](settings).doprint(expr, assign_to)
|
9db3ec5c5afa7cffd2a742a6455d5828ebf47c801b33576e7eeb854d2daaa5f3 | from __future__ import annotations
from typing import Any
from sympy.external import import_module
from sympy.printing.printer import Printer
from sympy.utilities.iterables import is_sequence
import sympy
from functools import partial
aesara = import_module('aesara')
if aesara:
aes = aesara.scalar
aet = aesara.tensor
from aesara.tensor import nlinalg
from aesara.tensor.elemwise import Elemwise
from aesara.tensor.elemwise import DimShuffle
mapping = {
sympy.Add: aet.add,
sympy.Mul: aet.mul,
sympy.Abs: aet.abs,
sympy.sign: aet.sgn,
sympy.ceiling: aet.ceil,
sympy.floor: aet.floor,
sympy.log: aet.log,
sympy.exp: aet.exp,
sympy.sqrt: aet.sqrt,
sympy.cos: aet.cos,
sympy.acos: aet.arccos,
sympy.sin: aet.sin,
sympy.asin: aet.arcsin,
sympy.tan: aet.tan,
sympy.atan: aet.arctan,
sympy.atan2: aet.arctan2,
sympy.cosh: aet.cosh,
sympy.acosh: aet.arccosh,
sympy.sinh: aet.sinh,
sympy.asinh: aet.arcsinh,
sympy.tanh: aet.tanh,
sympy.atanh: aet.arctanh,
sympy.re: aet.real,
sympy.im: aet.imag,
sympy.arg: aet.angle,
sympy.erf: aet.erf,
sympy.gamma: aet.gamma,
sympy.loggamma: aet.gammaln,
sympy.Pow: aet.pow,
sympy.Eq: aet.eq,
sympy.StrictGreaterThan: aet.gt,
sympy.StrictLessThan: aet.lt,
sympy.LessThan: aet.le,
sympy.GreaterThan: aet.ge,
sympy.And: aet.bitwise_and, # bitwise
sympy.Or: aet.bitwise_or, # bitwise
sympy.Not: aet.invert, # bitwise
sympy.Xor: aet.bitwise_xor, # bitwise
sympy.Max: aet.maximum, # SymPy accepts >2 inputs, Aesara only 2
sympy.Min: aet.minimum, # SymPy accepts >2 inputs, Aesara only 2
sympy.conjugate: aet.conj,
sympy.core.numbers.ImaginaryUnit: lambda:aet.complex(0,1),
# Matrices
sympy.MatAdd: Elemwise(aes.add),
sympy.HadamardProduct: Elemwise(aes.mul),
sympy.Trace: nlinalg.trace,
sympy.Determinant : nlinalg.det,
sympy.Inverse: nlinalg.matrix_inverse,
sympy.Transpose: DimShuffle((False, False), [1, 0]),
}
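# Illustrative sketch (assumes a working Aesara installation; not part of the
# original source): the table above is what lets a SymPy expression be rebuilt
# as an Aesara graph, e.g.
#
#     import sympy
#     from sympy.printing.aesaracode import aesara_code
#     x, y = sympy.symbols('x y')
#     graph = aesara_code(sympy.sin(x) + sympy.exp(y))  # uses aet.sin, aet.exp, aet.add
#
# where ``graph`` is an aesara.graph.basic.Variable built from the mapped
# operations.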
class AesaraPrinter(Printer):
""" Code printer which creates Aesara symbolic expression graphs.
Parameters
==========
cache : dict
Cache dictionary to use. If None (default), the global cache is used.
To create a printer which does not depend on or alter global state,
pass an empty dictionary. Note: the dictionary is not
copied on initialization of the printer and will be updated in-place,
so using the same dict object when creating multiple printers or making
multiple calls to :func:`.aesara_code` or :func:`.aesara_function` means
the cache is shared between all these applications.
Attributes
==========
cache : dict
A cache of Aesara variables which have been created for SymPy
symbol-like objects (e.g. :class:`sympy.core.symbol.Symbol` or
:class:`sympy.matrices.expressions.MatrixSymbol`). This is used to
ensure that all references to a given symbol in an expression (or
multiple expressions) are printed as the same Aesara variable, which is
created only once. Symbols are differentiated only by name and type. The
format of the cache's contents should be considered opaque to the user.
"""
printmethod = "_aesara"
def __init__(self, *args, **kwargs):
self.cache = kwargs.pop('cache', {})
super().__init__(*args, **kwargs)
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
""" Get the cache key for a SymPy object.
Parameters
==========
s : sympy.core.basic.Basic
SymPy object to get key for.
name : str
Name of object, if it does not have a ``name`` attribute.
"""
if name is None:
name = s.name
return (name, type(s), s.args, dtype, broadcastable)
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
"""
Get the Aesara variable for a SymPy symbol from the cache, or create it
if it does not exist.
"""
# Defaults
if name is None:
name = s.name
if dtype is None:
dtype = 'floatX'
if broadcastable is None:
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if key in self.cache:
return self.cache[key]
value = aet.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value
def _print_Symbol(self, s, **kwargs):
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, dtype=dtype, broadcastable=bc)
def _print_AppliedUndef(self, s, **kwargs):
name = str(type(s)) + '_' + str(s.args[0])
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, name=name, dtype=dtype, broadcastable=bc)
def _print_Basic(self, expr, **kwargs):
op = mapping[type(expr)]
children = [self._print(arg, **kwargs) for arg in expr.args]
return op(*children)
def _print_Number(self, n, **kwargs):
# Integers already taken care of below, interpret as float
return float(n.evalf())
def _print_MatrixSymbol(self, X, **kwargs):
dtype = kwargs.get('dtypes', {}).get(X)
return self._get_or_create(X, dtype=dtype, broadcastable=(None, None))
def _print_DenseMatrix(self, X, **kwargs):
if not hasattr(aet, 'stacklists'):
raise NotImplementedError(
"Matrix translation not yet supported in this version of Aesara")
return aet.stacklists([
[self._print(arg, **kwargs) for arg in L]
for L in X.tolist()
])
_print_ImmutableMatrix = _print_ImmutableDenseMatrix = _print_DenseMatrix
def _print_MatMul(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = children[0]
for child in children[1:]:
result = aet.dot(result, child)
return result
def _print_MatPow(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = 1
if isinstance(children[1], int) and children[1] > 0:
for i in range(children[1]):
result = aet.dot(result, children[0])
else:
raise NotImplementedError('''Only positive integer
powers of matrices can be handled by Aesara at the moment''')
return result
def _print_MatrixSlice(self, expr, **kwargs):
parent = self._print(expr.parent, **kwargs)
rowslice = self._print(slice(*expr.rowslice), **kwargs)
colslice = self._print(slice(*expr.colslice), **kwargs)
return parent[rowslice, colslice]
def _print_BlockMatrix(self, expr, **kwargs):
nrows, ncols = expr.blocks.shape
blocks = [[self._print(expr.blocks[r, c], **kwargs)
for c in range(ncols)]
for r in range(nrows)]
return aet.join(0, *[aet.join(1, *row) for row in blocks])
def _print_slice(self, expr, **kwargs):
return slice(*[self._print(i, **kwargs)
if isinstance(i, sympy.Basic) else i
for i in (expr.start, expr.stop, expr.step)])
def _print_Pi(self, expr, **kwargs):
return 3.141592653589793
def _print_Piecewise(self, expr, **kwargs):
import numpy as np
e, cond = expr.args[0].args # First condition and corresponding value
# Print conditional expression and value for first condition
p_cond = self._print(cond, **kwargs)
p_e = self._print(e, **kwargs)
# One condition only
if len(expr.args) == 1:
# Return value if condition else NaN
return aet.switch(p_cond, p_e, np.nan)
# Return value_1 if condition_1 else evaluate remaining conditions
p_remaining = self._print(sympy.Piecewise(*expr.args[1:]), **kwargs)
return aet.switch(p_cond, p_e, p_remaining)
def _print_Rational(self, expr, **kwargs):
return aet.true_div(self._print(expr.p, **kwargs),
self._print(expr.q, **kwargs))
def _print_Integer(self, expr, **kwargs):
return expr.p
def _print_factorial(self, expr, **kwargs):
return self._print(sympy.gamma(expr.args[0] + 1), **kwargs)
def _print_Derivative(self, deriv, **kwargs):
from aesara.gradient import Rop
rv = self._print(deriv.expr, **kwargs)
for var in deriv.variables:
var = self._print(var, **kwargs)
rv = Rop(rv, var, aet.ones_like(var))
return rv
def emptyPrinter(self, expr):
return expr
def doprint(self, expr, dtypes=None, broadcastables=None):
""" Convert a SymPy expression to a Aesara graph variable.
The ``dtypes`` and ``broadcastables`` arguments are used to specify the
data type, dimension, and broadcasting behavior of the Aesara variables
corresponding to the free symbols in ``expr``. Each is a mapping from
SymPy symbols to the value of the corresponding argument to
``aesara.tensor.var.TensorVariable``.
See the corresponding `documentation page`__ for more information on
broadcasting in Aesara.
.. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html
Parameters
==========
expr : sympy.core.expr.Expr
SymPy expression to print.
dtypes : dict
Mapping from SymPy symbols to Aesara datatypes to use when creating
new Aesara variables for those symbols. Corresponds to the ``dtype``
argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``
for symbols not included in the mapping.
broadcastables : dict
Mapping from SymPy symbols to the value of the ``broadcastable``
argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara
variables for those symbols. Defaults to the empty tuple for symbols
not included in the mapping (resulting in a scalar).
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in an Aesara
symbolic expression graph.
"""
if dtypes is None:
dtypes = {}
if broadcastables is None:
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)
global_cache: dict[Any, Any] = {}
def aesara_code(expr, cache=None, **kwargs):
"""
Convert a SymPy expression into an Aesara graph variable.
Parameters
==========
expr : sympy.core.expr.Expr
SymPy expression object to convert.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in an Aesara symbolic
expression graph.
"""
if not aesara:
raise ImportError("aesara is required for aesara_code")
if cache is None:
cache = global_cache
return AesaraPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
r"""
Get value of ``broadcastables`` argument to :func:`.aesara_code` from
keyword arguments to :func:`.aesara_function`.
Included for backwards compatibility.
Parameters
==========
inputs
Sequence of input symbols.
dim : int
Common number of dimensions for all inputs. Overrides other arguments
if given.
dims : dict
Mapping from input symbols to number of dimensions. Overrides
``broadcastables`` argument if given.
broadcastables : dict
Explicit value of ``broadcastables`` argument to
:meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.
Returns
=======
dict
Dictionary mapping elements of ``inputs`` to their "broadcastable"
values (tuple of ``bool``\ s).
"""
if dim is not None:
return {s: (False,) * dim for s in inputs}
if dims is not None:
maxdim = max(dims.values())
return {
s: (False,) * d + (True,) * (maxdim - d)
for s, d in dims.items()
}
if broadcastables is not None:
return broadcastables
return {}
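# Illustrative sketch (not in the original source): how the three keyword
# styles collapse to a ``broadcastables`` mapping, following the logic above
# for SymPy symbols x and y:
#
#     dim_handling([x, y], dim=2)               # {x: (False, False), y: (False, False)}
#     dim_handling([x, y], dims={x: 1, y: 2})   # {x: (False, True), y: (False, False)}
#     dim_handling([x, y], broadcastables=bc)   # returns bc unchanged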
def aesara_function(inputs, outputs, scalar=False, *,
dim=None, dims=None, broadcastables=None, **kwargs):
"""
Create an Aesara function from SymPy expressions.
The inputs and outputs are converted to Aesara variables using
:func:`.aesara_code` and then passed to ``aesara.function``.
Parameters
==========
inputs
Sequence of symbols which constitute the inputs of the function.
outputs
Sequence of expressions which constitute the output(s) of the
function. The free symbols of each expression must be a subset of
``inputs``.
scalar : bool
Convert 0-dimensional arrays in output to scalars. This will return a
Python wrapper function around the Aesara function object.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
dims : dict
Alternative to ``broadcastables`` argument. Mapping from elements of
``inputs`` to integers indicating the dimension of their associated
arrays/tensors. Overrides ``broadcastables`` argument if given.
dim : int
Another alternative to the ``broadcastables`` argument. Common number of
dimensions to use for all arrays/tensors.
``aesara_function([x, y], [...], dim=2)`` is equivalent to using
``broadcastables={x: (False, False), y: (False, False)}``.
Returns
=======
callable
A callable object which takes values of ``inputs`` as positional
arguments and returns an output array for each of the expressions
in ``outputs``. If ``outputs`` is a single expression the function will
return a Numpy array; if it is a list of multiple expressions the
function will return a list of arrays. See the description of the ``scalar``
argument above for how 0-dimensional outputs are handled.
The returned object will either be an instance of
``aesara.compile.function.types.Function`` or a Python wrapper
function around one. In both cases, the returned value will have a
``aesara_function`` attribute which points to the return value of
``aesara.function``.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.printing.aesaracode import aesara_function
A simple function with one input and one output:
>>> f1 = aesara_function([x], [x**2 - 1], scalar=True)
>>> f1(3)
8.0
A function with multiple inputs and one output:
>>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)
>>> f2(3, 4, 2)
5.0
A function with multiple inputs and multiple outputs:
>>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)
>>> f3(2, 3)
[13.0, -5.0]
See also
========
dim_handling
"""
if not aesara:
raise ImportError("Aesara is required for aesara_function")
# Pop off non-aesara keyword args
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(
inputs, dim=dim, dims=dims, broadcastables=broadcastables,
)
# Print inputs/outputs
code = partial(aesara_code, cache=cache, dtypes=dtypes,
broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
# Wrap constant outputs (plain Python numbers) as Aesara variables
toutputs = [output if isinstance(output, aesara.graph.basic.Variable) else aet.as_tensor_variable(output) for output in toutputs]
if len(toutputs) == 1:
toutputs = toutputs[0]
# Compile aesara func
func = aesara.function(tinputs, toutputs, **kwargs)
is_0d = [len(o.variable.broadcastable) == 0 for o in func.outputs]
# No wrapper required
if not scalar or not any(is_0d):
func.aesara_function = func
return func
# Create wrapper to convert 0-dimensional outputs to scalars
def wrapper(*args):
out = func(*args)
# out can be array(1.0) or [array(1.0), array(2.0)]
if is_sequence(out):
return [o[()] if is_0d[i] else o for i, o in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.aesara_function = func
return wrapper
|
12cd2f7c81d49fa821be8a263ed88ae63f3e488445f92dd3d7b881e782a18700 | import os
from os.path import join
import shutil
import tempfile
try:
from subprocess import STDOUT, CalledProcessError, check_output
except ImportError:
pass
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.misc import debug
from .latex import latex
__doctest_requires__ = {('preview',): ['pyglet']}
def _check_output_no_window(*args, **kwargs):
# Avoid showing a cmd.exe window when running this
# on Windows
if os.name == 'nt':
creation_flag = 0x08000000 # CREATE_NO_WINDOW
else:
creation_flag = 0 # Default value
return check_output(*args, creationflags=creation_flag, **kwargs)
def system_default_viewer(fname, fmt):
""" Open fname with the default system viewer.
In practice, it is impossible for Python to know when the system viewer is
done. For this reason, we ensure the passed file will not be deleted out
from under it, and this function does not attempt to block.
"""
# copy to a new temporary file that will not be deleted
with tempfile.NamedTemporaryFile(prefix='sympy-preview-',
suffix=os.path.splitext(fname)[1],
delete=False) as temp_f:
with open(fname, 'rb') as f:
shutil.copyfileobj(f, temp_f)
import platform
if platform.system() == 'Darwin':
import subprocess
subprocess.call(('open', temp_f.name))
elif platform.system() == 'Windows':
os.startfile(temp_f.name)
else:
import subprocess
subprocess.call(('xdg-open', temp_f.name))
def pyglet_viewer(fname, fmt):
try:
from pyglet import window, image, gl
from pyglet.window import key
from pyglet.image.codecs import ImageDecodeException
except ImportError:
raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
try:
img = image.load(fname)
except ImageDecodeException:
raise ValueError("pyglet preview does not work for '{}' files.".format(fmt))
offset = 25
config = gl.Config(double_buffer=False)
win = window.Window(
width=img.width + 2*offset,
height=img.height + 2*offset,
caption="SymPy",
resizable=False,
config=config
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
def _get_latex_main(expr, *, preamble=None, packages=(), extra_preamble=None,
euler=True, fontsize=None, **latex_settings):
"""
Generate string of a LaTeX document rendering ``expr``.
"""
if preamble is None:
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
if extra_preamble:
package_includes += extra_preamble
if not fontsize:
fontsize = "12pt"
elif isinstance(fontsize, int):
fontsize = "{}pt".format(fontsize)
preamble = r"""\documentclass[varwidth,%s]{standalone}
%s
\begin{document}
""" % (fontsize, package_includes)
else:
if packages or extra_preamble:
raise ValueError("The \"packages\" or \"extra_preamble\" keywords"
"must not be set if a "
"custom LaTeX preamble was specified")
if isinstance(expr, str):
latex_string = expr
else:
latex_string = ('$\\displaystyle ' +
latex(expr, mode='plain', **latex_settings) +
'$')
return preamble + '\n' + latex_string + '\n\n' + r"\end{document}"
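# Illustrative sketch (not part of the original source): with the default
# settings the string assembled above has this overall shape (package list
# abbreviated):
#
#     \documentclass[varwidth,12pt]{standalone}
#     \usepackage{amsmath}
#     ...
#     \begin{document}
#     $\displaystyle <latex of expr>$
#     \end{document}
#
# which preview() below writes to texput.tex before running latex.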
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
filename=None, outputbuffer=None, preamble=None, dvioptions=None,
outputTexFile=None, extra_preamble=None, fontsize=None,
**latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using the available TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user defined
one. By default png output is generated.
By default pretty Euler fonts are used for typesetting (they were used to
typeset the well known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer default AMS fonts or your
system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
argument.
To use viewer auto-detection, let's say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png')
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example we will take 'dvi' output format. If
you would run
>>> preview(x + y, output='dvi')
then 'preview' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince first, then kdvi and
xdvi). If nothing is found, it will fall back to using a system file
association (via ``open`` and ``xdg-open``). To always use your system file
association without searching for the above readers, use
>>> from sympy.printing.preview import system_default_viewer
>>> preview(x + y, output='dvi', viewer=system_default_viewer)
If this still does not find the viewer you want, it can be set explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
This will skip auto-detection and will run the user-specified
'superior-dvi-viewer'. If ``preview`` fails to find it on your system it will
gracefully raise an exception.
You may also enter ``'file'`` for the viewer argument. In that case
``filename`` must be set, and 'preview' writes the generated file to that
filename instead of displaying it.
There is also support for writing to a ``io.BytesIO`` like object, which
needs to be passed to the ``outputbuffer`` argument.
>>> from io import BytesIO
>>> obj = BytesIO()
>>> preview(x + y, output='png', viewer='BytesIO',
... outputbuffer=obj)
The LaTeX preamble can be customized by setting the 'preamble' keyword
argument. This can be used, e.g., to set a different font size, use a
custom documentclass or import certain set of LaTeX packages.
>>> preamble = "\\documentclass[10pt]{article}\n" \
... "\\usepackage{amsmath,amsfonts}\\begin{document}"
>>> preview(x + y, output='png', preamble=preamble)
It is also possible to use the standard preamble and provide additional
information to the preamble using the ``extra_preamble`` keyword argument.
>>> from sympy import sin
>>> extra_preamble = "\\renewcommand{\\sin}{\\cos}"
>>> preview(sin(x), output='png', extra_preamble=extra_preamble)
If the value of 'output' is different from 'dvi' then command line
options can be set ('dvioptions' argument) for the execution of the
'dvi'+output conversion tool. These options have to be in the form of a
list of strings (see ``subprocess.Popen``).
Additional keyword args will be passed to the :func:`~sympy.printing.latex.latex` call,
e.g., the ``symbol_names`` flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd: r'\ddot{\varphi}'})
For post-processing the generated TeX File can be written to a file by
passing the desired filename to the 'outputTexFile' keyword
argument. To write the TeX code to a file named
``"sample.tex"`` and run the default png viewer to display the resulting
bitmap, do
>>> preview(x + y, outputTexFile="sample.tex")
"""
# pyglet is the default for png
if viewer is None and output == "png":
try:
import pyglet # noqa: F401
except ImportError:
pass
else:
viewer = pyglet_viewer
# look up a known application
if viewer is None:
# sorted in order from most pretty to most ugly
# very debatable, but indeed 'gv' looks awful :)
candidates = {
"dvi": [ "evince", "okular", "kdvi", "xdvi" ],
"ps": [ "evince", "okular", "gsview", "gv" ],
"pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
for candidate in candidates.get(output, []):
path = shutil.which(candidate)
if path is not None:
viewer = path
break
# otherwise, use the system default for file association
if viewer is None:
viewer = system_default_viewer
if viewer == "file":
if filename is None:
raise ValueError("filename has to be specified if viewer=\"file\"")
elif viewer == "BytesIO":
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"BytesIO\"")
elif not callable(viewer) and not shutil.which(viewer):
raise OSError("Unrecognized viewer: %s" % viewer)
latex_main = _get_latex_main(expr, preamble=preamble, packages=packages,
euler=euler, extra_preamble=extra_preamble,
fontsize=fontsize, **latex_settings)
debug("Latex code:")
debug(latex_main)
with tempfile.TemporaryDirectory() as workdir:
with open(join(workdir, 'texput.tex'), 'w', encoding='utf-8') as fh:
fh.write(latex_main)
if outputTexFile is not None:
shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
if not shutil.which('latex'):
raise RuntimeError("latex program is not installed")
try:
_check_output_no_window(
['latex', '-halt-on-error', '-interaction=nonstopmode',
'texput.tex'],
cwd=workdir,
stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'latex' exited abnormally with the following output:\n%s" %
e.output)
src = "texput.%s" % (output)
if output != "dvi":
# in order of preference
commandnames = {
"ps": ["dvips"],
"pdf": ["dvipdfmx", "dvipdfm", "dvipdf"],
"png": ["dvipng"],
"svg": ["dvisvgm"],
}
try:
cmd_variants = commandnames[output]
except KeyError:
raise ValueError("Invalid output format: %s" % output) from None
# find an appropriate command
for cmd_variant in cmd_variants:
cmd_path = shutil.which(cmd_variant)
if cmd_path:
cmd = [cmd_path]
break
else:
if len(cmd_variants) > 1:
raise RuntimeError("None of %s are installed" % ", ".join(cmd_variants))
else:
raise RuntimeError("%s is not installed" % cmd_variants[0])
defaultoptions = {
"dvipng": ["-T", "tight", "-z", "9", "--truecolor"],
"dvisvgm": ["--no-fonts"],
}
commandend = {
"dvips": ["-o", src, "texput.dvi"],
"dvipdf": ["texput.dvi", src],
"dvipdfm": ["-o", src, "texput.dvi"],
"dvipdfmx": ["-o", src, "texput.dvi"],
"dvipng": ["-o", src, "texput.dvi"],
"dvisvgm": ["-o", src, "texput.dvi"],
}
if dvioptions is not None:
cmd.extend(dvioptions)
else:
cmd.extend(defaultoptions.get(cmd_variant, []))
cmd.extend(commandend[cmd_variant])
try:
_check_output_no_window(cmd, cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s' exited abnormally with the following output:\n%s" %
(' '.join(cmd), e.output))
if viewer == "file":
shutil.move(join(workdir, src), filename)
elif viewer == "BytesIO":
with open(join(workdir, src), 'rb') as fh:
outputbuffer.write(fh.read())
elif callable(viewer):
viewer(join(workdir, src), fmt=output)
else:
try:
_check_output_no_window(
[viewer, src], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s %s' exited abnormally with the following output:\n%s" %
(viewer, src, e.output))
|
a4c5167438747338ac3d85ad0f2a29ee1acbaded34eabb51ff8b6633216ee8cd | """
Javascript code printer
The JavascriptCodePrinter converts single SymPy expressions into single
Javascript expressions, using the functions defined in the Javascript
Math object where possible.
"""
from __future__ import annotations
from typing import Any
from sympy.core import S
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
# dictionary mapping SymPy function to (argument_conditions, Javascript_function).
# Used in JavascriptCodePrinter._print_Function(self)
known_functions = {
'Abs': 'Math.abs',
'acos': 'Math.acos',
'acosh': 'Math.acosh',
'asin': 'Math.asin',
'asinh': 'Math.asinh',
'atan': 'Math.atan',
'atan2': 'Math.atan2',
'atanh': 'Math.atanh',
'ceiling': 'Math.ceil',
'cos': 'Math.cos',
'cosh': 'Math.cosh',
'exp': 'Math.exp',
'floor': 'Math.floor',
'log': 'Math.log',
'Max': 'Math.max',
'Min': 'Math.min',
'sign': 'Math.sign',
'sin': 'Math.sin',
'sinh': 'Math.sinh',
'tan': 'Math.tan',
'tanh': 'Math.tanh',
}
class JavascriptCodePrinter(CodePrinter):
""""A Printer to convert Python expressions to strings of JavaScript code
"""
printmethod = '_javascript'
language = 'JavaScript'
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
return "var {} = {};".format(name, value.evalf(self._settings['precision']))
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (var %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
for i in indices:
# Javascript arrays start at 0 and end at dimension-1
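            # e.g. an Idx('i', 5) (lower=0, upper=4) yields
            # "for (var i=0; i<5; i++){"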
open_lines.append(loopstart % {
'varble': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '1/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return 'Math.sqrt(%s)' % self._print(expr.base)
elif expr.exp == S.One/3:
return 'Math.cbrt(%s)' % self._print(expr.base)
else:
return 'Math.pow(%s, %s)' % (self._print(expr.base),
self._print(expr.exp))
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d/%d' % (p, q)
def _print_Mod(self, expr):
num, den = expr.args
PREC = precedence(expr)
snum, sden = [self.parenthesize(arg, PREC) for arg in expr.args]
# % is remainder (same sign as numerator), not modulo (same sign as
# denominator), in js. Hence, % only works as modulo if both numbers
# have the same sign
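        # For example (illustrative): in JS, (-7) % 3 === -1, whereas SymPy's
        # Mod(-7, 3) == 2; the wrapped form ((-7 % 3) + 3) % 3 === 2 recovers
        # the mathematical modulo.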
if (num.is_nonnegative and den.is_nonnegative or
num.is_nonpositive and den.is_nonpositive):
return f"{snum} % {sden}"
return f"(({snum} % {sden}) + {sden}) % {sden}"
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
# calculate index for 1d array
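        # (row-major flattening: e.g. an Indexed A[i, j] with shape (m, n)
        # uses the flat index i*n + j)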
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Exp1(self, expr):
return "Math.E"
def _print_Pi(self, expr):
return 'Math.PI'
def _print_Infinity(self, expr):
return 'Number.POSITIVE_INFINITY'
def _print_NegativeInfinity(self, expr):
return 'Number.NEGATIVE_INFINITY'
def _print_Piecewise(self, expr):
from sympy.codegen.ast import Assignment
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_MatrixElement(self, expr):
return "{}[{}]".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True),
expr.j + expr.i*expr.parent.shape[1])
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def jscode(expr, assign_to=None, **settings):
"""Converts an expr to a string of javascript code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
        The precision for numbers such as pi [default=17].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, js_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> jscode((2*tau)**Rational(7, 2))
'8*Math.sqrt(2)*Math.pow(tau, 7/2)'
>>> jscode(sin(x), assign_to="s")
's = Math.sin(x);'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
js_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)``, then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(jscode(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> jscode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(jscode(mat, A))
A[0] = Math.pow(x, 2);
if (x > 0) {
A[1] = x + 1;
}
else {
A[1] = x;
}
A[2] = Math.sin(x);
"""
return JavascriptCodePrinter(settings).doprint(expr, assign_to)
def print_jscode(expr, **settings):
"""Prints the Javascript representation of the given expression.
See jscode for the meaning of the optional arguments.
"""
print(jscode(expr, **settings))
|
9bb3017b13e3df55fe7f2ae39017363f831f0efee64ae857af98806067687554 | """
A Printer for generating executable code.
The most important function here is srepr that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from __future__ import annotations
from typing import Any
from sympy.core.function import AppliedUndef
from sympy.core.mul import Mul
from mpmath.libmp import repr_dps, to_str as mlib_to_str
from .printer import Printer, print_function
class ReprPrinter(Printer):
printmethod = "_sympyrepr"
_default_settings: dict[str, Any] = {
"order": None,
"perm_cyclic" : True,
}
def reprify(self, args, sep):
"""
Prints each item in `args` and joins them with `sep`.
"""
return sep.join([self.doprint(item) for item in args])
def emptyPrinter(self, expr):
"""
The fallback printer.
"""
if isinstance(expr, str):
return expr
elif hasattr(expr, "__srepr__"):
return expr.__srepr__()
elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
l = []
for o in expr.args:
l.append(self._print(o))
return expr.__class__.__name__ + '(%s)' % ', '.join(l)
elif hasattr(expr, "__module__") and hasattr(expr, "__name__"):
return "<'%s.%s'>" % (expr.__module__, expr.__name__)
else:
return str(expr)
def _print_Add(self, expr, order=None):
args = self._as_ordered_terms(expr, order=order)
args = map(self._print, args)
clsname = type(expr).__name__
return clsname + "(%s)" % ", ".join(args)
def _print_Cycle(self, expr):
return expr.__repr__()
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
from sympy.utilities.exceptions import sympy_deprecation_warning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=7,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
if not expr.size:
return 'Permutation()'
# before taking Cycle notation, see if the last element is
# a singleton and move it to the head of the string
s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):]
last = s.rfind('(')
if not last == 0 and ',' not in s[last:]:
s = s[last:] + s[:last]
            return 'Permutation%s' % s
else:
s = expr.support()
if not s:
if expr.size < 5:
return 'Permutation(%s)' % str(expr.array_form)
return 'Permutation([], size=%s)' % expr.size
trim = str(expr.array_form[:s[-1] + 1]) + ', size=%s' % expr.size
use = full = str(expr.array_form)
if len(trim) < len(full):
use = trim
return 'Permutation(%s)' % use
def _print_Function(self, expr):
r = self._print(expr.func)
r += '(%s)' % ', '.join([self._print(a) for a in expr.args])
return r
def _print_Heaviside(self, expr):
# Same as _print_Function but uses pargs to suppress default value for
# 2nd arg.
r = self._print(expr.func)
r += '(%s)' % ', '.join([self._print(a) for a in expr.pargs])
return r
def _print_FunctionClass(self, expr):
if issubclass(expr, AppliedUndef):
return 'Function(%r)' % (expr.__name__)
else:
return expr.__name__
def _print_Half(self, expr):
return 'Rational(1, 2)'
def _print_RationalConstant(self, expr):
return str(expr)
def _print_AtomicExpr(self, expr):
return str(expr)
def _print_NumberSymbol(self, expr):
return str(expr)
def _print_Integer(self, expr):
return 'Integer(%i)' % expr.p
def _print_Complexes(self, expr):
return 'Complexes'
def _print_Integers(self, expr):
return 'Integers'
def _print_Naturals(self, expr):
return 'Naturals'
def _print_Naturals0(self, expr):
return 'Naturals0'
def _print_Rationals(self, expr):
return 'Rationals'
def _print_Reals(self, expr):
return 'Reals'
def _print_EmptySet(self, expr):
return 'EmptySet'
def _print_UniversalSet(self, expr):
return 'UniversalSet'
def _print_EmptySequence(self, expr):
return 'EmptySequence'
def _print_list(self, expr):
return "[%s]" % self.reprify(expr, ", ")
def _print_dict(self, expr):
sep = ", "
dict_kvs = ["%s: %s" % (self.doprint(key), self.doprint(value)) for key, value in expr.items()]
return "{%s}" % sep.join(dict_kvs)
def _print_set(self, expr):
if not expr:
return "set()"
return "{%s}" % self.reprify(expr, ", ")
def _print_MatrixBase(self, expr):
# special case for some empty matrices
if (expr.rows == 0) ^ (expr.cols == 0):
return '%s(%s, %s, %s)' % (expr.__class__.__name__,
self._print(expr.rows),
self._print(expr.cols),
self._print([]))
l = []
for i in range(expr.rows):
l.append([])
for j in range(expr.cols):
l[-1].append(expr[i, j])
return '%s(%s)' % (expr.__class__.__name__, self._print(l))
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_NaN(self, expr):
return "nan"
def _print_Mul(self, expr, order=None):
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
args = map(self._print, args)
clsname = type(expr).__name__
return clsname + "(%s)" % ", ".join(args)
def _print_Rational(self, expr):
return 'Rational(%s, %s)' % (self._print(expr.p), self._print(expr.q))
def _print_PythonRational(self, expr):
return "%s(%d, %d)" % (expr.__class__.__name__, expr.p, expr.q)
def _print_Fraction(self, expr):
return 'Fraction(%s, %s)' % (self._print(expr.numerator), self._print(expr.denominator))
def _print_Float(self, expr):
r = mlib_to_str(expr._mpf_, repr_dps(expr._prec))
return "%s('%s', precision=%i)" % (expr.__class__.__name__, r, expr._prec)
def _print_Sum2(self, expr):
return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
self._print(expr.a), self._print(expr.b))
def _print_Str(self, s):
return "%s(%s)" % (s.__class__.__name__, self._print(s.name))
def _print_Symbol(self, expr):
d = expr._assumptions.generator
# print the dummy_index like it was an assumption
if expr.is_Dummy:
d['dummy_index'] = expr.dummy_index
if d == {}:
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
else:
attr = ['%s=%s' % (k, v) for k, v in d.items()]
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.name), ', '.join(attr))
def _print_CoordinateSymbol(self, expr):
d = expr._assumptions.generator
if d == {}:
return "%s(%s, %s)" % (
expr.__class__.__name__,
self._print(expr.coord_sys),
self._print(expr.index)
)
else:
attr = ['%s=%s' % (k, v) for k, v in d.items()]
return "%s(%s, %s, %s)" % (
expr.__class__.__name__,
self._print(expr.coord_sys),
self._print(expr.index),
', '.join(attr)
)
def _print_Predicate(self, expr):
return "Q.%s" % expr.name
def _print_AppliedPredicate(self, expr):
# will be changed to just expr.args when args overriding is removed
args = expr._args
return "%s(%s)" % (expr.__class__.__name__, self.reprify(args, ", "))
def _print_str(self, expr):
return repr(expr)
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.reprify(expr, ", ")
def _print_WildFunction(self, expr):
return "%s('%s')" % (expr.__class__.__name__, expr.name)
def _print_AlgebraicNumber(self, expr):
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.root), self._print(expr.coeffs()))
def _print_PolyRing(self, ring):
return "%s(%s, %s, %s)" % (ring.__class__.__name__,
self._print(ring.symbols), self._print(ring.domain), self._print(ring.order))
def _print_FracField(self, field):
return "%s(%s, %s, %s)" % (field.__class__.__name__,
self._print(field.symbols), self._print(field.domain), self._print(field.order))
def _print_PolyElement(self, poly):
terms = list(poly.terms())
terms.sort(key=poly.ring.order, reverse=True)
return "%s(%s, %s)" % (poly.__class__.__name__, self._print(poly.ring), self._print(terms))
def _print_FracElement(self, frac):
numer_terms = list(frac.numer.terms())
numer_terms.sort(key=frac.field.order, reverse=True)
denom_terms = list(frac.denom.terms())
denom_terms.sort(key=frac.field.order, reverse=True)
numer = self._print(numer_terms)
denom = self._print(denom_terms)
return "%s(%s, %s, %s)" % (frac.__class__.__name__, self._print(frac.field), numer, denom)
def _print_FractionField(self, domain):
cls = domain.__class__.__name__
field = self._print(domain.field)
return "%s(%s)" % (cls, field)
def _print_PolynomialRingBase(self, ring):
cls = ring.__class__.__name__
dom = self._print(ring.domain)
gens = ', '.join(map(self._print, ring.gens))
order = str(ring.order)
if order != ring.default_order:
orderstr = ", order=" + order
else:
orderstr = ""
return "%s(%s, %s%s)" % (cls, dom, gens, orderstr)
def _print_DMP(self, p):
cls = p.__class__.__name__
rep = self._print(p.rep)
dom = self._print(p.dom)
if p.ring is not None:
ringstr = ", ring=" + self._print(p.ring)
else:
ringstr = ""
return "%s(%s, %s%s)" % (cls, rep, dom, ringstr)
def _print_MonogenicFiniteExtension(self, ext):
# The expanded tree shown by srepr(ext.modulus)
# is not practical.
return "FiniteExtension(%s)" % str(ext.modulus)
def _print_ExtensionElement(self, f):
rep = self._print(f.rep)
ext = self._print(f.ext)
return "ExtElem(%s, %s)" % (rep, ext)
@print_function(ReprPrinter)
def srepr(expr, **settings):
"""return expr in repr form"""
return ReprPrinter(settings).doprint(expr)
|
b09d3efcdb36bc7e396f9a045c83c9b48386f65bc212a1e52514334288950b8e | import typing
import sympy
from sympy.core import Add, Mul
from sympy.core import Symbol, Expr, Float, Rational, Integer, Basic
from sympy.core.function import UndefinedFunction, Function
from sympy.core.relational import Relational, Unequality, Equality, LessThan, GreaterThan, StrictLessThan, StrictGreaterThan
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp, log, Pow
from sympy.functions.elementary.hyperbolic import sinh, cosh, tanh
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import sin, cos, tan, asin, acos, atan, atan2
from sympy.logic.boolalg import And, Or, Xor, Implies, Boolean
from sympy.logic.boolalg import BooleanTrue, BooleanFalse, BooleanFunction, Not, ITE
from sympy.printing.printer import Printer
from sympy.sets import Interval
class SMTLibPrinter(Printer):
printmethod = "_smtlib"
# based on dReal, an automated reasoning tool for solving problems that can be encoded as first-order logic formulas over the real numbers.
# dReal's special strength is in handling problems that involve a wide range of nonlinear real functions.
_default_settings: dict = {
'precision': None,
'known_types': {
bool: 'Bool',
int: 'Int',
float: 'Real'
},
'known_constants': {
# pi: 'MY_VARIABLE_PI_DECLARED_ELSEWHERE',
},
'known_functions': {
Add: '+',
Mul: '*',
Equality: '=',
LessThan: '<=',
GreaterThan: '>=',
StrictLessThan: '<',
StrictGreaterThan: '>',
exp: 'exp',
log: 'log',
Abs: 'abs',
sin: 'sin',
cos: 'cos',
tan: 'tan',
asin: 'arcsin',
acos: 'arccos',
atan: 'arctan',
atan2: 'arctan2',
sinh: 'sinh',
cosh: 'cosh',
tanh: 'tanh',
Min: 'min',
Max: 'max',
Pow: 'pow',
And: 'and',
Or: 'or',
Xor: 'xor',
Not: 'not',
ITE: 'ite',
Implies: '=>',
}
}
symbol_table: dict
def __init__(self, settings: typing.Optional[dict] = None,
symbol_table=None):
settings = settings or {}
self.symbol_table = symbol_table or {}
Printer.__init__(self, settings)
self._precision = self._settings['precision']
self._known_types = dict(self._settings['known_types'])
self._known_constants = dict(self._settings['known_constants'])
self._known_functions = dict(self._settings['known_functions'])
for _ in self._known_types.values(): assert self._is_legal_name(_)
for _ in self._known_constants.values(): assert self._is_legal_name(_)
# for _ in self._known_functions.values(): assert self._is_legal_name(_) # +, *, <, >, etc.
def _is_legal_name(self, s: str):
if not s: return False
if s[0].isnumeric(): return False
return all(_.isalnum() or _ == '_' for _ in s)
def _s_expr(self, op: str, args: typing.Union[list, tuple]) -> str:
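        # e.g. _s_expr('+', [x, 2]) renders as "(+ x 2)" for a Symbol x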
args_str = ' '.join(
a if isinstance(a, str)
else self._print(a)
for a in args
)
return f'({op} {args_str})'
def _print_Function(self, e):
if e in self._known_functions:
op = self._known_functions[e]
elif type(e) in self._known_functions:
op = self._known_functions[type(e)]
elif type(type(e)) == UndefinedFunction:
op = e.name
else:
op = self._known_functions[e] # throw KeyError
return self._s_expr(op, e.args)
def _print_Relational(self, e: Relational):
return self._print_Function(e)
def _print_BooleanFunction(self, e: BooleanFunction):
return self._print_Function(e)
def _print_Expr(self, e: Expr):
return self._print_Function(e)
def _print_Unequality(self, e: Unequality):
if type(e) in self._known_functions:
return self._print_Relational(e) # default
else:
eq_op = self._known_functions[Equality]
not_op = self._known_functions[Not]
return self._s_expr(not_op, [self._s_expr(eq_op, e.args)])
def _print_Piecewise(self, e: Piecewise):
def _print_Piecewise_recursive(args: typing.Union[list, tuple]):
e, c = args[0]
if len(args) == 1:
assert (c is True) or isinstance(c, BooleanTrue)
return self._print(e)
else:
ite = self._known_functions[ITE]
return self._s_expr(ite, [
c, e, _print_Piecewise_recursive(args[1:])
])
return _print_Piecewise_recursive(e.args)
def _print_Interval(self, e: Interval):
if e.start.is_infinite and e.end.is_infinite:
return ''
elif e.start.is_infinite != e.end.is_infinite:
raise ValueError(f'One-sided intervals (`{e}`) are not supported in SMT.')
else:
return f'[{e.start}, {e.end}]'
# todo: Sympy does not support quantifiers yet as of 2022, but quantifiers can be handy in SMT.
# For now, users can extend this class and build in their own quantifier support.
# See `test_quantifier_extensions()` in test_smtlib.py for an example of how this might look.
# def _print_ForAll(self, e: ForAll):
# return self._s('forall', [
# self._s('', [
# self._s(sym.name, [self._type_name(sym), Interval(start, end)])
# for sym, start, end in e.limits
# ]),
# e.function
# ])
def _print_BooleanTrue(self, x: BooleanTrue):
return 'true'
def _print_BooleanFalse(self, x: BooleanFalse):
return 'false'
def _print_Float(self, x: Float):
f = x.evalf(self._precision) if self._precision else x.evalf()
return str(f).rstrip('0')
def _print_float(self, x: float):
return str(x)
def _print_Rational(self, x: Rational):
return self._s_expr('/', [x.p, x.q])
def _print_Integer(self, x: Integer):
assert x.q == 1
return str(x.p)
def _print_int(self, x: int):
return str(x)
def _print_Symbol(self, x: Symbol):
assert self._is_legal_name(x.name)
return x.name
def _print_NumberSymbol(self, x):
name = self._known_constants.get(x)
return name if name else self._print_Float(x)
def _print_UndefinedFunction(self, x):
assert self._is_legal_name(x.name)
return x.name
def _print_Exp1(self, x):
return (
self._print_Function(exp(1, evaluate=False))
if exp in self._known_functions else
self._print_NumberSymbol(x)
)
def emptyPrinter(self, expr):
raise NotImplementedError(f'Cannot convert `{repr(expr)}` of type `{type(expr)}` to SMT.')
def smtlib_code(
expr,
auto_assert=True, auto_declare=True,
precision=None,
symbol_table=None,
known_types=None, known_constants=None, known_functions=None,
prefix_expressions=None, suffix_expressions=None,
log_warn=None
):
r"""Converts ``expr`` to a string of smtlib code.
Parameters
==========
expr : Expr | List[Expr]
A SymPy expression or system to be converted.
auto_assert : bool, optional
If false, do not modify expr and produce only the S-Expression equivalent of expr.
If true, assume expr is a system and assert each boolean element.
auto_declare : bool, optional
If false, do not produce declarations for the symbols used in expr.
If true, prepend all necessary declarations for variables used in expr based on symbol_table.
precision : integer, optional
The ``evalf(..)`` precision for numbers such as pi.
symbol_table : dict, optional
A dictionary where keys are ``Symbol`` or ``Function`` instances and values are their Python type i.e. ``bool``, ``int``, ``float``, or ``Callable[...]``.
If incomplete, an attempt will be made to infer types from ``expr``.
known_types: dict, optional
A dictionary where keys are ``bool``, ``int``, ``float`` etc. and values are their corresponding SMT type names.
If not given, a partial listing compatible with several solvers will be used.
known_functions : dict, optional
A dictionary where keys are ``Function``, ``Relational``, ``BooleanFunction``, or ``Expr`` instances and values are their SMT string representations.
If not given, a partial listing optimized for dReal solver (but compatible with others) will be used.
known_constants: dict, optional
A dictionary where keys are ``NumberSymbol`` instances and values are their SMT variable names.
When using this feature, extra caution must be taken to avoid naming collisions between user symbols and listed constants.
If not given, constants will be expanded inline i.e. ``3.14159`` instead of ``MY_SMT_VARIABLE_FOR_PI``.
prefix_expressions: list, optional
A list of lists of ``str`` and/or expressions to convert into SMTLib and prefix to the output.
suffix_expressions: list, optional
A list of lists of ``str`` and/or expressions to convert into SMTLib and postfix to the output.
log_warn: lambda function, optional
A function to record all warnings during potentially risky operations.
Soundness is a core value in SMT solving, so it is good to log all assumptions made.
Examples
========
>>> from sympy import smtlib_code, symbols, sin, Eq
>>> x = symbols('x')
>>> smtlib_code(sin(x).series(x).removeO(), log_warn=print)
Could not infer type of `x`. Defaulting to float.
Non-Boolean expression `x**5/120 - x**3/6 + x` will not be asserted. Converting to SMTLib verbatim.
'(declare-const x Real)\n(+ x (* (/ -1 6) (pow x 3)) (* (/ 1 120) (pow x 5)))'
>>> from sympy import Rational
>>> x, y, tau = symbols("x, y, tau")
>>> smtlib_code((2*tau)**Rational(7, 2), log_warn=print)
Could not infer type of `tau`. Defaulting to float.
Non-Boolean expression `8*sqrt(2)*tau**(7/2)` will not be asserted. Converting to SMTLib verbatim.
'(declare-const tau Real)\n(* 8 (pow 2 (/ 1 2)) (pow tau (/ 7 2)))'
``Piecewise`` expressions are implemented with ``ite`` expressions by default.
Note that if the ``Piecewise`` lacks a default term, represented by
    ``(expr, True)``, then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> smtlib_code(Eq(pw, 3), symbol_table={x: float}, log_warn=print)
'(declare-const x Real)\n(assert (= (ite (> x 0) (+ 1 x) x) 3))'
Custom printing can be defined for certain types by passing a dictionary of
PythonType : "SMT Name" to the ``known_types``, ``known_constants``, and ``known_functions`` kwargs.
>>> from typing import Callable
>>> from sympy import Function, Add
>>> f = Function('f')
>>> g = Function('g')
>>> smt_builtin_funcs = { # functions our SMT solver will understand
... f: "existing_smtlib_fcn",
... Add: "sum",
... }
>>> user_def_funcs = { # functions defined by the user must have their types specified explicitly
... g: Callable[[int], float],
... }
>>> smtlib_code(f(x) + g(x), symbol_table=user_def_funcs, known_functions=smt_builtin_funcs, log_warn=print)
Non-Boolean expression `f(x) + g(x)` will not be asserted. Converting to SMTLib verbatim.
'(declare-const x Int)\n(declare-fun g (Int) Real)\n(sum (existing_smtlib_fcn x) (g x))'
"""
log_warn = log_warn or (lambda _: None)
if not isinstance(expr, list): expr = [expr]
expr = [
sympy.sympify(_, strict=True, evaluate=False, convert_xor=False)
for _ in expr
]
if not symbol_table: symbol_table = {}
symbol_table = _auto_infer_smtlib_types(
*expr, symbol_table=symbol_table
)
# See [FALLBACK RULES]
# Need SMTLibPrinter to populate known_functions and known_constants first.
settings = {}
if precision: settings['precision'] = precision
del precision
if known_types: settings['known_types'] = known_types
del known_types
if known_functions: settings['known_functions'] = known_functions
del known_functions
if known_constants: settings['known_constants'] = known_constants
del known_constants
if not prefix_expressions: prefix_expressions = []
if not suffix_expressions: suffix_expressions = []
p = SMTLibPrinter(settings, symbol_table)
del symbol_table
# [FALLBACK RULES]
for e in expr:
for sym in e.atoms(Symbol, Function):
if (
sym.is_Symbol and
sym not in p._known_constants and
sym not in p.symbol_table
):
log_warn(f"Could not infer type of `{sym}`. Defaulting to float.")
p.symbol_table[sym] = float
if (
sym.is_Function and
type(sym) not in p._known_functions and
type(sym) not in p.symbol_table and
not sym.is_Piecewise
): raise TypeError(
f"Unknown type of undefined function `{sym}`. "
f"Must be mapped to ``str`` in known_functions or mapped to ``Callable[..]`` in symbol_table."
)
declarations = []
if auto_declare:
constants = {sym.name: sym for e in expr for sym in e.free_symbols
if sym not in p._known_constants}
functions = {fnc.name: fnc for e in expr for fnc in e.atoms(Function)
if type(fnc) not in p._known_functions and not fnc.is_Piecewise}
declarations = \
[
_auto_declare_smtlib(sym, p, log_warn)
for sym in constants.values()
] + [
_auto_declare_smtlib(fnc, p, log_warn)
for fnc in functions.values()
]
declarations = [decl for decl in declarations if decl]
if auto_assert:
expr = [_auto_assert_smtlib(e, p, log_warn) for e in expr]
# return SMTLibPrinter().doprint(expr)
return '\n'.join([
# ';; PREFIX EXPRESSIONS',
*[
e if isinstance(e, str) else p.doprint(e)
for e in prefix_expressions
],
# ';; DECLARATIONS',
*sorted(e for e in declarations),
# ';; EXPRESSIONS',
*[
e if isinstance(e, str) else p.doprint(e)
for e in expr
],
# ';; SUFFIX EXPRESSIONS',
*[
e if isinstance(e, str) else p.doprint(e)
for e in suffix_expressions
],
])
def _auto_declare_smtlib(sym: typing.Union[Symbol, Function], p: SMTLibPrinter, log_warn: typing.Callable[[str], None]):
if sym.is_Symbol:
type_signature = p.symbol_table[sym]
assert isinstance(type_signature, type)
type_signature = p._known_types[type_signature]
return p._s_expr('declare-const', [sym, type_signature])
elif sym.is_Function:
type_signature = p.symbol_table[type(sym)]
assert callable(type_signature)
type_signature = [p._known_types[_] for _ in type_signature.__args__]
assert len(type_signature) > 0
params_signature = f"({' '.join(type_signature[:-1])})"
return_signature = type_signature[-1]
return p._s_expr('declare-fun', [type(sym), params_signature, return_signature])
else:
log_warn(f"Non-Symbol/Function `{sym}` will not be declared.")
return None
def _auto_assert_smtlib(e: Expr, p: SMTLibPrinter, log_warn: typing.Callable[[str], None]):
if isinstance(e, Boolean) or (
e in p.symbol_table and p.symbol_table[e] == bool
) or (
e.is_Function and
type(e) in p.symbol_table and
p.symbol_table[type(e)].__args__[-1] == bool
):
return p._s_expr('assert', [e])
else:
log_warn(f"Non-Boolean expression `{e}` will not be asserted. Converting to SMTLib verbatim.")
return e
def _auto_infer_smtlib_types(
*exprs: Basic,
symbol_table: typing.Optional[dict] = None
) -> dict:
# [TYPE INFERENCE RULES]
# X is alone in an expr => X is bool
# X in BooleanFunction.args => X is bool
# X matches to a bool param of a symbol_table function => X is bool
# X matches to an int param of a symbol_table function => X is int
# X.is_integer => X is int
# X == Y, where X is T => Y is T
# [FALLBACK RULES]
# see _auto_declare_smtlib(..)
# X is not bool and X is not int and X is Symbol => X is float
# else (e.g. X is Function) => error. must be specified explicitly.
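    # Illustrative sketch of these rules (hypothetical symbols):
    #   And(p, q)                  => p and q are inferred as bool
    #   Eq(x, y) with x known int  => y is inferred as int
    #   any remaining plain Symbol => falls back to float (see fallback rules)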
_symbols = dict(symbol_table) if symbol_table else {}
def safe_update(syms: set, inf):
for s in syms:
assert s.is_Symbol
if (old_type := _symbols.setdefault(s, inf)) != inf:
raise TypeError(f"Could not infer type of `{s}`. Apparently both `{old_type}` and `{inf}`?")
# EXPLICIT TYPES
safe_update({
e
for e in exprs
if e.is_Symbol
}, bool)
safe_update({
symbol
for e in exprs
for boolfunc in e.atoms(BooleanFunction)
for symbol in boolfunc.args
if symbol.is_Symbol
}, bool)
safe_update({
symbol
for e in exprs
for boolfunc in e.atoms(Function)
if type(boolfunc) in _symbols
for symbol, param in zip(boolfunc.args, _symbols[type(boolfunc)].__args__)
if symbol.is_Symbol and param == bool
}, bool)
safe_update({
symbol
for e in exprs
for intfunc in e.atoms(Function)
if type(intfunc) in _symbols
for symbol, param in zip(intfunc.args, _symbols[type(intfunc)].__args__)
if symbol.is_Symbol and param == int
}, int)
safe_update({
symbol
for e in exprs
for symbol in e.atoms(Symbol)
if symbol.is_integer
}, int)
safe_update({
symbol
for e in exprs
for symbol in e.atoms(Symbol)
if symbol.is_real and not symbol.is_integer
}, float)
# EQUALITY RELATION RULE
rels = [rel for expr in exprs for rel in expr.atoms(Equality)]
rels = [
(rel.lhs, rel.rhs) for rel in rels if rel.lhs.is_Symbol
] + [
(rel.rhs, rel.lhs) for rel in rels if rel.rhs.is_Symbol
]
for infer, reltd in rels:
inference = (
_symbols[infer] if infer in _symbols else
_symbols[reltd] if reltd in _symbols else
_symbols[type(reltd)].__args__[-1]
if reltd.is_Function and type(reltd) in _symbols else
bool if reltd.is_Boolean else
int if reltd.is_integer or reltd.is_Integer else
float if reltd.is_real else
None
)
if inference: safe_update({infer}, inference)
return _symbols
|
359f9acabce2f2e3fb72acb318479d848ada34581615f519183a2b9e69b9c1e9 | """
Julia code printer
The `JuliaCodePrinter` converts SymPy expressions into Julia expressions.
A complete code generator, which uses `julia_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import annotations
from typing import Any
from sympy.core import Mul, Pow, S, Rational
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Julia. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "cot", "sec", "csc",
"asin", "acos", "atan", "acot", "asec", "acsc",
"sinh", "cosh", "tanh", "coth", "sech", "csch",
"asinh", "acosh", "atanh", "acoth", "asech", "acsch",
"sinc", "atan2", "sign", "floor", "log", "exp",
"cbrt", "sqrt", "erf", "erfc", "erfi",
"factorial", "gamma", "digamma", "trigamma",
"polygamma", "beta",
"airyai", "airyaiprime", "airybi", "airybiprime",
"besselj", "bessely", "besseli", "besselk",
"erfinv", "erfcinv"]
# These functions have different names ("SymPy": "Julia"), more
# generally a mapping to (argument_conditions, julia_function).
known_fcns_src2 = {
"Abs": "abs",
"ceiling": "ceil",
"conjugate": "conj",
"hankel1": "hankelh1",
"hankel2": "hankelh2",
"im": "imag",
"re": "real"
}
class JuliaCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Julia code.
"""
printmethod = "_julia"
language = "Julia"
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings: dict[str, Any] = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
    # assignment (if False). FIXME: this should be looked at more carefully
# for Julia.
def __init__(self, settings={}):
super().__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s" % codestring
def _get_comment(self, text):
return "# {}".format(text)
def _declare_number_const(self, name, value):
return "const {} = {}".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Julia uses Fortran order (column-major)
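        # e.g. a 2x2 matrix is traversed as (0,0), (1,0), (0,1), (1,1)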
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Julia arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Julia
if (expr.is_number and expr.is_imaginary and
expr.as_coeff_Mul()[0].is_integer):
return "%sim" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
pow_paren = [] # Will collect all pow with more than one base element and exp = -1
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
if len(item.args[0].args) != 1 and isinstance(item.base, Mul): # To avoid situations like #14160
pow_paren.append(item)
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity and item.p == 1:
                # Save the Rational type in Julia unless the numerator is 1.
# For example:
# julia_code(Rational(3, 7)*x) --> (3 // 7) * x
# julia_code(x/3) --> x / 3 but not x * (1 // 3)
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# To parenthesize Pow with exp = -1 and having more than one Symbol
for item in pow_paren:
if item.base in b:
b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
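            # e.g. factors [2, x, y] render as "2 * x .* y"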
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = "%s %s %s" % (r, mulsym, a_str[i])
return r
if not b:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return "%s %s %s" % (sign+multjoin(a, a_str), divsym, b_str[0])
else:
divsym = '/' if all(bi.is_number for bi in b) else './'
return "%s %s (%s)" % (sign + multjoin(a, a_str), divsym, multjoin(b, b_str))
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Pow(self, expr):
powsymbol = '^' if all(x.is_number for x in expr.args) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1 %s sqrt(%s)" % (sym, self._print(expr.base))
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1 %s %s" % (sym, self.parenthesize(expr.base, PREC))
return '%s %s %s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s ^ %s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
if self._settings["inline"]:
return "pi"
else:
return super()._print_NumberSymbol(expr)
def _print_ImaginaryUnit(self, expr):
return "im"
def _print_Exp1(self, expr):
if self._settings["inline"]:
return "e"
else:
return super()._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
if self._settings["inline"]:
return "eulergamma"
else:
return super()._print_NumberSymbol(expr)
def _print_Catalan(self, expr):
if self._settings["inline"]:
return "catalan"
else:
return super()._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
if self._settings["inline"]:
return "golden"
else:
return super()._print_NumberSymbol(expr)
def _print_Assignment(self, expr):
from sympy.codegen.ast import Assignment
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'Inf'
def _print_NegativeInfinity(self, expr):
return '-Inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return 'Any[' + ', '.join(self._print(a) for a in expr) + ']'
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.stringify(expr, ", ")
_print_Tuple = _print_tuple
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if S.Zero in A.shape:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
return "[%s]" % A[0, 0]
elif A.rows == 1:
return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
elif A.cols == 1:
# note .table would unnecessarily equispace the rows
return "[%s]" % ", ".join([self._print(a) for a in A])
return "[%s]" % A.table(self, rowstart='', rowend='',
rowsep=';\n', colsep=' ')
def _print_SparseRepMatrix(self, A):
from sympy.matrices import Matrix
        L = A.col_list()
# make row vectors of the indices and entries
I = Matrix([k[0] + 1 for k in L])
J = Matrix([k[1] + 1 for k in L])
AIJ = Matrix([k[2] for k in L])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \
+ '[%s,%s]' % (expr.i + 1, expr.j + 1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
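            # converts a 0-based, end-exclusive Python slice to 1-based,
            # end-inclusive Julia syntax, e.g. x=(1, 3, 1) with lim=5 gives "2:3"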
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '[' +
strslice(expr.rowslice, expr.parent.shape[0]) + ',' +
strslice(expr.colslice, expr.parent.shape[1]) + ']')
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s[%s]" % (self._print(expr.base.label), ",".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
def _print_HadamardProduct(self, expr):
return ' .* '.join([self.parenthesize(arg, precedence(expr))
for arg in expr.args])
def _print_HadamardPower(self, expr):
PREC = precedence(expr)
return '.**'.join([
self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC)
])
def _print_Rational(self, expr):
if expr.q == 1:
return str(expr.p)
return "%s // %s" % (expr.p, expr.q)
# Note: as of 2022, Julia doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
            # Express each (cond, expr) pair as a nested ternary:
            #   (condition) ? (expr) : (<rest of the pairs>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({}) ? ({}) :".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = " (%s)" % self._print(expr.args[-1].expr)
pw = "\n".join(ecpairs) + elast
            # Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
def _print_MatMul(self, expr):
c, m = expr.as_coeff_mmul()
sign = ""
if c.is_number:
re, im = c.as_real_imag()
if im.is_zero and re.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
elif re.is_zero and im.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
return sign + ' * '.join(
(self.parenthesize(arg, precedence(expr)) for arg in expr.args)
)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(search(re, line) for re in inc_regex))
for line in code ]
decrease = [ int(any(search(re, line) for re in dec_regex))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def julia_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Julia code.
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
        The precision for numbers such as pi [default=17].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import julia_code, symbols, sin, pi
>>> x = symbols('x')
>>> julia_code(sin(x).series(x).removeO())
'x .^ 5 / 120 - x .^ 3 / 6 + x'
>>> from sympy import Rational, ceiling
>>> x, y, tau = symbols("x, y, tau")
>>> julia_code((2*tau)**Rational(7, 2))
'8 * sqrt(2) * tau .^ (7 // 2)'
Note that element-wise (Hadamard) operations are used by default between
    symbols. This is because it is possible in Julia to write "vectorized"
code. It is harmless if the values are scalars.
>>> julia_code(sin(pi*x*y), assign_to="s")
's = sin(pi * x .* y)'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> julia_code(3*pi*A**3)
'(3 * pi) * A ^ 3'
    This class uses several rules to decide which symbol to use for a product.
Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
A HadamardProduct can be used to specify componentwise multiplication ".*"
    of two MatrixSymbols. There is currently no easy way to specify
scalar symbols, so sometimes the code might have some minor cosmetic
issues. For example, suppose x and y are scalars and A is a Matrix, then
while a human programmer might write "(x^2*y)*A^3", we generate:
>>> julia_code(x**2*y*A**3)
'(x .^ 2 .* y) * A ^ 3'
Matrices are supported using Julia inline notation. When using
``assign_to`` with matrices, the name can be specified either as a string
or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> julia_code(mat, assign_to='A')
'A = [x .^ 2 sin(x) ceil(x)]'
    ``Piecewise`` expressions are implemented with conditional (ternary)
    expressions by default. Alternatively, you can pass "inline=False" to
    generate if/elseif/else blocks.
Note that if the ``Piecewise`` lacks a default term, represented by
    ``(expr, True)``, then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> julia_code(pw, assign_to=tau)
'tau = ((x > 0) ? (x + 1) : (x))'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> julia_code(mat, assign_to='A')
'A = [x .^ 2 ((x > 0) ? (x + 1) : (x)) sin(x)]'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Julia function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_julia_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> julia_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_julia_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> julia_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i]) ./ (t[i + 1] - t[i])'
"""
return JuliaCodePrinter(settings).doprint(expr, assign_to)
def print_julia_code(expr, **settings):
"""Prints the Julia representation of the given expression.
See `julia_code` for the meaning of the optional arguments.
"""
print(julia_code(expr, **settings))
|
26c59e7c094bb87e308a6878a568121910a4e7cfcd72fff293385fcbab4dda64 | from __future__ import annotations
from sympy.core import Basic, S
from sympy.core.function import Lambda
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from functools import reduce
known_functions = {
'Abs': 'abs',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'acos': 'acos',
'asin': 'asin',
'atan': 'atan',
'atan2': 'atan',
'ceiling': 'ceil',
'floor': 'floor',
'sign': 'sign',
'exp': 'exp',
'log': 'log',
'add': 'add',
'sub': 'sub',
'mul': 'mul',
'pow': 'pow'
}
class GLSLPrinter(CodePrinter):
"""
Rudimentary, generic GLSL printing tools.
Additional settings:
'use_operators': Boolean (should the printer use operators for +,-,*, or functions?)
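A minimal usage sketch (the expected output is inferred from the
``glsl_code`` examples later in this module):
>>> from sympy.abc import x
>>> GLSLPrinter().doprint(x**3 + 1)
'pow(x, 3.0) + 1'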
"""
_not_supported: set[Basic] = set()
printmethod = "_glsl"
language = "GLSL"
_default_settings = {
'use_operators': True,
'zero': 0,
'mat_nested': False,
'mat_separator': ',\n',
'mat_transpose': False,
'array_type': 'float',
'glsl_types': True,
'order': None,
'full_prec': 'auto',
'precision': 9,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
'error_on_reserved': False,
'reserved_word_suffix': '_',
}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
return "float {} = {};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [line.lstrip(' \t') for line in code]
increase = [int(any(map(line.endswith, inc_token))) for line in code]
decrease = [int(any(map(line.startswith, dec_token))) for line in code]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def _print_MatrixBase(self, mat):
mat_separator = self._settings['mat_separator']
mat_transpose = self._settings['mat_transpose']
column_vector = (mat.rows == 1) if mat_transpose else (mat.cols == 1)
A = mat.transpose() if mat_transpose != column_vector else mat
glsl_types = self._settings['glsl_types']
array_type = self._settings['array_type']
array_size = A.cols*A.rows
array_constructor = "{}[{}]".format(array_type, array_size)
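# A 1x1 matrix prints as its single element. Matrices up to 4x4 map onto the
# GLSL vec/mat constructors when glsl_types is enabled; anything larger falls
# back to a flat array, or a nested array when mat_nested is set.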
if A.cols == 1:
return self._print(A[0])
if A.rows <= 4 and A.cols <= 4 and glsl_types:
if A.rows == 1:
return "vec{}{}".format(
A.cols, A.table(self,rowstart='(',rowend=')')
)
elif A.rows == A.cols:
return "mat{}({})".format(
A.rows, A.table(self,rowsep=', ',
rowstart='',rowend='')
)
else:
return "mat{}x{}({})".format(
A.cols, A.rows,
A.table(self,rowsep=', ',
rowstart='',rowend='')
)
elif S.One in A.shape:
return "{}({})".format(
array_constructor,
A.table(self,rowsep=mat_separator,rowstart='',rowend='')
)
elif not self._settings['mat_nested']:
return "{}(\n{}\n) /* a {}x{} matrix */".format(
array_constructor,
A.table(self,rowsep=mat_separator,rowstart='',rowend=''),
A.rows, A.cols
)
elif self._settings['mat_nested']:
return "{}[{}][{}](\n{}\n)".format(
array_type, A.rows, A.cols,
A.table(self,rowsep=mat_separator,rowstart='float[](',rowend=')')
)
def _print_SparseRepMatrix(self, mat):
# do not allow sparse matrices to be made dense
return self._print_not_supported(mat)
def _traverse_matrix_indices(self, mat):
mat_transpose = self._settings['mat_transpose']
if mat_transpose:
rows,cols = mat.shape
else:
cols,rows = mat.shape
return ((i, j) for i in range(cols) for j in range(rows))
def _print_MatrixElement(self, expr):
nest = self._settings['mat_nested']
glsl_types = self._settings['glsl_types']
mat_transpose = self._settings['mat_transpose']
if mat_transpose:
cols,rows = expr.parent.shape
i,j = expr.j,expr.i
else:
rows,cols = expr.parent.shape
i,j = expr.i,expr.j
pnt = self._print(expr.parent)
if glsl_types and ((rows <= 4 and cols <=4) or nest):
return "{}[{}][{}]".format(pnt, i, j)
else:
return "{}[{}]".format(pnt, i + j*rows)
def _print_list(self, expr):
l = ', '.join(self._print(item) for item in expr)
glsl_types = self._settings['glsl_types']
array_type = self._settings['array_type']
array_size = len(expr)
array_constructor = '{}[{}]'.format(array_type, array_size)
if array_size <= 4 and glsl_types:
return 'vec{}({})'.format(array_size, l)
else:
return '{}({})'.format(array_constructor, l)
_print_tuple = _print_list
_print_Tuple = _print_list
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (int %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
for i in indices:
# GLSL arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'varble': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_Function_with_args(self, func, func_args):
if func in self.known_functions:
cond_func = self.known_functions[func]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(func_args):
break
if func is not None:
try:
return func(*[self.parenthesize(item, 0) for item in func_args])
except TypeError:
return '{}({})'.format(func, self.stringify(func_args, ", "))
elif isinstance(func, Lambda):
# inlined function
return self._print(func(*func_args))
else:
return self._print_not_supported(func)
def _print_Piecewise(self, expr):
from sympy.codegen.ast import Assignment
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c),
self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "{}[{}]".format(
self._print(expr.base.label),
self._print(elem)
)
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '1.0/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return 'sqrt(%s)' % self._print(expr.base)
else:
try:
e = self._print(float(expr.exp))
except TypeError:
e = self._print(expr.exp)
return self._print_Function_with_args('pow', (
self._print(expr.base),
e
))
def _print_int(self, expr):
return str(float(expr))
def _print_Rational(self, expr):
return "{}.0/{}.0".format(expr.p, expr.q)
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Add(self, expr, order=None):
if self._settings['use_operators']:
return CodePrinter._print_Add(self, expr, order=order)
terms = expr.as_ordered_terms()
def partition(p,l):
return reduce(lambda x, y: (x[0]+[y], x[1]) if p(y) else (x[0], x[1]+[y]), l, ([], []))
def add(a,b):
return self._print_Function_with_args('add', (a, b))
# return self.known_functions['add']+'(%s, %s)' % (a,b)
neg, pos = partition(lambda arg: arg.could_extract_minus_sign(), terms)
if pos:
s = pos = reduce(lambda a,b: add(a,b), map(lambda t: self._print(t),pos))
else:
s = pos = self._print(self._settings['zero'])
if neg:
# sum the absolute values of the negative terms
neg = reduce(lambda a,b: add(a,b), map(lambda n: self._print(-n),neg))
# then subtract them from the positive terms
s = self._print_Function_with_args('sub', (pos,neg))
# s = self.known_functions['sub']+'(%s, %s)' % (pos,neg)
return s
def _print_Mul(self, expr, **kwargs):
if self._settings['use_operators']:
return CodePrinter._print_Mul(self, expr, **kwargs)
terms = expr.as_ordered_factors()
def mul(a,b):
# return self.known_functions['mul']+'(%s, %s)' % (a,b)
return self._print_Function_with_args('mul', (a,b))
s = reduce(lambda a,b: mul(a,b), map(lambda t: self._print(t), terms))
return s
def glsl_code(expr,assign_to=None,**settings):
"""Converts an expr to a string of GLSL code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used for naming the variable or variables
to which the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol`` or ``Indexed`` type object. In cases where ``expr``
would be printed as an array, a list of string or ``Symbol`` objects
can also be passed.
This is helpful in case of line-wrapping, or for expressions that
generate multi-line statements. It can also be used to spread an array-like
expression into multiple assignments.
use_operators: bool, optional
If set to False, then the *, + and - operators will be replaced with the
functions mul, add, and sub, which must be implemented by the user, e.g. for
implementing non-standard rings or emulated quad/octuple precision.
[default=True]
glsl_types: bool, optional
Set this argument to ``False`` in order to avoid using the ``vec`` and ``mat``
types. The printer will instead use arrays (or nested arrays).
[default=True]
mat_nested: bool, optional
GLSL version 4.3 and above support nested arrays (arrays of arrays). Set this to ``True``
to render matrices as nested arrays.
[default=False]
mat_separator: str, optional
By default, matrices are rendered with newlines using this separator,
making them easier to read, but less compact. By removing the newline
this option can be used to make them more vertically compact.
[default=',\n']
mat_transpose: bool, optional
GLSL's matrix multiplication implementation assumes column-major indexing.
By default, this printer ignores that convention. Setting this option to
``True`` transposes all matrix output.
[default=False]
array_type: str, optional
The GLSL array constructor type.
[default='float']
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples, i.e. [(argument_test, glsl_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops; instead the user is
responsible for providing values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import glsl_code, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> glsl_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*pow(tau, 3.5)'
>>> glsl_code(sin(x), assign_to="float y")
'float y = sin(x);'
Various GLSL types are supported:
>>> from sympy import Matrix, glsl_code
>>> glsl_code(Matrix([1,2,3]))
'vec3(1, 2, 3)'
>>> glsl_code(Matrix([[1, 2],[3, 4]]))
'mat2(1, 2, 3, 4)'
Pass ``mat_transpose = True`` to switch to column-major indexing:
>>> glsl_code(Matrix([[1, 2],[3, 4]]), mat_transpose = True)
'mat2(1, 3, 2, 4)'
By default, larger matrices get collapsed into float arrays:
>>> print(glsl_code( Matrix([[1,2,3,4,5],[6,7,8,9,10]]) ))
float[10](
1, 2, 3, 4, 5,
6, 7, 8, 9, 10
) /* a 2x5 matrix */
The type of array constructor used to print GLSL arrays can be controlled
via the ``array_type`` parameter:
>>> glsl_code(Matrix([1,2,3,4,5]), array_type='int')
'int[5](1, 2, 3, 4, 5)'
Passing a list of strings or ``symbols`` to the ``assign_to`` parameter will yield
a multi-line assignment for each item in an array-like expression:
>>> x_struct_members = symbols('x.a x.b x.c x.d')
>>> print(glsl_code(Matrix([1,2,3,4]), assign_to=x_struct_members))
x.a = 1;
x.b = 2;
x.c = 3;
x.d = 4;
This could be useful in cases where it's desirable to modify members of a
GLSL ``Struct``. It could also be used to spread items from an array-like
expression into various miscellaneous assignments:
>>> misc_assignments = ('x[0]', 'x[1]', 'float y', 'float z')
>>> print(glsl_code(Matrix([1,2,3,4]), assign_to=misc_assignments))
x[0] = 1;
x[1] = 2;
float y = 3;
float z = 4;
Passing ``mat_nested = True`` instead prints out nested float arrays, which are
supported in GLSL 4.3 and above.
>>> mat = Matrix([
... [ 0, 1, 2],
... [ 3, 4, 5],
... [ 6, 7, 8],
... [ 9, 10, 11],
... [12, 13, 14]])
>>> print(glsl_code( mat, mat_nested = True ))
float[5][3](
float[]( 0, 1, 2),
float[]( 3, 4, 5),
float[]( 6, 7, 8),
float[]( 9, 10, 11),
float[](12, 13, 14)
)
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples, i.e. [(argument_test,
glsl_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> glsl_code(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
If further control is needed, the addition, subtraction, and multiplication
operators can be replaced with the ``add``, ``sub``, and ``mul`` functions.
This is done by passing ``use_operators = False``:
>>> x,y,z = symbols('x,y,z')
>>> glsl_code(x*(y+z), use_operators = False)
'mul(x, add(y, z))'
>>> glsl_code(x*(y+z*(x-y)**z), use_operators = False)
'mul(x, add(y, mul(z, pow(sub(x, y), z))))'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(glsl_code(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> glsl_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(glsl_code(mat, A))
A[0][0] = pow(x, 2.0);
if (x > 0) {
A[1][0] = x + 1;
}
else {
A[1][0] = x;
}
A[2][0] = sin(x);
"""
return GLSLPrinter(settings).doprint(expr,assign_to)
def print_glsl(expr, **settings):
"""Prints the GLSL representation of the given expression.
See GLSLPrinter init function for settings.
"""
print(glsl_code(expr, **settings))
|
0b07f0bd2a2beb6614eec473a083d967e43371336d9009d1304f8f692266038e | """
.. deprecated:: 1.8
``sympy.printing.theanocode`` is deprecated. Theano has been renamed to
Aesara. Use ``sympy.printing.aesaracode`` instead. See
:ref:`theanocode-deprecated` for more information.
"""
from __future__ import annotations
from typing import Any
from sympy.external import import_module
from sympy.printing.printer import Printer
from sympy.utilities.iterables import is_sequence
import sympy
from functools import partial
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.exceptions import sympy_deprecation_warning
theano = import_module('theano')
if theano:
ts = theano.scalar
tt = theano.tensor
from theano.sandbox import linalg as tlinalg
mapping = {
sympy.Add: tt.add,
sympy.Mul: tt.mul,
sympy.Abs: tt.abs_,
sympy.sign: tt.sgn,
sympy.ceiling: tt.ceil,
sympy.floor: tt.floor,
sympy.log: tt.log,
sympy.exp: tt.exp,
sympy.sqrt: tt.sqrt,
sympy.cos: tt.cos,
sympy.acos: tt.arccos,
sympy.sin: tt.sin,
sympy.asin: tt.arcsin,
sympy.tan: tt.tan,
sympy.atan: tt.arctan,
sympy.atan2: tt.arctan2,
sympy.cosh: tt.cosh,
sympy.acosh: tt.arccosh,
sympy.sinh: tt.sinh,
sympy.asinh: tt.arcsinh,
sympy.tanh: tt.tanh,
sympy.atanh: tt.arctanh,
sympy.re: tt.real,
sympy.im: tt.imag,
sympy.arg: tt.angle,
sympy.erf: tt.erf,
sympy.gamma: tt.gamma,
sympy.loggamma: tt.gammaln,
sympy.Pow: tt.pow,
sympy.Eq: tt.eq,
sympy.StrictGreaterThan: tt.gt,
sympy.StrictLessThan: tt.lt,
sympy.LessThan: tt.le,
sympy.GreaterThan: tt.ge,
sympy.And: tt.and_,
sympy.Or: tt.or_,
sympy.Max: tt.maximum, # SymPy accepts >2 inputs, Theano only 2
sympy.Min: tt.minimum, # SymPy accepts >2 inputs, Theano only 2
sympy.conjugate: tt.conj,
sympy.core.numbers.ImaginaryUnit: lambda:tt.complex(0,1),
# Matrices
sympy.MatAdd: tt.Elemwise(ts.add),
sympy.HadamardProduct: tt.Elemwise(ts.mul),
sympy.Trace: tlinalg.trace,
sympy.Determinant : tlinalg.det,
sympy.Inverse: tlinalg.matrix_inverse,
sympy.Transpose: tt.DimShuffle((False, False), [1, 0]),
}
class TheanoPrinter(Printer):
""" Code printer which creates Theano symbolic expression graphs.
Parameters
==========
cache : dict
Cache dictionary to use. If None (default) will use
the global cache. To create a printer which does not depend on or alter
global state pass an empty dictionary. Note: the dictionary is not
copied on initialization of the printer and will be updated in-place,
so using the same dict object when creating multiple printers or making
multiple calls to :func:`.theano_code` or :func:`.theano_function` means
the cache is shared between all these applications.
Attributes
==========
cache : dict
A cache of Theano variables which have been created for SymPy
symbol-like objects (e.g. :class:`sympy.core.symbol.Symbol` or
:class:`sympy.matrices.expressions.MatrixSymbol`). This is used to
ensure that all references to a given symbol in an expression (or
multiple expressions) are printed as the same Theano variable, which is
created only once. Symbols are differentiated only by name and type. The
format of the cache's contents should be considered opaque to the user.
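A minimal cache-sharing sketch (not a doctest, since it requires Theano to be
installed; the printer is constructed the same way :func:`.theano_code` does
internally)::
    from sympy.abc import x
    from sympy.printing.theanocode import TheanoPrinter
    local_cache = {}                        # keep away from the global cache
    printer = TheanoPrinter(cache=local_cache, settings={})
    tx_a = printer.doprint(x)               # creates a Theano variable for x
    tx_b = printer.doprint(x + 1)           # reuses the cached variable for x
    assert printer.doprint(x) is tx_a       # same key -> same cached variable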
"""
printmethod = "_theano"
def __init__(self, *args, **kwargs):
self.cache = kwargs.pop('cache', dict())
super().__init__(*args, **kwargs)
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
""" Get the cache key for a SymPy object.
Parameters
==========
s : sympy.core.basic.Basic
SymPy object to get key for.
name : str
Name of object, if it does not have a ``name`` attribute.
"""
if name is None:
name = s.name
return (name, type(s), s.args, dtype, broadcastable)
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
"""
Get the Theano variable for a SymPy symbol from the cache, or create it
if it does not exist.
"""
# Defaults
if name is None:
name = s.name
if dtype is None:
dtype = 'floatX'
if broadcastable is None:
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if key in self.cache:
return self.cache[key]
value = tt.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value
def _print_Symbol(self, s, **kwargs):
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, dtype=dtype, broadcastable=bc)
def _print_AppliedUndef(self, s, **kwargs):
name = str(type(s)) + '_' + str(s.args[0])
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, name=name, dtype=dtype, broadcastable=bc)
def _print_Basic(self, expr, **kwargs):
op = mapping[type(expr)]
children = [self._print(arg, **kwargs) for arg in expr.args]
return op(*children)
def _print_Number(self, n, **kwargs):
# Integers already taken care of below, interpret as float
return float(n.evalf())
def _print_MatrixSymbol(self, X, **kwargs):
dtype = kwargs.get('dtypes', {}).get(X)
return self._get_or_create(X, dtype=dtype, broadcastable=(None, None))
def _print_DenseMatrix(self, X, **kwargs):
if not hasattr(tt, 'stacklists'):
raise NotImplementedError(
"Matrix translation not yet supported in this version of Theano")
return tt.stacklists([
[self._print(arg, **kwargs) for arg in L]
for L in X.tolist()
])
_print_ImmutableMatrix = _print_ImmutableDenseMatrix = _print_DenseMatrix
def _print_MatMul(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = children[0]
for child in children[1:]:
result = tt.dot(result, child)
return result
def _print_MatPow(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = 1
if isinstance(children[1], int) and children[1] > 0:
for i in range(children[1]):
result = tt.dot(result, children[0])
else:
raise NotImplementedError('''Only positive integer
powers of matrices can be handled by Theano at the moment''')
return result
def _print_MatrixSlice(self, expr, **kwargs):
parent = self._print(expr.parent, **kwargs)
rowslice = self._print(slice(*expr.rowslice), **kwargs)
colslice = self._print(slice(*expr.colslice), **kwargs)
return parent[rowslice, colslice]
def _print_BlockMatrix(self, expr, **kwargs):
nrows, ncols = expr.blocks.shape
blocks = [[self._print(expr.blocks[r, c], **kwargs)
for c in range(ncols)]
for r in range(nrows)]
return tt.join(0, *[tt.join(1, *row) for row in blocks])
def _print_slice(self, expr, **kwargs):
return slice(*[self._print(i, **kwargs)
if isinstance(i, sympy.Basic) else i
for i in (expr.start, expr.stop, expr.step)])
def _print_Pi(self, expr, **kwargs):
return 3.141592653589793
def _print_Exp1(self, expr, **kwargs):
return ts.exp(1)
def _print_Piecewise(self, expr, **kwargs):
import numpy as np
e, cond = expr.args[0].args # First condition and corresponding value
# Print conditional expression and value for first condition
p_cond = self._print(cond, **kwargs)
p_e = self._print(e, **kwargs)
# One condition only
if len(expr.args) == 1:
# Return value if condition else NaN
return tt.switch(p_cond, p_e, np.nan)
# Return value_1 if condition_1 else evaluate remaining conditions
p_remaining = self._print(sympy.Piecewise(*expr.args[1:]), **kwargs)
return tt.switch(p_cond, p_e, p_remaining)
def _print_Rational(self, expr, **kwargs):
return tt.true_div(self._print(expr.p, **kwargs),
self._print(expr.q, **kwargs))
def _print_Integer(self, expr, **kwargs):
return expr.p
def _print_factorial(self, expr, **kwargs):
return self._print(sympy.gamma(expr.args[0] + 1), **kwargs)
def _print_Derivative(self, deriv, **kwargs):
rv = self._print(deriv.expr, **kwargs)
for var in deriv.variables:
var = self._print(var, **kwargs)
rv = tt.Rop(rv, var, tt.ones_like(var))
return rv
def emptyPrinter(self, expr):
return expr
def doprint(self, expr, dtypes=None, broadcastables=None):
""" Convert a SymPy expression to a Theano graph variable.
The ``dtypes`` and ``broadcastables`` arguments are used to specify the
data type, dimension, and broadcasting behavior of the Theano variables
corresponding to the free symbols in ``expr``. Each is a mapping from
SymPy symbols to the value of the corresponding argument to
``theano.tensor.Tensor``.
See the corresponding `documentation page`__ for more information on
broadcasting in Theano.
.. __: http://deeplearning.net/software/theano/tutorial/broadcasting.html
Parameters
==========
expr : sympy.core.expr.Expr
SymPy expression to print.
dtypes : dict
Mapping from SymPy symbols to Theano datatypes to use when creating
new Theano variables for those symbols. Corresponds to the ``dtype``
argument to ``theano.tensor.Tensor``. Defaults to ``'floatX'``
for symbols not included in the mapping.
broadcastables : dict
Mapping from SymPy symbols to the value of the ``broadcastable``
argument to ``theano.tensor.Tensor`` to use when creating Theano
variables for those symbols. Defaults to the empty tuple for symbols
not included in the mapping (resulting in a scalar).
Returns
=======
theano.gof.graph.Variable
A variable corresponding to the expression's value in a Theano
symbolic expression graph.
"""
if dtypes is None:
dtypes = {}
if broadcastables is None:
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)
global_cache: dict[Any, Any] = {}
def theano_code(expr, cache=None, **kwargs):
"""
Convert a SymPy expression into a Theano graph variable.
.. deprecated:: 1.8
``sympy.printing.theanocode`` is deprecated. Theano has been renamed to
Aesara. Use ``sympy.printing.aesaracode`` instead. See
:ref:`theanocode-deprecated` for more information.
Parameters
==========
expr : sympy.core.expr.Expr
SymPy expression object to convert.
cache : dict
Cached Theano variables (see :class:`TheanoPrinter.cache
<TheanoPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.TheanoPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.TheanoPrinter.doprint`.
Returns
=======
theano.gof.graph.Variable
A variable corresponding to the expression's value in a Theano symbolic
expression graph.
"""
sympy_deprecation_warning(
"""
sympy.printing.theanocode is deprecated. Theano has been renamed to
Aesara. Use sympy.printing.aesaracode instead.""",
deprecated_since_version="1.8",
active_deprecations_target='theanocode-deprecated')
if not theano:
raise ImportError("theano is required for theano_code")
if cache is None:
cache = global_cache
return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
r"""
Get value of ``broadcastables`` argument to :func:`.theano_code` from
keyword arguments to :func:`.theano_function`.
Included for backwards compatibility.
Parameters
==========
inputs
Sequence of input symbols.
dim : int
Common number of dimensions for all inputs. Overrides other arguments
if given.
dims : dict
Mapping from input symbols to number of dimensions. Overrides
``broadcastables`` argument if given.
broadcastables : dict
Explicit value of ``broadcastables`` argument to
:meth:`.TheanoPrinter.doprint`. If not None, the function will return this value unchanged.
Returns
=======
dict
Dictionary mapping elements of ``inputs`` to their "broadcastable"
values (tuple of ``bool``\ s).
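A small doctest-style illustration (symbols taken from ``sympy.abc``):
>>> from sympy.abc import x, y
>>> from sympy.printing.theanocode import dim_handling
>>> dim_handling([x, y], dim=1)
{x: (False,), y: (False,)}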
"""
if dim is not None:
return {s: (False,) * dim for s in inputs}
if dims is not None:
maxdim = max(dims.values())
return {
s: (False,) * d + (True,) * (maxdim - d)
for s, d in dims.items()
}
if broadcastables is not None:
return broadcastables
return {}
@doctest_depends_on(modules=('theano',))
def theano_function(inputs, outputs, scalar=False, *,
dim=None, dims=None, broadcastables=None, **kwargs):
"""
Create a Theano function from SymPy expressions.
.. deprecated:: 1.8
``sympy.printing.theanocode`` is deprecated. Theano has been renamed to
Aesara. Use ``sympy.printing.aesaracode`` instead. See
:ref:`theanocode-deprecated` for more information.
The inputs and outputs are converted to Theano variables using
:func:`.theano_code` and then passed to ``theano.function``.
Parameters
==========
inputs
Sequence of symbols which constitute the inputs of the function.
outputs
Sequence of expressions which constitute the outputs(s) of the
function. The free symbols of each expression must be a subset of
``inputs``.
scalar : bool
Convert 0-dimensional arrays in output to scalars. This will return a
Python wrapper function around the Theano function object.
cache : dict
Cached Theano variables (see :class:`TheanoPrinter.cache
<TheanoPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.TheanoPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.TheanoPrinter.doprint`.
dims : dict
Alternative to ``broadcastables`` argument. Mapping from elements of
``inputs`` to integers indicating the dimension of their associated
arrays/tensors. Overrides ``broadcastables`` argument if given.
dim : int
Another alternative to the ``broadcastables`` argument. Common number of
dimensions to use for all arrays/tensors.
``theano_function([x, y], [...], dim=2)`` is equivalent to using
``broadcastables={x: (False, False), y: (False, False)}``.
Returns
=======
callable
A callable object which takes values of ``inputs`` as positional
arguments and returns an output array for each of the expressions
in ``outputs``. If ``outputs`` is a single expression the function will
return a Numpy array; if it is a list of multiple expressions the
function will return a list of arrays. A single expression wrapped in a
one-element list is unwrapped, so it also returns a single array rather than
a one-element list.
The returned object will either be an instance of
``theano.compile.function_module.Function`` or a Python wrapper
function around one. In both cases, the returned value will have a
``theano_function`` attribute which points to the return value of
``theano.function``.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.printing.theanocode import theano_function
A simple function with one input and one output:
>>> f1 = theano_function([x], [x**2 - 1], scalar=True)
>>> f1(3)
8.0
A function with multiple inputs and one output:
>>> f2 = theano_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)
>>> f2(3, 4, 2)
5.0
A function with multiple inputs and multiple outputs:
>>> f3 = theano_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)
>>> f3(2, 3)
[13.0, -5.0]
See also
========
dim_handling
"""
sympy_deprecation_warning(
"""
sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead""",
deprecated_since_version="1.8",
active_deprecations_target='theanocode-deprecated')
if not theano:
raise ImportError("theano is required for theano_function")
# Pop off non-theano keyword args
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(
inputs, dim=dim, dims=dims, broadcastables=broadcastables,
)
# Print inputs/outputs
code = partial(theano_code, cache=cache, dtypes=dtypes,
broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
#fix constant expressions as variables
toutputs = [output if isinstance(output, theano.Variable) else tt.as_tensor_variable(output) for output in toutputs]
if len(toutputs) == 1:
toutputs = toutputs[0]
# Compile theano func
func = theano.function(tinputs, toutputs, **kwargs)
is_0d = [len(o.variable.broadcastable) == 0 for o in func.outputs]
# No wrapper required
if not scalar or not any(is_0d):
func.theano_function = func
return func
# Create wrapper to convert 0-dimensional outputs to scalars
def wrapper(*args):
out = func(*args)
# out can be array(1.0) or [array(1.0), array(2.0)]
if is_sequence(out):
return [o[()] if is_0d[i] else o for i, o in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.theano_function = func
return wrapper
|
65d0661e07809faeca34bdc4193e2a8bb771851773417a698a98e8943960443c | """Integration method that emulates by-hand techniques.
This module also provides functionality to get the steps used to evaluate a
particular integral, in the ``integral_steps`` function. This will return
nested ``Rule`` s representing the integration rules used.
Each ``Rule`` class represents a (maybe parametrized) integration rule, e.g.
``SinRule`` for integrating ``sin(x)`` and ``ReciprocalSqrtQuadraticRule``
for integrating ``1/sqrt(a+b*x+c*x**2)``. The ``eval`` method returns the
integration result.
The ``manualintegrate`` function computes the integral by calling ``eval``
on the rule returned by ``integral_steps``.
The integrator can be extended with new heuristics and evaluation
techniques. To do so, subclass ``Rule``, implement its ``eval`` method, and
then write a function that accepts an ``IntegralInfo`` object and returns
either a ``Rule`` instance or ``None``. If the new technique requires a new
match, add the key and the call to the antiderivative function to
``integral_steps``. To enable simple substitutions, add the match to
``find_substitutions``.
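A brief illustration of the rule/eval workflow (the reprs shown are the
default dataclass reprs of the rules defined below):
>>> from sympy import sin, Symbol
>>> from sympy.integrals.manualintegrate import integral_steps, manualintegrate
>>> x = Symbol('x')
>>> integral_steps(sin(x), x)
SinRule(integrand=sin(x), variable=x)
>>> manualintegrate(sin(x), x)
-cos(x)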
"""
from __future__ import annotations
from typing import NamedTuple, Type, Callable, Sequence
from abc import ABC, abstractmethod
from dataclasses import dataclass
from collections import defaultdict
from collections.abc import Mapping
from sympy.core.add import Add
from sympy.core.cache import cacheit
from sympy.core.containers import Dict
from sympy.core.expr import Expr
from sympy.core.function import Derivative
from sympy.core.logic import fuzzy_not
from sympy.core.mul import Mul
from sympy.core.numbers import Integer, Number, E
from sympy.core.power import Pow
from sympy.core.relational import Eq, Ne, Boolean
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol, Wild
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.hyperbolic import (HyperbolicFunction, csch,
cosh, coth, sech, sinh, tanh, asinh)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
cos, sin, tan, cot, csc, sec, acos, asin, atan, acot, acsc, asec)
from sympy.functions.special.delta_functions import Heaviside, DiracDelta
from sympy.functions.special.error_functions import (erf, erfi, fresnelc,
fresnels, Ci, Chi, Si, Shi, Ei, li)
from sympy.functions.special.gamma_functions import uppergamma
from sympy.functions.special.elliptic_integrals import elliptic_e, elliptic_f
from sympy.functions.special.polynomials import (chebyshevt, chebyshevu,
legendre, hermite, laguerre, assoc_laguerre, gegenbauer, jacobi,
OrthogonalPolynomial)
from sympy.functions.special.zeta_functions import polylog
from .integrals import Integral
from sympy.logic.boolalg import And
from sympy.ntheory.factor_ import primefactors
from sympy.polys.polytools import degree, lcm_list, gcd_list, Poly
from sympy.simplify.radsimp import fraction
from sympy.simplify.simplify import simplify
from sympy.solvers.solvers import solve
from sympy.strategies.core import switch, do_one, null_safe, condition
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import debug
@dataclass
class Rule(ABC):
integrand: Expr
variable: Symbol
@abstractmethod
def eval(self) -> Expr:
pass
@abstractmethod
def contains_dont_know(self) -> bool:
pass
@dataclass
class AtomicRule(Rule, ABC):
"""A simple rule that does not depend on other rules"""
def contains_dont_know(self) -> bool:
return False
@dataclass
class ConstantRule(AtomicRule):
"""integrate(a, x) -> a*x"""
def eval(self) -> Expr:
return self.integrand * self.variable
@dataclass
class ConstantTimesRule(Rule):
"""integrate(a*f(x), x) -> a*integrate(f(x), x)"""
constant: Expr
other: Expr
substep: Rule
def eval(self) -> Expr:
return self.constant * self.substep.eval()
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
class PowerRule(AtomicRule):
"""integrate(x**a, x)"""
base: Expr
exp: Expr
def eval(self) -> Expr:
return Piecewise(
((self.base**(self.exp + 1))/(self.exp + 1), Ne(self.exp, -1)),
(log(self.base), True),
)
@dataclass
class NestedPowRule(AtomicRule):
"""integrate((x**a)**b, x)"""
base: Expr
exp: Expr
def eval(self) -> Expr:
m = self.base * self.integrand
return Piecewise((m / (self.exp + 1), Ne(self.exp, -1)),
(m * log(self.base), True))
@dataclass
class AddRule(Rule):
"""integrate(f(x) + g(x), x) -> integrate(f(x), x) + integrate(g(x), x)"""
substeps: list[Rule]
def eval(self) -> Expr:
return Add(*(substep.eval() for substep in self.substeps))
def contains_dont_know(self) -> bool:
return any(substep.contains_dont_know() for substep in self.substeps)
@dataclass
class URule(Rule):
"""integrate(f(g(x))*g'(x), x) -> integrate(f(u), u), u = g(x)"""
u_var: Symbol
u_func: Expr
substep: Rule
def eval(self) -> Expr:
result = self.substep.eval()
if self.u_func.is_Pow:
base, exp_ = self.u_func.as_base_exp()
if exp_ == -1:
# avoid needless -log(1/x) from substitution
result = result.subs(log(self.u_var), -log(base))
return result.subs(self.u_var, self.u_func)
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
class PartsRule(Rule):
"""integrate(u(x)*v'(x), x) -> u(x)*v(x) - integrate(u'(x)*v(x), x)"""
u: Symbol
dv: Expr
v_step: Rule
second_step: Rule | None  # None when it is a substep of CyclicPartsRule
def eval(self) -> Expr:
assert self.second_step is not None
v = self.v_step.eval()
return self.u * v - self.second_step.eval()
def contains_dont_know(self) -> bool:
return self.v_step.contains_dont_know() or (
self.second_step is not None and self.second_step.contains_dont_know())
@dataclass
class CyclicPartsRule(Rule):
"""Apply PartsRule multiple times to integrate exp(x)*sin(x)"""
parts_rules: list[PartsRule]
coefficient: Expr
def eval(self) -> Expr:
result = []
sign = 1
for rule in self.parts_rules:
result.append(sign * rule.u * rule.v_step.eval())
sign *= -1
return Add(*result) / (1 - self.coefficient)
def contains_dont_know(self) -> bool:
return any(substep.contains_dont_know() for substep in self.parts_rules)
@dataclass
class TrigRule(AtomicRule, ABC):
pass
@dataclass
class SinRule(TrigRule):
"""integrate(sin(x), x) -> -cos(x)"""
def eval(self) -> Expr:
return -cos(self.variable)
@dataclass
class CosRule(TrigRule):
"""integrate(cos(x), x) -> sin(x)"""
def eval(self) -> Expr:
return sin(self.variable)
@dataclass
class SecTanRule(TrigRule):
"""integrate(sec(x)*tan(x), x) -> sec(x)"""
def eval(self) -> Expr:
return sec(self.variable)
@dataclass
class CscCotRule(TrigRule):
"""integrate(csc(x)*cot(x), x) -> -csc(x)"""
def eval(self) -> Expr:
return -csc(self.variable)
@dataclass
class Sec2Rule(TrigRule):
"""integrate(sec(x)**2, x) -> tan(x)"""
def eval(self) -> Expr:
return tan(self.variable)
@dataclass
class Csc2Rule(TrigRule):
"""integrate(csc(x)**2, x) -> -cot(x)"""
def eval(self) -> Expr:
return -cot(self.variable)
@dataclass
class HyperbolicRule(AtomicRule, ABC):
pass
@dataclass
class SinhRule(HyperbolicRule):
"""integrate(sinh(x), x) -> cosh(x)"""
def eval(self) -> Expr:
return cosh(self.variable)
@dataclass
class CoshRule(HyperbolicRule):
"""integrate(cosh(x), x) -> sinh(x)"""
def eval(self):
return sinh(self.variable)
@dataclass
class ExpRule(AtomicRule):
"""integrate(a**x, x) -> a**x/ln(a)"""
base: Expr
exp: Expr
def eval(self) -> Expr:
return self.integrand / log(self.base)
@dataclass
class ReciprocalRule(AtomicRule):
"""integrate(1/x, x) -> ln(x)"""
base: Expr
def eval(self) -> Expr:
return log(self.base)
@dataclass
class ArcsinRule(AtomicRule):
"""integrate(1/sqrt(1-x**2), x) -> asin(x)"""
def eval(self) -> Expr:
return asin(self.variable)
@dataclass
class ArcsinhRule(AtomicRule):
"""integrate(1/sqrt(1+x**2), x) -> asin(x)"""
def eval(self) -> Expr:
return asinh(self.variable)
@dataclass
class ReciprocalSqrtQuadraticRule(AtomicRule):
"""integrate(1/sqrt(a+b*x+c*x**2), x) -> log(2*sqrt(c)*sqrt(a+b*x+c*x**2)+b+2*c*x)/sqrt(c)"""
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
return log(2*sqrt(c)*sqrt(a+b*x+c*x**2)+b+2*c*x)/sqrt(c)
@dataclass
class SqrtQuadraticDenomRule(AtomicRule):
"""integrate(poly(x)/sqrt(a+b*x+c*x**2), x)"""
a: Expr
b: Expr
c: Expr
coeffs: list[Expr]
def eval(self) -> Expr:
a, b, c, coeffs, x = self.a, self.b, self.c, self.coeffs.copy(), self.variable
# Integrate poly/sqrt(a+b*x+c*x**2) using recursion.
# coeffs are coefficients of the polynomial.
# Let I_n = x**n/sqrt(a+b*x+c*x**2), then
# I_n = A * x**(n-1)*sqrt(a+b*x+c*x**2) - B * I_{n-1} - C * I_{n-2}
# where A = 1/(n*c), B = (2*n-1)*b/(2*n*c), C = (n-1)*a/(n*c)
# See https://github.com/sympy/sympy/pull/23608 for proof.
result_coeffs = []
coeffs = coeffs.copy()
for i in range(len(coeffs)-2):
n = len(coeffs)-1-i
coeff = coeffs[i]/(c*n)
result_coeffs.append(coeff)
coeffs[i+1] -= (2*n-1)*b/2*coeff
coeffs[i+2] -= (n-1)*a*coeff
d, e = coeffs[-1], coeffs[-2]
s = sqrt(a+b*x+c*x**2)
constant = d-b*e/(2*c)
if constant == 0:
I0 = 0
else:
step = inverse_trig_rule(IntegralInfo(1/s, x), degenerate=False)
I0 = constant*step.eval()
return Add(*(result_coeffs[i]*x**(len(coeffs)-2-i)
for i in range(len(result_coeffs))), e/c)*s + I0
@dataclass
class SqrtQuadraticRule(AtomicRule):
"""integrate(sqrt(a+b*x+c*x**2), x)"""
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
step = sqrt_quadratic_rule(IntegralInfo(self.integrand, self.variable), degenerate=False)
return step.eval()
@dataclass
class AlternativeRule(Rule):
"""Multiple ways to do integration."""
alternatives: list[Rule]
def eval(self) -> Expr:
return self.alternatives[0].eval()
def contains_dont_know(self) -> bool:
return any(substep.contains_dont_know() for substep in self.alternatives)
@dataclass
class DontKnowRule(Rule):
"""Leave the integral as is."""
def eval(self) -> Expr:
return Integral(self.integrand, self.variable)
def contains_dont_know(self) -> bool:
return True
@dataclass
class DerivativeRule(AtomicRule):
"""integrate(f'(x), x) -> f(x)"""
def eval(self) -> Expr:
assert isinstance(self.integrand, Derivative)
variable_count = list(self.integrand.variable_count)
for i, (var, count) in enumerate(variable_count):
if var == self.variable:
variable_count[i] = (var, count - 1)
break
return Derivative(self.integrand.expr, *variable_count)
@dataclass
class RewriteRule(Rule):
"""Rewrite integrand to another form that is easier to handle."""
rewritten: Expr
substep: Rule
def eval(self) -> Expr:
return self.substep.eval()
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
class CompleteSquareRule(RewriteRule):
"""Rewrite a+b*x+c*x**2 to a-b**2/(4*c) + c*(x+b/(2*c))**2"""
pass
@dataclass
class PiecewiseRule(Rule):
subfunctions: Sequence[tuple[Rule, bool | Boolean]]
def eval(self) -> Expr:
return Piecewise(*[(substep.eval(), cond)
for substep, cond in self.subfunctions])
def contains_dont_know(self) -> bool:
return any(substep.contains_dont_know() for substep, _ in self.subfunctions)
@dataclass
class HeavisideRule(Rule):
harg: Expr
ibnd: Expr
substep: Rule
def eval(self) -> Expr:
# If we are integrating over x and the integrand has the form
# Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol)
# then there needs to be continuity at -b/m == ibnd,
# so we subtract the appropriate term.
result = self.substep.eval()
return Heaviside(self.harg) * (result - result.subs(self.variable, self.ibnd))
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
class DiracDeltaRule(AtomicRule):
n: Expr
a: Expr
b: Expr
def eval(self) -> Expr:
n, a, b, x = self.n, self.a, self.b, self.variable
if n == 0:
return Heaviside(a+b*x)/b
return DiracDelta(a+b*x, n-1)/b
@dataclass
class TrigSubstitutionRule(Rule):
theta: Expr
func: Expr
rewritten: Expr
substep: Rule
restriction: bool | Boolean
def eval(self) -> Expr:
theta, func, x = self.theta, self.func, self.variable
func = func.subs(sec(theta), 1/cos(theta))
func = func.subs(csc(theta), 1/sin(theta))
func = func.subs(cot(theta), 1/tan(theta))
trig_function = list(func.find(TrigonometricFunction))
assert len(trig_function) == 1
trig_function = trig_function[0]
relation = solve(x - func, trig_function)
assert len(relation) == 1
numer, denom = fraction(relation[0])
if isinstance(trig_function, sin):
opposite = numer
hypotenuse = denom
adjacent = sqrt(denom**2 - numer**2)
inverse = asin(relation[0])
elif isinstance(trig_function, cos):
adjacent = numer
hypotenuse = denom
opposite = sqrt(denom**2 - numer**2)
inverse = acos(relation[0])
else: # tan
opposite = numer
adjacent = denom
hypotenuse = sqrt(denom**2 + numer**2)
inverse = atan(relation[0])
substitution = [
(sin(theta), opposite/hypotenuse),
(cos(theta), adjacent/hypotenuse),
(tan(theta), opposite/adjacent),
(theta, inverse)
]
return Piecewise(
(self.substep.eval().subs(substitution).trigsimp(), self.restriction)
)
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
class ArctanRule(AtomicRule):
"""integrate(a/(b*x**2+c), x) -> a/b / sqrt(c/b) * atan(x/sqrt(c/b))"""
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
return a/b / sqrt(c/b) * atan(x/sqrt(c/b))
@dataclass
class OrthogonalPolyRule(AtomicRule, ABC):
n: Expr
@dataclass
class JacobiRule(OrthogonalPolyRule):
a: Expr
b: Expr
def eval(self) -> Expr:
n, a, b, x = self.n, self.a, self.b, self.variable
return Piecewise(
(2*jacobi(n + 1, a - 1, b - 1, x)/(n + a + b), Ne(n + a + b, 0)),
(x, Eq(n, 0)),
((a + b + 2)*x**2/4 + (a - b)*x/2, Eq(n, 1)))
@dataclass
class GegenbauerRule(OrthogonalPolyRule):
a: Expr
def eval(self) -> Expr:
n, a, x = self.n, self.a, self.variable
return Piecewise(
(gegenbauer(n + 1, a - 1, x)/(2*(a - 1)), Ne(a, 1)),
(chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)),
(S.Zero, True))
@dataclass
class ChebyshevTRule(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return Piecewise(
((chebyshevt(n + 1, x)/(n + 1) -
chebyshevt(n - 1, x)/(n - 1))/2, Ne(Abs(n), 1)),
(x**2/2, True))
@dataclass
class ChebyshevURule(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return Piecewise(
(chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)),
(S.Zero, True))
@dataclass
class LegendreRule(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return(legendre(n + 1, x) - legendre(n - 1, x))/(2*n + 1)
@dataclass
class HermiteRule(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return hermite(n + 1, x)/(2*(n + 1))
@dataclass
class LaguerreRule(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return laguerre(n, x) - laguerre(n + 1, x)
@dataclass
class AssocLaguerreRule(OrthogonalPolyRule):
a: Expr
def eval(self) -> Expr:
return -assoc_laguerre(self.n + 1, self.a - 1, self.variable)
@dataclass
class IRule(AtomicRule, ABC):
a: Expr
b: Expr
@dataclass
class CiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return cos(b)*Ci(a*x) - sin(b)*Si(a*x)
@dataclass
class ChiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return cosh(b)*Chi(a*x) + sinh(b)*Shi(a*x)
@dataclass
class EiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return exp(b)*Ei(a*x)
@dataclass
class SiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return sin(b)*Ci(a*x) + cos(b)*Si(a*x)
@dataclass
class ShiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return sinh(b)*Chi(a*x) + cosh(b)*Shi(a*x)
@dataclass
class LiRule(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return li(a*x + b)/a
@dataclass
class ErfRule(AtomicRule):
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
if a.is_extended_real:
return Piecewise(
(sqrt(S.Pi/(-a))/2 * exp(c - b**2/(4*a)) *
erf((-2*a*x - b)/(2*sqrt(-a))), a < 0),
(sqrt(S.Pi/a)/2 * exp(c - b**2/(4*a)) *
erfi((2*a*x + b)/(2*sqrt(a))), True))
return sqrt(S.Pi/a)/2 * exp(c - b**2/(4*a)) * \
erfi((2*a*x + b)/(2*sqrt(a)))
@dataclass
class FresnelCRule(AtomicRule):
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
return sqrt(S.Pi/(2*a)) * (
cos(b**2/(4*a) - c)*fresnelc((2*a*x + b)/sqrt(2*a*S.Pi)) +
sin(b**2/(4*a) - c)*fresnels((2*a*x + b)/sqrt(2*a*S.Pi)))
@dataclass
class FresnelSRule(AtomicRule):
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
return sqrt(S.Pi/(2*a)) * (
cos(b**2/(4*a) - c)*fresnels((2*a*x + b)/sqrt(2*a*S.Pi)) -
sin(b**2/(4*a) - c)*fresnelc((2*a*x + b)/sqrt(2*a*S.Pi)))
@dataclass
class PolylogRule(AtomicRule):
a: Expr
b: Expr
def eval(self) -> Expr:
return polylog(self.b + 1, self.a * self.variable)
@dataclass
class UpperGammaRule(AtomicRule):
a: Expr
e: Expr
def eval(self) -> Expr:
a, e, x = self.a, self.e, self.variable
return x**e * (-a*x)**(-e) * uppergamma(e + 1, -a*x)/a
@dataclass
class EllipticFRule(AtomicRule):
a: Expr
d: Expr
def eval(self) -> Expr:
return elliptic_f(self.variable, self.d/self.a)/sqrt(self.a)
@dataclass
class EllipticERule(AtomicRule):
a: Expr
d: Expr
def eval(self) -> Expr:
return elliptic_e(self.variable, self.d/self.a)*sqrt(self.a)
class IntegralInfo(NamedTuple):
integrand: Expr
symbol: Symbol
def manual_diff(f, symbol):
"""Derivative of f in form expected by find_substitutions
SymPy's derivatives for some trig functions (like cot) are not in a form
that works well with finding substitutions; this replaces the
derivatives for those particular forms with something that works better.
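A small doctest-style illustration (compare ``tan(x).diff(x)``, which gives
``tan(x)**2 + 1``):
>>> from sympy import tan, Symbol
>>> from sympy.integrals.manualintegrate import manual_diff
>>> x = Symbol('x')
>>> manual_diff(tan(x), x)
sec(x)**2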
"""
if f.args:
arg = f.args[0]
if isinstance(f, tan):
return arg.diff(symbol) * sec(arg)**2
elif isinstance(f, cot):
return -arg.diff(symbol) * csc(arg)**2
elif isinstance(f, sec):
return arg.diff(symbol) * sec(arg) * tan(arg)
elif isinstance(f, csc):
return -arg.diff(symbol) * csc(arg) * cot(arg)
elif isinstance(f, Add):
return sum([manual_diff(arg, symbol) for arg in f.args])
elif isinstance(f, Mul):
if len(f.args) == 2 and isinstance(f.args[0], Number):
return f.args[0] * manual_diff(f.args[1], symbol)
return f.diff(symbol)
def manual_subs(expr, *args):
"""
A wrapper for `expr.subs(*args)` with additional logic for substitution
of invertible functions.
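For example, substituting ``log(x) -> u`` also rewrites powers of ``x`` in
terms of ``exp(u)`` (a small doctest-style illustration):
>>> from sympy import log, symbols
>>> from sympy.integrals.manualintegrate import manual_subs
>>> x, u = symbols('x u')
>>> manual_subs(x**2, log(x), u)
exp(2*u)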
"""
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, (Dict, Mapping)):
sequence = sequence.items()
elif not iterable(sequence):
raise ValueError("Expected an iterable of (old, new) pairs")
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
new_subs = []
for old, new in sequence:
if isinstance(old, log):
# If log(x) = y, then exp(a*log(x)) = exp(a*y)
# that is, x**a = exp(a*y). Replace nontrivial powers of x
# before subs turns them into `exp(y)**a`, but
# do not replace x itself yet, to avoid `log(exp(y))`.
x0 = old.args[0]
expr = expr.replace(lambda x: x.is_Pow and x.base == x0,
lambda x: exp(x.exp*new))
new_subs.append((x0, exp(new)))
return expr.subs(list(sequence) + new_subs)
# Method based on that of SIN, described in "Symbolic Integration: The
# Stormy Decade"
inverse_trig_functions = (atan, asin, acos, acot, acsc, asec)
def find_substitutions(integrand, symbol, u_var):
results = []
def test_subterm(u, u_diff):
if u_diff == 0:
return False
substituted = integrand / u_diff
debug("substituted: {}, u: {}, u_var: {}".format(substituted, u, u_var))
substituted = manual_subs(substituted, u, u_var).cancel()
if substituted.has_free(symbol):
return False
# avoid increasing the degree of a rational function
if integrand.is_rational_function(symbol) and substituted.is_rational_function(u_var):
deg_before = max([degree(t, symbol) for t in integrand.as_numer_denom()])
deg_after = max([degree(t, u_var) for t in substituted.as_numer_denom()])
if deg_after > deg_before:
return False
return substituted.as_independent(u_var, as_Add=False)
def exp_subterms(term: Expr):
linear_coeffs = []
terms = []
n = Wild('n', properties=[lambda n: n.is_Integer])
for exp_ in term.find(exp):
arg = exp_.args[0]
if symbol not in arg.free_symbols:
continue
match = arg.match(n*symbol)
if match:
linear_coeffs.append(match[n])
else:
terms.append(exp_)
if linear_coeffs:
terms.append(exp(gcd_list(linear_coeffs)*symbol))
return terms
def possible_subterms(term):
if isinstance(term, (TrigonometricFunction, HyperbolicFunction,
*inverse_trig_functions,
exp, log, Heaviside)):
return [term.args[0]]
elif isinstance(term, (chebyshevt, chebyshevu,
legendre, hermite, laguerre)):
return [term.args[1]]
elif isinstance(term, (gegenbauer, assoc_laguerre)):
return [term.args[2]]
elif isinstance(term, jacobi):
return [term.args[3]]
elif isinstance(term, Mul):
r = []
for u in term.args:
r.append(u)
r.extend(possible_subterms(u))
return r
elif isinstance(term, Pow):
r = [arg for arg in term.args if arg.has(symbol)]
if term.exp.is_Integer:
r.extend([term.base**d for d in primefactors(term.exp)
if 1 < d < abs(term.args[1])])
if term.base.is_Add:
r.extend([t for t in possible_subterms(term.base)
if t.is_Pow])
return r
elif isinstance(term, Add):
r = []
for arg in term.args:
r.append(arg)
r.extend(possible_subterms(arg))
return r
return []
for u in list(dict.fromkeys(possible_subterms(integrand) + exp_subterms(integrand))):
if u == symbol:
continue
u_diff = manual_diff(u, symbol)
new_integrand = test_subterm(u, u_diff)
if new_integrand is not False:
constant, new_integrand = new_integrand
if new_integrand == integrand.subs(symbol, u_var):
continue
substitution = (u, constant, new_integrand)
if substitution not in results:
results.append(substitution)
return results
def rewriter(condition, rewrite):
"""Strategy that rewrites an integrand."""
def _rewriter(integral):
integrand, symbol = integral
debug("Integral: {} is rewritten with {} on symbol: {}".format(integrand, rewrite, symbol))
if condition(*integral):
rewritten = rewrite(*integral)
if rewritten != integrand:
substep = integral_steps(rewritten, symbol)
if not isinstance(substep, DontKnowRule) and substep:
return RewriteRule(integrand, symbol, rewritten, substep)
return _rewriter
def proxy_rewriter(condition, rewrite):
"""Strategy that rewrites an integrand based on some other criteria."""
def _proxy_rewriter(criteria):
criteria, integral = criteria
integrand, symbol = integral
debug("Integral: {} is rewritten with {} on symbol: {} and criteria: {}".format(integrand, rewrite, symbol, criteria))
args = criteria + list(integral)
if condition(*args):
rewritten = rewrite(*args)
if rewritten != integrand:
return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol))
return _proxy_rewriter
def multiplexer(conditions):
"""Apply the rule that matches the condition, else None"""
def multiplexer_rl(expr):
for key, rule in conditions.items():
if key(expr):
return rule(expr)
return multiplexer_rl
def alternatives(*rules):
"""Strategy that makes an AlternativeRule out of multiple possible results."""
def _alternatives(integral):
alts = []
count = 0
debug("List of Alternative Rules")
for rule in rules:
count = count + 1
debug("Rule {}: {}".format(count, rule))
result = rule(integral)
if (result and not isinstance(result, DontKnowRule) and
result != integral and result not in alts):
alts.append(result)
if len(alts) == 1:
return alts[0]
elif alts:
doable = [rule for rule in alts if not rule.contains_dont_know()]
if doable:
return AlternativeRule(*integral, doable)
else:
return AlternativeRule(*integral, alts)
return _alternatives
def constant_rule(integral):
return ConstantRule(*integral)
def power_rule(integral):
integrand, symbol = integral
base, expt = integrand.as_base_exp()
if symbol not in expt.free_symbols and isinstance(base, Symbol):
if simplify(expt + 1) == 0:
return ReciprocalRule(integrand, symbol, base)
return PowerRule(integrand, symbol, base, expt)
elif symbol not in base.free_symbols and isinstance(expt, Symbol):
rule = ExpRule(integrand, symbol, base, expt)
if fuzzy_not(log(base).is_zero):
return rule
elif log(base).is_zero:
return ConstantRule(1, symbol)
return PiecewiseRule(integrand, symbol, [
(rule, Ne(log(base), 0)),
(ConstantRule(1, symbol), True)
])
def exp_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], Symbol):
return ExpRule(integrand, symbol, E, integrand.args[0])
def orthogonal_poly_rule(integral):
orthogonal_poly_classes = {
jacobi: JacobiRule,
gegenbauer: GegenbauerRule,
chebyshevt: ChebyshevTRule,
chebyshevu: ChebyshevURule,
legendre: LegendreRule,
hermite: HermiteRule,
laguerre: LaguerreRule,
assoc_laguerre: AssocLaguerreRule
}
orthogonal_poly_var_index = {
jacobi: 3,
gegenbauer: 2,
assoc_laguerre: 2
}
integrand, symbol = integral
for klass in orthogonal_poly_classes:
if isinstance(integrand, klass):
var_index = orthogonal_poly_var_index.get(klass, 1)
if (integrand.args[var_index] is symbol and not
any(v.has(symbol) for v in integrand.args[:var_index])):
return orthogonal_poly_classes[klass](integrand, symbol, *integrand.args[:var_index])
_special_function_patterns: list[tuple[Type, Expr, Callable | None, tuple]] = []
_wilds = []
_symbol = Dummy('x')
def special_function_rule(integral):
integrand, symbol = integral
if not _special_function_patterns:
a = Wild('a', exclude=[_symbol], properties=[lambda x: not x.is_zero])
b = Wild('b', exclude=[_symbol])
c = Wild('c', exclude=[_symbol])
d = Wild('d', exclude=[_symbol], properties=[lambda x: not x.is_zero])
e = Wild('e', exclude=[_symbol], properties=[
lambda x: not (x.is_nonnegative and x.is_integer)])
_wilds.extend((a, b, c, d, e))
# patterns consist of a SymPy class, a wildcard expr, an optional
# condition coded as a lambda (when Wild properties are not enough),
# followed by an applicable rule
linear_pattern = a*_symbol + b
quadratic_pattern = a*_symbol**2 + b*_symbol + c
_special_function_patterns.extend((
(Mul, exp(linear_pattern, evaluate=False)/_symbol, None, EiRule),
(Mul, cos(linear_pattern, evaluate=False)/_symbol, None, CiRule),
(Mul, cosh(linear_pattern, evaluate=False)/_symbol, None, ChiRule),
(Mul, sin(linear_pattern, evaluate=False)/_symbol, None, SiRule),
(Mul, sinh(linear_pattern, evaluate=False)/_symbol, None, ShiRule),
(Pow, 1/log(linear_pattern, evaluate=False), None, LiRule),
(exp, exp(quadratic_pattern, evaluate=False), None, ErfRule),
(sin, sin(quadratic_pattern, evaluate=False), None, FresnelSRule),
(cos, cos(quadratic_pattern, evaluate=False), None, FresnelCRule),
(Mul, _symbol**e*exp(a*_symbol, evaluate=False), None, UpperGammaRule),
(Mul, polylog(b, a*_symbol, evaluate=False)/_symbol, None, PolylogRule),
(Pow, 1/sqrt(a - d*sin(_symbol, evaluate=False)**2),
lambda a, d: a != d, EllipticFRule),
(Pow, sqrt(a - d*sin(_symbol, evaluate=False)**2),
lambda a, d: a != d, EllipticERule),
))
_integrand = integrand.subs(symbol, _symbol)
for type_, pattern, constraint, rule in _special_function_patterns:
if isinstance(_integrand, type_):
match = _integrand.match(pattern)
if match:
wild_vals = tuple(match.get(w) for w in _wilds
if match.get(w) is not None)
if constraint is None or constraint(*wild_vals):
return rule(integrand, symbol, *wild_vals)
def _add_degenerate_step(generic_cond, generic_step: Rule, degenerate_step: Rule | None) -> Rule:
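    # Combine a generic-case rule with a degenerate-case rule into a single
    # PiecewiseRule guarded by ``generic_cond`` (typically Ne(b, 0) or
    # Ne(c, 0)); when there is no degenerate branch, the generic rule is
    # returned unchanged.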
if degenerate_step is None:
return generic_step
if isinstance(generic_step, PiecewiseRule):
subfunctions = [(substep, (cond & generic_cond).simplify())
for substep, cond in generic_step.subfunctions]
else:
subfunctions = [(generic_step, generic_cond)]
if isinstance(degenerate_step, PiecewiseRule):
subfunctions += degenerate_step.subfunctions
else:
subfunctions.append((degenerate_step, S.true))
return PiecewiseRule(generic_step.integrand, generic_step.variable, subfunctions)
def nested_pow_rule(integral: IntegralInfo):
# nested (c*(a+b*x)**d)**e
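    # e.g. sqrt((1 + 2*x)**3) collapses to a single power of the linear base
    # x + a/b, valid when b != 0; the degenerate b == 0 branch (a constant
    # integrand) is attached separately via _add_degenerate_step.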
integrand, x = integral
a_ = Wild('a', exclude=[x])
b_ = Wild('b', exclude=[x, 0])
pattern = a_+b_*x
generic_cond = S.true
class NoMatch(Exception):
pass
def _get_base_exp(expr: Expr) -> tuple[Expr, Expr]:
if not expr.has_free(x):
return S.One, S.Zero
if expr.is_Mul:
_, terms = expr.as_coeff_mul()
if not terms:
return S.One, S.Zero
results = [_get_base_exp(term) for term in terms]
bases = set(b for b, _ in results)
bases.discard(S.One)
if len(bases) == 1:
return bases.pop(), Add(*(e for _, e in results))
raise NoMatch
if expr.is_Pow:
b, e = expr.base, expr.exp # type: ignore
if e.has_free(x):
raise NoMatch
base_, sub_exp = _get_base_exp(b)
return base_, sub_exp * e
match = expr.match(pattern)
if match:
a, b = match[a_], match[b_]
base_ = x + a/b
nonlocal generic_cond
generic_cond = Ne(b, 0)
return base_, S.One
raise NoMatch
try:
base, exp_ = _get_base_exp(integrand)
except NoMatch:
return
if generic_cond is S.true:
degenerate_step = None
else:
        # equivalent to subs(b, 0), but without needing to find b
degenerate_step = ConstantRule(integrand.subs(x, 0), x)
generic_step = NestedPowRule(integrand, x, base, exp_)
return _add_degenerate_step(generic_cond, generic_step, degenerate_step)
def inverse_trig_rule(integral: IntegralInfo, degenerate=True):
"""
Set degenerate=False on recursive call where coefficient of quadratic term
is assumed non-zero.
"""
integrand, symbol = integral
base, exp = integrand.as_base_exp()
a = Wild('a', exclude=[symbol])
b = Wild('b', exclude=[symbol])
c = Wild('c', exclude=[symbol, 0])
match = base.match(a + b*symbol + c*symbol**2)
if not match:
return
def make_inverse_trig(RuleClass, a, sign_a, c, sign_c, h) -> Rule:
u_var = Dummy("u")
rewritten = 1/sqrt(sign_a*a + sign_c*c*(symbol-h)**2) # a>0, c>0
quadratic_base = sqrt(c/a)*(symbol-h)
constant = 1/sqrt(c)
u_func = None
if quadratic_base is not symbol:
u_func = quadratic_base
quadratic_base = u_var
standard_form = 1/sqrt(sign_a + sign_c*quadratic_base**2)
substep = RuleClass(standard_form, quadratic_base)
if constant != 1:
substep = ConstantTimesRule(constant*standard_form, symbol, constant, standard_form, substep)
if u_func is not None:
substep = URule(rewritten, symbol, u_var, u_func, substep)
if h != 0:
substep = CompleteSquareRule(integrand, symbol, rewritten, substep)
return substep
a, b, c = [match.get(i, S.Zero) for i in (a, b, c)]
generic_cond = Ne(c, 0)
if not degenerate or generic_cond is S.true:
degenerate_step = None
elif b.is_zero:
degenerate_step = ConstantRule(a ** exp, symbol)
else:
degenerate_step = sqrt_linear_rule(IntegralInfo((a + b * symbol) ** exp, symbol))
if simplify(2*exp + 1) == 0:
h, k = -b/(2*c), a - b**2/(4*c) # rewrite base to k + c*(symbol-h)**2
non_square_cond = Ne(k, 0)
square_step = None
if non_square_cond is not S.true:
square_step = NestedPowRule(1/sqrt(c*(symbol-h)**2), symbol, symbol-h, S.NegativeOne)
if non_square_cond is S.false:
return square_step
generic_step = ReciprocalSqrtQuadraticRule(integrand, symbol, a, b, c)
step = _add_degenerate_step(non_square_cond, generic_step, square_step)
if k.is_real and c.is_real:
# list of ((rule, base_exp, a, sign_a, b, sign_b), condition)
rules = []
for args, cond in ( # don't apply ArccoshRule to x**2-1
((ArcsinRule, k, 1, -c, -1, h), And(k > 0, c < 0)), # 1-x**2
((ArcsinhRule, k, 1, c, 1, h), And(k > 0, c > 0)), # 1+x**2
):
if cond is S.true:
return make_inverse_trig(*args)
if cond is not S.false:
rules.append((make_inverse_trig(*args), cond))
if rules:
if not k.is_positive: # conditions are not thorough, need fall back rule
rules.append((generic_step, S.true))
step = PiecewiseRule(integrand, symbol, rules)
else:
step = generic_step
return _add_degenerate_step(generic_cond, step, degenerate_step)
if exp == S.Half:
step = SqrtQuadraticRule(integrand, symbol, a, b, c)
return _add_degenerate_step(generic_cond, step, degenerate_step)
def add_rule(integral):
integrand, symbol = integral
results = [integral_steps(g, symbol)
for g in integrand.as_ordered_terms()]
return None if None in results else AddRule(integrand, symbol, results)
def mul_rule(integral: IntegralInfo):
integrand, symbol = integral
# Constant times function case
coeff, f = integrand.as_independent(symbol)
if coeff != 1:
next_step = integral_steps(f, symbol)
if next_step is not None:
return ConstantTimesRule(integrand, symbol, coeff, f, next_step)
def _parts_rule(integrand, symbol) -> tuple[Expr, Expr, Expr, Expr, Rule] | None:
# LIATE rule:
# log, inverse trig, algebraic, trigonometric, exponential
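    # e.g. for x*exp(x) the algebraic factor x is taken as u and exp(x) as dv,
    # since differentiating x simplifies it while exp(x) stays easy to
    # integrate.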
def pull_out_algebraic(integrand):
integrand = integrand.cancel().together()
# iterating over Piecewise args would not work here
algebraic = ([] if isinstance(integrand, Piecewise) or not integrand.is_Mul
else [arg for arg in integrand.args if arg.is_algebraic_expr(symbol)])
if algebraic:
u = Mul(*algebraic)
dv = (integrand / u).cancel()
return u, dv
def pull_out_u(*functions) -> Callable[[Expr], tuple[Expr, Expr] | None]:
def pull_out_u_rl(integrand: Expr) -> tuple[Expr, Expr] | None:
if any(integrand.has(f) for f in functions):
args = [arg for arg in integrand.args
if any(isinstance(arg, cls) for cls in functions)]
if args:
u = Mul(*args)
dv = integrand / u
return u, dv
return None
return pull_out_u_rl
liate_rules = [pull_out_u(log), pull_out_u(*inverse_trig_functions),
pull_out_algebraic, pull_out_u(sin, cos),
pull_out_u(exp)]
dummy = Dummy("temporary")
# we can integrate log(x) and atan(x) by setting dv = 1
if isinstance(integrand, (log, *inverse_trig_functions)):
integrand = dummy * integrand
for index, rule in enumerate(liate_rules):
result = rule(integrand)
if result:
u, dv = result
# Don't pick u to be a constant if possible
if symbol not in u.free_symbols and not u.has(dummy):
return None
u = u.subs(dummy, 1)
dv = dv.subs(dummy, 1)
# Don't pick a non-polynomial algebraic to be differentiated
if rule == pull_out_algebraic and not u.is_polynomial(symbol):
return None
# Don't trade one logarithm for another
if isinstance(u, log):
rec_dv = 1/dv
if (rec_dv.is_polynomial(symbol) and
degree(rec_dv, symbol) == 1):
return None
# Can integrate a polynomial times OrthogonalPolynomial
if rule == pull_out_algebraic:
if dv.is_Derivative or dv.has(TrigonometricFunction) or \
isinstance(dv, OrthogonalPolynomial):
v_step = integral_steps(dv, symbol)
if v_step.contains_dont_know():
return None
else:
du = u.diff(symbol)
v = v_step.eval()
return u, dv, v, du, v_step
# make sure dv is amenable to integration
accept = False
if index < 2: # log and inverse trig are usually worth trying
accept = True
elif (rule == pull_out_algebraic and dv.args and
all(isinstance(a, (sin, cos, exp))
for a in dv.args)):
accept = True
else:
for lrule in liate_rules[index + 1:]:
r = lrule(integrand)
if r and r[0].subs(dummy, 1).equals(dv):
accept = True
break
if accept:
du = u.diff(symbol)
v_step = integral_steps(simplify(dv), symbol)
if not v_step.contains_dont_know():
v = v_step.eval()
return u, dv, v, du, v_step
return None
def parts_rule(integral):
integrand, symbol = integral
constant, integrand = integrand.as_coeff_Mul()
result = _parts_rule(integrand, symbol)
steps = []
if result:
u, dv, v, du, v_step = result
debug("u : {}, dv : {}, v : {}, du : {}, v_step: {}".format(u, dv, v, du, v_step))
steps.append(result)
if isinstance(v, Integral):
return
# Set a limit on the number of times u can be used
if isinstance(u, (sin, cos, exp, sinh, cosh)):
cachekey = u.xreplace({symbol: _cache_dummy})
if _parts_u_cache[cachekey] > 2:
return
_parts_u_cache[cachekey] += 1
# Try cyclic integration by parts a few times
for _ in range(4):
debug("Cyclic integration {} with v: {}, du: {}, integrand: {}".format(_, v, du, integrand))
coefficient = ((v * du) / integrand).cancel()
if coefficient == 1:
break
if symbol not in coefficient.free_symbols:
rule = CyclicPartsRule(integrand, symbol,
[PartsRule(None, None, u, dv, v_step, None)
for (u, dv, v, du, v_step) in steps],
(-1) ** len(steps) * coefficient)
if (constant != 1) and rule:
rule = ConstantTimesRule(constant * integrand, symbol, constant, integrand, rule)
return rule
# _parts_rule is sensitive to constants, factor it out
next_constant, next_integrand = (v * du).as_coeff_Mul()
result = _parts_rule(next_integrand, symbol)
if result:
u, dv, v, du, v_step = result
u *= next_constant
du *= next_constant
steps.append((u, dv, v, du, v_step))
else:
break
def make_second_step(steps, integrand):
if steps:
u, dv, v, du, v_step = steps[0]
return PartsRule(integrand, symbol, u, dv, v_step, make_second_step(steps[1:], v * du))
return integral_steps(integrand, symbol)
if steps:
u, dv, v, du, v_step = steps[0]
rule = PartsRule(integrand, symbol, u, dv, v_step, make_second_step(steps[1:], v * du))
if (constant != 1) and rule:
rule = ConstantTimesRule(constant * integrand, symbol, constant, integrand, rule)
return rule
def trig_rule(integral):
integrand, symbol = integral
if integrand == sin(symbol):
return SinRule(integrand, symbol)
if integrand == cos(symbol):
return CosRule(integrand, symbol)
if integrand == sec(symbol)**2:
return Sec2Rule(integrand, symbol)
if integrand == csc(symbol)**2:
return Csc2Rule(integrand, symbol)
if isinstance(integrand, tan):
rewritten = sin(*integrand.args) / cos(*integrand.args)
elif isinstance(integrand, cot):
rewritten = cos(*integrand.args) / sin(*integrand.args)
elif isinstance(integrand, sec):
arg = integrand.args[0]
rewritten = ((sec(arg)**2 + tan(arg) * sec(arg)) /
(sec(arg) + tan(arg)))
elif isinstance(integrand, csc):
arg = integrand.args[0]
rewritten = ((csc(arg)**2 + cot(arg) * csc(arg)) /
(csc(arg) + cot(arg)))
else:
return
return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol))
def trig_product_rule(integral: IntegralInfo):
integrand, symbol = integral
if integrand == sec(symbol) * tan(symbol):
return SecTanRule(integrand, symbol)
if integrand == csc(symbol) * cot(symbol):
return CscCotRule(integrand, symbol)
def quadratic_denom_rule(integral):
integrand, symbol = integral
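    # Handles three shapes in turn: a/(b*x**2 + c) via ArctanRule (or a log
    # rewrite when c/b < 0), a/(b*x**2 + c*x + d) by substituting away the
    # linear term, and (a*x + b)/(c*x**2 + d*x + e) by splitting the numerator
    # into a multiple of the denominator's derivative plus a constant.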
a = Wild('a', exclude=[symbol])
b = Wild('b', exclude=[symbol])
c = Wild('c', exclude=[symbol])
match = integrand.match(a / (b * symbol ** 2 + c))
if match:
a, b, c = match[a], match[b], match[c]
general_rule = ArctanRule(integrand, symbol, a, b, c)
if b.is_extended_real and c.is_extended_real:
positive_cond = c/b > 0
if positive_cond is S.true:
return general_rule
coeff = a/(2*sqrt(-c)*sqrt(b))
constant = sqrt(-c/b)
r1 = 1/(symbol-constant)
r2 = 1/(symbol+constant)
log_steps = [ReciprocalRule(r1, symbol, symbol-constant),
ConstantTimesRule(-r2, symbol, -1, r2, ReciprocalRule(r2, symbol, symbol+constant))]
rewritten = sub = r1 - r2
negative_step = AddRule(sub, symbol, log_steps)
if coeff != 1:
rewritten = Mul(coeff, sub, evaluate=False)
negative_step = ConstantTimesRule(rewritten, symbol, coeff, sub, negative_step)
negative_step = RewriteRule(integrand, symbol, rewritten, negative_step)
if positive_cond is S.false:
return negative_step
return PiecewiseRule(integrand, symbol, [(general_rule, positive_cond), (negative_step, S.true)])
return general_rule
d = Wild('d', exclude=[symbol])
match2 = integrand.match(a / (b * symbol ** 2 + c * symbol + d))
if match2:
b, c = match2[b], match2[c]
if b.is_zero:
return
u = Dummy('u')
u_func = symbol + c/(2*b)
integrand2 = integrand.subs(symbol, u - c / (2*b))
next_step = integral_steps(integrand2, u)
if next_step:
return URule(integrand2, symbol, u, u_func, next_step)
else:
return
e = Wild('e', exclude=[symbol])
match3 = integrand.match((a* symbol + b) / (c * symbol ** 2 + d * symbol + e))
if match3:
a, b, c, d, e = match3[a], match3[b], match3[c], match3[d], match3[e]
if c.is_zero:
return
denominator = c * symbol**2 + d * symbol + e
const = a/(2*c)
numer1 = (2*c*symbol+d)
numer2 = - const*d + b
u = Dummy('u')
step1 = URule(integrand, symbol,
u, denominator, integral_steps(u**(-1), u))
if const != 1:
step1 = ConstantTimesRule(const*numer1/denominator, symbol,
const, numer1/denominator, step1)
if numer2.is_zero:
return step1
step2 = integral_steps(numer2/denominator, symbol)
substeps = AddRule(integrand, symbol, [step1, step2])
        rewritten = const*numer1/denominator + numer2/denominator
        return RewriteRule(integrand, symbol, rewritten, substeps)
return
def sqrt_linear_rule(integral: IntegralInfo):
"""
Substitute common (a+b*x)**(1/n)
"""
integrand, x = integral
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x, 0])
a0 = b0 = 0
bases, qs, bs = [], [], []
for pow_ in integrand.find(Pow): # collect all (a+b*x)**(p/q)
base, exp_ = pow_.base, pow_.exp
if exp_.is_Integer or x not in base.free_symbols: # skip 1/x and sqrt(2)
continue
if not exp_.is_Rational: # exclude x**pi
return
match = base.match(a+b*x)
if not match: # skip non-linear
continue # for sqrt(x+sqrt(x)), although base is non-linear, we can still substitute sqrt(x)
a1, b1 = match[a], match[b]
if a0*b1 != a1*b0 or not (b0/b1).is_nonnegative: # cannot transform sqrt(x) to sqrt(x+1) or sqrt(-x)
return
if b0 == 0 or (b0/b1 > 1) is S.true: # choose the latter of sqrt(2*x) and sqrt(x) as representative
a0, b0 = a1, b1
bases.append(base)
bs.append(b1)
qs.append(exp_.q)
if b0 == 0: # no such pattern found
return
q0: Integer = lcm_list(qs)
u_x = (a0 + b0*x)**(1/q0)
u = Dummy("u")
substituted = integrand.subs({base**(S.One/q): (b/b0)**(S.One/q)*u**(q0/q)
for base, b, q in zip(bases, bs, qs)}).subs(x, (u**q0-a0)/b0)
substep = integral_steps(substituted*u**(q0-1)*q0/b0, u)
if not substep.contains_dont_know():
step: Rule = URule(integrand, x, u, u_x, substep)
generic_cond = Ne(b0, 0)
if generic_cond is not S.true: # possible degenerate case
simplified = integrand.subs({b: 0 for b in bs})
degenerate_step = integral_steps(simplified, x)
step = PiecewiseRule(integrand, x, [(step, generic_cond), (degenerate_step, S.true)])
return step
def sqrt_quadratic_rule(integral: IntegralInfo, degenerate=True):
integrand, x = integral
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
c = Wild('c', exclude=[x, 0])
f = Wild('f')
n = Wild('n', properties=[lambda n: n.is_Integer and n.is_odd])
match = integrand.match(f*sqrt(a+b*x+c*x**2)**n)
if not match:
return
a, b, c, f, n = match[a], match[b], match[c], match[f], match[n]
f_poly = f.as_poly(x)
if f_poly is None:
return
generic_cond = Ne(c, 0)
if not degenerate or generic_cond is S.true:
degenerate_step = None
elif b.is_zero:
degenerate_step = integral_steps(f*sqrt(a)**n, x)
else:
degenerate_step = sqrt_linear_rule(IntegralInfo(f*sqrt(a+b*x)**n, x))
def sqrt_quadratic_denom_rule(numer_poly: Poly, integrand: Expr):
denom = sqrt(a+b*x+c*x**2)
deg = numer_poly.degree()
if deg <= 1:
# integrand == (d+e*x)/sqrt(a+b*x+c*x**2)
e, d = numer_poly.all_coeffs() if deg == 1 else (S.Zero, numer_poly.as_expr())
# rewrite numerator to A*(2*c*x+b) + B
A = e/(2*c)
B = d-A*b
pre_substitute = (2*c*x+b)/denom
constant_step: Rule | None = None
linear_step: Rule | None = None
if A != 0:
u = Dummy("u")
pow_rule = PowerRule(1/sqrt(u), u, u, -S.Half)
linear_step = URule(pre_substitute, x, u, a+b*x+c*x**2, pow_rule)
if A != 1:
linear_step = ConstantTimesRule(A*pre_substitute, x, A, pre_substitute, linear_step)
if B != 0:
constant_step = inverse_trig_rule(IntegralInfo(1/denom, x), degenerate=False)
if B != 1:
constant_step = ConstantTimesRule(B/denom, x, B, 1/denom, constant_step) # type: ignore
if linear_step and constant_step:
add = Add(A*pre_substitute, B/denom, evaluate=False)
step: Rule | None = RewriteRule(integrand, x, add, AddRule(add, x, [linear_step, constant_step]))
else:
step = linear_step or constant_step
else:
coeffs = numer_poly.all_coeffs()
step = SqrtQuadraticDenomRule(integrand, x, a, b, c, coeffs)
return step
if n > 0: # rewrite poly * sqrt(s)**(2*k-1) to poly*s**k / sqrt(s)
numer_poly = f_poly * (a+b*x+c*x**2)**((n+1)/2)
rewritten = numer_poly.as_expr()/sqrt(a+b*x+c*x**2)
substep = sqrt_quadratic_denom_rule(numer_poly, rewritten)
generic_step = RewriteRule(integrand, x, rewritten, substep)
elif n == -1:
generic_step = sqrt_quadratic_denom_rule(f_poly, integrand)
else:
return # todo: handle n < -1 case
return _add_degenerate_step(generic_cond, generic_step, degenerate_step)
def hyperbolic_rule(integral: tuple[Expr, Symbol]):
integrand, symbol = integral
if isinstance(integrand, HyperbolicFunction) and integrand.args[0] == symbol:
if integrand.func == sinh:
return SinhRule(integrand, symbol)
if integrand.func == cosh:
return CoshRule(integrand, symbol)
u = Dummy('u')
if integrand.func == tanh:
rewritten = sinh(symbol)/cosh(symbol)
return RewriteRule(integrand, symbol, rewritten,
URule(rewritten, symbol, u, cosh(symbol), ReciprocalRule(1/u, u, u)))
if integrand.func == coth:
rewritten = cosh(symbol)/sinh(symbol)
return RewriteRule(integrand, symbol, rewritten,
URule(rewritten, symbol, u, sinh(symbol), ReciprocalRule(1/u, u, u)))
else:
rewritten = integrand.rewrite(tanh)
if integrand.func == sech:
return RewriteRule(integrand, symbol, rewritten,
URule(rewritten, symbol, u, tanh(symbol/2),
ArctanRule(2/(u**2 + 1), u, S(2), S.One, S.One)))
if integrand.func == csch:
return RewriteRule(integrand, symbol, rewritten,
URule(rewritten, symbol, u, tanh(symbol/2),
ReciprocalRule(1/u, u, u)))
@cacheit
def make_wilds(symbol):
a = Wild('a', exclude=[symbol])
b = Wild('b', exclude=[symbol])
m = Wild('m', exclude=[symbol], properties=[lambda n: isinstance(n, Integer)])
n = Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, Integer)])
return a, b, m, n
@cacheit
def sincos_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sin(a*symbol)**m * cos(b*symbol)**n
return pattern, a, b, m, n
@cacheit
def tansec_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = tan(a*symbol)**m * sec(b*symbol)**n
return pattern, a, b, m, n
@cacheit
def cotcsc_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = cot(a*symbol)**m * csc(b*symbol)**n
return pattern, a, b, m, n
@cacheit
def heaviside_pattern(symbol):
m = Wild('m', exclude=[symbol])
b = Wild('b', exclude=[symbol])
g = Wild('g')
pattern = Heaviside(m*symbol + b) * g
return pattern, m, b, g
def uncurry(func):
def uncurry_rl(args):
return func(*args)
return uncurry_rl
def trig_rewriter(rewrite):
def trig_rewriter_rl(args):
a, b, m, n, integrand, symbol = args
rewritten = rewrite(a, b, m, n, integrand, symbol)
if rewritten != integrand:
return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol))
return trig_rewriter_rl
sincos_botheven_condition = uncurry(
lambda a, b, m, n, i, s: m.is_even and n.is_even and
m.is_nonnegative and n.is_nonnegative)
sincos_botheven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (((1 - cos(2*a*symbol)) / 2) ** (m / 2)) *
(((1 + cos(2*b*symbol)) / 2) ** (n / 2)) ))
sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3)
sincos_sinodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - cos(a*symbol)**2)**((m - 1) / 2) *
sin(a*symbol) *
cos(b*symbol) ** n))
sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3)
sincos_cosodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - sin(b*symbol)**2)**((n - 1) / 2) *
cos(b*symbol) *
sin(a*symbol) ** m))
tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
tansec_seceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + tan(b*symbol)**2) ** (n/2 - 1) *
sec(b*symbol)**2 *
tan(a*symbol) ** m ))
tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
tansec_tanodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (sec(a*symbol)**2 - 1) ** ((m - 1) / 2) *
tan(a*symbol) *
sec(b*symbol) ** n ))
tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0)
tan_tansquared = trig_rewriter(
lambda a, b, m, n, i, symbol: ( sec(a*symbol)**2 - 1))
cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
cotcsc_csceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + cot(b*symbol)**2) ** (n/2 - 1) *
csc(b*symbol)**2 *
cot(a*symbol) ** m ))
cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
cotcsc_cotodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (csc(a*symbol)**2 - 1) ** ((m - 1) / 2) *
cot(a*symbol) *
csc(b*symbol) ** n ))
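# The trig_*_rule strategies below match sin/cos, tan/sec and cot/csc power
# products and dispatch on exponent parity: even sin/cos powers use the
# half-angle identities, an even sec or csc power keeps one squared factor and
# rewrites the rest via 1 + tan**2 = sec**2 (or 1 + cot**2 = csc**2), and an
# odd power peels off one factor and applies the matching Pythagorean identity.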
def trig_sincos_rule(integral):
integrand, symbol = integral
if any(integrand.has(f) for f in (sin, cos)):
pattern, a, b, m, n = sincos_pattern(symbol)
match = integrand.match(pattern)
if not match:
return
return multiplexer({
sincos_botheven_condition: sincos_botheven,
sincos_sinodd_condition: sincos_sinodd,
sincos_cosodd_condition: sincos_cosodd
})(tuple(
[match.get(i, S.Zero) for i in (a, b, m, n)] +
[integrand, symbol]))
def trig_tansec_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / cos(symbol): sec(symbol)
})
if any(integrand.has(f) for f in (tan, sec)):
pattern, a, b, m, n = tansec_pattern(symbol)
match = integrand.match(pattern)
if not match:
return
return multiplexer({
tansec_tanodd_condition: tansec_tanodd,
tansec_seceven_condition: tansec_seceven,
tan_tansquared_condition: tan_tansquared
})(tuple(
[match.get(i, S.Zero) for i in (a, b, m, n)] +
[integrand, symbol]))
def trig_cotcsc_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / sin(symbol): csc(symbol),
1 / tan(symbol): cot(symbol),
cos(symbol) / tan(symbol): cot(symbol)
})
if any(integrand.has(f) for f in (cot, csc)):
pattern, a, b, m, n = cotcsc_pattern(symbol)
match = integrand.match(pattern)
if not match:
return
return multiplexer({
cotcsc_cotodd_condition: cotcsc_cotodd,
cotcsc_csceven_condition: cotcsc_csceven
})(tuple(
[match.get(i, S.Zero) for i in (a, b, m, n)] +
[integrand, symbol]))
def trig_sindouble_rule(integral):
integrand, symbol = integral
a = Wild('a', exclude=[sin(2*symbol)])
match = integrand.match(sin(2*symbol)*a)
if match:
sin_double = 2*sin(symbol)*cos(symbol)/sin(2*symbol)
return integral_steps(integrand * sin_double, symbol)
def trig_powers_products_rule(integral):
return do_one(null_safe(trig_sincos_rule),
null_safe(trig_tansec_rule),
null_safe(trig_cotcsc_rule),
null_safe(trig_sindouble_rule))(integral)
def trig_substitution_rule(integral):
integrand, symbol = integral
A = Wild('a', exclude=[0, symbol])
B = Wild('b', exclude=[0, symbol])
theta = Dummy("theta")
target_pattern = A + B*symbol**2
matches = integrand.find(target_pattern)
for expr in matches:
match = expr.match(target_pattern)
a = match.get(A, S.Zero)
b = match.get(B, S.Zero)
a_positive = ((a.is_number and a > 0) or a.is_positive)
b_positive = ((b.is_number and b > 0) or b.is_positive)
a_negative = ((a.is_number and a < 0) or a.is_negative)
b_negative = ((b.is_number and b < 0) or b.is_negative)
x_func = None
if a_positive and b_positive:
# a**2 + b*x**2. Assume sec(theta) > 0, -pi/2 < theta < pi/2
x_func = (sqrt(a)/sqrt(b)) * tan(theta)
# Do not restrict the domain: tan(theta) takes on any real
# value on the interval -pi/2 < theta < pi/2 so x takes on
# any value
restriction = True
elif a_positive and b_negative:
# a**2 - b*x**2. Assume cos(theta) > 0, -pi/2 < theta < pi/2
constant = sqrt(a)/sqrt(-b)
x_func = constant * sin(theta)
restriction = And(symbol > -constant, symbol < constant)
elif a_negative and b_positive:
# b*x**2 - a**2. Assume sin(theta) > 0, 0 < theta < pi
constant = sqrt(-a)/sqrt(b)
x_func = constant * sec(theta)
restriction = And(symbol > -constant, symbol < constant)
if x_func:
# Manually simplify sqrt(trig(theta)**2) to trig(theta)
# Valid due to assumed domain restriction
substitutions = {}
for f in [sin, cos, tan,
sec, csc, cot]:
substitutions[sqrt(f(theta)**2)] = f(theta)
substitutions[sqrt(f(theta)**(-2))] = 1/f(theta)
replaced = integrand.subs(symbol, x_func).trigsimp()
replaced = manual_subs(replaced, substitutions)
if not replaced.has(symbol):
replaced *= manual_diff(x_func, theta)
replaced = replaced.trigsimp()
secants = replaced.find(1/cos(theta))
if secants:
replaced = replaced.xreplace({
1/cos(theta): sec(theta)
})
substep = integral_steps(replaced, theta)
if not substep.contains_dont_know():
return TrigSubstitutionRule(integrand, symbol,
theta, x_func, replaced, substep, restriction)
def heaviside_rule(integral):
integrand, symbol = integral
pattern, m, b, g = heaviside_pattern(symbol)
match = integrand.match(pattern)
if match and 0 != match[g]:
# f = Heaviside(m*x + b)*g
substep = integral_steps(match[g], symbol)
m, b = match[m], match[b]
return HeavisideRule(integrand, symbol, m*symbol + b, -b/m, substep)
def dirac_delta_rule(integral: IntegralInfo):
integrand, x = integral
if len(integrand.args) == 1:
n = S.Zero
else:
n = integrand.args[1]
if not n.is_Integer or n < 0:
return
a, b = Wild('a', exclude=[x]), Wild('b', exclude=[x, 0])
match = integrand.args[0].match(a+b*x)
if not match:
return
a, b = match[a], match[b]
generic_cond = Ne(b, 0)
if generic_cond is S.true:
degenerate_step = None
else:
degenerate_step = ConstantRule(DiracDelta(a, n), x)
generic_step = DiracDeltaRule(integrand, x, n, a, b)
return _add_degenerate_step(generic_cond, generic_step, degenerate_step)
def substitution_rule(integral):
integrand, symbol = integral
u_var = Dummy("u")
substitutions = find_substitutions(integrand, symbol, u_var)
count = 0
if substitutions:
debug("List of Substitution Rules")
ways = []
for u_func, c, substituted in substitutions:
subrule = integral_steps(substituted, u_var)
count = count + 1
debug("Rule {}: {}".format(count, subrule))
if subrule.contains_dont_know():
continue
if simplify(c - 1) != 0:
_, denom = c.as_numer_denom()
if subrule:
subrule = ConstantTimesRule(c * substituted, u_var, c, substituted, subrule)
if denom.free_symbols:
piecewise = []
could_be_zero = []
if isinstance(denom, Mul):
could_be_zero = denom.args
else:
could_be_zero.append(denom)
for expr in could_be_zero:
if not fuzzy_not(expr.is_zero):
substep = integral_steps(manual_subs(integrand, expr, 0), symbol)
if substep:
piecewise.append((
substep,
Eq(expr, 0)
))
piecewise.append((subrule, True))
subrule = PiecewiseRule(substituted, symbol, piecewise)
ways.append(URule(integrand, symbol, u_var, u_func, subrule))
if len(ways) > 1:
return AlternativeRule(integrand, symbol, ways)
elif ways:
return ways[0]
partial_fractions_rule = rewriter(
lambda integrand, symbol: integrand.is_rational_function(),
lambda integrand, symbol: integrand.apart(symbol))
cancel_rule = rewriter(
# lambda integrand, symbol: integrand.is_algebraic_expr(),
# lambda integrand, symbol: isinstance(integrand, Mul),
lambda integrand, symbol: True,
lambda integrand, symbol: integrand.cancel())
distribute_expand_rule = rewriter(
lambda integrand, symbol: (
all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args)
or isinstance(integrand, Pow)
or isinstance(integrand, Mul)),
lambda integrand, symbol: integrand.expand())
trig_expand_rule = rewriter(
# If there are trig functions with different arguments, expand them
lambda integrand, symbol: (
len({a.args[0] for a in integrand.atoms(TrigonometricFunction)}) > 1),
lambda integrand, symbol: integrand.expand(trig=True))
def derivative_rule(integral):
integrand = integral[0]
diff_variables = integrand.variables
undifferentiated_function = integrand.expr
integrand_variables = undifferentiated_function.free_symbols
if integral.symbol in integrand_variables:
if integral.symbol in diff_variables:
return DerivativeRule(*integral)
else:
return DontKnowRule(integrand, integral.symbol)
else:
return ConstantRule(*integral)
def rewrites_rule(integral):
integrand, symbol = integral
if integrand.match(1/cos(symbol)):
rewritten = integrand.subs(1/cos(symbol), sec(symbol))
return RewriteRule(integrand, symbol, rewritten, integral_steps(rewritten, symbol))
def fallback_rule(integral):
return DontKnowRule(*integral)
# Cache is used to break cyclic integrals.
# Need to use the same dummy variable in cached expressions for them to match.
# Also record "u" of integration by parts, to avoid infinite repetition.
_integral_cache: dict[Expr, Expr | None] = {}
_parts_u_cache: dict[Expr, int] = defaultdict(int)
_cache_dummy = Dummy("z")
def integral_steps(integrand, symbol, **options):
"""Returns the steps needed to compute an integral.
Explanation
===========
This function attempts to mirror what a student would do by hand as
closely as possible.
SymPy Gamma uses this to provide a step-by-step explanation of an
integral. The code it uses to format the results of this function can be
found at
https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py.
Examples
========
>>> from sympy import exp, sin
>>> from sympy.integrals.manualintegrate import integral_steps
>>> from sympy.abc import x
>>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \
# doctest: +NORMALIZE_WHITESPACE
URule(integrand=exp(x)/(exp(2*x) + 1), variable=x, u_var=_u, u_func=exp(x),
substep=ArctanRule(integrand=1/(_u**2 + 1), variable=_u, a=1, b=1, c=1))
>>> print(repr(integral_steps(sin(x), x))) \
# doctest: +NORMALIZE_WHITESPACE
SinRule(integrand=sin(x), variable=x)
>>> print(repr(integral_steps((x**2 + 3)**2, x))) \
# doctest: +NORMALIZE_WHITESPACE
RewriteRule(integrand=(x**2 + 3)**2, variable=x, rewritten=x**4 + 6*x**2 + 9,
substep=AddRule(integrand=x**4 + 6*x**2 + 9, variable=x,
substeps=[PowerRule(integrand=x**4, variable=x, base=x, exp=4),
ConstantTimesRule(integrand=6*x**2, variable=x, constant=6, other=x**2,
substep=PowerRule(integrand=x**2, variable=x, base=x, exp=2)),
ConstantRule(integrand=9, variable=x)]))
Returns
=======
rule : Rule
The first step; most rules have substeps that must also be
considered. These substeps can be evaluated using ``manualintegrate``
to obtain a result.
"""
cachekey = integrand.xreplace({symbol: _cache_dummy})
if cachekey in _integral_cache:
if _integral_cache[cachekey] is None:
# Stop this attempt, because it leads around in a loop
return DontKnowRule(integrand, symbol)
else:
# TODO: This is for future development, as currently
# _integral_cache gets no values other than None
            return (_integral_cache[cachekey].xreplace({_cache_dummy: symbol}),
                    symbol)
else:
_integral_cache[cachekey] = None
integral = IntegralInfo(integrand, symbol)
def key(integral):
integrand = integral.integrand
if symbol not in integrand.free_symbols:
return Number
for cls in (Symbol, TrigonometricFunction, OrthogonalPolynomial):
if isinstance(integrand, cls):
return cls
return type(integrand)
def integral_is_subclass(*klasses):
def _integral_is_subclass(integral):
k = key(integral)
return k and issubclass(k, klasses)
return _integral_is_subclass
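    # Overall strategy: try the special-function table first, then dispatch on
    # the integrand's class (Pow, Symbol, exp, Add, Mul, ...), then fall back
    # to heuristics (rewrites, substitution, partial fractions, cancellation,
    # integration by parts, expansion, trig handling) and finally give up with
    # DontKnowRule.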
result = do_one(
null_safe(special_function_rule),
null_safe(switch(key, {
Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule),
null_safe(sqrt_linear_rule),
null_safe(quadratic_denom_rule)),
Symbol: power_rule,
exp: exp_rule,
Add: add_rule,
Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule),
null_safe(heaviside_rule), null_safe(quadratic_denom_rule),
null_safe(sqrt_linear_rule),
null_safe(sqrt_quadratic_rule)),
Derivative: derivative_rule,
TrigonometricFunction: trig_rule,
Heaviside: heaviside_rule,
DiracDelta: dirac_delta_rule,
OrthogonalPolynomial: orthogonal_poly_rule,
Number: constant_rule
})),
do_one(
null_safe(trig_rule),
null_safe(hyperbolic_rule),
null_safe(alternatives(
rewrites_rule,
substitution_rule,
condition(
integral_is_subclass(Mul, Pow),
partial_fractions_rule),
condition(
integral_is_subclass(Mul, Pow),
cancel_rule),
condition(
integral_is_subclass(Mul, log,
*inverse_trig_functions),
parts_rule),
condition(
integral_is_subclass(Mul, Pow),
distribute_expand_rule),
trig_powers_products_rule,
trig_expand_rule
)),
null_safe(condition(integral_is_subclass(Mul, Pow), nested_pow_rule)),
null_safe(trig_substitution_rule)
),
fallback_rule)(integral)
del _integral_cache[cachekey]
return result
def manualintegrate(f, var):
"""manualintegrate(f, var)
Explanation
===========
Compute indefinite integral of a single variable using an algorithm that
resembles what a student would do by hand.
Unlike :func:`~.integrate`, var can only be a single symbol.
Examples
========
>>> from sympy import sin, cos, tan, exp, log, integrate
>>> from sympy.integrals.manualintegrate import manualintegrate
>>> from sympy.abc import x
>>> manualintegrate(1 / x, x)
log(x)
>>> integrate(1/x)
log(x)
>>> manualintegrate(log(x), x)
x*log(x) - x
>>> integrate(log(x))
x*log(x) - x
>>> manualintegrate(exp(x) / (1 + exp(2 * x)), x)
atan(exp(x))
>>> integrate(exp(x) / (1 + exp(2 * x)))
RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x))))
>>> manualintegrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> manualintegrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> manualintegrate(tan(x), x)
-log(cos(x))
>>> integrate(tan(x), x)
-log(cos(x))
See Also
========
sympy.integrals.integrals.integrate
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
"""
result = integral_steps(f, var).eval()
# Clear the cache of u-parts
_parts_u_cache.clear()
# If we got Piecewise with two parts, put generic first
if isinstance(result, Piecewise) and len(result.args) == 2:
cond = result.args[0][1]
if isinstance(cond, Eq) and result.args[1][1] == True:
result = result.func(
(result.args[1][0], Ne(*cond.args)),
(result.args[0][0], True))
return result
""" Integral Transforms """
from functools import reduce, wraps
from itertools import repeat
from sympy.core import S, pi, I
from sympy.core.add import Add
from sympy.core.function import (AppliedUndef, count_ops, Derivative, expand,
expand_complex, expand_mul, Function, Lambda,
WildFunction)
from sympy.core.mul import Mul
from sympy.core.numbers import igcd, ilcm
from sympy.core.relational import _canonical, Ge, Gt, Lt, Unequality, Eq
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.core.traversal import postorder_traversal
from sympy.functions.combinatorial.factorials import factorial, rf
from sympy.functions.elementary.complexes import (re, arg, Abs, polar_lift,
periodic_argument)
from sympy.functions.elementary.exponential import exp, log, exp_polar
from sympy.functions.elementary.hyperbolic import cosh, coth, sinh, tanh, asinh
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import Max, Min, sqrt
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.trigonometric import cos, cot, sin, tan, atan
from sympy.functions.special.bessel import besseli, besselj, besselk, bessely
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.error_functions import erf, erfc, Ei
from sympy.functions.special.gamma_functions import digamma, gamma, lowergamma
from sympy.functions.special.hyper import meijerg
from sympy.integrals import integrate, Integral
from sympy.integrals.meijerint import _dummy
from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And
from sympy.matrices.matrices import MatrixBase
from sympy.polys.matrices.linsolve import _lin_eq2dict, PolyNonlinearError
from sympy.polys.polyroots import roots
from sympy.polys.polytools import factor, Poly
from sympy.polys.rationaltools import together
from sympy.polys.rootoftools import CRootOf, RootSum
from sympy.utilities.exceptions import (sympy_deprecation_warning,
SymPyDeprecationWarning,
ignore_warnings)
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import debug
##########################################################################
# Helpers / Utilities
##########################################################################
class IntegralTransformError(NotImplementedError):
"""
Exception raised in relation to problems computing transforms.
Explanation
===========
    This class is mostly used internally; if integrals cannot be computed,
    objects representing unevaluated transforms are usually returned.
The hint ``needeval=True`` can be used to disable returning transform
objects, and instead raise this exception if an integral cannot be
computed.
"""
def __init__(self, transform, function, msg):
super().__init__(
"%s Transform could not be computed: %s." % (transform, msg))
self.function = function
class IntegralTransform(Function):
"""
Base class for integral transforms.
Explanation
===========
This class represents unevaluated transforms.
To implement a concrete transform, derive from this class and implement
the ``_compute_transform(f, x, s, **hints)`` and ``_as_integral(f, x, s)``
functions. If the transform cannot be computed, raise :obj:`IntegralTransformError`.
Also set ``cls._name``. For instance,
>>> from sympy import LaplaceTransform
>>> LaplaceTransform._name
'Laplace'
Implement ``self._collapse_extra`` if your function returns more than just a
number and possibly a convergence condition.
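    As a minimal sketch (the class name and kernel below are illustrative,
    not an actual SymPy transform), a concrete subclass might look like::
        class ExpKernelTransform(IntegralTransform):
            _name = 'ExpKernel'
            def _compute_transform(self, f, x, s, **hints):
                return integrate(f*exp(-s*x), (x, S.Zero, S.Infinity))
            def _as_integral(self, f, x, s):
                return Integral(f*exp(-s*x), (x, S.Zero, S.Infinity))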
"""
@property
def function(self):
""" The function to be transformed. """
return self.args[0]
@property
def function_variable(self):
""" The dependent variable of the function to be transformed. """
return self.args[1]
@property
def transform_variable(self):
""" The independent transform variable. """
return self.args[2]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the transform
is evaluated.
"""
return self.function.free_symbols.union({self.transform_variable}) \
- {self.function_variable}
def _compute_transform(self, f, x, s, **hints):
raise NotImplementedError
def _as_integral(self, f, x, s):
raise NotImplementedError
def _collapse_extra(self, extra):
cond = And(*extra)
if cond == False:
raise IntegralTransformError(self.__class__.name, None, '')
return cond
def _try_directly(self, **hints):
T = None
try_directly = not any(func.has(self.function_variable)
for func in self.function.atoms(AppliedUndef))
if try_directly:
try:
T = self._compute_transform(self.function,
self.function_variable, self.transform_variable, **hints)
except IntegralTransformError:
T = None
fn = self.function
if not fn.is_Add:
fn = expand_mul(fn)
return fn, T
def doit(self, **hints):
"""
Try to evaluate the transform in closed form.
Explanation
===========
This general function handles linearity, but apart from that leaves
pretty much everything to _compute_transform.
Standard hints are the following:
- ``simplify``: whether or not to simplify the result
- ``noconds``: if True, do not return convergence conditions
- ``needeval``: if True, raise IntegralTransformError instead of
returning IntegralTransform objects
The default values of these hints depend on the concrete transform,
usually the default is
``(simplify, noconds, needeval) = (True, False, False)``.
"""
needeval = hints.pop('needeval', False)
simplify = hints.pop('simplify', True)
hints['simplify'] = simplify
fn, T = self._try_directly(**hints)
if T is not None:
return T
if fn.is_Add:
hints['needeval'] = needeval
res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints)
for x in fn.args]
extra = []
ress = []
for x in res:
if not isinstance(x, tuple):
x = [x]
ress.append(x[0])
if len(x) == 2:
# only a condition
extra.append(x[1])
elif len(x) > 2:
# some region parameters and a condition (Mellin, Laplace)
extra += [x[1:]]
if simplify==True:
res = Add(*ress).simplify()
else:
res = Add(*ress)
if not extra:
return res
try:
extra = self._collapse_extra(extra)
if iterable(extra):
return tuple([res]) + tuple(extra)
else:
return (res, extra)
except IntegralTransformError:
pass
if needeval:
raise IntegralTransformError(
self.__class__._name, self.function, 'needeval')
# TODO handle derivatives etc
# pull out constant coefficients
coeff, rest = fn.as_coeff_mul(self.function_variable)
return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:])))
@property
def as_integral(self):
return self._as_integral(self.function, self.function_variable,
self.transform_variable)
def _eval_rewrite_as_Integral(self, *args, **kwargs):
return self.as_integral
def _simplify(expr, doit):
if doit:
from sympy.simplify import simplify
from sympy.simplify.powsimp import powdenest
return simplify(powdenest(piecewise_fold(expr), polar=True))
return expr
def _noconds_(default):
"""
This is a decorator generator for dropping convergence conditions.
Explanation
===========
Suppose you define a function ``transform(*args)`` which returns a tuple of
the form ``(result, cond1, cond2, ...)``.
Decorating it ``@_noconds_(default)`` will add a new keyword argument
``noconds`` to it. If ``noconds=True``, the return value will be altered to
be only ``result``, whereas if ``noconds=False`` the return value will not
be altered.
The default value of the ``noconds`` keyword will be ``default`` (i.e. the
argument of this function).
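    An illustrative sketch (``my_transform`` is hypothetical)::
        @_noconds_(False)
        def my_transform(f, x, s):
            ...
            return result, cond
    Calling ``my_transform(f, x, s)`` then returns ``(result, cond)``, while
    ``my_transform(f, x, s, noconds=True)`` returns just ``result``.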
"""
def make_wrapper(func):
@wraps(func)
def wrapper(*args, noconds=default, **kwargs):
res = func(*args, **kwargs)
if noconds:
return res[0]
return res
return wrapper
return make_wrapper
_noconds = _noconds_(False)
##########################################################################
# Mellin Transform
##########################################################################
def _default_integrator(f, x):
return integrate(f, (x, S.Zero, S.Infinity))
@_noconds
def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True):
""" Backend function to compute Mellin transforms. """
# We use a fresh dummy, because assumptions on s might drop conditions on
# convergence of the integral.
s = _dummy('s', 'mellin-transform', f)
F = integrator(x**(s - 1) * f, x)
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), (S.NegativeInfinity, S.Infinity), S.true
if not F.is_Piecewise: # XXX can this work if integration gives continuous result now?
raise IntegralTransformError('Mellin', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Mellin', f, 'integral in unexpected form')
def process_conds(cond):
"""
Turn ``cond`` into a strip (a, b), and auxiliary conditions.
"""
from sympy.solvers.inequalities import _solve_inequality
a = S.NegativeInfinity
b = S.Infinity
aux = S.true
conds = conjuncts(to_cnf(cond))
t = Dummy('t', real=True)
for c in conds:
a_ = S.Infinity
b_ = S.NegativeInfinity
aux_ = []
for d in disjuncts(c):
d_ = d.replace(
re, lambda x: x.as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op in ('==', '!=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op in ('==', '!='):
aux_ += [d]
continue
if soln.lts == t:
b_ = Max(soln.gts, b_)
else:
a_ = Min(soln.lts, a_)
if a_ is not S.Infinity and a_ != b:
a = Max(a_, a)
elif b_ is not S.NegativeInfinity and b_ != a:
b = Min(b_, b)
else:
aux = And(aux, Or(*aux_))
return a, b, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds = [x for x in conds if x[2] != False]
conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2])))
if not conds:
raise IntegralTransformError('Mellin', f, 'no convergence found')
a, b, aux = conds[0]
return _simplify(F.subs(s, s_), simplify), (a, b), aux
class MellinTransform(IntegralTransform):
"""
Class representing unevaluated Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Mellin transforms, see the :func:`mellin_transform`
docstring.
"""
_name = 'Mellin'
def _compute_transform(self, f, x, s, **hints):
return _mellin_transform(f, x, s, **hints)
def _as_integral(self, f, x, s):
return Integral(f*x**(s - 1), (x, S.Zero, S.Infinity))
def _collapse_extra(self, extra):
a = []
b = []
cond = []
for (sa, sb), c in extra:
a += [sa]
b += [sb]
cond += [c]
res = (Max(*a), Min(*b)), And(*cond)
if (res[0][0] >= res[0][1]) == True or res[1] == False:
raise IntegralTransformError(
'Mellin', None, 'no combined convergence.')
return res
def mellin_transform(f, x, s, **hints):
r"""
Compute the Mellin transform `F(s)` of `f(x)`,
.. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x.
For all "sensible" functions, this converges absolutely in a strip
`a < \operatorname{Re}(s) < b`.
Explanation
===========
The Mellin transform is related via change of variables to the Fourier
transform, and also to the (bilateral) Laplace transform.
This function returns ``(F, (a, b), cond)``
where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip
(as above), and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`MellinTransform` object.
For a description of possible hints, refer to the docstring of
    :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
then only `F` will be returned (i.e. not ``cond``, and also not the strip
``(a, b)``).
Examples
========
>>> from sympy import mellin_transform, exp
>>> from sympy.abc import x, s
>>> mellin_transform(exp(-x), x, s)
(gamma(s), (0, oo), True)
See Also
========
inverse_mellin_transform, laplace_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return MellinTransform(f, x, s).doit(**hints)
def _rewrite_sin(m_n, s, a, b):
"""
Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible
with the strip (a, b).
Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``.
Examples
========
>>> from sympy.integrals.transforms import _rewrite_sin
>>> from sympy import pi, S
>>> from sympy.abc import s
>>> _rewrite_sin((pi, 0), s, 0, 1)
(gamma(s), gamma(1 - s), pi)
>>> _rewrite_sin((pi, 0), s, 1, 0)
(gamma(s - 1), gamma(2 - s), -pi)
>>> _rewrite_sin((pi, 0), s, -1, 0)
(gamma(s + 1), gamma(-s), -pi)
>>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2)
(gamma(s - 1/2), gamma(3/2 - s), -pi)
>>> _rewrite_sin((pi, pi), s, 0, 1)
(gamma(s), gamma(1 - s), -pi)
>>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2)
(gamma(2*s), gamma(1 - 2*s), pi)
>>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1)
(gamma(2*s - 1), gamma(2 - 2*s), -pi)
"""
# (This is a separate function because it is moderately complicated,
# and I want to doctest it.)
# We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x).
    # But there is one complication: the gamma functions determine the
    # integration contour in the definition of the G-function. Usually
# it would not matter if this is slightly shifted, unless this way
# we create an undefined function!
# So we try to write this in such a way that the gammas are
# eminently on the right side of the strip.
m, n = m_n
m = expand_mul(m/pi)
n = expand_mul(n/pi)
r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand
return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi
class MellinTransformStripError(ValueError):
"""
Exception raised by _rewrite_gamma. Mainly for internal use.
"""
pass
def _rewrite_gamma(f, s, a, b):
"""
Try to rewrite the product f(s) as a product of gamma functions,
so that the inverse Mellin transform of f can be expressed as a meijer
G function.
Explanation
===========
Return (an, ap), (bm, bq), arg, exp, fac such that
G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s).
Raises IntegralTransformError or MellinTransformStripError on failure.
It is asserted that f has no poles in the fundamental strip designated by
(a, b). One of a and b is allowed to be None. The fundamental strip is
important, because it determines the inversion contour.
This function can handle exponentials, linear factors, trigonometric
functions.
This is a helper function for inverse_mellin_transform that will not
attempt any transformations on f.
Examples
========
>>> from sympy.integrals.transforms import _rewrite_gamma
>>> from sympy.abc import s
>>> from sympy import oo
>>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo)
(([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1)
>>> _rewrite_gamma((s-1)**2, s, -oo, oo)
(([], [1, 1]), ([2, 2], []), 1, 1, 1)
Importance of the fundamental strip:
>>> _rewrite_gamma(1/s, s, 0, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, None, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, 0, None)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, -oo, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, None, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, -oo, None)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(2**(-s+3), s, -oo, oo)
(([], []), ([], []), 1/2, 1, 8)
"""
# Our strategy will be as follows:
# 1) Guess a constant c such that the inversion integral should be
# performed wrt s'=c*s (instead of plain s). Write s for s'.
# 2) Process all factors, rewrite them independently as gamma functions in
# argument s, or exponentials of s.
# 3) Try to transform all gamma functions s.t. they have argument
# a+s or a-s.
# 4) Check that the resulting G function parameters are valid.
# 5) Combine all the exponentials.
a_, b_ = S([a, b])
def left(c, is_numer):
"""
Decide whether pole at c lies to the left of the fundamental strip.
"""
# heuristically, this is the best chance for us to solve the inequalities
c = expand(re(c))
if a_ is None and b_ is S.Infinity:
return True
if a_ is None:
return c < b_
if b_ is None:
return c <= a_
if (c >= b_) == True:
return False
if (c <= a_) == True:
return True
if is_numer:
return None
if a_.free_symbols or b_.free_symbols or c.free_symbols:
return None # XXX
#raise IntegralTransformError('Inverse Mellin', f,
# 'Could not determine position of singularity %s'
# ' relative to fundamental strip' % c)
raise MellinTransformStripError('Pole inside critical strip?')
# 1)
s_multipliers = []
for g in f.atoms(gamma):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff]
for g in f.atoms(sin, cos, tan, cot):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff/pi]
s_multipliers = [Abs(x) if x.is_extended_real else x for x in s_multipliers]
common_coefficient = S.One
for x in s_multipliers:
if not x.is_Rational:
common_coefficient = x
break
s_multipliers = [x/common_coefficient for x in s_multipliers]
if not (all(x.is_Rational for x in s_multipliers) and
common_coefficient.is_extended_real):
raise IntegralTransformError("Gamma", None, "Nonrational multiplier")
s_multiplier = common_coefficient/reduce(ilcm, [S(x.q)
for x in s_multipliers], S.One)
if s_multiplier == common_coefficient:
if len(s_multipliers) == 0:
s_multiplier = common_coefficient
else:
s_multiplier = common_coefficient \
*reduce(igcd, [S(x.p) for x in s_multipliers])
f = f.subs(s, s/s_multiplier)
fac = S.One/s_multiplier
exponent = S.One/s_multiplier
if a_ is not None:
a_ *= s_multiplier
if b_ is not None:
b_ *= s_multiplier
# 2)
numer, denom = f.as_numer_denom()
numer = Mul.make_args(numer)
denom = Mul.make_args(denom)
args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False)))
facs = []
dfacs = []
# *_gammas will contain pairs (a, c) representing Gamma(a*s + c)
numer_gammas = []
denom_gammas = []
# exponentials will contain bases for exponentials of s
exponentials = []
def exception(fact):
return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact)
while args:
fact, is_numer = args.pop()
if is_numer:
ugammas, lgammas = numer_gammas, denom_gammas
ufacs = facs
else:
ugammas, lgammas = denom_gammas, numer_gammas
ufacs = dfacs
def linear_arg(arg):
""" Test if arg is of form a*s+b, raise exception if not. """
if not arg.is_polynomial(s):
raise exception(fact)
p = Poly(arg, s)
if p.degree() != 1:
raise exception(fact)
return p.all_coeffs()
# constants
if not fact.has(s):
ufacs += [fact]
# exponentials
elif fact.is_Pow or isinstance(fact, exp):
if fact.is_Pow:
base = fact.base
exp_ = fact.exp
else:
base = exp_polar(1)
exp_ = fact.exp
if exp_.is_Integer:
cond = is_numer
if exp_ < 0:
cond = not cond
args += [(base, cond)]*Abs(exp_)
continue
elif not base.has(s):
a, b = linear_arg(exp_)
if not is_numer:
base = 1/base
exponentials += [base**a]
facs += [base**b]
else:
raise exception(fact)
# linear factors
elif fact.is_polynomial(s):
p = Poly(fact, s)
if p.degree() != 1:
# We completely factor the poly. For this we need the roots.
# Now roots() only works in some cases (low degree), and CRootOf
# only works without parameters. So try both...
coeff = p.LT()[1]
rs = roots(p, s)
if len(rs) != p.degree():
rs = CRootOf.all_roots(p)
ufacs += [coeff]
args += [(s - c, is_numer) for c in rs]
continue
a, c = p.all_coeffs()
ufacs += [a]
c /= -a
# Now need to convert s - c
if left(c, is_numer):
ugammas += [(S.One, -c + 1)]
lgammas += [(S.One, -c)]
else:
ufacs += [-1]
ugammas += [(S.NegativeOne, c + 1)]
lgammas += [(S.NegativeOne, c)]
elif isinstance(fact, gamma):
a, b = linear_arg(fact.args[0])
if is_numer:
if (a > 0 and (left(-b/a, is_numer) == False)) or \
(a < 0 and (left(-b/a, is_numer) == True)):
raise NotImplementedError(
'Gammas partially over the strip.')
ugammas += [(a, b)]
elif isinstance(fact, sin):
# We try to re-write all trigs as gammas. This is not in
# general the best strategy, since sometimes this is impossible,
# but rewriting as exponentials would work. However trig functions
# in inverse mellin transforms usually all come from simplifying
# gamma terms, so this should work.
a = fact.args[0]
if is_numer:
# No problem with the poles.
gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi
else:
gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_)
args += [(gamma1, not is_numer), (gamma2, not is_numer)]
ufacs += [fac_]
elif isinstance(fact, tan):
a = fact.args[0]
args += [(sin(a, evaluate=False), is_numer),
(sin(pi/2 - a, evaluate=False), not is_numer)]
elif isinstance(fact, cos):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer)]
elif isinstance(fact, cot):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer),
(sin(a, evaluate=False), not is_numer)]
else:
raise exception(fact)
fac *= Mul(*facs)/Mul(*dfacs)
# 3)
an, ap, bm, bq = [], [], [], []
for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True),
(denom_gammas, bq, ap, False)]:
while gammas:
a, c = gammas.pop()
if a != -1 and a != +1:
# We use the gamma function multiplication theorem.
p = Abs(S(a))
newa = a/p
newc = c/p
if not a.is_Integer:
raise TypeError("a is not an integer")
for k in range(p):
gammas += [(newa, newc + k/p)]
if is_numer:
fac *= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**a]
else:
fac /= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**(-a)]
continue
if a == +1:
plus.append(1 - c)
else:
minus.append(c)
# 4)
# TODO
# 5)
arg = Mul(*exponentials)
# for testability, sort the arguments
an.sort(key=default_sort_key)
ap.sort(key=default_sort_key)
bm.sort(key=default_sort_key)
bq.sort(key=default_sort_key)
return (an, ap), (bm, bq), arg, exponent, fac
@_noconds_(True)
def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False):
""" A helper for the real inverse_mellin_transform function, this one here
assumes x to be real and positive. """
x = _dummy('t', 'inverse-mellin-transform', F, positive=True)
# Actually, we won't try integration at all. Instead we use the definition
# of the Meijer G function as a fairly general inverse mellin transform.
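# For example, F = gamma(s) on the strip (0, oo) is matched to a single
# G-function, which hyperexpand then turns into exp(-x) (cf. the doctest in
# inverse_mellin_transform below).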
F = F.rewrite(gamma)
for g in [factor(F), expand_mul(F), expand(F)]:
if g.is_Add:
# do all terms separately
ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg,
noconds=False)
for G in g.args]
conds = [p[1] for p in ress]
ress = [p[0] for p in ress]
res = Add(*ress)
if not as_meijerg:
res = factor(res, gens=res.atoms(Heaviside))
return res.subs(x, x_), And(*conds)
try:
a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1])
except IntegralTransformError:
continue
try:
G = meijerg(a, b, C/x**e)
except ValueError:
continue
if as_meijerg:
h = G
else:
try:
from sympy.simplify import hyperexpand
h = hyperexpand(G)
except NotImplementedError:
raise IntegralTransformError(
'Inverse Mellin', F, 'Could not calculate integral')
if h.is_Piecewise and len(h.args) == 3:
# XXX we break modularity here!
h = Heaviside(x - Abs(C))*h.args[0].args[0] \
+ Heaviside(Abs(C) - x)*h.args[1].args[0]
# We must ensure that the integral along the line we want converges,
# and return that value.
# See [L], 5.2
cond = [Abs(arg(G.argument)) < G.delta*pi]
# Note: we allow ">=" here, this corresponds to convergence if we let
# limits go to oo symmetrically. ">" corresponds to absolute convergence.
cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1),
Abs(arg(G.argument)) == G.delta*pi)]
cond = Or(*cond)
if cond == False:
raise IntegralTransformError(
'Inverse Mellin', F, 'does not converge')
return (h*fac).subs(x, x_), cond
raise IntegralTransformError('Inverse Mellin', F, '')
_allowed = None
class InverseMellinTransform(IntegralTransform):
"""
Class representing unevaluated inverse Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Mellin transforms, see the
:func:`inverse_mellin_transform` docstring.
"""
_name = 'Inverse Mellin'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, a, b, **opts):
if a is None:
a = InverseMellinTransform._none_sentinel
if b is None:
b = InverseMellinTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)
@property
def fundamental_strip(self):
a, b = self.args[3], self.args[4]
if a is InverseMellinTransform._none_sentinel:
a = None
if b is InverseMellinTransform._none_sentinel:
b = None
return a, b
def _compute_transform(self, F, s, x, **hints):
# IntegralTransform's doit will cause this hint to exist, but
# InverseMellinTransform should ignore it
hints.pop('simplify', True)
global _allowed
if _allowed is None:
_allowed = {
exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
factorial, rf}
for f in postorder_traversal(F):
if f.is_Function and f.has(s) and f.func not in _allowed:
raise IntegralTransformError('Inverse Mellin', F,
'Component %s not recognised.' % f)
strip = self.fundamental_strip
return _inverse_mellin_transform(F, s, x, strip, **hints)
def _as_integral(self, F, s, x):
c = self.__class__._c
return Integral(F*x**(-s), (s, c - S.ImaginaryUnit*S.Infinity, c +
S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit)
def inverse_mellin_transform(F, s, x, strip, **hints):
r"""
Compute the inverse Mellin transform of `F(s)` over the fundamental
strip given by ``strip=(a, b)``.
Explanation
===========
This can be defined as
.. math:: f(x) = \frac{1}{2\pi i} \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,
for any `c` in the fundamental strip. Under certain regularity
conditions on `F` and/or `f`,
this recovers `f` from its Mellin transform `F`
(and vice versa), for positive real `x`.
One of `a` or `b` may be passed as ``None``; a suitable `c` will be
inferred.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseMellinTransform` object.
Note that this function will assume x to be positive and real, regardless
of the SymPy assumptions!
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Examples
========
>>> from sympy import inverse_mellin_transform, oo, gamma
>>> from sympy.abc import x, s
>>> inverse_mellin_transform(gamma(s), s, x, (0, oo))
exp(-x)
The fundamental strip matters:
>>> f = 1/(s**2 - 1)
>>> inverse_mellin_transform(f, s, x, (-oo, -1))
x*(1 - 1/x**2)*Heaviside(x - 1)/2
>>> inverse_mellin_transform(f, s, x, (-1, 1))
-x*Heaviside(1 - x)/2 - Heaviside(x - 1)/(2*x)
>>> inverse_mellin_transform(f, s, x, (1, oo))
(1/2 - x**2/2)*Heaviside(1 - x)/x
See Also
========
mellin_transform
hankel_transform, inverse_hankel_transform
"""
return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints)
##########################################################################
# Laplace Transform
##########################################################################
def _simplifyconds(expr, s, a):
r"""
Naively simplify some conditions occurring in ``expr``, given that `\operatorname{Re}(s) > a`.
Examples
========
>>> from sympy.integrals.transforms import _simplifyconds as simp
>>> from sympy.abc import x
>>> from sympy import sympify as S
>>> simp(abs(x**2) < 1, x, 1)
False
>>> simp(abs(x**2) < 1, x, 2)
False
>>> simp(abs(x**2) < 1, x, 0)
Abs(x**2) < 1
>>> simp(abs(1/x**2) < 1, x, 1)
True
>>> simp(S(1) < abs(x), x, 1)
True
>>> simp(S(1) < abs(1/x), x, 1)
False
>>> from sympy import Ne
>>> simp(Ne(1, x**3), x, 1)
True
>>> simp(Ne(1, x**3), x, 2)
True
>>> simp(Ne(1, x**3), x, 0)
Ne(1, x**3)
"""
def power(ex):
if ex == s:
return 1
if ex.is_Pow and ex.base == s:
return ex.exp
return None
def bigger(ex1, ex2):
""" Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|.
Else return None. """
if ex1.has(s) and ex2.has(s):
return None
if isinstance(ex1, Abs):
ex1 = ex1.args[0]
if isinstance(ex2, Abs):
ex2 = ex2.args[0]
if ex1.has(s):
return bigger(1/ex2, 1/ex1)
n = power(ex2)
if n is None:
return None
try:
if n > 0 and (Abs(ex1) <= Abs(a)**n) == True:
return False
if n < 0 and (Abs(ex1) >= Abs(a)**n) == True:
return True
except TypeError:
pass
def replie(x, y):
""" simplify x < y """
if not (x.is_positive or isinstance(x, Abs)) \
or not (y.is_positive or isinstance(y, Abs)):
return (x < y)
r = bigger(x, y)
if r is not None:
return not r
return (x < y)
def replue(x, y):
b = bigger(x, y)
if b in (True, False):
return True
return Unequality(x, y)
def repl(ex, *args):
if ex in (True, False):
return bool(ex)
return ex.replace(*args)
from sympy.simplify.radsimp import collect_abs
expr = collect_abs(expr)
expr = repl(expr, Lt, replie)
expr = repl(expr, Gt, lambda x, y: replie(y, x))
expr = repl(expr, Unequality, replue)
return S(expr)
def expand_dirac_delta(expr):
"""
Expand an expression involving DiracDelta to get it as a linear
combination of DiracDelta functions.
"""
return _lin_eq2dict(expr, expr.atoms(DiracDelta))
@_noconds
def _laplace_transform(f, t, s_, simplify=True):
""" The backend function for Laplace transforms.
This backend assumes that the frontend has already split sums
such that `f` is not an addition anymore.
"""
s = Dummy('s')
a = Wild('a', exclude=[t])
deltazero = []
deltanonzero = []
try:
integratable, deltadict = expand_dirac_delta(f)
except PolyNonlinearError:
raise IntegralTransformError(
'Laplace', f, 'could not expand DiracDelta expressions')
for dirac_func, dirac_coeff in deltadict.items():
p = dirac_func.match(DiracDelta(a*t))
if p:
deltazero.append(dirac_coeff.subs(t,0)/p[a])
else:
if dirac_func.args[0].subs(t,0).is_zero:
raise IntegralTransformError('Laplace', f,\
'not implemented yet.')
else:
deltanonzero.append(dirac_func*dirac_coeff)
F = Add(integrate(exp(-s*t) * Add(integratable, *deltanonzero),
(t, S.Zero, S.Infinity)),
Add(*deltazero))
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), S.NegativeInfinity, S.true
if not F.is_Piecewise:
raise IntegralTransformError(
'Laplace', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Laplace', f, 'integral in unexpected form')
def process_conds(conds):
""" Turn ``conds`` into a strip and auxiliary conditions. """
from sympy.solvers.inequalities import _solve_inequality
a = S.NegativeInfinity
aux = S.true
conds = conjuncts(to_cnf(conds))
p, q, w1, w2, w3, w4, w5 = symbols(
'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s])
patterns = (
p*Abs(arg((s + w3)*q)) < w2,
p*Abs(arg((s + w3)*q)) <= w2,
Abs(periodic_argument((s + w3)**p*q, w1)) < w2,
Abs(periodic_argument((s + w3)**p*q, w1)) <= w2,
Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) < w2,
Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) <= w2)
for c in conds:
a_ = S.Infinity
aux_ = []
for d in disjuncts(c):
if d.is_Relational and s in d.rhs.free_symbols:
d = d.reversed
if d.is_Relational and isinstance(d, (Ge, Gt)):
d = d.reversedsign
for pat in patterns:
m = d.match(pat)
if m:
break
if m:
if m[q].is_positive and m[w2]/m[p] == pi/2:
d = -re(s + m[w3]) < 0
m = d.match(p - cos(w1*Abs(arg(s*w5))*w2)*Abs(s**w3)**w4 < 0)
if not m:
m = d.match(
cos(p - Abs(periodic_argument(s**w1*w5, q))*w2)*Abs(s**w3)**w4 < 0)
if not m:
m = d.match(
p - cos(Abs(periodic_argument(polar_lift(s)**w1*w5, q))*w2
)*Abs(s**w3)**w4 < 0)
if m and all(m[wild].is_positive for wild in [w1, w2, w3, w4, w5]):
d = re(s) > m[p]
d_ = d.replace(
re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op in ('==', '!=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op in ('==', '!='):
aux_ += [d]
continue
if soln.lts == t:
raise IntegralTransformError('Laplace', f,
'convergence not in half-plane?')
else:
a_ = Min(soln.lts, a_)
if a_ is not S.Infinity:
a = Max(a_, a)
else:
aux = And(aux, Or(*aux_))
return a, aux.canonical if aux.is_Relational else aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds2 = [x for x in conds if x[1] != False and x[0] is not S.NegativeInfinity]
if not conds2:
conds2 = [x for x in conds if x[1] != False]
conds = list(ordered(conds2))
def cnt(expr):
if expr in (True, False):
return 0
return expr.count_ops()
conds.sort(key=lambda x: (-x[0], cnt(x[1])))
if not conds:
raise IntegralTransformError('Laplace', f, 'no convergence found')
a, aux = conds[0] # XXX is [0] always the right one?
def sbs(expr):
return expr.subs(s, s_)
if simplify:
F = _simplifyconds(F, s, a)
aux = _simplifyconds(aux, s, a)
return _simplify(F.subs(s, s_), simplify), sbs(a), _canonical(sbs(aux))
def _laplace_deep_collect(f, t):
"""
This is an internal helper function that traverses through the expression
tree of `f(t)` and collects arguments. The purpose is that
anything like `f(w*t-1*t-c)` will be written as `f((w-1)*t-c)` such that
it can match `f(a*t+b)`.
"""
func = f.func
args = list(f.args)
if len(f.args) == 0:
return f
else:
args = [_laplace_deep_collect(arg, t) for arg in args]
if func.is_Add:
return func(*args).collect(t)
else:
return func(*args)
def _laplace_build_rules(t, s):
"""
This is an internal helper function that returns the table of Laplace
transform rules in terms of the time variable `t` and the frequency
variable `s`. It is used by `_laplace_apply_rules`.
"""
a = Wild('a', exclude=[t])
b = Wild('b', exclude=[t])
n = Wild('n', exclude=[t])
tau = Wild('tau', exclude=[t])
omega = Wild('omega', exclude=[t])
dco = lambda f: _laplace_deep_collect(f,t)
laplace_transform_rules = [
# ( time domain,
# laplace domain,
# condition, convergence plane, preparation function )
#
# Catch constant (would otherwise be treated by 2.12)
(a, a/s, S.true, S.Zero, dco),
# DiracDelta rules
(DiracDelta(a*t-b),
exp(-s*b/a)/Abs(a),
Or(And(a>0, b>=0), And(a<0, b<=0)), S.Zero, dco),
(DiracDelta(a*t-b),
S(0),
Or(And(a<0, b>=0), And(a>0, b<=0)), S.Zero, dco),
# Rules from http://eqworld.ipmnet.ru/en/auxiliary/inttrans/
# 2.1
(1,
1/s,
S.true, S.Zero, dco),
# 2.2 expressed in terms of Heaviside
(Heaviside(a*t-b),
exp(-s*b/a)/s,
And(a>0, b>0), S.Zero, dco),
(Heaviside(a*t-b),
(1-exp(-s*b/a))/s,
And(a<0, b<0), S.Zero, dco),
(Heaviside(a*t-b),
1/s,
And(a>0, b<=0), S.Zero, dco),
(Heaviside(a*t-b),
0,
And(a<0, b>0), S.Zero, dco),
# 2.3
(t,
1/s**2,
S.true, S.Zero, dco),
# 2.4
(1/(a*t+b),
-exp(-b/a*s)*Ei(-b/a*s)/a,
a>0, S.Zero, dco),
# 2.5 and 2.6 are covered by 2.11
# 2.7
(1/sqrt(a*t+b),
sqrt(a*pi/s)*exp(b/a*s)*erfc(sqrt(b/a*s))/a,
a>0, S.Zero, dco),
# 2.8
(sqrt(t)/(t+b),
sqrt(pi/s)-pi*sqrt(b)*exp(b*s)*erfc(sqrt(b*s)),
S.true, S.Zero, dco),
# 2.9
((a*t+b)**(-S(3)/2),
2*b**(-S(1)/2)-2*(pi*s/a)**(S(1)/2)*exp(b/a*s)*erfc(sqrt(b/a*s))/a,
a>0, S.Zero, dco),
# 2.10
(t**(S(1)/2)*(t+a)**(-1),
(pi/s)**(S(1)/2)-pi*a**(S(1)/2)*exp(a*s)*erfc(sqrt(a*s)),
S.true, S.Zero, dco),
# 2.11
(1/(a*sqrt(t) + t**(S(3)/2)),
pi*a**(-S(1)/2)*exp(a*s)*erfc(sqrt(a*s)),
S.true, S.Zero, dco),
# 2.12
(t**n,
gamma(n+1)/s**(n+1),
n>-1, S.Zero, dco),
# 2.13
((a*t+b)**n,
lowergamma(n+1, b/a*s)*exp(-b/a*s)/s**(n+1)/a,
And(n>-1, a>0), S.Zero, dco),
# 2.14
(t**n/(t+a),
a**n*gamma(n+1)*lowergamma(-n,a*s),
n>-1, S.Zero, dco),
# 3.1
(exp(a*t-tau),
exp(-tau)/(s-a),
S.true, a, dco),
# 3.2
(t*exp(a*t-tau),
exp(-tau)/(s-a)**2,
S.true, a, dco),
# 3.3
(t**n*exp(a*t),
gamma(n+1)/(s-a)**(n+1),
n>-1, a, dco),
# 3.4 and 3.5 cannot be covered here because they are
# sums and only the individual sum terms will get here.
# 3.6
(exp(-a*t**2),
sqrt(pi/4/a)*exp(s**2/4/a)*erfc(s/sqrt(4*a)),
a>0, S.Zero, dco),
# 3.7
(t*exp(-a*t**2),
1/(2*a)-2/sqrt(pi)/(4*a)**(S(3)/2)*s*erfc(s/sqrt(4*a)),
S.true, S.Zero, dco),
# 3.8
(exp(-a/t),
2*sqrt(a/s)*besselk(1, 2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.9
(sqrt(t)*exp(-a/t),
S(1)/2*sqrt(pi/s**3)*(1+2*sqrt(a*s))*exp(-2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.10
(exp(-a/t)/sqrt(t),
sqrt(pi/s)*exp(-2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.11
(exp(-a/t)/(t*sqrt(t)),
sqrt(pi/a)*exp(-2*sqrt(a*s)),
a>0, S.Zero, dco),
# 3.12
(t**n*exp(-a/t),
2*(a/s)**((n+1)/2)*besselk(n+1, 2*sqrt(a*s)),
a>0, S.Zero, dco),
# 3.13
(exp(-2*sqrt(a*t)),
s**(-1)-sqrt(pi*a)*s**(-S(3)/2)*exp(a/s)*erfc(sqrt(a/s)),
S.true, S.Zero, dco),
# 3.14
(exp(-2*sqrt(a*t))/sqrt(t),
(pi/s)**(S(1)/2)*exp(a/s)*erfc(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.1
(sinh(a*t),
a/(s**2-a**2),
S.true, Abs(a), dco),
# 4.2
(sinh(a*t)**2,
2*a**2/(s**3-4*a**2*s),
S.true, Abs(2*a), dco),
# 4.3
(sinh(a*t)/t,
log((s+a)/(s-a))/2,
S.true, a, dco),
# 4.4
(t**n*sinh(a*t),
gamma(n+1)/2*((s-a)**(-n-1)-(s+a)**(-n-1)),
n>-2, Abs(a), dco),
# 4.5
(sinh(2*sqrt(a*t)),
sqrt(pi*a)/s/sqrt(s)*exp(a/s),
S.true, S.Zero, dco),
# 4.6
(sqrt(t)*sinh(2*sqrt(a*t)),
pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a)*exp(a/s)*erf(sqrt(a/s))-a**(S(1)/2)*s**(-2),
S.true, S.Zero, dco),
# 4.7
(sinh(2*sqrt(a*t))/sqrt(t),
pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s)*erf(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.8
(sinh(sqrt(a*t))**2/sqrt(t),
pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)-1),
S.true, S.Zero, dco),
# 4.9
(cosh(a*t),
s/(s**2-a**2),
S.true, Abs(a), dco),
# 4.10
(cosh(a*t)**2,
(s**2-2*a**2)/(s**3-4*a**2*s),
S.true, Abs(2*a), dco),
# 4.11
(t**n*cosh(a*t),
gamma(n+1)/2*((s-a)**(-n-1)+(s+a)**(-n-1)),
n>-1, Abs(a), dco),
# 4.12
(cosh(2*sqrt(a*t)),
1/s+sqrt(pi*a)/s/sqrt(s)*exp(a/s)*erf(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.13
(sqrt(t)*cosh(2*sqrt(a*t)),
pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a)*exp(a/s),
S.true, S.Zero, dco),
# 4.14
(cosh(2*sqrt(a*t))/sqrt(t),
pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s),
S.true, S.Zero, dco),
# 4.15
(cosh(sqrt(a*t))**2/sqrt(t),
pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)+1),
S.true, S.Zero, dco),
# 5.1
(log(a*t),
-(log(s/a)+S.EulerGamma)/s,
a>0, S.Zero, dco),
# 5.2
(log(1+a*t),
-exp(s/a)/s*Ei(-s/a),
S.true, S.Zero, dco),
# 5.3
(log(a*t+b),
(log(b)-exp(s*b/a)*Ei(-s*b/a))/s,
a>0, S.Zero, dco),
# 5.4 is covered by 5.7
# 5.5
(log(t)/sqrt(t),
-sqrt(pi/s)*(log(4*s)+S.EulerGamma),
S.true, S.Zero, dco),
# 5.6 is covered by 5.7
# 5.7
(t**n*log(t),
gamma(n+1)*s**(-n-1)*(digamma(n+1)-log(s)),
n>-1, S.Zero, dco),
# 5.8
(log(a*t)**2,
((log(s/a)+S.EulerGamma)**2+pi**2/6)/s,
a>0, S.Zero, dco),
# 5.9
(exp(-a*t)*log(t),
-(log(s+a)+S.EulerGamma)/(s+a),
S.true, -a, dco),
# 6.1
(sin(omega*t),
omega/(s**2+omega**2),
S.true, S.Zero, dco),
# 6.2
(Abs(sin(omega*t)),
omega/(s**2+omega**2)*coth(pi*s/2/omega),
omega>0, S.Zero, dco),
# 6.3 and 6.4 are covered by 1.8
# 6.5 is covered by 1.8 together with 2.5
# 6.6
(sin(omega*t)/t,
atan(omega/s),
S.true, S.Zero, dco),
# 6.7
(sin(omega*t)**2/t,
log(1+4*omega**2/s**2)/4,
S.true, S.Zero, dco),
# 6.8
(sin(omega*t)**2/t**2,
omega*atan(2*omega/s)-s*log(1+4*omega**2/s**2)/4,
S.true, S.Zero, dco),
# 6.9
(sin(2*sqrt(a*t)),
sqrt(pi*a)/s/sqrt(s)*exp(-a/s),
a>0, S.Zero, dco),
# 6.10
(sin(2*sqrt(a*t))/t,
pi*erf(sqrt(a/s)),
a>0, S.Zero, dco),
# 6.11
(cos(omega*t),
s/(s**2+omega**2),
S.true, S.Zero, dco),
# 6.12
(cos(omega*t)**2,
(s**2+2*omega**2)/(s**2+4*omega**2)/s,
S.true, S.Zero, dco),
# 6.13 is covered by 1.9 together with 2.5
# 6.14 and 6.15 cannot be done with this method, the respective sum
# parts do not converge. Solve elsewhere if really needed.
# 6.16
(sqrt(t)*cos(2*sqrt(a*t)),
sqrt(pi)/2*s**(-S(5)/2)*(s-2*a)*exp(-a/s),
a>0, S.Zero, dco),
# 6.17
(cos(2*sqrt(a*t))/sqrt(t),
sqrt(pi/s)*exp(-a/s),
a>0, S.Zero, dco),
# 6.18
(sin(a*t)*sin(b*t),
2*a*b*s/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.19
(cos(a*t)*sin(b*t),
b*(s**2-a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.20
(cos(a*t)*cos(b*t),
s*(s**2+a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.21
(exp(b*t)*sin(a*t),
a/((s-b)**2+a**2),
S.true, b, dco),
# 6.22
(exp(b*t)*cos(a*t),
(s-b)/((s-b)**2+a**2),
S.true, b, dco),
# 7.1
(erf(a*t),
exp(s**2/(2*a)**2)*erfc(s/(2*a))/s,
a>0, S.Zero, dco),
# 7.2
(erf(sqrt(a*t)),
sqrt(a)/sqrt(s+a)/s,
a>0, S.Zero, dco),
# 7.3
(exp(a*t)*erf(sqrt(a*t)),
sqrt(a)/sqrt(s)/(s-a),
a>0, a, dco),
# 7.4
(erf(sqrt(a/t)/2),
(1-exp(-sqrt(a*s)))/s,
a>0, S.Zero, dco),
# 7.5
(erfc(sqrt(a*t)),
(sqrt(s+a)-sqrt(a))/sqrt(s+a)/s,
a>0, S.Zero, dco),
# 7.6
(exp(a*t)*erfc(sqrt(a*t)),
1/(s+sqrt(a*s)),
a>0, S.Zero, dco),
# 7.7
(erfc(sqrt(a/t)/2),
exp(-sqrt(a*s))/s,
a>0, S.Zero, dco),
# 8.1, 8.2
(besselj(n, a*t),
a**n/(sqrt(s**2+a**2)*(s+sqrt(s**2+a**2))**n),
And(a>0, n>-1), S.Zero, dco),
# 8.3, 8.4
(t**b*besselj(n, a*t),
2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2+a**2)**(-n-S.Half),
And(And(a>0, n>-S.Half), Eq(b, n)), S.Zero, dco),
# 8.5
(t**b*besselj(n, a*t),
2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2+a**2)**(-n-S(3)/2),
And(And(a>0, n>-1), Eq(b, n+1)), S.Zero, dco),
# 8.6
(besselj(0, 2*sqrt(a*t)),
exp(-a/s)/s,
a>0, S.Zero, dco),
# 8.7, 8.8
(t**(b)*besselj(n, 2*sqrt(a*t)),
a**(n/2)*s**(-n-1)*exp(-a/s),
And(And(a>0, n>-1), Eq(b, n*S.Half)), S.Zero, dco),
# 8.9
(besselj(0, a*sqrt(t**2+b*t)),
exp(b*s-b*sqrt(s**2+a**2))/sqrt(s**2+a**2),
b>0, S.Zero, dco),
# 8.10, 8.11
(besseli(n, a*t),
a**n/(sqrt(s**2-a**2)*(s+sqrt(s**2-a**2))**n),
And(a>0, n>-1), Abs(a), dco),
# 8.12
(t**b*besseli(n, a*t),
2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2-a**2)**(-n-S.Half),
And(And(a>0, n>-S.Half), Eq(b, n)), Abs(a), dco),
# 8.13
(t**b*besseli(n, a*t),
2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2-a**2)**(-n-S(3)/2),
And(And(a>0, n>-1), Eq(b, n+1)), Abs(a), dco),
# 8.15, 8.16
(t**(b)*besseli(n, 2*sqrt(a*t)),
a**(n/2)*s**(-n-1)*exp(a/s),
And(And(a>0, n>-1), Eq(b, n*S.Half)), S.Zero, dco),
# 8.17
(bessely(0, a*t),
-2/pi*asinh(s/a)/sqrt(s**2+a**2),
a>0, S.Zero, dco),
# 8.18
(besselk(0, a*t),
(log((s+sqrt(s**2-a**2))/a))/(sqrt(s**2-a**2)),
a>0, Abs(a), dco)
]
return laplace_transform_rules
def _laplace_cr(f, a, c, **hints):
"""
Internal helper function that will return `(f, a, c)` unless `**hints`
contains `noconds=True`, in which case it will only return `f`.
"""
conds = not hints.get('noconds', False)
if conds:
return f, a, c
else:
return f
def _laplace_rule_timescale(f, t, s, doit=True, **hints):
r"""
This internal helper function tries to apply the time-scaling rule of the
Laplace transform and returns `None` if it cannot do it.
Time-scaling means the following: if $F(s)$ is the Laplace transform of
$f(t)$, then, for any $a>0$, the Laplace transform of $f(at)$ will be
$\frac1a F(\frac{s}{a})$. This scaling will also affect the transform's
convergence plane.
"""
_simplify = hints.pop('simplify', True)
b = Wild('b', exclude=[t])
g = WildFunction('g', nargs=1)
ma1 = f.match(g)
if ma1:
arg = ma1[g].args[0].collect(t)
ma2 = arg.match(b*t)
if ma2 and ma2[b]>0:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: amplitude and time scaling (1.1, 1.2)')
if ma2[b]==1:
if doit==True and not any(func.has(t) for func
in ma1[g].atoms(AppliedUndef)):
return _laplace_transform(ma1[g].func(t), t, s,
simplify=_simplify)
else:
return LaplaceTransform(ma1[g].func(t), t, s, **hints)
else:
L = _laplace_apply_rules(ma1[g].func(t), t, s/ma2[b],
doit=doit, **hints)
noconds = hints.get('noconds', False)
if not noconds and type(L) is tuple:
r, p, c = L
return (1/ma2[b]*r, p, c)
else:
return 1/ma2[b]*L
return None
def _laplace_rule_heaviside(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing the
`Heaviside` function and returns `None` if it cannot do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
b = Wild('b', exclude=[t])
y = Wild('y')
g = WildFunction('g', nargs=1)
ma1 = f.match(Heaviside(y)*g)
if ma1:
ma2 = ma1[y].match(t-a)
ma3 = ma1[g].args[0].collect(t).match(t-b)
if ma2 and ma2[a]>0 and ma3 and ma2[a]==ma3[b]:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s, %s )'%(f, ma1, ma2, ma3))
debug(' rule: time shift (1.3)')
L = _laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints)
noconds = hints.get('noconds', False)
if not noconds and type(L) is tuple:
r, p, c = L
return (exp(-ma2[a]*s)*r, p, c)
else:
return exp(-ma2[a]*s)*L
return None
def _laplace_rule_exp(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing the
`exp` function and returns `None` if it cannot do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
ma1 = f.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with exp (1.5)')
L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints)
noconds = hints.get('noconds', False)
if not noconds and type(L) is tuple:
r, p, c = L
return (r, p+ma2[a], c)
else:
return L
return None
def _laplace_rule_trig(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing a
trigonometric function (`sin`, `cos`, `sinh`, `cosh`) and returns
`None` if it cannot do it.
"""
_simplify = hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
# All of the rules have a very similar form: trig(y)*z is matched, and then
# two copies of the Laplace transform of z are shifted in the s Domain
# and added with a weight; see rules 1.6 to 1.9 in
# http://eqworld.ipmnet.ru/en/auxiliary/inttrans/laplace1.pdf
# The parameters in the tuples are (fm, nu, s1, s2, sd):
# fm: Function to match
# nu: Number of the rule, for debug purposes
# s1: weight of the sum, 'I' for sin and '1' for all others
# s2: sign of the second copy of the Laplace transform of z
# sd: shift direction; shift along real or imaginary axis if `1` or `I`
trigrules = [(sinh(y), '1.6', 1, -1, 1), (cosh(y), '1.7', 1, 1, 1),
(sin(y), '1.8', -I, -1, I), (cos(y), '1.9', 1, 1, I)]
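# Illustrative sketch: for f = cos(3*t)*g(t) the cos rule (1.9) produces
# (L{g}(s - 3*I) + L{g}(s + 3*I))/2, and since the shift is along the
# imaginary axis the convergence plane is left unchanged.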
for trigrule in trigrules:
fm, nu, s1, s2, sd = trigrule
ma1 = f.match(fm*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with %s (%s)'%(fm.func, nu))
L = _laplace_apply_rules(ma1[z], t, s, doit=doit, **hints)
noconds = hints.get('noconds', False)
if not noconds and type(L) is tuple:
r, p, c = L
# The convergence plane changes only if the shift has been
# done along the real axis:
if sd==1:
cp_shift = Abs(ma2[a])
else:
cp_shift = 0
return ((s1*(r.subs(s, s-sd*ma2[a])+\
s2*r.subs(s, s+sd*ma2[a]))).simplify()/2,
p+cp_shift, c)
else:
if doit==True and _simplify==True:
return (s1*(L.subs(s, s-sd*ma2[a])+\
s2*L.subs(s, s+sd*ma2[a]))).simplify()/2
else:
return (s1*(L.subs(s, s-sd*ma2[a])+\
s2*L.subs(s, s+sd*ma2[a])))/2
return None
def _laplace_rule_diff(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform an expression containing
a derivative of an undefined function and returns `None` if it cannot
do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
n = Wild('n', exclude=[t])
g = WildFunction('g', nargs=1)
ma1 = f.match(a*Derivative(g, (t, n)))
if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:
debug('_laplace_apply_rules match:')
debug(' f: %s'%(f,))
debug(' rule: time derivative (1.11, 1.12)')
d = []
for k in range(ma1[n]):
if k==0:
y = ma1[g].func(t).subs(t, 0)
else:
y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0)
d.append(s**(ma1[n]-k-1)*y)
r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit,
**hints)
return ma1[a]*(r - Add(*d))
return None
def _laplace_apply_rules(f, t, s, doit=True, **hints):
"""
Helper function for the class LaplaceTransform.
This function does a Laplace transform based on rules and, after
applying the rules, hands the rest over to `_laplace_transform`, which
will attempt to integrate.
If it is called with `doit=False`, then it will instead return
`LaplaceTransform` objects.
"""
k, func = f.as_independent(t, as_Add=False)
simple_rules = _laplace_build_rules(t, s)
for t_dom, s_dom, check, plane, prep in simple_rules:
ma = prep(func).match(t_dom)
if ma:
debug('_laplace_apply_rules match:')
debug(' f: %s'%(func,))
debug(' rule: %s o---o %s'%(t_dom, s_dom))
try:
debug(' try %s'%(check,))
c = check.xreplace(ma)
debug(' check %s -> %s'%(check, c))
if c==True:
return _laplace_cr(k*s_dom.xreplace(ma),
plane.xreplace(ma), S.true, **hints)
except Exception:
debug('_laplace_apply_rules did not match.')
if f.has(DiracDelta):
return None
prog_rules = [_laplace_rule_timescale, _laplace_rule_heaviside,
_laplace_rule_exp, _laplace_rule_trig, _laplace_rule_diff]
for p_rule in prog_rules:
L = p_rule(func, t, s, doit=doit, **hints)
if L is not None:
noconds = hints.get('noconds', False)
if not noconds and type(L) is tuple:
r, p, c = L
return (k*r, p, c)
else:
return k*L
return None
class LaplaceTransform(IntegralTransform):
"""
Class representing unevaluated Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Laplace transforms, see the :func:`laplace_transform`
docstring.
"""
_name = 'Laplace'
def _compute_transform(self, f, t, s, **hints):
LT = _laplace_apply_rules(f, t, s, **hints)
if LT is None:
_simplify = hints.pop('simplify', True)
debug('_laplace_apply_rules could not match function %s'%(f,))
debug(' hints: %s'%(hints,))
return _laplace_transform(f, t, s, simplify=_simplify, **hints)
else:
return LT
def _as_integral(self, f, t, s):
return Integral(f*exp(-s*t), (t, S.Zero, S.Infinity))
def _collapse_extra(self, extra):
conds = []
planes = []
for plane, cond in extra:
conds.append(cond)
planes.append(plane)
cond = And(*conds)
plane = Max(*planes)
if cond == False:
raise IntegralTransformError(
'Laplace', None, 'No combined convergence.')
return plane, cond
def _try_directly(self, **hints):
fn = self.function
debug('----> _try_directly: %s'%(fn, ))
t_ = self.function_variable
s_ = self.transform_variable
LT = None
if not fn.is_Add:
fn = expand_mul(fn)
try:
LT = self._compute_transform(fn, t_, s_, **hints)
except IntegralTransformError:
LT = None
return fn, LT
def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r"""
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t.
Explanation
===========
For all sensible functions, this converges absolutely in a
half-plane
.. math :: a < \operatorname{Re}(s)
This function returns ``(F, a, cond)`` where ``F`` is the Laplace
transform of ``f``, `a` is the half-plane of convergence, and `cond` are
auxiliary convergence conditions.
The implementation is rule-based, and if you are interested in which
rules are applied, and whether integration is attempted, you can switch
debug information on by setting ``sympy.SYMPY_DEBUG=True``.
The lower bound is `0-`, meaning that this bound should be approached
from the lower side. This is only necessary if distributions are involved.
At present, it is only done if `f(t)` contains ``DiracDelta``, in which
case the Laplace transform is computed implicitly as
.. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t
by applying rules.
If the integral cannot be fully computed in closed form, this function
returns an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
.. deprecated:: 1.9
Legacy behavior for matrices where ``laplace_transform`` with
``noconds=False`` (the default) returns a Matrix whose elements are
tuples. The behavior of ``laplace_transform`` for matrices will change
in a future release of SymPy to return a tuple of the transformed
Matrix and the convergence conditions for the matrix as a whole. Use
``legacy_matrix=False`` to enable the new behavior.
Examples
========
>>> from sympy import DiracDelta, exp, laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**4, t, s)
(24/s**5, 0, True)
>>> laplace_transform(t**a, t, s)
(s**(-a - 1)*gamma(a + 1), 0, re(a) > -1)
>>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s)
(s/(a + s), Max(0, -a), True)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
sympy_deprecation_warning(
"""
Calling laplace_transform() on a Matrix with noconds=False (the default) is
deprecated. Either noconds=True or use legacy_matrix=False to get the new
behavior.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-laplace-transform-matrix",
)
# Temporarily disable the deprecation warning for non-Expr objects
# in Matrix
with ignore_warnings(SymPyDeprecationWarning):
return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))
else:
elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]
if conds:
elements, avals, conditions = zip(*elements_trans)
f_laplace = type(f)(*f.shape, elements)
return f_laplace, Max(*avals), And(*conditions)
else:
return type(f)(*f.shape, elements_trans)
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True)
def _inverse_laplace_transform(F, s, t_, plane, simplify=True):
""" The backend function for inverse Laplace transforms. """
from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp
# There are two strategies we can try:
# 1) Use inverse mellin transforms - related by a simple change of variables.
# 2) Use the inversion integral.
t = Dummy('t', real=True)
def pw_simp(*args):
""" Simplify a piecewise expression from hyperexpand. """
# XXX we break modularity here!
if len(args) != 3:
return Piecewise(*args)
arg = args[2].args[0].argument
coeff, exponent = _get_coeff_exp(arg, t)
e1 = args[0].args[0]
e2 = args[1].args[0]
return Heaviside(1/Abs(coeff) - t**exponent)*e1 \
+ Heaviside(t**exponent - 1/Abs(coeff))*e2
if F.is_rational_function(s):
F = F.apart(s)
if F.is_Add:
f = Add(*[_inverse_laplace_transform(X, s, t, plane, simplify)\
for X in F.args])
return _simplify(f.subs(t, t_), simplify), True
try:
f, cond = inverse_mellin_transform(F, s, exp(-t), (None, S.Infinity),
needeval=True, noconds=False)
except IntegralTransformError:
f = None
if f is None:
f = meijerint_inversion(F, s, t)
if f is None:
raise IntegralTransformError('Inverse Laplace', f, '')
if f.is_Piecewise:
f, cond = f.args[0]
if f.has(Integral):
raise IntegralTransformError('Inverse Laplace', f,
'inversion integral of unrecognised form.')
else:
cond = S.true
f = f.replace(Piecewise, pw_simp)
if f.is_Piecewise:
# many of the functions called below can't work with piecewise
# (b/c it has a bool in args)
return f.subs(t, t_), cond
u = Dummy('u')
def simp_heaviside(arg, H0=S.Half):
a = arg.subs(exp(-t), u)
if a.has(t):
return Heaviside(arg, H0)
from sympy.solvers.inequalities import _solve_inequality
rel = _solve_inequality(a > 0, u)
if rel.lts == u:
k = log(rel.gts)
return Heaviside(t + k, H0)
else:
k = log(rel.lts)
return Heaviside(-(t + k), H0)
f = f.replace(Heaviside, simp_heaviside)
def simp_exp(arg):
return expand_complex(exp(arg))
f = f.replace(exp, simp_exp)
# TODO it would be nice to fix cosh and sinh ... simplify messes these
# exponentials up
return _simplify(f.subs(t, t_), simplify), cond
class InverseLaplaceTransform(IntegralTransform):
"""
Class representing unevaluated inverse Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Laplace transforms, see the
:func:`inverse_laplace_transform` docstring.
"""
_name = 'Inverse Laplace'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, plane, **opts):
if plane is None:
plane = InverseLaplaceTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, plane, **opts)
@property
def fundamental_plane(self):
plane = self.args[3]
if plane is InverseLaplaceTransform._none_sentinel:
plane = None
return plane
def _compute_transform(self, F, s, t, **hints):
return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints)
def _as_integral(self, F, s, t):
c = self.__class__._c
return Integral(exp(s*t)*F, (s, c - S.ImaginaryUnit*S.Infinity,
c + S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit)
def inverse_laplace_transform(F, s, t, plane=None, **hints):
r"""
Compute the inverse Laplace transform of `F(s)`, defined as
.. math :: f(t) = \frac{1}{2\pi i} \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s,
for `c` so large that `F(s)` has no singularities in the
half-plane `\operatorname{Re}(s) > c-\epsilon`.
Explanation
===========
The plane can be specified by
argument ``plane``, but will be inferred if passed as None.
Under certain regularity conditions, this recovers `f(t)` from its
Laplace Transform `F(s)`, for non-negative `t`, and vice
versa.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseLaplaceTransform` object.
Note that this function will always assume `t` to be real,
regardless of the SymPy assumption on `t`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Examples
========
>>> from sympy import inverse_laplace_transform, exp, Symbol
>>> from sympy.abc import s, t
>>> a = Symbol('a', positive=True)
>>> inverse_laplace_transform(exp(-a*s)/s, s, t)
Heaviside(-a + t)
See Also
========
laplace_transform, _fast_inverse_laplace
hankel_transform, inverse_hankel_transform
"""
if isinstance(F, MatrixBase) and hasattr(F, 'applyfunc'):
return F.applyfunc(lambda Fij: inverse_laplace_transform(Fij, s, t, plane, **hints))
return InverseLaplaceTransform(F, s, t, plane).doit(**hints)
def _fast_inverse_laplace(e, s, t):
"""Fast inverse Laplace transform of rational function including RootSum"""
a, b, n = symbols('a, b, n', cls=Wild, exclude=[s])
def _ilt(e):
if not e.has(s):
return e
elif e.is_Add:
return _ilt_add(e)
elif e.is_Mul:
return _ilt_mul(e)
elif e.is_Pow:
return _ilt_pow(e)
elif isinstance(e, RootSum):
return _ilt_rootsum(e)
else:
raise NotImplementedError
def _ilt_add(e):
return e.func(*map(_ilt, e.args))
def _ilt_mul(e):
coeff, expr = e.as_independent(s)
if expr.is_Mul:
raise NotImplementedError
return coeff * _ilt(expr)
def _ilt_pow(e):
match = e.match((a*s + b)**n)
if match is not None:
nm, am, bm = match[n], match[a], match[b]
if nm.is_Integer and nm < 0:
return t**(-nm-1)*exp(-(bm/am)*t)/(am**-nm*gamma(-nm))
if nm == 1:
return exp(-(bm/am)*t) / am
raise NotImplementedError
def _ilt_rootsum(e):
expr = e.fun.expr
[variable] = e.fun.variables
return RootSum(e.poly, Lambda(variable, together(_ilt(expr))))
return _ilt(e)
##########################################################################
# Fourier Transform
##########################################################################
@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
r"""
Compute a general Fourier-type transform
.. math::
F(k) = a \int_{-\infty}^{\infty} e^{bixk} f(x)\, dx.
For suitable choice of *a* and *b*, this reduces to the standard Fourier
and inverse Fourier transforms.
"""
F = integrate(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
integral_f = integrate(f, (x, S.NegativeInfinity, S.Infinity))
if integral_f in (S.NegativeInfinity, S.Infinity, S.NaN) or integral_f.has(Integral):
raise IntegralTransformError(name, f, 'function not integrable on real axis')
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class FourierTypeTransform(IntegralTransform):
""" Base class for Fourier transforms."""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _fourier_transform(f, x, k,
self.a(), self.b(),
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
return Integral(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
class FourierTransform(FourierTypeTransform):
"""
Class representing unevaluated Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Fourier transforms, see the :func:`fourier_transform`
docstring.
"""
_name = 'Fourier'
def a(self):
return 1
def b(self):
return -2*S.Pi
def fourier_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency Fourier transform of ``f``, defined
as
.. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`FourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import fourier_transform, exp
>>> from sympy.abc import x, k
>>> fourier_transform(exp(-x**2), x, k)
sqrt(pi)*exp(-pi**2*k**2)
>>> fourier_transform(exp(-x**2), x, k, noconds=False)
(sqrt(pi)*exp(-pi**2*k**2), True)
See Also
========
inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return FourierTransform(f, x, k).doit(**hints)
class InverseFourierTransform(FourierTypeTransform):
"""
Class representing unevaluated inverse Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Fourier transforms, see the
:func:`inverse_fourier_transform` docstring.
"""
_name = 'Inverse Fourier'
def a(self):
return 1
def b(self):
return 2*S.Pi
def inverse_fourier_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse Fourier transform of `F`,
defined as
.. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseFourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_fourier_transform, exp, sqrt, pi
>>> from sympy.abc import x, k
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x)
exp(-x**2)
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False)
(exp(-x**2), True)
See Also
========
fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseFourierTransform(F, k, x).doit(**hints)
##########################################################################
# Fourier Sine and Cosine Transform
##########################################################################
@_noconds_(True)
def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True):
"""
Compute a general sine or cosine-type transform
F(k) = a int_0^oo f(x) sin(b*x*k) dx.
F(k) = a int_0^oo f(x) cos(b*x*k) dx.
For suitable choice of a and b, this reduces to the standard sine/cosine
and inverse sine/cosine transforms.
"""
F = integrate(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class SineCosineTypeTransform(IntegralTransform):
"""
Base class for sine and cosine transforms.
Specify cls._kern.
"""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _sine_cosine_transform(f, x, k,
self.a(), self.b(),
self.__class__._kern,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
K = self.__class__._kern
return Integral(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
class SineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute sine transforms, see the :func:`sine_transform`
docstring.
"""
_name = 'Sine'
_kern = sin
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def sine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency sine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`SineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import sine_transform, exp
>>> from sympy.abc import x, k, a
>>> sine_transform(x*exp(-a*x**2), x, k)
sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2))
>>> sine_transform(x**(-a), x, k)
2**(1/2 - a)*k**(a - 1)*gamma(1 - a/2)/gamma(a/2 + 1/2)
See Also
========
fourier_transform, inverse_fourier_transform
inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return SineTransform(f, x, k).doit(**hints)
class InverseSineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse sine transforms, see the
:func:`inverse_sine_transform` docstring.
"""
_name = 'Inverse Sine'
_kern = sin
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def inverse_sine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse sine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseSineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_sine_transform, exp, sqrt, gamma
>>> from sympy.abc import x, k, a
>>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)*
... gamma(-a/2 + 1)/gamma((a+1)/2), k, x)
x**(-a)
>>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x)
x*exp(-a*x**2)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseSineTransform(F, k, x).doit(**hints)
class CosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute cosine transforms, see the :func:`cosine_transform`
docstring.
"""
_name = 'Cosine'
_kern = cos
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def cosine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency cosine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`CosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import cosine_transform, exp, sqrt, cos
>>> from sympy.abc import x, k, a
>>> cosine_transform(exp(-a*x), x, k)
sqrt(2)*a/(sqrt(pi)*(a**2 + k**2))
>>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k)
a*exp(-a**2/(2*k))/(2*k**(3/2))
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return CosineTransform(f, x, k).doit(**hints)
class InverseCosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse cosine transforms, see the
:func:`inverse_cosine_transform` docstring.
"""
_name = 'Inverse Cosine'
_kern = cos
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def inverse_cosine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse cosine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseCosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_cosine_transform, sqrt, pi
>>> from sympy.abc import x, k, a
>>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x)
exp(-a*x)
>>> inverse_cosine_transform(1/sqrt(k), k, x)
1/sqrt(x)
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseCosineTransform(F, k, x).doit(**hints)
##########################################################################
# Hankel Transform
##########################################################################
@_noconds_(True)
def _hankel_transform(f, r, k, nu, name, simplify=True):
r"""
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
"""
F = integrate(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class HankelTypeTransform(IntegralTransform):
"""
Base class for Hankel transforms.
"""
def doit(self, **hints):
return self._compute_transform(self.function,
self.function_variable,
self.transform_variable,
self.args[3],
**hints)
def _compute_transform(self, f, r, k, nu, **hints):
return _hankel_transform(f, r, k, nu, self._name, **hints)
def _as_integral(self, f, r, k, nu):
return Integral(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity))
@property
def as_integral(self):
return self._as_integral(self.function,
self.function_variable,
self.transform_variable,
self.args[3])
class HankelTransform(HankelTypeTransform):
"""
Class representing unevaluated Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Hankel transforms, see the :func:`hankel_transform`
docstring.
"""
_name = 'Hankel'
def hankel_transform(f, r, k, nu, **hints):
r"""
Compute the Hankel transform of `f`, defined as
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`HankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import exp
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2))
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
inverse_hankel_transform
mellin_transform, laplace_transform
"""
return HankelTransform(f, r, k, nu).doit(**hints)
class InverseHankelTransform(HankelTypeTransform):
"""
Class representing unevaluated inverse Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Hankel transforms, see the
:func:`inverse_hankel_transform` docstring.
"""
_name = 'Inverse Hankel'
def inverse_hankel_transform(F, k, r, nu, **hints):
r"""
Compute the inverse Hankel transform of `F` defined as
.. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseHankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import exp
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2))
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform
mellin_transform, laplace_transform
"""
return InverseHankelTransform(F, k, r, nu).doit(**hints)
|
e51a31b4863d53d08ed5e235dd10623e0a262cff441478cdd96fd3750924b608 | from typing import Tuple as tTuple
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import diff
from sympy.core.logic import fuzzy_bool
from sympy.core.mul import Mul
from sympy.core.numbers import oo, pi
from sympy.core.relational import Ne
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.functions import Piecewise, sqrt, piecewise_fold, tan, cot, atan
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.complexes import Abs, sign
from sympy.functions.elementary.miscellaneous import Min, Max
from .rationaltools import ratint
from sympy.matrices import MatrixBase
from sympy.polys import Poly, PolynomialError
from sympy.series.formal import FormalPowerSeries
from sympy.series.limits import limit
from sympy.series.order import Order
from sympy.tensor.functions import shape
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import filldedent
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ()
args: tTuple[Expr, Tuple]
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Explanation
===========
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral is an abstract antiderivative
(x, a, b) - definite integral
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a prepended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_0, (_0, x))
"""
#This will help other classes define their own definitions
#of behaviour with Integral.
if hasattr(function, '_eval_Integral'):
return function._eval_Integral(*symbols, **assumptions)
if isinstance(function, Poly):
sympy_deprecation_warning(
"""
integrate(Poly) and Integral(Poly) are deprecated. Instead,
use the Poly.integrate() method, or convert the Poly to an
Expr first with the Poly.as_expr() method.
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-integrate-poly")
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
{y}
See Also
========
sympy.concrete.expr_with_limits.ExprWithLimits.function
sympy.concrete.expr_with_limits.ExprWithLimits.limits
sympy.concrete.expr_with_limits.ExprWithLimits.variables
"""
return super().free_symbols
def _eval_is_zero(self):
# This is a very naive and quick test, not intended to do the integral to
# answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi))
# is zero but this routine should return None for that case. But, like
# Mul, there are trivial situations for which the integral will be
# zero so we check for those.
if self.function.is_zero:
return True
got_none = False
for l in self.limits:
if len(l) == 3:
z = (l[1] == l[2]) or (l[1] - l[2]).is_zero
if z:
return True
elif z is None:
got_none = True
free = self.function.free_symbols
for xab in self.limits:
if len(xab) == 1:
free.add(xab[0])
continue
if len(xab) == 2 and xab[0] not in free:
if xab[1].is_zero:
return True
elif xab[1].is_zero is None:
got_none = True
# take integration symbol out of free since it will be replaced
# with the free symbols in the limits
free.discard(xab[0])
# add in the new symbols
for i in xab[1:]:
free.update(i.free_symbols)
if self.function.is_zero is False and got_none is False:
return False
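# Editor's addition: hedged illustrations of the quick zero checks above.
#   >>> from sympy import Integral, sin, pi
#   >>> from sympy.abc import x
#   >>> Integral(0, x).is_zero                         # zero integrand
#   True
#   >>> Integral(x, (x, 1, 1)).is_zero                 # equal limits
#   True
#   >>> print(Integral(sin(x), (x, 0, 2*pi)).is_zero)  # deliberately not detected
#   None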
def transform(self, x, u):
r"""
Performs a change of variables from `x` to `u` using the relationship
given by `x` and `u` which will define the transformations `f` and `F`
(which are inverses of each other) as follows:
1) If `x` is a Symbol (which is a variable of integration) then `u`
will be interpreted as some function, f(u), with inverse F(u).
This, in effect, just makes the substitution of x with f(x).
2) If `u` is a Symbol then `x` will be interpreted as some function,
F(x), with inverse f(u). This is commonly referred to as
u-substitution.
Once f and F have been identified, the transformation is made as
follows:
.. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
\frac{\mathrm{d}}{\mathrm{d}x}
where `F(x)` is the inverse of `f(x)` and the limits and integrand have
been corrected so as to retain the same value after integration.
Notes
=====
The mappings, F(x) or f(u), must lead to a unique integral. Linear
or rational linear expressions such as ``2*x`` and ``1/x``, as well as
``sqrt(x)``, will always work; quadratic expressions like ``x**2 - 1`` are acceptable
as long as the resulting integrand does not depend on the sign of
the solutions (see examples).
The integral will be returned unchanged if ``x`` is not a variable of
integration.
``x`` must be (or contain) only one of the integration variables. If
``u`` has more than one free symbol then it should be sent as a tuple
(``u``, ``uvar``) where ``uvar`` identifies which variable is replacing
the integration variable.
XXX can it contain another integration variable?
Examples
========
>>> from sympy.abc import a, x, u
>>> from sympy import Integral, cos, sqrt
>>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))
transform can change the variable of integration
>>> i.transform(x, u)
Integral(u*cos(u**2 - 1), (u, 0, 1))
transform can perform u-substitution as long as a unique
integrand is obtained:
>>> i.transform(x**2 - 1, u)
Integral(cos(u)/2, (u, -1, 0))
This attempt fails because x = +/-sqrt(u + 1) and the
sign does not cancel out of the integrand:
>>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
Traceback (most recent call last):
...
ValueError:
The mapping between F(x) and f(u) did not give a unique integrand.
transform can do a substitution. Here, the previous
result is transformed back into the original expression
using "u-substitution":
>>> ui = _
>>> _.transform(sqrt(u + 1), x) == i
True
We can accomplish the same with a regular substitution:
>>> ui.transform(u, x**2 - 1) == i
True
If the `x` does not contain a symbol of integration then
the integral will be returned unchanged. Integral `i` does
not have an integration variable `a` so no change is made:
>>> i.transform(a, x) == i
True
When `u` has more than one free symbol the symbol that is
replacing `x` must be identified by passing `u` as a tuple:
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
Integral(a + u, (u, -a, 1 - a))
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
Integral(a + u, (a, -u, 1 - u))
See Also
========
sympy.concrete.expr_with_limits.ExprWithLimits.variables : Lists the integration variables
as_dummy : Replace integration variables with dummy ones
"""
d = Dummy('d')
xfree = x.free_symbols.intersection(self.variables)
if len(xfree) > 1:
raise ValueError(
'F(x) can only contain one of: %s' % self.variables)
xvar = xfree.pop() if xfree else d
if xvar not in self.variables:
return self
u = sympify(u)
if isinstance(u, Expr):
ufree = u.free_symbols
if len(ufree) == 0:
raise ValueError(filldedent('''
f(u) cannot be a constant'''))
if len(ufree) > 1:
raise ValueError(filldedent('''
When f(u) has more than one free symbol, the one replacing x
must be identified: pass f(u) as (f(u), u)'''))
uvar = ufree.pop()
else:
u, uvar = u
if uvar not in u.free_symbols:
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) where symbol identifies
a free symbol in expr, but symbol is not in expr's free
symbols.'''))
if not isinstance(uvar, Symbol):
# This probably never evaluates to True
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) but didn't get
a symbol; got %s''' % uvar))
if x.is_Symbol and u.is_Symbol:
return self.xreplace({x: u})
if not x.is_Symbol and not u.is_Symbol:
raise ValueError('either x or u must be a symbol')
if uvar == xvar:
return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar})
if uvar in self.limits:
raise ValueError(filldedent('''
u must contain the same variable as in x
or a variable that is not already an integration variable'''))
from sympy.solvers.solvers import solve
if not x.is_Symbol:
F = [x.subs(xvar, d)]
soln = solve(u - x, xvar, check=False)
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), x)')
f = [fi.subs(uvar, d) for fi in soln]
else:
f = [u.subs(uvar, d)]
from sympy.simplify.simplify import posify
pdiff, reps = posify(u - x)
puvar = uvar.subs([(v, k) for k, v in reps.items()])
soln = [s.subs(reps) for s in solve(pdiff, puvar)]
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), u)')
F = [fi.subs(xvar, d) for fi in soln]
newfuncs = {(self.function.subs(xvar, fi)*fi.diff(d)
).subs(d, uvar) for fi in f}
if len(newfuncs) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not give
a unique integrand.'''))
newfunc = newfuncs.pop()
def _calc_limit_1(F, a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
wok = F.subs(d, a)
if wok is S.NaN or wok.is_finite is False and a.is_finite:
return limit(sign(b)*F, d, a)
return wok
def _calc_limit(a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
avals = list({_calc_limit_1(Fi, a, b) for Fi in F})
if len(avals) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not
give a unique limit.'''))
return avals[0]
newlimits = []
for xab in self.limits:
sym = xab[0]
if sym == xvar:
if len(xab) == 3:
a, b = xab[1:]
a, b = _calc_limit(a, b), _calc_limit(b, a)
if fuzzy_bool(a - b > 0):
a, b = b, a
newfunc = -newfunc
newlimits.append((uvar, a, b))
elif len(xab) == 2:
a = _calc_limit(xab[1], 1)
newlimits.append((uvar, a))
else:
newlimits.append(uvar)
else:
newlimits.append(xab)
return self.func(newfunc, *newlimits)
def doit(self, **hints):
"""
Perform the integration using any hints given.
Examples
========
>>> from sympy import Piecewise, S
>>> from sympy.abc import x, t
>>> p = x**2 + Piecewise((0, x/t < 0), (1, True))
>>> p.integrate((t, S(4)/5, 1), (x, -1, 1))
1/3
See Also
========
sympy.integrals.trigonometry.trigintegrate
sympy.integrals.heurisch.heurisch
sympy.integrals.rationaltools.ratint
as_sum : Approximate the integral using a sum
"""
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
meijerg = hints.get('meijerg', None)
conds = hints.get('conds', 'piecewise')
risch = hints.get('risch', None)
heurisch = hints.get('heurisch', None)
manual = hints.get('manual', None)
if len(list(filter(None, (manual, meijerg, risch, heurisch)))) > 1:
raise ValueError("At most one of manual, meijerg, risch, heurisch can be True")
elif manual:
meijerg = risch = heurisch = False
elif meijerg:
manual = risch = heurisch = False
elif risch:
manual = meijerg = heurisch = False
elif heurisch:
manual = meijerg = risch = False
eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual, heurisch=heurisch,
conds=conds)
if conds not in ('separate', 'piecewise', 'none'):
raise ValueError('conds must be one of "separate", "piecewise", '
'"none", got: %s' % conds)
if risch and any(len(xab) > 1 for xab in self.limits):
raise ValueError('risch=True is only allowed for indefinite integrals.')
# check for the trivial zero
if self.is_zero:
return S.Zero
# hacks to handle integrals of
# nested summations
from sympy.concrete.summations import Sum
if isinstance(self.function, Sum):
if any(v in self.function.limits[0] for v in self.variables):
raise ValueError('Limit of the sum cannot be an integration variable.')
if any(l.is_infinite for l in self.function.limits[0][1:]):
return self
_i = self
_sum = self.function
return _sum.func(_i.func(_sum.function, *_i.limits).doit(), *_sum.limits).doit()
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# hacks to handle special cases
if isinstance(function, MatrixBase):
return function.applyfunc(
lambda f: self.func(f, *self.limits).doit(**hints))
if isinstance(function, FormalPowerSeries):
if len(self.limits) > 1:
raise NotImplementedError
xab = self.limits[0]
if len(xab) > 1:
return function.integrate(xab, **eval_kwargs)
else:
return function.integrate(xab[0], **eval_kwargs)
# There is no trivial answer and special handling
# is done so continue
# first make sure any definite limits have integration
# variables with matching assumptions
reps = {}
for xab in self.limits:
if len(xab) != 3:
# it makes sense to just make
# all x real but in practice with the
# current state of integration...this
# doesn't work out well
# x = xab[0]
# if x not in reps and not x.is_real:
# reps[x] = Dummy(real=True)
continue
x, a, b = xab
l = (a, b)
if all(i.is_nonnegative for i in l) and not x.is_nonnegative:
d = Dummy(positive=True)
elif all(i.is_nonpositive for i in l) and not x.is_nonpositive:
d = Dummy(negative=True)
elif all(i.is_real for i in l) and not x.is_real:
d = Dummy(real=True)
else:
d = None
if d:
reps[x] = d
if reps:
undo = {v: k for k, v in reps.items()}
did = self.xreplace(reps).doit(**hints)
if isinstance(did, tuple): # when separate=True
did = tuple([i.xreplace(undo) for i in did])
else:
did = did.xreplace(undo)
return did
# continue with existing assumptions
undone_limits = []
# ulj = free symbols of any undone limits' upper and lower limits
ulj = set()
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
function = self.func(*([function] + [xab]))
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
continue
if function.has(Abs, sign) and (
(len(xab) < 3 and all(x.is_extended_real for x in xab)) or
(len(xab) == 3 and all(x.is_extended_real and not x.is_infinite for
x in xab[1:]))):
# some improper integrals are better off with Abs
xr = Dummy("xr", real=True)
function = (function.xreplace({xab[0]: xr})
.rewrite(Piecewise).xreplace({xr: xab[0]}))
elif function.has(Min, Max):
function = function.rewrite(Piecewise)
if (function.has(Piecewise) and
not isinstance(function, Piecewise)):
function = piecewise_fold(function)
if isinstance(function, Piecewise):
if len(xab) == 1:
antideriv = function._eval_integral(xab[0],
**eval_kwargs)
else:
antideriv = self._eval_integral(
function, xab[0], **eval_kwargs)
else:
# There are a number of tradeoffs in using the
# Meijer G method. It can sometimes be a lot faster
# than other methods, and sometimes slower. And
# there are certain types of integrals for which it
# is more likely to work than others. These
# heuristics are incorporated in deciding what
# integration methods to try, in what order. See the
# integrate() docstring for details.
def try_meijerg(function, xab):
ret = None
if len(xab) == 3 and meijerg is not False:
x, a, b = xab
try:
res = meijerint_definite(function, x, a, b)
except NotImplementedError:
_debug('NotImplementedError '
'from meijerint_definite')
res = None
if res is not None:
f, cond = res
if conds == 'piecewise':
u = self.func(function, (x, a, b))
# if Piecewise modifies cond too
# much it may not be recognized by
# _condsimp pattern matching so just
# turn off all evaluation
return Piecewise((f, cond), (u, True),
evaluate=False)
elif conds == 'separate':
if len(self.limits) != 1:
raise ValueError(filldedent('''
conds=separate not supported in
multiple integrals'''))
ret = f, cond
else:
ret = f
return ret
meijerg1 = meijerg
if (meijerg is not False and
len(xab) == 3 and xab[1].is_extended_real and xab[2].is_extended_real
and not function.is_Poly and
(xab[1].has(oo, -oo) or xab[2].has(oo, -oo))):
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
meijerg1 = False
# If the special meijerg code did not succeed in
# finding a definite integral, then the code using
# meijerint_indefinite will not either (it might
# find an antiderivative, but the answer is likely
# to be nonsensical). Thus if we are requested to
# only use Meijer G-function methods, we give up at
# this stage. Otherwise we just disable G-function
# methods.
if meijerg1 is False and meijerg is True:
antideriv = None
else:
antideriv = self._eval_integral(
function, xab[0], **eval_kwargs)
if antideriv is None and meijerg is True:
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
final = hints.get('final', True)
# doit may be iterated but floor terms making atan and acot
# continuous should only be added in the final round
if (final and not isinstance(antideriv, Integral) and
antideriv is not None):
for atan_term in antideriv.atoms(atan):
atan_arg = atan_term.args[0]
# Checking `atan_arg` to be linear combination of `tan` or `cot`
for tan_part in atan_arg.atoms(tan):
x1 = Dummy('x1')
tan_exp1 = atan_arg.subs(tan_part, x1)
# The coefficient of `tan` should be constant
coeff = tan_exp1.diff(x1)
if x1 not in coeff.free_symbols:
a = tan_part.args[0]
antideriv = antideriv.subs(atan_term, Add(atan_term,
sign(coeff)*pi*floor((a-pi/2)/pi)))
for cot_part in atan_arg.atoms(cot):
x1 = Dummy('x1')
cot_exp1 = atan_arg.subs(cot_part, x1)
# The coefficient of `cot` should be constant
coeff = cot_exp1.diff(x1)
if x1 not in coeff.free_symbols:
a = cot_part.args[0]
antideriv = antideriv.subs(atan_term, Add(atan_term,
sign(coeff)*pi*floor((a)/pi)))
if antideriv is None:
undone_limits.append(xab)
function = self.func(*([function] + [xab])).factor()
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
continue
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
elif len(xab) == 2:
x, b = xab
a = None
else:
raise NotImplementedError
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
else:
def is_indef_int(g, x):
return (isinstance(g, Integral) and
any(i == (x,) for i in g.limits))
def eval_factored(f, x, a, b):
# _eval_interval for integrals with
# (constant) factors
# a single indefinite integral is assumed
args = []
for g in Mul.make_args(f):
if is_indef_int(g, x):
args.append(g._eval_interval(x, a, b))
else:
args.append(g)
return Mul(*args)
integrals, others, piecewises = [], [], []
for f in Add.make_args(antideriv):
if any(is_indef_int(g, x)
for g in Mul.make_args(f)):
integrals.append(f)
elif any(isinstance(g, Piecewise)
for g in Mul.make_args(f)):
piecewises.append(piecewise_fold(f))
else:
others.append(f)
uneval = Add(*[eval_factored(f, x, a, b)
for f in integrals])
try:
evalued = Add(*others)._eval_interval(x, a, b)
evalued_pw = piecewise_fold(Add(*piecewises))._eval_interval(x, a, b)
function = uneval + evalued + evalued_pw
except NotImplementedError:
# This can happen if _eval_interval depends in a
# complicated way on limits that cannot be computed
undone_limits.append(xab)
function = self.func(*([function] + [xab]))
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
return function
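# Editor's addition: hedged doit() sketches for the flag handling above
# (exact outputs assume current SymPy behaviour).
#   >>> from sympy import Integral, exp, oo
#   >>> from sympy.abc import x
#   >>> Integral(x**2, (x, 0, 2)).doit()
#   8/3
#   >>> Integral(exp(-x**2), (x, -oo, oo)).doit(meijerg=True)  # G-function route
#   sqrt(pi)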
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Explanation
===========
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References
==========
.. [1] https://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
.. [2] https://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
{x}
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 4215
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = self.func(f, *tuple(limits))
# assemble the pieces
def _do(f, ab):
dab_dsym = diff(ab, sym)
if not dab_dsym:
return S.Zero
if isinstance(f, Integral):
limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
for l in f.limits]
f = self.func(f.function, *limits)
return f.subs(x, ab)*dab_dsym
rv = S.Zero
if b is not None:
rv += _do(f, b)
if a is not None:
rv -= _do(f, a)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
if arg:
rv += self.func(arg, (x, a, b))
return rv
def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
heurisch=None, conds='piecewise', final=None):
"""
Calculate the anti-derivative to the function f(x).
Explanation
===========
The following algorithms are applied (roughly in this order):
1. Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials, products of
trig functions)
2. Integration of rational functions:
- A complete algorithm for integrating rational functions is
implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
also uses the partial fraction decomposition algorithm
implemented in apart() as a preprocessor to make this process
faster. Note that the integral of a rational function is always
elementary, but in general, it may include a RootSum.
3. Full Risch algorithm:
- The Risch algorithm is a complete decision
procedure for integrating elementary functions, which means that
given any elementary function, it will either compute an
elementary antiderivative, or else prove that none exists.
Currently, part of the transcendental case is implemented, meaning
elementary integrals containing exponentials, logarithms, and
(soon!) trigonometric functions can be computed. The algebraic
case, e.g., functions containing roots, is much more difficult
and is not implemented yet.
- If the routine fails (because the integrand is not elementary, or
because a case is not implemented yet), it continues on to the
next algorithms below. If the routine proves that the integral
is nonelementary, it still moves on to the algorithms below,
because we might be able to find a closed-form solution in terms
of special functions. If risch=True, however, it will stop here.
4. The Meijer G-Function algorithm:
- This algorithm works by first rewriting the integrand in terms of
very general Meijer G-Function (meijerg in SymPy), integrating
it, and then rewriting the result back, if possible. This
algorithm is particularly powerful for definite integrals (which
is actually part of a different method of Integral), since it can
compute closed-form solutions of definite integrals even when no
closed-form indefinite integral exists. But it also is capable
of computing many indefinite integrals as well.
- Another advantage of this method is that it can use some results
about the Meijer G-Function to give a result in terms of a
Piecewise expression, which allows one to express conditionally
convergent integrals.
- Setting meijerg=True will cause integrate() to use only this
method.
5. The "manual integration" algorithm:
- This algorithm tries to mimic how a person would find an
antiderivative by hand, for example by looking for a
substitution or applying integration by parts. This algorithm
does not handle as many integrands but can return results in a
more familiar form.
- Sometimes this algorithm can evaluate parts of an integral; in
this case integrate() will try to evaluate the rest of the
integrand using the other methods here.
- Setting manual=True will cause integrate() to use only this
method.
6. The Heuristic Risch algorithm:
- This is a heuristic version of the Risch algorithm, meaning that
it is not deterministic. This is tried as a last resort because
it can be very slow. It is still used because not enough of the
full Risch algorithm is implemented, so that there are still some
integrals that can only be computed using this method. The goal
is to implement enough of the Risch and Meijer G-function methods
so that this can be deleted.
Setting heurisch=True will cause integrate() to use only this
method. Set heurisch=False to not use it.
"""
from sympy.integrals.risch import risch_integrate, NonElementaryIntegral
from sympy.integrals.manualintegrate import manualintegrate
if risch:
try:
return risch_integrate(f, x, conds=conds)
except NotImplementedError:
return None
if manual:
try:
result = manualintegrate(f, x)
if result is not None and result.func != Integral:
return result
except (ValueError, PolynomialError):
pass
eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual,
heurisch=heurisch, conds=conds)
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a SymPy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly) and not (manual or meijerg or risch):
# Note: this is deprecated, but the deprecation warning is already
# issued in the Integral constructor.
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if isinstance(f, Piecewise):
return f.piecewise_integrate(x, **eval_kwargs)
# let's cut it short if `f` does not depend on `x`; if
# x is only a dummy, that will be handled below
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None and not (manual or meijerg or risch):
return poly.integrate().as_expr()
if risch is not False:
try:
result, i = risch_integrate(f, x, separate_integral=True,
conds=conds)
except NotImplementedError:
pass
else:
if i:
# There was a nonelementary integral. Try integrating it.
# if no part of the NonElementaryIntegral is integrated by
# the Risch algorithm, then use the original function to
# integrate, instead of re-written one
if result == 0:
return NonElementaryIntegral(f, x).doit(risch=False)
else:
return result + i.doit(risch=False)
else:
return result
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
# Note that in general, this is a bad idea, because Integral(g1) +
# Integral(g2) might not be computable, even if Integral(g1 + g2) is.
# For example, Integral(x**x + x**x*log(x)). But many heuristics only
# work term-wise. So we compute this step last, after trying
# risch_integrate. We also try risch_integrate again in this loop,
# because maybe the integral is a sum of an elementary part and a
# nonelementary part (like erf(x) + exp(x)). risch_integrate() is
# quite fast, so this is acceptable.
from sympy.simplify.fu import sincos_to_sum
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One and not meijerg:
parts.append(coeff*x)
continue
# g(x) = expr + O(x**n)
order_term = g.getO()
if order_term is not None:
h = self._eval_integral(g.removeO(), x, **eval_kwargs)
if h is not None:
h_order_expr = self._eval_integral(order_term.expr, x, **eval_kwargs)
if h_order_expr is not None:
h_order_term = order_term.func(
h_order_expr, *order_term.variables)
parts.append(coeff*(h + h_order_term))
continue
# NOTE: if there is O(x**n) and we fail to integrate then
# there is no point in trying other methods because they
# will fail, too.
return None
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x) and not meijerg:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = log(g.base)
elif conds != 'piecewise':
h = g.base**(g.exp + 1) / (g.exp + 1)
else:
h1 = log(g.base)
h2 = g.base**(g.exp + 1) / (g.exp + 1)
h = Piecewise((h2, Ne(g.exp, -1)), (h1, True))
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x) and not (manual or meijerg or risch):
parts.append(coeff * ratint(g, x))
continue
if not (manual or meijerg or risch):
# g(x) = Mul(trig)
h = trigintegrate(g, x, conds=conds)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
from .singularityfunctions import singularityintegrate
# g(x) has at least a Singularity Function term
h = singularityintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# Try risch again.
if risch is not False:
try:
h, i = risch_integrate(g, x,
separate_integral=True, conds=conds)
except NotImplementedError:
h = None
else:
if i:
h = h + i.doit(risch=False)
parts.append(coeff*h)
continue
# fall back to heurisch
if heurisch is not False:
from sympy.integrals.heurisch import (heurisch as heurisch_,
heurisch_wrapper)
try:
if conds == 'piecewise':
h = heurisch_wrapper(g, x, hints=[])
else:
h = heurisch_(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
else:
h = None
if meijerg is not False and h is None:
# rewrite using G functions
try:
h = meijerint_indefinite(g, x)
except NotImplementedError:
_debug('NotImplementedError from meijerint_indefinite')
if h is not None:
parts.append(coeff * h)
continue
if h is None and manual is not False:
try:
result = manualintegrate(g, x)
if result is not None and not isinstance(result, Integral):
if result.has(Integral) and not manual:
# Try to have other algorithms do the integrals
# manualintegrate can't handle,
# unless we were asked to use manual only.
# Keep the rest of eval_kwargs in case another
# method was set to False already
new_eval_kwargs = eval_kwargs
new_eval_kwargs["manual"] = False
new_eval_kwargs["final"] = False
result = result.func(*[
arg.doit(**new_eval_kwargs) if
arg.has(Integral) else arg
for arg in result.args
]).expand(multinomial=False,
log=False,
power_exp=False,
power_base=False)
if not result.has(Integral):
parts.append(coeff * result)
continue
except (ValueError, PolynomialError):
# can't handle some SymPy expressions
pass
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# at the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = sincos_to_sum(f).expand(mul=True, deep=False)
if f.is_Add:
# Note: risch will be identical on the expanded
# expression, but maybe it will be able to pick out parts,
# like x*(exp(x) + erf(x)).
return self._eval_integral(f, x, **eval_kwargs)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
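# Editor's addition: hedged examples of the dispatch order documented above,
# seen through the public integrate() wrapper (outputs assume default flags).
#   >>> from sympy import integrate, exp, sin
#   >>> from sympy.abc import x
#   >>> integrate(x**3 + 2*x, x)        # fast polynomial path
#   x**4/4 + x**2
#   >>> integrate(1/(x**2 + 1), x)      # rational-function path (ratint)
#   atan(x)
#   >>> integrate(exp(x)*sin(x), x)     # handled by the later algorithms
#   exp(x)*sin(x)/2 - exp(x)*cos(x)/2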
def _eval_lseries(self, x, logx=None, cdir=0):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
for term in expr.function.lseries(symb, logx):
yield integrate(term, *expr.limits)
def _eval_nseries(self, x, n, logx=None, cdir=0):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
terms, order = expr.function.nseries(
x=symb, n=n, logx=logx).as_coeff_add(Order)
order = [o.subs(symb, x) for o in order]
return integrate(terms, *expr.limits) + Add(*order)*x
def _eval_as_leading_term(self, x, logx=None, cdir=0):
series_gen = self.args[0].lseries(x)
for leading_term in series_gen:
if leading_term != 0:
break
return integrate(leading_term, *self.args[1:])
def _eval_simplify(self, **kwargs):
expr = factor_terms(self)
if isinstance(expr, Integral):
from sympy.simplify.simplify import simplify
return expr.func(*[simplify(i, **kwargs) for i in expr.args])
return expr.simplify(**kwargs)
def as_sum(self, n=None, method="midpoint", evaluate=True):
"""
Approximates a definite integral by a sum.
Parameters
==========
n :
The number of subintervals to use, optional.
method :
One of: 'left', 'right', 'midpoint', 'trapezoid'.
evaluate : bool
If False, returns an unevaluated Sum expression. The default
is True, in which case the sum is evaluated.
Notes
=====
These methods of approximate integration are described in [1].
Examples
========
>>> from sympy import Integral, sin, sqrt
>>> from sympy.abc import x, n
>>> e = Integral(sin(x), (x, 3, 7))
>>> e
Integral(sin(x), (x, 3, 7))
For demonstration purposes, this interval will only be split into 2
regions, bounded by [3, 5] and [5, 7].
The left-hand rule uses function evaluations at the left of each
interval:
>>> e.as_sum(2, 'left')
2*sin(5) + 2*sin(3)
The midpoint rule uses evaluations at the center of each interval:
>>> e.as_sum(2, 'midpoint')
2*sin(4) + 2*sin(6)
The right-hand rule uses function evaluations at the right of each
interval:
>>> e.as_sum(2, 'right')
2*sin(5) + 2*sin(7)
The trapezoid rule uses function evaluations on both sides of the
intervals. This is equivalent to taking the average of the left and
right hand rule results:
>>> e.as_sum(2, 'trapezoid')
2*sin(5) + sin(3) + sin(7)
>>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
True
Here, the discontinuity at x = 0 can be avoided by using the
midpoint or right-hand method:
>>> e = Integral(1/sqrt(x), (x, 0, 1))
>>> e.as_sum(5).n(4)
1.730
>>> e.as_sum(10).n(4)
1.809
>>> e.doit().n(4) # the actual value is 2
2.000
The left- or trapezoid method will encounter the discontinuity and
return infinity:
>>> e.as_sum(5, 'left')
zoo
The number of intervals can be symbolic. If omitted, a dummy symbol
will be used for it.
>>> e = Integral(x**2, (x, 0, 2))
>>> e.as_sum(n, 'right').expand()
8/3 + 4/n + 4/(3*n**2)
This shows that the midpoint rule is more accurate, as its error
term decays as the square of n:
>>> e.as_sum(method='midpoint').expand()
8/3 - 2/(3*_n**2)
A symbolic sum is returned with evaluate=False:
>>> e.as_sum(n, 'midpoint', evaluate=False)
2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n
See Also
========
Integral.doit : Perform the integration using any hints
References
==========
.. [1] https://en.wikipedia.org/wiki/Riemann_sum#Methods
"""
from sympy.concrete.summations import Sum
limits = self.limits
if len(limits) > 1:
raise NotImplementedError(
"Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if (len(limit) != 3 or limit[1].is_finite is False or
limit[2].is_finite is False):
raise ValueError("Expecting a definite integral over "
"a finite interval.")
if n is None:
n = Dummy('n', integer=True, positive=True)
else:
n = sympify(n)
if (n.is_positive is False or n.is_integer is False or
n.is_finite is False):
raise ValueError("n must be a positive integer, got %s" % n)
x, a, b = limit
dx = (b - a)/n
k = Dummy('k', integer=True, positive=True)
f = self.function
if method == "left":
result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n))
elif method == "right":
result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n))
elif method == "midpoint":
result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n))
elif method == "trapezoid":
result = dx*((f.subs(x, a) + f.subs(x, b))/2 +
Sum(f.subs(x, a + k*dx), (k, 1, n - 1)))
else:
raise ValueError("Unknown method %s" % method)
return result.doit() if evaluate else result
def principal_value(self, **kwargs):
"""
Compute the Cauchy Principal Value of the definite integral of a real function in the given interval
on the real axis.
Explanation
===========
In mathematics, the Cauchy principal value is a method for assigning values to certain improper
integrals which would otherwise be undefined.
Examples
========
>>> from sympy import Integral, oo
>>> from sympy.abc import x
>>> Integral(x+1, (x, -oo, oo)).principal_value()
oo
>>> f = 1 / (x**3)
>>> Integral(f, (x, -oo, oo)).principal_value()
0
>>> Integral(f, (x, -10, 10)).principal_value()
0
>>> Integral(f, (x, -10, oo)).principal_value() + Integral(f, (x, -oo, 10)).principal_value()
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Cauchy_principal_value
.. [2] http://mathworld.wolfram.com/CauchyPrincipalValue.html
"""
if len(self.limits) != 1 or len(list(self.limits[0])) != 3:
raise ValueError("You need to insert a variable, lower_limit, and upper_limit correctly to calculate "
"cauchy's principal value")
x, a, b = self.limits[0]
if not (a.is_comparable and b.is_comparable and a <= b):
raise ValueError("The lower_limit must be smaller than or equal to the upper_limit to calculate "
"cauchy's principal value. Also, a and b need to be comparable.")
if a == b:
return S.Zero
from sympy.calculus.singularities import singularities
r = Dummy('r')
f = self.function
singularities_list = [s for s in singularities(f, x) if s.is_comparable and a <= s <= b]
for i in singularities_list:
if i in (a, b):
raise ValueError(
'The principal value is not defined in the given interval due to singularity at %d.' % (i))
F = integrate(f, x, **kwargs)
if F.has(Integral):
return self
if a is -oo and b is oo:
I = limit(F - F.subs(x, -x), x, oo)
else:
I = limit(F, x, b, '-') - limit(F, x, a, '+')
for s in singularities_list:
I += limit(((F.subs(x, s - r)) - F.subs(x, s + r)), r, 0, '+')
return I
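# Editor's addition: a hedged sketch of the singularity handling above; the
# classic principal value of 1/x over a symmetric interval should come out 0.
#   >>> from sympy import Integral
#   >>> from sympy.abc import x
#   >>> Integral(1/x, (x, -1, 1)).principal_value()
#   0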
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):
"""integrate(f, var, ...)
.. deprecated:: 1.6
Using ``integrate()`` with :class:`~.Poly` is deprecated. Use
:meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`.
Explanation
===========
Compute definite or indefinite integral of one or more variables
using the Risch-Norman algorithm and table lookup. This procedure is
able to handle elementary algebraic and transcendental functions
and also a huge class of special functions, including Airy,
Bessel, Whittaker and Lambert.
var can be:
- a symbol -- indefinite integration
- a tuple (symbol, a) -- indefinite integration with result
given with ``a`` replacing ``symbol``
- a tuple (symbol, a, b) -- definite integration
Several variables can be specified, in which case the result is
multiple integration. (If var is omitted and the integrand is
univariate, the indefinite integral in that variable will be performed.)
Indefinite integrals are returned without terms that are independent
of the integration variables. (see examples)
Definite improper integrals often entail delicate convergence
conditions. Pass conds='piecewise', 'separate' or 'none' to have
these returned, respectively, as a Piecewise function, as a separate
result (i.e. result will be a tuple), or not at all (default is
'piecewise').
**Strategy**
SymPy uses various approaches to definite integration. One method is to
find an antiderivative for the integrand, and then use the fundamental
theorem of calculus. Various functions are implemented to integrate
polynomial, rational and trigonometric functions, and integrands
containing DiracDelta terms.
SymPy also implements part of the Risch algorithm, which is a decision
procedure for integrating elementary functions, i.e., the algorithm can
either find an elementary antiderivative, or prove that one does not
exist. There is also a (very successful, albeit somewhat slow) general
implementation of the heuristic Risch algorithm. This algorithm will
eventually be phased out as more of the full Risch algorithm is
implemented. See the docstring of Integral._eval_integral() for more
details on computing the antiderivative using algebraic methods.
The option risch=True can be used to use only the (full) Risch algorithm.
This is useful if you want to know if an elementary function has an
elementary antiderivative. If the indefinite Integral returned by this
function is an instance of NonElementaryIntegral, that means that the
Risch algorithm has proven that integral to be non-elementary. Note that
by default, additional methods (such as the Meijer G method outlined
below) are tried on these integrals, as they may be expressible in terms
of special functions, so if you only care about elementary answers, use
risch=True. Also note that an unevaluated Integral returned by this
function is not necessarily a NonElementaryIntegral, even with risch=True,
as it may just be an indication that the particular part of the Risch
algorithm needed to integrate that function is not yet implemented.
Another family of strategies comes from re-writing the integrand in
terms of so-called Meijer G-functions. Indefinite integrals of a
single G-function can always be computed, and the definite integral
of a product of two G-functions can be computed from zero to
infinity. Various strategies are implemented to rewrite integrands
as G-functions, and use this information to compute integrals (see
the ``meijerint`` module).
The option manual=True can be used to use only an algorithm that tries
to mimic integration by hand. This algorithm does not handle as many
integrands as the other algorithms implemented but may return results in
a more familiar form. The ``manualintegrate`` module has functions that
return the steps used (see the module docstring for more information).
In general, the algebraic methods work best for computing
antiderivatives of (possibly complicated) combinations of elementary
functions. The G-function methods work best for computing definite
integrals from zero to infinity of moderately complicated
combinations of special functions, or indefinite integrals of very
simple combinations of special functions.
The strategy employed by the integration code is as follows:
- If computing a definite integral, and both limits are real,
and at least one limit is +- oo, try the G-function method of
definite integration first.
- Try to find an antiderivative, using all available methods, ordered
by performance (that is try fastest method first, slowest last; in
particular polynomial integration is tried first, Meijer
G-functions second to last, and heuristic Risch last).
- If still not successful, try G-functions irrespective of the
limits.
The option meijerg=True, False, None can be used to, respectively:
always use G-function methods and no others, never use G-function
methods, or use all available methods (in order as described above).
It defaults to None.
Examples
========
>>> from sympy import integrate, log, exp, oo
>>> from sympy.abc import a, x, y
>>> integrate(x*y, x)
x**2*y/2
>>> integrate(log(x), x)
x*log(x) - x
>>> integrate(log(x), (x, 1, a))
a*log(a) - a + 1
>>> integrate(x)
x**2/2
Terms that are independent of x are dropped by indefinite integration:
>>> from sympy import sqrt
>>> integrate(sqrt(1 + x), (x, 0, x))
2*(x + 1)**(3/2)/3 - 2/3
>>> integrate(sqrt(1 + x), x)
2*(x + 1)**(3/2)/3
>>> integrate(x*y)
Traceback (most recent call last):
...
ValueError: specify integration variables to integrate x*y
Note that ``integrate(x)`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
>>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
Piecewise((gamma(a + 1), re(a) > -1),
(Integral(x**a*exp(-x), (x, 0, oo)), True))
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
gamma(a + 1)
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
(gamma(a + 1), re(a) > -1)
See Also
========
Integral, Integral.doit
"""
doit_flags = {
'deep': False,
'meijerg': meijerg,
'conds': conds,
'risch': risch,
'heurisch': heurisch,
'manual': manual
}
integral = Integral(*args, **kwargs)
if isinstance(integral, Integral):
return integral.doit(**doit_flags)
else:
new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a
for a in integral.args]
return integral.func(*new_args)
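# Editor's addition: hedged sketches of the risch/meijerg flags discussed in
# the strategy notes above (the class-name check avoids relying on the printer).
#   >>> from sympy import integrate, exp
#   >>> from sympy.abc import x
#   >>> integrate(exp(x**2), x)         # expressible via erfi
#   sqrt(pi)*erfi(x)/2
#   >>> type(integrate(exp(x**2), x, risch=True)).__name__  # proven nonelementary
#   'NonElementaryIntegral'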
def line_integrate(field, curve, vars):
"""line_integrate(field, Curve, variables)
Compute the line integral.
Examples
========
>>> from sympy import Curve, line_integrate, E, ln
>>> from sympy.abc import x, y, t
>>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
>>> line_integrate(x + y, C, [x, y])
3*sqrt(2)
See Also
========
sympy.integrals.integrals.integrate, Integral
"""
from sympy.geometry import Curve
F = sympify(field)
if not F:
raise ValueError(
"Expecting function specifying field as first argument.")
if not isinstance(curve, Curve):
raise ValueError("Expecting Curve entity as second argument.")
if not is_sequence(vars):
raise ValueError("Expecting ordered iterable for variables.")
if len(curve.functions) != len(vars):
raise ValueError("Field variable size does not match curve dimension.")
if curve.parameter in vars:
raise ValueError("Curve parameter clashes with field parameters.")
# Calculate derivatives for line parameter functions
# F(r) -> F(r(t)) and finally F(r(t)*r'(t))
Ft = F
dldt = 0
for i, var in enumerate(vars):
_f = curve.functions[i]
_dn = diff(_f, curve.parameter)
# ...arc length
dldt = dldt + (_dn * _dn)
Ft = Ft.subs(var, _f)
Ft = Ft * sqrt(dldt)
integral = Integral(Ft, curve.limits).doit(deep=False)
return integral
### Property function dispatching ###
@shape.register(Integral)
def _(expr):
return shape(expr.function)
# Delayed imports
from .deltafunctions import deltaintegrate
from .meijerint import meijerint_definite, meijerint_indefinite, _debug
from .trigonometry import trigintegrate
|
a5b8d8da9881a447d25908136e2aaa57c2f7d0fa336e96093cfe97ae686f29ad | from __future__ import annotations
from itertools import permutations
from functools import reduce
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.mul import Mul
from sympy.core.symbol import Wild, Dummy, Symbol
from sympy.core.basic import sympify
from sympy.core.numbers import Rational, pi, I
from sympy.core.relational import Eq, Ne
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.traversal import iterfreeargs
from sympy.functions import exp, sin, cos, tan, cot, asin, atan
from sympy.functions import log, sinh, cosh, tanh, coth, asinh
from sympy.functions import sqrt, erf, erfi, li, Ei
from sympy.functions import besselj, bessely, besseli, besselk
from sympy.functions import hankel1, hankel2, jn, yn
from sympy.functions.elementary.complexes import Abs, re, im, sign, arg
from sympy.functions.elementary.exponential import LambertW
from sympy.functions.elementary.integers import floor, ceiling
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.delta_functions import Heaviside, DiracDelta
from sympy.simplify.radsimp import collect
from sympy.logic.boolalg import And, Or
from sympy.utilities.iterables import uniq
from sympy.polys import quo, gcd, lcm, factor_list, cancel, PolynomialError
from sympy.polys.monomials import itermonomials
from sympy.polys.polyroots import root_factors
from sympy.polys.rings import PolyRing
from sympy.polys.solvers import solve_lin_sys
from sympy.polys.constructor import construct_domain
from sympy.integrals.integrals import integrate
def components(f, x):
"""
Returns a set of all functional components of the given expression
which includes symbols, function applications and compositions and
non-integer powers. Fractional powers are collected with
minimal, positive exponents.
Examples
========
>>> from sympy import cos, sin
>>> from sympy.abc import x
>>> from sympy.integrals.heurisch import components
>>> components(sin(x)*cos(x)**2, x)
{x, sin(x), cos(x)}
See Also
========
heurisch
"""
result = set()
if f.has_free(x):
if f.is_symbol and f.is_commutative:
result.add(f)
elif f.is_Function or f.is_Derivative:
for g in f.args:
result |= components(g, x)
result.add(f)
elif f.is_Pow:
result |= components(f.base, x)
if not f.exp.is_Integer:
if f.exp.is_Rational:
result.add(f.base**Rational(1, f.exp.q))
else:
result |= components(f.exp, x) | {f}
else:
for g in f.args:
result |= components(g, x)
return result
# name -> [] of symbols
_symbols_cache: dict[str, list[Dummy]] = {}
# NB @cacheit is not convenient here
def _symbols(name, n):
"""get vector of symbols local to this module"""
try:
lsyms = _symbols_cache[name]
except KeyError:
lsyms = []
_symbols_cache[name] = lsyms
while len(lsyms) < n:
lsyms.append( Dummy('%s%i' % (name, len(lsyms))) )
return lsyms[:n]
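# Editor's addition (hedged): repeated calls reuse the cached Dummy symbols,
# so asking for a longer vector only extends the cache.
#   >>> _symbols('x', 2) == _symbols('x', 3)[:2]
#   True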
def heurisch_wrapper(f, x, rewrite=False, hints=None, mappings=None, retries=3,
degree_offset=0, unnecessary_permutations=None,
_try_heurisch=None):
"""
A wrapper around the heurisch integration algorithm.
Explanation
===========
This method takes the result from heurisch and checks for poles in the
denominator. For each of these poles, the integral is reevaluated, and
the final integration result is given in terms of a Piecewise.
Examples
========
>>> from sympy import cos, symbols
>>> from sympy.integrals.heurisch import heurisch, heurisch_wrapper
>>> n, x = symbols('n x')
>>> heurisch(cos(n*x), x)
sin(n*x)/n
>>> heurisch_wrapper(cos(n*x), x)
Piecewise((sin(n*x)/n, Ne(n, 0)), (x, True))
See Also
========
heurisch
"""
from sympy.solvers.solvers import solve, denoms
f = sympify(f)
if not f.has_free(x):
return f*x
res = heurisch(f, x, rewrite, hints, mappings, retries, degree_offset,
unnecessary_permutations, _try_heurisch)
if not isinstance(res, Basic):
return res
# We consider each denominator in the expression, and try to find
# cases where one or more symbolic denominator might be zero. The
# conditions for these cases are stored in the list slns.
#
# Since denoms returns a set we use ordered. This is important because the
# ordering of slns determines the order of the resulting Piecewise so we
# need a deterministic order here to make the output deterministic.
slns = []
for d in ordered(denoms(res)):
try:
slns += solve([d], dict=True, exclude=(x,))
except NotImplementedError:
pass
if not slns:
return res
slns = list(uniq(slns))
# Remove the solutions corresponding to poles in the original expression.
slns0 = []
for d in denoms(f):
try:
slns0 += solve([d], dict=True, exclude=(x,))
except NotImplementedError:
pass
slns = [s for s in slns if s not in slns0]
if not slns:
return res
if len(slns) > 1:
eqs = []
for sub_dict in slns:
eqs.extend([Eq(key, value) for key, value in sub_dict.items()])
slns = solve(eqs, dict=True, exclude=(x,)) + slns
# For each case listed in the list slns, we reevaluate the integral.
pairs = []
for sub_dict in slns:
expr = heurisch(f.subs(sub_dict), x, rewrite, hints, mappings, retries,
degree_offset, unnecessary_permutations,
_try_heurisch)
cond = And(*[Eq(key, value) for key, value in sub_dict.items()])
generic = Or(*[Ne(key, value) for key, value in sub_dict.items()])
if expr is None:
expr = integrate(f.subs(sub_dict),x)
pairs.append((expr, cond))
# If there is one condition, put the generic case first. Otherwise,
# doing so may lead to longer Piecewise formulas
if len(pairs) == 1:
pairs = [(heurisch(f, x, rewrite, hints, mappings, retries,
degree_offset, unnecessary_permutations,
_try_heurisch),
generic),
(pairs[0][0], True)]
else:
pairs.append((heurisch(f, x, rewrite, hints, mappings, retries,
degree_offset, unnecessary_permutations,
_try_heurisch),
True))
return Piecewise(*pairs)
class BesselTable:
"""
Derivatives of Bessel functions of orders n and n-1
in terms of each other.
See the docstring of DiffCache.
"""
def __init__(self):
self.table = {}
self.n = Dummy('n')
self.z = Dummy('z')
self._create_table()
def _create_table(t):
table, n, z = t.table, t.n, t.z
for f in (besselj, bessely, hankel1, hankel2):
table[f] = (f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
f = besseli
table[f] = (f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z + f(n, z))
f = besselk
table[f] = (-f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
for f in (jn, yn):
table[f] = (f(n-1, z) - (n+1)*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
def diffs(t, f, n, z):
if f in t.table:
diff0, diff1 = t.table[f]
repl = [(t.n, n), (t.z, z)]
return (diff0.subs(repl), diff1.subs(repl))
def has(t, f):
return f in t.table
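# Editor's addition: the recurrences stored above for besselj read, in
# conventional notation,
#     d/dz J_n(z)     = J_{n-1}(z) - n*J_n(z)/z
#     d/dz J_{n-1}(z) = (n-1)*J_{n-1}(z)/z - J_n(z)
# and diffs(besselj, n, z) returns exactly this pair with the cached dummies
# substituted by the given order and argument.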
_bessel_table = None
class DiffCache:
"""
Store for derivatives of expressions.
Explanation
===========
The standard form of the derivative of a Bessel function of order n
contains two Bessel functions of orders n-1 and n+1, respectively.
Such forms cannot be used in the parallel Risch algorithm, because
there is a linear recurrence relation between the three functions
while the algorithm expects that functions and derivatives are
represented in terms of algebraically independent transcendentals.
The solution is to take two of the functions, e.g., those of orders
n and n-1, and to express the derivatives in terms of the pair.
To guarantee that the proper form is used the two derivatives are
cached as soon as one is encountered.
Derivatives of other functions are also cached at no extra cost.
All derivatives are with respect to the same variable `x`.
"""
def __init__(self, x):
self.cache = {}
self.x = x
global _bessel_table
if not _bessel_table:
_bessel_table = BesselTable()
def get_diff(self, f):
cache = self.cache
if f in cache:
pass
elif (not hasattr(f, 'func') or
not _bessel_table.has(f.func)):
cache[f] = cancel(f.diff(self.x))
else:
n, z = f.args
d0, d1 = _bessel_table.diffs(f.func, n, z)
dz = self.get_diff(z)
cache[f] = d0*dz
cache[f.func(n-1, z)] = d1*dz
return cache[f]
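# Editor's addition: a hedged usage sketch of the cache; non-Bessel functions
# are simply differentiated and cancelled, while Bessel-type functions also
# cache the order-(n-1) derivative as described in the class docstring.
#   >>> from sympy import sin, Symbol
#   >>> x = Symbol('x')
#   >>> dc = DiffCache(x)
#   >>> dc.get_diff(sin(x))
#   cos(x)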
def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3,
degree_offset=0, unnecessary_permutations=None,
_try_heurisch=None):
"""
Compute indefinite integral using heuristic Risch algorithm.
Explanation
===========
This is a heuristic approach to indefinite integration in finite
terms using the extended heuristic (parallel) Risch algorithm, based
on Manuel Bronstein's "Poor Man's Integrator".
The algorithm supports various classes of functions including
transcendental elementary or special functions like Airy,
Bessel, Whittaker and Lambert.
Note that this algorithm is not a decision procedure. If it isn't
able to compute the antiderivative for a given function, then this is
not a proof that such a functions does not exist. One should use
recursive Risch algorithm in such case. It's an open question if
this algorithm can be made a full decision procedure.
This is an internal integrator procedure. You should use top level
'integrate' function in most cases, as this procedure needs some
preprocessing steps and otherwise may fail.
Specification
=============
heurisch(f, x, rewrite=False, hints=None)
where
f : expression
x : symbol
rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
hints -> a list of functions that may appear in the antiderivative
- hints = None --> no suggestions at all
- hints = [ ] --> try to figure out
- hints = [f1, ..., fn] --> we know better
Examples
========
>>> from sympy import tan
>>> from sympy.integrals.heurisch import heurisch
>>> from sympy.abc import x, y
>>> heurisch(y*tan(x), x)
y*log(tan(x)**2 + 1)/2
See Manuel Bronstein's "Poor Man's Integrator":
References
==========
.. [1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
For more information on the implemented algorithm refer to:
.. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
.. [3] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
.. [4] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
.. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
See Also
========
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
sympy.integrals.heurisch.components
"""
f = sympify(f)
# There are some functions that Heurisch cannot currently handle,
# so do not even try.
# Set _try_heurisch=True to skip this check
if _try_heurisch is not True:
if f.has(Abs, re, im, sign, Heaviside, DiracDelta, floor, ceiling, arg):
return
if not f.has_free(x):
return f*x
if not f.is_Add:
indep, f = f.as_independent(x)
else:
indep = S.One
rewritables = {
(sin, cos, cot): tan,
(sinh, cosh, coth): tanh,
}
if rewrite:
for candidates, rule in rewritables.items():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.keys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f, x)
dcache = DiffCache(x)
if hints is not None:
if not hints:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
c = Wild('c', exclude=[x])
for g in set(terms): # using copy of terms
if g.is_Function:
if isinstance(g, li):
M = g.args[0].match(a*x**b)
if M is not None:
terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
#terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
#terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
#terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) )
elif isinstance(g, exp):
M = g.args[0].match(a*x**2)
if M is not None:
if M[a].is_positive:
terms.add(erfi(sqrt(M[a])*x))
else: # M[a].is_negative or unknown
terms.add(erf(sqrt(-M[a])*x))
M = g.args[0].match(a*x**2 + b*x + c)
if M is not None:
if M[a].is_positive:
terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a]))))
elif M[a].is_negative:
terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a]))))
M = g.args[0].match(a*log(x)**2)
if M is not None:
if M[a].is_positive:
terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a]))))
if M[a].is_negative:
terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a]))))
elif g.is_Pow:
if g.exp.is_Rational and g.exp.q == 2:
M = g.base.match(a*x**2 + b)
if M is not None and M[b].is_positive:
if M[a].is_positive:
terms.add(asinh(sqrt(M[a]/M[b])*x))
elif M[a].is_negative:
terms.add(asin(sqrt(-M[a]/M[b])*x))
M = g.base.match(a*x**2 - b)
if M is not None and M[b].is_positive:
if M[a].is_positive:
dF = 1/sqrt(M[a]*x**2 - M[b])
F = log(2*sqrt(M[a])*sqrt(M[a]*x**2 - M[b]) + 2*M[a]*x)/sqrt(M[a])
dcache.cache[F] = dF # hack: F.diff(x) doesn't automatically simplify to f
terms.add(F)
elif M[a].is_negative:
terms.add(-M[b]/2*sqrt(-M[a])*
atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b])))
else:
terms |= set(hints)
for g in set(terms): # using copy of terms
terms |= components(dcache.get_diff(g), x)
# XXX: The commented line below makes heurisch more deterministic wrt
# PYTHONHASHSEED and the iteration order of sets. There are other places
# where sets are iterated over but this one is possibly the most important.
# Theoretically the order here should not matter but different orderings
# can expose potential bugs in the different code paths so potentially it
# is better to keep the non-determinism.
#
# terms = list(ordered(terms))
    # TODO: caching is a significant factor in why permutations work at all. Change this.
V = _symbols('x', len(terms))
# sort mapping expressions from largest to smallest (last is always x).
mapping = list(reversed(list(zip(*ordered( #
[(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) #
rev_mapping = {v: k for k, v in mapping} #
if mappings is None: #
# optimizing the number of permutations of mapping #
assert mapping[-1][0] == x # if not, find it and correct this comment
unnecessary_permutations = [mapping.pop(-1)]
mappings = permutations(mapping)
else:
unnecessary_permutations = unnecessary_permutations or []
def _substitute(expr):
return expr.subs(mapping)
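    # Try each permutation of the variable mapping and accept the first one
    # for which the substituted derivatives have polynomial denominators and
    # the substituted integrand is a rational function of the new variables V.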
for mapping in mappings:
mapping = list(mapping)
mapping = mapping + unnecessary_permutations
diffs = [ _substitute(dcache.get_diff(g)) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V):
denom = reduce(lambda p, q: lcm(p, q, *V), denoms)
break
else:
if not rewrite:
result = heurisch(f, x, rewrite=True, hints=hints,
unnecessary_permutations=unnecessary_permutations)
if result is not None:
return indep*result
return None
numers = [ cancel(denom*g) for g in diffs ]
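    # _derivation is the derivation on expressions in V induced by d/dx: each
    # new variable is mapped to the cancelled numerator of the derivative of
    # the term it stands for.  _deflation and _splitter are the usual
    # Risch-Norman helpers: _splitter separates a polynomial into a "special"
    # factor (its factors divide their own image under the derivation) and
    # the remaining "normal" part.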
def _derivation(h):
return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def _deflation(p):
for y in V:
if not p.has(y):
continue
if _derivation(p) is not S.Zero:
c, q = p.as_poly(y).primitive()
return _deflation(c)*gcd(q, q.diff(y)).as_expr()
return p
def _splitter(p):
for y in V:
if not p.has(y):
continue
if _derivation(y) is not S.Zero:
c, q = p.as_poly(y).primitive()
q = q.as_expr()
h = gcd(q, _derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = _splitter(c)
if s.as_poly(y).degree() == 0:
return (c_split[0], q * c_split[1])
q_split = _splitter(cancel(q / s))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
return (S.One, p)
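    # Polynomials arising from tan, tanh and LambertW terms are recorded as
    # "special": they are adjoined to the candidate log arguments below, and
    # the ones flagged True (LambertW) additionally enter the factor ``s``.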
special = {}
for term in terms:
if term.is_Function:
if isinstance(term, tan):
special[1 + _substitute(term)**2] = False
elif isinstance(term, tanh):
special[1 + _substitute(term)] = False
special[1 - _substitute(term)] = False
elif isinstance(term, LambertW):
special[_substitute(term)] = True
F = _substitute(f)
P, Q = F.as_numer_denom()
u_split = _splitter(denom)
v_split = _splitter(Q)
polys = set(list(v_split) + [ u_split[0] ] + list(special.keys()))
s = u_split[0] * Mul(*[ k for k, v in special.items() if v ])
polified = [ p.as_poly(*V) for p in [s, P, Q] ]
if None in polified:
return None
#--- definitions for _integrate
a, b, c = [ p.total_degree() for p in polified ]
poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr()
def _exponent(g):
if g.is_Pow:
if g.exp.is_Rational and g.exp.q != 1:
if g.exp.p > 0:
return g.exp.p + g.exp.q - 1
else:
return abs(g.exp.p + g.exp.q)
else:
return 1
elif not g.is_Atom and g.args:
return max([ _exponent(h) for h in g.args ])
else:
return 1
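    # Build the polynomial part of the ansatz: a linear combination, with
    # undetermined coefficients, of all monomials in V up to a heuristic
    # degree bound derived from the integrand and the candidate denominator.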
A, B = _exponent(f), a + max(b, c)
if A > 1 and B > 1:
monoms = tuple(ordered(itermonomials(V, A + B - 1 + degree_offset)))
else:
monoms = tuple(ordered(itermonomials(V, A + B + degree_offset)))
poly_coeffs = _symbols('A', len(monoms))
poly_part = Add(*[ poly_coeffs[i]*monomial
for i, monomial in enumerate(monoms) ])
reducibles = set()
for poly in ordered(polys):
coeff, factors = factor_list(poly, *V)
reducibles.add(coeff)
for fact, mul in factors:
reducibles.add(fact)
def _integrate(field=None):
atans = set()
pairs = set()
if field == 'Q':
irreducibles = set(reducibles)
else:
setV = set(V)
irreducibles = set()
for poly in ordered(reducibles):
zV = setV & set(iterfreeargs(poly))
for z in ordered(zV):
s = set(root_factors(poly, z, filter=field))
irreducibles |= s
break
log_part, atan_part = [], []
for poly in ordered(irreducibles):
m = collect(poly, I, evaluate=False)
y = m.get(I, S.Zero)
if y:
x = m.get(S.One, S.Zero)
if x.has(I) or y.has(I):
continue # nontrivial x + I*y
pairs.add((x, y))
irreducibles.remove(poly)
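        # Pair up complex-conjugate factors x + I*y and x - I*y: each such
        # pair is replaced by the real factor x**2 + y**2 together with an
        # arctangent candidate atan(x/y).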
while pairs:
x, y = pairs.pop()
if (x, -y) in pairs:
pairs.remove((x, -y))
# Choosing b with no minus sign
if y.could_extract_minus_sign():
y = -y
irreducibles.add(x*x + y*y)
atans.add(atan(x/y))
else:
irreducibles.add(x + I*y)
B = _symbols('B', len(irreducibles))
C = _symbols('C', len(atans))
# Note: the ordering matters here
for poly, b in reversed(list(zip(ordered(irreducibles), B))):
if poly.has(*V):
poly_coeffs.append(b)
log_part.append(b * log(poly))
for poly, c in reversed(list(zip(ordered(atans), C))):
if poly.has(*V):
poly_coeffs.append(c)
atan_part.append(c * poly)
# TODO: Currently it's better to use symbolic expressions here instead
# of rational functions, because it's simpler and FracElement doesn't
# give big speed improvement yet. This is because cancellation is slow
# due to slow polynomial GCD algorithms. If this gets improved then
# revise this code.
candidate = poly_part/poly_denom + Add(*log_part) + Add(*atan_part)
h = F - _derivation(candidate) / denom
raw_numer = h.as_numer_denom()[0]
# Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field
# that we have to determine. We can't use simply atoms() because log(3),
# sqrt(y) and similar expressions can appear, leading to non-trivial
# domains.
syms = set(poly_coeffs) | set(V)
non_syms = set()
def find_non_syms(expr):
if expr.is_Integer or expr.is_Rational:
pass # ignore trivial numbers
elif expr in syms:
pass # ignore variables
elif not expr.has_free(*syms):
non_syms.add(expr)
elif expr.is_Add or expr.is_Mul or expr.is_Pow:
list(map(find_non_syms, expr.args))
else:
# TODO: Non-polynomial expression. This should have been
# filtered out at an earlier stage.
raise PolynomialError
try:
find_non_syms(raw_numer)
except PolynomialError:
return None
else:
ground, _ = construct_domain(non_syms, field=True)
coeff_ring = PolyRing(poly_coeffs, ground)
ring = PolyRing(V, coeff_ring)
try:
numer = ring.from_expr(raw_numer)
except ValueError:
raise PolynomialError
solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False)
if solution is None:
return None
else:
return candidate.xreplace(solution).xreplace(
dict(zip(poly_coeffs, [S.Zero]*len(poly_coeffs))))
if all(isinstance(_, Symbol) for _ in V):
more_free = F.free_symbols - set(V)
else:
Fd = F.as_dummy()
more_free = Fd.xreplace(dict(zip(V, (Dummy() for _ in V)))
).free_symbols & Fd.free_symbols
if not more_free:
# all free generators are identified in V
solution = _integrate('Q')
if solution is None:
solution = _integrate()
else:
solution = _integrate()
if solution is not None:
antideriv = solution.subs(rev_mapping)
antideriv = cancel(antideriv).expand()
if antideriv.is_Add:
antideriv = antideriv.as_independent(x)[1]
return indep*antideriv
else:
if retries >= 0:
result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations)
if result is not None:
return indep*result
return None
|
5db2c9bffd3899009d1389e567c8f3e32b8d44fec0a730a2175ebd8dc509d651 | """ This module cooks up a docstring when imported. Its only purpose is to
be displayed in the Sphinx documentation. """
from __future__ import annotations
from typing import Any
from sympy.integrals.meijerint import _create_lookup_table
from sympy.core.add import Add
from sympy.core.relational import Eq
from sympy.core.symbol import Symbol
from sympy.printing.latex import latex
t: dict[tuple[type, ...], list[Any]] = {}
_create_lookup_table(t)
doc = ""
for about, category in sorted(t.items()):
if about == ():
doc += 'Elementary functions:\n\n'
else:
doc += 'Functions involving ' + ', '.join('`%s`' % latex(
list(category[0][0].atoms(func))[0]) for func in about) + ':\n\n'
for formula, gs, cond, hint in category:
if not isinstance(gs, list):
g = Symbol('\\text{generated}')
else:
g = Add(*[fac*f for (fac, f) in gs])
obj = Eq(formula, g)
if cond is True:
cond = ""
else:
cond = ',\\text{ if } %s' % latex(cond)
doc += ".. math::\n %s%s\n\n" % (latex(obj), cond)
__doc__ = doc
|
cffe39f0b77254c46caee1df8d790b8533200a308779ba3f728d2ef82c1b3d9d | """
Integrate functions by rewriting them as Meijer G-functions.
There are three user-visible functions that can be used by other parts of the
sympy library to solve various integration problems:
- meijerint_indefinite
- meijerint_definite
- meijerint_inversion
They can be used to compute, respectively, indefinite integrals, definite
integrals over intervals of the real line, and inverse Laplace-type integrals
(from c-I*oo to c+I*oo). See the respective docstrings for details.
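For example, ``meijerint_indefinite(sin(x), x)`` returns ``-cos(x)``.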
The main references for this are:
[L] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
[R] Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
[P] A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
    Integrals and Series: More Special Functions, Vol. 3,
    Gordon and Breach Science Publishers.
"""
from __future__ import annotations
import itertools
from sympy import SYMPY_DEBUG
from sympy.core import S, Expr
from sympy.core.add import Add
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand, expand_mul, expand_power_base,
expand_trig, Function)
from sympy.core.mul import Mul
from sympy.core.numbers import ilcm, Rational, pi
from sympy.core.relational import Eq, Ne, _canonical_coeff
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (re, im, arg, Abs, sign,
unpolarify, polarify, polar_lift, principal_branch, unbranched_argument,
periodic_argument)
from sympy.functions.elementary.exponential import exp, exp_polar, log
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.hyperbolic import (cosh, sinh,
_rewrite_hyperbolics_as_exp, HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.trigonometric import (cos, sin, sinc,
TrigonometricFunction)
from sympy.functions.special.bessel import besselj, bessely, besseli, besselk
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.elliptic_integrals import elliptic_k, elliptic_e
from sympy.functions.special.error_functions import (erf, erfc, erfi, Ei,
expint, Si, Ci, Shi, Chi, fresnels, fresnelc)
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.hyper import hyper, meijerg
from sympy.functions.special.singularity_functions import SingularityFunction
from .integrals import Integral
from sympy.logic.boolalg import And, Or, BooleanAtom, Not, BooleanFunction
from sympy.polys import cancel, factor
from sympy.utilities.iterables import multiset_partitions
from sympy.utilities.misc import debug as _debug
# keep this at top for easy reference
z = Dummy('z')
def _has(res, *f):
# return True if res has f; in the case of Piecewise
# only return True if *all* pieces have f
res = piecewise_fold(res)
if getattr(res, 'is_Piecewise', False):
return all(_has(i, *f) for i in res.args)
return res.has(*f)
def _create_lookup_table(table):
""" Add formulae for the function -> meijerg lookup table. """
def wild(n):
return Wild(n, exclude=[z])
p, q, a, b, c = list(map(wild, 'pqabc'))
n = Wild('n', properties=[lambda x: x.is_Integer and x > 0])
t = p*z**q
def add(formula, an, ap, bm, bq, arg=t, fac=S.One, cond=True, hint=True):
table.setdefault(_mytype(formula, z), []).append((formula,
[(fac, meijerg(an, ap, bm, bq, arg))], cond, hint))
def addi(formula, inst, cond, hint=True):
table.setdefault(
_mytype(formula, z), []).append((formula, inst, cond, hint))
def constant(a):
return [(a, meijerg([1], [], [], [0], z)),
(a, meijerg([], [1], [0], [], z))]
table[()] = [(a, constant(a), True, True)]
# [P], Section 8.
class IsNonPositiveInteger(Function):
@classmethod
def eval(cls, arg):
arg = unpolarify(arg)
if arg.is_Integer is True:
return arg <= 0
# Section 8.4.2
# TODO this needs more polar_lift (c/f entry for exp)
add(Heaviside(t - b)*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(b - t)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(z - (b/p)**(1/q))*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside((b/p)**(1/q) - z)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add((b + t)**(-a), [1 - a], [], [0], [], t/b, b**(-a)/gamma(a),
hint=Not(IsNonPositiveInteger(a)))
add(Abs(b - t)**(-a), [1 - a], [(1 - a)/2], [0], [(1 - a)/2], t/b,
2*sin(pi*a/2)*gamma(1 - a)*Abs(b)**(-a), re(a) < 1)
add((t**a - b**a)/(t - b), [0, a], [], [0, a], [], t/b,
b**(a - 1)*sin(a*pi)/pi)
# 12
def A1(r, sign, nu):
return pi**Rational(-1, 2)*(-sign*nu/2)**(1 - 2*r)
def tmpadd(r, sgn):
# XXX the a**2 is bad for matching
add((sqrt(a**2 + t) + sgn*a)**b/(a**2 + t)**r,
[(1 + b)/2, 1 - 2*r + b/2], [],
[(b - sgn*b)/2], [(b + sgn*b)/2], t/a**2,
a**(b - 2*r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S.Half, 1)
tmpadd(S.Half, -1)
# 13
def tmpadd(r, sgn):
add((sqrt(a + p*z**q) + sgn*sqrt(p)*z**(q/2))**b/(a + p*z**q)**r,
[1 - r + sgn*b/2], [1 - r - sgn*b/2], [0, S.Half], [],
p*z**q/a, a**(b/2 - r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S.Half, 1)
tmpadd(S.Half, -1)
# (those after look obscure)
# Section 8.4.3
add(exp(polar_lift(-1)*t), [], [], [0], [])
# TODO can do sin^n, sinh^n by expansion ... where?
# 8.4.4 (hyperbolic functions)
add(sinh(t), [], [1], [S.Half], [1, 0], t**2/4, pi**Rational(3, 2))
add(cosh(t), [], [S.Half], [0], [S.Half, S.Half], t**2/4, pi**Rational(3, 2))
# Section 8.4.5
# TODO can do t + a. but can also do by expansion... (XXX not really)
add(sin(t), [], [], [S.Half], [0], t**2/4, sqrt(pi))
add(cos(t), [], [], [0], [S.Half], t**2/4, sqrt(pi))
# Section 8.4.6 (sinc function)
add(sinc(t), [], [], [0], [Rational(-1, 2)], t**2/4, sqrt(pi)/2)
# Section 8.5.5
def make_log1(subs):
N = subs[n]
return [(S.NegativeOne**N*factorial(N),
meijerg([], [1]*(N + 1), [0]*(N + 1), [], t))]
def make_log2(subs):
N = subs[n]
return [(factorial(N),
meijerg([1]*(N + 1), [], [], [0]*(N + 1), t))]
# TODO these only hold for positive p, and can be made more general
# but who uses log(x)*Heaviside(a-x) anyway ...
# TODO also it would be nice to derive them recursively ...
addi(log(t)**n*Heaviside(1 - t), make_log1, True)
addi(log(t)**n*Heaviside(t - 1), make_log2, True)
def make_log3(subs):
return make_log1(subs) + make_log2(subs)
addi(log(t)**n, make_log3, True)
addi(log(t + a),
constant(log(a)) + [(S.One, meijerg([1, 1], [], [1], [0], t/a))],
True)
addi(log(Abs(t - a)), constant(log(Abs(a))) +
[(pi, meijerg([1, 1], [S.Half], [1], [0, S.Half], t/a))],
True)
# TODO log(x)/(x+a) and log(x)/(x-1) can also be done. should they
# be derivable?
# TODO further formulae in this section seem obscure
# Sections 8.4.9-10
# TODO
# Section 8.4.11
addi(Ei(t),
constant(-S.ImaginaryUnit*pi) + [(S.NegativeOne, meijerg([], [1], [0, 0], [],
t*polar_lift(-1)))],
True)
# Section 8.4.12
add(Si(t), [1], [], [S.Half], [0, 0], t**2/4, sqrt(pi)/2)
add(Ci(t), [], [1], [0, 0], [S.Half], t**2/4, -sqrt(pi)/2)
# Section 8.4.13
add(Shi(t), [S.Half], [], [0], [Rational(-1, 2), Rational(-1, 2)], polar_lift(-1)*t**2/4,
t*sqrt(pi)/4)
add(Chi(t), [], [S.Half, 1], [0, 0], [S.Half, S.Half], t**2/4, -
pi**S('3/2')/2)
# generalized exponential integral
add(expint(a, t), [], [a], [a - 1, 0], [], t)
# Section 8.4.14
add(erf(t), [1], [], [S.Half], [0], t**2, 1/sqrt(pi))
# TODO exp(-x)*erf(I*x) does not work
add(erfc(t), [], [1], [0, S.Half], [], t**2, 1/sqrt(pi))
# This formula for erfi(z) yields a wrong(?) minus sign
#add(erfi(t), [1], [], [S.Half], [0], -t**2, I/sqrt(pi))
add(erfi(t), [S.Half], [], [0], [Rational(-1, 2)], -t**2, t/sqrt(pi))
# Fresnel Integrals
add(fresnels(t), [1], [], [Rational(3, 4)], [0, Rational(1, 4)], pi**2*t**4/16, S.Half)
add(fresnelc(t), [1], [], [Rational(1, 4)], [0, Rational(3, 4)], pi**2*t**4/16, S.Half)
##### bessel-type functions #####
# Section 8.4.19
add(besselj(a, t), [], [], [a/2], [-a/2], t**2/4)
# all of the following are derivable
#add(sin(t)*besselj(a, t), [Rational(1, 4), Rational(3, 4)], [], [(1+a)/2],
# [-a/2, a/2, (1-a)/2], t**2, 1/sqrt(2))
#add(cos(t)*besselj(a, t), [Rational(1, 4), Rational(3, 4)], [], [a/2],
# [-a/2, (1+a)/2, (1-a)/2], t**2, 1/sqrt(2))
#add(besselj(a, t)**2, [S.Half], [], [a], [-a, 0], t**2, 1/sqrt(pi))
#add(besselj(a, t)*besselj(b, t), [0, S.Half], [], [(a + b)/2],
# [-(a+b)/2, (a - b)/2, (b - a)/2], t**2, 1/sqrt(pi))
# Section 8.4.20
add(bessely(a, t), [], [-(a + 1)/2], [a/2, -a/2], [-(a + 1)/2], t**2/4)
# TODO all of the following should be derivable
#add(sin(t)*bessely(a, t), [Rational(1, 4), Rational(3, 4)], [(1 - a - 1)/2],
# [(1 + a)/2, (1 - a)/2], [(1 - a - 1)/2, (1 - 1 - a)/2, (1 - 1 + a)/2],
# t**2, 1/sqrt(2))
#add(cos(t)*bessely(a, t), [Rational(1, 4), Rational(3, 4)], [(0 - a - 1)/2],
# [(0 + a)/2, (0 - a)/2], [(0 - a - 1)/2, (1 - 0 - a)/2, (1 - 0 + a)/2],
# t**2, 1/sqrt(2))
#add(besselj(a, t)*bessely(b, t), [0, S.Half], [(a - b - 1)/2],
# [(a + b)/2, (a - b)/2], [(a - b - 1)/2, -(a + b)/2, (b - a)/2],
# t**2, 1/sqrt(pi))
#addi(bessely(a, t)**2,
# [(2/sqrt(pi), meijerg([], [S.Half, S.Half - a], [0, a, -a],
# [S.Half - a], t**2)),
# (1/sqrt(pi), meijerg([S.Half], [], [a], [-a, 0], t**2))],
# True)
#addi(bessely(a, t)*bessely(b, t),
# [(2/sqrt(pi), meijerg([], [0, S.Half, (1 - a - b)/2],
# [(a + b)/2, (a - b)/2, (b - a)/2, -(a + b)/2],
# [(1 - a - b)/2], t**2)),
# (1/sqrt(pi), meijerg([0, S.Half], [], [(a + b)/2],
# [-(a + b)/2, (a - b)/2, (b - a)/2], t**2))],
# True)
# Section 8.4.21 ?
# Section 8.4.22
add(besseli(a, t), [], [(1 + a)/2], [a/2], [-a/2, (1 + a)/2], t**2/4, pi)
# TODO many more formulas. should all be derivable
# Section 8.4.23
add(besselk(a, t), [], [], [a/2, -a/2], [], t**2/4, S.Half)
# TODO many more formulas. should all be derivable
# Complete elliptic integrals K(z) and E(z)
add(elliptic_k(t), [S.Half, S.Half], [], [0], [0], -t, S.Half)
add(elliptic_e(t), [S.Half, 3*S.Half], [], [0], [0], -t, Rational(-1, 2)/2)
####################################################################
# First some helper functions.
####################################################################
from sympy.utilities.timeutils import timethis
timeit = timethis('meijerg')
def _mytype(f, x):
""" Create a hashable entity describing the type of f. """
if x not in f.free_symbols:
return ()
elif f.is_Function:
return (type(f),)
else:
types = [_mytype(a, x) for a in f.args]
res = []
for t in types:
res += list(t)
res.sort()
return tuple(res)
class _CoeffExpValueError(ValueError):
"""
Exception raised by _get_coeff_exp, for internal use only.
"""
pass
def _get_coeff_exp(expr, x):
"""
When expr is known to be of the form c*x**b, with c and/or b possibly 1,
return c, b.
Examples
========
>>> from sympy.abc import x, a, b
>>> from sympy.integrals.meijerint import _get_coeff_exp
>>> _get_coeff_exp(a*x**b, x)
(a, b)
>>> _get_coeff_exp(x, x)
(1, 1)
>>> _get_coeff_exp(2*x, x)
(2, 1)
>>> _get_coeff_exp(x**3, x)
(1, 3)
"""
from sympy.simplify import powsimp
(c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)
if not m:
return c, S.Zero
[m] = m
if m.is_Pow:
if m.base != x:
raise _CoeffExpValueError('expr not of form a*x**b')
return c, m.exp
elif m == x:
return c, S.One
else:
raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
def _exponents(expr, x):
"""
Find the exponents of ``x`` (not including zero) in ``expr``.
Examples
========
>>> from sympy.integrals.meijerint import _exponents
>>> from sympy.abc import x, y
>>> from sympy import sin
>>> _exponents(x, x)
{1}
>>> _exponents(x**2, x)
{2}
>>> _exponents(x**2 + x, x)
{1, 2}
>>> _exponents(x**3*sin(x + x**y) + 1/x, x)
{-1, 1, 3, y}
"""
def _exponents_(expr, x, res):
if expr == x:
res.update([1])
return
if expr.is_Pow and expr.base == x:
res.update([expr.exp])
return
for argument in expr.args:
_exponents_(argument, x, res)
res = set()
_exponents_(expr, x, res)
return res
def _functions(expr, x):
""" Find the types of functions in expr, to estimate the complexity. """
return {e.func for e in expr.atoms(Function) if x in e.free_symbols}
def _find_splitting_points(expr, x):
"""
Find numbers a such that a linear substitution x -> x + a would
(hopefully) simplify expr.
Examples
========
>>> from sympy.integrals.meijerint import _find_splitting_points as fsp
>>> from sympy import sin
>>> from sympy.abc import x
>>> fsp(x, x)
{0}
>>> fsp((x-1)**3, x)
{1}
>>> fsp(sin(x+3)*x, x)
{-3, 0}
"""
p, q = [Wild(n, exclude=[x]) for n in 'pq']
def compute_innermost(expr, res):
if not isinstance(expr, Expr):
return
m = expr.match(p*x + q)
if m and m[p] != 0:
res.add(-m[q]/m[p])
return
if expr.is_Atom:
return
for argument in expr.args:
compute_innermost(argument, res)
innermost = set()
compute_innermost(expr, innermost)
return innermost
def _split_mul(f, x):
"""
Split expression ``f`` into fac, po, g, where fac is a constant factor,
    po = x**s for some s independent of x, and g is "the rest".
Examples
========
>>> from sympy.integrals.meijerint import _split_mul
>>> from sympy import sin
>>> from sympy.abc import s, x
>>> _split_mul((3*x)**s*sin(x**2)*x, x)
(3**s, x*x**s, sin(x**2))
"""
fac = S.One
po = S.One
g = S.One
f = expand_power_base(f)
args = Mul.make_args(f)
for a in args:
if a == x:
po *= x
elif x not in a.free_symbols:
fac *= a
else:
if a.is_Pow and x not in a.exp.free_symbols:
c, t = a.base.as_coeff_mul(x)
if t != (x,):
c, t = expand_mul(a.base).as_coeff_mul(x)
if t == (x,):
po *= x**a.exp
fac *= unpolarify(polarify(c**a.exp, subs=False))
continue
g *= a
return fac, po, g
def _mul_args(f):
"""
Return a list ``L`` such that ``Mul(*L) == f``.
If ``f`` is not a ``Mul`` or ``Pow``, ``L=[f]``.
    If ``f=g**n`` for an integer ``n``, ``L=[g]*n`` (or ``L=[1/g]*(-n)`` when ``n`` is negative).
If ``f`` is a ``Mul``, ``L`` comes from applying ``_mul_args`` to all factors of ``f``.
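    For example, ``_mul_args(x**3*sin(x))`` gives ``[x, x, x, sin(x)]``
    (up to the ordering of the factors).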
"""
args = Mul.make_args(f)
gs = []
for g in args:
if g.is_Pow and g.exp.is_Integer:
n = g.exp
base = g.base
if n < 0:
n = -n
base = 1/base
gs += [base]*n
else:
gs.append(g)
return gs
def _mul_as_two_parts(f):
"""
Find all the ways to split ``f`` into a product of two terms.
Return None on failure.
Explanation
===========
Although the order is canonical from multiset_partitions, this is
not necessarily the best order to process the terms. For example,
if the case of len(gs) == 2 is removed and multiset is allowed to
sort the terms, some tests fail.
Examples
========
>>> from sympy.integrals.meijerint import _mul_as_two_parts
>>> from sympy import sin, exp, ordered
>>> from sympy.abc import x
>>> list(ordered(_mul_as_two_parts(x*sin(x)*exp(x))))
[(x, exp(x)*sin(x)), (x*exp(x), sin(x)), (x*sin(x), exp(x))]
"""
gs = _mul_args(f)
if len(gs) < 2:
return None
if len(gs) == 2:
return [tuple(gs)]
return [(Mul(*x), Mul(*y)) for (x, y) in multiset_partitions(gs, 2)]
def _inflate_g(g, n):
""" Return C, h such that h is a G function of argument z**n and
g = C*h. """
# TODO should this be a method of meijerg?
# See: [L, page 150, equation (5)]
def inflate(params, n):
""" (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) """
return [(a + i)/n for a, i in itertools.product(params, range(n))]
v = S(len(g.ap) - len(g.bq))
C = n**(1 + g.nu + v/2)
C /= (2*pi)**((n - 1)*g.delta)
return C, meijerg(inflate(g.an, n), inflate(g.aother, n),
inflate(g.bm, n), inflate(g.bother, n),
g.argument**n * n**(n*v))
def _flip_g(g):
""" Turn the G function into one of inverse argument
(i.e. G(1/x) -> G'(x)) """
# See [L], section 5.2
def tr(l):
return [1 - a for a in l]
return meijerg(tr(g.bm), tr(g.bother), tr(g.an), tr(g.aother), 1/g.argument)
def _inflate_fox_h(g, a):
r"""
Let d denote the integrand in the definition of the G function ``g``.
Consider the function H which is defined in the same way, but with
integrand d/Gamma(a*s) (contour conventions as usual).
If ``a`` is rational, the function H can be written as C*G, for a constant C
and a G-function G.
This function returns C, G.
"""
if a < 0:
return _inflate_fox_h(_flip_g(g), -a)
p = S(a.p)
q = S(a.q)
# We use the substitution s->qs, i.e. inflate g by q. We are left with an
# extra factor of Gamma(p*s), for which we use Gauss' multiplication
# theorem.
D, g = _inflate_g(g, q)
z = g.argument
D /= (2*pi)**((1 - p)/2)*p**Rational(-1, 2)
z /= p**p
bs = [(n + 1)/p for n in range(p)]
return D, meijerg(g.an, g.aother, g.bm, list(g.bother) + bs, z)
_dummies: dict[tuple[str, str], Dummy] = {}
def _dummy(name, token, expr, **kwargs):
"""
Return a dummy. This will return the same dummy if the same token+name is
requested more than once, and it is not already in expr.
This is for being cache-friendly.
"""
d = _dummy_(name, token, **kwargs)
if d in expr.free_symbols:
return Dummy(name, **kwargs)
return d
def _dummy_(name, token, **kwargs):
"""
Return a dummy associated to name and token. Same effect as declaring
it globally.
"""
global _dummies
    if (name, token) not in _dummies:
_dummies[(name, token)] = Dummy(name, **kwargs)
return _dummies[(name, token)]
def _is_analytic(f, x):
""" Check if f(x), when expressed using G functions on the positive reals,
will in fact agree with the G functions almost everywhere """
return not any(x in expr.free_symbols for expr in f.atoms(Heaviside, Abs))
def _condsimp(cond, first=True):
"""
Do naive simplifications on ``cond``.
Explanation
===========
Note that this routine is completely ad-hoc, simplification rules being
added as need arises rather than following any logical pattern.
Examples
========
>>> from sympy.integrals.meijerint import _condsimp as simp
>>> from sympy import Or, Eq
>>> from sympy.abc import x, y
>>> simp(Or(x < y, Eq(x, y)))
x <= y
"""
if first:
cond = cond.replace(lambda _: _.is_Relational, _canonical_coeff)
first = False
if not isinstance(cond, BooleanFunction):
return cond
p, q, r = symbols('p q r', cls=Wild)
# transforms tests use 0, 4, 5 and 11-14
# meijer tests use 0, 2, 11, 14
# joint_rv uses 6, 7
rules = [
(Or(p < q, Eq(p, q)), p <= q), # 0
# The next two obviously are instances of a general pattern, but it is
# easier to spell out the few cases we care about.
(And(Abs(arg(p)) <= pi, Abs(arg(p) - 2*pi) <= pi),
Eq(arg(p) - pi, 0)), # 1
(And(Abs(2*arg(p) + pi) <= pi, Abs(2*arg(p) - pi) <= pi),
Eq(arg(p), 0)), # 2
(And(Abs(2*arg(p) + pi) < pi, Abs(2*arg(p) - pi) <= pi),
S.false), # 3
(And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) <= pi/2),
Eq(arg(p), 0)), # 4
(And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) < pi/2),
S.false), # 5
(And(Abs(arg(p**2/2 + 1)) < pi, Ne(Abs(arg(p**2/2 + 1)), pi)),
S.true), # 6
(Or(Abs(arg(p**2/2 + 1)) < pi, Ne(1/(p**2/2 + 1), 0)),
S.true), # 7
(And(Abs(unbranched_argument(p)) <= pi,
Abs(unbranched_argument(exp_polar(-2*pi*S.ImaginaryUnit)*p)) <= pi),
Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi)*p), 0)), # 8
(And(Abs(unbranched_argument(p)) <= pi/2,
Abs(unbranched_argument(exp_polar(-pi*S.ImaginaryUnit)*p)) <= pi/2),
Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi/2)*p), 0)), # 9
(Or(p <= q, And(p < q, r)), p <= q), # 10
(Ne(p**2, 1) & (p**2 > 1), p**2 > 1), # 11
(Ne(1/p, 1) & (cos(Abs(arg(p)))*Abs(p) > 1), Abs(p) > 1), # 12
(Ne(p, 2) & (cos(Abs(arg(p)))*Abs(p) > 2), Abs(p) > 2), # 13
((Abs(arg(p)) < pi/2) & (cos(Abs(arg(p)))*sqrt(Abs(p**2)) > 1), p**2 > 1), # 14
]
cond = cond.func(*list(map(lambda _: _condsimp(_, first), cond.args)))
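    # Repeatedly try to match the arguments of ``cond`` against the pieces of
    # a rule's left-hand side; once every piece has been found, replace the
    # matched arguments by the substituted right-hand side, and repeat until
    # no rule applies any more.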
change = True
while change:
change = False
for irule, (fro, to) in enumerate(rules):
if fro.func != cond.func:
continue
for n, arg1 in enumerate(cond.args):
if r in fro.args[0].free_symbols:
m = arg1.match(fro.args[1])
num = 1
else:
num = 0
m = arg1.match(fro.args[0])
if not m:
continue
otherargs = [x.subs(m) for x in fro.args[:num] + fro.args[num + 1:]]
otherlist = [n]
for arg2 in otherargs:
for k, arg3 in enumerate(cond.args):
if k in otherlist:
continue
if arg2 == arg3:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[1] == r and \
isinstance(arg2, And) and arg2.args[0] in arg3.args:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[0] == r and \
isinstance(arg2, And) and arg2.args[1] in arg3.args:
otherlist += [k]
break
if len(otherlist) != len(otherargs) + 1:
continue
newargs = [arg_ for (k, arg_) in enumerate(cond.args)
if k not in otherlist] + [to.subs(m)]
if SYMPY_DEBUG:
if irule not in (0, 2, 4, 5, 6, 7, 11, 12, 13, 14):
print('used new rule:', irule)
cond = cond.func(*newargs)
change = True
break
# final tweak
def rel_touchup(rel):
if rel.rel_op != '==' or rel.rhs != 0:
return rel
# handle Eq(*, 0)
LHS = rel.lhs
m = LHS.match(arg(p)**q)
if not m:
m = LHS.match(unbranched_argument(polar_lift(p)**q))
if not m:
if isinstance(LHS, periodic_argument) and not LHS.args[0].is_polar \
and LHS.args[1] is S.Infinity:
return (LHS.args[0] > 0)
return rel
return (m[p] > 0)
cond = cond.replace(lambda _: _.is_Relational, rel_touchup)
if SYMPY_DEBUG:
print('_condsimp: ', cond)
return cond
def _eval_cond(cond):
""" Re-evaluate the conditions. """
if isinstance(cond, bool):
return cond
return _condsimp(cond.doit())
####################################################################
# Now the "backbone" functions to do actual integration.
####################################################################
def _my_principal_branch(expr, period, full_pb=False):
""" Bring expr nearer to its principal branch by removing superfluous
factors.
This function does *not* guarantee to yield the principal branch,
to avoid introducing opaque principal_branch() objects,
unless full_pb=True. """
res = principal_branch(expr, period)
if not full_pb:
res = res.replace(principal_branch, lambda x, y: x)
return res
def _rewrite_saxena_1(fac, po, g, x):
"""
Rewrite the integral fac*po*g dx, from zero to infinity, as
integral fac*G, where G has argument a*x. Note po=x**s.
Return fac, G.
"""
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
period = g.get_period()
a = _my_principal_branch(a, period)
# We substitute t = x**b.
C = fac/(Abs(b)*a**((s + 1)/b - 1))
# Absorb a factor of (at)**((1 + s)/b - 1).
def tr(l):
return [a + (1 + s)/b - 1 for a in l]
return C, meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother),
a*x)
def _check_antecedents_1(g, x, helper=False):
r"""
    Return a condition under which the Mellin transform of g exists.
Any power of x has already been absorbed into the G function,
so this is just $\int_0^\infty g\, dx$.
See [L, section 5.6.1]. (Note that s=1.)
If ``helper`` is True, only check if the MT exists at infinity, i.e. if
$\int_1^\infty g\, dx$ exists.
"""
# NOTE if you update these conditions, please update the documentation as well
delta = g.delta
eta, _ = _get_coeff_exp(g.argument, x)
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
if p > q:
def tr(l):
return [1 - x for x in l]
return _check_antecedents_1(meijerg(tr(g.bm), tr(g.bother),
tr(g.an), tr(g.aother), x/eta),
x)
tmp = [-re(b) < 1 for b in g.bm] + [1 < 1 - re(a) for a in g.an]
cond_3 = And(*tmp)
tmp += [-re(b) < 1 for b in g.bother]
tmp += [1 < 1 - re(a) for a in g.aother]
cond_3_star = And(*tmp)
cond_4 = (-re(g.nu) + (q + 1 - p)/2 > q - p)
def debug(*msg):
_debug(*msg)
debug('Checking antecedents for 1 function:')
debug(' delta=%s, eta=%s, m=%s, n=%s, p=%s, q=%s'
% (delta, eta, m, n, p, q))
debug(' ap = %s, %s' % (list(g.an), list(g.aother)))
debug(' bq = %s, %s' % (list(g.bm), list(g.bother)))
debug(' cond_3=%s, cond_3*=%s, cond_4=%s' % (cond_3, cond_3_star, cond_4))
conds = []
# case 1
case1 = []
tmp1 = [1 <= n, p < q, 1 <= m]
tmp2 = [1 <= p, 1 <= m, Eq(q, p + 1), Not(And(Eq(n, 0), Eq(m, p + 1)))]
tmp3 = [1 <= p, Eq(q, p)]
for k in range(ceiling(delta/2) + 1):
tmp3 += [Ne(Abs(unbranched_argument(eta)), (delta - 2*k)*pi)]
tmp = [delta > 0, Abs(unbranched_argument(eta)) < delta*pi]
extra = [Ne(eta, 0), cond_3]
if helper:
extra = []
for t in [tmp1, tmp2, tmp3]:
case1 += [And(*(t + tmp + extra))]
conds += case1
debug(' case 1:', case1)
# case 2
extra = [cond_3]
if helper:
extra = []
case2 = [And(Eq(n, 0), p + 1 <= m, m <= q,
Abs(unbranched_argument(eta)) < delta*pi, *extra)]
conds += case2
debug(' case 2:', case2)
# case 3
extra = [cond_3, cond_4]
if helper:
extra = []
case3 = [And(p < q, 1 <= m, delta > 0, Eq(Abs(unbranched_argument(eta)), delta*pi),
*extra)]
case3 += [And(p <= q - 2, Eq(delta, 0), Eq(Abs(unbranched_argument(eta)), 0), *extra)]
conds += case3
debug(' case 3:', case3)
# TODO altered cases 4-7
    # extra case from the Wolfram functions site:
# (reproduced verbatim from Prudnikov, section 2.24.2)
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/01/
case_extra = []
case_extra += [Eq(p, q), Eq(delta, 0), Eq(unbranched_argument(eta), 0), Ne(eta, 0)]
if not helper:
case_extra += [cond_3]
s = []
for a, b in zip(g.ap, g.bq):
s += [b - a]
case_extra += [re(Add(*s)) < 0]
case_extra = And(*case_extra)
conds += [case_extra]
debug(' extra case:', [case_extra])
case_extra_2 = [And(delta > 0, Abs(unbranched_argument(eta)) < delta*pi)]
if not helper:
case_extra_2 += [cond_3]
case_extra_2 = And(*case_extra_2)
conds += [case_extra_2]
debug(' second extra case:', [case_extra_2])
# TODO This leaves only one case from the three listed by Prudnikov.
# Investigate if these indeed cover everything; if so, remove the rest.
return Or(*conds)
def _int0oo_1(g, x):
r"""
Evaluate $\int_0^\infty g\, dx$ using G functions,
assuming the necessary conditions are fulfilled.
Examples
========
>>> from sympy.abc import a, b, c, d, x, y
>>> from sympy import meijerg
>>> from sympy.integrals.meijerint import _int0oo_1
>>> _int0oo_1(meijerg([a], [b], [c], [d], x*y), x)
gamma(-a)*gamma(c + 1)/(y*gamma(-d)*gamma(b + 1))
"""
from sympy.simplify import gammasimp
# See [L, section 5.6.1]. Note that s=1.
eta, _ = _get_coeff_exp(g.argument, x)
res = 1/eta
# XXX TODO we should reduce order first
for b in g.bm:
res *= gamma(b + 1)
for a in g.an:
res *= gamma(1 - a - 1)
for b in g.bother:
res /= gamma(1 - b - 1)
for a in g.aother:
res /= gamma(a + 1)
return gammasimp(unpolarify(res))
def _rewrite_saxena(fac, po, g1, g2, x, full_pb=False):
"""
Rewrite the integral ``fac*po*g1*g2`` from 0 to oo in terms of G
functions with argument ``c*x``.
Explanation
===========
    Return C, f1, f2 such that the integral of C*f1*f2 from 0 to infinity
    equals the integral of fac*po*g1*g2 from 0 to infinity.
Examples
========
>>> from sympy.integrals.meijerint import _rewrite_saxena
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg
>>> g1 = meijerg([], [], [0], [], s*t)
>>> g2 = meijerg([], [], [m/2], [-m/2], t**2/4)
>>> r = _rewrite_saxena(1, t**0, g1, g2, t)
>>> r[0]
s/(4*sqrt(pi))
>>> r[1]
meijerg(((), ()), ((-1/2, 0), ()), s**2*t/4)
>>> r[2]
meijerg(((), ()), ((m/2,), (-m/2,)), t/4)
"""
def pb(g):
a, b = _get_coeff_exp(g.argument, x)
per = g.get_period()
return meijerg(g.an, g.aother, g.bm, g.bother,
_my_principal_branch(a, per, full_pb)*x**b)
_, s = _get_coeff_exp(po, x)
_, b1 = _get_coeff_exp(g1.argument, x)
_, b2 = _get_coeff_exp(g2.argument, x)
if (b1 < 0) == True:
b1 = -b1
g1 = _flip_g(g1)
if (b2 < 0) == True:
b2 = -b2
g2 = _flip_g(g2)
if not b1.is_Rational or not b2.is_Rational:
return
m1, n1 = b1.p, b1.q
m2, n2 = b2.p, b2.q
tau = ilcm(m1*n2, m2*n1)
r1 = tau//(m1*n2)
r2 = tau//(m2*n1)
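    # Inflate both G functions so that their arguments carry the same power of
    # x: with b1 = m1/n1 and b2 = m2/n2, inflating by r1 and r2 yields the
    # common exponent tau/(n1*n2).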
C1, g1 = _inflate_g(g1, r1)
C2, g2 = _inflate_g(g2, r2)
g1 = pb(g1)
g2 = pb(g2)
fac *= C1*C2
a1, b = _get_coeff_exp(g1.argument, x)
a2, _ = _get_coeff_exp(g2.argument, x)
# arbitrarily tack on the x**s part to g1
# TODO should we try both?
exp = (s + 1)/b - 1
fac = fac/(Abs(b) * a1**exp)
def tr(l):
return [a + exp for a in l]
g1 = meijerg(tr(g1.an), tr(g1.aother), tr(g1.bm), tr(g1.bother), a1*x)
g2 = meijerg(g2.an, g2.aother, g2.bm, g2.bother, a2*x)
from sympy.simplify import powdenest
return powdenest(fac, polar=True), g1, g2
def _check_antecedents(g1, g2, x):
""" Return a condition under which the integral theorem applies. """
# Yes, this is madness.
# XXX TODO this is a testing *nightmare*
# NOTE if you update these conditions, please update the documentation as well
# The following conditions are found in
# [P], Section 2.24.1
#
# They are also reproduced (verbatim!) at
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/
#
# Note: k=l=r=alpha=1
sigma, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
s, t, u, v = S([len(g1.bm), len(g1.an), len(g1.ap), len(g1.bq)])
m, n, p, q = S([len(g2.bm), len(g2.an), len(g2.ap), len(g2.bq)])
bstar = s + t - (u + v)/2
cstar = m + n - (p + q)/2
rho = g1.nu + (u - v)/2 + 1
mu = g2.nu + (p - q)/2 + 1
phi = q - p - (v - u)
eta = 1 - (v - u) - mu - rho
psi = (pi*(q - m - n) + Abs(unbranched_argument(omega)))/(q - p)
theta = (pi*(v - s - t) + Abs(unbranched_argument(sigma)))/(v - u)
_debug('Checking antecedents:')
_debug(' sigma=%s, s=%s, t=%s, u=%s, v=%s, b*=%s, rho=%s'
% (sigma, s, t, u, v, bstar, rho))
_debug(' omega=%s, m=%s, n=%s, p=%s, q=%s, c*=%s, mu=%s,'
% (omega, m, n, p, q, cstar, mu))
_debug(' phi=%s, eta=%s, psi=%s, theta=%s' % (phi, eta, psi, theta))
def _c1():
for g in [g1, g2]:
for i, j in itertools.product(g.an, g.bm):
diff = i - j
if diff.is_integer and diff.is_positive:
return False
return True
c1 = _c1()
c2 = And(*[re(1 + i + j) > 0 for i in g1.bm for j in g2.bm])
c3 = And(*[re(1 + i + j) < 1 + 1 for i in g1.an for j in g2.an])
c4 = And(*[(p - q)*re(1 + i - 1) - re(mu) > Rational(-3, 2) for i in g1.an])
c5 = And(*[(p - q)*re(1 + i) - re(mu) > Rational(-3, 2) for i in g1.bm])
c6 = And(*[(u - v)*re(1 + i - 1) - re(rho) > Rational(-3, 2) for i in g2.an])
c7 = And(*[(u - v)*re(1 + i) - re(rho) > Rational(-3, 2) for i in g2.bm])
c8 = (Abs(phi) + 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c9 = (Abs(phi) - 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c10 = (Abs(unbranched_argument(sigma)) < bstar*pi)
c11 = Eq(Abs(unbranched_argument(sigma)), bstar*pi)
c12 = (Abs(unbranched_argument(omega)) < cstar*pi)
c13 = Eq(Abs(unbranched_argument(omega)), cstar*pi)
    # The following condition is *not* implemented as stated on the Wolfram
    # functions site. In the book of Prudnikov there is an additional part
    # (the And involving re()). However, I only have this book in Russian, and
    # I don't read any Russian. The following condition is what other people
# have told me it means.
# Worryingly, it is different from the condition implemented in REDUCE.
# The REDUCE implementation:
# https://reduce-algebra.svn.sourceforge.net/svnroot/reduce-algebra/trunk/packages/defint/definta.red
# (search for tst14)
# The Wolfram alpha version:
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/03/0014/
z0 = exp(-(bstar + cstar)*pi*S.ImaginaryUnit)
zos = unpolarify(z0*omega/sigma)
zso = unpolarify(z0*sigma/omega)
if zos == 1/zso:
c14 = And(Eq(phi, 0), bstar + cstar <= 1,
Or(Ne(zos, 1), re(mu + rho + v - u) < 1,
re(mu + rho + q - p) < 1))
else:
def _cond(z):
'''Returns True if abs(arg(1-z)) < pi, avoiding arg(0).
Explanation
===========
If ``z`` is 1 then arg is NaN. This raises a
TypeError on `NaN < pi`. Previously this gave `False` so
this behavior has been hardcoded here but someone should
check if this NaN is more serious! This NaN is triggered by
test_meijerint() in test_meijerint.py:
`meijerint_definite(exp(x), x, 0, I)`
'''
return z != 1 and Abs(arg(1 - z)) < pi
c14 = And(Eq(phi, 0), bstar - 1 + cstar <= 0,
Or(And(Ne(zos, 1), _cond(zos)),
And(re(mu + rho + v - u) < 1, Eq(zos, 1))))
c14_alt = And(Eq(phi, 0), cstar - 1 + bstar <= 0,
Or(And(Ne(zso, 1), _cond(zso)),
And(re(mu + rho + q - p) < 1, Eq(zso, 1))))
# Since r=k=l=1, in our case there is c14_alt which is the same as calling
# us with (g1, g2) = (g2, g1). The conditions below enumerate all cases
# (i.e. we don't have to try arguments reversed by hand), and indeed try
# all symmetric cases. (i.e. whenever there is a condition involving c14,
# there is also a dual condition which is exactly what we would get when g1,
# g2 were interchanged, *but c14 was unaltered*).
# Hence the following seems correct:
c14 = Or(c14, c14_alt)
'''
When `c15` is NaN (e.g. from `psi` being NaN as happens during
'test_issue_4992' and/or `theta` is NaN as in 'test_issue_6253',
both in `test_integrals.py`) the comparison to 0 formerly gave False
whereas now an error is raised. To keep the old behavior, the value
of NaN is replaced with False but perhaps a closer look at this condition
should be made: XXX how should conditions leading to c15=NaN be handled?
'''
try:
lambda_c = (q - p)*Abs(omega)**(1/(q - p))*cos(psi) \
+ (v - u)*Abs(sigma)**(1/(v - u))*cos(theta)
# the TypeError might be raised here, e.g. if lambda_c is NaN
if _eval_cond(lambda_c > 0) != False:
c15 = (lambda_c > 0)
else:
def lambda_s0(c1, c2):
return c1*(q - p)*Abs(omega)**(1/(q - p))*sin(psi) \
+ c2*(v - u)*Abs(sigma)**(1/(v - u))*sin(theta)
lambda_s = Piecewise(
((lambda_s0(+1, +1)*lambda_s0(-1, -1)),
And(Eq(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))),
(lambda_s0(sign(unbranched_argument(omega)), +1)*lambda_s0(sign(unbranched_argument(omega)), -1),
And(Eq(unbranched_argument(sigma), 0), Ne(unbranched_argument(omega), 0))),
(lambda_s0(+1, sign(unbranched_argument(sigma)))*lambda_s0(-1, sign(unbranched_argument(sigma))),
And(Ne(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))),
(lambda_s0(sign(unbranched_argument(omega)), sign(unbranched_argument(sigma))), True))
tmp = [lambda_c > 0,
And(Eq(lambda_c, 0), Ne(lambda_s, 0), re(eta) > -1),
And(Eq(lambda_c, 0), Eq(lambda_s, 0), re(eta) > 0)]
c15 = Or(*tmp)
except TypeError:
c15 = False
for cond, i in [(c1, 1), (c2, 2), (c3, 3), (c4, 4), (c5, 5), (c6, 6),
(c7, 7), (c8, 8), (c9, 9), (c10, 10), (c11, 11),
(c12, 12), (c13, 13), (c14, 14), (c15, 15)]:
_debug(' c%s:' % i, cond)
# We will return Or(*conds)
conds = []
def pr(count):
_debug(' case %s:' % count, conds[-1])
conds += [And(m*n*s*t != 0, bstar.is_positive is True, cstar.is_positive is True, c1, c2, c3, c10,
c12)] # 1
pr(1)
conds += [And(Eq(u, v), Eq(bstar, 0), cstar.is_positive is True, sigma.is_positive is True, re(rho) < 1,
c1, c2, c3, c12)] # 2
pr(2)
conds += [And(Eq(p, q), Eq(cstar, 0), bstar.is_positive is True, omega.is_positive is True, re(mu) < 1,
c1, c2, c3, c10)] # 3
pr(3)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu) < 1, re(rho) < 1,
Ne(sigma, omega), c1, c2, c3)] # 4
pr(4)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu + rho) < 1,
Ne(omega, sigma), c1, c2, c3)] # 5
pr(5)
conds += [And(p > q, s.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c5, c10, c13)] # 6
pr(6)
conds += [And(p < q, t.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c4, c10, c13)] # 7
pr(7)
conds += [And(u > v, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c7, c11, c12)] # 8
pr(8)
conds += [And(u < v, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c6, c11, c12)] # 9
pr(9)
conds += [And(p > q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c5, c13)] # 10
pr(10)
conds += [And(p < q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c4, c13)] # 11
pr(11)
conds += [And(Eq(p, q), u > v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c7, c11)] # 12
pr(12)
conds += [And(Eq(p, q), u < v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c6, c11)] # 13
pr(13)
conds += [And(p < q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c7, c11, c13)] # 14
pr(14)
conds += [And(p > q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c6, c11, c13)] # 15
pr(15)
conds += [And(p > q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c7, c8, c11, c13, c14)] # 16
pr(16)
conds += [And(p < q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c6, c9, c11, c13, c14)] # 17
pr(17)
conds += [And(Eq(t, 0), s.is_positive is True, bstar.is_positive is True, phi.is_positive is True, c1, c2, c10)] # 18
pr(18)
conds += [And(Eq(s, 0), t.is_positive is True, bstar.is_positive is True, phi.is_negative is True, c1, c3, c10)] # 19
pr(19)
conds += [And(Eq(n, 0), m.is_positive is True, cstar.is_positive is True, phi.is_negative is True, c1, c2, c12)] # 20
pr(20)
conds += [And(Eq(m, 0), n.is_positive is True, cstar.is_positive is True, phi.is_positive is True, c1, c3, c12)] # 21
pr(21)
conds += [And(Eq(s*t, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 22
pr(22)
conds += [And(Eq(m*n, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 23
pr(23)
# The following case is from [Luke1969]. As far as I can tell, it is *not*
# covered by Prudnikov's.
# Let G1 and G2 be the two G-functions. Suppose the integral exists from
    # 0 to a > 0 (this is the easy part), that G1 has exponential decay at
    # infinity, and that the Mellin transform of G2 exists.
# Then the integral exists.
mt1_exists = _check_antecedents_1(g1, x, helper=True)
mt2_exists = _check_antecedents_1(g2, x, helper=True)
conds += [And(mt2_exists, Eq(t, 0), u < s, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E1')
conds += [And(mt2_exists, Eq(s, 0), v < t, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E2')
conds += [And(mt1_exists, Eq(n, 0), p < m, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E3')
conds += [And(mt1_exists, Eq(m, 0), q < n, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E4')
# Let's short-circuit if this worked ...
# the rest is corner-cases and terrible to read.
r = Or(*conds)
if _eval_cond(r) != False:
return r
conds += [And(m + n > p, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 24
pr(24)
conds += [And(m + n > q, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 25
pr(25)
conds += [And(Eq(p, q - 1), Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
c1, c2, c10, c14, c15)] # 26
pr(26)
conds += [And(Eq(p, q + 1), Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
c1, c3, c10, c14, c15)] # 27
pr(27)
conds += [And(p < q - 1, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 28
pr(28)
conds += [And(
p > q + 1, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar >= 0,
cstar*pi < Abs(unbranched_argument(omega)),
Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 29
pr(29)
conds += [And(Eq(n, 0), Eq(phi, 0), s + t > 0, m.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 30
pr(30)
conds += [And(Eq(m, 0), Eq(phi, 0), s + t > v, n.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 31
pr(31)
conds += [And(Eq(n, 0), Eq(phi, 0), Eq(u, v - 1), m.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (bstar + 1)*pi,
c1, c2, c12, c14, c15)] # 32
pr(32)
conds += [And(Eq(m, 0), Eq(phi, 0), Eq(u, v + 1), n.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (bstar + 1)*pi,
c1, c3, c12, c14, c15)] # 33
pr(33)
conds += [And(
Eq(n, 0), Eq(phi, 0), u < v - 1, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 34
pr(34)
conds += [And(
Eq(m, 0), Eq(phi, 0), u > v + 1, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 35
pr(35)
return Or(*conds)
# NOTE An alternative, but as far as I can tell weaker, set of conditions
# can be found in [L, section 5.6.2].
def _int0oo(g1, g2, x):
"""
    Express the integral of g1*g2 from zero to infinity using a G function,
assuming the necessary conditions are fulfilled.
Examples
========
>>> from sympy.integrals.meijerint import _int0oo
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg, S
>>> g1 = meijerg([], [], [-S(1)/2, 0], [], s**2*t/4)
>>> g2 = meijerg([], [], [m/2], [-m/2], t/4)
>>> _int0oo(g1, g2, t)
4*meijerg(((1/2, 0), ()), ((m/2,), (-m/2,)), s**(-2))/s**2
"""
# See: [L, section 5.6.2, equation (1)]
eta, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
def neg(l):
return [-x for x in l]
a1 = neg(g1.bm) + list(g2.an)
a2 = list(g2.aother) + neg(g1.bother)
b1 = neg(g1.an) + list(g2.bm)
b2 = list(g2.bother) + neg(g1.aother)
return meijerg(a1, a2, b1, b2, omega/eta)/eta
def _rewrite_inversion(fac, po, g, x):
""" Absorb ``po`` == x**s into g. """
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
def tr(l):
return [t + s/b for t in l]
from sympy.simplify import powdenest
return (powdenest(fac/a**(s/b), polar=True),
meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument))
def _check_antecedents_inversion(g, x):
""" Check antecedents for the laplace inversion integral. """
_debug('Checking antecedents for inversion:')
z = g.argument
_, e = _get_coeff_exp(z, x)
if e < 0:
_debug(' Flipping G.')
# We want to assume that argument gets large as |x| -> oo
return _check_antecedents_inversion(_flip_g(g), x)
def statement_half(a, b, c, z, plus):
coeff, exponent = _get_coeff_exp(z, x)
a *= exponent
b *= coeff**c
c *= exponent
conds = []
wp = b*exp(S.ImaginaryUnit*re(c)*pi/2)
wm = b*exp(-S.ImaginaryUnit*re(c)*pi/2)
if plus:
w = wp
else:
w = wm
conds += [And(Or(Eq(b, 0), re(c) <= 0), re(a) <= -1)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) < 0)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) <= 0,
re(a) <= -1)]
return Or(*conds)
def statement(a, b, c, z):
""" Provide a convergence statement for z**a * exp(b*z**c),
        cf. the Sphinx docs. """
return And(statement_half(a, b, c, z, True),
statement_half(a, b, c, z, False))
# Notations from [L], section 5.7-10
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
tau = m + n - p
nu = q - m - n
rho = (tau - nu)/2
sigma = q - p
if sigma == 1:
epsilon = S.Half
elif sigma > 1:
epsilon = 1
else:
epsilon = S.NaN
theta = ((1 - sigma)/2 + Add(*g.bq) - Add(*g.ap))/sigma
delta = g.delta
_debug(' m=%s, n=%s, p=%s, q=%s, tau=%s, nu=%s, rho=%s, sigma=%s' % (
m, n, p, q, tau, nu, rho, sigma))
_debug(' epsilon=%s, theta=%s, delta=%s' % (epsilon, theta, delta))
# First check if the computation is valid.
if not (g.delta >= e/2 or (p >= 1 and p >= q)):
_debug(' Computation not valid for these parameters.')
return False
# Now check if the inversion integral exists.
# Test "condition A"
for a, b in itertools.product(g.an, g.bm):
if (a - b).is_integer and a > b:
_debug(' Not a valid G function.')
return False
# There are two cases. If p >= q, we can directly use a slater expansion
# like [L], 5.2 (11). Note in particular that the asymptotics of such an
# expansion even hold when some of the parameters differ by integers, i.e.
    # the formula itself would not be valid! (because G functions are
    # continuous in their parameters)
# When p < q, we need to use the theorems of [L], 5.10.
if p >= q:
_debug(' Using asymptotic Slater expansion.')
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def E(z):
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def H(z):
return statement(theta, -sigma, 1/sigma, z)
def Hp(z):
return statement_half(theta, -sigma, 1/sigma, z, True)
def Hm(z):
return statement_half(theta, -sigma, 1/sigma, z, False)
# [L], section 5.10
conds = []
# Theorem 1 -- p < q from test above
conds += [And(1 <= n, 1 <= m, rho*pi - delta >= pi/2, delta > 0,
E(z*exp(S.ImaginaryUnit*pi*(nu + 1))))]
# Theorem 2, statements (2) and (3)
conds += [And(p + 1 <= m, m + 1 <= q, delta > 0, delta < pi/2, n == 0,
(m - p + 1)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*(q - m))),
Hm(z*exp(-S.ImaginaryUnit*pi*(q - m))))]
# Theorem 2, statement (5) -- p < q from test above
conds += [And(m == q, n == 0, delta > 0,
(sigma + epsilon)*pi - delta >= pi/2, H(z))]
# Theorem 3, statements (6) and (7)
conds += [And(Or(And(p <= q - 2, 1 <= tau, tau <= sigma/2),
And(p + 1 <= m + n, m + n <= (p + q)/2)),
delta > 0, delta < pi/2, (tau + 1)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*nu)),
Hm(z*exp(-S.ImaginaryUnit*pi*nu)))]
# Theorem 4, statements (10) and (11) -- p < q from test above
conds += [And(1 <= m, rho > 0, delta > 0, delta + rho*pi < pi/2,
(tau + epsilon)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*nu)),
Hm(z*exp(-S.ImaginaryUnit*pi*nu)))]
# Trivial case
conds += [m == 0]
# TODO
# Theorem 5 is quite general
# Theorem 6 contains special cases for q=p+1
return Or(*conds)
def _int_inversion(g, x, t):
"""
    Compute the Laplace inversion integral, assuming the formula applies.
"""
b, a = _get_coeff_exp(g.argument, x)
C, g = _inflate_fox_h(meijerg(g.an, g.aother, g.bm, g.bother, b/t**a), -a)
return C/t*g
####################################################################
# Finally, the real meat.
####################################################################
_lookup_table = None
@cacheit
@timeit
def _rewrite_single(f, x, recursive=True):
"""
Try to rewrite f as a sum of single G functions of the form
C*x**s*G(a*x**b), where b is a rational number and C is independent of x.
We guarantee that result.argument.as_coeff_mul(x) returns (a, (x**b,))
or (a, ()).
Returns a list of tuples (C, s, G) and a condition cond.
Returns None on failure.
"""
from .transforms import (mellin_transform, inverse_mellin_transform,
IntegralTransformError, MellinTransformStripError)
global _lookup_table
if not _lookup_table:
_lookup_table = {}
_create_lookup_table(_lookup_table)
if isinstance(f, meijerg):
coeff, m = factor(f.argument, x).as_coeff_mul(x)
if len(m) > 1:
return None
m = m[0]
if m.is_Pow:
if m.base != x or not m.exp.is_Rational:
return None
elif m != x:
return None
return [(1, 0, meijerg(f.an, f.aother, f.bm, f.bother, coeff*m))], True
f_ = f
f = f.subs(x, z)
t = _mytype(f, z)
if t in _lookup_table:
l = _lookup_table[t]
for formula, terms, cond, hint in l:
subs = f.match(formula, old=True)
if subs:
subs_ = {}
for fro, to in subs.items():
subs_[fro] = unpolarify(polarify(to, lift=True),
exponents_only=True)
subs = subs_
if not isinstance(hint, bool):
hint = hint.subs(subs)
if hint == False:
continue
if not isinstance(cond, (bool, BooleanAtom)):
cond = unpolarify(cond.subs(subs))
if _eval_cond(cond) == False:
continue
if not isinstance(terms, list):
terms = terms(subs)
res = []
for fac, g in terms:
r1 = _get_coeff_exp(unpolarify(fac.subs(subs).subs(z, x),
exponents_only=True), x)
try:
g = g.subs(subs).subs(z, x)
except ValueError:
continue
# NOTE these substitutions can in principle introduce oo,
# zoo and other absurdities. It shouldn't matter,
# but better be safe.
if Tuple(*(r1 + (g,))).has(S.Infinity, S.ComplexInfinity, S.NegativeInfinity):
continue
g = meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(g.argument, exponents_only=True))
res.append(r1 + (g,))
if res:
return res, cond
# try recursive mellin transform
if not recursive:
return None
_debug('Trying recursive Mellin transform method.')
def my_imt(F, s, x, strip):
""" Calling simplify() all the time is slow and not helpful, since
most of the time it only factors things in a way that has to be
un-done anyway. But sometimes it can remove apparent poles. """
# XXX should this be in inverse_mellin_transform?
try:
return inverse_mellin_transform(F, s, x, strip,
as_meijerg=True, needeval=True)
except MellinTransformStripError:
from sympy.simplify import simplify
return inverse_mellin_transform(
simplify(cancel(expand(F))), s, x, strip,
as_meijerg=True, needeval=True)
f = f_
s = _dummy('s', 'rewrite-single', f)
# to avoid infinite recursion, we have to force the two g functions case
def my_integrator(f, x):
r = _meijerint_definite_4(f, x, only_double=True)
if r is not None:
from sympy.simplify import hyperexpand
res, cond = r
res = _my_unpolarify(hyperexpand(res, rewrite='nonrepsmall'))
return Piecewise((res, cond),
(Integral(f, (x, S.Zero, S.Infinity)), True))
return Integral(f, (x, S.Zero, S.Infinity))
try:
F, strip, _ = mellin_transform(f, x, s, integrator=my_integrator,
simplify=False, needeval=True)
g = my_imt(F, s, x, strip)
except IntegralTransformError:
g = None
if g is None:
# We try to find an expression by analytic continuation.
# (also if the dummy is already in the expression, there is no point in
# putting in another one)
a = _dummy_('a', 'rewrite-single')
if a not in f.free_symbols and _is_analytic(f, x):
try:
F, strip, _ = mellin_transform(f.subs(x, a*x), x, s,
integrator=my_integrator,
needeval=True, simplify=False)
g = my_imt(F, s, x, strip).subs(a, 1)
except IntegralTransformError:
g = None
if g is None or g.has(S.Infinity, S.NaN, S.ComplexInfinity):
_debug('Recursive Mellin transform failed.')
return None
args = Add.make_args(g)
res = []
for f in args:
c, m = f.as_coeff_mul(x)
if len(m) > 1:
raise NotImplementedError('Unexpected form...')
g = m[0]
a, b = _get_coeff_exp(g.argument, x)
res += [(c, 0, meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(polarify(
a, lift=True), exponents_only=True)
*x**b))]
_debug('Recursive Mellin transform worked:', g)
return res, True
def _rewrite1(f, x, recursive=True):
"""
    Try to rewrite ``f`` as a sum of single G functions with argument a*x**b.
    Return fac, po, g such that f = fac*po*g, where fac is independent of
    ``x`` and po = x**s.
Here g is a result from _rewrite_single.
Return None on failure.
"""
fac, po, g = _split_mul(f, x)
g = _rewrite_single(g, x, recursive)
if g:
return fac, po, g[0], g[1]
def _rewrite2(f, x):
"""
Try to rewrite ``f`` as a product of two G functions of arguments a*x**b.
Return fac, po, g1, g2 such that f = fac*po*g1*g2, where fac is
independent of x and po is x**s.
Here g1 and g2 are results of _rewrite_single.
Returns None on failure.
"""
fac, po, g = _split_mul(f, x)
if any(_rewrite_single(expr, x, False) is None for expr in _mul_args(g)):
return None
l = _mul_as_two_parts(g)
if not l:
return None
l = list(ordered(l, [
lambda p: max(len(_exponents(p[0], x)), len(_exponents(p[1], x))),
lambda p: max(len(_functions(p[0], x)), len(_functions(p[1], x))),
lambda p: max(len(_find_splitting_points(p[0], x)),
len(_find_splitting_points(p[1], x)))]))
for recursive, (fac1, fac2) in itertools.product((False, True), l):
g1 = _rewrite_single(fac1, x, recursive)
g2 = _rewrite_single(fac2, x, recursive)
if g1 and g2:
cond = And(g1[1], g2[1])
if cond != False:
return fac, po, g1[0], g2[0], cond
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
f = sympify(f)
results = []
for a in sorted(_find_splitting_points(f, x) | {S.Zero}, key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not isinstance(rv, list):
from sympy.simplify.radsimp import collect
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
def _meijerint_indefinite_1(f, x):
""" Helper that does not attempt any substitution. """
_debug('Trying to compute the indefinite integral of', f, 'wrt', x)
from sympy.simplify import hyperexpand, powdenest
gs = _rewrite1(f, x)
if gs is None:
# Note: the code that calls us will do expand() and try again
return None
fac, po, gl, cond = gs
_debug(' could rewrite:', gs)
res = S.Zero
for C, s, g in gl:
a, b = _get_coeff_exp(g.argument, x)
_, c = _get_coeff_exp(po, x)
c += s
# we do a substitution t=a*x**b, get integrand fac*t**rho*g
fac_ = fac * C / (b*a**((1 + c)/b))
rho = (c + 1)/b - 1
# we now use t**rho*G(params, t) = G(params + rho, t)
# [L, page 150, equation (4)]
# and integral G(params, t) dt = G(1, params+1, 0, t)
# (or a similar expression with 1 and 0 exchanged ... pick the one
# which yields a well-defined function)
# [R, section 5]
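        # Worked instance (assuming the standard incomplete-gamma identity):
        # when integrating exp(-x), the rewrite gives g = G^{1,0}_{0,1}(x | -; 0)
        # with a = b = 1 and rho = 0, so the second branch below builds
        #     meijerg([1], [], [1], [0], t) = lowergamma(1, t) = 1 - exp(-t);
        # substituting t -> a*x**b and dropping the constant term (later, in
        # _clean) recovers the antiderivative -exp(-x).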
# (Note that this dummy will immediately go away again, so we
# can safely pass S.One for ``expr``.)
t = _dummy('t', 'meijerint-indefinite', S.One)
def tr(p):
return [a + rho + 1 for a in p]
if any(b.is_integer and (b <= 0) == True for b in tr(g.bm)):
r = -meijerg(
tr(g.an), tr(g.aother) + [1], tr(g.bm) + [0], tr(g.bother), t)
else:
r = meijerg(
tr(g.an) + [1], tr(g.aother), tr(g.bm), tr(g.bother) + [0], t)
# The antiderivative is most often expected to be defined
# in the neighborhood of x = 0.
if b.is_extended_nonnegative and not f.subs(x, 0).has(S.NaN, S.ComplexInfinity):
place = 0 # Assume we can expand at zero
else:
place = None
r = hyperexpand(r.subs(t, a*x**b), place=place)
# now substitute back
# Note: we really do want the powers of x to combine.
res += powdenest(fac_*r, polar=True)
def _clean(res):
"""This multiplies out superfluous powers of x we created, and chops off
constants:
>> _clean(x*(exp(x)/x - 1/x) + 3)
exp(x)
        cancel is used before expand_mul since it is possible for an
expression to have an additive constant that does not become isolated
with simple expansion. Such a situation was identified in issue 6369:
Examples
========
>>> from sympy import sqrt, cancel
>>> from sympy.abc import x
>>> a = sqrt(2*x + 1)
>>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2
>>> bad.expand().as_independent(x)[0]
0
>>> cancel(bad).expand().as_independent(x)[0]
1
"""
res = expand_mul(cancel(res), deep=False)
return Add._from_args(res.as_coeff_add(x)[1])
res = piecewise_fold(res, evaluate=None)
if res.is_Piecewise:
newargs = []
for e, c in res.args:
e = _my_unpolarify(_clean(e))
newargs += [(e, c)]
res = Piecewise(*newargs, evaluate=False)
else:
res = _my_unpolarify(_clean(res))
return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True))
@timeit
def meijerint_definite(f, x, a, b):
"""
Integrate ``f`` over the interval [``a``, ``b``], by rewriting it as a product
of two G functions, or as a single G function.
    Return res, cond, where cond gives the convergence conditions.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_definite
>>> from sympy import exp, oo
>>> from sympy.abc import x
>>> meijerint_definite(exp(-x**2), x, -oo, oo)
(sqrt(pi), True)
This function is implemented as a succession of functions
meijerint_definite, _meijerint_definite_2, _meijerint_definite_3,
_meijerint_definite_4. Each function in the list calls the next one
(presumably) several times. This means that calling meijerint_definite
can be very costly.
"""
# This consists of three steps:
# 1) Change the integration limits to 0, oo
# 2) Rewrite in terms of G functions
# 3) Evaluate the integral
#
# There are usually several ways of doing this, and we want to try all.
# This function does (1), calls _meijerint_definite_2 for step (2).
_debug('Integrating', f, 'wrt %s from %s to %s.' % (x, a, b))
f = sympify(f)
if f.has(DiracDelta):
_debug('Integrand has DiracDelta terms - giving up.')
return None
if f.has(SingularityFunction):
_debug('Integrand has Singularity Function terms - giving up.')
return None
f_, x_, a_, b_ = f, x, a, b
# Let's use a dummy in case any of the boundaries has x.
d = Dummy('x')
f = f.subs(x, d)
x = d
if a == b:
return (S.Zero, True)
results = []
if a is S.NegativeInfinity and b is not S.Infinity:
return meijerint_definite(f.subs(x, -x), x, -b, -a)
elif a is S.NegativeInfinity:
# Integrating -oo to oo. We need to find a place to split the integral.
_debug(' Integrating -oo to +oo.')
innermost = _find_splitting_points(f, x)
_debug(' Sensible splitting points:', innermost)
for c in sorted(innermost, key=default_sort_key, reverse=True) + [S.Zero]:
_debug(' Trying to split at', c)
if not c.is_extended_real:
_debug(' Non-real splitting point.')
continue
res1 = _meijerint_definite_2(f.subs(x, x + c), x)
if res1 is None:
_debug(' But could not compute first integral.')
continue
res2 = _meijerint_definite_2(f.subs(x, c - x), x)
if res2 is None:
_debug(' But could not compute second integral.')
continue
res1, cond1 = res1
res2, cond2 = res2
cond = _condsimp(And(cond1, cond2))
if cond == False:
_debug(' But combined condition is always false.')
continue
res = res1 + res2
return res, cond
elif a is S.Infinity:
res = meijerint_definite(f, x, b, S.Infinity)
return -res[0], res[1]
elif (a, b) == (S.Zero, S.Infinity):
# This is a common case - try it directly first.
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
else:
if b is S.Infinity:
for split in _find_splitting_points(f, x):
if (a - split >= 0) == True:
_debug('Trying x -> x + %s' % split)
res = _meijerint_definite_2(f.subs(x, x + split)
*Heaviside(x + split - a), x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
f = f.subs(x, x + a)
b = b - a
a = 0
if b is not S.Infinity:
phi = exp(S.ImaginaryUnit*arg(b))
b = Abs(b)
f = f.subs(x, phi*x)
f *= Heaviside(b - x)*phi
b = S.Infinity
_debug('Changed limits to', a, b)
_debug('Changed function to', f)
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
if f_.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_definite(
_rewrite_hyperbolics_as_exp(f_), x_, a_, b_)
if rv:
if not isinstance(rv, list):
from sympy.simplify.radsimp import collect
rv = (collect(factor_terms(rv[0]), rv[0].atoms(exp)),) + rv[1:]
return rv
results.extend(rv)
if results:
return next(ordered(results))
def _guess_expansion(f, x):
""" Try to guess sensible rewritings for integrand f(x). """
res = [(f, 'original integrand')]
orig = res[-1][0]
saw = {orig}
expanded = expand_mul(orig)
if expanded not in saw:
res += [(expanded, 'expand_mul')]
saw.add(expanded)
expanded = expand(orig)
if expanded not in saw:
res += [(expanded, 'expand')]
saw.add(expanded)
if orig.has(TrigonometricFunction, HyperbolicFunction):
expanded = expand_mul(expand_trig(orig))
if expanded not in saw:
res += [(expanded, 'expand_trig, expand_mul')]
saw.add(expanded)
if orig.has(cos, sin):
from sympy.simplify.fu import sincos_to_sum
reduced = sincos_to_sum(orig)
if reduced not in saw:
res += [(reduced, 'trig power reduction')]
saw.add(reduced)
return res
def _meijerint_definite_2(f, x):
"""
Try to integrate f dx from zero to infinity.
The body of this function computes various 'simplifications'
    f1, f2, ... of f (e.g. by calling expand_mul(), expand_trig()
- see _guess_expansion) and calls _meijerint_definite_3 with each of
these in succession.
If _meijerint_definite_3 succeeds with any of the simplified functions,
returns this result.
"""
# This function does preparation for (2), calls
# _meijerint_definite_3 for (2) and (3) combined.
# use a positive dummy - we integrate from 0 to oo
# XXX if a nonnegative symbol is used there will be test failures
dummy = _dummy('x', 'meijerint-definite2', f, positive=True)
f = f.subs(x, dummy)
x = dummy
if f == 0:
return S.Zero, True
for g, explanation in _guess_expansion(f, x):
_debug('Trying', explanation)
res = _meijerint_definite_3(g, x)
if res:
return res
def _meijerint_definite_3(f, x):
"""
Try to integrate f dx from zero to infinity.
This function calls _meijerint_definite_4 to try to compute the
integral. If this fails, it tries using linearity.
"""
res = _meijerint_definite_4(f, x)
if res and res[1] != False:
return res
if f.is_Add:
_debug('Expanding and evaluating all terms.')
ress = [_meijerint_definite_4(g, x) for g in f.args]
if all(r is not None for r in ress):
conds = []
res = S.Zero
for r, c in ress:
res += r
conds += [c]
c = And(*conds)
if c != False:
return res, c
def _my_unpolarify(f):
return _eval_cond(unpolarify(f))
@timeit
def _meijerint_definite_4(f, x, only_double=False):
"""
Try to integrate f dx from zero to infinity.
Explanation
===========
This function tries to apply the integration theorems found in literature,
i.e. it tries to rewrite f as either one or a product of two G-functions.
The parameter ``only_double`` is used internally in the recursive algorithm
to disable trying to rewrite f as a single G-function.
"""
from sympy.simplify import hyperexpand
# This function does (2) and (3)
_debug('Integrating', f)
# Try single G function.
if not only_double:
gs = _rewrite1(f, x, recursive=False)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S.Zero
for C, s, f in g:
if C == 0:
continue
C, f = _rewrite_saxena_1(fac*C, po*x**s, f, x)
res += C*_int0oo_1(f, x)
cond = And(cond, _check_antecedents_1(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitutions is:', res)
return _my_unpolarify(hyperexpand(res)), cond
# Try two G functions.
gs = _rewrite2(f, x)
if gs is not None:
for full_pb in [False, True]:
fac, po, g1, g2, cond = gs
_debug('Could rewrite as two G functions:', fac, po, g1, g2)
res = S.Zero
for C1, s1, f1 in g1:
for C2, s2, f2 in g2:
r = _rewrite_saxena(fac*C1*C2, po*x**(s1 + s2),
f1, f2, x, full_pb)
if r is None:
_debug('Non-rational exponents.')
return
C, f1_, f2_ = r
                    _debug('Saxena substitution yielded:', C, f1_, f2_)
cond = And(cond, _check_antecedents(f1_, f2_, x))
if cond == False:
break
res += C*_int0oo(f1_, f2_, x)
else:
continue
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False (full_pb=%s).' % full_pb)
else:
_debug('Result before branch substitutions is:', res)
if only_double:
return res, cond
return _my_unpolarify(hyperexpand(res)), cond
def meijerint_inversion(f, x, t):
r"""
    Compute the inverse Laplace transform
    $\frac{1}{2\pi i}\int_{c-i\infty}^{c+i\infty} f(x) e^{tx}\, dx$,
for real c larger than the real part of all singularities of ``f``.
Note that ``t`` is always assumed real and positive.
Return None if the integral does not exist or could not be evaluated.
Examples
========
>>> from sympy.abc import x, t
>>> from sympy.integrals.meijerint import meijerint_inversion
>>> meijerint_inversion(1/x, x, t)
Heaviside(t)
"""
f_ = f
t_ = t
t = Dummy('t', polar=True) # We don't want sqrt(t**2) = abs(t) etc
f = f.subs(t_, t)
_debug('Laplace-inverting', f)
if not _is_analytic(f, x):
_debug('But expression is not analytic.')
return None
# Exponentials correspond to shifts; we filter them out and then
# shift the result later. If we are given an Add this will not
# work, but the calling code will take care of that.
shift = S.Zero
if f.is_Mul:
args = list(f.args)
elif isinstance(f, exp):
args = [f]
else:
args = None
if args:
newargs = []
exponentials = []
while args:
arg = args.pop()
if isinstance(arg, exp):
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
try:
a, b = _get_coeff_exp(arg.args[0], x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a)
else:
newargs.append(arg)
elif arg.is_Pow:
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
if x not in arg.base.free_symbols:
try:
a, b = _get_coeff_exp(arg.exp, x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a*log(arg.base))
newargs.append(arg)
else:
newargs.append(arg)
shift = Add(*exponentials)
f = Mul(*newargs)
if x not in f.free_symbols:
_debug('Expression consists of constant and exp shift:', f, shift)
cond = Eq(im(shift), 0)
if cond == False:
_debug('but shift is nonreal, cannot be a Laplace transform')
return None
res = f*DiracDelta(t + shift)
_debug('Result is a delta function, possibly conditional:', res, cond)
# cond is True or Eq
return Piecewise((res.subs(t, t_), cond))
gs = _rewrite1(f, x)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S.Zero
for C, s, f in g:
C, f = _rewrite_inversion(fac*C, po*x**s, f, x)
res += C*_int_inversion(f, x, t)
cond = And(cond, _check_antecedents_inversion(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitution:', res)
from sympy.simplify import hyperexpand
res = _my_unpolarify(hyperexpand(res))
if not res.has(Heaviside):
res *= Heaviside(t)
res = res.subs(t, t + shift)
if not isinstance(cond, bool):
cond = cond.subs(t, t + shift)
from .transforms import InverseLaplaceTransform
return Piecewise((res.subs(t, t_), cond),
(InverseLaplaceTransform(f_.subs(t, t_), x, t_, None), True))
09319ff8d8cfdca78c01cf06487a52a67df0272c2dfcfcccc2cc70eb07b0c55f | """Base class for all the objects in SymPy"""
from __future__ import annotations
from collections import defaultdict
from collections.abc import Mapping
from itertools import chain, zip_longest
from .assumptions import ManagedProperties
from .cache import cacheit
from .core import BasicMeta
from .sympify import _sympify, sympify, SympifyError, _external_converter
from .sorting import ordered
from .kind import Kind, UndefinedKind
from ._print_helpers import Printable
from sympy.utilities.decorator import deprecated
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import iterable, numbered_symbols
from sympy.utilities.misc import filldedent, func_name
from inspect import getmro
def as_Basic(expr):
"""Return expr as a Basic instance using strict sympify
or raise a TypeError; this is just a wrapper to _sympify,
raising a TypeError instead of a SympifyError."""
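    # Usage sketch (illustrative):
    #     as_Basic(1)        -> Integer(1)
    #     as_Basic(object()) -> TypeError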
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(Printable, metaclass=ManagedProperties):
"""
Base class for all SymPy objects.
Notes and conventions
=====================
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
3) By "SymPy object" we mean something that can be returned by
``sympify``. But not all objects one encounters using SymPy are
subclasses of Basic. For example, mutable objects are not:
>>> from sympy import Basic, Matrix, sympify
>>> A = Matrix([[1, 2], [3, 4]]).as_mutable()
>>> isinstance(A, Basic)
False
>>> B = sympify(A)
>>> isinstance(B, Basic)
True
"""
__slots__ = ('_mhash', # hash value
'_args', # arguments
'_assumptions'
)
_args: tuple[Basic, ...]
_mhash: int | None
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
is_real: bool | None
is_extended_real: bool | None
is_zero: bool | None
is_negative: bool | None
is_commutative: bool | None
kind: Kind = UndefinedKind
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __getnewargs__(self):
return self.args
def __getstate__(self):
return None
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
def __reduce_ex__(self, protocol):
if protocol < 2:
msg = "Only pickle protocol 2 or higher is supported by SymPy"
raise NotImplementedError(msg)
return super().__reduce_ex__(protocol)
def __hash__(self) -> int:
        # hash cannot be cached using cache_it because infinite recursion
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
        If a class defines its own __eq__, defining only _hashable_content is
        not enough; see the note about this in Basic.__eq__."""
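        # Illustration (subclasses may differ): Symbol roughly returns
        # (self.name,) plus its sorted assumptions, so two symbols with the
        # same name but different assumptions hash (and compare) differently.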
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'extended_negative': False,
'extended_nonnegative': True, 'extended_nonpositive': False,
'extended_nonzero': True, 'extended_positive': True, 'extended_real':
True, 'finite': True, 'hermitian': True, 'imaginary': False,
'infinite': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
True, 'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from .symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def _do_eq_sympify(self, other):
"""Returns a boolean indicating whether a == b when either a
or b is not a Basic. This is only done for types that were either
added to `converter` by a 3rd party or when the object has `_sympy_`
defined. This essentially reuses the code in `_sympify` that is
specific for this use case. Non-user defined types that are meant
to work with SymPy should be handled directly in the __eq__ methods
of the `Basic` classes it could equate to and not be converted. Note
that after conversion, `==` is used again since it is not
necessarily clear whether `self` or `other`'s __eq__ method needs
to be used."""
for superclass in type(other).__mro__:
conv = _external_converter.get(superclass)
if conv is not None:
return self == conv(other)
if hasattr(other, '_sympy_'):
return self == other._sympy_()
return NotImplemented
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting
__hash__ : Callable[[object], int] = <ParentClass>.__hash__.
Otherwise the inheritance of __hash__() will be blocked,
just as if __hash__ had been explicitly set to None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
if not isinstance(other, Basic):
return self._do_eq_sympify(other)
# check for pure number expr
if not (self.is_Number and other.is_Number) and (
type(self) != type(other)):
return False
a, b = self._hashable_content(), other._hashable_content()
if a != b:
return False
# check number *in* an expression
for a, b in zip(a, b):
if not isinstance(a, Basic):
continue
if a.is_Number and type(a) != type(b):
return False
return True
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.xreplace({dummy: tmp}) == o.xreplace({symbol: tmp})
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and cannot
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of SymPy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
SymPy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
nodes = _preorder_traversal(self)
if types:
result = {node for node in nodes if isinstance(node, types)}
else:
result = {node for node in nodes if not node.args}
return result
@property
def free_symbols(self) -> set[Basic]:
"""Return from the atoms of self those which are free symbols.
Not all free symbols are ``Symbol``. Eg: IndexedBase('I')[0].free_symbols
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
empty: set[Basic] = set()
return empty.union(*(a.free_symbols for a in self.args))
@property
def expr_free_symbols(self):
sympy_deprecation_warning("""
The expr_free_symbols property is deprecated. Use free_symbols to get
the free symbols of an expression.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-expr-free-symbols")
return set()
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True. When applied to a
symbol a new symbol having only the same commutativity will be
returned.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
>>> r.as_dummy()
_r
Notes
=====
Any object that has structurally bound variables should have
a property, `bound_symbols` that returns those symbols
appearing in the object.
"""
from .symbol import Dummy, Symbol
def can(x):
# mask free that shadow bound
free = x.free_symbols
bound = set(x.bound_symbols)
d = {i: Dummy() for i in bound & free}
x = x.subs(d)
# replace bound with canonical names
x = x.xreplace(x.canonical_variables)
# return after undoing masking
return x.xreplace({v: k for k, v in d.items()})
if not self.has(Symbol):
return self
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
can,
simultaneous=False)
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any free symbols in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
        # watch out for free symbols that are not in bound symbols;
# those that are in bound symbols are about to get changed
bound = self.bound_symbols
names = {i.name for i in self.free_symbols - set(bound)}
for b in bound:
d = next(dums)
if b.is_Symbol:
while d.name in names:
d = next(dums)
reps[b] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however, you can use:
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from .symbol import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify.simplify import hypersimp
from sympy.functions.elementary.piecewise import Piecewise
if self.has(Piecewise):
return None
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# don't re-eval numbers that are already evaluated since
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self) -> tuple[Basic, ...]:
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Do not override .args() from Basic (so that it is easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which do not fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
"""
from .containers import Dict
from .symbol import Dummy, Symbol
from .numbers import _illegal
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
def sympify_old(old):
if isinstance(old, str):
# Use Symbol rather than parse_expr for old
return Symbol(old)
elif isinstance(old, type):
# Allow a type e.g. Function('f') or sin
return sympify(old, strict=False)
else:
return sympify(old, strict=True)
def sympify_new(new):
if isinstance(new, (str, type)):
# Allow a type or parse a string input
return sympify(new, strict=False)
else:
return sympify(new, strict=True)
sequence = [(sympify_old(s1), sympify_new(s2)) for s1, s2 in sequence]
# skip if there is no change
sequence = [(s1, s2) for s1, s2 in sequence if not _aresame(s1, s2)]
simultaneous = kwargs.pop('simultaneous', False)
if unordered:
from .sorting import _nodes, default_sort_key
sequence = dict(sequence)
# order so more complex items are first and items
# of identical complexity are ordered so
# f(x) < f(y) < x < y
# \___ 2 __/ \_1_/ <- number of nodes
#
# For more complex ordering use an unordered sequence.
k = list(ordered(sequence, default=False, keys=(
lambda x: -_nodes(x),
default_sort_key,
)))
sequence = [(k, sequence[k]) for k in k]
# do infinities first
if not simultaneous:
redo = [i for i, seq in enumerate(sequence) if seq[1] in _illegal]
for i in reversed(redo):
sequence.insert(0, sequence.pop(i))
if simultaneous: # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs does not want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs does not need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace does not differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return self._has(iterargs, *patterns)
def has_xfree(self, s: set[Basic]):
"""return True if self has any of the patterns in s as a
free argument, else False. This is like `Basic.has_free`
but this will only report exact argument matches.
Examples
========
>>> from sympy import Function
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> f(x).has_xfree({f})
False
>>> f(x).has_xfree({f(x)})
True
>>> f(x + 1).has_xfree({x})
True
>>> f(x + 1).has_xfree({x + 1})
True
>>> f(x + y + 1).has_xfree({x + 1})
False
"""
# protect O(1) containment check by requiring:
if type(s) is not set:
raise TypeError('expecting set argument')
return any(a in s for a in iterfreeargs(self))
@cacheit
def has_free(self, *patterns):
"""return True if self has object(s) ``x`` as a free expression
else False.
Examples
========
>>> from sympy import Integral, Function
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> g = Function('g')
>>> expr = Integral(f(x), (f(x), 1, g(y)))
>>> expr.free_symbols
{y}
>>> expr.has_free(g(y))
True
>>> expr.has_free(*(x, f(x)))
False
This works for subexpressions and types, too:
>>> expr.has_free(g)
True
>>> (x + y + 1).has_free(y + 1)
True
"""
if not patterns:
return False
p0 = patterns[0]
if len(patterns) == 1 and iterable(p0) and not isinstance(p0, Basic):
# Basic can contain iterables (though not non-Basic, ideally)
# but don't encourage mixed passing patterns
raise TypeError(filldedent('''
Expecting 1 or more Basic args, not a single
non-Basic iterable. Don't forget to unpack
iterables: `eq.has_free(*patterns)`'''))
# try quick test first
s = set(patterns)
rv = self.has_xfree(s)
if rv:
return rv
# now try matching through slower _has
return self._has(iterfreeargs, *patterns)
def _has(self, iterargs, *patterns):
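        # Rough behaviour sketch: patterns may be types (isinstance check),
        # e.g. (x*y).has(Mul) -> True, or expressions, in which case an
        # optional _has_matcher hook allows structural matches such as
        # (x + y + 1).has(x + 1) -> True (see the comment further down).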
# separate out types and unhashable objects
type_set = set() # only types
p_set = set() # hashable non-types
for p in patterns:
if isinstance(p, BasicMeta):
type_set.add(p)
continue
if not isinstance(p, Basic):
try:
p = _sympify(p)
except SympifyError:
continue # Basic won't have this in it
p_set.add(p) # fails if object defines __eq__ but
# doesn't define __hash__
types = tuple(type_set) #
for i in iterargs(self): #
if i in p_set: # <--- here, too
return True
if isinstance(i, types):
return True
# use matcher if defined, e.g. operations defines
# matcher that checks for exact subset containment,
# (x + y + 1).has(x + 1) -> True
for i in p_set - type_set: # types don't have matchers
if not hasattr(i, '_has_matcher'):
continue
match = i._has_matcher()
if any(match(arg) for arg in iterargs(self)):
return True
# no success
return False
def replace(self, query, value, map=False, simultaneous=True, exact=None):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself does not match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False.
In addition, if an expression containing more than one Wild symbol
is being used to match subexpressions and the ``exact`` flag is None
it will be set to True so the match will only succeed if all non-zero
values are received for each Wild that appears in the match pattern.
Setting this to False accepts a match of 0; while setting it True
accepts all matches that have a 0 in them. See example below for
cautions.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a, b = map(Wild, 'ab')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
Matching is exact by default when more than one Wild symbol
is used: matching fails unless the match gives non-zero
values for all Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a)
y - 2
>>> (2*x).replace(a*x + b, b - a)
2*x
When set to False, the results may be non-intuitive:
>>> (2*x).replace(a*x + b, b - a, exact=False)
2/x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
When matching a single symbol, `exact` will default to True, but
this may or may not be the behavior that is desired:
Here, we want `exact=False`:
>>> from sympy import Function
>>> f = Function('f')
>>> e = f(1) + f(0)
>>> q = f(a), lambda a: f(a + 1)
>>> e.replace(*q, exact=False)
f(1) + f(2)
>>> e.replace(*q, exact=True)
f(0) + f(2)
But here, the nature of matching makes selecting
the right setting tricky:
>>> e = x**(1 + y)
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
x
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(-x - y + 1)
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
x
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(1 - y)
It is probably better to use a different form of the query
that describes the target expression more precisely:
>>> (1 + x**(1 + y)).replace(
... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
... lambda x: x.base**(1 - (x.exp - 1)))
...
x**(1 - y) + 1
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
try:
query = _sympify(query)
except SympifyError:
pass
try:
value = _sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
if exact is None:
from .symbol import Wild
exact = (len(query.atoms(Wild)) > 1)
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**
{str(k)[:-1]: v for k, v in result.items()})
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**
{str(k)[:-1]: v for k, v in result.items()})
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
def walk(rv, F):
"""Apply ``F`` to args and then to result.
"""
args = getattr(rv, 'args', None)
if args is not None:
if args:
newargs = tuple([walk(a, F) for a in args])
if args != newargs:
rv = rv.func(*newargs)
if simultaneous:
# if rv is something that was already
# matched (that was changed) then skip
# applying F again
for i, e in enumerate(args):
if rv == e and e != newargs[i]:
return rv
rv = F(rv)
return rv
mapping = {} # changes that took place
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
v = _value(expr, result)
if v is not None and v != expr:
if map:
mapping[expr] = v
expr = v
return expr
rv = walk(self, rec_replace)
return (rv, mapping) if map else rv
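# Illustrative sketch of the ``map`` option handled above, assuming standard
# SymPy behavior:
#
#     >>> from sympy import sin, cos
#     >>> from sympy.abc import x
#     >>> (sin(x) + cos(x)).replace(sin, cos, map=True)
#     (2*cos(x), {sin(x): cos(x)})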
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, _preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
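# Illustrative usage sketch for ``find``, assuming standard SymPy behavior:
#
#     >>> from sympy import sin
#     >>> from sympy.abc import x, y
#     >>> sorted((sin(x) + sin(y)).find(sin), key=str)
#     [sin(x), sin(y)]
#     >>> (x + x**2 + x**3).find(x, group=True)
#     {x: 3}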
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in _preorder_traversal(self))
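# Illustrative usage sketch for ``count``, assuming standard SymPy behavior:
#
#     >>> from sympy import sin
#     >>> from sympy.abc import x
#     >>> (sin(x) + sin(x**2)).count(sin)
#     2
#     >>> (x + x**2 + x**3).count(x)
#     3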
def matches(self, expr, repl_dict=None, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict # already a copy
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
if arg.is_Relational:
try:
d = arg.xreplace(d).matches(other_arg, d, old=old)
except TypeError: # Should be InvalidComparisonError when introduced
d = None
else:
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild, Sum
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
Structurally bound symbols are ignored during matching:
>>> Sum(x, (x, 1, 2)).match(Sum(y, (y, 1, p)))
{p_: 2}
But they can be identified if desired:
>>> Sum(x, (x, 1, 2)).match(Sum(q, (q, 1, p)))
{p_: 2, q_: x}
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
# match non-bound symbols
canonical = lambda x: x if x.is_Symbol else x.as_dummy()
m = canonical(pattern).matches(canonical(self), old=old)
if m is None:
return m
from .symbol import Wild
from .function import WildFunction
wild = pattern.atoms(Wild, WildFunction)
# sanity check
if set(m) - wild:
raise ValueError(filldedent('''
Some `matches` routine did not use a copy of repl_dict
and injected unexpected symbols. Report this as an
error at https://github.com/sympy/sympy/issues'''))
# now see if bound symbols were requested
bwild = wild - set(m)
if not bwild:
return m
# replace free-Wild symbols in pattern with match result
# so they will match but not be in the next match
wpat = pattern.xreplace(m)
# identify remaining bound wild
w = wpat.matches(self, old=old)
# add them to m
if w:
m.update(w)
# done
return m
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
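# Illustrative usage sketch for the ``count_ops`` wrapper, assuming standard
# SymPy behavior:
#
#     >>> from sympy.abc import x, y
#     >>> (x + 2*y).count_ops()
#     2
#     >>> (x + 2*y).count_ops(visual=True)
#     ADD + MUL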
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def simplify(self, **kwargs):
"""See the simplify function in sympy.simplify"""
from sympy.simplify.simplify import simplify
return simplify(self, **kwargs)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions.refine import refine
return refine(self, assumption)
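# Illustrative usage sketch for the two thin wrappers above, assuming standard
# SymPy behavior:
#
#     >>> from sympy import sin, cos, sqrt, Q
#     >>> from sympy.abc import x
#     >>> (sin(x)**2 + cos(x)**2).simplify()
#     1
#     >>> sqrt(x**2).refine(Q.positive(x))
#     x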
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from .numbers import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
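# Illustrative sketch of the default n-times loop above (calls the private
# method directly purely for demonstration; assumes standard SymPy behavior):
#
#     >>> from sympy.abc import x
#     >>> (x**4)._eval_derivative_n_times(x, 2)
#     12*x**2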
def rewrite(self, *args, deep=True, **hints):
"""
Rewrite *self* using a defined rule.
Rewriting transforms an expression to another, which is mathematically
equivalent but structurally different. For example you can rewrite
trigonometric functions as complex exponentials or combinatorial
functions as gamma function.
This method takes a *pattern* and a *rule* as positional arguments.
*pattern* is an optional parameter which defines the types of expressions
that will be transformed. If it is not passed, all possible expressions
will be rewritten. *rule* defines how the expression will be rewritten.
Parameters
==========
args : *rule*, or *pattern* and *rule*.
- *pattern* is a type or an iterable of types.
- *rule* can be any object.
deep : bool, optional.
If ``True``, subexpressions are recursively transformed. Default is
``True``.
Examples
========
If *pattern* is unspecified, all possible expressions are transformed.
>>> from sympy import cos, sin, exp, I
>>> from sympy.abc import x
>>> expr = cos(x) + I*sin(x)
>>> expr.rewrite(exp)
exp(I*x)
Pattern can be a type or an iterable of types.
>>> expr.rewrite(sin, exp)
exp(I*x)/2 + cos(x) - exp(-I*x)/2
>>> expr.rewrite([cos,], exp)
exp(I*x)/2 + I*sin(x) + exp(-I*x)/2
>>> expr.rewrite([cos, sin], exp)
exp(I*x)
Rewriting behavior can be implemented by defining ``_eval_rewrite()``
method.
>>> from sympy import Expr, sqrt, pi
>>> class MySin(Expr):
... def _eval_rewrite(self, rule, args, **hints):
... x, = args
... if rule == cos:
... return cos(pi/2 - x, evaluate=False)
... if rule == sqrt:
... return sqrt(1 - cos(x)**2)
>>> MySin(MySin(x)).rewrite(cos)
cos(-cos(-x + pi/2) + pi/2)
>>> MySin(x).rewrite(sqrt)
sqrt(1 - cos(x)**2)
Defining the ``_eval_rewrite_as_[...]()`` method is supported for backwards
compatibility reasons. It may be removed in the future, and using it is
discouraged.
>>> class MySin(Expr):
... def _eval_rewrite_as_cos(self, *args, **hints):
... x, = args
... return cos(pi/2 - x, evaluate=False)
>>> MySin(x).rewrite(cos)
cos(-x + pi/2)
"""
if not args:
return self
hints.update(deep=deep)
pattern = args[:-1]
rule = args[-1]
# support old design by _eval_rewrite_as_[...] method
if isinstance(rule, str):
method = "_eval_rewrite_as_%s" % rule
elif hasattr(rule, "__name__"):
# rule is class or function
clsname = rule.__name__
method = "_eval_rewrite_as_%s" % clsname
else:
# rule is instance
clsname = rule.__class__.__name__
method = "_eval_rewrite_as_%s" % clsname
if pattern:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = tuple(p for p in pattern if self.has(p))
if not pattern:
return self
# hereafter, empty pattern is interpreted as all pattern.
return self._rewrite(pattern, rule, method, **hints)
def _rewrite(self, pattern, rule, method, **hints):
deep = hints.pop('deep', True)
if deep:
args = [a._rewrite(pattern, rule, method, **hints)
for a in self.args]
else:
args = self.args
if not pattern or any(isinstance(self, p) for p in pattern):
meth = getattr(self, method, None)
if meth is not None:
rewritten = meth(*args, **hints)
else:
rewritten = self._eval_rewrite(rule, args, **hints)
if rewritten is not None:
return rewritten
if not args:
return self
return self.func(*args)
def _eval_rewrite(self, rule, args, **hints):
return None
_constructor_postprocessor_mapping = {} # type: ignore
@classmethod
def _exec_constructor_postprocessors(cls, obj):
# WARNING: This API is experimental.
# This is an experimental API that introduces constructor
# postprocessors for SymPy Core elements. If an argument of a SymPy
# expression has a `_constructor_postprocessor_mapping` attribute, it will
# be interpreted as a dictionary containing lists of postprocessing
# functions for matching expression node names.
clsname = obj.__class__.__name__
postprocessors = defaultdict(list)
for i in obj.args:
try:
postprocessor_mappings = (
Basic._constructor_postprocessor_mapping[cls].items()
for cls in type(i).mro()
if cls in Basic._constructor_postprocessor_mapping
)
for k, v in chain.from_iterable(postprocessor_mappings):
postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
except TypeError:
pass
for f in postprocessors.get(clsname, []):
obj = f(obj)
return obj
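# Minimal registration sketch for the experimental mapping used above; the
# names ``MySymbol`` and ``_inspect_mul`` are hypothetical and the API may
# change:
#
#     from sympy import Symbol
#
#     class MySymbol(Symbol):
#         pass
#
#     def _inspect_mul(expr):
#         # called on each newly constructed Mul whose args contain a MySymbol;
#         # returning the expression unchanged keeps this a no-op
#         return expr
#
#     Basic._constructor_postprocessor_mapping[MySymbol] = {
#         "Mul": [_inspect_mul],
#     }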
def _sage_(self):
"""
Convert *self* to a symbolic expression of SageMath.
This version of the method is merely a placeholder.
"""
old_method = self._sage_
from sage.interfaces.sympy import sympy_init
sympy_init() # may monkey-patch _sage_ method into self's class or superclasses
if old_method == self._sage_:
raise NotImplementedError('conversion to SageMath is not implemented')
else:
# call the freshly monkey-patched method
return self._sage_()
def could_extract_minus_sign(self):
return False # see Expr.could_extract_minus_sign
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = ()
def matches(self, expr, repl_dict=None, old=False):
if self == expr:
if repl_dict is None:
return {}
return repl_dict.copy()
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
In SymPy (as in Python) two numbers compare the same if they
have the same underlying base-2 representation even though
they may not be the same type:
>>> from sympy import S
>>> 2.0 == S(2)
True
>>> 0.5 == S.Half
True
This routine was written to provide a query for such cases that
would give false when the types do not match:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .numbers import Number
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(a, Number) and isinstance(b, Number):
return a == b and a.__class__ == b.__class__
for i, j in zip_longest(_preorder_traversal(a), _preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
def _ne(a, b):
# use this as a second test after `a != b` if you want to make
# sure that things are truly equal, e.g.
# a, b = 0.5, S.Half
# a != b or _ne(a, b) -> True
from .numbers import Number
# 0.5 == S.Half
if isinstance(a, Number) and isinstance(b, Number):
return a.__class__ != b.__class__
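# Illustrative check for ``_ne``, assuming standard SymPy behavior:
#
#     >>> from sympy import S
#     >>> from sympy.core.basic import _ne
#     >>> a, b = S(0.5), S.Half
#     >>> a != b              # numerically equal
#     False
#     >>> bool(a != b or _ne(a, b))
#     True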
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Do not
return any 'atoms' that are inside such quantities unless
they also appear outside of them, or unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
pot = _preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
from .symbol import Symbol
from .function import Derivative, Function
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = _sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
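# Illustrative behavior sketch for ``_make_find_query``, assuming standard
# SymPy behavior:
#
#     >>> from sympy import sin
#     >>> from sympy.core.basic import _make_find_query
#     >>> from sympy.abc import x
#     >>> q = _make_find_query(sin)   # a type becomes an isinstance check
#     >>> q(sin(x)), q(x)
#     (True, False)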
# Delayed to avoid cyclic import
from .singleton import S
from .traversal import (preorder_traversal as _preorder_traversal,
iterargs, iterfreeargs)
preorder_traversal = deprecated(
"""
Using preorder_traversal from the sympy.core.basic submodule is
deprecated.
Instead, use preorder_traversal from the top-level sympy namespace, like
sympy.preorder_traversal
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-traversal-functions-moved",
)(_preorder_traversal)
0ed6e5dd9d7bf01819f690a4429b515ccad0d6a7ce62b0622c553dac8420f5c2
"""Tools for manipulating large commutative expressions. """
from .add import Add
from .mul import Mul, _keep_coeff
from .power import Pow
from .basic import Basic
from .expr import Expr
from .function import expand_power_exp
from .sympify import sympify
from .numbers import Rational, Integer, Number, I
from .singleton import S
from .sorting import default_sort_key, ordered
from .symbol import Dummy
from .traversal import preorder_traversal
from .coreerrors import NonCommutativeExpression
from .containers import Tuple, Dict
from sympy.external.gmpy import SYMPY_INTS
from sympy.utilities.iterables import (common_prefix, common_suffix,
variations, iterable, is_sequence)
from collections import defaultdict
from typing import Tuple as tTuple
_eps = Dummy(positive=True)
def _isnumber(i):
return isinstance(i, (SYMPY_INTS, float)) or i.is_Number
def _monotonic_sign(self):
"""Return the value closest to 0 that ``self`` may have if all symbols
are signed and the result is uniformly the same sign for all values of symbols.
If a symbol is only signed but not known to be an
integer or the result is 0 then a symbol representative of the sign of self
will be returned. Otherwise, None is returned if a) the sign could be positive
or negative or b) self is not in one of the following forms:
- L(x, y, ...) + A: a function linear in all symbols x, y, ... with an
additive constant; if A is zero then the function can be a monomial whose
sign is monotonic over the range of the variables, e.g. (x + 1)**3 if x is
nonnegative.
- A/L(x, y, ...) + B: the inverse of a function linear in all symbols x, y, ...
that does not have a sign change from positive to negative for any set
of values for the variables.
- M(x, y, ...) + A: a monomial M whose factors are all signed and a constant, A.
- A/M(x, y, ...) + B: the inverse of a monomial and constants A and B.
- P(x): a univariate polynomial
Examples
========
>>> from sympy.core.exprtools import _monotonic_sign as F
>>> from sympy import Dummy
>>> nn = Dummy(integer=True, nonnegative=True)
>>> p = Dummy(integer=True, positive=True)
>>> p2 = Dummy(integer=True, positive=True)
>>> F(nn + 1)
1
>>> F(p - 1)
_nneg
>>> F(nn*p + 1)
1
>>> F(p2*p + 1)
2
>>> F(nn - 1) # could be negative, zero or positive
"""
if not self.is_extended_real:
return
if (-self).is_Symbol:
rv = _monotonic_sign(-self)
return rv if rv is None else -rv
if not self.is_Add and self.as_numer_denom()[1].is_number:
s = self
if s.is_prime:
if s.is_odd:
return Integer(3)
else:
return Integer(2)
elif s.is_composite:
if s.is_odd:
return Integer(9)
else:
return Integer(4)
elif s.is_positive:
if s.is_even:
if s.is_prime is False:
return Integer(4)
else:
return Integer(2)
elif s.is_integer:
return S.One
else:
return _eps
elif s.is_extended_negative:
if s.is_even:
return Integer(-2)
elif s.is_integer:
return S.NegativeOne
else:
return -_eps
if s.is_zero or s.is_extended_nonpositive or s.is_extended_nonnegative:
return S.Zero
return None
# univariate polynomial
free = self.free_symbols
if len(free) == 1:
if self.is_polynomial():
from sympy.polys.polytools import real_roots
from sympy.polys.polyroots import roots
from sympy.polys.polyerrors import PolynomialError
x = free.pop()
x0 = _monotonic_sign(x)
if x0 in (_eps, -_eps):
x0 = S.Zero
if x0 is not None:
d = self.diff(x)
if d.is_number:
currentroots = []
else:
try:
currentroots = real_roots(d)
except (PolynomialError, NotImplementedError):
currentroots = [r for r in roots(d, x) if r.is_extended_real]
y = self.subs(x, x0)
if x.is_nonnegative and all(
(r - x0).is_nonpositive for r in currentroots):
if y.is_nonnegative and d.is_positive:
if y:
return y if y.is_positive else Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_negative:
if y:
return y if y.is_negative else Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
elif x.is_nonpositive and all(
(r - x0).is_nonnegative for r in currentroots):
if y.is_nonnegative and d.is_negative:
if y:
return Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_positive:
if y:
return Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
else:
n, d = self.as_numer_denom()
den = None
if n.is_number:
den = _monotonic_sign(d)
elif not d.is_number:
if _monotonic_sign(n) is not None:
den = _monotonic_sign(d)
if den is not None and (den.is_positive or den.is_negative):
v = n*den
if v.is_positive:
return Dummy('pos', positive=True)
elif v.is_nonnegative:
return Dummy('nneg', nonnegative=True)
elif v.is_negative:
return Dummy('neg', negative=True)
elif v.is_nonpositive:
return Dummy('npos', nonpositive=True)
return None
# multivariate
c, a = self.as_coeff_Add()
v = None
if not a.is_polynomial():
# F/A or A/F where A is a number and F is a signed, rational monomial
n, d = a.as_numer_denom()
if not (n.is_number or d.is_number):
return
if (
a.is_Mul or a.is_Pow) and \
a.is_rational and \
all(p.exp.is_Integer for p in a.atoms(Pow) if p.is_Pow) and \
(a.is_positive or a.is_negative):
v = S.One
for ai in Mul.make_args(a):
if ai.is_number:
v *= ai
continue
reps = {}
for x in ai.free_symbols:
reps[x] = _monotonic_sign(x)
if reps[x] is None:
return
v *= ai.subs(reps)
elif c:
# signed linear expression
if not any(p for p in a.atoms(Pow) if not p.is_number) and (a.is_nonpositive or a.is_nonnegative):
free = list(a.free_symbols)
p = {}
for i in free:
v = _monotonic_sign(i)
if v is None:
return
p[i] = v or (_eps if i.is_nonnegative else -_eps)
v = a.xreplace(p)
if v is not None:
rv = v + c
if v.is_nonnegative and rv.is_positive:
return rv.subs(_eps, 0)
if v.is_nonpositive and rv.is_negative:
return rv.subs(_eps, 0)
def decompose_power(expr: Expr) -> tTuple[Expr, int]:
"""
Decompose power into symbolic base and integer exponent.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> from sympy import exp
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(exp(2*y/3))
(exp(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q)) # type: ignore
e = exp.p # type: ignore
else:
base, e = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, e = Pow(base, tail), -1
elif exp is not S.One:
# todo: after dropping python 3.7 support, use overload and Literal
# in as_coeff_Mul to make exp Rational, and remove these 2 ignores
tail = _keep_coeff(Rational(1, exp.q), tail) # type: ignore
base, e = Pow(base, tail), exp.p # type: ignore
else:
base, e = expr, 1
return base, e
def decompose_power_rat(expr: Expr) -> tTuple[Expr, Rational]:
"""
Decompose power into symbolic base and rational exponent;
if the exponent is not a Rational, then separate only the
integer coefficient.
Examples
========
>>> from sympy.core.exprtools import decompose_power_rat
>>> from sympy.abc import x
>>> from sympy import sqrt, exp
>>> decompose_power_rat(sqrt(x))
(x, 1/2)
>>> decompose_power_rat(exp(-3*x/2))
(exp(x/2), -3)
"""
_ = base, exp = expr.as_base_exp()
return _ if exp.is_Rational else decompose_power(expr)
class Factors:
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ('factors', 'gens')
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset({2, x})
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors in (None, S.One):
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left as they are.
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
# Handle all rational Coefficients
for f in list(factors.keys()):
if isinstance(f, Rational) and not isinstance(f, Integer):
p, q = Integer(f.p), Integer(f.q)
factors[p] = (factors[p] if p in factors else S.Zero) + factors[f]
factors[q] = (factors[q] if q in factors else S.Zero) - factors[f]
factors.pop(f)
if i:
factors[I] = factors.get(I, S.Zero) + i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = [k for k in factors if k is I or k in (-1, 1)]
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
factors[a.base] = factors.get(a.base, S.Zero) + a.exp
elif a == 1:
factors[a] = S.One
elif a == -1:
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
keys = getattr(factors, 'keys', None)
if keys is None:
raise TypeError('expecting Expr or dictionary')
self.gens = frozenset(keys())
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
if isinstance(exp, Integer):
b, e = factor.as_base_exp()
e = _keep_coeff(exp, e)
args.append(b**e)
else:
args.append(factor**exp)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
The only differences between this and method ``div`` are that this
1) is optimized for the case when there are few factors in common and
2) does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
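# Illustrative usage sketch for ``normal``, assuming standard SymPy behavior:
#
#     >>> from sympy.core.exprtools import Factors
#     >>> from sympy.abc import x, y, z
#     >>> a = Factors((x*y**2).as_powers_dict())
#     >>> b = Factors((y*z).as_powers_dict())
#     >>> a.normal(b)
#     (Factors({x: 1, y: 1}), Factors({z: 1}))
#     >>> a.normal(Factors(0))
#     (Factors({}), Factors({0: 1}))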
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-base powers:
>>> from sympy import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt == True:
factors[factor] = exp
elif lt == False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __truediv__(self, other): # Factors
return self.quo(other)
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self == other
class Term:
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ('coeff', 'numer', 'denom')
def __init__(self, term, numer=None, denom=None): # Term
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self): # Term
return hash((self.coeff, self.numer, self.denom))
def __repr__(self): # Term
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self): # Term
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other): # Term
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self): # Term
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other): # Term
return self.mul(other.inv())
def pow(self, other): # Term
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other): # Term
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other): # Term
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other): # Term
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __truediv__(self, other): # Term
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
def __pow__(self, other): # Term
if isinstance(other, SYMPY_INTS):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other): # Term
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other): # Term
return not self == other
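# Illustrative usage sketch for the ``Term`` helper above, assuming standard
# SymPy behavior:
#
#     >>> from sympy.core.exprtools import Term
#     >>> from sympy.abc import x, y
#     >>> t = Term(3*x**2/y)
#     >>> t.coeff, t.numer.as_expr(), t.denom.as_expr()
#     (3, x**2, y)
#     >>> (t*Term(y/x)).as_expr()
#     3*x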
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
Parameters
==========
isprimitive : boolean, optional
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
content has already been extracted.
fraction : boolean, optional
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, [t for t in terms if t]))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return S.Zero, S.Zero, S.One
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(S.One).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
Parameters
==========
terms : Expr
Can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
isprimitive : bool, optional
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
clear : bool, optional
It controls the removal of integers from the denominator of an Add
expression. When True (default), all numerical denominators will be cleared;
when False the denominators will be cleared only if all terms had numerical
denominators other than 1.
fraction : bool, optional
When True (default), will put the expression over a common
denominator.
Examples
========
>>> from sympy import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False)
(x**2/2 + 1)/(x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False, fraction=False)
(x/2 + 1/x)/y
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, sympy.polys.polytools.terms_gcd
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul(*nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul(*c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
if not clear:
c, _coeff = coeff.as_coeff_Mul()
if not c.is_Integer and not clear and numer.is_Add:
n, d = c.as_numer_denom()
_numer = numer/d
if any(a.as_coeff_Mul()[0].is_Integer
for a in _numer.args):
numer = _numer
coeff = n*_coeff
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
if isinstance(a, Basic):
if not a.args:
return a
return a.func(*[handle(i) for i in a.args])
return type(a)([handle(i) for i in a])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def _factor_sum_int(expr, **kwargs):
"""Return Sum or Integral object with factors that are not
in the wrt variables removed. In cases where there are additive
terms in the function of the object that are independent, the
object will be separated into two objects.
Examples
========
>>> from sympy import Sum, factor_terms
>>> from sympy.abc import x, y
>>> factor_terms(Sum(x + y, (x, 1, 3)))
y*Sum(1, (x, 1, 3)) + Sum(x, (x, 1, 3))
>>> factor_terms(Sum(x*y, (x, 1, 3)))
y*Sum(x, (x, 1, 3))
Notes
=====
If a function in the summand or integrand is replaced
with a symbol, then this simplification should not be
done or else an incorrect result will be obtained when
the symbol is replaced with an expression that depends
on the variables of summation/integration:
>>> eq = Sum(y, (x, 1, 3))
>>> factor_terms(eq).subs(y, x).doit()
3*x
>>> eq.subs(y, x).doit()
6
"""
result = expr.function
if result == 0:
return S.Zero
limits = expr.limits
# get the wrt variables
wrt = {i.args[0] for i in limits}
# factor out any common terms that are independent of wrt
f = factor_terms(result, **kwargs)
i, d = f.as_independent(*wrt)
if isinstance(f, Add):
return i * expr.func(1, *limits) + expr.func(d, *limits)
else:
return i * expr.func(d, *limits)
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
Parameters
==========
radical: bool, optional
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
clear : bool, optional
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
fraction : bool, optional
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
sign : bool, optional
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> from sympy import factor_terms, Symbol
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
If a -1 is all that can be factored out, to *not* factor it out, the
flag ``sign`` must be False:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, sympy.polys.polytools.terms_gcd
"""
def do(expr):
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([do(i) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([do(i) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
if isinstance(expr, (Sum, Integral)):
return _factor_sum_int(expr,
radical=radical, clear=clear,
fraction=fraction, sign=sign)
cont, p = expr.as_content_primitive(radical=radical, clear=clear)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if not any(a.as_coeff_Mul()[0].extract_multiplicatively(-1) is None
for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
b, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
Explanation
===========
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Parameters
==========
name : str
``name``, if given, is the name that will be used with numbered Dummy
variables that will replace the non-commutative objects and is mainly
used for doctesting purposes.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import symbols
>>> from sympy.core.exprtools import _mask_nc
>>> from sympy.abc import x, y
>>> A, B, C = symbols('A,B,C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(_d0**2 - x**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, {}, [A, B])
An nc-object with nc-symbols but no others outside of it:
>>> _mask_nc(1 + x*Commutator(A, B), 'd')
(_d0*x + 1, {_d0: Commutator(A, B)}, [])
>>> _mask_nc(NO(Fd(x)*F(y)), 'd')
(_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
Multiple nc-objects:
>>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
>>> _mask_nc(eq, 'd')
(x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
Multiple nc-objects and nc-symbols:
>>> eq = A*Commutator(A, B) + B*Commutator(A, C)
>>> _mask_nc(eq, 'd')
(A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from .symbol import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_symbol:
nc_syms.add(a)
pot.skip()
elif not (a.is_Add or a.is_Mul or a.is_Pow):
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol or object, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_obj) == 1 and not nc_syms:
rep.append((nc_obj.pop(), Dummy()))
elif len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, {v: k for k, v in rep}, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
Examples
========
>>> from sympy import factor_nc, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr = expr.func(*[expand_power_exp(i) for i in expr.args])
from sympy.polys.polytools import gcd, factor
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
if g is not S.One:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
if cc:
cc[0] = cc[0]/c
else:
cc = [1/c]
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for _ in args:
_[1][0] = il*_[1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for _ in args:
_[1] = _[1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for _ in args:
_[1][-1] = _[1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for _ in args:
_[1] = _[1][:len(_[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
from sympy.simplify.powsimp import powsimp
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
# a factor of -1, e.g. A**2 - B**2 -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
def _pemexpand(expr):
"Expand with the minimal set of hints necessary to check the result."
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
caa88b3ea5147ba14fdf11025b14e6e44c8a6da9852317a4c9aa9315dab2c749
"""
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import annotations
from typing import Any
from collections.abc import Iterable
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic, _atomic
from .cache import cacheit
from .containers import Tuple, Dict
from .decorators import _sympifyit
from .evalf import pure_complex
from .expr import Expr, AtomicExpr
from .logic import fuzzy_and, fuzzy_or, fuzzy_not, FuzzyBool
from .mul import Mul
from .numbers import Rational, Float, Integer
from .operations import LatticeOp
from .parameters import global_parameters
from .rules import Transform
from .singleton import S
from .sympify import sympify, _sympify
from .sorting import default_sort_key, ordered
from sympy.utilities.exceptions import (sympy_deprecation_warning,
SymPyDeprecationWarning, ignore_warnings)
from sympy.utilities.iterables import (has_dups, sift, iterable,
is_sequence, uniq, topological_sort)
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
from sympy.utilities.misc import as_int, filldedent, func_name
import mpmath
from mpmath.libmp.libmpf import prec_to_dps
import inspect
from collections import Counter
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
For matrix expressions:
>>> from sympy import MatrixSymbol, sqrt
>>> A = MatrixSymbol("A", 3, 3)
>>> _coeff_isneg(-sqrt(2)*A)
True
>>> _coeff_isneg(sqrt(2)*A)
False
"""
if a.is_MatMul:
a = a.args[0]
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_extended_negative
class PoleError(Exception):
pass
class ArgumentIndexError(ValueError):
def __str__(self):
return ("Invalid operation with argument number %s for Function %s" %
(self.args[1], self.args[0]))
class BadSignatureError(TypeError):
'''Raised when a Lambda is created with an invalid signature'''
pass
class BadArgumentsError(TypeError):
'''Raised when a Lambda is called with an incorrect number of arguments'''
pass
# Python 3 version that does not raise a Deprecation warning
def arity(cls):
"""Return the arity of the function if it is known, else None.
Explanation
===========
When default values are specified for some arguments, they are
optional and the arity is reported as a tuple of possible values.
Examples
========
>>> from sympy import arity, log
>>> arity(lambda x: x)
1
>>> arity(log)
(1, 2)
>>> arity(lambda *x: sum(x)) is None
True
"""
eval_ = getattr(cls, 'eval', cls)
parameters = inspect.signature(eval_).parameters.items()
if [p for _, p in parameters if p.kind == p.VAR_POSITIONAL]:
return
p_or_k = [p for _, p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
# how many have no default and how many have a default value
no, yes = map(len, sift(p_or_k,
lambda p:p.default == p.empty, binary=True))
return no if not yes else tuple(range(no, no + yes + 1))
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __init__(cls, *args, **kwargs):
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', arity(cls)))
if nargs is None and 'nargs' not in cls.__dict__:
for supcls in cls.__mro__:
if hasattr(supcls, '_nargs'):
nargs = supcls._nargs
break
else:
continue
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError(filldedent('''
Incorrectly specified nargs as %s:
if there are no arguments, it should be
`nargs = 0`;
if there are any number of arguments,
it should be
`nargs = None`''' % str(nargs)))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
cls._nargs = nargs
# When __init__ is called from UndefinedFunction it is called with
# just one arg but when it is called from subclassing Function it is
# called with the usual (name, bases, namespace) type() signature.
if len(args) == 3:
namespace = args[2]
if 'eval' in namespace and not isinstance(namespace['eval'], classmethod):
raise TypeError("eval on Function subclasses should be a class method (defined with @classmethod)")
super().__init__(*args, **kwargs)
@property
def __signature__(self):
"""
Allow Python 3's inspect.signature to give a useful signature for
Function subclasses.
"""
# Python 3 only, but backports (like the one in IPython) still might
# call this.
try:
from inspect import signature
except ImportError:
return None
# TODO: Look at nargs
return signature(self.eval)
@property
def free_symbols(self):
return set()
@property
def xreplace(self):
# Function needs args so we define a property that returns
# a function that takes args...and then use that function
# to return the right value
return lambda rule, **_: rule.get(self, self)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
>>> from sympy import Function
>>> f = Function('f')
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
{1}
>>> Function('f', nargs=(2, 1)).nargs
{1, 2}
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f = Function('f')
>>> f(1).nargs
Naturals0
>>> len(f(1).args)
1
"""
from sympy.sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def _valid_nargs(self, n : int) -> bool:
""" Return True if the specified integer is a valid number of arguments
The number of arguments n is guaranteed to be an integer and positive
"""
if self._nargs:
return n in self._nargs
nargs = self.nargs
return nargs is S.Naturals0 or n in nargs
def __repr__(cls):
return cls.__name__
class Application(Basic, metaclass=FunctionClass):
"""
Base class for applied functions.
Explanation
===========
    Instances of Application represent the result of applying a function of
    any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_parameters.evaluate)
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super().__new__(cls, *args, **options)
# make nargs uniform here
sentinel = object()
objnargs = getattr(obj, "nargs", sentinel)
if objnargs is not sentinel:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(objnargs):
nargs = tuple(ordered(set(objnargs)))
elif objnargs is not None:
nargs = (as_int(objnargs),)
else:
nargs = None
else:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
Explanation
===========
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
        (possibly of some other class), or if the class cls should be
unmodified, return None.
Examples of eval() for the function "sign"
---------------------------------------------
.. code-block:: python
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg.is_zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*[i._subs(old, new) for i in self.args])
class Function(Application, Expr):
r"""
Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
See the :ref:`custom-functions` guide for details on how to subclass
``Function`` and what methods can be defined.
Examples
========
**Undefined Functions**
To create an undefined function, pass a string of the function name to
``Function``.
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
Assumptions can be passed to ``Function`` the same as with a
:class:`~.Symbol`. Alternatively, you can use a ``Symbol`` with
assumptions for the function name and the function will inherit the name
and assumptions associated with the ``Symbol``:
>>> f_real = Function('f', real=True)
>>> f_real(x).is_real
True
>>> f_real_inherit = Function(Symbol('f', real=True))
>>> f_real_inherit(x).is_real
True
Note that assumptions on a function are unrelated to the assumptions on
the variables it is called on. If you want to add a relationship, subclass
``Function`` and define custom assumptions handler methods. See the
:ref:`custom-functions-assumptions` section of the :ref:`custom-functions`
guide for more details.
**Custom Function Subclasses**
The :ref:`custom-functions` guide has several
:ref:`custom-functions-complete-examples` of how to subclass ``Function``
to create a custom function.
"""
@property
def _diff_wrt(self):
return False
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if not cls._valid_nargs(n):
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_parameters.evaluate)
result = super().__new__(cls, *args, **options)
if evaluate and isinstance(result, cls) and result.args:
_should_evalf = [cls._should_evalf(a) for a in result.args]
pr2 = min(_should_evalf)
if pr2 > 0:
pr = max(_should_evalf)
result = result.evalf(prec_to_dps(pr))
return _sympify(result)
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
Explanation
===========
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number (including complex numbers).
This function is used by __new__.
Returns the precision to evalf to, or -1 if it should not evalf.
"""
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
m = pure_complex(arg)
if m is None:
return -1
# the elements of m are of type Number, so have a _prec
return max(m[0]._prec, m[1]._prec)
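    # A small illustrative sketch of the rule above (not part of the class;
    # outputs paraphrased, not asserted): with ``from sympy import cos, Float,
    # Rational``,
    #
    #     cos(Float('0.3'))    # auto-evaluates; _should_evalf returns a precision
    #     cos(Rational(1, 3))  # stays symbolic; _should_evalf returns -1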
@classmethod
def class_key(cls):
from sympy.sets.fancysets import Naturals0
funcs = {
'exp': 10,
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
def _eval_evalf(self, prec):
def _get_mpmath_func(fname):
"""Lookup mpmath function based on name"""
if isinstance(self, AppliedUndef):
# Shouldn't lookup in mpmath but might have ._imp_
return None
if not hasattr(mpmath, fname):
fname = MPMATH_TRANSLATIONS.get(fname, None)
if fname is None:
return None
return getattr(mpmath, fname)
_eval_mpmath = getattr(self, '_eval_mpmath', None)
if _eval_mpmath is None:
func = _get_mpmath_func(self.func.__name__)
args = self.args
else:
func, args = _eval_mpmath()
# Fall-back evaluation
if func is None:
imp = getattr(self, '_imp_', None)
if imp is None:
return None
try:
return Float(imp(*[i.evalf(prec) for i in self.args]), prec)
except (TypeError, ValueError):
return None
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
                    return m[1] != 1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
                    return m[1] != 1 and m[-1] == 1 and \
                        n[1] != 1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da.is_zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def _eval_is_meromorphic(self, x, a):
if not self.args:
return True
if any(arg.has(x) for arg in self.args[1:]):
return False
arg = self.args[0]
if not arg._eval_is_meromorphic(x, a):
return None
return fuzzy_not(type(self).is_singular(arg.subs(x, a)))
_singularities: FuzzyBool | tuple[Expr, ...] = None
@classmethod
def is_singular(cls, a):
"""
Tests whether the argument is an essential singularity
        or a branch point, or whether the function is non-holomorphic.
"""
ss = cls._singularities
if ss in (True, None, False):
return ss
return fuzzy_or(a.is_infinite if s is S.ComplexInfinity
else (a - s).is_zero for s in ss)
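    # Hedged sketch of how a subclass might use this hook (hypothetical values,
    # not SymPy's actual declarations): a function with a branch point at 0 and
    # an essential singularity at ComplexInfinity could declare
    #
    #     _singularities = (S.Zero, S.ComplexInfinity)
    #
    # so that ``is_singular(a)`` is fuzzy-true when ``a`` is zero or infinite.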
def as_base_exp(self):
"""
        Returns the function as the 2-tuple (base, exponent).
"""
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
raise PoleError(filldedent('''
Asymptotic expansion of %s around %s is
not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx, cdir=0):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> from sympy import atan2
>>> from sympy.abc import x, y
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> from sympy import loggamma
>>> loggamma(1/x)._eval_nseries(x,0,None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from .symbol import uniquely_named_symbol
from sympy.series.order import Order
from sympy.sets.sets import FiniteSet
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(t.is_finite is False for t in args0):
from .numbers import oo, zoo, nan
a = [t.as_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any(t.has(oo, -oo, zoo, nan) for t in a0):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy() for _ in z]
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
return s
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
if len(e.args) == 1:
# issue 14411
e = e.func(e.args[0].cancel())
term = e.subs(x, S.Zero)
if term.is_finite is False or term is S.NaN:
raise PoleError("Cannot expand %s around 0" % (self))
series = term
fact = S.One
_x = uniquely_named_symbol('xi', self)
e = e.subs(x, _x)
for i in range(1, n):
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs(_x, S.Zero)
if subs is S.NaN:
# try to evaluate a limit if we have to
subs = e.limit(_x, S.Zero)
if subs.is_finite is False:
raise PoleError("Cannot expand %s around 0" % (self))
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
l = []
g = None
# try to predict a number of terms needed
nterms = n + 2
cf = Order(arg.as_leading_term(x), x).getn()
if cf != 0:
nterms = (n/cf).ceiling()
for i in range(nterms):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return Add(*l) + Order(x**n, x)
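    # Rough sketch of the dispatch above (hedged; described in terms of this
    # default implementation only): an argument that vanishes at x = 0, as in
    # cos(x), falls through to the taylor_term loop at the end, while an
    # argument tending to a nonzero constant, as in cos(x + 1), is handled by
    # the generic term-by-term differentiation branch.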
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
ix = argindex - 1
A = self.args[ix]
if A._diff_wrt:
if len(self.args) == 1 or not A.is_Symbol:
return _derivative_dispatch(self, A)
for i, v in enumerate(self.args):
if i != ix and A in v.free_symbols:
# it can't be in any other argument's free symbols
# issue 8510
break
else:
return _derivative_dispatch(self, A)
# See issue 4624 and issue 4719, 5600 and 8510
D = Dummy('xi_%i' % argindex, dummy_index=hash(A))
args = self.args[:ix] + (D,) + self.args[ix + 1:]
return Subs(Derivative(self.func(*args), D), D, A)
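    # Illustrative sketch of the fall-through behaviour (assuming an undefined
    # function f and symbols x, y; results paraphrased, not verified here):
    #
    #     f(x, y).fdiff(2)  # -> Derivative(f(x, y), y)
    #     f(x*y).fdiff(1)   # -> Subs(Derivative(f(xi_1), xi_1), xi_1, x*y)
    #
    # The second call is wrapped in Subs because x*y is not a valid variable
    # of differentiation.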
def _eval_as_leading_term(self, x, logx=None, cdir=0):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from sympy.series.order import Order
args = [a.as_leading_term(x, logx=logx) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
'%s has no _eval_as_leading_term routine' % self.func)
else:
return self.func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
is_number = False
def __new__(cls, *args, **options):
args = list(map(sympify, args))
u = [a.name for a in args if isinstance(a, UndefinedFunction)]
if u:
raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % (
's'*(len(u) > 1), ', '.join(u)))
obj = super().__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x, logx=None, cdir=0):
return self
@property
def _diff_wrt(self):
"""
Allow derivatives wrt to undefined functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
>>> f(x).diff(x)
Derivative(f(x), x)
"""
return True
class UndefSageHelper:
"""
Helper to facilitate Sage conversion.
"""
def __get__(self, ins, typ):
import sage.all as sage
if ins is None:
return lambda: sage.function(typ.__name__)
else:
args = [arg._sage_() for arg in ins.args]
return lambda : sage.function(ins.__class__.__name__)(*args)
_undef_sage_helper = UndefSageHelper()
class UndefinedFunction(FunctionClass):
"""
The (meta)class of undefined functions.
"""
def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
from .symbol import _filter_assumptions
# Allow Function('f', real=True)
# and/or Function(Symbol('f', real=True))
assumptions, kwargs = _filter_assumptions(kwargs)
if isinstance(name, Symbol):
assumptions = name._merge(assumptions)
name = name.name
elif not isinstance(name, str):
raise TypeError('expecting string or Symbol for name')
else:
commutative = assumptions.get('commutative', None)
assumptions = Symbol(name, **assumptions).assumptions0
if commutative is None:
assumptions.pop('commutative')
__dict__ = __dict__ or {}
        # put the `is_*` assumptions into __dict__
__dict__.update({'is_%s' % k: v for k, v in assumptions.items()})
# You can add other attributes, although they do have to be hashable
# (but seriously, if you want to add anything other than assumptions,
# just subclass Function)
__dict__.update(kwargs)
# add back the sanitized assumptions without the is_ prefix
kwargs.update(assumptions)
# Save these for __eq__
__dict__.update({'_kwargs': kwargs})
# do this for pickling
__dict__['__module__'] = None
obj = super().__new__(mcl, name, bases, __dict__)
obj.name = name
obj._sage_ = _undef_sage_helper
return obj
def __instancecheck__(cls, instance):
return cls in type(instance).__mro__
_kwargs: dict[str, bool | None] = {}
def __hash__(self):
return hash((self.class_key(), frozenset(self._kwargs.items())))
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.class_key() == other.class_key() and
self._kwargs == other._kwargs)
def __ne__(self, other):
return not self == other
@property
def _diff_wrt(self):
return False
# XXX: The type: ignore on WildFunction is because mypy complains:
#
# sympy/core/function.py:939: error: Cannot determine type of 'sort_key' in
# base class 'Expr'
#
# Somehow this is because of the @cacheit decorator but it is not clear how to
# fix it.
class WildFunction(Function, AtomicExpr): # type: ignore
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> from sympy import WildFunction, Function, cos
>>> from sympy.abc import x, y
>>> F = WildFunction('F')
>>> f = Function('f')
>>> F.nargs
Naturals0
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
{2}
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
{1, 2}
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
# XXX: What is this class attribute used for?
include: set[Any] = set()
def __init__(cls, name, **assumptions):
from sympy.sets.sets import Set, FiniteSet
cls.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
nargs = FiniteSet(*nargs)
cls.nargs = nargs
def matches(self, expr, repl_dict=None, old=False):
if not isinstance(expr, (AppliedUndef, Function)):
return None
if len(expr.args) not in self.nargs:
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
Examples
========
>>> from sympy import Derivative, Function, symbols, Subs
>>> from sympy.abc import x, y
>>> f, g = symbols('f g', cls=Function)
>>> Derivative(x**2, x, evaluate=True)
2*x
Denesting of derivatives retains the ordering of variables:
>>> Derivative(Derivative(f(x, y), y), x)
Derivative(f(x, y), y, x)
Contiguously identical symbols are merged into a tuple giving
the symbol and the count:
>>> Derivative(f(x), x, x, y, x)
Derivative(f(x), (x, 2), y, x)
If the derivative cannot be performed, and evaluate is True, the
order of the variables of differentiation will be made canonical:
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Derivatives with respect to undefined functions can be calculated:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
Such derivatives will show up when the chain rule is used to
    evaluate a derivative:
>>> f(g(x)).diff(x)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
Substitution is used to represent derivatives of functions with
arguments that are not symbols or functions:
>>> f(2*x + 3).diff(x) == 2*Subs(f(y).diff(y), y, 2*x + 3)
True
Notes
=====
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> from sympy import sqrt, diff, Function, symbols
>>> from sympy.abc import x, y, z
>>> f, g = symbols('f,g', cls=Function)
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, (x, 5), simplify=False).count_ops()
136
>>> diff(e, (x, 5)).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression cannot be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked.
Derivative wrt non-Symbols:
For the most part, one may not differentiate wrt non-symbols.
For example, we do not allow differentiation wrt `x*y` because
there are multiple ways of structurally defining where x*y appears
in an expression: a very strict definition would make
(x*y*z).diff(x*y) == 0. Derivatives wrt defined functions (like
cos(x)) are not allowed, either:
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't calculate derivative wrt x*y.
To make it easier to work with variational calculus, however,
derivatives wrt AppliedUndef and Derivatives are allowed.
For example, in the Euler-Lagrange method one may write
F(t, u, v) where u = f(t) and v = f'(t). These variables can be
written explicitly as functions of time::
>>> from sympy.abc import t
>>> F = Function('F')
>>> U = f(t)
>>> V = U.diff(t)
The derivative wrt f(t) can be obtained directly:
>>> direct = F(t, U, V).diff(U)
When differentiation wrt a non-Symbol is attempted, the non-Symbol
is temporarily converted to a Symbol while the differentiation
is performed and the same answer is obtained:
>>> indirect = F(t, U, V).subs(U, x).diff(x).subs(x, U)
>>> assert direct == indirect
The implication of this non-symbol replacement is that all
functions are treated as independent of other functions and the
symbols are independent of the functions that contain them::
>>> x.diff(f(x))
0
>>> g(x).diff(f(x))
0
It also means that derivatives are assumed to depend only
on the variables of differentiation, not on anything contained
within the expression being differentiated::
>>> F = f(x)
>>> Fx = F.diff(x)
>>> Fx.diff(F) # derivative depends on x, not F
0
>>> Fxx = Fx.diff(x)
>>> Fxx.diff(Fx) # derivative depends on x, not Fx
0
The last example can be made explicit by showing the replacement
of Fx in Fxx with y:
>>> Fxx.subs(Fx, y)
Derivative(y, x)
Since that in itself will evaluate to zero, differentiating
wrt Fx will also be zero:
>>> _.doit()
0
Replacing undefined functions with concrete expressions
One must be careful to replace undefined functions with expressions
that contain variables consistent with the function definition and
    the variables of differentiation, or else inconsistent results will
be obtained. Consider the following example:
>>> eq = f(x)*g(y)
>>> eq.subs(f(x), x*y).diff(x, y).doit()
y*Derivative(g(y), y) + g(y)
>>> eq.diff(x, y).subs(f(x), x*y).doit()
y*Derivative(g(y), y)
The results differ because `f(x)` was replaced with an expression
that involved both variables of differentiation. In the abstract
case, differentiation of `f(x)` by `y` is 0; in the concrete case,
the presence of `y` made that derivative nonvanishing and produced
the extra `g(y)` term.
Defining differentiation for an object
    An object must define an _eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Any class can allow derivatives to be taken with respect to
itself (while indicating its scalar nature). See the
docstring of Expr._diff_wrt.
See Also
========
_sort_variable_count
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""An expression may be differentiated wrt a Derivative if
it is in elementary form.
Examples
========
>>> from sympy import Function, Derivative, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> Derivative(f(x), x)._diff_wrt
True
>>> Derivative(cos(x), x)._diff_wrt
False
>>> Derivative(x + 1, x)._diff_wrt
False
A Derivative might be an unevaluated form of what will not be
a valid variable of differentiation if evaluated. For example,
>>> Derivative(f(f(x)), x).doit()
Derivative(f(x), x)*Derivative(f(f(x)), f(x))
Such an expression will present the same ambiguities as arise
when dealing with any other product, like ``2*x``, so ``_diff_wrt``
is False:
>>> Derivative(f(f(x)), x)._diff_wrt
False
"""
return self.expr._diff_wrt and isinstance(self.doit(), Derivative)
def __new__(cls, expr, *variables, **kwargs):
expr = sympify(expr)
symbols_or_none = getattr(expr, "free_symbols", None)
has_symbol_set = isinstance(symbols_or_none, set)
if not has_symbol_set:
raise ValueError(filldedent('''
Since there are no variables in the expression %s,
it cannot be differentiated.''' % expr))
# determine value for variables if it wasn't given
if not variables:
variables = expr.free_symbols
if len(variables) != 1:
if expr.is_number:
return S.Zero
if len(variables) == 0:
raise ValueError(filldedent('''
Since there are no variables in the expression,
the variable(s) of differentiation must be supplied
to differentiate %s''' % expr))
else:
raise ValueError(filldedent('''
Since there is more than one variable in the
expression, the variable(s) of differentiation
must be supplied to differentiate %s''' % expr))
# Split the list of variables into a list of the variables we are diff
# wrt, where each element of the list has the form (s, count) where
# s is the entity to diff wrt and count is the order of the
# derivative.
variable_count = []
array_likes = (tuple, list, Tuple)
from sympy.tensor.array import Array, NDimArray
for i, v in enumerate(variables):
if isinstance(v, UndefinedFunction):
raise TypeError(
"cannot differentiate wrt "
"UndefinedFunction: %s" % v)
if isinstance(v, array_likes):
if len(v) == 0:
# Ignore empty tuples: Derivative(expr, ... , (), ... )
continue
if isinstance(v[0], array_likes):
# Derive by array: Derivative(expr, ... , [[x, y, z]], ... )
if len(v) == 1:
v = Array(v[0])
count = 1
else:
v, count = v
v = Array(v)
else:
v, count = v
if count == 0:
continue
variable_count.append(Tuple(v, count))
continue
v = sympify(v)
if isinstance(v, Integer):
if i == 0:
raise ValueError("First variable cannot be a number: %i" % v)
count = v
prev, prevcount = variable_count[-1]
if prevcount != 1:
raise TypeError("tuple {} followed by number {}".format((prev, prevcount), v))
if count == 0:
variable_count.pop()
else:
variable_count[-1] = Tuple(prev, count)
else:
count = 1
variable_count.append(Tuple(v, count))
# light evaluation of contiguous, identical
# items: (x, 1), (x, 1) -> (x, 2)
merged = []
for t in variable_count:
v, c = t
if c.is_negative:
raise ValueError(
'order of differentiation must be nonnegative')
if merged and merged[-1][0] == v:
c += merged[-1][1]
if not c:
merged.pop()
else:
merged[-1] = Tuple(v, c)
else:
merged.append(t)
variable_count = merged
        # sanity check of variables of differentiation; we waited
# until the counts were computed since some variables may
# have been removed because the count was 0
for v, c in variable_count:
# v must have _diff_wrt True
if not v._diff_wrt:
__ = '' # filler to make error message neater
raise ValueError(filldedent('''
Can't calculate derivative wrt %s.%s''' % (v,
__)))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if len(variable_count) == 0:
return expr
evaluate = kwargs.get('evaluate', False)
if evaluate:
if isinstance(expr, Derivative):
expr = expr.canonical
variable_count = [
(v.canonical if isinstance(v, Derivative) else v, c)
for v, c in variable_count]
# Look for a quick exit if there are symbols that don't appear in
# expression at all. Note, this cannot check non-symbols like
# Derivatives as those can be created by intermediate
# derivatives.
zero = False
free = expr.free_symbols
from sympy.matrices.expressions.matexpr import MatrixExpr
for v, c in variable_count:
vfree = v.free_symbols
if c.is_positive and vfree:
if isinstance(v, AppliedUndef):
# these match exactly since
# x.diff(f(x)) == g(x).diff(f(x)) == 0
# and are not created by differentiation
D = Dummy()
if not expr.xreplace({v: D}).has(D):
zero = True
break
elif isinstance(v, MatrixExpr):
zero = False
break
elif isinstance(v, Symbol) and v not in free:
zero = True
break
else:
if not free & vfree:
# e.g. v is IndexedBase or Matrix
zero = True
break
if zero:
return cls._get_zero_with_shape_like(expr)
# make the order of symbols canonical
#TODO: check if assumption of discontinuous derivatives exist
variable_count = cls._sort_variable_count(variable_count)
# denest
if isinstance(expr, Derivative):
variable_count = list(expr.variable_count) + variable_count
expr = expr.expr
return _derivative_dispatch(expr, *variable_count, **kwargs)
# we return here if evaluate is False or if there is no
# _eval_derivative method
if not evaluate or not hasattr(expr, '_eval_derivative'):
# return an unevaluated Derivative
if evaluate and variable_count == [(expr, 1)] and expr.is_scalar:
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
return S.One
return Expr.__new__(cls, expr, *variable_count)
# evaluate the derivative by calling _eval_derivative method
# of expr for each variable
# -------------------------------------------------------------
nderivs = 0 # how many derivatives were performed
unhandled = []
from sympy.matrices.common import MatrixCommon
for i, (v, count) in enumerate(variable_count):
old_expr = expr
old_v = None
is_symbol = v.is_symbol or isinstance(v,
(Iterable, Tuple, MatrixCommon, NDimArray))
if not is_symbol:
old_v = v
v = Dummy('xi')
expr = expr.xreplace({old_v: v})
# Derivatives and UndefinedFunctions are independent
# of all others
clashing = not (isinstance(old_v, Derivative) or \
isinstance(old_v, AppliedUndef))
if v not in expr.free_symbols and not clashing:
return expr.diff(v) # expr's version of 0
if not old_v.is_scalar and not hasattr(
old_v, '_eval_derivative'):
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
expr *= old_v.diff(old_v)
obj = cls._dispatch_eval_derivative_n_times(expr, v, count)
if obj is not None and obj.is_zero:
return obj
nderivs += count
if old_v is not None:
if obj is not None:
# remove the dummy that was used
obj = obj.subs(v, old_v)
# restore expr
expr = old_expr
if obj is None:
# we've already checked for quick-exit conditions
# that give 0 so the remaining variables
# are contained in the expression but the expression
# did not compute a derivative so we stop taking
# derivatives
unhandled = variable_count[i:]
break
expr = obj
# what we have so far can be made canonical
expr = expr.replace(
lambda x: isinstance(x, Derivative),
lambda x: x.canonical)
if unhandled:
if isinstance(expr, Derivative):
unhandled = list(expr.variable_count) + unhandled
expr = expr.expr
expr = Expr.__new__(cls, expr, *unhandled)
if (nderivs > 1) == True and kwargs.get('simplify', True):
from .exprtools import factor_terms
from sympy.simplify.simplify import signsimp
expr = factor_terms(signsimp(expr))
return expr
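    # Brief sketch of the quick-exit logic above (assuming undefined functions
    # f, g and symbols x, y): f(x).diff(y) returns 0 because y does not appear
    # in f(x), and g(x).diff(f(x)) returns 0 because AppliedUndef variables of
    # differentiation must match exactly.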
@property
def canonical(cls):
return cls.func(cls.expr,
*Derivative._sort_variable_count(cls.variable_count))
@classmethod
def _sort_variable_count(cls, vc):
"""
Sort (variable, count) pairs into canonical order while
retaining order of variables that do not commute during
differentiation:
* symbols and functions commute with each other
* derivatives commute with each other
* a derivative does not commute with anything it contains
* any other object is not allowed to commute if it has
free symbols in common with another object
Examples
========
>>> from sympy import Derivative, Function, symbols
>>> vsort = Derivative._sort_variable_count
>>> x, y, z = symbols('x y z')
>>> f, g, h = symbols('f g h', cls=Function)
Contiguous items are collapsed into one pair:
>>> vsort([(x, 1), (x, 1)])
[(x, 2)]
>>> vsort([(y, 1), (f(x), 1), (y, 1), (f(x), 1)])
[(y, 2), (f(x), 2)]
Ordering is canonical.
>>> def vsort0(*v):
... # docstring helper to
... # change vi -> (vi, 0), sort, and return vi vals
... return [i[0] for i in vsort([(i, 0) for i in v])]
>>> vsort0(y, x)
[x, y]
>>> vsort0(g(y), g(x), f(y))
[f(y), g(x), g(y)]
Symbols are sorted as far to the left as possible but never
move to the left of a derivative having the same symbol in
its variables; the same applies to AppliedUndef which are
always sorted after Symbols:
>>> dfx = f(x).diff(x)
>>> assert vsort0(dfx, y) == [y, dfx]
>>> assert vsort0(dfx, x) == [dfx, x]
"""
if not vc:
return []
vc = list(vc)
if len(vc) == 1:
return [Tuple(*vc[0])]
V = list(range(len(vc)))
E = []
v = lambda i: vc[i][0]
D = Dummy()
def _block(d, v, wrt=False):
# return True if v should not come before d else False
if d == v:
return wrt
if d.is_Symbol:
return False
if isinstance(d, Derivative):
                # a derivative blocks if any of its variables contain
# v; the wrt flag will return True for an exact match
# and will cause an AppliedUndef to block if v is in
# the arguments
if any(_block(k, v, wrt=True)
for k in d._wrt_variables):
return True
return False
if not wrt and isinstance(d, AppliedUndef):
return False
if v.is_Symbol:
return v in d.free_symbols
if isinstance(v, AppliedUndef):
return _block(d.xreplace({v: D}), D)
return d.free_symbols & v.free_symbols
for i in range(len(vc)):
for j in range(i):
if _block(v(j), v(i)):
E.append((j,i))
# this is the default ordering to use in case of ties
O = dict(zip(ordered(uniq([i for i, c in vc])), range(len(vc))))
ix = topological_sort((V, E), key=lambda i: O[v(i)])
# merge counts of contiguously identical items
merged = []
for v, c in [vc[i] for i in ix]:
if merged and merged[-1][0] == v:
merged[-1][1] += c
else:
merged.append([v, c])
return [Tuple(*i) for i in merged]
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If v (the variable of differentiation) is not in
# self.variables, we might be able to take the derivative.
if v not in self._wrt_variables:
dedv = self.expr.diff(v)
if isinstance(dedv, Derivative):
return dedv.func(dedv.expr, *(self.variable_count + dedv.variable_count))
# dedv (d(self.expr)/dv) could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when dedv is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(dedv, *self.variables, evaluate=True)
# In this case v was in self.variables so the derivative wrt v has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
variable_count = list(self.variable_count)
variable_count.append((v, 1))
return self.func(self.expr, *variable_count, evaluate=False)
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
rv = self.func(expr, *self.variable_count, **hints)
        if rv != self and rv.has(Derivative):
rv = rv.doit(**hints)
return rv
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
"""
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
f0 = f0.evalf(prec_to_dps(mpmath.mp.prec))
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
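    # A hedged usage sketch (assuming ``exp`` and a symbol ``x``; the exact
    # digits depend on mpmath's working precision):
    #
    #     Derivative(exp(x), x).doit_numerically(S(1))  # ~ a Float close to E
    #
    # Internally this hands a wrapper around the expression to mpmath.diff.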
@property
def expr(self):
return self._args[0]
@property
def _wrt_variables(self):
# return the variables of differentiation without
# respect to the type of count (int or symbolic)
return [i[0] for i in self.variable_count]
@property
def variables(self):
# TODO: deprecate? YES, make this 'enumerated_variables' and
# name _wrt_variables as variables
# TODO: support for `d^n`?
rv = []
for v, count in self.variable_count:
if not count.is_Integer:
raise TypeError(filldedent('''
Cannot give expansion for symbolic count. If you just
want a list of all variables of differentiation, use
_wrt_variables.'''))
rv.extend([v]*count)
return tuple(rv)
@property
def variable_count(self):
return self._args[1:]
@property
def derivative_count(self):
return sum([count for _, count in self.variable_count], 0)
@property
def free_symbols(self):
ret = self.expr.free_symbols
# Add symbolic counts to free_symbols
for _, count in self.variable_count:
ret.update(count.free_symbols)
return ret
@property
def kind(self):
return self.args[0].kind
def _eval_subs(self, old, new):
# The substitution (old, new) cannot be done inside
# Derivative(expr, vars) for a variety of reasons
# as handled below.
if old in self._wrt_variables:
# first handle the counts
expr = self.func(self.expr, *[(v, c.subs(old, new))
for v, c in self.variable_count])
if expr != self:
return expr._eval_subs(old, new)
# quick exit case
if not getattr(new, '_diff_wrt', False):
# case (0): new is not a valid variable of
# differentiation
if isinstance(old, Symbol):
# don't introduce a new symbol if the old will do
return Subs(self, old, new)
else:
xi = Dummy('xi')
return Subs(self.xreplace({old: xi}), xi, new)
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
if self.canonical == old.canonical:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all((a[i] <= b[i]) == True for i in a)
old_vars = Counter(dict(reversed(old.variable_count)))
self_vars = Counter(dict(reversed(self.variable_count)))
if _subset(old_vars, self_vars):
return _derivative_dispatch(new, *(self_vars - old_vars).items()).canonical
args = list(self.args)
newargs = list(x._subs(old, new) for x in args)
if args[0] == old:
# complete replacement of self.expr
# we already checked that the new is valid so we know
# it won't be a problem should it appear in variables
return _derivative_dispatch(*newargs)
if newargs[0] != args[0]:
# case (1) can't change expr by introducing something that is in
# the _wrt_variables if it was already in the expr
# e.g.
# for Derivative(f(x, g(y)), y), x cannot be replaced with
# anything that has y in it; for f(g(x), g(y)).diff(g(y))
# g(x) cannot be replaced with anything that has g(y)
syms = {vi: Dummy() for vi in self._wrt_variables
if not vi.is_Symbol}
wrt = {syms.get(vi, vi) for vi in self._wrt_variables}
forbidden = args[0].xreplace(syms).free_symbols & wrt
nfree = new.xreplace(syms).free_symbols
ofree = old.xreplace(syms).free_symbols
if (nfree - ofree) & forbidden:
return Subs(self, old, new)
viter = ((i, j) for ((i, _), (j, _)) in zip(newargs[1:], args[1:]))
if any(i != j for i, j in viter): # a wrt-variable change
# case (2) can't change vars by introducing a variable
# that is contained in expr, e.g.
# for Derivative(f(z, g(h(x), y)), y), y cannot be changed to
# x, h(x), or g(h(x), y)
for a in _atomic(self.expr, recursive=True):
for i in range(1, len(newargs)):
vi, _ = newargs[i]
if a == vi and vi != args[i][0]:
return Subs(self, old, new)
# more arg-wise checks
vc = newargs[1:]
oldv = self._wrt_variables
newe = self.expr
subs = []
for i, (vi, ci) in enumerate(vc):
if not vi._diff_wrt:
# case (3) invalid differentiation expression so
# create a replacement dummy
xi = Dummy('xi_%i' % i)
# replace the old valid variable with the dummy
# in the expression
newe = newe.xreplace({oldv[i]: xi})
# and replace the bad variable with the dummy
vc[i] = (xi, ci)
# and record the dummy with the new (invalid)
# differentiation expression
subs.append((xi, vi))
if subs:
# handle any residual substitution in the expression
newe = newe._subs(old, new)
# return the Subs-wrapped derivative
return Subs(Derivative(newe, *vc), *zip(*subs))
# everything was ok
return _derivative_dispatch(*newargs)
def _eval_lseries(self, x, logx, cdir=0):
dx = self.variables
for term in self.expr.lseries(x, logx=logx, cdir=cdir):
yield self.func(term, *dx)
def _eval_nseries(self, x, n, logx, cdir=0):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
dx = self.variables
rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x, logx=None, cdir=0):
series_gen = self.expr.lseries(x)
d = S.Zero
for leading_term in series_gen:
d = diff(leading_term, *self.variables)
if d != 0:
break
return d
def as_finite_difference(self, points=1, x0=None, wrt=None):
""" Expresses a Derivative instance as a finite difference.
Parameters
==========
points : sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. Default: 1 (step-size 1)
x0 : number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt : Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> f(x).diff(x).as_finite_difference()
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and
``order + 1`` respectively. We can change the step size by
passing a symbol as a parameter:
>>> f(x).diff(x).as_finite_difference(h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a
sequence:
>>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
To approximate ``Derivative`` around ``x0`` using a non-equidistant
spacing step, the algorithm supports assignment of undefined
functions to ``points``:
>>> dx = Function('dx')
>>> f(x).diff(x).as_finite_difference(points=dx(x), x0=x-h)
-f(-h + x - dx(-h + x)/2)/dx(-h + x) + f(-h + x + dx(-h + x)/2)/dx(-h + x)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> d2fdxdy.as_finite_difference(wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
We can apply ``as_finite_difference`` to ``Derivative`` instances in
compound expressions using ``replace``:
>>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
... lambda arg: arg.as_finite_difference())
42**(-f(x - 1/2) + f(x + 1/2)) + 1
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.differentiate_finite
sympy.calculus.finite_diff.finite_diff_weights
"""
from sympy.calculus.finite_diff import _as_finite_diff
return _as_finite_diff(self, points, x0, wrt)
@classmethod
def _get_zero_with_shape_like(cls, expr):
return S.Zero
@classmethod
def _dispatch_eval_derivative_n_times(cls, expr, v, count):
# Evaluate the derivative `n` times. If
# `_eval_derivative_n_times` is not overridden by the current
# object, the default in `Basic` will call a loop over
# `_eval_derivative`:
return expr._eval_derivative_n_times(v, count)
def _derivative_dispatch(expr, *variables, **kwargs):
from sympy.matrices.common import MatrixCommon
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.tensor.array import NDimArray
array_types = (MatrixCommon, MatrixExpr, NDimArray, list, tuple, Tuple)
if isinstance(expr, array_types) or any(isinstance(i[0], array_types) if isinstance(i, (tuple, list, Tuple)) else isinstance(i, array_types) for i in variables):
from sympy.tensor.array.array_derivatives import ArrayDerivative
return ArrayDerivative(expr, *variables, **kwargs)
return Derivative(expr, *variables, **kwargs)
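# A short sketch of the dispatch above (assuming ``Matrix`` and a symbol x;
# results described, not asserted): _derivative_dispatch(Matrix([x**2, x]), x)
# is routed to ArrayDerivative because Matrix is a MatrixCommon, while
# _derivative_dispatch(x**2, x) falls through to an ordinary Derivative.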
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
Examples
========
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
It is also possible to unpack tuple arguments:
>>> f = Lambda(((x, y), z), x + y + z)
>>> f((1, 2), 3)
6
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, signature, expr):
if iterable(signature) and not isinstance(signature, (tuple, Tuple)):
sympy_deprecation_warning(
"""
Using a non-tuple iterable as the first argument to Lambda
is deprecated. Use Lambda(tuple(args), expr) instead.
""",
deprecated_since_version="1.5",
active_deprecations_target="deprecated-non-tuple-lambda",
)
signature = tuple(signature)
sig = signature if iterable(signature) else (signature,)
sig = sympify(sig)
cls._check_signature(sig)
if len(sig) == 1 and sig[0] == expr:
return S.IdentityFunction
return Expr.__new__(cls, sig, sympify(expr))
@classmethod
def _check_signature(cls, sig):
syms = set()
def rcheck(args):
for a in args:
if a.is_symbol:
if a in syms:
raise BadSignatureError("Duplicate symbol %s" % a)
syms.add(a)
elif isinstance(a, Tuple):
rcheck(a)
else:
raise BadSignatureError("Lambda signature should be only tuples"
" and symbols, not %s" % a)
if not isinstance(sig, Tuple):
raise BadSignatureError("Lambda signature should be a tuple not %s" % sig)
# Recurse through the signature:
rcheck(sig)
@property
def signature(self):
"""The expected form of the arguments to be unpacked into variables"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def variables(self):
"""The variables used in the internal representation of the function"""
def _variables(args):
if isinstance(args, Tuple):
for arg in args:
yield from _variables(arg)
else:
yield args
return tuple(_variables(self.signature))
@property
def nargs(self):
from sympy.sets.sets import FiniteSet
return FiniteSet(len(self.signature))
bound_symbols = variables
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise BadArgumentsError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
d = self._match_signature(self.signature, args)
return self.expr.xreplace(d)
def _match_signature(self, sig, args):
symargmap = {}
def rmatch(pars, args):
for par, arg in zip(pars, args):
if par.is_symbol:
symargmap[par] = arg
elif isinstance(par, Tuple):
if not isinstance(arg, (tuple, Tuple)) or len(args) != len(pars):
raise BadArgumentsError("Can't match %s and %s" % (args, pars))
rmatch(par, arg)
rmatch(sig, args)
return symargmap
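    # Sketch of how the matching above unpacks a nested signature (assuming
    # symbols x, y, z): for Lambda(((x, y), z), x + y + z), the call
    # f((1, 2), 3) builds the map {x: 1, y: 2, z: 3}, which __call__ then
    # applies with xreplace.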
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
return self.signature == self.expr
def _eval_evalf(self, prec):
return self.func(self.args[0], self.args[1].evalf(n=prec_to_dps(prec)))
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` represents the expression resulting
from substituting x with x0 in expr.
Parameters
==========
expr : Expr
An expression.
x : tuple, variable
A variable or list of distinct variables.
x0 : tuple or list of tuples
A point or list of evaluation points
corresponding to those variables.
Notes
=====
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
Examples
========
>>> from sympy import Subs, Function, sin, cos
>>> from sympy.abc import x, y, z
>>> f = Function('f')
Subs are created when a particular substitution cannot be made. The
x in the derivative cannot be replaced with 0 because 0 is not a
    valid variable of differentiation:
>>> f(x).diff(x).subs(x, 0)
Subs(Derivative(f(x), x), x, 0)
Once f is known, the derivative and evaluation at 0 can be done:
>>> _.subs(f, sin).doit() == sin(x).diff(x).subs(x, 0) == cos(0)
True
Subs can also be created directly with one or more variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
Notes
=====
In order to allow expressions to combine before doit is done, a
representation of the Subs expression is used internally to make
expressions that are superficially different compare the same:
>>> a, b = Subs(x, x, 0), Subs(y, y, 0)
>>> a + b
2*Subs(x, x, 0)
This can lead to unexpected consequences when using methods
like `has` that are cached:
>>> s = Subs(x, x, 0)
>>> s.has(x), s.has(y)
(True, False)
>>> ss = s.subs(x, y)
>>> ss.has(x), ss.has(y)
(True, False)
>>> s, ss
(Subs(x, x, 0), Subs(y, y, 0))
"""
def __new__(cls, expr, variables, point, **assumptions):
if not is_sequence(variables, Tuple):
variables = [variables]
variables = Tuple(*variables)
if has_dups(variables):
repeated = [str(v) for v, i in Counter(variables).items() if i > 1]
__ = ', '.join(repeated)
raise ValueError(filldedent('''
The following expressions appear more than once: %s
''' % __))
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
if not point:
return sympify(expr)
# denest
if isinstance(expr, Subs):
variables = expr.variables + variables
point = expr.point + point
expr = expr.expr
else:
expr = sympify(expr)
# use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
from sympy.printing.str import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.xreplace(dict(reps))
return obj
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self, **hints):
e, v, p = self.args
# remove self mappings
for i, (vi, pi) in enumerate(zip(v, p)):
if vi == pi:
v = v[:i] + v[i + 1:]
p = p[:i] + p[i + 1:]
if not v:
return self.expr
if isinstance(e, Derivative):
# apply functions first, e.g. f -> cos
undone = []
for i, vi in enumerate(v):
if isinstance(vi, FunctionClass):
e = e.subs(vi, p[i])
else:
undone.append((vi, p[i]))
if not isinstance(e, Derivative):
e = e.doit()
if isinstance(e, Derivative):
# do Subs that aren't related to differentiation
undone2 = []
D = Dummy()
arg = e.args[0]
for vi, pi in undone:
if D not in e.xreplace({vi: D}).free_symbols:
if arg.has(vi):
e = e.subs(vi, pi)
else:
undone2.append((vi, pi))
undone = undone2
# differentiate wrt variables that are present
wrt = []
D = Dummy()
expr = e.expr
free = expr.free_symbols
for vi, ci in e.variable_count:
if isinstance(vi, Symbol) and vi in free:
expr = expr.diff((vi, ci))
elif D in expr.subs(vi, D).free_symbols:
expr = expr.diff((vi, ci))
else:
wrt.append((vi, ci))
# inject remaining subs
rv = expr.subs(undone)
# do remaining differentiation *in order given*
for vc in wrt:
rv = rv.diff(vc)
else:
# inject remaining subs
rv = e.subs(undone)
else:
rv = e.doit(**hints).subs(list(zip(v, p)))
if hints.get('deep', True) and rv != self:
rv = rv.doit(**hints)
return rv
def evalf(self, prec=None, **options):
return self.doit().evalf(prec, **options)
n = evalf # type:ignore
@property
def variables(self):
"""The variables to be evaluated"""
return self._args[1]
bound_symbols = variables
@property
def expr(self):
"""The expression on which the substitution operates"""
return self._args[0]
@property
def point(self):
"""The values for which the variables are to be substituted"""
return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
@property
def expr_free_symbols(self):
sympy_deprecation_warning("""
The expr_free_symbols property is deprecated. Use free_symbols to get
the free symbols of an expression.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-expr-free-symbols")
# Don't show the warning twice from the recursive call
with ignore_warnings(SymPyDeprecationWarning):
return (self.expr.expr_free_symbols - set(self.variables) |
set(self.point.expr_free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super().__hash__()
def _hashable_content(self):
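# Hash/equality is based on the point-substituted expression stored in
# self._expr (so the names of the substitution variables themselves do
# not matter), together with any (variable, point) pairs whose variable
# does not actually appear in the expression.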
return (self._expr.xreplace(self.canonical_variables),
) + tuple(ordered([(v, p) for v, p in
zip(self.variables, self.point) if not self.expr.has(v)]))
def _eval_subs(self, old, new):
# Subs doit will do the variables in order; the semantics
# of subs for Subs is have the following invariant for
# Subs object foo:
# foo.doit().subs(reps) == foo.subs(reps).doit()
pt = list(self.point)
if old in self.variables:
if _atomic(new) == {new} and not any(
i.has(new) for i in self.args):
# the substitution is neutral
return self.xreplace({old: new})
# any occurrence of old before this point will get
# handled by replacements from here on
i = self.variables.index(old)
for j in range(i, len(self.variables)):
pt[j] = pt[j]._subs(old, new)
return self.func(self.expr, self.variables, pt)
v = [i._subs(old, new) for i in self.variables]
if v != list(self.variables):
return self.func(self.expr, self.variables + (old,), pt + [new])
expr = self.expr._subs(old, new)
pt = [i._subs(old, new) for i in self.point]
return self.func(expr, v, pt)
def _eval_derivative(self, s):
# Apply the chain rule of the derivative on the substitution variables:
f = self.expr
vp = V, P = self.variables, self.point
val = Add.fromiter(p.diff(s)*Subs(f.diff(v), *vp).doit()
for v, p in zip(V, P))
# these are all the free symbols in the expr
efree = f.free_symbols
# some symbols like IndexedBase include themselves and args
# as free symbols
compound = {i for i in efree if len(i.free_symbols) > 1}
# hide them and see what independent free symbols remain
dums = {Dummy() for i in compound}
masked = f.xreplace(dict(zip(compound, dums)))
ifree = masked.free_symbols - dums
# include the compound symbols
free = ifree | compound
# remove the variables already handled
free -= set(V)
# add back any free symbols of remaining compound symbols
free |= {i for j in free & compound for i in j.free_symbols}
# if symbols of s are in free then there is more to do
if free & s.free_symbols:
val += Subs(f.diff(s), self.variables, self.point).doit()
return val
def _eval_nseries(self, x, n, logx, cdir=0):
if x in self.point:
# x is the variable being substituted into
apos = self.point.index(x)
other = self.variables[apos]
else:
other = x
arg = self.expr.nseries(other, n=n, logx=logx)
o = arg.getO()
terms = Add.make_args(arg.removeO())
rv = Add(*[self.func(a, *self.args[1:]) for a in terms])
if o:
rv += o.subs(other, x)
return rv
def _eval_as_leading_term(self, x, logx=None, cdir=0):
if x in self.point:
ipos = self.point.index(x)
xvar = self.variables[ipos]
return self.expr.as_leading_term(xvar)
if x in self.variables:
# if `x` is a dummy variable, it means it won't exist after the
# substitution has been performed:
return self
# The variable is independent of the substitution:
return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
Explanation
===========
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
that if there are 0 symbols (such as diff(f(x), x, 0)), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), (x, 3))
>>> diff(f(x), x, 3)
Derivative(f(x), (x, 3))
>>> diff(sin(x)*cos(y), x, 2, y, 2)
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
.. [1] http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
idiff: computes the derivative implicitly
"""
if hasattr(f, 'diff'):
return f.diff(*symbols, **kwargs)
kwargs.setdefault('evaluate', True)
return _derivative_dispatch(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""
Expand an expression using methods given as hints.
Explanation
===========
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, *, force=False, **hints):
... '''
... Doubles the args of MyClass.
...
... If there are more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, sympy.simplify.hyperexpand.hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
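# e.g. _mexpand((x + 1)**2) -> x**2 + 2*x + 1 in a single pass; with
# recursive=True the two expansions are repeated until the expression
# stops changing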
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False, factor=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(log(x) + 2*log(y))*exp(x + y)
"""
from sympy.functions.elementary.exponential import log
if factor is False:
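# When not factoring, pre-process Mul subexpressions whose numerator
# and denominator each contain a log of a Rational: expand them with
# factor=True and keep the result only if it does not increase the
# number of log terms (see _handle below).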
def _handle(x):
x1 = expand_mul(expand_log(x, deep=deep, force=force, factor=True))
if x1.count(log) <= x.count(log):
return x1
return x
expr = expr.replace(
lambda x: x.is_Mul and all(any(isinstance(i, log) and i.args[0].is_Rational
for i in Mul.make_args(j)) for j in x.as_numer_denom()),
_handle)
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force, factor=factor)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_trig, sin
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_complex, exp, sqrt, I
>>> from sympy.abc import z
>>> expand_complex(exp(z))
I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
sympy.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) will only apply to the top-level expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> from sympy.abc import x, y, z
>>> from sympy import expand_power_base, sin, cos, exp, Symbol
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*exp(y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y**(z + 1)
The power that is unexpanded can be expanded safely when
``y != 0``, otherwise different values might be obtained for the expression:
>>> prev = _
If we indicate that ``y`` is positive but then replace it with
a value of 0 after expansion, the expression becomes 0:
>>> p = Symbol('p', positive=True)
>>> prev.subs(y, p).expand().subs(p, 0)
0
But if ``z = -1`` the expression would not be zero:
>>> prev.subs(y, 0).subs(z, -1)
1
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
See the expand docstring for more information.
Examples
========
>>> from sympy import expand_power_exp, Symbol
>>> from sympy.abc import x, y
>>> expand_power_exp(3**(y + 2))
9*3**y
>>> expand_power_exp(x**(y + 2))
x**(y + 2)
If ``x = 0`` the value of the expression depends on the
value of ``y``; if the expression were expanded the result
would be 0. So expansion is only done if ``x != 0``:
>>> expand_power_exp(Symbol('x', zero=False)**(y + 2))
x**2*x**y
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
Parameters
==========
expr : Expr
If expr is an iterable, the sum of the op counts of the
items will be returned.
visual : bool, optional
If ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
Examples
========
>>> from sympy.abc import a, b, x, y
>>> from sympy import sin, count_ops
Although there is not a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by SymPy into 1/(x*y) so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq=x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from .relational import Relational
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
from sympy.logic.boolalg import BooleanFunction
from sympy.simplify.radsimp import fraction
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
EXP = Symbol('EXP')
while args:
a = args.pop()
# if the following fails because the object is
# not Basic type, then the object should be fixed
# since it is the intention that all args of Basic
# should themselves be Basic
if a.is_Rational:
#-1/3 = NEG + DIV
if a is not S.One:
if a.p < 0:
ops.append(NEG)
if a.q != 1:
ops.append(DIV)
continue
elif a.is_Mul or a.is_MatMul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
if n < 0:
ops.append(NEG)
args.append(d)
continue # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add or a.is_MatAdd:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
continue
if a.is_Pow and a.exp is S.NegativeOne:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if a == S.Exp1:
ops.append(EXP)
continue
if a.is_Pow and a.base == S.Exp1:
ops.append(EXP)
args.append(a.exp)
continue
if a.is_Mul or isinstance(a, LatticeOp):
o = Symbol(a.func.__name__.upper())
# count the args
ops.append(o*(len(a.args) - 1))
elif a.args and (
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral) or
isinstance(a, Sum)):
# if it's not in the list above we don't
# consider a.func something to count, e.g.
# Tuple, MatrixSymbol, etc...
if isinstance(a.func, UndefinedFunction):
o = Symbol("FUNC_" + a.func.__name__.upper())
else:
o = Symbol(a.func.__name__.upper())
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif isinstance(expr, Dict):
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, (Relational, BooleanFunction)):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(func_name(expr, short=True).upper())
ops.append(o)
elif not isinstance(expr, Basic):
ops = []
else: # it's a Basic but not an Expr
if not isinstance(expr, Basic):
raise TypeError("Invalid type of expr")
else:
ops = []
args = [expr]
while args:
a = args.pop()
if a.args:
o = Symbol(type(a).__name__.upper())
if a.is_Boolean:
ops.append(o*(len(a.args)-1))
else:
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return S.Zero
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
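# otherwise sum the integer coefficients of the visual op terms,
# e.g. 2*ADD + SIN counts as 2 + 1 = 3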
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False, dkeys=False):
"""Make all Rationals in expr Floats except those in exponents
(unless the exponent flag is set to True) and those in undefined
functions. When processing dictionaries, do not modify the keys
unless ``dkeys=True``.
Examples
========
>>> from sympy import nfloat, cos, pi, sqrt
>>> from sympy.abc import x, y
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
Container types are not modified:
>>> type(nfloat((1, 2))) is tuple
True
"""
from sympy.matrices.matrices import MatrixBase
kw = dict(n=n, exponent=exponent, dkeys=dkeys)
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda e: nfloat(e, **kw))
# handling of iterable containers
if iterable(expr, exclude=str):
if isinstance(expr, (dict, Dict)):
if dkeys:
args = [tuple(map(lambda i: nfloat(i, **kw), a))
for a in expr.items()]
else:
args = [(k, nfloat(v, **kw)) for k, v in expr.items()]
if isinstance(expr, dict):
return type(expr)(args)
else:
return expr.func(*args)
elif isinstance(expr, Basic):
return expr.func(*[nfloat(a, **kw) for a in expr.args])
return type(expr)([nfloat(a, **kw) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.n(n)
if rv.is_Number:
rv = Float(rv.n(n), n)
else:
pass # pure_complex(rv) is likely True
return rv
elif rv.is_Atom:
return rv
elif rv.is_Relational:
args_nfloat = (nfloat(arg, **kw) for arg in rv.args)
return rv.func(*args_nfloat)
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue 6393)
from sympy.polys.rootoftools import RootOf
rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})
from .power import Pow
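# mask exponents with Dummy symbols so that the call to .n() below only
# floats the bases, then restore the original exponents afterwards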
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.n(n)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special cases Integer exponents so if
# exponents are supposed to be handled, we have to do so here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function) and not isinstance(x, AppliedUndef)))
from .symbol import Dummy, Symbol
"""
Module to efficiently partition SymPy objects.
This system is introduced because the class of a SymPy object does not
always represent the mathematical classification of the entity. For
example, ``Integral(1, x)`` and ``Integral(Matrix([1,2]), x)`` are both
instances of the ``Integral`` class. However, the former is a number and
the latter is a matrix.
One way to resolve this is to define a subclass for each mathematical
type, such as ``MatAdd`` for addition between matrices. Basic algebraic
operations such as addition or multiplication take this approach, but
defining a class for every mathematical object is not scalable.
Therefore, we define the "kind" of an object and let the expression
infer its own kind from its arguments. Functions and classes can filter
their arguments by kind and behave differently according to their own
kind.
This module defines basic kinds for core objects. Other kinds such as
``ArrayKind`` or ``MatrixKind`` can be found in corresponding modules.
.. note::
This approach is experimental, and can be replaced or deleted in the future.
See https://github.com/sympy/sympy/pull/20549.
"""
from collections import defaultdict
from .cache import cacheit
from sympy.multipledispatch.dispatcher import (Dispatcher,
ambiguity_warn, ambiguity_register_error_ignore_dup,
str_signature, RaiseNotImplementedError)
class KindMeta(type):
"""
Metaclass for ``Kind``.
Assigns empty ``dict`` as class attribute ``_inst`` for every class,
in order to endow singleton-like behavior.
"""
def __new__(cls, clsname, bases, dct):
dct['_inst'] = {}
return super().__new__(cls, clsname, bases, dct)
class Kind(object, metaclass=KindMeta):
"""
Base class for kinds.
The kind of an object represents the mathematical classification that
the entity falls into. It is expected that functions and classes
recognize and filter arguments by their kind.
The kind of every object must be carefully selected so that it reflects
the intention of the design. Expressions may have a different kind
according to the kind of their arguments. For example, the arguments of
``Add`` must have a common kind since addition is a group operation, and
the resulting ``Add()`` has the same kind.
For performance, each kind is as broad as possible and is not based on
set theory. For example, ``NumberKind`` includes not only complex
numbers but also expressions containing ``S.Infinity`` or ``S.NaN``,
which are not strictly numbers.
A kind may take arguments as parameters. For example, ``MatrixKind()``
may be constructed with one argument which represents the kind of its
elements.
``Kind`` behaves in singleton-like fashion: the same signature will
return the same object.
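A minimal sketch of this behaviour, using the zero-argument
``NumberKind`` instance defined later in this module:
>>> from sympy.core.kind import NumberKind, _NumberKind
>>> _NumberKind() is NumberKind
True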
"""
def __new__(cls, *args):
if args in cls._inst:
inst = cls._inst[args]
else:
inst = super().__new__(cls)
cls._inst[args] = inst
return inst
class _UndefinedKind(Kind):
"""
Default kind for all SymPy objects. If the kind is not defined for
the object, or if the object cannot infer its kind from its
arguments, this will be returned.
Examples
========
>>> from sympy import Expr
>>> Expr().kind
UndefinedKind
"""
def __new__(cls):
return super().__new__(cls)
def __repr__(self):
return "UndefinedKind"
UndefinedKind = _UndefinedKind()
class _NumberKind(Kind):
"""
Kind for all numeric objects.
This kind represents every number, including complex numbers,
infinity and ``S.NaN``. Other objects such as quaternions do not
have this kind.
Most ``Expr`` subclasses are designed to represent numbers, so this
will be the most common kind in SymPy core. For example,
``Symbol()``, which represents a scalar, has this kind as long as it
is commutative.
Numbers form a field. Any operation between number-kind objects will
result in this kind as well.
Examples
========
>>> from sympy import S, oo, Symbol
>>> S.One.kind
NumberKind
>>> (-oo).kind
NumberKind
>>> S.NaN.kind
NumberKind
Commutative symbols are treated as numbers.
>>> x = Symbol('x')
>>> x.kind
NumberKind
>>> Symbol('y', commutative=False).kind
UndefinedKind
An operation between numbers results in a number.
>>> (x+1).kind
NumberKind
See Also
========
sympy.core.expr.Expr.is_Number : check if the object is strictly
subclass of ``Number`` class.
sympy.core.expr.Expr.is_number : check if the object is number
without any free symbol.
"""
def __new__(cls):
return super().__new__(cls)
def __repr__(self):
return "NumberKind"
NumberKind = _NumberKind()
class _BooleanKind(Kind):
"""
Kind for boolean objects.
SymPy's ``S.true``, ``S.false``, and built-in ``True`` and ``False``
have this kind. The numbers ``1`` and ``0`` are not of this kind.
Examples
========
>>> from sympy import S, Q
>>> S.true.kind
BooleanKind
>>> Q.even(3).kind
BooleanKind
"""
def __new__(cls):
return super().__new__(cls)
def __repr__(self):
return "BooleanKind"
BooleanKind = _BooleanKind()
class KindDispatcher:
"""
Dispatcher to select a kind from multiple kinds by binary dispatching.
.. note::
This approach is experimental, and can be replaced or deleted in
the future.
Explanation
===========
A SymPy object's :obj:`sympy.core.kind.Kind()` loosely represents the
algebraic structure that the object belongs to. Therefore, for a
given operation, we can always find a dominating kind among the
different kinds. This class selects that kind by recursive binary
dispatching. If the result cannot be determined, ``UndefinedKind``
is returned.
Examples
========
Multiplication between numbers returns a number.
>>> from sympy import NumberKind, Mul
>>> Mul._kind_dispatcher(NumberKind, NumberKind)
NumberKind
Multiplication between a number and an unknown-kind object returns an unknown kind.
>>> from sympy import UndefinedKind
>>> Mul._kind_dispatcher(NumberKind, UndefinedKind)
UndefinedKind
Any number and order of kinds is allowed.
>>> Mul._kind_dispatcher(UndefinedKind, NumberKind)
UndefinedKind
>>> Mul._kind_dispatcher(NumberKind, UndefinedKind, NumberKind)
UndefinedKind
Since matrices form a vector space over a scalar field, multiplication
between a matrix with numeric elements and a number returns a matrix
with numeric elements.
>>> from sympy.matrices import MatrixKind
>>> Mul._kind_dispatcher(MatrixKind(NumberKind), NumberKind)
MatrixKind(NumberKind)
If a matrix with numeric elements and another matrix with unknown-kind
elements are multiplied, we know that the result is a matrix but the
kind of its elements is unknown.
>>> Mul._kind_dispatcher(MatrixKind(NumberKind), MatrixKind(UndefinedKind))
MatrixKind(UndefinedKind)
Parameters
==========
name : str
commutative : bool, optional
If True, binary dispatch will be automatically registered in
reversed order as well.
doc : str, optional
"""
def __init__(self, name, commutative=False, doc=None):
self.name = name
self.doc = doc
self.commutative = commutative
self._dispatcher = Dispatcher(name)
def __repr__(self):
return "<dispatched %s>" % self.name
def register(self, *types, **kwargs):
"""
Register the binary dispatcher for two kind classes.
If *self.commutative* is ``True``, signature in reversed order is
automatically registered as well.
"""
on_ambiguity = kwargs.pop("on_ambiguity", None)
if not on_ambiguity:
if self.commutative:
on_ambiguity = ambiguity_register_error_ignore_dup
else:
on_ambiguity = ambiguity_warn
kwargs.update(on_ambiguity=on_ambiguity)
if not len(types) == 2:
raise RuntimeError(
"Only binary dispatch is supported, but got %s types: <%s>." % (
len(types), str_signature(types)
))
def _(func):
self._dispatcher.add(types, func, **kwargs)
if self.commutative:
self._dispatcher.add(tuple(reversed(types)), func, **kwargs)
return _
def __call__(self, *args, **kwargs):
if self.commutative:
kinds = frozenset(args)
else:
kinds = []
prev = None
for a in args:
if prev is not a:
kinds.append(a)
prev = a
return self.dispatch_kinds(kinds, **kwargs)
@cacheit
def dispatch_kinds(self, kinds, **kwargs):
# Quick exit for the case where all kinds are same
if len(kinds) == 1:
result, = kinds
if not isinstance(result, Kind):
raise RuntimeError("%s is not a kind." % result)
return result
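# otherwise fold the kinds pairwise from the left: at each step the
# binary dispatcher is consulted with (result so far, next kind)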
for i,kind in enumerate(kinds):
if not isinstance(kind, Kind):
raise RuntimeError("%s is not a kind." % kind)
if i == 0:
result = kind
else:
prev_kind = result
t1, t2 = type(prev_kind), type(kind)
k1, k2 = prev_kind, kind
func = self._dispatcher.dispatch(t1, t2)
if func is None and self.commutative:
# try reversed order
func = self._dispatcher.dispatch(t2, t1)
k1, k2 = k2, k1
if func is None:
# unregistered kind relation
result = UndefinedKind
else:
result = func(k1, k2)
if not isinstance(result, Kind):
raise RuntimeError(
"Dispatcher for {!r} and {!r} must return a Kind, but got {!r}".format(
prev_kind, kind, result
))
return result
@property
def __doc__(self):
docs = [
"Kind dispatcher : %s" % self.name,
"Note that support for this is experimental. See the docs for :class:`KindDispatcher` for details"
]
if self.doc:
docs.append(self.doc)
s = "Registered kind classes\n"
s += '=' * len(s)
docs.append(s)
amb_sigs = []
typ_sigs = defaultdict(list)
for sigs in self._dispatcher.ordering[::-1]:
key = self._dispatcher.funcs[sigs]
typ_sigs[key].append(sigs)
for func, sigs in typ_sigs.items():
sigs_str = ', '.join('<%s>' % str_signature(sig) for sig in sigs)
if isinstance(func, RaiseNotImplementedError):
amb_sigs.append(sigs_str)
continue
s = 'Inputs: %s\n' % sigs_str
s += '-' * len(s) + '\n'
if func.__doc__:
s += func.__doc__.strip()
else:
s += func.__name__
docs.append(s)
if amb_sigs:
s = "Ambiguous kind classes\n"
s += '=' * len(s)
docs.append(s)
s = '\n'.join(amb_sigs)
docs.append(s)
return '\n\n'.join(docs)
from typing import Tuple as tTuple
from collections import defaultdict
from functools import cmp_to_key, reduce
from operator import attrgetter
from .basic import Basic
from .parameters import global_parameters
from .logic import _fuzzy_group, fuzzy_or, fuzzy_not
from .singleton import S
from .operations import AssocOp, AssocOpDispatcher
from .cache import cacheit
from .numbers import ilcm, igcd
from .expr import Expr
from .kind import UndefinedKind
from sympy.utilities.iterables import is_sequence, sift
# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)
def _could_extract_minus_sign(expr):
# assume expr is Add-like
# We choose the one with less arguments with minus signs
negative_args = sum(1 for i in expr.args
if i.could_extract_minus_sign())
positive_args = len(expr.args) - negative_args
if positive_args > negative_args:
return False
elif positive_args < negative_args:
return True
# choose based on .sort_key() to prefer
# x - 1 instead of 1 - x and
# 3 - sqrt(2) instead of -3 + sqrt(2)
return bool(expr.sort_key() < (-expr).sort_key())
def _addsort(args):
# in-place sorting of args
args.sort(key=_args_sortkey)
def _unevaluated_Add(*args):
"""Return a well-formed unevaluated Add: Numbers are collected and
put in slot 0 and args are sorted. Use this when args have changed
but you still want to return an unevaluated Add.
Examples
========
>>> from sympy.core.add import _unevaluated_Add as uAdd
>>> from sympy import S, Add
>>> from sympy.abc import x, y
>>> a = uAdd(*[S(1.0), x, S(2)])
>>> a.args[0]
3.00000000000000
>>> a.args[1]
x
Beyond the Number being in slot 0, there is no other assurance of
order for the arguments since they are hash sorted. So, for testing
purposes, output produced by this in some other function can only
be tested against the output of this function or as one of several
options:
>>> opts = (Add(x, y, evaluate=False), Add(y, x, evaluate=False))
>>> a = uAdd(x, y)
>>> assert a in opts and a == uAdd(x, y)
>>> uAdd(x + 1, x + 2)
x + x + 3
"""
args = list(args)
newargs = []
co = S.Zero
while args:
a = args.pop()
if a.is_Add:
# this will keep nesting from building up
# so that x + (x + 1) -> x + x + 1 (3 args)
args.extend(a.args)
elif a.is_Number:
co += a
else:
newargs.append(a)
_addsort(newargs)
if co:
newargs.insert(0, co)
return Add._from_args(newargs)
class Add(Expr, AssocOp):
"""
Expression representing the addition operation for algebraic groups.
.. deprecated:: 1.7
Using arguments that aren't subclasses of :class:`~.Expr` in core
operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is
deprecated. See :ref:`non-expr-args-deprecated` for details.
Every argument of ``Add()`` must be ``Expr``. Infix operator ``+``
on most scalar objects in SymPy calls this class.
Another use of ``Add()`` is to represent the structure of abstract
addition so that its arguments can be substituted to return a different
class. Refer to the examples section for this.
``Add()`` evaluates the argument unless ``evaluate=False`` is passed.
The evaluation logic includes:
1. Flattening
``Add(x, Add(y, z))`` -> ``Add(x, y, z)``
2. Identity removing
``Add(x, 0, y)`` -> ``Add(x, y)``
3. Coefficient collecting by ``.as_coeff_Mul()``
``Add(x, 2*x)`` -> ``Mul(3, x)``
4. Term sorting
``Add(y, x, 2)`` -> ``Add(2, x, y)``
If no argument is passed, the identity element 0 is returned. If a
single element is passed, that element is returned.
Note that ``Add(*args)`` is more efficient than ``sum(args)`` because
it flattens the arguments. ``sum(a, b, c, ...)`` recursively adds the
arguments as ``a + (b + (c + ...))``, which has quadratic complexity.
On the other hand, ``Add(a, b, c, d)`` does not assume nested
structure, making the complexity linear.
Since addition is a group operation, every argument should have the
same :obj:`sympy.core.kind.Kind()`.
Examples
========
>>> from sympy import Add, I
>>> from sympy.abc import x, y
>>> Add(x, 1)
x + 1
>>> Add(x, x)
2*x
>>> 2*x**2 + 3*x + I*y + 2*y + 2*x/5 + 1.0*y + 1
2*x**2 + 17*x/5 + 3.0*y + I*y + 1
If ``evaluate=False`` is passed, result is not evaluated.
>>> Add(1, 2, evaluate=False)
1 + 2
>>> Add(x, x, evaluate=False)
x + x
``Add()`` also represents the general structure of addition operation.
>>> from sympy import MatrixSymbol
>>> A,B = MatrixSymbol('A', 2,2), MatrixSymbol('B', 2,2)
>>> expr = Add(x,y).subs({x:A, y:B})
>>> expr
A + B
>>> type(expr)
<class 'sympy.matrices.expressions.matadd.MatAdd'>
Note that the printers do not display in args order.
>>> Add(x, 1)
x + 1
>>> Add(x, 1).args
(1, x)
See Also
========
MatAdd
"""
__slots__ = ()
args: tTuple[Expr, ...]
is_Add = True
_args_type = Expr
@classmethod
def flatten(cls, seq):
"""
Takes the sequence "seq" of nested Adds and returns a flatten list.
Returns: (commutative_part, noncommutative_part, order_symbols)
Applies associativity, all terms are commutable with respect to
addition.
NB: the removal of 0 is already handled by AssocOp.__new__
See also
========
sympy.core.mul.Mul.flatten
"""
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.matrices.expressions import MatrixExpr
from sympy.tensor.tensor import TensExpr
rv = None
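# fast path: a Rational plus a single Mul needs no collection of like
# terms, so handle it without building the full terms dictionary below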
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
if a.is_Rational:
if b.is_Mul:
rv = [a, b], [], None
if rv:
if all(s.is_commutative for s in rv[0]):
return rv
return [], rv[0], None
terms = {} # term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
coeff = S.Zero # coefficient (Number or zoo) to always be in slot 0
# e.g. 3 + ...
order_factors = []
extra = []
for o in seq:
# O(x)
if o.is_Order:
if o.expr.is_zero:
continue
for o1 in order_factors:
if o1.contains(o):
o = None
break
if o is None:
continue
order_factors = [o] + [
o1 for o1 in order_factors if not o.contains(o1)]
continue
# 3 or NaN
elif o.is_Number:
if (o is S.NaN or coeff is S.ComplexInfinity and
o.is_finite is False) and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number or isinstance(coeff, AccumBounds):
coeff += o
if coeff is S.NaN and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif isinstance(o, AccumBounds):
coeff = o.__add__(coeff)
continue
elif isinstance(o, MatrixExpr):
# can't add 0 to Matrix so make sure coeff is not 0
extra.append(o)
continue
elif isinstance(o, TensExpr):
coeff = o.__add__(coeff) if coeff else o
continue
elif o is S.ComplexInfinity:
if coeff.is_finite is False and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
seq.extend(o.args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c, s = o.as_coeff_Mul()
# check for unevaluated Pow, e.g. 2**3 or 2**(-1/2)
elif o.is_Pow:
b, e = o.as_base_exp()
if b.is_Number and (e.is_Integer or
(e.is_Rational and e.is_negative)):
seq.append(b**e)
continue
c, s = S.One, o
else:
# everything else
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
if terms[s] is S.NaN and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s, c in terms.items():
# 0*s
if c.is_zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
# Mul, already keeps its arguments in perfect order.
# so we can simply put c in slot0 and go the fast way.
cs = s._new_rawargs(*((c,) + s.args))
newseq.append(cs)
elif s.is_Add:
# we just re-create the unevaluated Mul
newseq.append(Mul(c, s, evaluate=False))
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c, s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not (f.is_extended_nonnegative or f.is_real)]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not (f.is_extended_nonpositive or f.is_real)]
if coeff is S.ComplexInfinity:
# zoo might be
# infinite_real + finite_im
# finite_real + infinite_im
# infinite_real + infinite_im
# addition of a finite real or imaginary number won't be able to
# change the zoo nature; adding an infinite quantity would result
# in a NaN condition if it had sign opposite of the infinite
# portion of zoo, e.g., infinite_real - infinite_real.
newseq = [c for c in newseq if not (c.is_finite and
c.is_extended_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
for o in order_factors:
# x + O(x) -> O(x)
if o.contains(t):
t = None
break
# x + O(x**2) -> x + O(x**2)
if t is not None:
newseq2.append(t)
newseq = newseq2 + order_factors
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
_addsort(newseq)
# current code expects coeff to be first
if coeff is not S.Zero:
newseq.insert(0, coeff)
if extra:
newseq += extra
noncommutative = True
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
@classmethod
def class_key(cls):
"""Nice order of classes"""
return 3, 1, cls.__name__
@property
def kind(self):
k = attrgetter('kind')
kinds = map(k, self.args)
kinds = frozenset(kinds)
if len(kinds) != 1:
# Since addition is a group operation, all args must have the same kind.
# A mismatch is an unexpected signature, so return UndefinedKind.
result = UndefinedKind
else:
result, = kinds
return result
def could_extract_minus_sign(self):
return _could_extract_minus_sign(self)
@cacheit
def as_coeff_add(self, *deps):
"""
Returns a tuple (coeff, args) where self is treated as an Add and coeff
is the Number term and args is a tuple of all other terms.
Examples
========
>>> from sympy.abc import x
>>> (7 + 3*x).as_coeff_add()
(7, (3*x,))
>>> (7*x).as_coeff_add()
(0, (7*x,))
"""
if deps:
l1, l2 = sift(self.args, lambda x: x.has_free(*deps), binary=True)
return self._new_rawargs(*l2), tuple(l1)
coeff, notrat = self.args[0].as_coeff_add()
if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
def as_coeff_Add(self, rational=False, deps=None):
"""
Efficiently extract the coefficient of a summation.
"""
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number and not rational or coeff.is_Rational:
return coeff, self._new_rawargs(*args)
return S.Zero, self
# Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we
# let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See
# issue 5524.
def _eval_power(self, e):
from .evalf import pure_complex
from .relational import is_eq
if len(self.args) == 2 and any(_.is_infinite for _ in self.args):
if e.is_zero is False and is_eq(e, S.One) is False:
# looking for literal a + I*b
a, b = self.args
if a.coeff(S.ImaginaryUnit):
a, b = b, a
ico = b.coeff(S.ImaginaryUnit)
if ico and ico.is_extended_real and a.is_extended_real:
if e.is_extended_negative:
return S.Zero
if e.is_extended_positive:
return S.ComplexInfinity
return
if e.is_Rational and self.is_number:
ri = pure_complex(self)
if ri:
r, i = ri
if e.q == 2:
from sympy.functions.elementary.miscellaneous import sqrt
D = sqrt(r**2 + i**2)
if D.is_Rational:
from .exprtools import factor_terms
from sympy.functions.elementary.complexes import sign
from .function import expand_multinomial
# (r, i, D) is a Pythagorean triple
root = sqrt(factor_terms((D - r)/2))**e.p
return root*expand_multinomial((
# principal value
(D + r)/abs(i) + sign(i)*S.ImaginaryUnit)**e.p)
elif e == -1:
return _unevaluated_Mul(
r - i*S.ImaginaryUnit,
1/(r**2 + i**2))
elif e.is_Number and abs(e) != 1:
# handle the Float case: (2.0 + 4*x)**e -> 4**e*(0.5 + x)**e
c, m = zip(*[i.as_coeff_Mul() for i in self.args])
if any(i.is_Float for i in c): # XXX should this always be done?
big = -1
for i in c:
if abs(i) >= big:
big = abs(i)
if big > 0 and big != 1:
from sympy.functions.elementary.complexes import sign
bigs = (big, -big)
c = [sign(i) if i in bigs else i/big for i in c]
addpow = Add(*[c*m for c, m in zip(c, m)])**e
return big**e*addpow
@cacheit
def _eval_derivative(self, s):
return self.func(*[a.diff(s) for a in self.args])
def _eval_nseries(self, x, n, logx, cdir=0):
terms = [t.nseries(x, n=n, logx=logx, cdir=cdir) for t in self.args]
return self.func(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms) == 1:
return terms[0].matches(expr - coeff, repl_dict)
return
def matches(self, expr, repl_dict=None, old=False):
return self._matches_commutative(expr, repl_dict, old)
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats oo like a symbol so oo - oo
returns 0, instead of a nan.
"""
from sympy.simplify.simplify import signsimp
inf = (S.Infinity, S.NegativeInfinity)
if lhs.has(*inf) or rhs.has(*inf):
from .symbol import Dummy
oo = Dummy('oo')
reps = {
S.Infinity: oo,
S.NegativeInfinity: -oo}
ireps = {v: k for k, v in reps.items()}
eq = lhs.xreplace(reps) - rhs.xreplace(reps)
if eq.has(oo):
eq = eq.replace(
lambda x: x.is_Pow and x.base is oo,
lambda x: x.base)
rv = eq.xreplace(ireps)
else:
rv = lhs - rhs
srv = signsimp(rv)
return srv if srv.is_Number else rv
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coeff_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
>>> (3*x - 2*y + 5).as_two_terms()
(5, 3*x - 2*y)
"""
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self):
"""
Decomposes an expression to its numerator part and its
denominator part.
Examples
========
>>> from sympy.abc import x, y, z
>>> (x*y/z).as_numer_denom()
(x*y, z)
>>> (x*(y + 1)/y**7).as_numer_denom()
(x*(y + 1), y**7)
See Also
========
sympy.core.expr.Expr.as_numer_denom
"""
# clear rational denominator
content, expr = self.primitive()
if not isinstance(expr, Add):
return Mul(content, expr, evaluate=False).as_numer_denom()
ncon, dcon = content.as_numer_denom()
# collect numerators and denominators of the terms
nd = defaultdict(list)
for f in expr.args:
ni, di = f.as_numer_denom()
nd[di].append(ni)
# check for quick exit
if len(nd) == 1:
d, n = nd.popitem()
return self.func(
*[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d)
# sum up the terms having a common denominator
for d, n in nd.items():
if len(n) == 1:
nd[d] = n[0]
else:
nd[d] = self.func(*n)
# assemble single numerator and denominator
denoms, numers = [list(i) for i in zip(*iter(nd.items()))]
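# numerator: for each term, multiply its numerator by all the *other*
# denominators (cross-multiplication); denominator: product of them all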
n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:]))
for i in range(len(numers))]), Mul(*denoms)
return _keep_coeff(ncon, n), _keep_coeff(dcon, d)
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_meromorphic(self, x, a):
return _fuzzy_group((arg.is_meromorphic(x, a) for arg in self.args),
quick_exit=True)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
# assumption methods
_eval_is_real = lambda self: _fuzzy_group(
(a.is_real for a in self.args), quick_exit=True)
_eval_is_extended_real = lambda self: _fuzzy_group(
(a.is_extended_real for a in self.args), quick_exit=True)
_eval_is_complex = lambda self: _fuzzy_group(
(a.is_complex for a in self.args), quick_exit=True)
_eval_is_antihermitian = lambda self: _fuzzy_group(
(a.is_antihermitian for a in self.args), quick_exit=True)
_eval_is_finite = lambda self: _fuzzy_group(
(a.is_finite for a in self.args), quick_exit=True)
_eval_is_hermitian = lambda self: _fuzzy_group(
(a.is_hermitian for a in self.args), quick_exit=True)
_eval_is_integer = lambda self: _fuzzy_group(
(a.is_integer for a in self.args), quick_exit=True)
_eval_is_rational = lambda self: _fuzzy_group(
(a.is_rational for a in self.args), quick_exit=True)
_eval_is_algebraic = lambda self: _fuzzy_group(
(a.is_algebraic for a in self.args), quick_exit=True)
_eval_is_commutative = lambda self: _fuzzy_group(
a.is_commutative for a in self.args)
def _eval_is_infinite(self):
sawinf = False
for a in self.args:
ainf = a.is_infinite
if ainf is None:
return None
elif ainf is True:
# infinite+infinite might not be infinite
if sawinf is True:
return None
sawinf = True
return sawinf
def _eval_is_imaginary(self):
nz = []
im_I = []
for a in self.args:
if a.is_extended_real:
if a.is_zero:
pass
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im_I.append(a*S.ImaginaryUnit)
elif (S.ImaginaryUnit*a).is_extended_real:
im_I.append(a*S.ImaginaryUnit)
else:
return
b = self.func(*nz)
if b != self:
if b.is_zero:
return fuzzy_not(self.func(*im_I).is_zero)
elif b.is_zero is False:
return False
def _eval_is_zero(self):
if self.is_commutative is False:
# issue 10528: there is no way to know if a nc symbol
# is zero or not
return
nz = []
z = 0
im_or_z = False
im = 0
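# z: terms known to be zero; nz: real terms not known to be zero;
# im: terms known to be purely imaginary; im_or_z: saw a term a with
# I*a real, i.e. a imaginary or zero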
for a in self.args:
if a.is_extended_real:
if a.is_zero:
z += 1
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im += 1
elif (S.ImaginaryUnit*a).is_extended_real:
im_or_z = True
else:
return
if z == len(self.args):
return True
if len(nz) in [0, len(self.args)]:
return None
b = self.func(*nz)
if b.is_zero:
if not im_or_z:
if im == 0:
return True
elif im == 1:
return False
if b.is_zero is False:
return False
def _eval_is_odd(self):
l = [f for f in self.args if not (f.is_even is True)]
if not l:
return False
if l[0].is_odd:
return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _all_nonneg_or_nonppos(self):
nn = np = 0
for a in self.args:
if a.is_nonnegative:
if np:
return False
nn = 1
elif a.is_nonpositive:
if nn:
return False
np = 1
else:
break
else:
return True
def _eval_is_extended_positive(self):
if self.is_number:
return super()._eval_is_extended_positive()
c, a = self.as_coeff_Add()
if not c.is_zero:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_positive and a.is_extended_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_positive:
return True
pos = nonneg = nonpos = unknown_sign = False
saw_INF = set()
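        # saw_INF records the fuzzy sign of every infinite term; seeing both a
        # positive and a negative infinity means oo - oo, which is indeterminate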
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
ispos = a.is_extended_positive
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((ispos, a.is_extended_nonnegative)))
if True in saw_INF and False in saw_INF:
return
if ispos:
pos = True
continue
elif a.is_extended_nonnegative:
nonneg = True
continue
elif a.is_extended_nonpositive:
nonpos = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif not nonpos and not nonneg and pos:
return True
elif not nonpos and pos:
return True
elif not pos and not nonneg:
return False
def _eval_is_extended_nonnegative(self):
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_extended_nonnegative:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_nonnegative:
return True
def _eval_is_extended_nonpositive(self):
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_extended_nonpositive:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_nonpositive:
return True
def _eval_is_extended_negative(self):
if self.is_number:
return super()._eval_is_extended_negative()
c, a = self.as_coeff_Add()
if not c.is_zero:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_negative and a.is_extended_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_negative:
return True
neg = nonpos = nonneg = unknown_sign = False
saw_INF = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
isneg = a.is_extended_negative
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((isneg, a.is_extended_nonpositive)))
if True in saw_INF and False in saw_INF:
return
if isneg:
neg = True
continue
elif a.is_extended_nonpositive:
nonpos = True
continue
elif a.is_extended_nonnegative:
nonneg = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif not nonneg and not nonpos and neg:
return True
elif not nonneg and neg:
return True
elif not neg and not nonpos:
return False
def _eval_subs(self, old, new):
if not old.is_Add:
if old is S.Infinity and -old in self.args:
# foo - oo is foo + (-oo) internally
return self.xreplace({-old: -new})
return None
coeff_self, terms_self = self.as_coeff_Add()
coeff_old, terms_old = old.as_coeff_Add()
if coeff_self.is_Rational and coeff_old.is_Rational:
if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y
return self.func(new, coeff_self, -coeff_old)
if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y
return self.func(-new, coeff_self, coeff_old)
if coeff_self.is_Rational and coeff_old.is_Rational \
or coeff_self == coeff_old:
args_old, args_self = self.func.make_args(
terms_old), self.func.make_args(terms_self)
if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x
self_set = set(args_self)
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(new, coeff_self, -coeff_old,
*[s._subs(old, new) for s in ret_set])
args_old = self.func.make_args(
-terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(-new, coeff_self, coeff_old,
*[s._subs(old, new) for s in ret_set])
def removeO(self):
args = [a for a in self.args if not a.is_Order]
return self._new_rawargs(*args)
def getO(self):
args = [a for a in self.args if a.is_Order]
if args:
return self._new_rawargs(*args)
@cacheit
def extract_leading_order(self, symbols, point=None):
"""
Returns the leading term and its order.
Examples
========
>>> from sympy.abc import x
>>> (x + 1 + 1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1 + x).extract_leading_order(x)
((1, O(1)),)
>>> (x + x**2).extract_leading_order(x)
((x, O(x)),)
"""
from sympy.series.order import Order
lst = []
symbols = list(symbols if is_sequence(symbols) else [symbols])
if not point:
point = [0]*len(symbols)
seq = [(f, Order(f, *zip(symbols, point))) for f in self.args]
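        # keep only the dominant terms: a term is dropped if its Order is
        # strictly contained in the Order of a term already kept, and kept
        # terms dominated by the new one are discarded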
for ef, of in seq:
for e, o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef, of)]
for e, o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e, o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True, **hints):
"""
        Returns a tuple representing a complex number.
Examples
========
>>> from sympy import I
>>> (7 + 9*I).as_real_imag()
(7, 9)
>>> ((1 + I)/(1 - I)).as_real_imag()
(0, 1)
>>> ((1 + 2*I)*(1 + 3*I)).as_real_imag()
(-5, 5)
"""
sargs = self.args
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.func(*re_part), self.func(*im_part))
def _eval_as_leading_term(self, x, logx=None, cdir=0):
from sympy.core.symbol import Dummy, Symbol
from sympy.series.order import Order
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from .function import expand_mul
o = self.getO()
if o is None:
o = Order(0)
old = self.removeO()
if old.has(Piecewise):
old = piecewise_fold(old)
# This expansion is the last part of expand_log. expand_log also calls
# expand_mul with factor=True, which would be more expensive
if any(isinstance(a, log) for a in self.args):
logflags = dict(deep=True, log=True, mul=False, power_exp=False,
power_base=False, multinomial=False, basic=False, force=False,
factor=False)
old = old.expand(**logflags)
expr = expand_mul(old)
if not expr.is_Add:
return expr.as_leading_term(x, logx=logx, cdir=cdir)
infinite = [t for t in expr.args if t.is_infinite]
_logx = Dummy('logx') if logx is None else logx
leading_terms = [t.as_leading_term(x, logx=_logx, cdir=cdir) for t in expr.args]
min, new_expr = Order(0), 0
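        # min tracks the dominant Order seen so far (the leading behaviour as
        # x -> 0); further terms of that same order are accumulated in new_expr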
try:
for term in leading_terms:
order = Order(term, x)
if not min or order not in min:
min = order
new_expr = term
elif min in order:
new_expr += term
except TypeError:
return expr
if logx is None:
new_expr = new_expr.subs(_logx, log(x))
is_zero = new_expr.is_zero
if is_zero is None:
new_expr = new_expr.trigsimp().cancel()
is_zero = new_expr.is_zero
if is_zero is True:
# simple leading term analysis gave us cancelled terms but we have to send
# back a term, so compute the leading term (via series)
try:
n0 = min.getn()
except NotImplementedError:
n0 = S.One
if n0.has(Symbol):
n0 = S.One
res = Order(1)
incr = S.One
while res.is_Order:
res = old._eval_nseries(x, n=n0+incr, logx=logx, cdir=cdir).cancel().powsimp().trigsimp()
incr *= 2
return res.as_leading_term(x, logx=logx, cdir=cdir)
elif new_expr is S.NaN:
return old.func._from_args(infinite) + o
else:
return new_expr
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args])
def primitive(self):
"""
        Return ``(R, self/R)`` where ``R`` is the Rational GCD of ``self``.
``R`` is collected only from the leading coefficient of each term.
Examples
========
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 3*x + 2*y)
>>> (2*x/3 + 4.2*y).primitive()
(1/3, 2*x + 12.6*y)
No subprocessing of term factors is performed:
>>> ((2 + 2*x)*x + 2).primitive()
(1, x*(2*x + 2) + 2)
Recursive processing can be done with the ``as_content_primitive()``
method:
>>> ((2 + 2*x)*x + 2).as_content_primitive()
(2, x*(x + 1) + 1)
See also: primitive() function in polytools.py
"""
terms = []
inf = False
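        # each term is recorded as (numerator, denominator, rest) of its
        # leading Rational coefficient; the gcd of the numerators over the lcm
        # of the denominators is the Rational content to extract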
for a in self.args:
c, m = a.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = a
inf = inf or m is S.ComplexInfinity
terms.append((c.p, c.q, m))
if not inf:
ngcd = reduce(igcd, [t[0] for t in terms], 0)
dlcm = reduce(ilcm, [t[1] for t in terms], 1)
else:
ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0)
dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1)
if ngcd == dlcm == 1:
return S.One, self
if not inf:
for i, (p, q, term) in enumerate(terms):
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
for i, (p, q, term) in enumerate(terms):
if q:
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
terms[i] = _keep_coeff(Rational(p, q), term)
# we don't need a complete re-flattening since no new terms will join
# so we just use the same sort as is used in Add.flatten. When the
# coefficient changes, the ordering of terms may change, e.g.
# (3*x, 6*y) -> (2*y, x)
#
# We do need to make sure that term[0] stays in position 0, however.
#
if terms[0].is_Number or terms[0] is S.ComplexInfinity:
c = terms.pop(0)
else:
c = None
_addsort(terms)
if c:
terms.insert(0, c)
return Rational(ngcd, dlcm), self._new_rawargs(*terms)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self. If radical is True (default is False) then
common radicals will be removed and included as a factor of the
primitive expression.
Examples
========
>>> from sympy import sqrt
>>> (3 + 3*sqrt(2)).as_content_primitive()
(3, 1 + sqrt(2))
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
See docstring of Expr.as_content_primitive for more examples.
"""
con, prim = self.func(*[_keep_coeff(*a.as_content_primitive(
radical=radical, clear=clear)) for a in self.args]).primitive()
if not clear and not con.is_Integer and prim.is_Add:
con, d = con.as_numer_denom()
_p = prim/d
if any(a.as_coeff_Mul()[0].is_Integer for a in _p.args):
prim = _p
else:
con /= d
if radical and prim.is_Add:
# look for common radicals that can be removed
args = prim.args
rads = []
common_q = None
for m in args:
term_rads = defaultdict(list)
for ai in Mul.make_args(m):
if ai.is_Pow:
b, e = ai.as_base_exp()
if e.is_Rational and b.is_Integer:
term_rads[e.q].append(abs(int(b))**e.p)
if not term_rads:
break
if common_q is None:
common_q = set(term_rads.keys())
else:
common_q = common_q & set(term_rads.keys())
if not common_q:
break
rads.append(term_rads)
else:
# process rads
# keep only those in common_q
for r in rads:
for q in list(r.keys()):
if q not in common_q:
r.pop(q)
for q in r:
r[q] = Mul(*r[q])
# find the gcd of bases for each q
G = []
for q in common_q:
g = reduce(igcd, [r[q] for r in rads], 0)
if g != 1:
G.append(g**Rational(1, q))
if G:
G = Mul(*G)
args = [ai/G for ai in args]
prim = G*prim.func(*args)
return con, prim
@property
def _sorted_args(self):
from .sorting import default_sort_key
return tuple(sorted(self.args, key=default_sort_key))
def _eval_difference_delta(self, n, step):
from sympy.series.limitseq import difference_delta as dd
return self.func(*[dd(a, n, step) for a in self.args])
@property
def _mpc_(self):
"""
Convert self to an mpmath mpc if possible
"""
from .numbers import Float
re_part, rest = self.as_coeff_Add()
im_part, imag_unit = rest.as_coeff_Mul()
if not imag_unit == S.ImaginaryUnit:
# ValueError may seem more reasonable but since it's a @property,
# we need to use AttributeError to keep from confusing things like
# hasattr.
raise AttributeError("Cannot convert Add to mpc. Must be of the form Number + Number*I")
return (Float(re_part)._mpf_, Float(im_part)._mpf_)
def __neg__(self):
if not global_parameters.distribute:
return super().__neg__()
return Mul(S.NegativeOne, self)
add = AssocOpDispatcher('add')
from .mul import Mul, _keep_coeff, _unevaluated_Mul
from .numbers import Rational
|
540130a1072b5df03d5ed5def68e4048ee9e7f6ada6d0d1f2fa8d77a66f07076 | from __future__ import annotations
from typing import TYPE_CHECKING
from collections.abc import Iterable
from functools import reduce
import re
from .sympify import sympify, _sympify
from .basic import Basic, Atom
from .singleton import S
from .evalf import EvalfMixin, pure_complex, DEFAULT_MAXPREC
from .decorators import call_highest_priority, sympify_method_args, sympify_return
from .cache import cacheit
from .sorting import default_sort_key
from .kind import NumberKind
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.misc import as_int, func_name, filldedent
from sympy.utilities.iterables import has_variety, sift
from mpmath.libmp import mpf_log, prec_to_dps
from mpmath.libmp.libintmath import giant_steps
if TYPE_CHECKING:
from .numbers import Number
from collections import defaultdict
def _corem(eq, c): # helper for extract_additively
# return co, diff from co*c + diff
co = []
non = []
for i in Add.make_args(eq):
ci = i.coeff(c)
if not ci:
non.append(i)
else:
co.append(ci)
return Add(*co), Add(*non)
@sympify_method_args
class Expr(Basic, EvalfMixin):
"""
Base class for algebraic expressions.
Explanation
===========
Everything that requires arithmetic operations to be defined
should subclass this class, instead of Basic (which should be
used only for argument storage and expression manipulation, i.e.
pattern matching, substitutions, etc).
If you want to override the comparisons of expressions:
Should use _eval_is_ge for inequality, or _eval_is_eq, with multiple dispatch.
_eval_is_ge return true if x >= y, false if x < y, and None if the two types
are not comparable or the comparison is indeterminate
See Also
========
sympy.core.basic.Basic
"""
__slots__: tuple[str, ...] = ()
is_scalar = True # self derivative is 1
@property
def _diff_wrt(self):
"""Return True if one can differentiate with respect to this
object, else False.
Explanation
===========
Subclasses such as Symbol, Function and Derivative return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol (_diff_wrt=True) variables and
temporarily converts the non-Symbols into Symbols when performing
the differentiation. By default, any object deriving from Expr
will behave like a scalar with self.diff(self) == 1. If this is
not desired then the object must also set `is_scalar = False` or
else define an _eval_derivative routine.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyScalar(Expr):
... _diff_wrt = True
...
>>> MyScalar().diff(MyScalar())
1
>>> class MySymbol(Expr):
... _diff_wrt = True
... is_scalar = False
...
>>> MySymbol().diff(MySymbol())
Derivative(MySymbol(), MySymbol())
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
if expr.base is S.Exp1:
# If we remove this, many doctests will go crazy:
# (keeps E**x sorted like the exp(x) function,
# part of exp(x) to E**x transition)
expr, exp = Function("exp")(expr.exp), S.One
else:
expr, exp = expr.args
else:
exp = S.One
if expr.is_Dummy:
args = (expr.sort_key(),)
elif expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
# ***************
# * Arithmetics *
# ***************
# Expr and its subclasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 5510.
_op_priority = 10.0
@property
def _add_handler(self):
return Add
@property
def _mul_handler(self):
return Mul
def __pos__(self):
return self
def __neg__(self):
# Mul has its own __neg__ routine, so we just
# create a 2-args Mul with the -1 in the canonical
# slot 0.
c = self.is_commutative
return Mul._from_args((S.NegativeOne, self), c)
def __abs__(self) -> Expr:
from sympy.functions.elementary.complexes import Abs
return Abs(self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rpow__')
def _pow(self, other):
return Pow(self, other)
def __pow__(self, other, mod=None) -> Expr:
if mod is None:
return self._pow(other)
try:
_self, other, mod = as_int(self), as_int(other), as_int(mod)
if other >= 0:
return _sympify(pow(_self, other, mod))
else:
from .numbers import mod_inverse
return _sympify(mod_inverse(pow(_self, -other, mod), mod))
except ValueError:
power = self._pow(other)
try:
return power%mod
except TypeError:
return NotImplemented
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
denom = Pow(other, S.NegativeOne)
if self is S.One:
return denom
else:
return Mul(self, denom)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
denom = Pow(self, S.NegativeOne)
if other is S.One:
return denom
else:
return Mul(other, denom)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rfloordiv__')
def __floordiv__(self, other):
from sympy.functions.elementary.integers import floor
return floor(self / other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__floordiv__')
def __rfloordiv__(self, other):
from sympy.functions.elementary.integers import floor
return floor(other / self)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__rdivmod__')
def __divmod__(self, other):
from sympy.functions.elementary.integers import floor
return floor(self / other), Mod(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
@call_highest_priority('__divmod__')
def __rdivmod__(self, other):
from sympy.functions.elementary.integers import floor
return floor(other / self), Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
from .symbol import Dummy
if not self.is_number:
raise TypeError("Cannot convert symbols to int")
r = self.round(2)
if not r.is_Number:
raise TypeError("Cannot convert complex to int")
if r in (S.NaN, S.Infinity, S.NegativeInfinity):
raise TypeError("Cannot convert %s to int" % r)
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("Cannot convert complex to float")
raise TypeError("Cannot convert expression to float")
def __complex__(self):
result = self.evalf()
re, im = result.as_real_imag()
return complex(float(re), float(im))
@sympify_return([('other', 'Expr')], NotImplemented)
def __ge__(self, other):
from .relational import GreaterThan
return GreaterThan(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
def __le__(self, other):
from .relational import LessThan
return LessThan(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
def __gt__(self, other):
from .relational import StrictGreaterThan
return StrictGreaterThan(self, other)
@sympify_return([('other', 'Expr')], NotImplemented)
def __lt__(self, other):
from .relational import StrictLessThan
return StrictLessThan(self, other)
def __trunc__(self):
if not self.is_number:
raise TypeError("Cannot truncate symbols and expressions")
else:
return Integer(self)
def __format__(self, format_spec: str):
if self.is_number:
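            # support fixed-point specs such as '.3f' by rounding first; any
            # other spec falls back to the default object formatting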
mt = re.match(r'\+?\d*\.(\d+)f', format_spec)
if mt:
prec = int(mt.group(1))
rounded = self.round(prec)
if rounded.is_Integer:
return format(int(rounded), format_spec)
if rounded.is_Float:
return format(rounded, format_spec)
return super().__format__(format_spec)
@staticmethod
def _from_mpmath(x, prec):
if hasattr(x, "_mpf_"):
return Float._new(x._mpf_, prec)
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
re = Float._new(re, prec)
im = Float._new(im, prec)*S.ImaginaryUnit
return re + im
else:
raise TypeError("expected mpmath number (mpf or mpc)")
@property
def is_number(self):
"""Returns True if ``self`` has no free symbols and no
undefined functions (AppliedUndef, to be precise). It will be
faster than ``if not self.free_symbols``, however, since
``is_number`` will fail as soon as it hits a free symbol
or undefined function.
Examples
========
>>> from sympy import Function, Integral, cos, sin, pi
>>> from sympy.abc import x
>>> f = Function('f')
>>> x.is_number
False
>>> f(1).is_number
False
>>> (2*x).is_number
False
>>> (2 + Integral(2, x)).is_number
False
>>> (2 + Integral(2, (x, 1, 2))).is_number
True
Not all numbers are Numbers in the SymPy sense:
>>> pi.is_number, pi.is_Number
(True, False)
If something is a number it should evaluate to a number with
real and imaginary parts that are Numbers; the result may not
be comparable, however, since the real and/or imaginary part
of the result may not have precision.
>>> cos(1).is_number and cos(1).is_comparable
True
>>> z = cos(1)**2 + sin(1)**2 - 1
>>> z.is_number
True
>>> z.is_comparable
False
See Also
========
sympy.core.basic.Basic.is_comparable
"""
return all(obj.is_number for obj in self.args)
def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1):
"""Return self evaluated, if possible, replacing free symbols with
random complex values, if necessary.
Explanation
===========
The random complex value for each free symbol is generated
by the random_complex_number routine giving real and imaginary
parts in the range given by the re_min, re_max, im_min, and im_max
values. The returned value is evaluated to a precision of n
(if given) else the maximum of 15 and the precision needed
to get more than 1 digit of precision. If the expression
could not be evaluated to a number, or could not be evaluated
to more than 1 digit of precision, then None is returned.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y
>>> x._random() # doctest: +SKIP
0.0392918155679172 + 0.916050214307199*I
>>> x._random(2) # doctest: +SKIP
-0.77 - 0.87*I
>>> (x + y/2)._random(2) # doctest: +SKIP
-0.57 + 0.16*I
>>> sqrt(2)._random(2)
1.4
See Also
========
sympy.core.random.random_complex_number
"""
free = self.free_symbols
prec = 1
if free:
from sympy.core.random import random_complex_number
a, c, b, d = re_min, re_max, im_min, im_max
reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True)
for zi in free])))
try:
nmag = abs(self.evalf(2, subs=reps))
except (ValueError, TypeError):
# if an out of range value resulted in evalf problems
# then return None -- XXX is there a way to know how to
# select a good random number for a given expression?
# e.g. when calculating n! negative values for n should not
# be used
return None
else:
reps = {}
nmag = abs(self.evalf(2))
if not hasattr(nmag, '_prec'):
# e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True
return None
if nmag._prec == 1:
# increase the precision up to the default maximum
# precision to see if we can get any significance
# evaluate
for prec in giant_steps(2, DEFAULT_MAXPREC):
nmag = abs(self.evalf(prec, subs=reps))
if nmag._prec != 1:
break
if nmag._prec != 1:
if n is None:
n = max(prec, 15)
return self.evalf(n, subs=reps)
# never got any significance
return None
def is_constant(self, *wrt, **flags):
"""Return True if self is constant, False if not, or None if
the constancy could not be determined conclusively.
Explanation
===========
If an expression has no free symbols then it is a constant. If
there are free symbols it is possible that the expression is a
constant, perhaps (but not necessarily) zero. To test such
expressions, a few strategies are tried:
1) numerical evaluation at two random points. If two such evaluations
give two different values and the values have a precision greater than
1 then self is not constant. If the evaluations agree or could not be
obtained with any precision, no decision is made. The numerical testing
is done only if ``wrt`` is different than the free symbols.
2) differentiation with respect to variables in 'wrt' (or all free
symbols if omitted) to see if the expression is constant or not. This
will not always lead to an expression that is zero even though an
expression is constant (see added test in test_expr.py). If
all derivatives are zero then self is constant with respect to the
given symbols.
3) finding out zeros of denominator expression with free_symbols.
It will not be constant if there are zeros. It gives more negative
        answers for expressions that are not constant.
If neither evaluation nor differentiation can prove the expression is
constant, None is returned unless two numerical values happened to be
the same and the flag ``failing_number`` is True -- in that case the
numerical value will be returned.
If flag simplify=False is passed, self will not be simplified;
the default is True since self should be simplified before testing.
Examples
========
>>> from sympy import cos, sin, Sum, S, pi
>>> from sympy.abc import a, n, x, y
>>> x.is_constant()
False
>>> S(2).is_constant()
True
>>> Sum(x, (x, 1, 10)).is_constant()
True
>>> Sum(x, (x, 1, n)).is_constant()
False
>>> Sum(x, (x, 1, n)).is_constant(y)
True
>>> Sum(x, (x, 1, n)).is_constant(n)
False
>>> Sum(x, (x, 1, n)).is_constant(x)
True
>>> eq = a*cos(x)**2 + a*sin(x)**2 - a
>>> eq.is_constant()
True
>>> eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
True
>>> (0**x).is_constant()
False
>>> x.is_constant()
False
>>> (x**x).is_constant()
False
>>> one = cos(x)**2 + sin(x)**2
>>> one.is_constant()
True
>>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1
True
"""
def check_denominator_zeros(expression):
from sympy.solvers.solvers import denoms
retNone = False
for den in denoms(expression):
z = den.is_zero
if z is True:
return True
if z is None:
retNone = True
if retNone:
return None
return False
simplify = flags.get('simplify', True)
if self.is_number:
return True
free = self.free_symbols
if not free:
return True # assume f(1) is some constant
# if we are only interested in some symbols and they are not in the
# free symbols then this expression is constant wrt those symbols
wrt = set(wrt)
if wrt and not wrt & free:
return True
wrt = wrt or free
# simplify unless this has already been done
expr = self
if simplify:
expr = expr.simplify()
# is_zero should be a quick assumptions check; it can be wrong for
# numbers (see test_is_not_constant test), giving False when it
# shouldn't, but hopefully it will never give True unless it is sure.
if expr.is_zero:
return True
# Don't attempt substitution or differentiation with non-number symbols
wrt_number = {sym for sym in wrt if sym.kind is NumberKind}
# try numerical evaluation to see if we get two different values
failing_number = None
if wrt_number == free:
# try 0 (for a) and 1 (for b)
try:
a = expr.subs(list(zip(free, [0]*len(free))),
simultaneous=True)
if a is S.NaN:
# evaluation may succeed when substitution fails
a = expr._random(None, 0, 0, 0, 0)
except ZeroDivisionError:
a = None
if a is not None and a is not S.NaN:
try:
b = expr.subs(list(zip(free, [1]*len(free))),
simultaneous=True)
if b is S.NaN:
# evaluation may succeed when substitution fails
b = expr._random(None, 1, 0, 1, 0)
except ZeroDivisionError:
b = None
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random real
b = expr._random(None, -1, 0, 1, 0)
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random complex
b = expr._random()
if b is not None and b is not S.NaN:
if b.equals(a) is False:
return False
failing_number = a if a.is_number else b
# now we will test each wrt symbol (or all free symbols) to see if the
# expression depends on them or not using differentiation. This is
# not sufficient for all expressions, however, so we don't return
# False if we get a derivative other than 0 with free symbols.
for w in wrt_number:
deriv = expr.diff(w)
if simplify:
deriv = deriv.simplify()
if deriv != 0:
if not (pure_complex(deriv, or_real=True)):
if flags.get('failing_number', False):
return failing_number
return False
cd = check_denominator_zeros(self)
if cd is True:
return False
elif cd is None:
return None
return True
def equals(self, other, failing_expression=False):
"""Return True if self == other, False if it does not, or None. If
failing_expression is True then the expression which did not simplify
to a 0 will be returned instead of None.
Explanation
===========
If ``self`` is a Number (or complex number) that is not zero, then
the result is False.
If ``self`` is a number and has not evaluated to zero, evalf will be
used to test whether the expression evaluates to zero. If it does so
and the result has significance (i.e. the precision is either -1, for
a Rational result, or is greater than 1) then the evalf value will be
used to return True or False.
"""
from sympy.simplify.simplify import nsimplify, simplify
from sympy.solvers.solvers import solve
from sympy.polys.polyerrors import NotAlgebraic
from sympy.polys.numberfields import minimal_polynomial
other = sympify(other)
if self == other:
return True
# they aren't the same so see if we can make the difference 0;
# don't worry about doing simplification steps one at a time
# because if the expression ever goes to 0 then the subsequent
# simplification steps that are done will be very fast.
diff = factor_terms(simplify(self - other), radical=True)
if not diff:
return True
if not diff.has(Add, Mod):
# if there is no expanding to be done after simplifying
# then this can't be a zero
return False
factors = diff.as_coeff_mul()[1]
if len(factors) > 1: # avoid infinity recursion
fac_zero = [fac.equals(0) for fac in factors]
if None not in fac_zero: # every part can be decided
return any(fac_zero)
constant = diff.is_constant(simplify=False, failing_number=True)
if constant is False:
return False
if not diff.is_number:
if constant is None:
# e.g. unless the right simplification is done, a symbolic
# zero is possible (see expression of issue 6829: without
# simplification constant will be None).
return
if constant is True:
# this gives a number whether there are free symbols or not
ndiff = diff._random()
# is_comparable will work whether the result is real
# or complex; it could be None, however.
if ndiff and ndiff.is_comparable:
return False
# sometimes we can use a simplified result to give a clue as to
# what the expression should be; if the expression is *not* zero
# then we should have been able to compute that and so now
# we can just consider the cases where the approximation appears
# to be zero -- we try to prove it via minimal_polynomial.
#
# removed
# ns = nsimplify(diff)
# if diff.is_number and (not ns or ns == diff):
#
# The thought was that if it nsimplifies to 0 that's a sure sign
# to try the following to prove it; or if it changed but wasn't
# zero that might be a sign that it's not going to be easy to
# prove. But tests seem to be working without that logic.
#
if diff.is_number:
# try to prove via self-consistency
surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer]
# it seems to work better to try big ones first
surds.sort(key=lambda x: -x.args[0])
for s in surds:
try:
# simplify is False here -- this expression has already
# been identified as being hard to identify as zero;
# we will handle the checking ourselves using nsimplify
# to see if we are in the right ballpark or not and if so
# *then* the simplification will be attempted.
sol = solve(diff, s, simplify=False)
if sol:
if s in sol:
# the self-consistent result is present
return True
if all(si.is_Integer for si in sol):
# perfect powers are removed at instantiation
# so surd s cannot be an integer
return False
if all(i.is_algebraic is False for i in sol):
# a surd is algebraic
return False
if any(si in surds for si in sol):
# it wasn't equal to s but it is in surds
# and different surds are not equal
return False
if any(nsimplify(s - si) == 0 and
simplify(s - si) == 0 for si in sol):
return True
if s.is_real:
if any(nsimplify(si, [s]) == s and simplify(si) == s
for si in sol):
return True
except NotImplementedError:
pass
# try to prove with minimal_polynomial but know when
# *not* to use this or else it can take a long time. e.g. issue 8354
if True: # change True to condition that assures non-hang
try:
mp = minimal_polynomial(diff)
if mp.is_Symbol:
return True
return False
except (NotAlgebraic, NotImplementedError):
pass
# diff has not simplified to zero; constant is either None, True
# or the number with significance (is_comparable) that was randomly
# calculated twice as the same value.
if constant not in (True, None) and constant != 0:
return False
if failing_expression:
return diff
return None
def _eval_is_extended_positive_negative(self, positive):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
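        # For a numeric self, a 2-digit evalf with significance decides the
        # sign; a result with no significance for an algebraic expression
        # hints that self may actually be zero, which minimal_polynomial can
        # confirm (returning False for both the positive and negative query).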
if self.is_number:
# check to see that we can get a value
try:
n2 = self._eval_evalf(2)
# XXX: This shouldn't be caught here
# Catches ValueError: hypsum() failed to converge to the requested
# 34 bits of accuracy
except ValueError:
return None
if n2 is None:
return None
if getattr(n2, '_prec', 1) == 1: # no significance
return None
if n2 is S.NaN:
return None
f = self.evalf(2)
if f.is_Float:
match = f, S.Zero
else:
match = pure_complex(f)
if match is None:
return False
r, i = match
if not (i.is_Number and r.is_Number):
return False
if r._prec != 1 and i._prec != 1:
return bool(not i and ((r > 0) if positive else (r < 0)))
elif r._prec == 1 and (not i or i._prec == 1) and \
self._eval_is_algebraic() and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_is_extended_positive(self):
return self._eval_is_extended_positive_negative(positive=True)
def _eval_is_extended_negative(self):
return self._eval_is_extended_positive_negative(positive=False)
def _eval_interval(self, x, a, b):
"""
Returns evaluation over an interval. For most functions this is:
self.subs(x, b) - self.subs(x, a),
possibly using limit() if NaN is returned from subs, or if
singularities are found between a and b.
        If b or a is None, it only evaluates -self.subs(x, a) or self.subs(x, b),
respectively.
"""
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.functions.elementary.exponential import log
from sympy.series.limits import limit, Limit
from sympy.sets.sets import Interval
from sympy.solvers.solveset import solveset
if (a is None and b is None):
raise ValueError('Both interval ends cannot be None.')
def _eval_endpoint(left):
c = a if left else b
if c is None:
return S.Zero
else:
C = self.subs(x, c)
if C.has(S.NaN, S.Infinity, S.NegativeInfinity,
S.ComplexInfinity, AccumBounds):
if (a < b) != False:
C = limit(self, x, c, "+" if left else "-")
else:
C = limit(self, x, c, "-" if left else "+")
if isinstance(C, Limit):
raise NotImplementedError("Could not compute limit")
return C
if a == b:
return S.Zero
A = _eval_endpoint(left=True)
if A is S.NaN:
return A
B = _eval_endpoint(left=False)
if (a and b) is None:
return B - A
value = B - A
if a.is_comparable and b.is_comparable:
if a < b:
domain = Interval(a, b)
else:
domain = Interval(b, a)
# check the singularities of self within the interval
# if singularities is a ConditionSet (not iterable), catch the exception and pass
singularities = solveset(self.cancel().as_numer_denom()[1], x,
domain=domain)
for logterm in self.atoms(log):
singularities = singularities | solveset(logterm.args[0], x,
domain=domain)
try:
for s in singularities:
if value is S.NaN:
# no need to keep adding, it will stay NaN
break
if not s.is_comparable:
continue
if (a < s) == (s < b) == True:
value += -limit(self, x, s, "+") + limit(self, x, s, "-")
elif (b < s) == (s < a) == True:
value += limit(self, x, s, "+") - limit(self, x, s, "-")
except TypeError:
pass
return value
def _eval_power(self, other):
# subclass to compute self**other for cases when
# other is not NaN, 0, or 1
return None
def _eval_conjugate(self):
if self.is_extended_real:
return self
elif self.is_imaginary:
return -self
def conjugate(self):
"""Returns the complex conjugate of 'self'."""
from sympy.functions.elementary.complexes import conjugate as c
return c(self)
def dir(self, x, cdir):
if self.is_zero:
return S.Zero
from sympy.functions.elementary.exponential import log
minexp = S.Zero
arg = self
while arg:
minexp += S.One
arg = arg.diff(x)
coeff = arg.subs(x, 0)
if coeff is S.NaN:
coeff = arg.limit(x, 0)
if coeff is S.ComplexInfinity:
try:
coeff, _ = arg.leadterm(x)
if coeff.has(log(x)):
raise ValueError()
except ValueError:
coeff = arg.limit(x, 0)
if coeff != S.Zero:
break
return coeff*cdir**minexp
def _eval_transpose(self):
from sympy.functions.elementary.complexes import conjugate
if (self.is_complex or self.is_infinite):
return self
elif self.is_hermitian:
return conjugate(self)
elif self.is_antihermitian:
return -conjugate(self)
def transpose(self):
from sympy.functions.elementary.complexes import transpose
return transpose(self)
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import conjugate, transpose
if self.is_hermitian:
return self
elif self.is_antihermitian:
return -self
obj = self._eval_conjugate()
if obj is not None:
return transpose(obj)
obj = self._eval_transpose()
if obj is not None:
return conjugate(obj)
def adjoint(self):
from sympy.functions.elementary.complexes import adjoint
return adjoint(self)
@classmethod
def _parse_order(cls, order):
"""Parse and configure the ordering of terms. """
from sympy.polys.orderings import monomial_key
startswith = getattr(order, "startswith", None)
if startswith is None:
reverse = False
else:
reverse = startswith('rev-')
if reverse:
order = order[4:]
monom_key = monomial_key(order)
def neg(monom):
return tuple([neg(m) if isinstance(m, tuple) else -m for m in monom])
def key(term):
_, ((re, im), monom, ncpart) = term
monom = neg(monom_key(monom))
ncpart = tuple([e.sort_key(order=order) for e in ncpart])
coeff = ((bool(im), im), (re, im))
return monom, ncpart, coeff
return key, reverse
def as_ordered_factors(self, order=None):
"""Return list of ordered factors (if Mul) else [self]."""
return [self]
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
Explanation
===========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded
from sympy.polys.polytools import Poly
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except (PolynomialError, GeneratorsNeeded):
# PolynomialError is caught for e.g. exp(x).as_poly(x)
# GeneratorsNeeded is caught for e.g. S(2).as_poly()
return None
def as_ordered_terms(self, order=None, data=False):
"""
Transform an expression to an ordered list of terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms()
[sin(x)**2*cos(x), sin(x)**2, 1]
"""
from .numbers import Number, NumberSymbol
if order is None and self.is_Add:
# Spot the special case of Add(Number, Mul(Number, expr)) with the
# first number positive and the second number negative
            key = lambda x: not isinstance(x, (Number, NumberSymbol))
add_args = sorted(Add.make_args(self), key=key)
if (len(add_args) == 2
and isinstance(add_args[0], (Number, NumberSymbol))
and isinstance(add_args[1], Mul)):
mul_args = sorted(Mul.make_args(add_args[1]), key=key)
if (len(mul_args) == 2
and isinstance(mul_args[0], Number)
and add_args[0].is_positive
and mul_args[0].is_negative):
return add_args
key, reverse = self._parse_order(order)
terms, gens = self.as_terms()
if not any(term.is_Order for term, _ in terms):
ordered = sorted(terms, key=key, reverse=reverse)
else:
_terms, _order = [], []
for term, repr in terms:
if not term.is_Order:
_terms.append((term, repr))
else:
_order.append((term, repr))
ordered = sorted(_terms, key=key, reverse=True) \
+ sorted(_order, key=key, reverse=True)
if data:
return ordered, gens
else:
return [term for term, _ in ordered]
def as_terms(self):
"""Transform an expression to a list of terms. """
from .exprtools import decompose_power
gens, terms = set(), []
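        # each term is split into a complex coefficient, a {generator: exponent}
        # map for the commutative factors and a tuple of non-commutative factors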
for term in Add.make_args(self):
coeff, _term = term.as_coeff_Mul()
coeff = complex(coeff)
cpart, ncpart = {}, []
if _term is not S.One:
for factor in Mul.make_args(_term):
if factor.is_number:
try:
coeff *= complex(factor)
except (TypeError, ValueError):
pass
else:
continue
if factor.is_commutative:
base, exp = decompose_power(factor)
cpart[base] = exp
gens.add(base)
else:
ncpart.append(factor)
coeff = coeff.real, coeff.imag
ncpart = tuple(ncpart)
terms.append((term, (coeff, cpart, ncpart)))
gens = sorted(gens, key=default_sort_key)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = []
for term, (coeff, cpart, ncpart) in terms:
monom = [0]*k
for base, exp in cpart.items():
monom[indices[base]] = exp
result.append((term, (coeff, tuple(monom), ncpart)))
return result, gens
def removeO(self):
"""Removes the additive O(..) symbol if there is one"""
return self
def getO(self):
"""Returns the additive O(..) symbol if there is one, else None."""
return None
def getn(self):
"""
Returns the order of the expression.
Explanation
===========
        The order is determined from the O(...) term. If there
is no O(...) term, it returns None.
Examples
========
>>> from sympy import O
>>> from sympy.abc import x
>>> (1 + x + O(x**2)).getn()
2
>>> (1 + x).getn()
"""
o = self.getO()
if o is None:
return None
elif o.is_Order:
o = o.expr
if o is S.One:
return S.Zero
if o.is_Symbol:
return S.One
if o.is_Pow:
return o.args[1]
if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n
for oi in o.args:
if oi.is_Symbol:
return S.One
if oi.is_Pow:
from .symbol import Dummy, Symbol
syms = oi.atoms(Symbol)
if len(syms) == 1:
x = syms.pop()
oi = oi.subs(x, Dummy('x', positive=True))
if oi.base.is_Symbol and oi.exp.is_Rational:
return abs(oi.exp)
raise NotImplementedError('not sure of order of %s' % o)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
def args_cnc(self, cset=False, warn=True, split_1=True):
"""Return [commutative factors, non-commutative factors] of self.
Explanation
===========
self is treated as a Mul and the ordering of the factors is maintained.
If ``cset`` is True the commutative factors will be returned in a set.
If there were repeated factors (as may happen with an unevaluated Mul)
then an error will be raised unless it is explicitly suppressed by
setting ``warn`` to False.
Note: -1 is always separated from a Number unless split_1 is False.
Examples
========
>>> from sympy import symbols, oo
>>> A, B = symbols('A B', commutative=0)
>>> x, y = symbols('x y')
>>> (-2*x*y).args_cnc()
[[-1, 2, x, y], []]
>>> (-2.5*x).args_cnc()
[[-1, 2.5, x], []]
>>> (-2*x*A*B*y).args_cnc()
[[-1, 2, x, y], [A, B]]
>>> (-2*x*A*B*y).args_cnc(split_1=False)
[[-2, x, y], [A, B]]
>>> (-2*x*y).args_cnc(cset=True)
[{-1, 2, x, y}, []]
The arg is always treated as a Mul:
>>> (-2 + x + A).args_cnc()
[[], [x - 2 + A]]
>>> (-oo).args_cnc() # -oo is a singleton
[[-1, oo], []]
"""
if self.is_Mul:
args = list(self.args)
else:
args = [self]
for i, mi in enumerate(args):
if not mi.is_commutative:
c = args[:i]
nc = args[i:]
break
else:
c = args
nc = []
if c and split_1 and (
c[0].is_Number and
c[0].is_extended_negative and
c[0] is not S.NegativeOne):
c[:1] = [S.NegativeOne, -c[0]]
if cset:
clen = len(c)
c = set(c)
if clen and warn and len(c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in c if list(self.args).count(ci) > 1])
return [c, nc]
def coeff(self, x, n=1, right=False, _first=True):
"""
Returns the coefficient from the term(s) containing ``x**n``. If ``n``
is zero then all terms independent of ``x`` will be returned.
Explanation
===========
When ``x`` is noncommutative, the coefficient to the left (default) or
right of ``x`` can be returned. The keyword 'right' is ignored when
``x`` is commutative.
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
You can select terms that have an explicit negative in front of them:
>>> (-x + 2*y).coeff(-1)
x
>>> (x - 2*y).coeff(-1)
2*y
You can select terms with no Rational coefficient:
>>> (x + 2*y).coeff(1)
x
>>> (3 + 2*x + 4*x**2).coeff(1)
0
You can select terms independent of x by making n=0; in this case
expr.as_independent(x)[0] is returned (and 0 will be returned instead
of None):
>>> (3 + 2*x + 4*x**2).coeff(x, 0)
3
>>> eq = ((x + 1)**3).expand() + 1
>>> eq
x**3 + 3*x**2 + 3*x + 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 2]
>>> eq -= 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 0]
You can select terms that have a numerical term in front of them:
>>> (-x - 2*y).coeff(2)
-y
>>> from sympy import sqrt
>>> (x + sqrt(2)*x).coeff(sqrt(2))
x
The matching is exact:
>>> (3 + 2*x + 4*x**2).coeff(x)
2
>>> (3 + 2*x + 4*x**2).coeff(x**2)
4
>>> (3 + 2*x + 4*x**2).coeff(x**3)
0
>>> (z*(x + y)**2).coeff((x + y)**2)
z
>>> (z*(x + y)**2).coeff(x + y)
0
In addition, no factoring is done, so 1 + z*(1 + y) is not obtained
from the following:
>>> (x + z*(x + x*y)).coeff(x)
1
If such factoring is desired, factor_terms can be used first:
>>> from sympy import factor_terms
>>> factor_terms(x + z*(x + x*y)).coeff(x)
z*(y + 1) + 1
>>> n, m, o = symbols('n m o', commutative=False)
>>> n.coeff(n)
1
>>> (3*n).coeff(n)
3
>>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m
1 + m
>>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m
m
If there is more than one possible coefficient 0 is returned:
>>> (n*m + m*n).coeff(n)
0
If there is only one possible coefficient, it is returned:
>>> (n*m + x*m*n).coeff(m*n)
x
>>> (n*m + x*m*n).coeff(m*n, right=1)
1
See Also
========
as_coefficient: separate the expression into a coefficient and factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.Poly.nth: like coeff_monomial but powers of monomial terms are used
"""
x = sympify(x)
if not isinstance(x, Basic):
return S.Zero
n = as_int(n)
if not x:
return S.Zero
if x == self:
if n == 1:
return S.One
return S.Zero
if x is S.One:
co = [a for a in Add.make_args(self)
if a.as_coeff_Mul()[0] is S.One]
if not co:
return S.Zero
return Add(*co)
if n == 0:
if x.is_Add and self.is_Add:
c = self.coeff(x, right=right)
if not c:
return S.Zero
if not right:
return self - Add(*[a*x for a in Add.make_args(c)])
return self - Add(*[x*a for a in Add.make_args(c)])
return self.as_independent(x, as_Add=True)[0]
# continue with the full method, looking for this power of x:
x = x**n
def incommon(l1, l2):
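            # return the common leading elements of l1 and l2 (all of l1 if no
            # mismatch occurs within the shorter length)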
if not l1 or not l2:
return []
n = min(len(l1), len(l2))
for i in range(n):
if l1[i] != l2[i]:
return l1[:i]
return l1[:]
def find(l, sub, first=True):
""" Find where list sub appears in list l. When ``first`` is True
the first occurrence from the left is returned, else the last
occurrence is returned. Return None if sub is not in l.
Examples
========
>> l = range(5)*2
>> find(l, [2, 3])
2
>> find(l, [2, 3], first=0)
7
>> find(l, [2, 4])
None
"""
if not sub or not l or len(sub) > len(l):
return None
n = len(sub)
if not first:
l.reverse()
sub.reverse()
for i in range(len(l) - n + 1):
if all(l[i + j] == sub[j] for j in range(n)):
break
else:
i = None
if not first:
l.reverse()
sub.reverse()
if i is not None and not first:
i = len(l) - (i + n)
return i
co = []
args = Add.make_args(self)
self_c = self.is_commutative
x_c = x.is_commutative
if self_c and not x_c:
return S.Zero
if _first and self.is_Add and not self_c and not x_c:
# get the part that depends on x exactly
xargs = Mul.make_args(x)
d = Add(*[i for i in Add.make_args(self.as_independent(x)[1])
if all(xi in Mul.make_args(i) for xi in xargs)])
rv = d.coeff(x, right=right, _first=False)
if not rv.is_Add or not right:
return rv
c_part, nc_part = zip(*[i.args_cnc() for i in rv.args])
if has_variety(c_part):
return rv
return Add(*[Mul._from_args(i) for i in nc_part])
one_c = self_c or x_c
xargs, nx = x.args_cnc(cset=True, warn=bool(not x_c))
# find the parts that pass the commutative terms
for a in args:
margs, nc = a.args_cnc(cset=True, warn=bool(not self_c))
if nc is None:
nc = []
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
if one_c:
co.append(Mul(*(list(resid) + nc)))
else:
co.append((resid, nc))
if one_c:
if co == []:
return S.Zero
elif co:
return Add(*co)
else: # both nc
# now check the non-comm parts
if not co:
return S.Zero
if all(n == co[0][1] for r, n in co):
ii = find(co[0][1], nx, right)
if ii is not None:
if not right:
return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii]))
else:
return Mul(*co[0][1][ii + len(nx):])
beg = reduce(incommon, (n[1] for n in co))
if beg:
ii = find(beg, nx, right)
if ii is not None:
if not right:
gcdc = co[0][0]
for i in range(1, len(co)):
gcdc = gcdc.intersection(co[i][0])
if not gcdc:
break
return Mul(*(list(gcdc) + beg[:ii]))
else:
m = ii + len(nx)
return Add(*[Mul(*(list(r) + n[m:])) for r, n in co])
end = list(reversed(
reduce(incommon, (list(reversed(n[1])) for n in co))))
if end:
ii = find(end, nx, right)
if ii is not None:
if not right:
return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co])
else:
return Mul(*end[ii + len(nx):])
# look for single match
hit = None
for i, (r, n) in enumerate(co):
ii = find(n, nx, right)
if ii is not None:
if not hit:
hit = ii, r, n
else:
break
else:
if hit:
ii, r, n = hit
if not right:
return Mul(*(list(r) + n[:ii]))
else:
return Mul(*n[ii + len(nx):])
return S.Zero
def as_expr(self, *gens):
"""
Convert a polynomial to a SymPy expression.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> f = (x**2 + x*y).as_poly(x, y)
>>> f.as_expr()
x**2 + x*y
>>> sin(x).as_expr()
sin(x)
"""
return self
def as_coefficient(self, expr):
"""
Extracts symbolic coefficient at the given expression. In
        other words, this function separates 'self' into the product
of 'expr' and 'expr'-free coefficient. If such separation
is not possible it will return None.
Examples
========
>>> from sympy import E, pi, sin, I, Poly
>>> from sympy.abc import x
>>> E.as_coefficient(E)
1
>>> (2*E).as_coefficient(E)
2
>>> (2*sin(E)*E).as_coefficient(E)
Two terms have E in them so a sum is returned. (If one were
desiring the coefficient of the term exactly matching E then
the constant from the returned expression could be selected.
Or, for greater precision, a method of Poly can be used to
indicate the desired term from which the coefficient is
desired.)
>>> (2*E + x*E).as_coefficient(E)
x + 2
>>> _.args[0] # just want the exact match
2
>>> p = Poly(2*E + x*E); p
Poly(x*E + 2*E, x, E, domain='ZZ')
>>> p.coeff_monomial(E)
2
>>> p.nth(0, 1)
2
Since the following cannot be written as a product containing
E as a factor, None is returned. (If the coefficient ``2*x`` is
desired then the ``coeff`` method should be used.)
>>> (2*E*x + x).as_coefficient(E)
>>> (2*E*x + x).coeff(E)
2*x
>>> (E*(x + 1) + x).as_coefficient(E)
>>> (2*pi*I).as_coefficient(pi*I)
2
>>> (2*I).as_coefficient(pi*I)
See Also
========
        coeff: return sum of terms that have a given factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.Poly.nth: like coeff_monomial but powers of monomial terms are used
"""
r = self.extract_multiplicatively(expr)
if r and not r.has(expr):
return r
def as_independent(self, *deps, **hint) -> tuple[Expr, Expr]:
"""
        A mostly naive separation of a Mul or Add into arguments that are and
        are not dependent on deps. To obtain as complete a separation of variables
as possible, use a separation method first, e.g.:
* separatevars() to change Mul, Add and Pow (including exp) into Mul
* .expand(mul=True) to change Add or Mul into Add
* .expand(log=True) to change log expr into an Add
The only non-naive thing that is done here is to respect noncommutative
ordering of variables and to always return (0, 0) for `self` of zero
regardless of hints.
For nonzero `self`, the returned tuple (i, d) has the
following interpretation:
* i will have no variable that appears in deps
* d will either have terms that contain variables that are in deps, or
be equal to 0 (when self is an Add) or 1 (when self is a Mul)
* if self is an Add then self = i + d
* if self is a Mul then self = i*d
* otherwise (self, S.One) or (S.One, self) is returned.
To force the expression to be treated as an Add, use the hint as_Add=True
Examples
========
-- self is an Add
>>> from sympy import sin, cos, exp
>>> from sympy.abc import x, y, z
>>> (x + x*y).as_independent(x)
(0, x*y + x)
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> (2*x*sin(x) + y + x + z).as_independent(x)
(y + z, 2*x*sin(x) + x)
>>> (2*x*sin(x) + y + x + z).as_independent(x, y)
(z, 2*x*sin(x) + x + y)
-- self is a Mul
>>> (x*sin(x)*cos(y)).as_independent(x)
(cos(y), x*sin(x))
non-commutative terms cannot always be separated out when self is a Mul
>>> from sympy import symbols
>>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
>>> (n1 + n1*n2).as_independent(n2)
(n1, n1*n2)
>>> (n2*n1 + n1*n2).as_independent(n2)
(0, n1*n2 + n2*n1)
>>> (n1*n2*n3).as_independent(n1)
(1, n1*n2*n3)
>>> (n1*n2*n3).as_independent(n2)
(n1, n2*n3)
>>> ((x-n1)*(x-y)).as_independent(x)
(1, (x - y)*(x - n1))
-- self is anything else:
>>> (sin(x)).as_independent(x)
(1, sin(x))
>>> (sin(x)).as_independent(y)
(sin(x), 1)
>>> exp(x+y).as_independent(x)
(1, exp(x + y))
-- force self to be treated as an Add:
>>> (3*x).as_independent(x, as_Add=True)
(0, 3*x)
-- force self to be treated as a Mul:
>>> (3+x).as_independent(x, as_Add=False)
(1, x + 3)
>>> (-3+x).as_independent(x, as_Add=False)
(1, x - 3)
Note how the below differs from the above in making the
constant on the dep term positive.
>>> (y*(-3+x)).as_independent(x)
(y, x - 3)
-- use .as_independent() for true independence testing instead
of .has(). The former considers only symbols in the free
symbols while the latter considers all symbols
>>> from sympy import Integral
>>> I = Integral(x, (x, 1, 2))
>>> I.has(x)
True
>>> x in I.free_symbols
False
>>> I.as_independent(x) == (I, 1)
True
>>> (I + x).as_independent(x) == (I, x)
True
Note: when trying to get independent terms, a separation method
might need to be used first. In this case, it is important to keep
track of what you send to this routine so you know how to interpret
the returned values
>>> from sympy import separatevars, log
>>> separatevars(exp(x+y)).as_independent(x)
(exp(y), exp(x))
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> separatevars(x + x*y).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).expand(mul=True).as_independent(y)
(x, x*y)
>>> a, b=symbols('a b', positive=True)
>>> (log(a*b).expand(log=True)).as_independent(b)
(log(a), log(b))
See Also
========
.separatevars(), .expand(log=True), sympy.core.add.Add.as_two_terms(),
sympy.core.mul.Mul.as_two_terms(), .as_coeff_add(), .as_coeff_mul()
"""
from .symbol import Symbol
from .add import _unevaluated_Add
from .mul import _unevaluated_Mul
if self is S.Zero:
return (self, self)
func = self.func
if hint.get('as_Add', isinstance(self, Add) ):
want = Add
else:
want = Mul
# sift out deps into symbolic and other and ignore
# all symbols but those that are in the free symbols
sym = set()
other = []
for d in deps:
if isinstance(d, Symbol): # Symbol.is_Symbol is True
sym.add(d)
else:
other.append(d)
def has(e):
"""return the standard has() if there are no literal symbols, else
check to see that symbol-deps are in the free symbols."""
has_other = e.has(*other)
if not sym:
return has_other
return has_other or e.has(*(e.free_symbols & sym))
if (want is not func or
func is not Add and func is not Mul):
if has(self):
return (want.identity, self)
else:
return (self, want.identity)
else:
if func is Add:
args = list(self.args)
else:
args, nc = self.args_cnc()
d = sift(args, has)
depend = d[True]
indep = d[False]
if func is Add: # all terms were treated as commutative
return (Add(*indep), _unevaluated_Add(*depend))
else: # handle noncommutative by stopping at first dependent term
for i, n in enumerate(nc):
if has(n):
depend.extend(nc[i:])
break
indep.append(n)
return Mul(*indep), (
Mul(*depend, evaluate=False) if nc else
_unevaluated_Mul(*depend))
def as_real_imag(self, deep=True, **hints):
"""Performs complex expansion on 'self' and returns a tuple
containing collected both real and imaginary parts. This
method cannot be confused with re() and im() functions,
which does not perform complex expansion at evaluation.
However it is possible to expand both re() and im()
functions and get exactly the same results as with
a single call to this function.
>>> from sympy import symbols, I
>>> x, y = symbols('x,y', real=True)
>>> (x + y*I).as_real_imag()
(x, y)
>>> from sympy.abc import z, w
>>> (z + w*I).as_real_imag()
(re(z) - im(w), re(w) + im(z))
"""
if hints.get('ignore') == self:
return None
else:
from sympy.functions.elementary.complexes import im, re
return (re(self), im(self))
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power. The keys are the bases of the factors and the
values, the corresponding exponents. The resulting dictionary should
be used with caution if the expression is a Mul and contains non-
commutative factors since the order that they appeared will be lost in
the dictionary.
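As a rough illustration (the defaultdict is shown here converted to a
plain dict for readability; the exact printed form may differ):
>>> from sympy.abc import x, y
>>> dict((x*y**2).as_powers_dict()) # doctest: +SKIP
{x: 1, y: 2}
>>> dict((x**3).as_powers_dict()) # doctest: +SKIP
{x: 3}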
See Also
========
as_ordered_factors: An alternative for noncommutative applications,
returning an ordered list of factors.
args_cnc: Similar to as_ordered_factors, but guarantees separation
of commutative and noncommutative factors.
"""
d = defaultdict(int)
d.update(dict([self.as_base_exp()]))
return d
def as_coefficients_dict(self, *syms):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0.
If symbols ``syms`` are provided, any multiplicative terms
independent of them will be considered a coefficient and a
regular dictionary of syms-dependent generators as keys and
their corresponding coefficients as values will be returned.
Examples
========
>>> from sympy.abc import a, x, y
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
>>> (3*a*x).as_coefficients_dict(x)
{x: 3*a}
>>> (3*a*x).as_coefficients_dict(y)
{1: 3*a*x}
"""
d = defaultdict(list)
if not syms:
for ai in Add.make_args(self):
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
else:
ind, dep = self.as_independent(*syms, as_Add=True)
for i in Add.make_args(dep):
if i.is_Mul:
c, x = i.as_coeff_mul(*syms)
if c is S.One:
d[i].append(c)
else:
d[i._new_rawargs(*x)].append(c)
elif i:
d[i].append(S.One)
d = {k: Add(*d[k]) for k in d}
if ind is not S.Zero:
d.update({S.One: ind})
di = defaultdict(int)
di.update(d)
return di
def as_base_exp(self) -> tuple[Expr, Expr]:
# a -> b ** e
return self, S.One
def as_coeff_mul(self, *deps, **kwargs) -> tuple[Expr, tuple[Expr, ...]]:
"""Return the tuple (c, args) where self is written as a Mul, ``m``.
c should be a Rational multiplied by any factors of the Mul that are
independent of deps.
args should be a tuple of all other factors of m; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you do not know if self is a Mul or not but
you want to treat self as a Mul or if you want to process the
individual arguments of the tail of self as a Mul.
- if you know self is a Mul and want only the head, use self.args[0];
- if you do not want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail;
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_mul()
(3, ())
>>> (3*x*y).as_coeff_mul()
(3, (x, y))
>>> (3*x*y).as_coeff_mul(x)
(3*y, (x,))
>>> (3*y).as_coeff_mul(x)
(3*y, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.One, (self,)
def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:
"""Return the tuple (c, args) where self is written as an Add, ``a``.
c should be a Rational added to any terms of the Add that are
independent of deps.
args should be a tuple of all other terms of ``a``; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you do not know if self is an Add or not but
you want to treat self as an Add or if you want to process the
individual arguments of the tail of self as an Add.
- if you know self is an Add and want only the head, use self.args[0];
- if you do not want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail.
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_add()
(3, ())
>>> (3 + x).as_coeff_add()
(3, (x,))
>>> (3 + x + y).as_coeff_add(x)
(y + 3, (x,))
>>> (3 + y).as_coeff_add(x)
(y + 3, ())
"""
if deps:
if not self.has_free(*deps):
return self, tuple()
return S.Zero, (self,)
def primitive(self):
"""Return the positive Rational that can be extracted non-recursively
from every term of self (i.e., self is treated like an Add). This is
like the as_coeff_Mul() method but primitive always extracts a positive
Rational (never a negative or a Float).
Examples
========
>>> from sympy.abc import x
>>> (3*(x + 1)**2).primitive()
(3, (x + 1)**2)
>>> a = (6*x + 2); a.primitive()
(2, 3*x + 1)
>>> b = (x/2 + 3); b.primitive()
(1/2, x + 6)
>>> (a*b).primitive() == (1, a*b)
True
"""
if not self:
return S.One, S.Zero
c, r = self.as_coeff_Mul(rational=True)
if c.is_negative:
c, r = -c, -r
return c, r
def as_content_primitive(self, radical=False, clear=True):
"""This method should recursively remove a Rational from all arguments
and return that (content) and the new self (primitive). The content
should always be positive and ``Mul(*foo.as_content_primitive()) == foo``.
The primitive need not be in canonical form and should try to preserve
the underlying structure if possible (i.e. expand_mul should not be
applied to self).
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y, z
>>> eq = 2 + 2*x + 2*y*(3 + 3*y)
The as_content_primitive function is recursive and retains structure:
>>> eq.as_content_primitive()
(2, x + 3*y*(y + 1) + 1)
Integer powers will have Rationals extracted from the base:
>>> ((2 + 6*x)**2).as_content_primitive()
(4, (3*x + 1)**2)
>>> ((2 + 6*x)**(2*y)).as_content_primitive()
(1, (2*(3*x + 1))**(2*y))
Terms may end up joining once their as_content_primitives are added:
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(11, x*(y + 1))
>>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(9, x*(y + 1))
>>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive()
(1, 6.0*x*(y + 1) + 3*z*(y + 1))
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive()
(121, x**2*(y + 1)**2)
>>> ((x*(1 + y) + 0.4*x*(3 + 3*y))**2).as_content_primitive()
(1, 4.84*x**2*(y + 1)**2)
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
If clear=False (default is True) then content will not be removed
from an Add if it can be distributed to leave one or more
terms with integer coefficients.
>>> (x/2 + y).as_content_primitive()
(1/2, x + 2*y)
>>> (x/2 + y).as_content_primitive(clear=False)
(1, x/2 + y)
"""
return S.One, self
def as_numer_denom(self):
""" expression -> a/b -> a, b
This is just a stub; it should be overridden in an object's
class to return anything other than ``(self, S.One)``.
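A brief illustration (the actual splitting is performed by the
Mul and Pow overrides of this method, not by this stub):
>>> from sympy.abc import x, y
>>> (x/y).as_numer_denom()
(x, y)
>>> (x**-2).as_numer_denom()
(1, x**2)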
See Also
========
normal: return ``a/b`` instead of ``(a, b)``
"""
return self, S.One
def normal(self):
""" expression -> a/b
See Also
========
as_numer_denom: return ``(a, b)`` instead of ``a/b``
"""
from .mul import _unevaluated_Mul
n, d = self.as_numer_denom()
if d is S.One:
return n
if d.is_Number:
return _unevaluated_Mul(n, 1/d)
else:
return n/d
def extract_multiplicatively(self, c):
"""Return None if it's not possible to make self in the form
c * something in a nice way, i.e. preserving the properties
of arguments of self.
Examples
========
>>> from sympy import symbols, Rational
>>> x, y = symbols('x,y', real=True)
>>> ((x*y)**3).extract_multiplicatively(x**2 * y)
x*y**2
>>> ((x*y)**3).extract_multiplicatively(x**4 * y)
>>> (2*x).extract_multiplicatively(2)
x
>>> (2*x).extract_multiplicatively(3)
>>> (Rational(1, 2)*x).extract_multiplicatively(3)
x/6
"""
from sympy.functions.elementary.exponential import exp
from .add import _unevaluated_Add
c = sympify(c)
if self is S.NaN:
return None
if c is S.One:
return self
elif c == self:
return S.One
if c.is_Add:
cc, pc = c.primitive()
if cc is not S.One:
c = Mul(cc, pc, evaluate=False)
if c.is_Mul:
a, b = c.as_two_terms()
x = self.extract_multiplicatively(a)
if x is not None:
return x.extract_multiplicatively(b)
else:
return x
quotient = self / c
if self.is_Number:
if self is S.Infinity:
if c.is_positive:
return S.Infinity
elif self is S.NegativeInfinity:
if c.is_negative:
return S.Infinity
elif c.is_positive:
return S.NegativeInfinity
elif self is S.ComplexInfinity:
if not c.is_zero:
return S.ComplexInfinity
elif self.is_Integer:
if not quotient.is_Integer:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Rational:
if not quotient.is_Rational:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Float:
if not quotient.is_Float:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit:
if quotient.is_Mul and len(quotient.args) == 2:
if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self:
return quotient
elif quotient.is_Integer and c.is_Number:
return quotient
elif self.is_Add:
cs, ps = self.primitive()
# assert cs >= 1
if c.is_Number and c is not S.NegativeOne:
# assert c != 1 (handled at top)
if cs is not S.One:
if c.is_negative:
xc = -(cs.extract_multiplicatively(-c))
else:
xc = cs.extract_multiplicatively(c)
if xc is not None:
return xc*ps # rely on 2-arg Mul to restore Add
return # |c| != 1 can only be extracted from cs
if c == ps:
return cs
# check args of ps
newargs = []
for arg in ps.args:
newarg = arg.extract_multiplicatively(c)
if newarg is None:
return # all or nothing
newargs.append(newarg)
if cs is not S.One:
args = [cs*t for t in newargs]
# args may be in different order
return _unevaluated_Add(*args)
else:
return Add._from_args(newargs)
elif self.is_Mul:
args = list(self.args)
for i, arg in enumerate(args):
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
args[i] = newarg
return Mul(*args)
elif self.is_Pow or isinstance(self, exp):
sb, se = self.as_base_exp()
cb, ce = c.as_base_exp()
if cb == sb:
new_exp = se.extract_additively(ce)
if new_exp is not None:
return Pow(sb, new_exp)
elif c == sb:
new_exp = self.exp.extract_additively(1)
if new_exp is not None:
return Pow(sb, new_exp)
def extract_additively(self, c):
"""Return self - c if it's possible to subtract c from self and
make all matching coefficients move towards zero, else return None.
Examples
========
>>> from sympy.abc import x, y
>>> e = 2*x + 3
>>> e.extract_additively(x + 1)
x + 2
>>> e.extract_additively(3*x)
>>> e.extract_additively(4)
>>> (y*(x + 1)).extract_additively(x + 1)
>>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1)
(x + 1)*(x + 2*y) + 3
See Also
========
extract_multiplicatively
coeff
as_coefficient
"""
c = sympify(c)
if self is S.NaN:
return None
if c.is_zero:
return self
elif c == self:
return S.Zero
elif self == S.Zero:
return None
if self.is_Number:
if not c.is_Number:
return None
co = self
diff = co - c
# XXX should we match types? i.e should 3 - .1 succeed?
if (co > 0 and diff > 0 and diff < co or
co < 0 and diff < 0 and diff > co):
return diff
return None
if c.is_Number:
co, t = self.as_coeff_Add()
xa = co.extract_additively(c)
if xa is None:
return None
return xa + t
# handle the args[0].is_Number case separately
# since we will have trouble looking for the coeff of
# a number.
if c.is_Add and c.args[0].is_Number:
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
h, t = c.as_coeff_Add()
sh, st = self.as_coeff_Add()
xa = sh.extract_additively(h)
if xa is None:
return None
xa2 = st.extract_additively(t)
if xa2 is None:
return None
return xa + xa2
# whole term as a term factor
co, diff = _corem(self, c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
coeffs = []
for a in Add.make_args(c):
ac, at = a.as_coeff_Mul()
co = self.coeff(at)
if not co:
return None
coc, cot = co.as_coeff_Add()
xa = coc.extract_additively(ac)
if xa is None:
return None
self -= co*at
coeffs.append((cot + xa)*at)
coeffs.append(self)
return Add(*coeffs)
@property
def expr_free_symbols(self):
"""
Like ``free_symbols``, but returns the free symbols only if
they are contained in an expression node.
Examples
========
>>> from sympy.abc import x, y
>>> (x + y).expr_free_symbols # doctest: +SKIP
{x, y}
If the expression is contained in a non-expression object, do not return
the free symbols. Compare:
>>> from sympy import Tuple
>>> t = Tuple(x + y)
>>> t.expr_free_symbols # doctest: +SKIP
set()
>>> t.free_symbols
{x, y}
"""
sympy_deprecation_warning("""
The expr_free_symbols property is deprecated. Use free_symbols to get
the free symbols of an expression.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-expr-free-symbols")
return {j for i in self.args for j in i.expr_free_symbols}
def could_extract_minus_sign(self):
"""Return True if self has -1 as a leading factor or has
more literal negative signs than positive signs in a sum,
otherwise False.
Examples
========
>>> from sympy.abc import x, y
>>> e = x - y
>>> {i.could_extract_minus_sign() for i in (e, -e)}
{False, True}
Though ``y - x`` is considered like ``-(x - y)``, since it
is in a product without a leading factor of -1, the result is
false below:
>>> (x*(y - x)).could_extract_minus_sign()
False
To put something in canonical form with respect to sign, use `signsimp`:
>>> from sympy import signsimp
>>> signsimp(x*(y - x))
-x*(x - y)
>>> _.could_extract_minus_sign()
True
"""
return False
def extract_branch_factor(self, allow_half=False):
"""
Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way.
Return (z, n).
>>> from sympy import exp_polar, I, pi
>>> from sympy.abc import x, y
>>> exp_polar(I*pi).extract_branch_factor()
(exp_polar(I*pi), 0)
>>> exp_polar(2*I*pi).extract_branch_factor()
(1, 1)
>>> exp_polar(-pi*I).extract_branch_factor()
(exp_polar(I*pi), -1)
>>> exp_polar(3*pi*I + x).extract_branch_factor()
(exp_polar(x + I*pi), 1)
>>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor()
(y*exp_polar(2*pi*x), -1)
>>> exp_polar(-I*pi/2).extract_branch_factor()
(exp_polar(-I*pi/2), 0)
If allow_half is True, also extract exp_polar(I*pi):
>>> exp_polar(I*pi).extract_branch_factor(allow_half=True)
(1, 1/2)
>>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True)
(1, 1)
>>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True)
(1, 3/2)
>>> exp_polar(-I*pi).extract_branch_factor(allow_half=True)
(1, -1/2)
"""
from sympy.functions.elementary.exponential import exp_polar
from sympy.functions.elementary.integers import ceiling
n = S.Zero
res = S.One
args = Mul.make_args(self)
exps = []
for arg in args:
if isinstance(arg, exp_polar):
exps += [arg.exp]
else:
res *= arg
piimult = S.Zero
extras = []
ipi = S.Pi*S.ImaginaryUnit
while exps:
exp = exps.pop()
if exp.is_Add:
exps += exp.args
continue
if exp.is_Mul:
coeff = exp.as_coefficient(ipi)
if coeff is not None:
piimult += coeff
continue
extras += [exp]
if piimult.is_number:
coeff = piimult
tail = ()
else:
coeff, tail = piimult.as_coeff_add(*piimult.free_symbols)
# round down to nearest multiple of 2
branchfact = ceiling(coeff/2 - S.Half)*2
n += branchfact/2
c = coeff - branchfact
if allow_half:
nc = c.extract_additively(1)
if nc is not None:
n += S.Half
c = nc
newexp = ipi*Add(*((c, ) + tail)) + Add(*extras)
if newexp != 0:
res *= exp_polar(newexp)
return res, n
def is_polynomial(self, *syms):
r"""
Return True if self is a polynomial in syms and False otherwise.
This checks if self is an exact polynomial in syms. This function
returns False for expressions that are "polynomials" with symbolic
exponents. Thus, you should be able to apply polynomial algorithms to
expressions for which this returns True, and Poly(expr, \*syms) should
work if and only if expr.is_polynomial(\*syms) returns True. The
polynomial does not have to be in expanded form. If no symbols are
given, all free symbols in the expression will be used.
This is not part of the assumptions system. You cannot do
Symbol('z', polynomial=True).
Examples
========
>>> from sympy import Symbol, Function
>>> x = Symbol('x')
>>> ((x**2 + 1)**4).is_polynomial(x)
True
>>> ((x**2 + 1)**4).is_polynomial()
True
>>> (2**x + 1).is_polynomial(x)
False
>>> (2**x + 1).is_polynomial(2**x)
True
>>> f = Function('f')
>>> (f(x) + 1).is_polynomial(x)
False
>>> (f(x) + 1).is_polynomial(f(x))
True
>>> (1/f(x) + 1).is_polynomial(f(x))
False
>>> n = Symbol('n', nonnegative=True, integer=True)
>>> (x**n + 1).is_polynomial(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a polynomial to
become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)
>>> a.is_polynomial(y)
False
>>> factor(a)
y + 1
>>> factor(a).is_polynomial(y)
True
>>> b = (y**2 + 2*y + 1)/(y + 1)
>>> b.is_polynomial(y)
False
>>> cancel(b)
y + 1
>>> cancel(b).is_polynomial(y)
True
See also .is_rational_function()
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if not syms:
return True
return self._eval_is_polynomial(syms)
def _eval_is_polynomial(self, syms):
if self in syms:
return True
if not self.has_free(*syms):
# constant polynomial
return True
# subclasses should return True or False
def is_rational_function(self, *syms):
"""
Test whether function is a ratio of two polynomials in the given
symbols, syms. When syms is not given, all free symbols will be used.
The rational function does not have to be in expanded or in any kind of
canonical form.
This function returns False for expressions that are "rational
functions" with symbolic exponents. Thus, you should be able to call
.as_numer_denom() and apply polynomial algorithms to the result for
expressions for which this returns True.
This is not part of the assumptions system. You cannot do
Symbol('z', rational_function=True).
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.abc import x, y
>>> (x/y).is_rational_function()
True
>>> (x**2).is_rational_function()
True
>>> (x/sin(y)).is_rational_function(y)
False
>>> n = Symbol('n', integer=True)
>>> (x**n + 1).is_rational_function(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a rational function
to become one.
>>> from sympy import sqrt, factor
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)/y
>>> a.is_rational_function(y)
False
>>> factor(a)
(y + 1)/y
>>> factor(a).is_rational_function(y)
True
See also is_algebraic_expr().
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if not syms:
return self not in _illegal
return self._eval_is_rational_function(syms)
def _eval_is_rational_function(self, syms):
if self in syms:
return True
if not self.has_xfree(syms):
return True
# subclasses should return True or False
def is_meromorphic(self, x, a):
"""
This tests whether an expression is meromorphic as
a function of the given symbol ``x`` at the point ``a``.
This method is intended as a quick test that will return
None if no decision can be made without simplification or
more detailed analysis.
Examples
========
>>> from sympy import zoo, log, sin, sqrt
>>> from sympy.abc import x
>>> f = 1/x**2 + 1 - 2*x**3
>>> f.is_meromorphic(x, 0)
True
>>> f.is_meromorphic(x, 1)
True
>>> f.is_meromorphic(x, zoo)
True
>>> g = x**log(3)
>>> g.is_meromorphic(x, 0)
False
>>> g.is_meromorphic(x, 1)
True
>>> g.is_meromorphic(x, zoo)
False
>>> h = sin(1/x)*x**2
>>> h.is_meromorphic(x, 0)
False
>>> h.is_meromorphic(x, 1)
True
>>> h.is_meromorphic(x, zoo)
True
Multivalued functions are considered meromorphic when their
branches are meromorphic. Thus most functions are meromorphic
everywhere except at essential singularities and branch points.
In particular, they will be meromorphic also on branch cuts
except at their endpoints.
>>> log(x).is_meromorphic(x, -1)
True
>>> log(x).is_meromorphic(x, 0)
False
>>> sqrt(x).is_meromorphic(x, -1)
True
>>> sqrt(x).is_meromorphic(x, 0)
False
"""
if not x.is_symbol:
raise TypeError("{} should be of symbol type".format(x))
a = sympify(a)
return self._eval_is_meromorphic(x, a)
def _eval_is_meromorphic(self, x, a):
if self == x:
return True
if not self.has_free(x):
return True
# subclasses should return True or False
def is_algebraic_expr(self, *syms):
"""
This tests whether a given expression is algebraic or not, in the
given symbols, syms. When syms is not given, all free symbols
will be used. The expression does not have to be in expanded
or in any kind of canonical form.
This function returns False for expressions that are "algebraic
expressions" with symbolic exponents. This is a simple extension to the
is_rational_function, including rational exponentiation.
Examples
========
>>> from sympy import Symbol, sqrt
>>> x = Symbol('x', real=True)
>>> sqrt(1 + x).is_rational_function()
False
>>> sqrt(1 + x).is_algebraic_expr()
True
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be an algebraic
expression to become one.
>>> from sympy import exp, factor
>>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1)
>>> a.is_algebraic_expr(x)
False
>>> factor(a).is_algebraic_expr()
True
See Also
========
is_rational_function()
References
==========
.. [1] https://en.wikipedia.org/wiki/Algebraic_expression
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if not syms:
return True
return self._eval_is_algebraic_expr(syms)
def _eval_is_algebraic_expr(self, syms):
if self in syms:
return True
if not self.has_free(*syms):
return True
# subclasses should return True or False
###################################################################################
##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ##################
###################################################################################
def series(self, x=None, x0=0, n=6, dir="+", logx=None, cdir=0):
"""
Series expansion of "self" around ``x = x0`` yielding either terms of
the series one by one (the lazy series given when n=None), else
all the terms at once when n != None.
Returns the series expansion of "self" around the point ``x = x0``
with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6).
If ``x=None`` and ``self`` is univariate, the univariate symbol will
be supplied, otherwise an error will be raised.
Parameters
==========
expr : Expression
The expression whose series is to be expanded.
x : Symbol
It is the variable of the expression to be calculated.
x0 : Value
The value around which ``x`` is calculated. Can be any value
from ``-oo`` to ``oo``.
n : Value
The value used to represent the order in terms of ``x**n``,
up to which the series is to be expanded.
dir : String, optional
The series-expansion can be bi-directional. If ``dir="+"``,
then (x->x0+). If ``dir="-"``, then (x->x0-). For infinite
``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined
from the direction of the infinity (i.e., ``dir="-"`` for
``oo``).
logx : optional
It is used to replace any log(x) in the returned series with a
symbolic value rather than evaluating the actual value.
cdir : optional
It stands for complex direction, and indicates the direction
from which the expansion needs to be evaluated.
Examples
========
>>> from sympy import cos, exp, tan
>>> from sympy.abc import x, y
>>> cos(x).series()
1 - x**2/2 + x**4/24 + O(x**6)
>>> cos(x).series(n=4)
1 - x**2/2 + O(x**4)
>>> cos(x).series(x, x0=1, n=2)
cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1))
>>> e = cos(x + exp(y))
>>> e.series(y, n=2)
cos(x + 1) - y*sin(x + 1) + O(y**2)
>>> e.series(x, n=2)
cos(exp(y)) - x*sin(exp(y)) + O(x**2)
If ``n=None`` then a generator of the series terms will be returned.
>>> term=cos(x).series(n=None)
>>> [next(term) for i in range(2)]
[1, -x**2/2]
For ``dir=+`` (default) the series is calculated from the right and
for ``dir=-`` the series from the left. For smooth functions this
flag will not alter the results.
>>> abs(x).series(dir="+")
x
>>> abs(x).series(dir="-")
-x
>>> f = tan(x)
>>> f.series(x, 2, 6, "+")
tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) +
(x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 +
5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 +
2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2))
>>> f.series(x, 2, 3, "-")
tan(2) + (2 - x)*(-tan(2)**2 - 1) + (2 - x)**2*(tan(2)**3 + tan(2))
+ O((x - 2)**3, (x, 2))
For rational expressions this method may return the original expression without the Order term.
>>> (1/x).series(x, n=8)
1/x
Returns
=======
Expr : Expression
Series expansion of the expression about x0
Raises
======
TypeError
If "n" and "x0" are infinity objects
PoleError
If "x0" is an infinity object
"""
if x is None:
syms = self.free_symbols
if not syms:
return self
elif len(syms) > 1:
raise ValueError('x must be given for multivariate functions.')
x = syms.pop()
from .symbol import Dummy, Symbol
if isinstance(x, Symbol):
dep = x in self.free_symbols
else:
d = Dummy()
dep = d in self.xreplace({x: d}).free_symbols
if not dep:
if n is None:
return (s for s in [self])
else:
return self
if len(dir) != 1 or dir not in '+-':
raise ValueError("Dir must be '+' or '-'")
x0 = sympify(x0)
cdir = sympify(cdir)
from sympy.functions.elementary.complexes import im, sign
if not cdir.is_zero:
if cdir.is_real:
dir = '+' if cdir.is_positive else '-'
else:
dir = '+' if im(cdir).is_positive else '-'
else:
if x0 and x0.is_infinite:
cdir = sign(x0).simplify()
elif str(dir) == "+":
cdir = S.One
elif str(dir) == "-":
cdir = S.NegativeOne
elif cdir == S.Zero:
cdir = S.One
cdir = cdir/abs(cdir)
if x0 and x0.is_infinite:
from .function import PoleError
try:
s = self.subs(x, cdir/x).series(x, n=n, dir='+', cdir=1)
if n is None:
return (si.subs(x, cdir/x) for si in s)
return s.subs(x, cdir/x)
except PoleError:
s = self.subs(x, cdir*x).aseries(x, n=n)
return s.subs(x, cdir*x)
# use rep to shift origin to x0 and change sign (if dir is negative)
# and undo the process with rep2
if x0 or cdir != 1:
s = self.subs({x: x0 + cdir*x}).series(x, x0=0, n=n, dir='+', logx=logx, cdir=1)
if n is None: # lseries...
return (si.subs({x: x/cdir - x0/cdir}) for si in s)
return s.subs({x: x/cdir - x0/cdir})
# from here on it's x0=0 and dir='+' handling
if x.is_positive is x.is_negative is None or x.is_Symbol is not True:
# replace x with an x that has a positive assumption
xpos = Dummy('x', positive=True)
rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx, cdir=cdir)
if n is None:
return (s.subs(xpos, x) for s in rv)
else:
return rv.subs(xpos, x)
from sympy.series.order import Order
if n is not None: # nseries handling
s1 = self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
o = s1.getO() or S.Zero
if o:
# make sure the requested order is returned
ngot = o.getn()
if ngot > n:
# leave o in its current form (e.g. with x*log(x)) so
# it eats terms properly, then replace it below
if n != 0:
s1 += o.subs(x, x**Rational(n, ngot))
else:
s1 += Order(1, x)
elif ngot < n:
# increase the requested number of terms to get the desired
# number; keep increasing (up to 9) until the received order
# is different from the original order and then predict how
# many additional terms are needed
from sympy.functions.elementary.integers import ceiling
for more in range(1, 9):
s1 = self._eval_nseries(x, n=n + more, logx=logx, cdir=cdir)
newn = s1.getn()
if newn != ngot:
ndo = n + ceiling((n - ngot)*more/(newn - ngot))
s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir)
while s1.getn() < n:
s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir)
ndo += 1
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(n), self))
s1 += Order(x**n, x)
o = s1.getO()
s1 = s1.removeO()
elif s1.has(Order):
# asymptotic expansion
return s1
else:
o = Order(x**n, x)
s1done = s1.doit()
try:
if (s1done + o).removeO() == s1done:
o = S.Zero
except NotImplementedError:
return s1
try:
from sympy.simplify.radsimp import collect
return collect(s1, x) + o
except NotImplementedError:
return s1 + o
else: # lseries handling
def yield_lseries(s):
"""Return terms of lseries one at a time."""
for si in s:
if not si.is_Add:
yield si
continue
# yield terms 1 at a time if possible
# by increasing order until all the
# terms have been returned
yielded = 0
o = Order(si, x)*x
ndid = 0
ndo = len(si.args)
while 1:
do = (si - yielded + o).removeO()
o *= x
if not do or do.is_Order:
continue
if do.is_Add:
ndid += len(do.args)
else:
ndid += 1
yield do
if ndid == ndo:
break
yielded += do
return yield_lseries(self.removeO()._eval_lseries(x, logx=logx, cdir=cdir))
def aseries(self, x=None, n=6, bound=0, hir=False):
"""Asymptotic Series expansion of self.
This is equivalent to ``self.series(x, oo, n)``.
Parameters
==========
self : Expression
The expression whose series is to be expanded.
x : Symbol
It is the variable of the expression to be calculated.
n : Value
The value used to represent the order in terms of ``x**n``,
up to which the series is to be expanded.
hir : Boolean
Set this parameter to be True to produce hierarchical series.
It stops the recursion at an early level and may provide nicer
and more useful results.
bound : Value, Integer
Use the ``bound`` parameter to give a limit on rewriting
coefficients in their normalised form.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
>>> e = sin(1/x + exp(-x)) - sin(1/x)
>>> e.aseries(x)
(1/(24*x**4) - 1/(2*x**2) + 1 + O(x**(-6), (x, oo)))*exp(-x)
>>> e.aseries(x, n=3, hir=True)
-exp(-2*x)*sin(1/x)/2 + exp(-x)*cos(1/x) + O(exp(-3*x), (x, oo))
>>> e = exp(exp(x)/(1 - 1/x))
>>> e.aseries(x)
exp(exp(x)/(1 - 1/x))
>>> e.aseries(x, bound=3) # doctest: +SKIP
exp(exp(x)/x**2)*exp(exp(x)/x)*exp(-exp(x) + exp(x)/(1 - 1/x) - exp(x)/x - exp(x)/x**2)*exp(exp(x))
For rational expressions this method may return the original expression without the Order term.
>>> (1/x).aseries(x, n=8)
1/x
Returns
=======
Expr
Asymptotic series expansion of the expression.
Notes
=====
This algorithm is directly induced from the limit computation algorithm provided by Gruntz.
It mainly uses the mrv and rewrite sub-routines. The overall idea of this algorithm is first
to look for the most rapidly varying subexpression w of a given expression f and then to expand f
in a series in w. The same procedure is then applied recursively to the leading coefficient
until constant coefficients are obtained.
If the most rapidly varying subexpression of a given expression f is f itself,
the algorithm tries to find a normalised representation of the mrv set and rewrites f
using this normalised representation.
If the expansion contains an order term, it will be either ``O(x ** (-n))`` or ``O(w ** (-n))``
where ``w`` belongs to the most rapidly varying expression of ``self``.
References
==========
.. [1] Gruntz, Dominik. A new algorithm for computing asymptotic series.
In: Proc. 1993 Int. Symp. Symbolic and Algebraic Computation. 1993.
pp. 239-244.
.. [2] Gruntz thesis - p90
.. [3] http://en.wikipedia.org/wiki/Asymptotic_expansion
See Also
========
Expr.aseries: See the docstring of this function for complete details of this wrapper.
"""
from .symbol import Dummy
if x.is_positive is x.is_negative is None:
xpos = Dummy('x', positive=True)
return self.subs(x, xpos).aseries(xpos, n, bound, hir).subs(xpos, x)
from .function import PoleError
from sympy.series.gruntz import mrv, rewrite
try:
om, exps = mrv(self, x)
except PoleError:
return self
# We move one level up by replacing `x` by `exp(x)`, and then
# computing the asymptotic series for f(exp(x)). The asymptotic series
# can then be obtained by moving one step back, replacing x by log(x).
from sympy.functions.elementary.exponential import exp, log
from sympy.series.order import Order
if x in om:
s = self.subs(x, exp(x)).aseries(x, n, bound, hir).subs(x, log(x))
if s.getO():
return s + Order(1/x**n, (x, S.Infinity))
return s
k = Dummy('k', positive=True)
# f is rewritten in terms of omega
func, logw = rewrite(exps, om, x, k)
if self in om:
if bound <= 0:
return self
s = (self.exp).aseries(x, n, bound=bound)
s = s.func(*[t.removeO() for t in s.args])
try:
res = exp(s.subs(x, 1/x).as_leading_term(x).subs(x, 1/x))
except PoleError:
res = self
func = exp(self.args[0] - res.args[0]) / k
logw = log(1/res)
s = func.series(k, 0, n)
# Hierarchical series
if hir:
return s.subs(k, exp(logw))
o = s.getO()
terms = sorted(Add.make_args(s.removeO()), key=lambda i: int(i.as_coeff_exponent(k)[1]))
s = S.Zero
has_ord = False
# Then we recursively expand these coefficients one by one into
# their asymptotic series in terms of their most rapidly varying subexpressions.
for t in terms:
coeff, expo = t.as_coeff_exponent(k)
if coeff.has(x):
# Recursive step
snew = coeff.aseries(x, n, bound=bound-1)
if has_ord and snew.getO():
break
elif snew.getO():
has_ord = True
s += (snew * k**expo)
else:
s += t
if not o or has_ord:
return s.subs(k, exp(logw))
return (s + o).subs(k, exp(logw))
def taylor_term(self, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n-times. Subclasses can
redefine it to make it faster by using the "previous_terms".
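As a small sketch of the generic formula (note that functions such as
exp and cos override this method with faster versions):
>>> from sympy.abc import x
>>> (1/(1 - x)).taylor_term(2, x)
x**2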
"""
from .symbol import Dummy
from sympy.functions.combinatorial.factorials import factorial
x = sympify(x)
_x = Dummy('x')
return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / factorial(n)
def lseries(self, x=None, x0=0, dir='+', logx=None, cdir=0):
"""
Wrapper for series yielding an iterator of the terms of the series.
Note: an infinite series will yield an infinite iterator. The following,
for example, will never terminate. It will just keep printing terms
of the sin(x) series::
for term in sin(x).lseries(x):
print(term)
The advantage of lseries() over nseries() is that many times you are
just interested in the next term in the series (i.e. the first term for
example), but you do not know how many you should ask for in nseries()
using the "n" parameter.
See also nseries().
"""
return self.series(x, x0, n=None, dir=dir, logx=logx, cdir=cdir)
def _eval_lseries(self, x, logx=None, cdir=0):
# default implementation of lseries is using nseries(), and adaptively
# increasing the "n". As you can see, it is not very efficient, because
# we are calculating the series over and over again. Subclasses should
# override this method and implement much more efficient yielding of
# terms.
n = 0
series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
while series.is_Order:
n += 1
series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
e = series.removeO()
yield e
if e is S.Zero:
return
while 1:
while 1:
n += 1
series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir).removeO()
if e != series:
break
if (series - self).cancel() is S.Zero:
return
yield series - e
e = series
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):
"""
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we do not have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with fewer terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but only returns self
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
x**y
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
"""
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir, cdir=cdir)
else:
return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
def _eval_nseries(self, x, n, logx, cdir):
"""
Return terms of series for self up to O(x**n) at x=0
from the positive direction.
This is a method that should be overridden in subclasses. Users should
never call this method directly (use .nseries() instead), so you do not
have to write docstrings for _eval_nseries().
"""
raise NotImplementedError(filldedent("""
The _eval_nseries method should be added to
%s to give terms up to O(x**n) at x=0
from the positive direction so it is available when
nseries calls it.""" % self.func)
)
def limit(self, x, xlim, dir='+'):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return limit(self, x, xlim, dir)
def compute_leading_term(self, x, logx=None):
"""
as_leading_term is only allowed for results of .series().
This is a wrapper to compute a series first.
"""
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="compute_leading_term",
useinstead="as_leading_term",
issue=21843,
deprecated_since_version="1.12"
).warn()
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
if self.has(Piecewise):
expr = piecewise_fold(self)
else:
expr = self
if self.removeO() == 0:
return self
from .symbol import Dummy
from sympy.functions.elementary.exponential import log
from sympy.series.order import Order
_logx = logx
logx = Dummy('logx') if logx is None else logx
res = Order(1)
incr = S.One
while res.is_Order:
res = expr._eval_nseries(x, n=1+incr, logx=logx).cancel().powsimp().trigsimp()
incr *= 2
if _logx is None:
res = res.subs(logx, log(x))
return res.as_leading_term(x)
@cacheit
def as_leading_term(self, *symbols, logx=None, cdir=0):
"""
Returns the leading (nonzero) term of the series expansion of self.
The _eval_as_leading_term routines are used to do this, and they must
always return a non-zero value.
Examples
========
>>> from sympy.abc import x
>>> (1 + x + x**2).as_leading_term(x)
1
>>> (1/x**2 + x + x**2).as_leading_term(x)
x**(-2)
"""
if len(symbols) > 1:
c = self
for x in symbols:
c = c.as_leading_term(x, logx=logx, cdir=cdir)
return c
elif not symbols:
return self
x = sympify(symbols[0])
if not x.is_symbol:
raise ValueError('expecting a Symbol but got %s' % x)
if x not in self.free_symbols:
return self
obj = self._eval_as_leading_term(x, logx=logx, cdir=cdir)
if obj is not None:
from sympy.simplify.powsimp import powsimp
return powsimp(obj, deep=True, combine='exp')
raise NotImplementedError('as_leading_term(%s, %s)' % (self, x))
def _eval_as_leading_term(self, x, logx=None, cdir=0):
return self
def as_coeff_exponent(self, x) -> tuple[Expr, Expr]:
""" ``c*x**e -> c,e`` where x can be any symbolic expression.
"""
from sympy.simplify.radsimp import collect
s = collect(self, x)
c, p = s.as_coeff_mul(x)
if len(p) == 1:
b, e = p[0].as_base_exp()
if b == x:
return c, e
return s, S.Zero
def leadterm(self, x, logx=None, cdir=0):
"""
Returns the leading term a*x**b as a tuple (a, b).
Examples
========
>>> from sympy.abc import x
>>> (1+x+x**2).leadterm(x)
(1, 0)
>>> (1/x**2+x+x**2).leadterm(x)
(1, -2)
"""
from .symbol import Dummy
from sympy.functions.elementary.exponential import log
l = self.as_leading_term(x, logx=logx, cdir=cdir)
d = Dummy('logx')
if l.has(log(x)):
l = l.subs(log(x), d)
c, e = l.as_coeff_exponent(x)
if x in c.free_symbols:
raise ValueError(filldedent("""
cannot compute leadterm(%s, %s). The coefficient
should have been free of %s but got %s""" % (self, x, x, c)))
c = c.subs(d, log(x))
return c, e
def as_coeff_Mul(self, rational: bool = False) -> tuple['Number', Expr]:
"""Efficiently extract the coefficient of a product. """
return S.One, self
def as_coeff_Add(self, rational=False) -> tuple['Number', Expr]:
"""Efficiently extract the coefficient of a summation. """
return S.Zero, self
def fps(self, x=None, x0=0, dir=1, hyper=True, order=4, rational=True,
full=False):
"""
Compute the formal power series of self.
See the docstring of the :func:`fps` function in sympy.series.formal for
more information.
"""
from sympy.series.formal import fps
return fps(self, x, x0, dir, hyper, order, rational, full)
def fourier_series(self, limits=None):
Compute the Fourier sine/cosine series of self.
See the docstring of the :func:`fourier_series` in sympy.series.fourier
for more information.
"""
from sympy.series.fourier import fourier_series
return fourier_series(self, limits)
###################################################################################
##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS ####################
###################################################################################
def diff(self, *symbols, **assumptions):
assumptions.setdefault("evaluate", True)
return _derivative_dispatch(self, *symbols, **assumptions)
###########################################################################
###################### EXPRESSION EXPANSION METHODS #######################
###########################################################################
# Relevant subclasses should override _eval_expand_hint() methods. See
# the docstring of expand() for more info.
def _eval_expand_complex(self, **hints):
real, imag = self.as_real_imag(**hints)
return real + S.ImaginaryUnit*imag
@staticmethod
def _expand_hint(expr, hint, deep=True, **hints):
"""
Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.
Returns ``(expr, hit)``, where expr is the (possibly) expanded
``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and
``False`` otherwise.
"""
hit = False
# XXX: Hack to support non-Basic args
# |
# V
if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
sargs = []
for arg in expr.args:
arg, arghit = Expr._expand_hint(arg, hint, **hints)
hit |= arghit
sargs.append(arg)
if hit:
expr = expr.func(*sargs)
if hasattr(expr, hint):
newexpr = getattr(expr, hint)(**hints)
if newexpr != expr:
return (newexpr, True)
return (expr, hit)
@cacheit
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using hints.
See the docstring of the expand() function in sympy.core.function for
more information.
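A brief illustration using the default hints:
>>> from sympy.abc import x, y
>>> ((x + y)**2).expand()
x**2 + 2*x*y + y**2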
"""
from sympy.simplify.radsimp import fraction
hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
log=log, multinomial=multinomial, basic=basic)
expr = self
if hints.pop('frac', False):
n, d = [a.expand(deep=deep, modulus=modulus, **hints)
for a in fraction(self)]
return n/d
elif hints.pop('denom', False):
n, d = fraction(self)
return n/d.expand(deep=deep, modulus=modulus, **hints)
elif hints.pop('numer', False):
n, d = fraction(self)
return n.expand(deep=deep, modulus=modulus, **hints)/d
# Although the hints are sorted here, an earlier hint may get applied
# at a given node in the expression tree before another because of how
# the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y +
# x*z) because while applying log at the top level, log and mul are
# applied at the deeper level in the tree so that when the log at the
# upper level gets applied, the mul has already been applied at the
# lower level.
# Additionally, because hints are only applied once, the expression
# may not be expanded all the way. For example, if mul is applied
# before multinomial, x*(x + 1)**2 won't be expanded all the way. For
# now, we just use a special case to make multinomial run before mul,
# so that at least polynomials will be expanded all the way. In the
# future, smarter heuristics should be applied.
# TODO: Smarter heuristics
def _expand_hint_key(hint):
"""Make multinomial come before mul"""
if hint == 'mul':
return 'mulz'
return hint
for hint in sorted(hints.keys(), key=_expand_hint_key):
use_hint = hints[hint]
if use_hint:
hint = '_eval_expand_' + hint
expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints)
while True:
was = expr
if hints.get('multinomial', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_multinomial', deep=deep, **hints)
if hints.get('mul', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_mul', deep=deep, **hints)
if hints.get('log', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_log', deep=deep, **hints)
if expr == was:
break
if modulus is not None:
modulus = sympify(modulus)
if not modulus.is_Integer or modulus <= 0:
raise ValueError(
"modulus must be a positive integer, got %s" % modulus)
terms = []
for term in Add.make_args(expr):
coeff, tail = term.as_coeff_Mul(rational=True)
coeff %= modulus
if coeff:
terms.append(coeff*tail)
expr = Add(*terms)
return expr
###########################################################################
################### GLOBAL ACTION VERB WRAPPER METHODS ####################
###########################################################################
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals.integrals import integrate
return integrate(self, *args, **kwargs)
def nsimplify(self, constants=(), tolerance=None, full=False):
"""See the nsimplify function in sympy.simplify"""
from sympy.simplify.simplify import nsimplify
return nsimplify(self, constants, tolerance, full)
def separate(self, deep=False, force=False):
"""See the separate function in sympy.simplify"""
from .function import expand_power_base
return expand_power_base(self, deep=deep, force=force)
def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True):
"""See the collect function in sympy.simplify"""
from sympy.simplify.radsimp import collect
return collect(self, syms, func, evaluate, exact, distribute_order_term)
def together(self, *args, **kwargs):
"""See the together function in sympy.polys"""
from sympy.polys.rationaltools import together
return together(self, *args, **kwargs)
def apart(self, x=None, **args):
"""See the apart function in sympy.polys"""
from sympy.polys.partfrac import apart
return apart(self, x, **args)
def ratsimp(self):
"""See the ratsimp function in sympy.simplify"""
from sympy.simplify.ratsimp import ratsimp
return ratsimp(self)
def trigsimp(self, **args):
"""See the trigsimp function in sympy.simplify"""
from sympy.simplify.trigsimp import trigsimp
return trigsimp(self, **args)
def radsimp(self, **kwargs):
"""See the radsimp function in sympy.simplify"""
from sympy.simplify.radsimp import radsimp
return radsimp(self, **kwargs)
def powsimp(self, *args, **kwargs):
"""See the powsimp function in sympy.simplify"""
from sympy.simplify.powsimp import powsimp
return powsimp(self, *args, **kwargs)
def combsimp(self):
"""See the combsimp function in sympy.simplify"""
from sympy.simplify.combsimp import combsimp
return combsimp(self)
def gammasimp(self):
"""See the gammasimp function in sympy.simplify"""
from sympy.simplify.gammasimp import gammasimp
return gammasimp(self)
def factor(self, *gens, **args):
"""See the factor() function in sympy.polys.polytools"""
from sympy.polys.polytools import factor
return factor(self, *gens, **args)
def cancel(self, *gens, **args):
"""See the cancel function in sympy.polys"""
from sympy.polys.polytools import cancel
return cancel(self, *gens, **args)
def invert(self, g, *gens, **args):
"""Return the multiplicative inverse of ``self`` mod ``g``
where ``self`` (and ``g``) may be symbolic expressions.
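For example, for numbers this reduces to a modular inverse:
>>> from sympy import S
>>> S(2).invert(5)
3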
See Also
========
sympy.core.numbers.mod_inverse, sympy.polys.polytools.invert
"""
if self.is_number and getattr(g, 'is_number', True):
from .numbers import mod_inverse
return mod_inverse(self, g)
from sympy.polys.polytools import invert
return invert(self, g, *gens, **args)
def round(self, n=None):
"""Return x rounded to the given decimal place.
If a complex number would result, round is applied to the real
and imaginary components of the number.
Examples
========
>>> from sympy import pi, E, I, S, Number
>>> pi.round()
3
>>> pi.round(2)
3.14
>>> (2*pi + E*I).round()
6 + 3*I
The round method has a chopping effect:
>>> (2*pi + I/10).round()
6
>>> (pi/10 + 2*I).round()
2*I
>>> (pi/10 + E*I).round(2)
0.31 + 2.72*I
Notes
=====
The Python ``round`` function uses the SymPy ``round`` method so it
will always return a SymPy number (not a Python float or int):
>>> isinstance(round(S(123), -2), Number)
True
"""
x = self
if not x.is_number:
raise TypeError("Cannot round symbolic expression")
if not x.is_Atom:
if not pure_complex(x.n(2), or_real=True):
raise TypeError(
'Expected a number but got %s:' % func_name(x))
elif x in _illegal:
return x
if x.is_extended_real is False:
r, i = x.as_real_imag()
return r.round(n) + S.ImaginaryUnit*i.round(n)
if not x:
return S.Zero if n is None else x
p = as_int(n or 0)
if x.is_Integer:
return Integer(round(int(x), p))
digits_to_decimal = _mag(x) # _mag(12) = 2, _mag(.012) = -1
allow = digits_to_decimal + p
precs = [f._prec for f in x.atoms(Float)]
dps = prec_to_dps(max(precs)) if precs else None
if dps is None:
# assume everything is exact so use the Python
# float default or whatever was requested
dps = max(15, allow)
else:
allow = min(allow, dps)
# this will shift all digits to right of decimal
# and give us dps to work with as an int
shift = -digits_to_decimal + dps
extra = 1 # how far we look past known digits
# NOTE
# mpmath will calculate the binary representation to
# an arbitrary number of digits but we must base our
# answer on a finite number of those digits, e.g.
# .575 is 2589569785738035/2**52 in binary.
# mpmath shows us that the first 18 digits are
# >>> Float(.575).n(18)
# 0.574999999999999956
# The default precision is 15 digits and if we ask
# for 15 we get
# >>> Float(.575).n(15)
# 0.575000000000000
# mpmath handles rounding at the 15th digit. But we
# need to be careful since the user might be asking
# for rounding at the last digit and our semantics
# are to round toward the even final digit when there
# is a tie. So the extra digit will be used to make
# that decision. In this case, the value is the same
# to 15 digits:
# >>> Float(.575).n(16)
# 0.5750000000000000
# Now converting this to the 15 known digits gives
# 575000000000000.0
# which rounds to integer
# 5750000000000000
# And now we can round to the desired digit, e.g. at
# the second from the left and we get
# 5800000000000000
# and rescaling that gives
# 0.58
# as the final result.
# If the value is made slightly less than 0.575 we might
# still obtain the same value:
# >>> Float(.575-1e-16).n(16)*10**15
# 574999999999999.8
# What 15 digits best represent the known digits (which are
# to the left of the decimal)? 5750000000000000, the same as
# before. The only way we will round down (in this case) is
# if we declared that we had more than 15 digits of precision.
# For example, if we use 16 digits of precision, the integer
# we deal with is
# >>> Float(.575-1e-16).n(17)*10**16
# 5749999999999998.4
# and this now rounds to 5749999999999998 and (if we round to
# the 2nd digit from the left) we get 5700000000000000.
#
xf = x.n(dps + extra)*Pow(10, shift)
xi = Integer(xf)
# use the last digit to select the value of xi
# nearest to x before rounding at the desired digit
sign = 1 if x > 0 else -1
dif2 = sign*(xf - xi).n(extra)
if dif2 < 0:
raise NotImplementedError(
'not expecting int(x) to round away from 0')
if dif2 > .5:
xi += sign # round away from 0
elif dif2 == .5:
xi += sign if xi%2 else -sign # round toward even
# shift p to the new position
ip = p - shift
# let Python handle the int rounding then rescale
xr = round(xi.p, ip)
# restore scale
rv = Rational(xr, Pow(10, shift))
# return Float or Integer
if rv.is_Integer:
if n is None: # the single-arg case
return rv
# use str or else it won't be a float
return Float(str(rv), dps) # keep same precision
else:
if not allow and rv > self:
allow += 1
return Float(rv, allow)
__round__ = round
def _eval_derivative_matrix_lines(self, x):
from sympy.matrices.expressions.matexpr import _LeftRightArgs
return [_LeftRightArgs([S.One, S.One], higher=self._eval_derivative(x))]
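# A hedged illustration of the half-to-even tie-breaking described in the
# NOTE inside ``Expr.round`` above (output assumes the default 15-digit
# Float precision):
# >>> from sympy import Float
# >>> Float(.575).round(2)
# 0.58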
class AtomicExpr(Atom, Expr):
"""
    A parent class for objects which are both atoms and Exprs.
For example: Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_number = False
is_Atom = True
__slots__ = ()
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _eval_derivative_n_times(self, s, n):
from .containers import Tuple
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices.common import MatrixCommon
if isinstance(s, (MatrixCommon, Tuple, Iterable, MatrixExpr)):
return super()._eval_derivative_n_times(s, n)
from .relational import Eq
from sympy.functions.elementary.piecewise import Piecewise
if self == s:
return Piecewise((self, Eq(n, 0)), (1, Eq(n, 1)), (0, True))
else:
return Piecewise((self, Eq(n, 0)), (0, True))
def _eval_is_polynomial(self, syms):
return True
def _eval_is_rational_function(self, syms):
return self not in _illegal
def _eval_is_meromorphic(self, x, a):
from sympy.calculus.accumulationbounds import AccumBounds
return (not self.is_Number or self.is_finite) and not isinstance(self, AccumBounds)
def _eval_is_algebraic_expr(self, syms):
return True
def _eval_nseries(self, x, n, logx, cdir=0):
return self
@property
def expr_free_symbols(self):
sympy_deprecation_warning("""
The expr_free_symbols property is deprecated. Use free_symbols to get
the free symbols of an expression.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-expr-free-symbols")
return {self}
def _mag(x):
r"""Return integer $i$ such that $0.1 \le x/10^i < 1$
Examples
========
>>> from sympy.core.expr import _mag
>>> from sympy import Float
>>> _mag(Float(.1))
0
>>> _mag(Float(.01))
-1
>>> _mag(Float(1234))
4
"""
from math import log10, ceil, log
xpos = abs(x.n())
if not xpos:
return S.Zero
try:
mag_first_dig = int(ceil(log10(xpos)))
except (ValueError, OverflowError):
mag_first_dig = int(ceil(Float(mpf_log(xpos._mpf_, 53))/log(10)))
# check that we aren't off by 1
if (xpos/10**mag_first_dig) >= 1:
assert 1 <= (xpos/10**mag_first_dig) < 10
mag_first_dig += 1
return mag_first_dig
class UnevaluatedExpr(Expr):
"""
Expression that is not evaluated unless released.
Examples
========
>>> from sympy import UnevaluatedExpr
>>> from sympy.abc import x
>>> x*(1/x)
1
>>> x*UnevaluatedExpr(1/x)
x*1/x
"""
def __new__(cls, arg, **kwargs):
arg = _sympify(arg)
obj = Expr.__new__(cls, arg, **kwargs)
return obj
def doit(self, **hints):
if hints.get("deep", True):
return self.args[0].doit(**hints)
else:
return self.args[0]
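# Illustrative sketch: calling ``doit`` releases the held expression so that
# ordinary evaluation resumes.
# >>> from sympy import UnevaluatedExpr
# >>> from sympy.abc import x
# >>> (x*UnevaluatedExpr(1/x)).doit()
# 1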
def unchanged(func, *args):
"""Return True if `func` applied to the `args` is unchanged.
Can be used instead of `assert foo == foo`.
Examples
========
>>> from sympy import Piecewise, cos, pi
>>> from sympy.core.expr import unchanged
>>> from sympy.abc import x
>>> unchanged(cos, 1) # instead of assert cos(1) == cos(1)
True
>>> unchanged(cos, pi)
False
Comparison of args uses the builtin capabilities of the object's
arguments to test for equality so args can be defined loosely. Here,
the ExprCondPair arguments of Piecewise compare as equal to the
tuples that can be used to create the Piecewise:
>>> unchanged(Piecewise, (x, x > 1), (0, True))
True
"""
f = func(*args)
return f.func == func and f.args == args
class ExprBuilder:
def __init__(self, op, args=None, validator=None, check=True):
if not hasattr(op, "__call__"):
raise TypeError("op {} needs to be callable".format(op))
self.op = op
if args is None:
self.args = []
else:
self.args = args
self.validator = validator
if (validator is not None) and check:
self.validate()
@staticmethod
def _build_args(args):
return [i.build() if isinstance(i, ExprBuilder) else i for i in args]
def validate(self):
if self.validator is None:
return
args = self._build_args(self.args)
self.validator(*args)
def build(self, check=True):
args = self._build_args(self.args)
if self.validator and check:
self.validator(*args)
return self.op(*args)
def append_argument(self, arg, check=True):
self.args.append(arg)
if self.validator and check:
            self.validate()  # validate() rebuilds and checks self.args itself
def __getitem__(self, item):
if item == 0:
return self.op
else:
return self.args[item-1]
def __repr__(self):
return str(self.build())
def search_element(self, elem):
for i, arg in enumerate(self.args):
if isinstance(arg, ExprBuilder):
                ret = arg.search_element(elem)  # recurse into nested builders
if ret is not None:
return (i,) + ret
elif id(arg) == id(elem):
return (i,)
return None
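# Illustrative sketch of ExprBuilder: construction of the node is deferred
# until ``build`` is called, and nested builders are built recursively.
# >>> from sympy import Add, Mul
# >>> from sympy.abc import x, y
# >>> ExprBuilder(Add, [x, ExprBuilder(Mul, [2, y])]).build()
# x + 2*y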
from .mul import Mul
from .add import Add
from .power import Pow
from .function import Function, _derivative_dispatch
from .mod import Mod
from .exprtools import factor_terms
from .numbers import Float, Integer, Rational, _illegal
from __future__ import annotations
from .basic import Atom, Basic
from .sorting import ordered
from .evalf import EvalfMixin
from .function import AppliedUndef
from .singleton import S
from .sympify import _sympify, SympifyError
from .parameters import global_parameters
from .logic import fuzzy_bool, fuzzy_xor, fuzzy_and, fuzzy_not
from sympy.logic.boolalg import Boolean, BooleanAtom
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import sift
from sympy.utilities.misc import filldedent
__all__ = (
'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge',
'Relational', 'Equality', 'Unequality', 'StrictLessThan', 'LessThan',
'StrictGreaterThan', 'GreaterThan',
)
from .expr import Expr
from sympy.multipledispatch import dispatch
from .containers import Tuple
from .symbol import Symbol
def _nontrivBool(side):
return isinstance(side, Boolean) and \
not isinstance(side, Atom)
# Note, see issue 4986. Ideally, we wouldn't want to subclass both Boolean
# and Expr.
# from .. import Expr
def _canonical(cond):
# return a condition in which all relationals are canonical
reps = {r: r.canonical for r in cond.atoms(Relational)}
return cond.xreplace(reps)
# XXX: AttributeError was being caught here but it wasn't triggered by any of
# the tests so I've removed it...
def _canonical_coeff(rel):
# return -2*x + 1 < 0 as x > 1/2
# XXX make this part of Relational.canonical?
rel = rel.canonical
if not rel.is_Relational or rel.rhs.is_Boolean:
return rel # Eq(x, True)
b, l = rel.lhs.as_coeff_Add(rational=True)
m, lhs = l.as_coeff_Mul(rational=True)
rhs = (rel.rhs - b)/m
if m < 0:
return rel.reversed.func(lhs, rhs)
return rel.func(lhs, rhs)
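# Illustrative sketch mirroring the comment above: the symbol ends up alone
# with a unit coefficient.
# >>> from sympy.abc import x
# >>> _canonical_coeff(-2*x + 1 < 0)
# x > 1/2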
class Relational(Boolean, EvalfMixin):
"""Base class for all relation types.
Explanation
===========
Subclasses of Relational should generally be instantiated directly, but
Relational can be instantiated with a valid ``rop`` value to dispatch to
the appropriate subclass.
Parameters
==========
rop : str or None
Indicates what subclass to instantiate. Valid values can be found
in the keys of Relational.ValidRelationOperator.
Examples
========
>>> from sympy import Rel
>>> from sympy.abc import x, y
>>> Rel(y, x + x**2, '==')
Eq(y, x**2 + x)
A relation's type can be defined upon creation using ``rop``.
The relation type of an existing expression can be obtained
using its ``rel_op`` property.
Here is a table of all the relation types, along with their
``rop`` and ``rel_op`` values:
+---------------------+----------------------------+------------+
|Relation |``rop`` |``rel_op`` |
+=====================+============================+============+
|``Equality`` |``==`` or ``eq`` or ``None``|``==`` |
+---------------------+----------------------------+------------+
|``Unequality`` |``!=`` or ``ne`` |``!=`` |
+---------------------+----------------------------+------------+
|``GreaterThan`` |``>=`` or ``ge`` |``>=`` |
+---------------------+----------------------------+------------+
|``LessThan`` |``<=`` or ``le`` |``<=`` |
+---------------------+----------------------------+------------+
|``StrictGreaterThan``|``>`` or ``gt`` |``>`` |
+---------------------+----------------------------+------------+
|``StrictLessThan`` |``<`` or ``lt`` |``<`` |
+---------------------+----------------------------+------------+
For example, setting ``rop`` to ``==`` produces an
``Equality`` relation, ``Eq()``.
So does setting ``rop`` to ``eq``, or leaving ``rop`` unspecified.
That is, the first three ``Rel()`` below all produce the same result.
Using a ``rop`` from a different row in the table produces a
different relation type.
For example, the fourth ``Rel()`` below using ``lt`` for ``rop``
produces a ``StrictLessThan`` inequality:
>>> from sympy import Rel
>>> from sympy.abc import x, y
>>> Rel(y, x + x**2, '==')
Eq(y, x**2 + x)
>>> Rel(y, x + x**2, 'eq')
Eq(y, x**2 + x)
>>> Rel(y, x + x**2)
Eq(y, x**2 + x)
>>> Rel(y, x + x**2, 'lt')
y < x**2 + x
To obtain the relation type of an existing expression,
get its ``rel_op`` property.
For example, ``rel_op`` is ``==`` for the ``Equality`` relation above,
and ``<`` for the strict less than inequality above:
>>> from sympy import Rel
>>> from sympy.abc import x, y
>>> my_equality = Rel(y, x + x**2, '==')
>>> my_equality.rel_op
'=='
>>> my_inequality = Rel(y, x + x**2, 'lt')
>>> my_inequality.rel_op
'<'
"""
__slots__ = ()
ValidRelationOperator: dict[str | None, type[Relational]] = {}
is_Relational = True
# ValidRelationOperator - Defined below, because the necessary classes
# have not yet been defined
def __new__(cls, lhs, rhs, rop=None, **assumptions):
# If called by a subclass, do nothing special and pass on to Basic.
if cls is not Relational:
return Basic.__new__(cls, lhs, rhs, **assumptions)
# XXX: Why do this? There should be a separate function to make a
# particular subclass of Relational from a string.
#
# If called directly with an operator, look up the subclass
# corresponding to that operator and delegate to it
cls = cls.ValidRelationOperator.get(rop, None)
if cls is None:
raise ValueError("Invalid relational operator symbol: %r" % rop)
if not issubclass(cls, (Eq, Ne)):
# validate that Booleans are not being used in a relational
# other than Eq/Ne;
# Note: Symbol is a subclass of Boolean but is considered
# acceptable here.
if any(map(_nontrivBool, (lhs, rhs))):
raise TypeError(filldedent('''
A Boolean argument can only be used in
Eq and Ne; all other relationals expect
real expressions.
'''))
return cls(lhs, rhs, **assumptions)
@property
def lhs(self):
"""The left-hand side of the relation."""
return self._args[0]
@property
def rhs(self):
"""The right-hand side of the relation."""
return self._args[1]
@property
def reversed(self):
"""Return the relationship with sides reversed.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x
>>> Eq(x, 1)
Eq(x, 1)
>>> _.reversed
Eq(1, x)
>>> x < 1
x < 1
>>> _.reversed
1 > x
"""
ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}
a, b = self.args
return Relational.__new__(ops.get(self.func, self.func), b, a)
@property
def reversedsign(self):
"""Return the relationship with signs reversed.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x
>>> Eq(x, 1)
Eq(x, 1)
>>> _.reversedsign
Eq(-x, -1)
>>> x < 1
x < 1
>>> _.reversedsign
-x > -1
"""
a, b = self.args
if not (isinstance(a, BooleanAtom) or isinstance(b, BooleanAtom)):
ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}
return Relational.__new__(ops.get(self.func, self.func), -a, -b)
else:
return self
@property
def negated(self):
"""Return the negated relationship.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x
>>> Eq(x, 1)
Eq(x, 1)
>>> _.negated
Ne(x, 1)
>>> x < 1
x < 1
>>> _.negated
x >= 1
Notes
=====
        This works more or less identically to ``~``/``Not``. The difference is
that ``negated`` returns the relationship even if ``evaluate=False``.
Hence, this is useful in code when checking for e.g. negated relations
to existing ones as it will not be affected by the `evaluate` flag.
"""
ops = {Eq: Ne, Ge: Lt, Gt: Le, Le: Gt, Lt: Ge, Ne: Eq}
# If there ever will be new Relational subclasses, the following line
# will work until it is properly sorted out
# return ops.get(self.func, lambda a, b, evaluate=False: ~(self.func(a,
# b, evaluate=evaluate)))(*self.args, evaluate=False)
return Relational.__new__(ops.get(self.func), *self.args)
@property
def weak(self):
"""return the non-strict version of the inequality or self
        Examples
========
>>> from sympy.abc import x
>>> (x < 1).weak
x <= 1
>>> _.weak
x <= 1
"""
return self
@property
def strict(self):
"""return the strict version of the inequality or self
        Examples
========
>>> from sympy.abc import x
>>> (x <= 1).strict
x < 1
>>> _.strict
x < 1
"""
return self
def _eval_evalf(self, prec):
return self.func(*[s._evalf(prec) for s in self.args])
@property
def canonical(self):
"""Return a canonical form of the relational by putting a
number on the rhs, canonically removing a sign or else
ordering the args canonically. No other simplification is
attempted.
Examples
========
>>> from sympy.abc import x, y
>>> x < 2
x < 2
>>> _.reversed.canonical
x < 2
>>> (-y < x).canonical
x > -y
>>> (-y > x).canonical
x < -y
>>> (-y < -x).canonical
x < y
The canonicalization is recursively applied:
>>> from sympy import Eq
>>> Eq(x < y, y > x).canonical
True
"""
args = tuple([i.canonical if isinstance(i, Relational) else i for i in self.args])
if args != self.args:
r = self.func(*args)
if not isinstance(r, Relational):
return r
else:
r = self
if r.rhs.is_number:
if r.rhs.is_Number and r.lhs.is_Number and r.lhs > r.rhs:
r = r.reversed
elif r.lhs.is_number:
r = r.reversed
elif tuple(ordered(args)) != args:
r = r.reversed
LHS_CEMS = getattr(r.lhs, 'could_extract_minus_sign', None)
RHS_CEMS = getattr(r.rhs, 'could_extract_minus_sign', None)
if isinstance(r.lhs, BooleanAtom) or isinstance(r.rhs, BooleanAtom):
return r
# Check if first value has negative sign
if LHS_CEMS and LHS_CEMS():
return r.reversedsign
elif not r.rhs.is_number and RHS_CEMS and RHS_CEMS():
# Right hand side has a minus, but not lhs.
# How does the expression with reversed signs behave?
# This is so that expressions of the type
# Eq(x, -y) and Eq(-x, y)
# have the same canonical representation
expr1, _ = ordered([r.lhs, -r.rhs])
if expr1 != r.lhs:
return r.reversed.reversedsign
return r
def equals(self, other, failing_expression=False):
"""Return True if the sides of the relationship are mathematically
identical and the type of relationship is the same.
If failing_expression is True, return the expression whose truth value
was unknown."""
if isinstance(other, Relational):
if other in (self, self.reversed):
return True
a, b = self, other
if a.func in (Eq, Ne) or b.func in (Eq, Ne):
if a.func != b.func:
return False
left, right = [i.equals(j,
failing_expression=failing_expression)
for i, j in zip(a.args, b.args)]
if left is True:
return right
if right is True:
return left
lr, rl = [i.equals(j, failing_expression=failing_expression)
for i, j in zip(a.args, b.reversed.args)]
if lr is True:
return rl
if rl is True:
return lr
e = (left, right, lr, rl)
if all(i is False for i in e):
return False
for i in e:
if i not in (True, False):
return i
else:
if b.func != a.func:
b = b.reversed
if a.func != b.func:
return False
left = a.lhs.equals(b.lhs,
failing_expression=failing_expression)
if left is False:
return False
right = a.rhs.equals(b.rhs,
failing_expression=failing_expression)
if right is False:
return False
if left is True:
return right
return left
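    # Illustrative sketch: a relation and its reversed form are considered
    # mathematically identical by ``equals``.
    # >>> from sympy.abc import x, y
    # >>> (x < y).equals(y > x)
    # True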
def _eval_simplify(self, **kwargs):
from .add import Add
from .expr import Expr
r = self
r = r.func(*[i.simplify(**kwargs) for i in r.args])
if r.is_Relational:
if not isinstance(r.lhs, Expr) or not isinstance(r.rhs, Expr):
return r
dif = r.lhs - r.rhs
# replace dif with a valid Number that will
# allow a definitive comparison with 0
v = None
if dif.is_comparable:
v = dif.n(2)
elif dif.equals(0): # XXX this is expensive
v = S.Zero
if v is not None:
r = r.func._eval_relation(v, S.Zero)
r = r.canonical
# If there is only one symbol in the expression,
# try to write it on a simplified form
free = list(filter(lambda x: x.is_real is not False, r.free_symbols))
if len(free) == 1:
try:
from sympy.solvers.solveset import linear_coeffs
x = free.pop()
dif = r.lhs - r.rhs
m, b = linear_coeffs(dif, x)
if m.is_zero is False:
if m.is_negative:
# Dividing with a negative number, so change order of arguments
# canonical will put the symbol back on the lhs later
r = r.func(-b / m, x)
else:
r = r.func(x, -b / m)
else:
r = r.func(b, S.Zero)
except ValueError:
# maybe not a linear function, try polynomial
from sympy.polys.polyerrors import PolynomialError
from sympy.polys.polytools import gcd, Poly, poly
try:
p = poly(dif, x)
c = p.all_coeffs()
constant = c[-1]
c[-1] = 0
scale = gcd(c)
c = [ctmp / scale for ctmp in c]
r = r.func(Poly.from_list(c, x).as_expr(), -constant / scale)
except PolynomialError:
pass
elif len(free) >= 2:
try:
from sympy.solvers.solveset import linear_coeffs
from sympy.polys.polytools import gcd
free = list(ordered(free))
dif = r.lhs - r.rhs
m = linear_coeffs(dif, *free)
constant = m[-1]
del m[-1]
scale = gcd(m)
m = [mtmp / scale for mtmp in m]
nzm = list(filter(lambda f: f[0] != 0, list(zip(m, free))))
if scale.is_zero is False:
if constant != 0:
# lhs: expression, rhs: constant
newexpr = Add(*[i * j for i, j in nzm])
r = r.func(newexpr, -constant / scale)
else:
# keep first term on lhs
lhsterm = nzm[0][0] * nzm[0][1]
del nzm[0]
newexpr = Add(*[i * j for i, j in nzm])
r = r.func(lhsterm, -newexpr)
else:
r = r.func(constant, S.Zero)
except ValueError:
pass
# Did we get a simplified result?
r = r.canonical
measure = kwargs['measure']
if measure(r) < kwargs['ratio'] * measure(self):
return r
else:
return self
def _eval_trigsimp(self, **opts):
from sympy.simplify.trigsimp import trigsimp
return self.func(trigsimp(self.lhs, **opts), trigsimp(self.rhs, **opts))
def expand(self, **kwargs):
args = (arg.expand(**kwargs) for arg in self.args)
return self.func(*args)
def __bool__(self):
raise TypeError("cannot determine truth value of Relational")
def _eval_as_set(self):
# self is univariate and periodicity(self, x) in (0, None)
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.sets.conditionset import ConditionSet
syms = self.free_symbols
assert len(syms) == 1
x = syms.pop()
try:
xset = solve_univariate_inequality(self, x, relational=False)
except NotImplementedError:
# solve_univariate_inequality raises NotImplementedError for
# unsolvable equations/inequalities.
xset = ConditionSet(x, self, S.Reals)
return xset
@property
def binary_symbols(self):
# override where necessary
return set()
Rel = Relational
class Equality(Relational):
"""
An equal relation between two objects.
Explanation
===========
Represents that two objects are equal. If they can be easily shown
to be definitively equal (or unequal), this will reduce to True (or
False). Otherwise, the relation is maintained as an unevaluated
Equality object. Use the ``simplify`` function on this object for
more nontrivial evaluation of the equality relation.
As usual, the keyword argument ``evaluate=False`` can be used to
prevent any evaluation.
Examples
========
>>> from sympy import Eq, simplify, exp, cos
>>> from sympy.abc import x, y
>>> Eq(y, x + x**2)
Eq(y, x**2 + x)
>>> Eq(2, 5)
False
>>> Eq(2, 5, evaluate=False)
Eq(2, 5)
>>> _.doit()
False
>>> Eq(exp(x), exp(x).rewrite(cos))
Eq(exp(x), sinh(x) + cosh(x))
>>> simplify(_)
True
See Also
========
sympy.logic.boolalg.Equivalent : for representing equality between two
boolean expressions
Notes
=====
Python treats 1 and True (and 0 and False) as being equal; SymPy
    does not. An integer will always compare as unequal to a Boolean:
>>> Eq(True, 1), True == 1
(False, True)
This class is not the same as the == operator. The == operator tests
for exact structural equality between two expressions; this class
compares expressions mathematically.
If either object defines an ``_eval_Eq`` method, it can be used in place of
the default algorithm. If ``lhs._eval_Eq(rhs)`` or ``rhs._eval_Eq(lhs)``
returns anything other than None, that return value will be substituted for
the Equality. If None is returned by ``_eval_Eq``, an Equality object will
be created as usual.
Since this object is already an expression, it does not respond to
the method ``as_expr`` if one tries to create `x - y` from ``Eq(x, y)``.
This can be done with the ``rewrite(Add)`` method.
.. deprecated:: 1.5
``Eq(expr)`` with a single argument is a shorthand for ``Eq(expr, 0)``,
but this behavior is deprecated and will be removed in a future version
of SymPy.
"""
rel_op = '=='
__slots__ = ()
is_Equality = True
def __new__(cls, lhs, rhs=None, **options):
if rhs is None:
sympy_deprecation_warning(
"""
Eq(expr) with a single argument with the right-hand side
defaulting to 0 is deprecated. Use Eq(expr, 0) instead.
""",
deprecated_since_version="1.5",
active_deprecations_target="deprecated-eq-expr",
)
rhs = 0
evaluate = options.pop('evaluate', global_parameters.evaluate)
lhs = _sympify(lhs)
rhs = _sympify(rhs)
if evaluate:
val = is_eq(lhs, rhs)
if val is None:
return cls(lhs, rhs, evaluate=False)
else:
return _sympify(val)
return Relational.__new__(cls, lhs, rhs)
@classmethod
def _eval_relation(cls, lhs, rhs):
return _sympify(lhs == rhs)
def _eval_rewrite_as_Add(self, *args, **kwargs):
"""
return Eq(L, R) as L - R. To control the evaluation of
the result set pass `evaluate=True` to give L - R;
if `evaluate=None` then terms in L and R will not cancel
but they will be listed in canonical order; otherwise
non-canonical args will be returned. If one side is 0, the
non-zero side will be returned.
Examples
========
>>> from sympy import Eq, Add
>>> from sympy.abc import b, x
>>> eq = Eq(x + b, x - b)
>>> eq.rewrite(Add)
2*b
>>> eq.rewrite(Add, evaluate=None).args
(b, b, x, -x)
>>> eq.rewrite(Add, evaluate=False).args
(b, x, b, -x)
"""
from .add import _unevaluated_Add, Add
L, R = args
if L == 0:
return R
if R == 0:
return L
evaluate = kwargs.get('evaluate', True)
if evaluate:
# allow cancellation of args
return L - R
args = Add.make_args(L) + Add.make_args(-R)
if evaluate is None:
# no cancellation, but canonical
return _unevaluated_Add(*args)
# no cancellation, not canonical
return Add._from_args(args)
@property
def binary_symbols(self):
if S.true in self.args or S.false in self.args:
if self.lhs.is_Symbol:
return {self.lhs}
elif self.rhs.is_Symbol:
return {self.rhs}
return set()
def _eval_simplify(self, **kwargs):
# standard simplify
e = super()._eval_simplify(**kwargs)
if not isinstance(e, Equality):
return e
from .expr import Expr
if not isinstance(e.lhs, Expr) or not isinstance(e.rhs, Expr):
return e
free = self.free_symbols
if len(free) == 1:
try:
from .add import Add
from sympy.solvers.solveset import linear_coeffs
x = free.pop()
m, b = linear_coeffs(
e.rewrite(Add, evaluate=False), x)
if m.is_zero is False:
enew = e.func(x, -b / m)
else:
enew = e.func(m * x, -b)
measure = kwargs['measure']
if measure(enew) <= kwargs['ratio'] * measure(e):
e = enew
except ValueError:
pass
return e.canonical
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals.integrals import integrate
return integrate(self, *args, **kwargs)
def as_poly(self, *gens, **kwargs):
'''Returns lhs-rhs as a Poly
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x
>>> Eq(x**2, 1).as_poly(x)
Poly(x**2 - 1, x, domain='ZZ')
'''
return (self.lhs - self.rhs).as_poly(*gens, **kwargs)
Eq = Equality
class Unequality(Relational):
"""An unequal relation between two objects.
Explanation
===========
Represents that two objects are not equal. If they can be shown to be
definitively equal, this will reduce to False; if definitively unequal,
this will reduce to True. Otherwise, the relation is maintained as an
Unequality object.
Examples
========
>>> from sympy import Ne
>>> from sympy.abc import x, y
>>> Ne(y, x+x**2)
Ne(y, x**2 + x)
See Also
========
Equality
Notes
=====
This class is not the same as the != operator. The != operator tests
for exact structural equality between two expressions; this class
compares expressions mathematically.
This class is effectively the inverse of Equality. As such, it uses the
same algorithms, including any available `_eval_Eq` methods.
"""
rel_op = '!='
__slots__ = ()
def __new__(cls, lhs, rhs, **options):
lhs = _sympify(lhs)
rhs = _sympify(rhs)
evaluate = options.pop('evaluate', global_parameters.evaluate)
if evaluate:
val = is_neq(lhs, rhs)
if val is None:
return cls(lhs, rhs, evaluate=False)
else:
return _sympify(val)
return Relational.__new__(cls, lhs, rhs, **options)
@classmethod
def _eval_relation(cls, lhs, rhs):
return _sympify(lhs != rhs)
@property
def binary_symbols(self):
if S.true in self.args or S.false in self.args:
if self.lhs.is_Symbol:
return {self.lhs}
elif self.rhs.is_Symbol:
return {self.rhs}
return set()
def _eval_simplify(self, **kwargs):
# simplify as an equality
eq = Equality(*self.args)._eval_simplify(**kwargs)
if isinstance(eq, Equality):
# send back Ne with the new args
return self.func(*eq.args)
return eq.negated # result of Ne is the negated Eq
Ne = Unequality
class _Inequality(Relational):
"""Internal base class for all *Than types.
Each subclass must implement _eval_relation to provide the method for
comparing two real numbers.
"""
__slots__ = ()
def __new__(cls, lhs, rhs, **options):
try:
lhs = _sympify(lhs)
rhs = _sympify(rhs)
except SympifyError:
return NotImplemented
evaluate = options.pop('evaluate', global_parameters.evaluate)
if evaluate:
for me in (lhs, rhs):
if me.is_extended_real is False:
raise TypeError("Invalid comparison of non-real %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
# First we invoke the appropriate inequality method of `lhs`
# (e.g., `lhs.__lt__`). That method will try to reduce to
# boolean or raise an exception. It may keep calling
# superclasses until it reaches `Expr` (e.g., `Expr.__lt__`).
# In some cases, `Expr` will just invoke us again (if neither it
# nor a subclass was able to reduce to boolean or raise an
# exception). In that case, it must call us with
# `evaluate=False` to prevent infinite recursion.
return cls._eval_relation(lhs, rhs, **options)
# make a "non-evaluated" Expr for the inequality
return Relational.__new__(cls, lhs, rhs, **options)
@classmethod
def _eval_relation(cls, lhs, rhs, **options):
val = cls._eval_fuzzy_relation(lhs, rhs)
if val is None:
return cls(lhs, rhs, evaluate=False)
else:
return _sympify(val)
class _Greater(_Inequality):
"""Not intended for general use
_Greater is only used so that GreaterThan and StrictGreaterThan may
subclass it for the .gts and .lts properties.
"""
__slots__ = ()
@property
def gts(self):
return self._args[0]
@property
def lts(self):
return self._args[1]
class _Less(_Inequality):
"""Not intended for general use.
_Less is only used so that LessThan and StrictLessThan may subclass it for
the .gts and .lts properties.
"""
__slots__ = ()
@property
def gts(self):
return self._args[1]
@property
def lts(self):
return self._args[0]
class GreaterThan(_Greater):
r"""Class representations of inequalities.
Explanation
===========
    The ``*Than`` classes represent inequality relationships, where the left-hand
    side is generally bigger or smaller than the right-hand side. For example,
    the GreaterThan class represents an inequality where the
    left-hand side is at least as big as the right-hand side, if not bigger. In
mathematical notation:
lhs $\ge$ rhs
In total, there are four ``*Than`` classes, to represent the four
inequalities:
+-----------------+--------+
|Class Name | Symbol |
+=================+========+
|GreaterThan | ``>=`` |
+-----------------+--------+
|LessThan | ``<=`` |
+-----------------+--------+
|StrictGreaterThan| ``>`` |
+-----------------+--------+
|StrictLessThan | ``<`` |
+-----------------+--------+
All classes take two arguments, lhs and rhs.
+----------------------------+-----------------+
|Signature Example | Math Equivalent |
+============================+=================+
|GreaterThan(lhs, rhs) | lhs $\ge$ rhs |
+----------------------------+-----------------+
|LessThan(lhs, rhs) | lhs $\le$ rhs |
+----------------------------+-----------------+
|StrictGreaterThan(lhs, rhs) | lhs $>$ rhs |
+----------------------------+-----------------+
|StrictLessThan(lhs, rhs) | lhs $<$ rhs |
+----------------------------+-----------------+
In addition to the normal .lhs and .rhs of Relations, ``*Than`` inequality
objects also have the .lts and .gts properties, which represent the "less
than side" and "greater than side" of the operator. Use of .lts and .gts
in an algorithm rather than .lhs and .rhs as an assumption of inequality
direction will make more explicit the intent of a certain section of code,
and will make it similarly more robust to client code changes:
>>> from sympy import GreaterThan, StrictGreaterThan
>>> from sympy import LessThan, StrictLessThan
>>> from sympy import And, Ge, Gt, Le, Lt, Rel, S
>>> from sympy.abc import x, y, z
>>> from sympy.core.relational import Relational
>>> e = GreaterThan(x, 1)
>>> e
x >= 1
>>> '%s >= %s is the same as %s <= %s' % (e.gts, e.lts, e.lts, e.gts)
'x >= 1 is the same as 1 <= x'
Examples
========
One generally does not instantiate these classes directly, but uses various
convenience methods:
>>> for f in [Ge, Gt, Le, Lt]: # convenience wrappers
... print(f(x, 2))
x >= 2
x > 2
x <= 2
x < 2
Another option is to use the Python inequality operators (``>=``, ``>``,
``<=``, ``<``) directly. Their main advantage over the ``Ge``, ``Gt``,
``Le``, and ``Lt`` counterparts, is that one can write a more
"mathematical looking" statement rather than littering the math with
oddball function calls. However there are certain (minor) caveats of
which to be aware (search for 'gotcha', below).
>>> x >= 2
x >= 2
>>> _ == Ge(x, 2)
True
However, it is also perfectly valid to instantiate a ``*Than`` class less
succinctly and less conveniently:
>>> Rel(x, 1, ">")
x > 1
>>> Relational(x, 1, ">")
x > 1
>>> StrictGreaterThan(x, 1)
x > 1
>>> GreaterThan(x, 1)
x >= 1
>>> LessThan(x, 1)
x <= 1
>>> StrictLessThan(x, 1)
x < 1
Notes
=====
There are a couple of "gotchas" to be aware of when using Python's
operators.
    The first is that what you write is not always what you get:
>>> 1 < x
x > 1
Due to the order that Python parses a statement, it may
not immediately find two objects comparable. When ``1 < x``
is evaluated, Python recognizes that the number 1 is a native
number and that x is *not*. Because a native Python number does
    not know how to compare itself with a SymPy object,
    Python will try the reflective operation ``x > 1``, and that is the
    form that gets evaluated and hence returned.
If the order of the statement is important (for visual output to
the console, perhaps), one can work around this annoyance in a
couple ways:
(1) "sympify" the literal before comparison
>>> S(1) < x
1 < x
(2) use one of the wrappers or less succinct methods described
above
>>> Lt(1, x)
1 < x
>>> Relational(1, x, "<")
1 < x
The second gotcha involves writing equality tests between relationals
when one or both sides of the test involve a literal relational:
>>> e = x < 1; e
x < 1
>>> e == e # neither side is a literal
True
>>> e == x < 1 # expecting True, too
False
>>> e != x < 1 # expecting False
x < 1
>>> x < 1 != x < 1 # expecting False or the same thing as before
Traceback (most recent call last):
...
TypeError: cannot determine truth value of Relational
The solution for this case is to wrap literal relationals in
parentheses:
>>> e == (x < 1)
True
>>> e != (x < 1)
False
>>> (x < 1) != (x < 1)
False
The third gotcha involves chained inequalities not involving
``==`` or ``!=``. Occasionally, one may be tempted to write:
>>> e = x < y < z
Traceback (most recent call last):
...
TypeError: symbolic boolean expression has no truth value.
Due to an implementation detail or decision of Python [1]_,
there is no way for SymPy to create a chained inequality with
that syntax so one must use And:
>>> e = And(x < y, y < z)
>>> type( e )
And
>>> e
(x < y) & (y < z)
Although this can also be done with the '&' operator, it cannot
    be done with the 'and' operator:
>>> (x < y) & (y < z)
(x < y) & (y < z)
>>> (x < y) and (y < z)
Traceback (most recent call last):
...
TypeError: cannot determine truth value of Relational
.. [1] This implementation detail is that Python provides no reliable
method to determine that a chained inequality is being built.
Chained comparison operators are evaluated pairwise, using "and"
logic (see
http://docs.python.org/reference/expressions.html#not-in). This
is done in an efficient way, so that each object being compared
is only evaluated once and the comparison can short-circuit. For
example, ``1 > 2 > 3`` is evaluated by Python as ``(1 > 2) and (2
> 3)``. The ``and`` operator coerces each side into a bool,
returning the object itself when it short-circuits. The bool of
the --Than operators will raise TypeError on purpose, because
SymPy cannot determine the mathematical ordering of symbolic
expressions. Thus, if we were to compute ``x > y > z``, with
``x``, ``y``, and ``z`` being Symbols, Python converts the
statement (roughly) into these steps:
(1) x > y > z
(2) (x > y) and (y > z)
(3) (GreaterThanObject) and (y > z)
(4) (GreaterThanObject.__bool__()) and (y > z)
(5) TypeError
Because of the ``and`` added at step 2, the statement gets turned into a
weak ternary statement, and the first object's ``__bool__`` method will
raise TypeError. Thus, creating a chained inequality is not possible.
In Python, there is no way to override the ``and`` operator, or to
control how it short circuits, so it is impossible to make something
like ``x > y > z`` work. There was a PEP to change this,
:pep:`335`, but it was officially closed in March, 2012.
"""
__slots__ = ()
rel_op = '>='
@classmethod
def _eval_fuzzy_relation(cls, lhs, rhs):
return is_ge(lhs, rhs)
@property
def strict(self):
return Gt(*self.args)
Ge = GreaterThan
class LessThan(_Less):
__doc__ = GreaterThan.__doc__
__slots__ = ()
rel_op = '<='
@classmethod
def _eval_fuzzy_relation(cls, lhs, rhs):
return is_le(lhs, rhs)
@property
def strict(self):
return Lt(*self.args)
Le = LessThan
class StrictGreaterThan(_Greater):
__doc__ = GreaterThan.__doc__
__slots__ = ()
rel_op = '>'
@classmethod
def _eval_fuzzy_relation(cls, lhs, rhs):
return is_gt(lhs, rhs)
@property
def weak(self):
return Ge(*self.args)
Gt = StrictGreaterThan
class StrictLessThan(_Less):
__doc__ = GreaterThan.__doc__
__slots__ = ()
rel_op = '<'
@classmethod
def _eval_fuzzy_relation(cls, lhs, rhs):
return is_lt(lhs, rhs)
@property
def weak(self):
return Le(*self.args)
Lt = StrictLessThan
# A class-specific (not object-specific) data item used for a minor speedup.
# It is defined here, rather than directly in the class, because the classes
# that it references have not been defined until now (e.g. StrictLessThan).
Relational.ValidRelationOperator = {
None: Equality,
'==': Equality,
'eq': Equality,
'!=': Unequality,
'<>': Unequality,
'ne': Unequality,
'>=': GreaterThan,
'ge': GreaterThan,
'<=': LessThan,
'le': LessThan,
'>': StrictGreaterThan,
'gt': StrictGreaterThan,
'<': StrictLessThan,
'lt': StrictLessThan,
}
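# Illustrative sketch: the table above is what ``Rel`` consults when a string
# operator is supplied.
# >>> from sympy.abc import x
# >>> Rel(x, 1, '>=')
# x >= 1
# >>> Rel(x, 1, 'ne')
# Ne(x, 1)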
def _n2(a, b):
"""Return (a - b).evalf(2) if a and b are comparable, else None.
This should only be used when a and b are already sympified.
"""
# /!\ it is very important (see issue 8245) not to
# use a re-evaluated number in the calculation of dif
if a.is_comparable and b.is_comparable:
dif = (a - b).evalf(2)
if dif.is_comparable:
return dif
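# Illustrative sketch: only the sign of the two-digit estimate matters to
# callers such as ``is_ge``; non-comparable arguments give None.
# >>> from sympy import S, sqrt
# >>> _n2(sqrt(2), S(1)) > 0
# True
# >>> print(_n2(S(1), S.ImaginaryUnit))
# None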
@dispatch(Expr, Expr)
def _eval_is_ge(lhs, rhs):
return None
@dispatch(Basic, Basic)
def _eval_is_eq(lhs, rhs):
return None
@dispatch(Tuple, Expr) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
return False
@dispatch(Tuple, AppliedUndef) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
return None
@dispatch(Tuple, Symbol) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
return None
@dispatch(Tuple, Tuple) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
if len(lhs) != len(rhs):
return False
return fuzzy_and(fuzzy_bool(is_eq(s, o)) for s, o in zip(lhs, rhs))
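# Illustrative sketch: Tuple equality is decided elementwise through is_eq.
# >>> from sympy import Tuple, S
# >>> is_eq(Tuple(S(1), S(2)), Tuple(S(1), S(2)))
# True
# >>> is_eq(Tuple(S(1), S(2)), Tuple(S(1), S(3)))
# False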
def is_lt(lhs, rhs, assumptions=None):
"""Fuzzy bool for lhs is strictly less than rhs.
See the docstring for :func:`~.is_ge` for more.
"""
return fuzzy_not(is_ge(lhs, rhs, assumptions))
def is_gt(lhs, rhs, assumptions=None):
"""Fuzzy bool for lhs is strictly greater than rhs.
See the docstring for :func:`~.is_ge` for more.
"""
return fuzzy_not(is_le(lhs, rhs, assumptions))
def is_le(lhs, rhs, assumptions=None):
"""Fuzzy bool for lhs is less than or equal to rhs.
See the docstring for :func:`~.is_ge` for more.
"""
return is_ge(rhs, lhs, assumptions)
def is_ge(lhs, rhs, assumptions=None):
"""
Fuzzy bool for *lhs* is greater than or equal to *rhs*.
Parameters
==========
lhs : Expr
The left-hand side of the expression, must be sympified,
and an instance of expression. Throws an exception if
lhs is not an instance of expression.
rhs : Expr
The right-hand side of the expression, must be sympified
and an instance of expression. Throws an exception if
        rhs is not an instance of expression.
assumptions: Boolean, optional
Assumptions taken to evaluate the inequality.
Returns
=======
``True`` if *lhs* is greater than or equal to *rhs*, ``False`` if *lhs*
is less than *rhs*, and ``None`` if the comparison between *lhs* and
*rhs* is indeterminate.
Explanation
===========
This function is intended to give a relatively fast determination and
deliberately does not attempt slow calculations that might help in
obtaining a determination of True or False in more difficult cases.
The four comparison functions ``is_le``, ``is_lt``, ``is_ge``, and ``is_gt`` are
each implemented in terms of ``is_ge`` in the following way:
is_ge(x, y) := is_ge(x, y)
is_le(x, y) := is_ge(y, x)
is_lt(x, y) := fuzzy_not(is_ge(x, y))
is_gt(x, y) := fuzzy_not(is_ge(y, x))
    Therefore, supporting a new type with this function will ensure correct
    behavior for the other three functions as well.
To maintain these equivalences in fuzzy logic it is important that in cases where
either x or y is non-real all comparisons will give None.
Examples
========
>>> from sympy import S, Q
>>> from sympy.core.relational import is_ge, is_le, is_gt, is_lt
>>> from sympy.abc import x
>>> is_ge(S(2), S(0))
True
>>> is_ge(S(0), S(2))
False
>>> is_le(S(0), S(2))
True
>>> is_gt(S(0), S(2))
False
>>> is_lt(S(2), S(0))
False
    Assumptions can be passed to evaluate the inequality which is otherwise
indeterminate.
>>> print(is_ge(x, S(0)))
None
>>> is_ge(x, S(0), assumptions=Q.positive(x))
True
New types can be supported by dispatching to ``_eval_is_ge``.
>>> from sympy import Expr, sympify
>>> from sympy.multipledispatch import dispatch
>>> class MyExpr(Expr):
... def __new__(cls, arg):
... return super().__new__(cls, sympify(arg))
... @property
... def value(self):
... return self.args[0]
>>> @dispatch(MyExpr, MyExpr)
... def _eval_is_ge(a, b):
... return is_ge(a.value, b.value)
>>> a = MyExpr(1)
>>> b = MyExpr(2)
>>> is_ge(b, a)
True
>>> is_le(a, b)
True
"""
from sympy.assumptions.wrapper import AssumptionsWrapper, is_extended_nonnegative
if not (isinstance(lhs, Expr) and isinstance(rhs, Expr)):
raise TypeError("Can only compare inequalities with Expr")
retval = _eval_is_ge(lhs, rhs)
if retval is not None:
return retval
else:
n2 = _n2(lhs, rhs)
if n2 is not None:
# use float comparison for infinity.
# otherwise get stuck in infinite recursion
if n2 in (S.Infinity, S.NegativeInfinity):
n2 = float(n2)
return n2 >= 0
_lhs = AssumptionsWrapper(lhs, assumptions)
_rhs = AssumptionsWrapper(rhs, assumptions)
if _lhs.is_extended_real and _rhs.is_extended_real:
if (_lhs.is_infinite and _lhs.is_extended_positive) or (_rhs.is_infinite and _rhs.is_extended_negative):
return True
diff = lhs - rhs
if diff is not S.NaN:
rv = is_extended_nonnegative(diff, assumptions)
if rv is not None:
return rv
def is_neq(lhs, rhs, assumptions=None):
"""Fuzzy bool for lhs does not equal rhs.
See the docstring for :func:`~.is_eq` for more.
"""
return fuzzy_not(is_eq(lhs, rhs, assumptions))
def is_eq(lhs, rhs, assumptions=None):
"""
Fuzzy bool representing mathematical equality between *lhs* and *rhs*.
Parameters
==========
lhs : Expr
The left-hand side of the expression, must be sympified.
rhs : Expr
The right-hand side of the expression, must be sympified.
assumptions: Boolean, optional
Assumptions taken to evaluate the equality.
Returns
=======
    ``True`` if *lhs* is equal to *rhs*, ``False`` if *lhs* is not equal to *rhs*,
and ``None`` if the comparison between *lhs* and *rhs* is indeterminate.
Explanation
===========
This function is intended to give a relatively fast determination and
deliberately does not attempt slow calculations that might help in
obtaining a determination of True or False in more difficult cases.
    :func:`~.is_neq` calls this function to return its value, so supporting a
    new type with this function will ensure correct behavior for ``is_neq``
as well.
Examples
========
>>> from sympy import Q, S
>>> from sympy.core.relational import is_eq, is_neq
>>> from sympy.abc import x
>>> is_eq(S(0), S(0))
True
>>> is_neq(S(0), S(0))
False
>>> is_eq(S(0), S(2))
False
>>> is_neq(S(0), S(2))
True
Assumptions can be passed to evaluate the equality which is otherwise
indeterminate.
>>> print(is_eq(x, S(0)))
None
>>> is_eq(x, S(0), assumptions=Q.zero(x))
True
New types can be supported by dispatching to ``_eval_is_eq``.
>>> from sympy import Basic, sympify
>>> from sympy.multipledispatch import dispatch
>>> class MyBasic(Basic):
... def __new__(cls, arg):
... return Basic.__new__(cls, sympify(arg))
... @property
... def value(self):
... return self.args[0]
...
>>> @dispatch(MyBasic, MyBasic)
... def _eval_is_eq(a, b):
... return is_eq(a.value, b.value)
...
>>> a = MyBasic(1)
>>> b = MyBasic(1)
>>> is_eq(a, b)
True
>>> is_neq(a, b)
False
"""
# here, _eval_Eq is only called for backwards compatibility
# new code should use is_eq with multiple dispatch as
# outlined in the docstring
for side1, side2 in (lhs, rhs), (rhs, lhs):
eval_func = getattr(side1, '_eval_Eq', None)
if eval_func is not None:
retval = eval_func(side2)
if retval is not None:
return retval
retval = _eval_is_eq(lhs, rhs)
if retval is not None:
return retval
if dispatch(type(lhs), type(rhs)) != dispatch(type(rhs), type(lhs)):
retval = _eval_is_eq(rhs, lhs)
if retval is not None:
return retval
# retval is still None, so go through the equality logic
# If expressions have the same structure, they must be equal.
if lhs == rhs:
return True # e.g. True == True
elif all(isinstance(i, BooleanAtom) for i in (rhs, lhs)):
return False # True != False
elif not (lhs.is_Symbol or rhs.is_Symbol) and (
isinstance(lhs, Boolean) !=
isinstance(rhs, Boolean)):
return False # only Booleans can equal Booleans
from sympy.assumptions.wrapper import (AssumptionsWrapper,
is_infinite, is_extended_real)
from .add import Add
_lhs = AssumptionsWrapper(lhs, assumptions)
_rhs = AssumptionsWrapper(rhs, assumptions)
if _lhs.is_infinite or _rhs.is_infinite:
if fuzzy_xor([_lhs.is_infinite, _rhs.is_infinite]):
return False
if fuzzy_xor([_lhs.is_extended_real, _rhs.is_extended_real]):
return False
if fuzzy_and([_lhs.is_extended_real, _rhs.is_extended_real]):
return fuzzy_xor([_lhs.is_extended_positive, fuzzy_not(_rhs.is_extended_positive)])
# Try to split real/imaginary parts and equate them
I = S.ImaginaryUnit
def split_real_imag(expr):
real_imag = lambda t: (
'real' if is_extended_real(t, assumptions) else
'imag' if is_extended_real(I*t, assumptions) else None)
return sift(Add.make_args(expr), real_imag)
lhs_ri = split_real_imag(lhs)
if not lhs_ri[None]:
rhs_ri = split_real_imag(rhs)
if not rhs_ri[None]:
eq_real = is_eq(Add(*lhs_ri['real']), Add(*rhs_ri['real']), assumptions)
eq_imag = is_eq(I * Add(*lhs_ri['imag']), I * Add(*rhs_ri['imag']), assumptions)
return fuzzy_and(map(fuzzy_bool, [eq_real, eq_imag]))
from sympy.functions.elementary.complexes import arg
# Compare e.g. zoo with 1+I*oo by comparing args
arglhs = arg(lhs)
argrhs = arg(rhs)
# Guard against Eq(nan, nan) -> False
if not (arglhs == S.NaN and argrhs == S.NaN):
return fuzzy_bool(is_eq(arglhs, argrhs, assumptions))
if all(isinstance(i, Expr) for i in (lhs, rhs)):
# see if the difference evaluates
dif = lhs - rhs
_dif = AssumptionsWrapper(dif, assumptions)
z = _dif.is_zero
if z is not None:
if z is False and _dif.is_commutative: # issue 10728
return False
if z:
return True
n2 = _n2(lhs, rhs)
if n2 is not None:
return _sympify(n2 == 0)
# see if the ratio evaluates
n, d = dif.as_numer_denom()
rv = None
_n = AssumptionsWrapper(n, assumptions)
_d = AssumptionsWrapper(d, assumptions)
if _n.is_zero:
rv = _d.is_nonzero
elif _n.is_finite:
if _d.is_infinite:
rv = True
elif _n.is_zero is False:
rv = _d.is_infinite
if rv is None:
# if the condition that makes the denominator
# infinite does not make the original expression
# True then False can be returned
from sympy.simplify.simplify import clear_coefficients
l, r = clear_coefficients(d, S.Infinity)
args = [_.subs(l, r) for _ in (lhs, rhs)]
if args != [lhs, rhs]:
rv = fuzzy_bool(is_eq(*args, assumptions))
if rv is True:
rv = None
elif any(is_infinite(a, assumptions) for a in Add.make_args(n)):
# (inf or nan)/x != 0
rv = False
if rv is not None:
return rv
from __future__ import annotations
import numbers
import decimal
import fractions
import math
import re as regex
import sys
from functools import lru_cache
from .containers import Tuple
from .sympify import (SympifyError, _sympy_converter, sympify, _convert_numpy_types,
_sympify, _is_numpy_instance)
from .singleton import S, Singleton
from .basic import Basic
from .expr import Expr, AtomicExpr
from .evalf import pure_complex
from .cache import cacheit, clear_cache
from .decorators import _sympifyit
from .logic import fuzzy_not
from .kind import NumberKind
from sympy.external.gmpy import SYMPY_INTS, HAS_GMPY, gmpy
from sympy.multipledispatch import dispatch
import mpmath
import mpmath.libmp as mlib
from mpmath.libmp import bitcount, round_nearest as rnd
from mpmath.libmp.backend import MPZ
from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from mpmath.ctx_mp import mpnumeric
from mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero, _normalize as mpf_normalize,
prec_to_dps, dps_to_prec)
from sympy.utilities.misc import as_int, debug, filldedent
from .parameters import global_parameters
_LOG2 = math.log(2)
def comp(z1, z2, tol=None):
r"""Return a bool indicating whether the error between z1 and z2
is $\le$ ``tol``.
Examples
========
If ``tol`` is ``None`` then ``True`` will be returned if
:math:`|z1 - z2|\times 10^p \le 5` where $p$ is minimum value of the
decimal precision of each value.
>>> from sympy import comp, pi
>>> pi4 = pi.n(4); pi4
3.142
>>> comp(_, 3.142)
True
>>> comp(pi4, 3.141)
False
>>> comp(pi4, 3.143)
False
A comparison of strings will be made
if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''.
>>> comp(pi4, 3.1415)
True
>>> comp(pi4, 3.1415, '')
False
When ``tol`` is provided and $z2$ is non-zero and
:math:`|z1| > 1` the error is normalized by :math:`|z1|`:
>>> abs(pi4 - 3.14)/pi4
0.000509791731426756
>>> comp(pi4, 3.14, .001) # difference less than 0.1%
True
    >>> comp(pi4, 3.14, .0005) # difference not less than 0.05%
    False
When :math:`|z1| \le 1` the absolute error is used:
>>> 1/pi4
0.3183
>>> abs(1/pi4 - 0.3183)/(1/pi4)
3.07371499106316e-5
>>> abs(1/pi4 - 0.3183)
9.78393554684764e-6
>>> comp(1/pi4, 0.3183, 1e-5)
True
To see if the absolute error between ``z1`` and ``z2`` is less
than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)``
or ``comp(z1 - z2, tol=tol)``:
>>> abs(pi4 - 3.14)
0.00160156249999988
>>> comp(pi4 - 3.14, 0, .002)
True
>>> comp(pi4 - 3.14, 0, .001)
False
"""
if isinstance(z2, str):
if not pure_complex(z1, or_real=True):
raise ValueError('when z2 is a str z1 must be a Number')
return str(z1) == z2
if not z1:
z1, z2 = z2, z1
if not z1:
return True
if not tol:
a, b = z1, z2
if tol == '':
return str(a) == str(b)
if tol is None:
a, b = sympify(a), sympify(b)
if not all(i.is_number for i in (a, b)):
raise ValueError('expecting 2 numbers')
fa = a.atoms(Float)
fb = b.atoms(Float)
if not fa and not fb:
# no floats -- compare exactly
return a == b
# get a to be pure_complex
for _ in range(2):
ca = pure_complex(a, or_real=True)
if not ca:
if fa:
a = a.n(prec_to_dps(min([i._prec for i in fa])))
ca = pure_complex(a, or_real=True)
break
else:
fa, fb = fb, fa
a, b = b, a
cb = pure_complex(b)
if not cb and fb:
b = b.n(prec_to_dps(min([i._prec for i in fb])))
cb = pure_complex(b, or_real=True)
if ca and cb and (ca[1] or cb[1]):
return all(comp(i, j) for i, j in zip(ca, cb))
tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec)))
return int(abs(a - b)*tol) <= 5
diff = abs(z1 - z2)
az1 = abs(z1)
if z2 and az1 > 1:
return diff/az1 <= tol
else:
return diff <= tol
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
    not when the mantissa is 0. ``mpf_normalize`` always assumes that this
    is zero, but it may not be, since the mpf values "+inf",
    "-inf" and "nan" also have a mantissa of zero.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 6639)
if not bc:
return fzero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
# Necessary if mpmath is using the gmpy backend
from mpmath.libmp.backend import MPZ
rv = mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd)
return rv
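# Illustrative sketch (hedged): a tuple that is already normalized at or below
# the requested precision comes back unchanged.
# >>> from sympy import Float
# >>> mpf_norm(Float(1.5)._mpf_, 53) == Float(1.5)._mpf_
# True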
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should SymPy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
def _as_integer_ratio(p):
neg_pow, man, expt, _ = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
p = [1, -1][neg_pow % 2]*man
if expt < 0:
q = 2**-expt
else:
q = 1
p *= 2**expt
return int(p), int(q)
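# Illustrative sketch: the exact binary value of a float is returned as a
# (numerator, denominator) pair of Python ints.
# >>> from sympy import Float
# >>> _as_integer_ratio(Float(0.25))
# (1, 4)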
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite():
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
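# Illustrative sketch: the Rational value and the number of digits of the
# Decimal are returned together.
# >>> from decimal import Decimal
# >>> _decimal_to_Rational_prec(Decimal('0.25'))
# (1/4, 2)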
_floatpat = regex.compile(r"[-+]?((\d*\.\d+)|(\d+\.?))")
def _literal_float(f):
"""Return True if n starts like a floating point number."""
return bool(_floatpat.match(f))
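# Illustrative sketch: only a leading float-like prefix is required.
# >>> _literal_float('1.25e-3')
# True
# >>> _literal_float('x1.0')
# False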
# (a,b) -> gcd(a,b)
# TODO caching with decorator, but not to degrade performance
@lru_cache(1024)
def igcd(*args):
"""Computes nonnegative integer greatest common divisor.
Explanation
===========
The algorithm is based on the well known Euclid's algorithm [1]_. To
improve speed, ``igcd()`` has its own caching mechanism.
Examples
========
>>> from sympy import igcd
>>> igcd(2, 4)
2
>>> igcd(5, 10, 15)
5
References
==========
.. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm
"""
if len(args) < 2:
raise TypeError(
'igcd() takes at least 2 arguments (%s given)' % len(args))
args_temp = [abs(as_int(i)) for i in args]
if 1 in args_temp:
return 1
a = args_temp.pop()
if HAS_GMPY: # Using gmpy if present to speed up.
for b in args_temp:
a = gmpy.gcd(a, b) if b else a
return as_int(a)
for b in args_temp:
a = math.gcd(a, b)
return a
igcd2 = math.gcd
def igcd_lehmer(a, b):
r"""Computes greatest common divisor of two integers.
Explanation
===========
Euclid's algorithm for the computation of the greatest
common divisor ``gcd(a, b)`` of two (positive) integers
$a$ and $b$ is based on the division identity
$$ a = q \times b + r$$,
where the quotient $q$ and the remainder $r$ are integers
and $0 \le r < b$. Then each common divisor of $a$ and $b$
divides $r$, and it follows that ``gcd(a, b) == gcd(b, r)``.
The algorithm works by constructing the sequence
r0, r1, r2, ..., where r0 = a, r1 = b, and each rn
is the remainder from the division of the two preceding
elements.
In Python, ``q = a // b`` and ``r = a % b`` are obtained by the
floor division and the remainder operations, respectively.
These are the most expensive arithmetic operations, especially
for large a and b.
Lehmer's algorithm [1]_ is based on the observation that the quotients
``qn = r(n-1) // rn`` are in general small integers even
when a and b are very large. Hence the quotients can be
usually determined from a relatively small number of most
significant bits.
The efficiency of the algorithm is further enhanced by not
computing each long remainder in Euclid's sequence. The remainders
are linear combinations of a and b with integer coefficients
derived from the quotients. The coefficients can be computed
as far as the quotients can be determined from the chosen
    most significant parts of a and b. Only then is a new pair of
    consecutive remainders computed, and the algorithm starts
    anew with this pair.
References
==========
.. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm
"""
a, b = abs(as_int(a)), abs(as_int(b))
if a < b:
a, b = b, a
# The algorithm works by using one or two digit division
# whenever possible. The outer loop will replace the
# pair (a, b) with a pair of shorter consecutive elements
# of the Euclidean gcd sequence until a and b
# fit into two Python (long) int digits.
nbits = 2*sys.int_info.bits_per_digit
while a.bit_length() > nbits and b != 0:
# Quotients are mostly small integers that can
# be determined from most significant bits.
n = a.bit_length() - nbits
x, y = int(a >> n), int(b >> n) # most significant bits
# Elements of the Euclidean gcd sequence are linear
# combinations of a and b with integer coefficients.
# Compute the coefficients of consecutive pairs
# a' = A*a + B*b, b' = C*a + D*b
# using small integer arithmetic as far as possible.
A, B, C, D = 1, 0, 0, 1 # initial values
while True:
# The coefficients alternate in sign while looping.
# The inner loop combines two steps to keep track
# of the signs.
# At this point we have
# A > 0, B <= 0, C <= 0, D > 0,
# x' = x + B <= x < x" = x + A,
# y' = y + C <= y < y" = y + D,
# and
# x'*N <= a' < x"*N, y'*N <= b' < y"*N,
# where N = 2**n.
# Now, if y' > 0, and x"//y' and x'//y" agree,
# then their common value is equal to q = a'//b'.
# In addition,
# x'%y" = x' - q*y" < x" - q*y' = x"%y',
# and
# (x'%y")*N < a'%b' < (x"%y')*N.
# On the other hand, we also have x//y == q,
# and therefore
# x'%y" = x + B - q*(y + D) = x%y + B',
# x"%y' = x + A - q*(y + C) = x%y + A',
# where
# B' = B - q*D < 0, A' = A - q*C > 0.
if y + C <= 0:
break
q = (x + A) // (y + C)
# Now x'//y" <= q, and equality holds if
# x' - q*y" = (x - q*y) + (B - q*D) >= 0.
# This is a minor optimization to avoid division.
x_qy, B_qD = x - q*y, B - q*D
if x_qy + B_qD < 0:
break
# Next step in the Euclidean sequence.
x, y = y, x_qy
A, B, C, D = C, D, A - q*C, B_qD
# At this point the signs of the coefficients
# change and their roles are interchanged.
# A <= 0, B > 0, C > 0, D < 0,
# x' = x + A <= x < x" = x + B,
# y' = y + D < y < y" = y + C.
if y + D <= 0:
break
q = (x + B) // (y + D)
x_qy, A_qC = x - q*y, A - q*C
if x_qy + A_qC < 0:
break
x, y = y, x_qy
A, B, C, D = C, D, A_qC, B - q*D
# Now the conditions on top of the loop
# are again satisfied.
# A > 0, B < 0, C < 0, D > 0.
if B == 0:
# This can only happen when y == 0 in the beginning
# and the inner loop does nothing.
# Long division is forced.
a, b = b, a % b
continue
# Compute new long arguments using the coefficients.
a, b = A*a + B*b, C*a + D*b
# Small divisors. Finish with the standard algorithm.
while b:
a, b = b, a % b
return a
def ilcm(*args):
"""Computes integer least common multiple.
Examples
========
>>> from sympy import ilcm
>>> ilcm(5, 10)
10
>>> ilcm(7, 3)
21
>>> ilcm(5, 10, 15)
30
"""
if len(args) < 2:
raise TypeError(
'ilcm() takes at least 2 arguments (%s given)' % len(args))
if 0 in args:
return 0
a = args[0]
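    # lcm(a, b) = a // gcd(a, b) * b; dividing by the gcd before
    # multiplying keeps the intermediate value no larger than the
    # final lcm instead of forming the full product a*b.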
for b in args[1:]:
a = a // igcd(a, b) * b # since gcd(a,b) | a
return a
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
Examples
========
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
def mod_inverse(a, m):
r"""
Return the number $c$ such that $a \times c \equiv 1 \pmod{m}$,
where $c$ has the same sign as $m$. If no such value exists,
a ValueError is raised.
Examples
========
>>> from sympy import mod_inverse, S
Suppose we wish to find the multiplicative inverse $x$ of
3 modulo 11. This is the same as finding $x$ such
that $3x \equiv 1 \pmod{11}$. One value of $x$ that satisfies
this congruence is 4, because $3 \times 4 = 12$ and $12 \equiv 1 \pmod{11}$.
This is the value returned by ``mod_inverse``:
>>> mod_inverse(3, 11)
4
>>> mod_inverse(-3, 11)
7
When there is a common factor between the numerators of
`a` and `m`, the inverse does not exist:
>>> mod_inverse(2, 4)
Traceback (most recent call last):
...
ValueError: inverse of 2 mod 4 does not exist
>>> mod_inverse(S(2)/7, S(5)/2)
7/2
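The defining property can be checked directly (a quick sketch):
>>> (3*mod_inverse(3, 11)) % 11
1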
References
==========
.. [1] https://en.wikipedia.org/wiki/Modular_multiplicative_inverse
.. [2] https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
c = None
try:
a, m = as_int(a), as_int(m)
if m != 1 and m != -1:
x, _, g = igcdex(a, m)
if g == 1:
c = x % m
except ValueError:
a, m = sympify(a), sympify(m)
if not (a.is_number and m.is_number):
raise TypeError(filldedent('''
Expected numbers for arguments; symbolic `mod_inverse`
is not implemented
but symbolic expressions can be handled with the
similar function,
sympy.polys.polytools.invert'''))
big = (m > 1)
if big not in (S.true, S.false):
raise ValueError('m > 1 did not evaluate; try to simplify %s' % m)
elif big:
c = 1/a
if c is None:
raise ValueError('inverse of %s (mod %s) does not exist' % (a, m))
return c
class Number(AtomicExpr):
"""Represents atomic numbers in SymPy.
Explanation
===========
Floating point numbers are represented by the Float class.
Rational numbers (of any size) are represented by the Rational class.
Integer numbers (of any size) are represented by the Integer class.
Float and Rational are subclasses of Number; Integer is a subclass
of Rational.
For example, ``2/3`` is represented as ``Rational(2, 3)`` which is
a different object from the floating point number obtained with
Python division ``2/3``. Even for numbers that are exactly
represented in binary, there is a difference between how two forms,
such as ``Rational(1, 2)`` and ``Float(0.5)``, are used in SymPy.
The rational form is to be preferred in symbolic computations.
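A brief illustration of the difference (a small sketch):
>>> from sympy import Rational
>>> Rational(1, 10) + Rational(2, 10)
3/10
>>> 0.1 + 0.2  # plain Python floats accumulate rounding error
0.30000000000000004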
Other kinds of numbers, such as algebraic numbers ``sqrt(2)`` or
complex numbers ``3 + 4*I``, are not instances of Number class as
they are not atomic.
See Also
========
Float, Integer, Rational
"""
is_commutative = True
is_number = True
is_Number = True
__slots__ = ()
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
kind = NumberKind
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, str):
_obj = obj.lower() # float('INF') == float('inf')
if _obj == 'nan':
return S.NaN
elif _obj == 'inf':
return S.Infinity
elif _obj == '+inf':
return S.Infinity
elif _obj == '-inf':
return S.NegativeInfinity
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def could_extract_minus_sign(self):
return bool(self.is_extended_negative)
def invert(self, other, *gens, **args):
from sympy.polys.polytools import invert
if getattr(other, 'is_number', True):
return mod_inverse(self, other)
return invert(self, other, *gens, **args)
def __divmod__(self, other):
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
if self.is_infinite or S.NaN in (self, other):
return (S.NaN, S.NaN)
except TypeError:
return NotImplemented
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
elif isinstance(other, Float):
rat = self/Rational(other)
else:
rat = self/other
if other.is_finite:
w = int(rat) if rat >= 0 else int(rat) - 1
r = self - other*w
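            # e.g. divmod(Rational(7, 2), Rational(3, 2)): rat = 7/3,
            # so w = 2 and r = 7/2 - 3 = 1/2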
else:
w = 0 if not self or (sign(self) == sign(other)) else -1
r = other if w else self
return Tuple(w, r)
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
return NotImplemented
return divmod(other, self)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def floor(self):
raise NotImplementedError('%s needs .floor() method' %
(self.__class__.__name__))
def ceiling(self):
raise NotImplementedError('%s needs .ceiling() method' %
(self.__class__.__name__))
def __floor__(self):
return self.floor()
def __ceil__(self):
return self.ceiling()
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
from sympy.series.order import Order
# Order(5, x, y) -> Order(1,x,y)
return Order(S.One, *symbols)
def _eval_subs(self, old, new):
if old == -self:
return -new
return self # there is no other possibility
@classmethod
def class_key(cls):
return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.Infinity
elif other is S.NegativeInfinity:
return S.NegativeInfinity
return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
return S.Infinity
return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.Infinity
else:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
elif isinstance(other, Tuple):
return NotImplemented
return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NaN:
return S.NaN
elif other in (S.Infinity, S.NegativeInfinity):
return S.Zero
return AtomicExpr.__truediv__(self, other)
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' %
(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' %
(self.__class__.__name__))
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
raise NotImplementedError('%s needs .__lt__() method' %
(self.__class__.__name__))
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
raise NotImplementedError('%s needs .__le__() method' %
(self.__class__.__name__))
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
return _sympify(other).__lt__(self)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
return _sympify(other).__le__(self)
def __hash__(self):
return super().__hash__()
def is_constant(self, *wrt, **flags):
return True
def as_coeff_mul(self, *deps, rational=True, **kwargs):
# a -> c*t
if self.is_Rational or not rational:
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
if rational and not self.is_Rational:
return S.One, self
return (self, S.One) if self else (S.One, self)
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
if not rational:
return self, S.Zero
return S.Zero, self
def gcd(self, other):
"""Compute GCD of `self` and `other`. """
from sympy.polys.polytools import gcd
return gcd(self, other)
def lcm(self, other):
"""Compute LCM of `self` and `other`. """
from sympy.polys.polytools import lcm
return lcm(self, other)
def cofactors(self, other):
"""Compute GCD and cofactors of `self` and `other`. """
from sympy.polys.polytools import cofactors
return cofactors(self, other)
class Float(Number):
"""Represent a floating-point number of arbitrary precision.
Examples
========
>>> from sympy import Float
>>> Float(3.5)
3.50000000000000
>>> Float(3)
3.00000000000000
Creating Floats from strings (and Python ``int`` objects)
will give a minimum precision of 15 digits, but the
precision will automatically increase to capture all digits
entered.
>>> Float(1)
1.00000000000000
>>> Float(10**20)
100000000000000000000.
>>> Float('1e20')
100000000000000000000.
However, *floating-point* numbers (Python ``float`` types) retain
only 15 digits of precision:
>>> Float(1e20)
1.00000000000000e+20
>>> Float(1.23456789123456789)
1.23456789123457
It may be preferable to enter high-precision decimal numbers
as strings:
>>> Float('1.23456789123456789')
1.23456789123456789
The desired number of digits can also be specified:
>>> Float('1e-3', 3)
0.00100
>>> Float(100, 4)
100.0
Float can automatically count significant figures if a null string
is sent for the precision; spaces or underscores are also allowed. (Auto-
counting is only allowed for strings and integers).
>>> Float('123 456 789.123_456', '')
123456789.123456
>>> Float('12e-3', '')
0.012
>>> Float(3, '')
3.
If a number is written in scientific notation, only the digits before the
exponent are considered significant if a decimal appears, otherwise the
"e" signifies only how to move the decimal:
>>> Float('60.e2', '') # 2 digits significant
6.0e+3
>>> Float('60e2', '') # 4 digits significant
6000.
>>> Float('600e-2', '') # 3 digits significant
6.00
Notes
=====
Floats are inexact by their nature unless their value is a binary-exact
value.
>>> approx, exact = Float(.1, 1), Float(.125, 1)
For calculation purposes, evalf needs to be able to change the precision
but this will not increase the accuracy of the inexact value. The
following is the most accurate 5-digit approximation of a value of 0.1
that had only 1 digit of precision:
>>> approx.evalf(5)
0.099609
By contrast, 0.125 is exact in binary (as it is in base 10) and so it
can be passed to Float or evalf to obtain an arbitrary precision with
matching accuracy:
>>> Float(exact, 5)
0.12500
>>> exact.evalf(20)
0.12500000000000000000
Trying to make a high-precision Float from a float is not disallowed,
but one must keep in mind that the *underlying float* (not the apparent
decimal value) is being obtained with high precision. For example, 0.3
does not have a finite binary representation. The closest rational is
the fraction 5404319552844595/2**54. So if you try to obtain a Float of
0.3 to 20 digits of precision you will not see the same thing as 0.3
followed by 19 zeros:
>>> Float(0.3, 20)
0.29999999999999998890
If you want a 20-digit value of the decimal 0.3 (not the floating point
approximation of 0.3) you should send the 0.3 as a string. The underlying
representation is still binary but a higher precision than Python's float
is used:
>>> Float('0.3', 20)
0.30000000000000000000
Although you can increase the precision of an existing Float using Float
it will not increase the accuracy -- the underlying value is not changed:
>>> def show(f): # binary rep of Float
... from sympy import Mul, Pow
... s, m, e, b = f._mpf_
... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
... print('%s at prec=%s' % (v, f._prec))
...
>>> t = Float('0.3', 3)
>>> show(t)
4915/2**14 at prec=13
>>> show(Float(t, 20)) # higher prec, not higher accuracy
4915/2**14 at prec=70
>>> show(Float(t, 2)) # lower prec
307/2**10 at prec=10
The same thing happens when evalf is used on a Float:
>>> show(t.evalf(20))
4915/2**14 at prec=70
>>> show(t.evalf(2))
307/2**10 at prec=10
Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
produce the number (-1)**n*c*2**p:
>>> n, c, p = 1, 5, 0
>>> (-1)**n*c*2**p
-5
>>> Float((1, 5, 0))
-5.00000000000000
An actual mpf tuple also contains the number of bits in c as the last
element of the tuple:
>>> _._mpf_
(1, 5, 0, 3)
This is not needed for instantiation and is not the same thing as the
precision. The mpf tuple and the precision are two separate quantities
that Float tracks.
In SymPy, a Float is a number that can be computed with arbitrary
precision. Although floating point 'inf' and 'nan' are not such
numbers, Float can create these numbers:
>>> Float('-inf')
-oo
>>> _.is_Float
False
Zero in Float only has a single value. Values are not separate for
positive and negative zeroes.
"""
__slots__ = ('_mpf_', '_prec')
_mpf_: tuple[int, int, int, int]
# A Float represents many real numbers,
# both rational and irrational.
is_rational = None
is_irrational = None
is_number = True
is_real = True
is_extended_real = True
is_Float = True
def __new__(cls, num, dps=None, precision=None):
if dps is not None and precision is not None:
raise ValueError('Both decimal and binary precision supplied. '
'Supply only one. ')
if isinstance(num, str):
# Float accepts spaces as digit separators
num = num.replace(' ', '').lower()
if num.startswith('.') and len(num) > 1:
num = '0' + num
elif num.startswith('-.') and len(num) > 2:
num = '-0.' + num[2:]
elif num in ('inf', '+inf'):
return S.Infinity
elif num == '-inf':
return S.NegativeInfinity
elif isinstance(num, float) and num == 0:
num = '0'
elif isinstance(num, float) and num == float('inf'):
return S.Infinity
elif isinstance(num, float) and num == float('-inf'):
return S.NegativeInfinity
elif isinstance(num, float) and math.isnan(num):
return S.NaN
elif isinstance(num, (SYMPY_INTS, Integer)):
num = str(num)
elif num is S.Infinity:
return num
elif num is S.NegativeInfinity:
return num
elif num is S.NaN:
return num
elif _is_numpy_instance(num): # support for numpy datatypes
num = _convert_numpy_types(num)
elif isinstance(num, mpmath.mpf):
if precision is None:
if dps is None:
precision = num.context.prec
num = num._mpf_
if dps is None and precision is None:
dps = 15
if isinstance(num, Float):
return num
if isinstance(num, str) and _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
dps = max(15, dps)
precision = dps_to_prec(dps)
elif precision == '' and dps is None or precision is None and dps == '':
if not isinstance(num, str):
raise ValueError('The null string can only be used when '
'the number to Float is passed as a string or an integer.')
ok = None
if _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
precision = dps_to_prec(dps)
ok = True
if ok is None:
raise ValueError('string-float not recognized: %s' % num)
        # Decimal precision (dps) is set, and possibly binary precision
        # (precision) as well. From here on, binary precision is used to
        # compute the Float, so use it directly if supplied; otherwise
        # translate from the decimal precision.
if precision is None or precision == '':
precision = dps_to_prec(dps)
precision = int(precision)
if isinstance(num, float):
_mpf_ = mlib.from_float(num, precision, rnd)
elif isinstance(num, str):
_mpf_ = mlib.from_str(num, precision, rnd)
elif isinstance(num, decimal.Decimal):
if num.is_finite():
_mpf_ = mlib.from_str(str(num), precision, rnd)
elif num.is_nan():
return S.NaN
elif num.is_infinite():
if num > 0:
return S.Infinity
return S.NegativeInfinity
else:
raise ValueError("unexpected decimal value %s" % str(num))
elif isinstance(num, tuple) and len(num) in (3, 4):
if isinstance(num[1], str):
# it's a hexadecimal (coming from a pickled object)
num = list(num)
# If we're loading an object pickled in Python 2 into
# Python 3, we may need to strip a trailing 'L' because
# of a shim for int on Python 3, see issue #13470.
if num[1].endswith('L'):
num[1] = num[1][:-1]
# Strip leading '0x' - gmpy2 only documents such inputs
# with base prefix as valid when the 2nd argument (base) is 0.
# When mpmath uses Sage as the backend, however, it
# ends up including '0x' when preparing the picklable tuple.
# See issue #19690.
if num[1].startswith('0x'):
num[1] = num[1][2:]
# Now we can assume that it is in standard form
num[1] = MPZ(num[1], 16)
_mpf_ = tuple(num)
else:
if len(num) == 4:
# handle normalization hack
return Float._new(num, precision)
else:
if not all((
num[0] in (0, 1),
num[1] >= 0,
all(type(i) is int for i in num)
)):
raise ValueError('malformed mpf: %s' % (num,))
# don't compute number or else it may
# over/underflow
return Float._new(
(num[0], num[1], num[2], bitcount(num[1])),
precision)
else:
try:
_mpf_ = num._as_mpf_val(precision)
except (NotImplementedError, AttributeError):
_mpf_ = mpmath.mpf(num, prec=precision)._mpf_
return cls._new(_mpf_, precision, zero=False)
@classmethod
def _new(cls, _mpf_, _prec, zero=True):
# special cases
if zero and _mpf_ == fzero:
return S.Zero # Float(0) -> 0.0; Float._new((0,0,0,0)) -> 0
elif _mpf_ == _mpf_nan:
return S.NaN
elif _mpf_ == _mpf_inf:
return S.Infinity
elif _mpf_ == _mpf_ninf:
return S.NegativeInfinity
obj = Expr.__new__(cls)
obj._mpf_ = mpf_norm(_mpf_, _prec)
obj._prec = _prec
return obj
# mpz can't be pickled
def __getnewargs_ex__(self):
return ((mlib.to_pickable(self._mpf_),), {'precision': self._prec})
def _hashable_content(self):
return (self._mpf_, self._prec)
def floor(self):
return Integer(int(mlib.to_int(
mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return Integer(int(mlib.to_int(
mlib.mpf_ceil(self._mpf_, self._prec))))
def __floor__(self):
return self.floor()
def __ceil__(self):
return self.ceiling()
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
rv = mpf_norm(self._mpf_, prec)
if rv != self._mpf_ and self._prec == prec:
debug(self._mpf_, rv)
return rv
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def _eval_is_finite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return False
return True
def _eval_is_infinite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return True
return False
def _eval_is_integer(self):
return self._mpf_ == fzero
def _eval_is_negative(self):
if self._mpf_ in (_mpf_ninf, _mpf_inf):
return False
return self.num < 0
def _eval_is_positive(self):
if self._mpf_ in (_mpf_ninf, _mpf_inf):
return False
return self.num > 0
def _eval_is_extended_negative(self):
if self._mpf_ == _mpf_ninf:
return True
if self._mpf_ == _mpf_inf:
return False
return self.num < 0
def _eval_is_extended_positive(self):
if self._mpf_ == _mpf_inf:
return True
if self._mpf_ == _mpf_ninf:
return False
return self.num > 0
def _eval_is_zero(self):
return self._mpf_ == fzero
def __bool__(self):
return self._mpf_ != fzero
def __neg__(self):
if not self:
return self
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and other != 0 and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
return Number.__truediv__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational) and other.q != 1 and global_parameters.evaluate:
# calculate mod with Rationals, *then* round the result
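            # e.g. Float(3.5) % Rational(1, 3) is evaluated as
            # Rational(7, 2) % Rational(1, 3) = 1/6 and only then
            # converted back to a Float at this Float's precision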
return Float(Rational.__mod__(Rational(self), other),
precision=self._prec)
if isinstance(other, Float) and global_parameters.evaluate:
r = self/other
if r == int(r):
return Float(0, precision=max(self._prec, other._prec))
if isinstance(other, Number) and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Float) and global_parameters.evaluate:
return other.__mod__(self)
if isinstance(other, Number) and global_parameters.evaluate:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
"""
expt is a symbolic object but not equal to 0 or 1.
(-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
-> p**r*(cos(Pi*r) + I*sin(Pi*r))
"""
if self == 0:
if expt.is_extended_positive:
return self
if expt.is_extended_negative:
return S.ComplexInfinity
if isinstance(expt, Number):
if isinstance(expt, Integer):
prec = self._prec
return Float._new(
mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
elif isinstance(expt, Rational) and \
expt.p == 1 and expt.q % 2 and self.is_negative:
return Pow(S.NegativeOne, expt, evaluate=False)*(
-self)._eval_power(expt)
expt, prec = expt._as_mpf_op(self._prec)
mpfself = self._mpf_
try:
y = mpf_pow(mpfself, expt, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow(
(mpfself, fzero), (expt, fzero), prec, rnd)
return Float._new(re, prec) + \
Float._new(im, prec)*S.ImaginaryUnit
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
if self._mpf_ == fzero:
return 0
return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down
def __eq__(self, other):
from sympy.logic.boolalg import Boolean
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if isinstance(other, Boolean):
return False
if other.is_NumberSymbol:
if other.is_irrational:
return False
return other.__eq__(self)
if other.is_Float:
# comparison is exact
# so Float(.1, 3) != Float(.1, 33)
return self._mpf_ == other._mpf_
if other.is_Rational:
return other.__eq__(self)
if other.is_Number:
# numbers should compare at the same precision;
# all _as_mpf_val routines should be sure to abide
# by the request to change the prec if necessary; if
# they don't, the equality test will fail since it compares
# the mpf tuples
ompf = other._as_mpf_val(self._prec)
return bool(mlib.mpf_eq(self._mpf_, ompf))
if not self:
return not other
return False # Float != non-Number
def __ne__(self, other):
return not self == other
def _Frel(self, other, op):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Rational:
# test self*other.q <?> other.p without losing precision
'''
>>> f = Float(.1,2)
>>> i = 1234567890
>>> (f*i)._mpf_
(0, 471, 18, 9)
>>> mlib.mpf_mul(f._mpf_, mlib.from_int(i))
(0, 505555550955, -12, 39)
'''
smpf = mlib.mpf_mul(self._mpf_, mlib.from_int(other.q))
ompf = mlib.from_int(other.p)
return _sympify(bool(op(smpf, ompf)))
elif other.is_Float:
return _sympify(bool(
op(self._mpf_, other._mpf_)))
elif other.is_comparable and other not in (
S.Infinity, S.NegativeInfinity):
other = other.evalf(prec_to_dps(self._prec))
if other._prec > 1:
if other.is_Number:
return _sympify(bool(
op(self._mpf_, other._as_mpf_val(self._prec))))
def __gt__(self, other):
if isinstance(other, NumberSymbol):
return other.__lt__(self)
rv = self._Frel(other, mlib.mpf_gt)
if rv is None:
return Expr.__gt__(self, other)
return rv
def __ge__(self, other):
if isinstance(other, NumberSymbol):
return other.__le__(self)
rv = self._Frel(other, mlib.mpf_ge)
if rv is None:
return Expr.__ge__(self, other)
return rv
def __lt__(self, other):
if isinstance(other, NumberSymbol):
return other.__gt__(self)
rv = self._Frel(other, mlib.mpf_lt)
if rv is None:
return Expr.__lt__(self, other)
return rv
def __le__(self, other):
if isinstance(other, NumberSymbol):
return other.__ge__(self)
rv = self._Frel(other, mlib.mpf_le)
if rv is None:
return Expr.__le__(self, other)
return rv
def __hash__(self):
return super().__hash__()
def epsilon_eq(self, other, epsilon="1e-15"):
return abs(self - other) < Float(epsilon)
def __format__(self, format_spec):
return format(decimal.Decimal(str(self)), format_spec)
# Add sympify converters
_sympy_converter[float] = _sympy_converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or convert the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
If an unevaluated Rational is desired, ``gcd=1`` can be passed and
this will keep common divisors of the numerator and denominator
from being eliminated. It is not possible, however, to leave a
negative value in the denominator.
>>> Rational(2, 4, gcd=1)
2/4
>>> Rational(2, -4, gcd=1).q
4
See Also
========
sympy.core.sympify.sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ('p', 'q')
p: int
q: int
is_Rational = True
@cacheit
def __new__(cls, p, q=None, gcd=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, SYMPY_INTS):
pass
else:
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, str):
try:
p = sympify(p)
except (SympifyError, SyntaxError):
pass # error will raise below
else:
if p.count('/') > 1:
raise TypeError('invalid input: %s' % p)
p = p.replace(' ', '')
pq = p.rsplit('/', 1)
if len(pq) == 2:
p, q = pq
fp = fractions.Fraction(p)
fq = fractions.Fraction(q)
p = fp/fq
try:
p = fractions.Fraction(p)
except ValueError:
pass # error will raise below
else:
return Rational(p.numerator, p.denominator, 1)
if not isinstance(p, Rational):
raise TypeError('invalid input: %s' % p)
q = 1
gcd = 1
if not isinstance(p, SYMPY_INTS):
p = Rational(p)
q *= p.q
p = p.p
else:
p = int(p)
if not isinstance(q, SYMPY_INTS):
q = Rational(q)
p *= q.q
q = q.p
else:
q = int(q)
# p and q are now ints
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
return S.ComplexInfinity
if q < 0:
q = -q
p = -p
if not gcd:
gcd = igcd(abs(p), q)
if gcd > 1:
p //= gcd
q //= gcd
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
Examples
========
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
f = fractions.Fraction(self.p, self.q)
return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator))))
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational(self.p + self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
#TODO: this can probably be optimized more
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational(self.p - self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational(self.q*other.p - self.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.q*other.p - self.p*other.q, self.q*other.q)
elif isinstance(other, Float):
return -self + other
else:
return Number.__rsub__(self, other)
return Number.__rsub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational(self.p*other.p, self.q, igcd(other.p, self.q))
elif isinstance(other, Rational):
return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p))
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
if self.p and other.p == S.Zero:
return S.ComplexInfinity
else:
return Rational(self.p, self.q*other.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__truediv__(self, other)
return Number.__truediv__(self, other)
@_sympifyit('other', NotImplemented)
def __rtruediv__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational(other.p*self.q, self.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return other*(1/self)
else:
return Number.__rtruediv__(self, other)
return Number.__rtruediv__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if global_parameters.evaluate:
if isinstance(other, Rational):
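                # With self = p1/q1 and other = p2/q2,
                # n = floor(self/other) = (p1*q2) // (p2*q1); the remainder
                # is self - n*other over the common denominator q1*q2,
                # e.g. Rational(7, 2) % Rational(1, 3): n = 21 // 2 = 10,
                # remainder (21 - 20)/6 = 1/6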
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
precision=other._prec)
return Number.__mod__(self, other)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_extended_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
return S.NegativeOne**expt*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p, 1)
if isinstance(expt, Rational):
intpart = expt.p // expt.q
if intpart:
intpart += 1
remfracpart = intpart*expt.q - expt.p
ratfracpart = Rational(remfracpart, expt.q)
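                    # e.g. Rational(1, 4)**Rational(5, 2): intpart = 3,
                    # ratfracpart = 1/2, giving 4**(1/2)*(1/4**3) = 1/32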
if self.p != 1:
return Integer(self.p)**expt*Integer(self.q)**ratfracpart*Rational(1, self.q**intpart, 1)
return Integer(self.q)**ratfracpart*Rational(1, self.q**intpart, 1)
else:
remfracpart = expt.q - expt.p
ratfracpart = Rational(remfracpart, expt.q)
if self.p != 1:
return Integer(self.p)**expt*Integer(self.q)**ratfracpart*Rational(1, self.q, 1)
return Integer(self.q)**ratfracpart*Rational(1, self.q, 1)
if self.is_extended_negative and expt.is_even:
return (-self)**expt
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -int(-p//q)
return int(p//q)
def floor(self):
return Integer(self.p // self.q)
def ceiling(self):
return -Integer(-self.p // self.q)
def __floor__(self):
return self.floor()
def __ceil__(self):
return self.ceiling()
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if not isinstance(other, Number):
# S(0) == S.false is False
# S(0) == False is True
return False
if not self:
return not other
if other.is_NumberSymbol:
if other.is_irrational:
return False
return other.__eq__(self)
if other.is_Rational:
# a Rational is always in reduced form so will never be 2/4
# so we can just check equivalence of args
return self.p == other.p and self.q == other.q
if other.is_Float:
# all Floats have a denominator that is a power of 2
# so if self doesn't, it can't be equal to other
if self.q & (self.q - 1):
return False
s, m, t = other._mpf_[:3]
if s:
m = -m
if not t:
# other is an odd integer
if not self.is_Integer or self.is_even:
return False
return m == self.p
from .power import integer_log
if t > 0:
# other is an even integer
if not self.is_Integer:
return False
# does m*2**t == self.p
return self.p and not self.p % m and \
integer_log(self.p//m, 2) == (t, True)
# does non-integer s*m/2**-t = p/q?
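            # e.g. Rational(3, 4) vs Float(0.75) = 3*2**(-2):
            # m == 3 == p and q == 2**2 == 2**(-t)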
if self.is_Integer:
return False
return m == self.p and integer_log(self.q, 2) == (-t, True)
return False
def __ne__(self, other):
return not self == other
def _Rrel(self, other, attr):
# if you want self < other, pass self, other, __gt__
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Number:
op = None
s, o = self, other
if other.is_NumberSymbol:
op = getattr(o, attr)
elif other.is_Float:
op = getattr(o, attr)
elif other.is_Rational:
s, o = Integer(s.p*o.q), Integer(s.q*o.p)
op = getattr(o, attr)
if op:
return op(s)
if o.is_number and o.is_extended_real:
return Integer(s.p), s.q*o
def __gt__(self, other):
rv = self._Rrel(other, '__lt__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__gt__(*rv)
def __ge__(self, other):
rv = self._Rrel(other, '__le__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__ge__(*rv)
def __lt__(self, other):
rv = self._Rrel(other, '__gt__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__lt__(*rv)
def __le__(self, other):
rv = self._Rrel(other, '__ge__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__le__(*rv)
def __hash__(self):
return super().__hash__()
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory.factor_ import factorrat
return factorrat(self, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
@property
def numerator(self):
return self.p
@property
def denominator(self):
return self.q
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other == S.Zero:
return other
return Rational(
igcd(self.p, other.p),
ilcm(self.q, other.q))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p // igcd(self.p, other.p) * other.p,
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return self, S.One
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
return self, S.Zero
class Integer(Rational):
"""Represents integer numbers of any size.
Examples
========
>>> from sympy import Integer
>>> Integer(3)
3
If a float or a rational is passed to Integer, the fractional part
will be discarded; the effect is that of rounding toward zero.
>>> Integer(3.8)
3
>>> Integer(-3.8)
-3
A string is acceptable input if it can be parsed as an integer:
>>> Integer("9" * 20)
99999999999999999999
It is rarely needed to explicitly instantiate an Integer, because
Python integers are automatically converted to Integer when they
are used in SymPy expressions.
"""
q = 1
is_integer = True
is_number = True
is_Integer = True
__slots__ = ()
def _as_mpf_val(self, prec):
return mlib.from_int(self.p, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
@cacheit
def __new__(cls, i):
if isinstance(i, str):
i = i.replace(' ', '')
# whereas we cannot, in general, make a Rational from an
# arbitrary expression, we can make an Integer unambiguously
# (except when a non-integer expression happens to round to
# an integer). So we proceed by taking int() of the input and
# let the int routines determine whether the expression can
# be made into an int or whether an error should be raised.
try:
ival = int(i)
except TypeError:
raise TypeError(
"Argument of Integer should be of numeric type, got %s." % i)
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
if ival == 1:
return S.One
if ival == -1:
return S.NegativeOne
if ival == 0:
return S.Zero
obj = Expr.__new__(cls)
obj.p = ival
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
def floor(self):
return Integer(self.p)
def ceiling(self):
return Integer(self.p)
def __floor__(self):
return self.floor()
def __ceil__(self):
return self.ceiling()
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
if isinstance(other, Integer) and global_parameters.evaluate:
return Tuple(*(divmod(self.p, other.p)))
else:
return Number.__divmod__(self, other)
def __rdivmod__(self, other):
if isinstance(other, int) and global_parameters.evaluate:
return Tuple(*(divmod(other, self.p)))
else:
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
oname = type(other).__name__
sname = type(self).__name__
raise TypeError(msg % (oname, sname))
return Number.__divmod__(other, self)
# TODO make it decorator + bytecodehacks?
def __add__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(self.p + other)
elif isinstance(other, Integer):
return Integer(self.p + other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q + other.p, other.q, 1)
return Rational.__add__(self, other)
else:
return Add(self, other)
def __radd__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(other + self.p)
elif isinstance(other, Rational):
return Rational(other.p + self.p*other.q, other.q, 1)
return Rational.__radd__(self, other)
return Rational.__radd__(self, other)
def __sub__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(self.p - other)
elif isinstance(other, Integer):
return Integer(self.p - other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q - other.p, other.q, 1)
return Rational.__sub__(self, other)
return Rational.__sub__(self, other)
def __rsub__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(other - self.p)
elif isinstance(other, Rational):
return Rational(other.p - self.p*other.q, other.q, 1)
return Rational.__rsub__(self, other)
return Rational.__rsub__(self, other)
def __mul__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(self.p*other)
elif isinstance(other, Integer):
return Integer(self.p*other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.p, other.q, igcd(self.p, other.q))
return Rational.__mul__(self, other)
return Rational.__mul__(self, other)
def __rmul__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(other*self.p)
elif isinstance(other, Rational):
return Rational(other.p*self.p, other.q, igcd(self.p, other.q))
return Rational.__rmul__(self, other)
return Rational.__rmul__(self, other)
def __mod__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(self.p % other)
elif isinstance(other, Integer):
return Integer(self.p % other.p)
return Rational.__mod__(self, other)
return Rational.__mod__(self, other)
def __rmod__(self, other):
if global_parameters.evaluate:
if isinstance(other, int):
return Integer(other % self.p)
elif isinstance(other, Integer):
return Integer(other.p % self.p)
return Rational.__rmod__(self, other)
return Rational.__rmod__(self, other)
def __eq__(self, other):
if isinstance(other, int):
return (self.p == other)
elif isinstance(other, Integer):
return (self.p == other.p)
return Rational.__eq__(self, other)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Integer:
return _sympify(self.p > other.p)
return Rational.__gt__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Integer:
return _sympify(self.p < other.p)
return Rational.__lt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Integer:
return _sympify(self.p >= other.p)
return Rational.__ge__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Integer:
return _sympify(self.p <= other.p)
return Rational.__le__(self, other)
def __hash__(self):
return hash(self.p)
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(self, expt):
"""
Tries to do some simplifications on self**expt
Returns None if no further simplifications can be done.
Explanation
===========
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- sqrt(4) becomes 2
- sqrt(-4) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
"""
from sympy.ntheory.factor_ import perfect_power
if expt is S.Infinity:
if self.p > S.One:
return S.Infinity
# cases -1, 0, 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit*S.Infinity
if expt is S.NegativeInfinity:
return Rational(1, self, 1)**S.Infinity
if not isinstance(expt, Number):
# simplify when expt is even
# (-2)**k --> 2**k
if self.is_negative and expt.is_even:
return (-self)**expt
if isinstance(expt, Float):
# Rational knows how to exponentiate by a Float
return super()._eval_power(expt)
if not isinstance(expt, Rational):
return
if expt is S.Half and self.is_negative:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-self, expt)
if expt.is_negative:
# invert base and change sign on exponent
ne = -expt
if self.is_negative:
return S.NegativeOne**expt*Rational(1, -self, 1)**ne
else:
return Rational(1, self.p, 1)**ne
# see if base is a perfect root, sqrt(4) --> 2
x, xexact = integer_nthroot(abs(self.p), expt.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x**abs(expt.p))
if self.is_negative:
result *= S.NegativeOne**expt
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
# if it's not an nth root, it still might be a perfect power
b_pos = int(abs(self.p))
p = perfect_power(b_pos)
if p is not False:
dict = {p[0]: p[1]}
else:
dict = Integer(b_pos).factors(limit=2**15)
# now process the dict of factors
out_int = 1 # integer part
out_rad = 1 # extracted radicals
sqr_int = 1
sqr_gcd = 0
sqr_dict = {}
for prime, exponent in dict.items():
exponent *= expt.p
# remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, expt.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, expt.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, expt.q//g, 1))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.items():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.items():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == b_pos and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
if self.is_negative:
result *= Pow(S.NegativeOne, expt)
return result
def _eval_is_prime(self):
from sympy.ntheory.primetest import isprime
return isprime(self)
def _eval_is_composite(self):
if self > 1:
return fuzzy_not(self.is_prime)
else:
return False
def as_numer_denom(self):
return self, S.One
@_sympifyit('other', NotImplemented)
def __floordiv__(self, other):
if not isinstance(other, Expr):
return NotImplemented
if isinstance(other, Integer):
return Integer(self.p // other)
return divmod(self, other)[0]
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
# These bitwise operations (__lshift__, __rlshift__, ..., __invert__) are defined
# for Integer only and not for general SymPy expressions. This is to achieve
# compatibility with the numbers.Integral ABC which only defines these operations
# among instances of numbers.Integral. Therefore, these methods check explicitly for
# integer types rather than using sympify because they should not accept arbitrary
# symbolic expressions and there is no symbolic analogue of numbers.Integral's
# bitwise operations.
def __lshift__(self, other):
if isinstance(other, (int, Integer, numbers.Integral)):
return Integer(self.p << int(other))
else:
return NotImplemented
def __rlshift__(self, other):
if isinstance(other, (int, numbers.Integral)):
return Integer(int(other) << self.p)
else:
return NotImplemented
def __rshift__(self, other):
if isinstance(other, (int, Integer, numbers.Integral)):
return Integer(self.p >> int(other))
else:
return NotImplemented
def __rrshift__(self, other):
if isinstance(other, (int, numbers.Integral)):
return Integer(int(other) >> self.p)
else:
return NotImplemented
def __and__(self, other):
if isinstance(other, (int, Integer, numbers.Integral)):
return Integer(self.p & int(other))
else:
return NotImplemented
def __rand__(self, other):
if isinstance(other, (int, numbers.Integral)):
return Integer(int(other) & self.p)
else:
return NotImplemented
def __xor__(self, other):
if isinstance(other, (int, Integer, numbers.Integral)):
return Integer(self.p ^ int(other))
else:
return NotImplemented
def __rxor__(self, other):
if isinstance(other, (int, numbers.Integral)):
return Integer(int(other) ^ self.p)
else:
return NotImplemented
def __or__(self, other):
if isinstance(other, (int, Integer, numbers.Integral)):
return Integer(self.p | int(other))
else:
return NotImplemented
def __ror__(self, other):
if isinstance(other, (int, numbers.Integral)):
return Integer(int(other) | self.p)
else:
return NotImplemented
def __invert__(self):
return Integer(~self.p)
# Add sympify converters
_sympy_converter[int] = Integer
class AlgebraicNumber(Expr):
r"""
Class for representing algebraic numbers in SymPy.
Symbolically, an instance of this class represents an element
$\alpha \in \mathbb{Q}(\theta) \hookrightarrow \mathbb{C}$. That is, the
algebraic number $\alpha$ is represented as an element of a particular
number field $\mathbb{Q}(\theta)$, with a particular embedding of this
field into the complex numbers.
Formally, the primitive element $\theta$ is given by two data points: (1)
its minimal polynomial (which defines $\mathbb{Q}(\theta)$), and (2) a
particular complex number that is a root of this polynomial (which defines
the embedding $\mathbb{Q}(\theta) \hookrightarrow \mathbb{C}$). Finally,
the algebraic number $\alpha$ which we represent is then given by the
coefficients of a polynomial in $\theta$.
"""
__slots__ = ('rep', 'root', 'alias', 'minpoly', '_own_minpoly')
is_AlgebraicNumber = True
is_algebraic = True
is_number = True
kind = NumberKind
# Optional alias symbol is not free.
# Actually, alias should be a Str, but some methods
# expect that it be an instance of Expr.
free_symbols: set[Basic] = set()
def __new__(cls, expr, coeffs=None, alias=None, **args):
r"""
Construct a new algebraic number $\alpha$ belonging to a number field
$k = \mathbb{Q}(\theta)$.
There are four instance attributes to be determined:
=========== ============================================================================
Attribute Type/Meaning
=========== ============================================================================
``root`` :py:class:`~.Expr` for $\theta$ as a complex number
``minpoly`` :py:class:`~.Poly`, the minimal polynomial of $\theta$
``rep`` :py:class:`~sympy.polys.polyclasses.DMP` giving $\alpha$ as poly in $\theta$
``alias`` :py:class:`~.Symbol` for $\theta$, or ``None``
=========== ============================================================================
See Parameters section for how they are determined.
Parameters
==========
expr : :py:class:`~.Expr`, or pair $(m, r)$
There are three distinct modes of construction, depending on what
is passed as *expr*.
**(1)** *expr* is an :py:class:`~.AlgebraicNumber`:
In this case we begin by copying all four instance attributes from
*expr*. If *coeffs* were also given, we compose the two coeff
polynomials (see below). If an *alias* was given, it overrides.
**(2)** *expr* is any other type of :py:class:`~.Expr`:
Then ``root`` will equal *expr*. Therefore it
must express an algebraic quantity, and we will compute its
``minpoly``.
**(3)** *expr* is an ordered pair $(m, r)$ giving the
``minpoly`` $m$, and a ``root`` $r$ thereof, which together
define $\theta$. In this case $m$ may be either a univariate
:py:class:`~.Poly` or any :py:class:`~.Expr` which represents the
same, while $r$ must be some :py:class:`~.Expr` representing a
complex number that is a root of $m$, including both explicit
expressions in radicals, and instances of
:py:class:`~.ComplexRootOf` or :py:class:`~.AlgebraicNumber`.
coeffs : list, :py:class:`~.ANP`, None, optional (default=None)
This defines ``rep``, giving the algebraic number $\alpha$ as a
polynomial in $\theta$.
If a list, the elements should be integers or rational numbers.
If an :py:class:`~.ANP`, we take its coefficients (using its
:py:meth:`~.ANP.to_list()` method). If ``None``, then the list of
coefficients defaults to ``[1, 0]``, meaning that $\alpha = \theta$
is the primitive element of the field.
If *expr* was an :py:class:`~.AlgebraicNumber`, let $g(x)$ be its
``rep`` polynomial, and let $f(x)$ be the polynomial defined by
*coeffs*. Then ``self.rep`` will represent the composition
$(f \circ g)(x)$.
alias : str, :py:class:`~.Symbol`, None, optional (default=None)
This is a way to provide a name for the primitive element. We
described several ways in which the *expr* argument can define the
value of the primitive element, but none of these methods gave it
a name. Here, for example, *alias* could be set as
``Symbol('theta')``, in order to make this symbol appear when
$\alpha$ is printed, or rendered as a polynomial, using the
:py:meth:`~.as_poly()` method.
Examples
========
Recall that we are constructing an algebraic number as a field element
$\alpha \in \mathbb{Q}(\theta)$.
>>> from sympy import AlgebraicNumber, sqrt, CRootOf, S
>>> from sympy.abc import x
Example (1): $\alpha = \theta = \sqrt{2}$
>>> a1 = AlgebraicNumber(sqrt(2))
>>> a1.minpoly_of_element().as_expr(x)
x**2 - 2
>>> a1.evalf(10)
1.414213562
Example (2): $\alpha = 3 \sqrt{2} - 5$, $\theta = \sqrt{2}$. We can
either build on the last example:
>>> a2 = AlgebraicNumber(a1, [3, -5])
>>> a2.as_expr()
-5 + 3*sqrt(2)
or start from scratch:
>>> a2 = AlgebraicNumber(sqrt(2), [3, -5])
>>> a2.as_expr()
-5 + 3*sqrt(2)
Example (3): $\alpha = 6 \sqrt{2} - 11$, $\theta = \sqrt{2}$. Again we
can build on the previous example, and we see that the coeff polys are
composed:
>>> a3 = AlgebraicNumber(a2, [2, -1])
>>> a3.as_expr()
-11 + 6*sqrt(2)
reflecting the fact that $(2x - 1) \circ (3x - 5) = 6x - 11$.
Example (4): $\alpha = \sqrt{2}$, $\theta = \sqrt{2} + \sqrt{3}$. The
easiest way is to use the :py:func:`~.to_number_field()` function:
>>> from sympy import to_number_field
>>> a4 = to_number_field(sqrt(2), sqrt(2) + sqrt(3))
>>> a4.minpoly_of_element().as_expr(x)
x**2 - 2
>>> a4.to_root()
sqrt(2)
>>> a4.primitive_element()
sqrt(2) + sqrt(3)
>>> a4.coeffs()
[1/2, 0, -9/2, 0]
but if you already knew the right coefficients, you could construct it
directly:
>>> a4 = AlgebraicNumber(sqrt(2) + sqrt(3), [S(1)/2, 0, S(-9)/2, 0])
>>> a4.to_root()
sqrt(2)
>>> a4.primitive_element()
sqrt(2) + sqrt(3)
Example (5): Construct the Golden Ratio as an element of the 5th
cyclotomic field, supposing we already know its coefficients. This time
we introduce the alias $\zeta$ for the primitive element of the field:
>>> from sympy import cyclotomic_poly
>>> from sympy.abc import zeta
>>> a5 = AlgebraicNumber(CRootOf(cyclotomic_poly(5), -1),
... [-1, -1, 0, 0], alias=zeta)
>>> a5.as_poly().as_expr()
-zeta**3 - zeta**2
>>> a5.evalf()
1.61803398874989
(The index ``-1`` to ``CRootOf`` selects the complex root with the
largest real and imaginary parts, which in this case is
$\mathrm{e}^{2i\pi/5}$. See :py:class:`~.ComplexRootOf`.)
Example (6): Building on the last example, construct the number
$2 \phi \in \mathbb{Q}(\phi)$, where $\phi$ is the Golden Ratio:
>>> from sympy.abc import phi
>>> a6 = AlgebraicNumber(a5.to_root(), coeffs=[2, 0], alias=phi)
>>> a6.as_poly().as_expr()
2*phi
>>> a6.primitive_element().evalf()
1.61803398874989
Note that we needed to use ``a5.to_root()``, since passing ``a5`` as
the first argument would have constructed the number $2 \phi$ as an
element of the field $\mathbb{Q}(\zeta)$:
>>> a6_wrong = AlgebraicNumber(a5, coeffs=[2, 0])
>>> a6_wrong.as_poly().as_expr()
-2*zeta**3 - 2*zeta**2
>>> a6_wrong.primitive_element().evalf()
0.309016994374947 + 0.951056516295154*I
"""
from sympy.polys.polyclasses import ANP, DMP
from sympy.polys.numberfields import minimal_polynomial
expr = sympify(expr)
rep0 = None
alias0 = None
if isinstance(expr, (tuple, Tuple)):
minpoly, root = expr
if not minpoly.is_Poly:
from sympy.polys.polytools import Poly
minpoly = Poly(minpoly)
elif expr.is_AlgebraicNumber:
minpoly, root, rep0, alias0 = (expr.minpoly, expr.root,
expr.rep, expr.alias)
else:
minpoly, root = minimal_polynomial(
expr, args.get('gen'), polys=True), expr
dom = minpoly.get_domain()
if coeffs is not None:
if not isinstance(coeffs, ANP):
rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
scoeffs = Tuple(*coeffs)
else:
rep = DMP.from_list(coeffs.to_list(), 0, dom)
scoeffs = Tuple(*coeffs.to_list())
else:
rep = DMP.from_list([1, 0], 0, dom)
scoeffs = Tuple(1, 0)
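# If expr was itself an AlgebraicNumber, compose the new coeff polynomial
# with its rep, so that self.rep represents (f o g)(theta) as documented above.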
if rep0 is not None:
from sympy.polys.densetools import dup_compose
c = dup_compose(rep.rep, rep0.rep, dom)
rep = DMP.from_list(c, 0, dom)
scoeffs = Tuple(*c)
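# Reduce modulo the minimal polynomial so that rep always has degree
# strictly less than deg(minpoly).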
if rep.degree() >= minpoly.degree():
rep = rep.rem(minpoly.rep)
sargs = (root, scoeffs)
alias = alias or alias0
if alias is not None:
from .symbol import Symbol
if not isinstance(alias, Symbol):
alias = Symbol(alias)
sargs = sargs + (alias,)
obj = Expr.__new__(cls, *sargs)
obj.rep = rep
obj.root = root
obj.alias = alias
obj.minpoly = minpoly
obj._own_minpoly = None
return obj
def __hash__(self):
return super().__hash__()
def _eval_evalf(self, prec):
return self.as_expr()._evalf(prec)
@property
def is_aliased(self):
"""Returns ``True`` if ``alias`` was set. """
return self.alias is not None
def as_poly(self, x=None):
"""Create a Poly instance from ``self``. """
from sympy.polys.polytools import Poly, PurePoly
if x is not None:
return Poly.new(self.rep, x)
else:
if self.alias is not None:
return Poly.new(self.rep, self.alias)
else:
from .symbol import Dummy
return PurePoly.new(self.rep, Dummy('x'))
def as_expr(self, x=None):
"""Create a Basic expression from ``self``. """
return self.as_poly(x or self.root).as_expr().expand()
def coeffs(self):
"""Returns all SymPy coefficients of an algebraic number. """
return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
def native_coeffs(self):
"""Returns all native coefficients of an algebraic number. """
return self.rep.all_coeffs()
def to_algebraic_integer(self):
"""Convert ``self`` to an algebraic integer. """
from sympy.polys.polytools import Poly
f = self.minpoly
if f.LC() == 1:
return self
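# Scale by the leading coefficient: if f has leading coefficient c and
# degree n, then c*theta is a root of the monic polynomial c**(n - 1)*f(x/c).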
coeff = f.LC()**(f.degree() - 1)
poly = f.compose(Poly(f.gen/f.LC()))
minpoly = poly*coeff
root = f.LC()*self.root
return AlgebraicNumber((minpoly, root), self.coeffs())
def _eval_simplify(self, **kwargs):
from sympy.polys.rootoftools import CRootOf
from sympy.polys import minpoly
measure, ratio = kwargs['measure'], kwargs['ratio']
for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]:
if minpoly(self.root - r).is_Symbol:
# use the matching root if it's simpler
if measure(r) < ratio*measure(self.root):
return AlgebraicNumber(r)
return self
def field_element(self, coeffs):
r"""
Form another element of the same number field.
Explanation
===========
If we represent $\alpha \in \mathbb{Q}(\theta)$, form another element
$\beta \in \mathbb{Q}(\theta)$ of the same number field.
Parameters
==========
coeffs : list, :py:class:`~.ANP`
Like the *coeffs* arg to the class
:py:meth:`constructor<.AlgebraicNumber.__new__>`, defines the
new element as a polynomial in the primitive element.
If a list, the elements should be integers or rational numbers.
If an :py:class:`~.ANP`, we take its coefficients (using its
:py:meth:`~.ANP.to_list()` method).
Examples
========
>>> from sympy import AlgebraicNumber, sqrt
>>> a = AlgebraicNumber(sqrt(5), [-1, 1])
>>> b = a.field_element([3, 2])
>>> print(a)
1 - sqrt(5)
>>> print(b)
2 + 3*sqrt(5)
>>> print(b.primitive_element() == a.primitive_element())
True
See Also
========
.AlgebraicNumber.__new__()
"""
return AlgebraicNumber(
(self.minpoly, self.root), coeffs=coeffs, alias=self.alias)
@property
def is_primitive_element(self):
r"""
Say whether this algebraic number $\alpha \in \mathbb{Q}(\theta)$ is
equal to the primitive element $\theta$ for its field.
"""
c = self.coeffs()
# Second case occurs if self.minpoly is linear:
return c == [1, 0] or c == [self.root]
def primitive_element(self):
r"""
Get the primitive element $\theta$ for the number field
$\mathbb{Q}(\theta)$ to which this algebraic number $\alpha$ belongs.
Returns
=======
AlgebraicNumber
"""
if self.is_primitive_element:
return self
return self.field_element([1, 0])
def to_primitive_element(self, radicals=True):
r"""
Convert ``self`` to an :py:class:`~.AlgebraicNumber` instance that is
equal to its own primitive element.
Explanation
===========
If we represent $\alpha \in \mathbb{Q}(\theta)$, $\alpha \neq \theta$,
construct a new :py:class:`~.AlgebraicNumber` that represents
$\alpha \in \mathbb{Q}(\alpha)$.
Examples
========
>>> from sympy import sqrt, to_number_field
>>> from sympy.abc import x
>>> a = to_number_field(sqrt(2), sqrt(2) + sqrt(3))
The :py:class:`~.AlgebraicNumber` ``a`` represents the number
$\sqrt{2}$ in the field $\mathbb{Q}(\sqrt{2} + \sqrt{3})$. Rendering
``a`` as a polynomial,
>>> a.as_poly().as_expr(x)
x**3/2 - 9*x/2
reflects the fact that $\sqrt{2} = \theta^3/2 - 9 \theta/2$, where
$\theta = \sqrt{2} + \sqrt{3}$.
``a`` is not equal to its own primitive element. Its minpoly
>>> a.minpoly.as_poly().as_expr(x)
x**4 - 10*x**2 + 1
is that of $\theta$.
Converting to a primitive element,
>>> a_prim = a.to_primitive_element()
>>> a_prim.minpoly.as_poly().as_expr(x)
x**2 - 2
we obtain an :py:class:`~.AlgebraicNumber` whose ``minpoly`` is that of
the number itself.
Parameters
==========
radicals : boolean, optional (default=True)
If ``True``, then we will try to return an
:py:class:`~.AlgebraicNumber` whose ``root`` is an expression
in radicals. If that is not possible (or if *radicals* is
``False``), ``root`` will be a :py:class:`~.ComplexRootOf`.
Returns
=======
AlgebraicNumber
See Also
========
is_primitive_element
"""
if self.is_primitive_element:
return self
m = self.minpoly_of_element()
r = self.to_root(radicals=radicals)
return AlgebraicNumber((m, r))
def minpoly_of_element(self):
r"""
Compute the minimal polynomial for this algebraic number.
Explanation
===========
Recall that we represent an element $\alpha \in \mathbb{Q}(\theta)$.
Our instance attribute ``self.minpoly`` is the minimal polynomial for
our primitive element $\theta$. This method computes the minimal
polynomial for $\alpha$.
"""
if self._own_minpoly is None:
if self.is_primitive_element:
self._own_minpoly = self.minpoly
else:
from sympy.polys.numberfields.minpoly import minpoly
theta = self.primitive_element()
self._own_minpoly = minpoly(self.as_expr(theta), polys=True)
return self._own_minpoly
def to_root(self, radicals=True, minpoly=None):
"""
Convert to an :py:class:`~.Expr` that is not an
:py:class:`~.AlgebraicNumber`, specifically, either a
:py:class:`~.ComplexRootOf`, or, optionally and where possible, an
expression in radicals.
Parameters
==========
radicals : boolean, optional (default=True)
If ``True``, then we will try to return the root as an expression
in radicals. If that is not possible, we will return a
:py:class:`~.ComplexRootOf`.
minpoly : :py:class:`~.Poly`
If the minimal polynomial for ``self`` has been pre-computed, it can
be passed in order to save time.
"""
if self.is_primitive_element and not isinstance(self.root, AlgebraicNumber):
return self.root
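# otherwise choose, among the roots of the element's minimal polynomial,
# the one that matches the value of self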
m = minpoly or self.minpoly_of_element()
roots = m.all_roots(radicals=radicals)
if len(roots) == 1:
return roots[0]
ex = self.as_expr()
for b in roots:
if m.same_root(b, ex):
return b
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors.
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(IntegerConstant, metaclass=Singleton):
"""The number zero.
Zero is a singleton, and can be accessed by ``S.Zero``
Examples
========
>>> from sympy import S, Integer
>>> Integer(0) is S.Zero
True
>>> 1/S.Zero
zoo
References
==========
.. [1] https://en.wikipedia.org/wiki/Zero
"""
p = 0
q = 1
is_positive = False
is_negative = False
is_zero = True
is_number = True
is_comparable = True
__slots__ = ()
def __getnewargs__(self):
return ()
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_extended_positive:
return self
if expt.is_extended_negative:
return S.ComplexInfinity
if expt.is_extended_real is False:
return S.NaN
if expt.is_zero:
return S.One
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent since 0**-x = zoo**x even when x == 0
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.ComplexInfinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __bool__(self):
return False
class One(IntegerConstant, metaclass=Singleton):
"""The number one.
One is a singleton, and can be accessed by ``S.One``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(1) is S.One
True
References
==========
.. [1] https://en.wikipedia.org/wiki/1_%28number%29
"""
is_number = True
is_positive = True
p = 1
q = 1
__slots__ = ()
def __getnewargs__(self):
return ()
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_power(self, expt):
return self
def _eval_order(self, *symbols):
return
@staticmethod
def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
verbose=False, visual=False):
if visual:
return S.One
else:
return {}
class NegativeOne(IntegerConstant, metaclass=Singleton):
"""The number negative one.
NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(-1) is S.NegativeOne
True
See Also
========
One
References
==========
.. [1] https://en.wikipedia.org/wiki/%E2%88%921_%28number%29
"""
is_number = True
p = -1
q = 1
__slots__ = ()
def __getnewargs__(self):
return ()
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(self, expt):
if expt.is_odd:
return S.NegativeOne
if expt.is_even:
return S.One
if isinstance(expt, Number):
if isinstance(expt, Float):
return Float(-1.0)**expt
if expt is S.NaN:
return S.NaN
if expt in (S.Infinity, S.NegativeInfinity):
return S.NaN
if expt is S.Half:
return S.ImaginaryUnit
if isinstance(expt, Rational):
if expt.q == 2:
return S.ImaginaryUnit**Integer(expt.p)
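# split the exponent into integer and proper-fraction parts:
# (-1)**(i + r/q) == ((-1)**i)*(-1)**(r/q) with 0 <= r < q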
i, r = divmod(expt.p, expt.q)
if i:
return self**i*self**Rational(r, expt.q)
return
class Half(RationalConstant, metaclass=Singleton):
"""The rational number 1/2.
Half is a singleton, and can be accessed by ``S.Half``.
Examples
========
>>> from sympy import S, Rational
>>> Rational(1, 2) is S.Half
True
References
==========
.. [1] https://en.wikipedia.org/wiki/One_half
"""
is_number = True
p = 1
q = 2
__slots__ = ()
def __getnewargs__(self):
return ()
@staticmethod
def __abs__():
return S.Half
class Infinity(Number, metaclass=Singleton):
r"""Positive infinite quantity.
Explanation
===========
In real analysis the symbol `\infty` denotes an unbounded
limit: `x\to\infty` means that `x` grows without bound.
Infinity is often used not only to define a limit but as a value
in the affinely extended real number system. Points labeled `+\infty`
and `-\infty` can be added to the topological space of the real numbers,
producing the two-point compactification of the real numbers. Adding
algebraic properties to this gives us the extended real numbers.
Infinity is a singleton, and can be accessed by ``S.Infinity``,
or can be imported as ``oo``.
Examples
========
>>> from sympy import oo, exp, limit, Symbol
>>> 1 + oo
oo
>>> 42/oo
0
>>> x = Symbol('x')
>>> limit(exp(x), x, oo)
oo
See Also
========
NegativeInfinity, NaN
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinity
"""
is_commutative = True
is_number = True
is_complex = False
is_extended_real = True
is_infinite = True
is_comparable = True
is_extended_positive = True
is_prime = False
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\infty"
def _eval_subs(self, old, new):
if self == old:
return new
def _eval_evalf(self, prec=None):
return Float('inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.NegativeInfinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.Infinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.NegativeInfinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
if other.is_extended_nonnegative:
return self
return S.NegativeInfinity
return Number.__truediv__(self, other)
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.NegativeInfinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``oo ** nan`` ``nan``
``oo ** -p`` ``0`` ``p`` is a positive number or ``oo``
================ ======= ==============================
See Also
========
Pow
NaN
NegativeInfinity
"""
if expt.is_extended_positive:
return S.Infinity
if expt.is_extended_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt is S.ComplexInfinity:
return S.NaN
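# for a non-real numeric exponent, |oo**expt| = oo**re(expt), so the sign
# of the real part decides between zoo, 0 and nan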
if expt.is_extended_real is False and expt.is_number:
from sympy.functions.elementary.complexes import re
expt_real = re(expt)
if expt_real.is_positive:
return S.ComplexInfinity
if expt_real.is_negative:
return S.Zero
if expt_real.is_zero:
return S.NaN
return self**expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.Infinity or other == float('inf')
def __ne__(self, other):
return other is not S.Infinity and other != float('inf')
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
oo = S.Infinity
class NegativeInfinity(Number, metaclass=Singleton):
"""Negative infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
Infinity
"""
is_extended_real = True
is_complex = False
is_commutative = True
is_infinite = True
is_comparable = True
is_extended_negative = True
is_number = True
is_prime = False
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
def _eval_subs(self, old, new):
if self == old:
return new
def _eval_evalf(self, prec=None):
return Float('-inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.Infinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.NegativeInfinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.Infinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
if other.is_extended_nonnegative:
return self
return S.Infinity
return Number.__truediv__(self, other)
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo`` ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e`` ``oo`` ``e`` is positive even integer
``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
See Also
========
Infinity
Pow
NaN
"""
if expt.is_number:
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_extended_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
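# otherwise factor (-oo)**expt as ((-1)*oo)**expt = (-1)**expt * oo**expt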
inf_part = S.Infinity**expt
s_part = S.NegativeOne**expt
if inf_part == 0 and s_part.is_finite:
return inf_part
if (inf_part is S.ComplexInfinity and
s_part.is_finite and not s_part.is_zero):
return S.ComplexInfinity
return s_part*inf_part
def _as_mpf_val(self, prec):
return mlib.fninf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity or other == float('-inf')
def __ne__(self, other):
return other is not S.NegativeInfinity and other != float('-inf')
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
def as_powers_dict(self):
return {S.NegativeOne: 1, S.Infinity: 1}
class NaN(Number, metaclass=Singleton):
"""
Not a Number.
Explanation
===========
This serves as a placeholder for numeric values that are indeterminate.
Most operations on NaN produce another NaN. Most indeterminate forms,
such as ``0/0`` or ``oo - oo``, produce NaN. Two exceptions are ``0**0``
and ``oo**0``, which both produce ``1`` (this is consistent with Python's
float).
NaN is loosely related to floating point nan, which is defined in the
IEEE 754 floating point standard, and corresponds to the Python
``float('nan')``. Differences are noted below.
NaN is mathematically not equal to anything else, even NaN itself. This
explains the initially counter-intuitive results with ``Eq`` and ``==`` in
the examples below.
NaN is not comparable so inequalities raise a TypeError. This is in
contrast with floating point nan where all inequalities are false.
NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
as ``nan``.
Examples
========
>>> from sympy import nan, S, oo, Eq
>>> nan is S.NaN
True
>>> oo - oo
nan
>>> nan + 1
nan
>>> Eq(nan, nan) # mathematical equality
False
>>> nan == nan # structural equality
True
References
==========
.. [1] https://en.wikipedia.org/wiki/NaN
"""
is_commutative = True
is_extended_real = None
is_real = None
is_rational = None
is_algebraic = None
is_transcendental = None
is_integer = None
is_comparable = False
is_finite = None
is_zero = None
is_prime = None
is_positive = None
is_negative = None
is_number = True
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\text{NaN}"
def __neg__(self):
return self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
return self
def floor(self):
return self
def ceiling(self):
return self
def _as_mpf_val(self, prec):
return _mpf_nan
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
# NaN is structurally equal to another NaN
return other is S.NaN
def __ne__(self, other):
return other is not S.NaN
# Expr will _sympify and raise TypeError
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
nan = S.NaN
@dispatch(NaN, Expr) # type:ignore
def _eval_is_eq(a, b): # noqa:F811
return False
class ComplexInfinity(AtomicExpr, metaclass=Singleton):
r"""Complex infinity.
Explanation
===========
In complex analysis the symbol `\tilde\infty`, called "complex
infinity", represents a quantity with infinite magnitude, but
undetermined complex phase.
ComplexInfinity is a singleton, and can be accessed by
``S.ComplexInfinity``, or can be imported as ``zoo``.
Examples
========
>>> from sympy import zoo
>>> zoo + 42
zoo
>>> 42/zoo
0
>>> zoo + zoo
nan
>>> zoo*zoo
zoo
See Also
========
Infinity
"""
is_commutative = True
is_infinite = True
is_number = True
is_prime = False
is_complex = False
is_extended_real = False
kind = NumberKind
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\tilde{\infty}"
@staticmethod
def __abs__():
return S.Infinity
def floor(self):
return self
def ceiling(self):
return self
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(self, expt):
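# zoo**zoo -> nan, zoo**0 -> nan, zoo**positive -> zoo, zoo**negative -> 0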
if expt is S.ComplexInfinity:
return S.NaN
if isinstance(expt, Number):
if expt.is_zero:
return S.NaN
else:
if expt.is_positive:
return S.ComplexInfinity
else:
return S.Zero
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
is_commutative = True
is_finite = True
is_number = True
__slots__ = ()
is_NumberSymbol = True
kind = NumberKind
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if self is other:
return True
if other.is_Number and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
return not self == other
def __le__(self, other):
if self is other:
return S.true
return Expr.__le__(self, other)
def __ge__(self, other):
if self is other:
return S.true
return Expr.__ge__(self, other)
def __int__(self):
# subclass with appropriate return value
raise NotImplementedError
def __hash__(self):
return super().__hash__()
class Exp1(NumberSymbol, metaclass=Singleton):
r"""The `e` constant.
Explanation
===========
The transcendental number `e = 2.718281828\ldots` is the base of the
natural logarithm and of the exponential function, `e = \exp(1)`.
Sometimes called Euler's number or Napier's constant.
Exp1 is a singleton, and can be accessed by ``S.Exp1``,
or can be imported as ``E``.
Examples
========
>>> from sympy import exp, log, E
>>> E is exp(1)
True
>>> log(E)
1
References
==========
.. [1] https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
"""
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = ()
def _latex(self, printer):
return r"e"
@staticmethod
def __abs__():
return S.Exp1
def __int__(self):
return 2
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(2), Integer(3))
elif issubclass(number_cls, Rational):
pass
def _eval_power(self, expt):
if global_parameters.exp_is_pow:
return self._eval_power_exp_is_pow(expt)
else:
from sympy.functions.elementary.exponential import exp
return exp(expt)
def _eval_power_exp_is_pow(self, arg):
if arg.is_Number:
if arg is oo:
return oo
elif arg == -oo:
return S.Zero
from sympy.functions.elementary.exponential import log
if isinstance(arg, log):
return arg.args[0]
# don't autoexpand Pow or Mul (see the issue 3351):
elif not arg.is_Add:
Ioo = I*oo
if arg in [Ioo, -Ioo]:
return nan
coeff = arg.coeff(pi*I)
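# exp(I*pi*c) is 2-periodic in c: even c -> 1, odd c -> -1, half-odd-integer
# c -> I or -I; other rationals are reduced to the range (-pi, pi]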
if coeff:
if (2*coeff).is_integer:
if coeff.is_even:
return S.One
elif coeff.is_odd:
return S.NegativeOne
elif (coeff + S.Half).is_even:
return -I
elif (coeff + S.Half).is_odd:
return I
elif coeff.is_Rational:
ncoeff = coeff % 2 # restrict to [0, 2pi)
if ncoeff > 1: # restrict to (-pi, pi]
ncoeff -= 2
if ncoeff != coeff:
return S.Exp1**(ncoeff*S.Pi*S.ImaginaryUnit)
# Warning: code in risch.py will be very sensitive to changes
# in this (see DifferentialExtension).
# look for a single log factor
coeff, terms = arg.as_coeff_Mul()
# but it can't be multiplied by oo
if coeff in (oo, -oo):
return
coeffs, log_term = [coeff], None
for term in Mul.make_args(terms):
if isinstance(term, log):
if log_term is None:
log_term = term.args[0]
else:
return
elif term.is_comparable:
coeffs.append(term)
else:
return
return log_term**Mul(*coeffs) if log_term else None
elif arg.is_Add:
out = []
add = []
argchanged = False
for a in arg.args:
if a is S.One:
add.append(a)
continue
newa = self**a
if isinstance(newa, Pow) and newa.base is self:
if newa.exp != a:
add.append(newa.exp)
argchanged = True
else:
add.append(a)
else:
out.append(newa)
if out or argchanged:
return Mul(*out)*Pow(self, Add(*add), evaluate=False)
elif arg.is_Matrix:
return arg.exp()
def _eval_rewrite_as_sin(self, **kwargs):
from sympy.functions.elementary.trigonometric import sin
return sin(I + S.Pi/2) - I*sin(I)
def _eval_rewrite_as_cos(self, **kwargs):
from sympy.functions.elementary.trigonometric import cos
return cos(I) + I*cos(I + S.Pi/2)
E = S.Exp1
class Pi(NumberSymbol, metaclass=Singleton):
r"""The `\pi` constant.
Explanation
===========
The transcendental number `\pi = 3.141592654\ldots` represents the ratio
of a circle's circumference to its diameter, the area of the unit circle,
the half-period of trigonometric functions, and many other things
in mathematics.
Pi is a singleton, and can be accessed by ``S.Pi``, or can
be imported as ``pi``.
Examples
========
>>> from sympy import S, pi, oo, sin, exp, integrate, Symbol
>>> S.Pi
pi
>>> pi > 3
True
>>> pi.is_irrational
True
>>> x = Symbol('x')
>>> sin(x + 2*pi)
sin(x)
>>> integrate(exp(-x**2), (x, -oo, oo))
sqrt(pi)
References
==========
.. [1] https://en.wikipedia.org/wiki/Pi
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = ()
def _latex(self, printer):
return r"\pi"
@staticmethod
def __abs__():
return S.Pi
def __int__(self):
return 3
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223, 71, 1), Rational(22, 7, 1))
pi = S.Pi
class GoldenRatio(NumberSymbol, metaclass=Singleton):
r"""The golden ratio, `\phi`.
Explanation
===========
`\phi = \frac{1 + \sqrt{5}}{2}` is an algebraic number. Two quantities
are in the golden ratio if their ratio is the same as the ratio of
their sum to the larger of the two quantities, i.e. their maximum.
GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``.
Examples
========
>>> from sympy import S
>>> S.GoldenRatio > 1
True
>>> S.GoldenRatio.expand(func=True)
1/2 + sqrt(5)/2
>>> S.GoldenRatio.is_irrational
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Golden_ratio
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = ()
def _latex(self, printer):
return r"\phi"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
return mpf_norm(rv, prec)
def _eval_expand_func(self, **hints):
from sympy.functions.elementary.miscellaneous import sqrt
return S.Half + S.Half*sqrt(5)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
_eval_rewrite_as_sqrt = _eval_expand_func
class TribonacciConstant(NumberSymbol, metaclass=Singleton):
r"""The tribonacci constant.
Explanation
===========
The tribonacci numbers are like the Fibonacci numbers, but instead
of starting with two predetermined terms, the sequence starts with
three predetermined terms and each term afterwards is the sum of the
preceding three terms.
The tribonacci constant is the ratio toward which adjacent tribonacci
numbers tend. It is a root of the polynomial `x^3 - x^2 - x - 1 = 0`,
and also satisfies the equation `x + x^{-3} = 2`.
TribonacciConstant is a singleton, and can be accessed
by ``S.TribonacciConstant``.
Examples
========
>>> from sympy import S
>>> S.TribonacciConstant > 1
True
>>> S.TribonacciConstant.expand(func=True)
1/3 + (19 - 3*sqrt(33))**(1/3)/3 + (3*sqrt(33) + 19)**(1/3)/3
>>> S.TribonacciConstant.is_irrational
True
>>> S.TribonacciConstant.n(20)
1.8392867552141611326
References
==========
.. [1] https://en.wikipedia.org/wiki/Generalizations_of_Fibonacci_numbers#Tribonacci_numbers
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = ()
def _latex(self, printer):
return r"\text{TribonacciConstant}"
def __int__(self):
return 1
def _eval_evalf(self, prec):
rv = self._eval_expand_func(function=True)._eval_evalf(prec + 4)
return Float(rv, precision=prec)
def _eval_expand_func(self, **hints):
from sympy.functions.elementary.miscellaneous import cbrt, sqrt
return (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
_eval_rewrite_as_sqrt = _eval_expand_func
class EulerGamma(NumberSymbol, metaclass=Singleton):
r"""The Euler-Mascheroni constant.
Explanation
===========
`\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical
constant recurring in analysis and number theory. It is defined as the
limiting difference between the harmonic series and the
natural logarithm:
.. math:: \gamma = \lim\limits_{n\to\infty}
\left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right)
EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``.
Examples
========
>>> from sympy import S
>>> S.EulerGamma.is_irrational
>>> S.EulerGamma > 0
True
>>> S.EulerGamma > 1
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = ()
def _latex(self, printer):
return r"\gamma"
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.libhyper.euler_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5, 1))
class Catalan(NumberSymbol, metaclass=Singleton):
r"""Catalan's constant.
Explanation
===========
$G = 0.91596559\ldots$ is given by the infinite series
.. math:: G = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}
Catalan is a singleton, and can be accessed by ``S.Catalan``.
Examples
========
>>> from sympy import S
>>> S.Catalan.is_irrational
>>> S.Catalan > 0
True
>>> S.Catalan > 1
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Catalan%27s_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = ()
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.catalan_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10, 1), S.One)
def _eval_rewrite_as_Sum(self, k_sym=None, symbols=None):
if (k_sym is not None) or (symbols is not None):
return self
from .symbol import Dummy
from sympy.concrete.summations import Sum
k = Dummy('k', integer=True, nonnegative=True)
return Sum(S.NegativeOne**k / (2*k+1)**2, (k, 0, S.Infinity))
def _latex(self, printer):
return "G"
class ImaginaryUnit(AtomicExpr, metaclass=Singleton):
r"""The imaginary unit, `i = \sqrt{-1}`.
I is a singleton, and can be accessed by ``S.I``, or can be
imported as ``I``.
Examples
========
>>> from sympy import I, sqrt
>>> sqrt(-1)
I
>>> I*I
-1
>>> 1/I
-I
References
==========
.. [1] https://en.wikipedia.org/wiki/Imaginary_unit
"""
is_commutative = True
is_imaginary = True
is_finite = True
is_number = True
is_algebraic = True
is_transcendental = False
kind = NumberKind
__slots__ = ()
def _latex(self, printer):
return printer._settings['imaginary_unit_latex']
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(self, expt):
"""
b is I = sqrt(-1)
e is a symbolic object but not equal to 0 or 1
I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> cos(Pi*r/2) + I*sin(Pi*r/2), r is decimal
I**0 mod 4 -> 1
I**1 mod 4 -> I
I**2 mod 4 -> -1
I**3 mod 4 -> -I
"""
if isinstance(expt, Integer):
expt = expt % 4
if expt == 0:
return S.One
elif expt == 1:
return S.ImaginaryUnit
elif expt == 2:
return S.NegativeOne
elif expt == 3:
return -S.ImaginaryUnit
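# for a Rational exponent, I**(2*i + r) == (I*I)**i * I**r == (-1)**i * I**r
# with 0 <= r < 2; the fractional power is left unevaluated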
if isinstance(expt, Rational):
i, r = divmod(expt, 2)
rv = Pow(S.ImaginaryUnit, r, evaluate=False)
if i % 2:
return Mul(S.NegativeOne, rv, evaluate=False)
return rv
def as_base_exp(self):
return S.NegativeOne, S.Half
@property
def _mpc_(self):
return (Float(0)._mpf_, Float(1)._mpf_)
I = S.ImaginaryUnit
@dispatch(Tuple, Number) # type:ignore
def _eval_is_eq(self, other): # noqa: F811
return False
def sympify_fractions(f):
return Rational(f.numerator, f.denominator, 1)
_sympy_converter[fractions.Fraction] = sympify_fractions
if HAS_GMPY:
def sympify_mpz(x):
return Integer(int(x))
# XXX: The sympify_mpq function here was never used because it is
# overridden by the other sympify_mpq function below. Maybe it should just
# be removed or maybe it should be used for something...
def sympify_mpq(x):
return Rational(int(x.numerator), int(x.denominator))
_sympy_converter[type(gmpy.mpz(1))] = sympify_mpz
_sympy_converter[type(gmpy.mpq(1, 2))] = sympify_mpq
def sympify_mpmath_mpq(x):
p, q = x._mpq_
return Rational(p, q, 1)
_sympy_converter[type(mpmath.rational.mpq(1, 2))] = sympify_mpmath_mpq
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
_sympy_converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = list(map(sympify, (a.real, a.imag)))
return real + S.ImaginaryUnit*imag
_sympy_converter[complex] = sympify_complex
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
def _register_classes():
numbers.Number.register(Number)
numbers.Real.register(Float)
numbers.Rational.register(Rational)
numbers.Integral.register(Integer)
_register_classes()
_illegal = (S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity)
from __future__ import annotations
from operator import attrgetter
from collections import defaultdict
from sympy.utilities.exceptions import sympy_deprecation_warning
from .sympify import _sympify as _sympify_, sympify
from .basic import Basic
from .cache import cacheit
from .sorting import ordered
from .logic import fuzzy_and
from .parameters import global_parameters
from sympy.utilities.iterables import sift
from sympy.multipledispatch.dispatcher import (Dispatcher,
ambiguity_register_error_ignore_dup,
str_signature, RaiseNotImplementedError)
class AssocOp(Basic):
""" Associative operations, can separate noncommutative and
commutative parts.
(a op b) op c == a op (b op c) == a op b op c.
Base class for Add and Mul.
This is an abstract base class, concrete derived classes must define
the attribute `identity`.
.. deprecated:: 1.7
Using arguments that aren't subclasses of :class:`~.Expr` in core
operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is
deprecated. See :ref:`non-expr-args-deprecated` for details.
Parameters
==========
*args :
Arguments which are operated
evaluate : bool, optional
Evaluate the operation. If not passed, refer to ``global_parameters.evaluate``.
"""
# for performance reason, we don't let is_commutative go to assumptions,
# and keep it right here
__slots__: tuple[str, ...] = ('is_commutative',)
_args_type: type[Basic] | None = None
@cacheit
def __new__(cls, *args, evaluate=None, _sympify=True):
# Allow faster processing by passing ``_sympify=False``, if all arguments
# are already sympified.
if _sympify:
args = list(map(_sympify_, args))
# Disallow non-Expr args in Add/Mul
typ = cls._args_type
if typ is not None:
from .relational import Relational
if any(isinstance(arg, Relational) for arg in args):
raise TypeError("Relational cannot be used in %s" % cls.__name__)
# This should raise TypeError once deprecation period is over:
for arg in args:
if not isinstance(arg, typ):
sympy_deprecation_warning(
f"""
Using non-Expr arguments in {cls.__name__} is deprecated (in this case, one of
the arguments has type {type(arg).__name__!r}).
If you really did intend to use a multiplication or addition operation with
this object, use the * or + operator instead.
""",
deprecated_since_version="1.7",
active_deprecations_target="non-expr-args-deprecated",
stacklevel=4,
)
if evaluate is None:
evaluate = global_parameters.evaluate
if not evaluate:
obj = cls._from_args(args)
obj = cls._exec_constructor_postprocessors(obj)
return obj
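# drop identity args (S.One for Mul, S.Zero for Add) before flattening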
args = [a for a in args if a is not cls.identity]
if len(args) == 0:
return cls.identity
if len(args) == 1:
return args[0]
c_part, nc_part, order_symbols = cls.flatten(args)
is_commutative = not nc_part
obj = cls._from_args(c_part + nc_part, is_commutative)
obj = cls._exec_constructor_postprocessors(obj)
if order_symbols is not None:
from sympy.series.order import Order
return Order(obj, *order_symbols)
return obj
@classmethod
def _from_args(cls, args, is_commutative=None):
"""Create new instance with already-processed args.
If the args are not in canonical order, then a non-canonical
result will be returned, so use with caution. The order of
args may change if the sign of the args is changed."""
if len(args) == 0:
return cls.identity
elif len(args) == 1:
return args[0]
obj = super().__new__(cls, *args)
if is_commutative is None:
is_commutative = fuzzy_and(a.is_commutative for a in args)
obj.is_commutative = is_commutative
return obj
def _new_rawargs(self, *args, reeval=True, **kwargs):
"""Create new instance of own class with args exactly as provided by
caller but returning the self class identity if args is empty.
Examples
========
This is handy when we want to optimize things, e.g.
>>> from sympy import Mul, S
>>> from sympy.abc import x, y
>>> e = Mul(3, x, y)
>>> e.args
(3, x, y)
>>> Mul(*e.args[1:])
x*y
>>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
x*y
Note: use this with caution. There is no checking of arguments at
all. This is best used when you are rebuilding an Add or Mul after
simply removing one or more args. If, for example, modifications
result in extra 1s being inserted, they will show up in the result:
>>> m = (x*y)._new_rawargs(S.One, x); m
1*x
>>> m == x
False
>>> m.is_Mul
True
Another issue to be aware of is that the commutativity of the result
is based on the commutativity of self. If you are rebuilding the
terms that came from a commutative object then there will be no
problem, but if self was non-commutative then what you are
rebuilding may now be commutative.
Although this routine tries to do as little as possible with the
input, getting the commutativity right is important, so this level
of safety is enforced: commutativity will always be recomputed if
self is non-commutative and kwarg `reeval=False` has not been
passed.
"""
if reeval and self.is_commutative is False:
is_commutative = None
else:
is_commutative = self.is_commutative
return self._from_args(args, is_commutative)
@classmethod
def flatten(cls, seq):
"""Return seq so that none of the elements are of type `cls`. This is
the vanilla routine that will be used if a class derived from AssocOp
does not define its own flatten routine."""
# apply associativity, no commutativity property is used
new_seq = []
while seq:
o = seq.pop()
if o.__class__ is cls: # classes must match exactly
seq.extend(o.args)
else:
new_seq.append(o)
new_seq.reverse()
# c_part, nc_part, order_symbols
return [], new_seq, None
def _matches_commutative(self, expr, repl_dict=None, old=False):
"""
Matches Add/Mul "pattern" to an expression "expr".
repl_dict ... a dictionary of (wild: expression) pairs, that get
returned with the results
This function is the main workhorse for Add/Mul.
Examples
========
>>> from sympy import symbols, Wild, sin
>>> a = Wild("a")
>>> b = Wild("b")
>>> c = Wild("c")
>>> x, y, z = symbols("x y z")
>>> (a+sin(b)*c)._matches_commutative(x+sin(y)*z)
{a_: x, b_: y, c_: z}
In the example above, "a+sin(b)*c" is the pattern, and "x+sin(y)*z" is
the expression.
The repl_dict contains parts that were already matched. For example
here:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z, repl_dict={a: x})
{a_: x, b_: y, c_: z}
the only function of the repl_dict is to return it in the
result, e.g. if you omit it:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z)
{b_: y, c_: z}
the "a: x" is not returned in the result, but otherwise it is
equivalent.
"""
from .function import _coeff_isneg
# make sure expr is Expr if pattern is Expr
from .expr import Expr
if isinstance(self, Expr) and not isinstance(expr, Expr):
return None
if repl_dict is None:
repl_dict = {}
# handle simple patterns
if self == expr:
return repl_dict
d = self._matches_simple(expr, repl_dict)
if d is not None:
return d
# eliminate exact part from pattern: (2+a+w1+w2).matches(expr) -> (w1+w2).matches(expr-a-2)
from .function import WildFunction
from .symbol import Wild
wild_part, exact_part = sift(self.args, lambda p:
p.has(Wild, WildFunction) and not expr.has(p),
binary=True)
if not exact_part:
wild_part = list(ordered(wild_part))
if self.is_Add:
# in addition to normal ordered keys, impose
# sorting on Muls with leading Number to put
# them in order
wild_part = sorted(wild_part, key=lambda x:
x.args[0] if x.is_Mul and x.args[0].is_Number else
0)
else:
exact = self._new_rawargs(*exact_part)
free = expr.free_symbols
if free and (exact.free_symbols - free):
# there are symbols in the exact part that are not
# in the expr; but if there are no free symbols, let
# the matching continue
return None
newexpr = self._combine_inverse(expr, exact)
if not old and (expr.is_Add or expr.is_Mul):
check = newexpr
if _coeff_isneg(check):
check = -check
if check.count_ops() > expr.count_ops():
return None
newpattern = self._new_rawargs(*wild_part)
return newpattern.matches(newexpr, repl_dict)
# now to real work ;)
i = 0
saw = set()
while expr not in saw:
saw.add(expr)
args = tuple(ordered(self.make_args(expr)))
if self.is_Add and expr.is_Add:
# in addition to normal ordered keys, impose
# sorting on Muls with leading Number to put
# them in order
args = tuple(sorted(args, key=lambda x:
x.args[0] if x.is_Mul and x.args[0].is_Number else
0))
expr_list = (self.identity,) + args
for last_op in reversed(expr_list):
for w in reversed(wild_part):
d1 = w.matches(last_op, repl_dict)
if d1 is not None:
d2 = self.xreplace(d1).matches(expr, d1)
if d2 is not None:
return d2
if i == 0:
if self.is_Mul:
# make e**i look like Mul
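# e.g. x**3 is rewritten as x*x**2 so a Mul pattern can peel off factors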
if expr.is_Pow and expr.exp.is_Integer:
from .mul import Mul
if expr.exp > 0:
expr = Mul(*[expr.base, expr.base**(expr.exp - 1)], evaluate=False)
else:
expr = Mul(*[1/expr.base, expr.base**(expr.exp + 1)], evaluate=False)
i += 1
continue
elif self.is_Add:
# make i*e look like Add
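# e.g. 3*x is rewritten as x + 2*x so an Add pattern can peel off terms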
c, e = expr.as_coeff_Mul()
if abs(c) > 1:
from .add import Add
if c > 0:
expr = Add(*[e, (c - 1)*e], evaluate=False)
else:
expr = Add(*[-e, (c + 1)*e], evaluate=False)
i += 1
continue
# try collection on non-Wild symbols
from sympy.simplify.radsimp import collect
was = expr
did = set()
for w in reversed(wild_part):
c, w = w.as_coeff_mul(Wild)
free = c.free_symbols - did
if free:
did.update(free)
expr = collect(expr, free)
if expr != was:
i += 0
continue
break # if we didn't continue, there is nothing more to do
return
def _has_matcher(self):
"""Helper for .has() that checks for containment of
subexpressions within an expr by using sets of args
of similar nodes, e.g. x + 1 in x + y + 1 checks
to see that {x, 1} & {x, y, 1} == {x, 1}
"""
def _ncsplit(expr):
# this is not the same as args_cnc because here
# we don't assume expr is a Mul -- hence deal with args --
# and always return a set.
cpart, ncpart = sift(expr.args,
lambda arg: arg.is_commutative is True, binary=True)
return set(cpart), ncpart
c, nc = _ncsplit(self)
cls = self.__class__
def is_in(expr):
if isinstance(expr, cls):
if expr == self:
return True
_c, _nc = _ncsplit(expr)
if (c & _c) == c:
if not nc:
return True
elif len(nc) <= len(_nc):
for i in range(len(_nc) - len(nc) + 1):
if _nc[i:i + len(nc)] == nc:
return True
return False
return is_in
def _eval_evalf(self, prec):
"""
Evaluate the parts of self that are numbers; if the whole thing
was a number with no functions it would have been evaluated, but
it wasn't so we must judiciously extract the numbers and reconstruct
the object. This is *not* simply replacing numbers with evaluated
numbers. Numbers should be handled in the largest pure-number
expression as possible. So the code below separates ``self`` into
number and non-number parts and evaluates the number parts and
walks the args of the non-number part recursively (doing the same
thing).
"""
from .add import Add
from .mul import Mul
from .symbol import Symbol
from .function import AppliedUndef
if isinstance(self, (Mul, Add)):
x, tail = self.as_independent(Symbol, AppliedUndef)
# if x is an AssocOp Function then the _evalf below will
# call _eval_evalf (here) so we must break the recursion
if not (tail is self.identity or
isinstance(x, AssocOp) and x.is_Function or
x is self.identity and isinstance(tail, AssocOp)):
# here, we have a number so we just call to _evalf with prec;
# prec is not the same as n, it is the binary precision so
# that's why we don't call to evalf.
x = x._evalf(prec) if x is not self.identity else self.identity
args = []
tail_args = tuple(self.func.make_args(tail))
for a in tail_args:
# here we call to _eval_evalf since we don't know what we
# are dealing with and all other _eval_evalf routines should
# be doing the same thing (i.e. taking binary prec and
# finding the evalf-able args)
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
return self.func(x, *args)
# this is the same as above, but there were no pure-number args to
# deal with
args = []
for a in self.args:
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
return self.func(*args)
@classmethod
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
Examples
========
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr.args
else:
return (sympify(expr),)
def doit(self, **hints):
if hints.get('deep', True):
terms = [term.doit(**hints) for term in self.args]
else:
terms = self.args
return self.func(*terms, evaluate=True)
class ShortCircuit(Exception):
pass
class LatticeOp(AssocOp):
"""
Join/meet operations of an algebraic lattice[1].
Explanation
===========
These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
Common examples are AND, OR, Union, Intersection, max or min. They have an
identity element (op(identity, a) = a) and an absorbing element
conventionally called zero (op(zero, a) = zero).
This is an abstract base class, concrete derived classes must declare
attributes zero and identity. All defining properties are then respected.
Examples
========
>>> from sympy import Integer
>>> from sympy.core.operations import LatticeOp
>>> class my_join(LatticeOp):
... zero = Integer(0)
... identity = Integer(1)
>>> my_join(2, 3) == my_join(3, 2)
True
>>> my_join(2, my_join(3, 4)) == my_join(2, 3, 4)
True
>>> my_join(0, 1, 4, 2, 3, 4)
0
>>> my_join(1, 2)
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Lattice_%28order%29
"""
is_commutative = True
def __new__(cls, *args, **options):
args = (_sympify_(arg) for arg in args)
try:
# /!\ args is a generator and _new_args_filter
# must be careful to handle as such; this
# is done so short-circuiting can be done
# without having to sympify all values
_args = frozenset(cls._new_args_filter(args))
except ShortCircuit:
return sympify(cls.zero)
if not _args:
return sympify(cls.identity)
elif len(_args) == 1:
return set(_args).pop()
else:
# XXX in almost every other case for __new__, *_args is
# passed along, but the expectation here is for _args
obj = super(AssocOp, cls).__new__(cls, *ordered(_args))
obj._argset = _args
return obj
@classmethod
def _new_args_filter(cls, arg_sequence, call_cls=None):
"""Generator filtering args"""
ncls = call_cls or cls
for arg in arg_sequence:
if arg == ncls.zero:
raise ShortCircuit(arg)
elif arg == ncls.identity:
continue
elif arg.func == ncls:
yield from arg.args
else:
yield arg
@classmethod
def make_args(cls, expr):
"""
Return a set of args such that cls(*arg_set) == expr.
"""
if isinstance(expr, cls):
return expr._argset
else:
return frozenset([sympify(expr)])
@staticmethod
def _compare_pretty(a, b):
return (str(a) > str(b)) - (str(a) < str(b))
class AssocOpDispatcher:
"""
Handler dispatcher for associative operators
.. note::
This approach is experimental, and can be replaced or deleted in the future.
See https://github.com/sympy/sympy/pull/19463.
Explanation
===========
If arguments of different types are passed, the classes which handle the operation for each type
are collected. Then, a class which performs the operation is selected by recursive binary dispatching.
Dispatching relation can be registered by ``register_handlerclass`` method.
Priority registration is unordered. You cannot make ``A*B`` and ``B*A`` refer to
different handler classes. All logic dealing with the order of arguments must be implemented
in the handler class.
Examples
========
>>> from sympy import Add, Expr, Symbol
>>> from sympy.core.add import add
>>> class NewExpr(Expr):
... @property
... def _add_handler(self):
... return NewAdd
>>> class NewAdd(NewExpr, Add):
... pass
>>> add.register_handlerclass((Add, NewAdd), NewAdd)
>>> a, b = Symbol('a'), NewExpr()
>>> add(a, b) == NewAdd(a, b)
True
"""
def __init__(self, name, doc=None):
self.name = name
self.doc = doc
self.handlerattr = "_%s_handler" % name
self._handlergetter = attrgetter(self.handlerattr)
self._dispatcher = Dispatcher(name)
def __repr__(self):
return "<dispatched %s>" % self.name
def register_handlerclass(self, classes, typ, on_ambiguity=ambiguity_register_error_ignore_dup):
"""
Register the handler class for two classes, in both straight and reversed order.
Parameters
==========
classes : tuple of two types
Classes who are compared with each other.
typ:
Class which is registered to represent *cls1* and *cls2*.
Handler method of *self* must be implemented in this class.
"""
if not len(classes) == 2:
raise RuntimeError(
"Only binary dispatch is supported, but got %s types: <%s>." % (
len(classes), str_signature(classes)
))
if len(set(classes)) == 1:
raise RuntimeError(
"Duplicate types <%s> cannot be dispatched." % str_signature(classes)
)
self._dispatcher.add(tuple(classes), typ, on_ambiguity=on_ambiguity)
self._dispatcher.add(tuple(reversed(classes)), typ, on_ambiguity=on_ambiguity)
@cacheit
def __call__(self, *args, _sympify=True, **kwargs):
"""
Parameters
==========
*args :
Arguments which are operated
"""
if _sympify:
args = tuple(map(_sympify_, args))
handlers = frozenset(map(self._handlergetter, args))
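# e.g. for the Add dispatcher this reads each argument's _add_handler attribute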
# no need to sympify again
return self.dispatch(handlers)(*args, _sympify=False, **kwargs)
@cacheit
def dispatch(self, handlers):
"""
Select the handler class, and return its handler method.
"""
# Quick exit for the case where all handlers are same
if len(handlers) == 1:
h, = handlers
if not isinstance(h, type):
raise RuntimeError("Handler {!r} is not a type.".format(h))
return h
# Recursively select with registered binary priority
for i, typ in enumerate(handlers):
if not isinstance(typ, type):
raise RuntimeError("Handler {!r} is not a type.".format(typ))
if i == 0:
handler = typ
else:
prev_handler = handler
handler = self._dispatcher.dispatch(prev_handler, typ)
if not isinstance(handler, type):
raise RuntimeError(
"Dispatcher for {!r} and {!r} must return a type, but got {!r}".format(
prev_handler, typ, handler
))
# return handler class
return handler
@property
def __doc__(self):
docs = [
"Multiply dispatched associative operator: %s" % self.name,
"Note that support for this is experimental, see the docs for :class:`AssocOpDispatcher` for details"
]
if self.doc:
docs.append(self.doc)
s = "Registered handler classes\n"
s += '=' * len(s)
docs.append(s)
amb_sigs = []
typ_sigs = defaultdict(list)
for sigs in self._dispatcher.ordering[::-1]:
key = self._dispatcher.funcs[sigs]
typ_sigs[key].append(sigs)
for typ, sigs in typ_sigs.items():
sigs_str = ', '.join('<%s>' % str_signature(sig) for sig in sigs)
if isinstance(typ, RaiseNotImplementedError):
amb_sigs.append(sigs_str)
continue
s = 'Inputs: %s\n' % sigs_str
s += '-' * len(s) + '\n'
s += typ.__name__
docs.append(s)
if amb_sigs:
s = "Ambiguous handler classes\n"
s += '=' * len(s)
docs.append(s)
s = '\n'.join(amb_sigs)
docs.append(s)
return '\n\n'.join(docs)
from .add import Add
from .exprtools import gcd_terms
from .function import Function
from .kind import NumberKind
from .logic import fuzzy_and, fuzzy_not
from .mul import Mul
from .singleton import S
class Mod(Function):
"""Represents a modulo operation on symbolic expressions.
Parameters
==========
p : Expr
Dividend.
q : Expr
Divisor.
Notes
=====
The convention used is the same as Python's: the remainder always has the
same sign as the divisor.
Examples
========
>>> from sympy.abc import x, y
>>> x**2 % y
Mod(x**2, y)
>>> _.subs({x: 5, y: 6})
1
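The sign of the result follows the divisor, as a quick illustration:
>>> from sympy import Mod
>>> Mod(-5, 3), Mod(5, -3)
(1, -1)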
"""
kind = NumberKind
@classmethod
def eval(cls, p, q):
def number_eval(p, q):
"""Try to return p % q if both are numbers or +/-p is known
to be less than or equal to q.
"""
if q.is_zero:
raise ZeroDivisionError("Modulo by zero")
if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False:
return S.NaN
if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1):
return S.Zero
if q.is_Number:
if p.is_Number:
return p%q
if q == 2:
if p.is_even:
return S.Zero
elif p.is_odd:
return S.One
if hasattr(p, '_eval_Mod'):
rv = getattr(p, '_eval_Mod')(q)
if rv is not None:
return rv
# by ratio
r = p/q
if r.is_integer:
return S.Zero
try:
d = int(r)
except TypeError:
pass
else:
if isinstance(d, int):
rv = p - d*q
if (rv*q < 0) == True:
rv += q
return rv
# by difference
# -2|q| < p < 2|q|
d = abs(p)
for _ in range(2):
d -= abs(q)
if d.is_negative:
if q.is_positive:
if p.is_positive:
return d + q
elif p.is_negative:
return -d
elif q.is_negative:
if p.is_positive:
return d
elif p.is_negative:
return -d + q
break
rv = number_eval(p, q)
if rv is not None:
return rv
# denest
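# e.g. Mod(Mod(x, 6), 3) reduces to Mod(x, 3) because the inner divisor is
# a multiple of the outer one (a sketch of the first branch below)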
if isinstance(p, cls):
qinner = p.args[1]
if qinner % q == 0:
return cls(p.args[0], q)
elif (qinner*(q - qinner)).is_nonnegative:
# |qinner| < |q| and have same sign
return p
elif isinstance(-p, cls):
qinner = (-p).args[1]
if qinner % q == 0:
return cls(-(-p).args[0], q)
elif (qinner*(q + qinner)).is_nonpositive:
# |qinner| < |q| and have different sign
return p
elif isinstance(p, Add):
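# e.g. Mod(Mod(x, 3) + Mod(y, 3) + 1, 3) collapses to Mod(x + y + 1, 3)
# since every inner Mod shares the divisor q (a sketch of this branch)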
# separating into modulus and non modulus
both_l = non_mod_l, mod_l = [], []
for arg in p.args:
both_l[isinstance(arg, cls)].append(arg)
# if q same for all
if mod_l and all(inner.args[1] == q for inner in mod_l):
net = Add(*non_mod_l) + Add(*[i.args[0] for i in mod_l])
return cls(net, q)
elif isinstance(p, Mul):
# separating into modulus and non modulus
both_l = non_mod_l, mod_l = [], []
for arg in p.args:
both_l[isinstance(arg, cls)].append(arg)
if mod_l and all(inner.args[1] == q for inner in mod_l) and all(t.is_integer for t in p.args) and q.is_integer:
# finding distributive term
non_mod_l = [cls(x, q) for x in non_mod_l]
mod = []
non_mod = []
for j in non_mod_l:
if isinstance(j, cls):
mod.append(j.args[0])
else:
non_mod.append(j)
prod_mod = Mul(*mod)
prod_non_mod = Mul(*non_mod)
prod_mod1 = Mul(*[i.args[0] for i in mod_l])
net = prod_mod1*prod_mod
return prod_non_mod*cls(net, q)
if q.is_Integer and q is not S.One:
non_mod_l = [i % q if i.is_Integer and (i % q is not S.Zero) else i for
i in non_mod_l]
p = Mul(*(non_mod_l + mod_l))
# XXX other possibilities?
from sympy.polys.polyerrors import PolynomialError
from sympy.polys.polytools import gcd
# extract gcd; any further simplification should be done by the user
try:
G = gcd(p, q)
if G != 1:
p, q = [gcd_terms(i/G, clear=False, fraction=False)
for i in (p, q)]
except PolynomialError: # issue 21373
G = S.One
pwas, qwas = p, q
# simplify terms
# (x + y + 2) % x -> Mod(y + 2, x)
if p.is_Add:
args = []
for i in p.args:
a = cls(i, q)
if a.count(cls) > i.count(cls):
args.append(i)
else:
args.append(a)
if args != list(p.args):
p = Add(*args)
else:
# handle coefficients if they are not Rational
# since those are not handled by factor_terms
# e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y)
cp, p = p.as_coeff_Mul()
cq, q = q.as_coeff_Mul()
ok = False
if not cp.is_Rational or not cq.is_Rational:
r = cp % cq
if r == 0:
G *= cq
p *= int(cp/cq)
ok = True
if not ok:
p = cp*p
q = cq*q
# simple -1 extraction
if p.could_extract_minus_sign() and q.could_extract_minus_sign():
G, p, q = [-i for i in (G, p, q)]
# check again to see if p and q can now be handled as numbers
rv = number_eval(p, q)
if rv is not None:
return rv*G
# put 1.0 from G on inside
if G.is_Float and G == 1:
p *= G
return cls(p, q, evaluate=False)
elif G.is_Mul and G.args[0].is_Float and G.args[0] == 1:
p = G.args[0]*p
G = Mul._from_args(G.args[1:])
return G*cls(p, q, evaluate=(p, q) != (pwas, qwas))
def _eval_is_integer(self):
p, q = self.args
if fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]):
return True
def _eval_is_nonnegative(self):
if self.args[1].is_positive:
return True
def _eval_is_nonpositive(self):
if self.args[1].is_negative:
return True
def _eval_rewrite_as_floor(self, a, b, **kwargs):
from sympy.functions.elementary.integers import floor
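# e.g. Mod(x, y).rewrite(floor) should give x - y*floor(x/y), matching the
# sign convention described in the class docstring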
return a - b*floor(a/b)
|
630bcabb526220b07e9a357339fcf56a4aee0d9e1dadb28017bc1211fc593c51 | from __future__ import annotations
from .assumptions import StdFactKB, _assume_defined
from .basic import Basic, Atom
from .cache import cacheit
from .containers import Tuple
from .expr import Expr, AtomicExpr
from .function import AppliedUndef, FunctionClass
from .kind import NumberKind, UndefinedKind
from .logic import fuzzy_bool
from .singleton import S
from .sorting import ordered
from .sympify import sympify
from sympy.logic.boolalg import Boolean
from sympy.utilities.iterables import sift, is_sequence
from sympy.utilities.misc import filldedent
import string
import re as _re
import random
from itertools import product
from typing import Any
class Str(Atom):
"""
Represents string in SymPy.
Explanation
===========
Previously, ``Symbol`` was used where a string was needed in ``args`` of SymPy
objects, e.g. denoting the name of the instance. However, since ``Symbol``
represents a mathematical scalar, this class should be used instead.
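Examples
========
A minimal illustration (equality is structural, by name):
>>> from sympy.core.symbol import Str
>>> Str('x') == Str('x')
True
>>> Str('x') == Str('y')
False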
"""
__slots__ = ('name',)
def __new__(cls, name, **kwargs):
if not isinstance(name, str):
raise TypeError("name should be a string, not %s" % repr(type(name)))
obj = Expr.__new__(cls, **kwargs)
obj.name = name
return obj
def __getnewargs__(self):
return (self.name,)
def _hashable_content(self):
return (self.name,)
def _filter_assumptions(kwargs):
"""Split the given dict into assumptions and non-assumptions.
Keys are taken as assumptions if they correspond to an
entry in ``_assume_defined``.
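For instance (a sketch; ``positive`` is a defined assumption, ``foo`` is not):
>>> from sympy.core.symbol import _filter_assumptions
>>> _filter_assumptions({'positive': True, 'foo': 1})
({'positive': True}, {'foo': 1})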
"""
assumptions, nonassumptions = map(dict, sift(kwargs.items(),
lambda i: i[0] in _assume_defined,
binary=True))
Symbol._sanitize(assumptions)
return assumptions, nonassumptions
def _symbol(s, matching_symbol=None, **assumptions):
"""Return s if s is a Symbol, else if s is a string, return either
the matching_symbol if the names are the same or else a new symbol
with the same assumptions as the matching symbol (or the
assumptions as provided).
Examples
========
>>> from sympy import Symbol
>>> from sympy.core.symbol import _symbol
>>> _symbol('y')
y
>>> _.is_real is None
True
>>> _symbol('y', real=True).is_real
True
>>> x = Symbol('x')
>>> _symbol(x, real=True)
x
>>> _.is_real is None # ignore attribute if s is a Symbol
True
Below, the variable sym has the name 'foo':
>>> sym = Symbol('foo', real=True)
Since 'x' is not the same as sym's name, a new symbol is created:
>>> _symbol('x', sym).name
'x'
It will acquire any assumptions given:
>>> _symbol('x', sym, real=False).is_real
False
Since 'foo' is the same as sym's name, sym is returned
>>> _symbol('foo', sym)
foo
Any assumptions given are ignored:
>>> _symbol('foo', sym, real=False).is_real
True
NB: the symbol here may not be the same as a symbol with the same
name defined elsewhere as a result of different assumptions.
See Also
========
sympy.core.symbol.Symbol
"""
if isinstance(s, str):
if matching_symbol and matching_symbol.name == s:
return matching_symbol
return Symbol(s, **assumptions)
elif isinstance(s, Symbol):
return s
else:
raise ValueError('symbol must be string for symbol name or Symbol')
def uniquely_named_symbol(xname, exprs=(), compare=str, modify=None, **assumptions):
"""
Return a symbol whose name is derived from *xname* but is unique
among the symbols in *exprs*.
*xname* and symbol names in *exprs* are passed to *compare* to be
converted to comparable forms. If ``compare(xname)`` is not unique,
it is recursively passed to *modify* until a unique name is acquired.
Parameters
==========
xname : str or Symbol
Base name for the new symbol.
exprs : Expr or iterable of Expr
Expressions whose symbols are compared to *xname*.
compare : function
Unary function which transforms *xname* and symbol names from
*exprs* to comparable form.
modify : function
Unary function which modifies the string. The default appends a
number, or increments the trailing number if one already exists.
Examples
========
By default, a number is appended to *xname* to generate a unique name.
If the number already exists, it is recursively increased.
>>> from sympy.core.symbol import uniquely_named_symbol, Symbol
>>> uniquely_named_symbol('x', Symbol('x'))
x0
>>> uniquely_named_symbol('x', (Symbol('x'), Symbol('x0')))
x1
>>> uniquely_named_symbol('x0', (Symbol('x1'), Symbol('x0')))
x2
Name generation can be controlled by passing *modify* parameter.
>>> from sympy.abc import x
>>> uniquely_named_symbol('x', x, modify=lambda s: 2*s)
xx
"""
def numbered_string_incr(s, start=0):
if not s:
return str(start)
i = len(s) - 1
while i != -1:
if not s[i].isdigit():
break
i -= 1
n = str(int(s[i + 1:] or start - 1) + 1)
return s[:i + 1] + n
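# e.g. numbered_string_incr('x') -> 'x0' and numbered_string_incr('x0') -> 'x1'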
default = None
if is_sequence(xname):
xname, default = xname
x = compare(xname)
if not exprs:
return _symbol(x, default, **assumptions)
if not is_sequence(exprs):
exprs = [exprs]
names = set().union(
[i.name for e in exprs for i in e.atoms(Symbol)] +
[i.func.name for e in exprs for i in e.atoms(AppliedUndef)])
if modify is None:
modify = numbered_string_incr
while any(x == compare(s) for s in names):
x = modify(x)
return _symbol(x, default, **assumptions)
_uniquely_named_symbol = uniquely_named_symbol
class Symbol(AtomicExpr, Boolean):
"""
Assumptions:
commutative = True
You can override the default assumptions in the constructor.
Examples
========
>>> from sympy import symbols
>>> A,B = symbols('A,B', commutative = False)
>>> bool(A*B != B*A)
True
>>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative
True
"""
is_comparable = False
__slots__ = ('name',)
name: str
is_Symbol = True
is_symbol = True
@property
def kind(self):
if self.is_commutative:
return NumberKind
return UndefinedKind
@property
def _diff_wrt(self):
"""Allow derivatives wrt Symbols.
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> x._diff_wrt
True
"""
return True
@staticmethod
def _sanitize(assumptions, obj=None):
"""Remove None, convert values to bool, check commutativity *in place*.
"""
# be strict about commutativity: cannot be None
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
if is_commutative is None:
whose = '%s ' % obj.__name__ if obj else ''
raise ValueError(
'%scommutativity must be True or False.' % whose)
# sanitize other assumptions so 1 -> True and 0 -> False
for key in list(assumptions.keys()):
v = assumptions[key]
if v is None:
assumptions.pop(key)
continue
assumptions[key] = bool(v)
def _merge(self, assumptions):
base = self.assumptions0
for k in set(assumptions) & set(base):
if assumptions[k] != base[k]:
raise ValueError(filldedent('''
non-matching assumptions for %s: existing value
is %s and new value is %s''' % (
k, base[k], assumptions[k])))
base.update(assumptions)
return base
def __new__(cls, name, **assumptions):
"""Symbols are identified by name and assumptions::
>>> from sympy import Symbol
>>> Symbol("x") == Symbol("x")
True
>>> Symbol("x", real=True) == Symbol("x", real=False)
False
"""
cls._sanitize(assumptions, cls)
return Symbol.__xnew_cached_(cls, name, **assumptions)
@staticmethod
def __xnew__(cls, name, **assumptions): # never cached (e.g. dummy)
if not isinstance(name, str):
raise TypeError("name should be a string, not %s" % repr(type(name)))
obj = Expr.__new__(cls)
obj.name = name
# TODO: Issue #8873: Forcing the commutative assumption here means
# later code such as ``srepr()`` cannot tell whether the user
# specified ``commutative=True`` or omitted it. To workaround this,
# we keep a copy of the assumptions dict, then create the StdFactKB,
# and finally overwrite its ``._generator`` with the dict copy. This
# is a bit of a hack because we assume StdFactKB merely copies the
# given dict as ``._generator``, but future modification might, e.g.,
# compute a minimal equivalent assumption set.
tmp_asm_copy = assumptions.copy()
# be strict about commutativity
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
assumptions['commutative'] = is_commutative
obj._assumptions = StdFactKB(assumptions)
obj._assumptions._generator = tmp_asm_copy # Issue #8873
return obj
@staticmethod
@cacheit
def __xnew_cached_(cls, name, **assumptions): # symbols are always cached
return Symbol.__xnew__(cls, name, **assumptions)
def __getnewargs_ex__(self):
return ((self.name,), self.assumptions0)
# NOTE: __setstate__ is not needed for pickles created by __getnewargs_ex__
# but was used before Symbol was changed to use __getnewargs_ex__ in v1.9.
# Pickles created in previous SymPy versions will still need __setstate__
# so that they can be unpickled in SymPy > v1.9.
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
def _hashable_content(self):
# Note: user-specified assumptions not hashed, just derived ones
return (self.name,) + tuple(sorted(self.assumptions0.items()))
def _eval_subs(self, old, new):
if old.is_Pow:
from sympy.core.power import Pow
return Pow(self, S.One, evaluate=False)._eval_subs(old, new)
def _eval_refine(self, assumptions):
return self
@property
def assumptions0(self):
return {key: value for key, value
in self._assumptions.items() if value is not None}
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One
def as_dummy(self):
# only put commutativity in explicitly if it is False
return Dummy(self.name) if self.is_commutative is not False \
else Dummy(self.name, commutative=self.is_commutative)
def as_real_imag(self, deep=True, **hints):
if hints.get('ignore') == self:
return None
else:
from sympy.functions.elementary.complexes import im, re
return (re(self), im(self))
def is_constant(self, *wrt, **flags):
if not wrt:
return False
return self not in wrt
@property
def free_symbols(self):
return {self}
binary_symbols = free_symbols # in this case, not always
def as_set(self):
return S.UniversalSet
class Dummy(Symbol):
"""Dummy symbols are each unique, even if they have the same name:
Examples
========
>>> from sympy import Dummy
>>> Dummy("x") == Dummy("x")
False
If a name is not supplied then a string value of an internal count will be
used. This is useful when a temporary variable is needed and the name
of the variable used in the expression is not important.
>>> Dummy() #doctest: +SKIP
_Dummy_10
"""
# In the rare event that a Dummy object needs to be recreated, both the
# `name` and `dummy_index` should be passed. This is used by `srepr` for
# example:
# >>> d1 = Dummy()
# >>> d2 = eval(srepr(d1))
# >>> d2 == d1
# True
#
# If a new session is started between `srepr` and `eval`, there is a very
# small chance that `d2` will be equal to a previously-created Dummy.
_count = 0
_prng = random.Random()
_base_dummy_index = _prng.randint(10**6, 9*10**6)
__slots__ = ('dummy_index',)
is_Dummy = True
def __new__(cls, name=None, dummy_index=None, **assumptions):
if dummy_index is not None:
assert name is not None, "If you specify a dummy_index, you must also provide a name"
if name is None:
name = "Dummy_" + str(Dummy._count)
if dummy_index is None:
dummy_index = Dummy._base_dummy_index + Dummy._count
Dummy._count += 1
cls._sanitize(assumptions, cls)
obj = Symbol.__xnew__(cls, name, **assumptions)
obj.dummy_index = dummy_index
return obj
def __getnewargs_ex__(self):
return ((self.name, self.dummy_index), self.assumptions0)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (
2, (self.name, self.dummy_index)), S.One.sort_key(), S.One
def _hashable_content(self):
return Symbol._hashable_content(self) + (self.dummy_index,)
class Wild(Symbol):
"""
A Wild symbol matches anything, or anything
without whatever is explicitly excluded.
Parameters
==========
name : str
Name of the Wild instance.
exclude : iterable, optional
Instances in ``exclude`` will not be matched.
properties : iterable of functions, optional
Functions, each taking an expression as input
and returning a ``bool``. All functions in ``properties``
need to return ``True`` in order for the Wild instance
to match the expression.
Examples
========
>>> from sympy import Wild, WildFunction, cos, pi
>>> from sympy.abc import x, y, z
>>> a = Wild('a')
>>> x.match(a)
{a_: x}
>>> pi.match(a)
{a_: pi}
>>> (3*x**2).match(a*x)
{a_: 3*x}
>>> cos(x).match(a)
{a_: cos(x)}
>>> b = Wild('b', exclude=[x])
>>> (3*x**2).match(b*x)
>>> b.match(a)
{a_: b_}
>>> A = WildFunction('A')
>>> A.match(a)
{a_: A_}
Tips
====
When using Wild, be sure to use the exclude
keyword to make the pattern more precise.
Without the exclude pattern, you may get matches
that are technically correct, but not what you
wanted. For example, using the above without
exclude:
>>> from sympy import symbols
>>> a, b = symbols('a b', cls=Wild)
>>> (2 + 3*y).match(a*x + b*y)
{a_: 2/x, b_: 3}
This is technically correct, because
(2/x)*x + 3*y == 2 + 3*y, but you probably
wanted it to not match at all. The issue is that
you really did not want a and b to include x and y,
and the exclude parameter lets you specify exactly
this. With the exclude parameter, the pattern will
not match.
>>> a = Wild('a', exclude=[x, y])
>>> b = Wild('b', exclude=[x, y])
>>> (2 + 3*y).match(a*x + b*y)
Exclude also helps remove ambiguity from matches.
>>> E = 2*x**3*y*z
>>> a, b = symbols('a b', cls=Wild)
>>> E.match(a*b)
{a_: 2*y*z, b_: x**3}
>>> a = Wild('a', exclude=[x, y])
>>> E.match(a*b)
{a_: z, b_: 2*x**3*y}
>>> a = Wild('a', exclude=[x, y, z])
>>> E.match(a*b)
{a_: 2, b_: x**3*y*z}
Wild also accepts a ``properties`` parameter:
>>> a = Wild('a', properties=[lambda k: k.is_Integer])
>>> E.match(a*b)
{a_: 2, b_: x**3*y*z}
"""
is_Wild = True
__slots__ = ('exclude', 'properties')
def __new__(cls, name, exclude=(), properties=(), **assumptions):
exclude = tuple([sympify(x) for x in exclude])
properties = tuple(properties)
cls._sanitize(assumptions, cls)
return Wild.__xnew__(cls, name, exclude, properties, **assumptions)
def __getnewargs__(self):
return (self.name, self.exclude, self.properties)
@staticmethod
@cacheit
def __xnew__(cls, name, exclude, properties, **assumptions):
obj = Symbol.__xnew__(cls, name, **assumptions)
obj.exclude = exclude
obj.properties = properties
return obj
def _hashable_content(self):
return super()._hashable_content() + (self.exclude, self.properties)
# TODO add check against another Wild
def matches(self, expr, repl_dict=None, old=False):
if any(expr.has(x) for x in self.exclude):
return None
if not all(f(expr) for f in self.properties):
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
_range = _re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])')
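# e.g. _range.split('x5:10') -> ['x', '5:10', ''] (a sketch of how range
# specifiers are isolated from the surrounding name text)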
def symbols(names, *, cls=Symbol, **args) -> Any:
r"""
Transform strings into instances of :class:`Symbol` class.
The :func:`symbols` function returns a sequence of symbols with names taken
from the ``names`` argument, which can be a comma- or whitespace-delimited
string, or a sequence of strings::
>>> from sympy import symbols, Function
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of the output depends on the properties of the input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols({'a', 'b', 'c'})
{a, b, c}
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols.
Ranges are indicated by a colon and the type of range is determined by
the character to the right of the colon. If the character is a digit
then all contiguous digits to the left are taken as the nonnegative
starting value (or 0 if there is no digit left of the colon) and all
contiguous digits to the right are taken as 1 greater than the ending
value::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5(:2)')
(x50, x51)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
If the character to the right of the colon is a letter, then the single
letter to the left (or 'a' if there is none) is taken as the start
and all characters in the lexicographic range *through* the letter to
the right are used as the range::
>>> symbols('x:z')
(x, y, z)
>>> symbols('x:c') # null range
()
>>> symbols('x(:c)')
(xa, xb, xc)
>>> symbols(':c')
(a, b, c)
>>> symbols('a:d, x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
Multiple ranges are supported; contiguous numerical ranges should be
separated by parentheses to disambiguate the ending number of one
range from the starting number of the next::
>>> symbols('x:2(1:3)')
(x01, x02, x11, x12)
>>> symbols(':3:2') # parsing is from left to right
(00, 01, 10, 11, 20, 21)
Only one pair of parentheses surrounding ranges is removed, so to
include parentheses around ranges, double them. And to include spaces,
commas, or colons, escape them with a backslash::
>>> symbols('x((a:b))')
(x(a), x(b))
>>> symbols(r'x(:1\,:2)') # or r'x((:1)\,(:2))'
(x(0,0), x(0,1))
All newly created symbols have assumptions set according to ``args``::
>>> a = symbols('a', integer=True)
>>> a.is_integer
True
>>> x, y, z = symbols('x,y,z', real=True)
>>> x.is_real and y.is_real and z.is_real
True
Despite its name, :func:`symbols` can create symbol-like objects like
instances of Function or Wild classes. To achieve this, set ``cls``
keyword argument to the desired type::
>>> symbols('f,g,h', cls=Function)
(f, g, h)
>>> type(_[0])
<class 'sympy.core.function.UndefinedFunction'>
"""
result = []
if isinstance(names, str):
marker = 0
splitters = r'\,', r'\:', r'\ '
literals: list[tuple[str, str]] = []
for splitter in splitters:
if splitter in names:
while chr(marker) in names:
marker += 1
lit_char = chr(marker)
marker += 1
names = names.replace(splitter, lit_char)
literals.append((lit_char, splitter[1:]))
def literal(s):
if literals:
for c, l in literals:
s = s.replace(c, l)
return s
names = names.strip()
as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip()
if not names:
raise ValueError('no symbols given')
# split on commas
names = [n.strip() for n in names.split(',')]
if not all(n for n in names):
raise ValueError('missing symbol between commas')
# split on spaces
for i in range(len(names) - 1, -1, -1):
names[i: i + 1] = names[i].split()
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing symbol')
if ':' not in name:
symbol = cls(literal(name), **args)
result.append(symbol)
continue
split: list[str] = _range.split(name)
split_list: list[list[str]] = []
# remove 1 layer of bounding parentheses around ranges
for i in range(len(split) - 1):
if i and ':' in split[i] and split[i] != ':' and \
split[i - 1].endswith('(') and \
split[i + 1].startswith(')'):
split[i - 1] = split[i - 1][:-1]
split[i + 1] = split[i + 1][1:]
for s in split:
if ':' in s:
if s.endswith(':'):
raise ValueError('missing end range')
a, b = s.split(':')
if b[-1] in string.digits:
a_i = 0 if not a else int(a)
b_i = int(b)
split_list.append([str(c) for c in range(a_i, b_i)])
else:
a = a or 'a'
split_list.append([string.ascii_letters[c] for c in range(
string.ascii_letters.index(a),
string.ascii_letters.index(b) + 1)]) # inclusive
if not split_list[-1]:
break
else:
split_list.append([s])
else:
seq = True
if len(split_list) == 1:
names = split_list[0]
else:
names = [''.join(s) for s in product(*split_list)]
if literals:
result.extend([cls(literal(s), **args) for s in names])
else:
result.extend([cls(s, **args) for s in names])
if not seq and len(result) <= 1:
if not result:
return ()
return result[0]
return tuple(result)
else:
for name in names:
result.append(symbols(name, cls=cls, **args))
return type(names)(result)
def var(names, **args):
"""
Create symbols and inject them into the global namespace.
Explanation
===========
This calls :func:`symbols` with the same arguments and puts the results
into the *global* namespace. It's recommended not to use :func:`var` in
library code, where :func:`symbols` has to be used.
Examples
========
>>> from sympy import var
>>> var('x')
x
>>> x # noqa: F821
x
>>> var('a,ab,abc')
(a, ab, abc)
>>> abc # noqa: F821
abc
>>> var('x,y', real=True)
(x, y)
>>> x.is_real and y.is_real # noqa: F821
True
See :func:`symbols` documentation for more details on what kinds of
arguments can be passed to :func:`var`.
"""
def traverse(symbols, frame):
"""Recursively inject symbols to the global namespace. """
for symbol in symbols:
if isinstance(symbol, Basic):
frame.f_globals[symbol.name] = symbol
elif isinstance(symbol, FunctionClass):
frame.f_globals[symbol.__name__] = symbol
else:
traverse(symbol, frame)
from inspect import currentframe
frame = currentframe().f_back
try:
syms = symbols(names, **args)
if syms is not None:
if isinstance(syms, Basic):
frame.f_globals[syms.name] = syms
elif isinstance(syms, FunctionClass):
frame.f_globals[syms.__name__] = syms
else:
traverse(syms, frame)
finally:
del frame # break cyclic dependencies as stated in inspect docs
return syms
def disambiguate(*iter):
"""
Return a Tuple containing the passed expressions, with symbols
that print identically replaced by numerically subscripted symbols,
and all Dummy symbols replaced with Symbols.
Parameters
==========
iter: list of symbols or expressions.
Examples
========
>>> from sympy.core.symbol import disambiguate
>>> from sympy import Dummy, Symbol, Tuple
>>> from sympy.abc import y
>>> tup = Symbol('_x'), Dummy('x'), Dummy('x')
>>> disambiguate(*tup)
(x_2, x, x_1)
>>> eqs = Tuple(Symbol('x')/y, Dummy('x')/y)
>>> disambiguate(*eqs)
(x_1/y, x/y)
>>> ix = Symbol('x', integer=True)
>>> vx = Symbol('x')
>>> disambiguate(vx + ix)
(x + x_1,)
To make your own mapping of symbols to use, pass only the free symbols
of the expressions and create a dictionary:
>>> free = eqs.free_symbols
>>> mapping = dict(zip(free, disambiguate(*free)))
>>> eqs.xreplace(mapping)
(x_1/y, x/y)
"""
new_iter = Tuple(*iter)
key = lambda x:tuple(sorted(x.assumptions0.items()))
syms = ordered(new_iter.free_symbols, keys=key)
mapping = {}
for s in syms:
mapping.setdefault(str(s).lstrip('_'), []).append(s)
reps = {}
for k in mapping:
# the first or only symbol doesn't get subscripted but make
# sure that it's a Symbol, not a Dummy
mapk0 = Symbol("%s" % (k), **mapping[k][0].assumptions0)
if mapping[k][0] != mapk0:
reps[mapping[k][0]] = mapk0
# the others get subscripts (and are made into Symbols)
skip = 0
for i in range(1, len(mapping[k])):
while True:
name = "%s_%i" % (k, i + skip)
if name not in mapping:
break
skip += 1
ki = mapping[k][i]
reps[ki] = Symbol(name, **ki.assumptions0)
return new_iter.xreplace(reps)
|
a59b845af1fe91c50636ad1ea5a566d751bc7265d942d332a1ea32c0155c6d85 | """sympify -- convert objects into SymPy internal format"""
from __future__ import annotations
from typing import Any, Callable
from inspect import getmro
import string
from sympy.core.random import choice
from .parameters import global_parameters
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import iterable
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %r" % (self.expr,)
return ("Sympify of expression '%s' failed, because of exception being "
"raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__,
str(self.base_exc)))
converter: dict[type[Any], Callable[[Any], Basic]] = {}
#holds the conversions defined in SymPy itself, i.e. non-user defined conversions
_sympy_converter: dict[type[Any], Callable[[Any], Basic]] = {}
#alias for clearer use in the library
_external_converter = converter
class CantSympify:
"""
Mix in this trait to a class to disallow sympification of its instances.
Examples
========
>>> from sympy import sympify
>>> from sympy.core.sympify import CantSympify
>>> class Something(dict):
... pass
...
>>> sympify(Something())
{}
>>> class Something(dict, CantSympify):
... pass
...
>>> sympify(Something())
Traceback (most recent call last):
...
SympifyError: SympifyError: {}
"""
__slots__ = ()
def _is_numpy_instance(a):
"""
Checks if an object is an instance of a type from the numpy module.
"""
# This check avoids unnecessarily importing NumPy. We check the whole
# __mro__ in case any base type is a numpy type.
return any(type_.__module__ == 'numpy'
for type_ in type(a).__mro__)
def _convert_numpy_types(a, **sympify_args):
"""
Converts a numpy datatype input to an appropriate SymPy type.
"""
import numpy as np
if not isinstance(a, np.floating):
if np.iscomplex(a):
return _sympy_converter[complex](a.item())
else:
return sympify(a.item(), **sympify_args)
else:
try:
from .numbers import Float
prec = np.finfo(a).nmant + 1
# E.g. double precision means prec=53 but nmant=52
# Leading bit of mantissa is always 1, so is not stored
a = str(list(np.reshape(np.asarray(a),
(1, np.size(a)))[0]))[1:-1]
return Float(a, precision=prec)
except NotImplementedError:
raise SympifyError('Translation for numpy float : %s '
'is not implemented' % a)
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
evaluate=None):
"""
Converts an arbitrary expression to a type that can be used inside SymPy.
Explanation
===========
It will convert Python ints into instances of :class:`~.Integer`, floats
into instances of :class:`~.Float`, etc. It is also able to coerce
symbolic expressions which inherit from :class:`~.Basic`. This can be
useful in cooperation with SAGE.
.. warning::
Note that this function uses ``eval``, and thus shouldn't be used on
unsanitized input.
If the argument is already a type that SymPy understands, it will do
nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
Examples
========
>>> from sympy import sympify
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
If the expression could not be converted, a SympifyError is raised.
>>> sympify("x***2")
Traceback (most recent call last):
...
SympifyError: SympifyError: "could not parse 'x***2'"
Locals
------
The sympification happens with access to everything that is loaded
by ``from sympy import *``; anything used in a string that is not
defined by that import will be converted to a symbol. In the following,
the ``bitcount`` function is treated as a symbol and the ``O`` is
interpreted as the :class:`~.Order` object (used with series) and it raises
an error when used improperly:
>>> s = 'bitcount(42)'
>>> sympify(s)
bitcount(42)
>>> sympify("O(x)")
O(x)
>>> sympify("O + 1")
Traceback (most recent call last):
...
TypeError: unbound method...
In order to have ``bitcount`` be recognized it can be imported into a
namespace dictionary and passed as locals:
>>> ns = {}
>>> exec('from sympy.core.evalf import bitcount', ns)
>>> sympify(s, locals=ns)
6
In order to have the ``O`` interpreted as a Symbol, identify it as such
in the namespace dictionary. This can be done in a variety of ways; all
three of the following are possibilities:
>>> from sympy import Symbol
>>> ns["O"] = Symbol("O") # method 1
>>> exec('from sympy.abc import O', ns) # method 2
>>> ns.update(dict(O=Symbol("O"))) # method 3
>>> sympify("O + 1", locals=ns)
O + 1
If you want *all* single-letter and Greek-letter variables to be symbols
then you can use the clashing-symbols dictionaries that have been defined
there as private variables: ``_clash1`` (single-letter variables),
``_clash2`` (the multi-letter Greek names) or ``_clash`` (both single and
multi-letter names that are defined in ``abc``).
>>> from sympy.abc import _clash1
>>> set(_clash1) # if this fails, see issue #23903
{'E', 'I', 'N', 'O', 'Q', 'S'}
>>> sympify('I & Q', _clash1)
I & Q
Strict
------
If the option ``strict`` is set to ``True``, only the types for which an
explicit conversion has been defined are converted. In the other
cases, a SympifyError is raised.
>>> print(sympify(None))
None
>>> sympify(None, strict=True)
Traceback (most recent call last):
...
SympifyError: SympifyError: None
.. deprecated:: 1.6
``sympify(obj)`` automatically falls back to ``str(obj)`` when all
other conversion methods fail, but this is deprecated. ``strict=True``
will disable this deprecated behavior. See
:ref:`deprecated-sympify-string-fallback`.
Evaluation
----------
If the option ``evaluate`` is set to ``False``, then arithmetic and
operators will be converted into their SymPy equivalents and the
``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
be denested first. This is done via an AST transformation that replaces
operators with their SymPy equivalents, so if an operand redefines any
of those operations, the redefined operators will not be used. If
the argument ``a`` is not a string, the mathematical expression is evaluated
before being passed to sympify, so adding ``evaluate=False`` will still
return the evaluated result of the expression.
>>> sympify('2**2 / 3 + 5')
19/3
>>> sympify('2**2 / 3 + 5', evaluate=False)
2**2/3 + 5
>>> sympify('4/2+7', evaluate=True)
9
>>> sympify('4/2+7', evaluate=False)
4/2 + 7
>>> sympify(4/2+7, evaluate=False)
9.00000000000000
Extending
---------
To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
just define a ``_sympy_`` method on your class. You can do that even for
classes that you do not own by subclassing or adding the method at runtime.
>>> from sympy import Matrix
>>> class MyList1(object):
... def __iter__(self):
... yield 1
... yield 2
... return
... def __getitem__(self, i): return list(self)[i]
... def _sympy_(self): return Matrix(self)
>>> sympify(MyList1())
Matrix([
[1],
[2]])
If you do not have control over the class definition you could also use the
``converter`` global dictionary. The key is the class and the value is a
function that takes a single argument and returns the desired SymPy
object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
>>> class MyList2(object): # XXX Do not do this if you control the class!
... def __iter__(self): # Use _sympy_!
... yield 1
... yield 2
... return
... def __getitem__(self, i): return list(self)[i]
>>> from sympy.core.sympify import converter
>>> converter[MyList2] = lambda x: Matrix(x)
>>> sympify(MyList2())
Matrix([
[1],
[2]])
Notes
=====
The keywords ``rational`` and ``convert_xor`` are only used
when the input is a string.
convert_xor
-----------
>>> sympify('x^y',convert_xor=True)
x**y
>>> sympify('x^y',convert_xor=False)
x ^ y
rational
--------
>>> sympify('0.1',rational=False)
0.1
>>> sympify('0.1',rational=True)
1/10
Sometimes autosimplification during sympification results in expressions
that are very different in structure than what was entered. Until such
autosimplification is no longer done, the ``kernS`` function might be of
some use. In the example below you can see how an expression reduces to
$-1$ by autosimplification, but does not do so when ``kernS`` is used.
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x
>>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
-1
>>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
>>> sympify(s)
-1
>>> kernS(s)
-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
Parameters
==========
a :
- any object defined in SymPy
- standard numeric Python types: ``int``, ``long``, ``float``, ``Decimal``
- strings (like ``"0.09"``, ``"2e-19"`` or ``'sin(x)'``)
- booleans, including ``None`` (will leave ``None`` unchanged)
- dicts, lists, sets or tuples containing any of the above
convert_xor : bool, optional
If ``True``, treats ``^`` as exponentiation.
If ``False``, treats ``^`` as XOR.
Used only when input is a string.
locals : dict, optional
In order to have custom names recognized, they can be placed
in a namespace dictionary and passed as ``locals``.
strict : bool, optional
If the option strict is set to ``True``, only the types for which
an explicit conversion has been defined are converted. In the
other cases, a SympifyError is raised.
rational : bool, optional
If ``True``, converts floats into :class:`~.Rational`.
If ``False``, floats are left as they are.
Used only when input is a string.
evaluate : bool, optional
If False, then arithmetic and operators will be converted into
their SymPy equivalents. If True, the expression will be evaluated
and the result will be returned.
"""
# XXX: If a is a Basic subclass rather than instance (e.g. sin rather than
# sin(x)) then a.__sympy__ will be the property. Only on the instance will
# a.__sympy__ give the *value* of the property (True). Since sympify(sin)
# was used for a long time we allow it to pass. However if strict=True as
# is the case in internal calls to _sympify then we only allow
# is_sympy=True.
#
# https://github.com/sympy/sympy/issues/20124
is_sympy = getattr(a, '__sympy__', None)
if is_sympy is True:
return a
elif is_sympy is not None:
if not strict:
return a
else:
raise SympifyError(a)
if isinstance(a, CantSympify):
raise SympifyError(a)
cls = getattr(a, "__class__", None)
#Check if there exists a converter for any of the types in the mro
for superclass in getmro(cls):
#First check for user defined converters
conv = _external_converter.get(superclass)
if conv is None:
#if none exists, check for SymPy defined converters
conv = _sympy_converter.get(superclass)
if conv is not None:
return conv(a)
if cls is type(None):
if strict:
raise SympifyError(a)
else:
return a
if evaluate is None:
evaluate = global_parameters.evaluate
# Support for basic numpy datatypes
if _is_numpy_instance(a):
import numpy as np
if np.isscalar(a):
return _convert_numpy_types(a, locals=locals,
convert_xor=convert_xor, strict=strict, rational=rational,
evaluate=evaluate)
_sympy_ = getattr(a, "_sympy_", None)
if _sympy_ is not None:
try:
return a._sympy_()
# XXX: Catches AttributeError: 'SymPyConverter' object has no
# attribute 'tuple'
# This is probably a bug somewhere but for now we catch it here.
except AttributeError:
pass
if not strict:
# Put numpy array conversion _before_ float/int, see
# <https://github.com/sympy/sympy/issues/13924>.
flat = getattr(a, "flat", None)
if flat is not None:
shape = getattr(a, "shape", None)
if shape is not None:
from sympy.tensor.array import Array
return Array(a.flat, a.shape) # works with e.g. NumPy arrays
if not isinstance(a, str):
if _is_numpy_instance(a):
import numpy as np
assert not isinstance(a, np.number)
if isinstance(a, np.ndarray):
# Scalar arrays (those with zero dimensions) have sympify
# called on the scalar element.
if a.ndim == 0:
try:
return sympify(a.item(),
locals=locals,
convert_xor=convert_xor,
strict=strict,
rational=rational,
evaluate=evaluate)
except SympifyError:
pass
else:
# float and int can coerce size-one numpy arrays to their lone
# element. See issue https://github.com/numpy/numpy/issues/10404.
for coerce in (float, int):
try:
return sympify(coerce(a))
except (TypeError, ValueError, AttributeError, SympifyError):
continue
if strict:
raise SympifyError(a)
if iterable(a):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational, evaluate=evaluate) for x in a])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
if not isinstance(a, str):
try:
a = str(a)
except Exception as exc:
raise SympifyError(a, exc)
sympy_deprecation_warning(
f"""
The string fallback in sympify() is deprecated.
To explicitly convert the string form of an object, use
sympify(str(obj)). To define sympify behavior on custom
objects, use sympy.core.sympify.converter or define obj._sympy_
(see the sympify() docstring).
sympify() performed the string fallback resulting in the following string:
{a!r}
""",
deprecated_since_version='1.6',
active_deprecations_target="deprecated-sympify-string-fallback",
)
from sympy.parsing.sympy_parser import (parse_expr, TokenError,
standard_transformations)
from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
from sympy.parsing.sympy_parser import rationalize as t_rationalize
transformations = standard_transformations
if rational:
transformations += (t_rationalize,)
if convert_xor:
transformations += (t_convert_xor,)
try:
a = a.replace('\n', '')
expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
except (TokenError, SyntaxError) as exc:
raise SympifyError('could not parse %r' % a, exc)
return expr
def _sympify(a):
"""
Short version of :func:`~.sympify` for internal usage for ``__add__`` and
``__eq__`` methods where it is ok to allow some things (like Python
integers and floats) in the expression. This excludes things (like strings)
that are unwise to allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy.abc import x
>>> x + 1
x + 1
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
def kernS(s):
"""Use a hack to try keep autosimplification from distributing a
a number into an Add; this modification does not
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y
The 2-arg Mul distributes a number (or minus sign) across the terms
of an expression, but kernS will prevent that:
>>> 2*(x + y), -(x + 1)
(2*x + 2*y, -x - 1)
>>> kernS('2*(x + y)')
2*(x + y)
>>> kernS('-(x + 1)')
-(x + 1)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 4596 has been resolved.
"""
hit = False
quoted = '"' in s or "'" in s
if '(' in s and not quoted:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
# strip all space from s
s = ''.join(s.split())
olds = s
# now use a space to mark where a placeholder symbol (the "kern") will
# be inserted; the transformation proceeds in the following steps:
# step 1. turn potential 2-arg Muls into 3-arg versions
# 1a. *( -> * *(
s = s.replace('*(', '* *(')
# 1b. close up exponentials
s = s.replace('** *', '**')
# 2. handle the implied multiplication of a negated
# parenthesized expression in two steps
# 2a: -(...) --> -( *(...)
target = '-( *('
s = s.replace('-(', target)
# 2b: double the matching closing parenthesis
# -( *(...) --> -( *(...))
i = nest = 0
assert target.endswith('(') # assumption below
while True:
j = s.find(target, i)
if j == -1:
break
j += len(target) - 1
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + ")" + s[j:]
i = j + 2 # the first char after 2nd )
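# e.g. by this point '-(x + 1)*y' (spaces already stripped) has become
# '-( *(x+1))*y'; the space will be swapped for the kern symbol below
# (a sketch of one concrete pass through the steps above)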
if ' ' in s:
# get a unique kern
kern = '_'
while kern in s:
kern += choice(string.ascii_letters + string.digits)
s = s.replace(' ', kern)
hit = kern in s
else:
hit = False
for i in range(2):
try:
expr = sympify(s)
break
except TypeError: # the kern might cause unknown errors...
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
from .symbol import Symbol
rep = {Symbol(kern): 1}
def _clear(expr):
if isinstance(expr, (list, tuple, set)):
return type(expr)([_clear(e) for e in expr])
if hasattr(expr, 'subs'):
return expr.subs(rep, hack2=True)
return expr
expr = _clear(expr)
# hope that kern is not there anymore
return expr
# Avoid circular import
from .basic import Basic
|
1ee8d32a029c5f4aca99fefc882984803ec392605a88c9a0503862de27970a16 | """
Adaptive numerical evaluation of SymPy expressions, using mpmath
for mathematical functions.
"""
from __future__ import annotations
from typing import Tuple as tTuple, Optional, Union as tUnion, Callable, List, Dict as tDict, Type, TYPE_CHECKING, \
Any, overload
import math
import mpmath.libmp as libmp
from mpmath import (
make_mpc, make_mpf, mp, mpc, mpf, nsum, quadts, quadosc, workprec)
from mpmath import inf as mpmath_inf
from mpmath.libmp import (from_int, from_man_exp, from_rational, fhalf,
fnan, finf, fninf, fnone, fone, fzero, mpf_abs, mpf_add,
mpf_atan, mpf_atan2, mpf_cmp, mpf_cos, mpf_e, mpf_exp, mpf_log, mpf_lt,
mpf_mul, mpf_neg, mpf_pi, mpf_pow, mpf_pow_int, mpf_shift, mpf_sin,
mpf_sqrt, normalize, round_nearest, to_int, to_str)
from mpmath.libmp import bitcount as mpmath_bitcount
from mpmath.libmp.backend import MPZ
from mpmath.libmp.libmpc import _infs_nan
from mpmath.libmp.libmpf import dps_to_prec, prec_to_dps
from .sympify import sympify
from .singleton import S
from sympy.external.gmpy import SYMPY_INTS
from sympy.utilities.iterables import is_sequence
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import as_int
if TYPE_CHECKING:
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.symbol import Symbol
from sympy.integrals.integrals import Integral
from sympy.concrete.summations import Sum
from sympy.concrete.products import Product
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.complexes import Abs, re, im
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.trigonometric import atan
from .numbers import Float, Rational, Integer, AlgebraicNumber, Number
LG10 = math.log(10, 2)
rnd = round_nearest
def bitcount(n):
"""Return smallest integer, b, such that |n|/2**b < 1.
"""
return mpmath_bitcount(abs(int(n)))
# Used in a few places as placeholder values to denote exponents and
# precision levels, e.g. of exact numbers. Must be careful to avoid
# passing these to mpmath functions or returning them in final results.
INF = float(mpmath_inf)
MINUS_INF = float(-mpmath_inf)
# ~= 100 digits. Real men set this to INF.
DEFAULT_MAXPREC = 333
class PrecisionExhausted(ArithmeticError):
pass
#----------------------------------------------------------------------------#
# Helper functions for arithmetic and complex parts #
#----------------------------------------------------------------------------#
"""
An mpf value tuple is a tuple of integers (sign, man, exp, bc)
representing a floating-point number: [1, -1][sign]*man*2**exp where
sign is 0 or 1 and bc should correspond to the number of bits used to
represent the mantissa (man) in binary notation, e.g.
"""
MPF_TUP = tTuple[int, int, int, int] # mpf value tuple
"""
Explanation
===========
>>> from sympy.core.evalf import bitcount
>>> sign, man, exp, bc = 0, 5, 1, 3
>>> n = [1, -1][sign]*man*2**exp
>>> n, bitcount(man)
(10, 3)
A temporary result is a tuple (re, im, re_acc, im_acc) where
re and im are nonzero mpf value tuples representing approximate
numbers, or None to denote exact zeros.
re_acc, im_acc are integers denoting log2(e) where e is the estimated
relative accuracy of the respective complex part, but may be anything
if the corresponding complex part is None.
"""
TMP_RES = Any # temporary result, should be some variant of
# tUnion[tTuple[Optional[MPF_TUP], Optional[MPF_TUP],
# Optional[int], Optional[int]],
# 'ComplexInfinity']
# but mypy reports error because it doesn't know as we know
# 1. re and re_acc are either both None or both MPF_TUP
# 2. sometimes the result can't be zoo
# type of the "options" parameter in internal evalf functions
OPT_DICT = tDict[str, Any]
def fastlog(x: Optional[MPF_TUP]) -> tUnion[int, Any]:
"""Fast approximation of log2(x) for an mpf value tuple x.
Explanation
===========
Calculated as exponent + width of mantissa. This is an
approximation for two reasons: 1) it gives the ceil(log2(abs(x)))
value and 2) it is too high by 1 in the case that x is an exact
power of 2. Although this is easy to remedy by testing to see if
the odd mpf mantissa is 1 (indicating that one was dealing with
an exact power of 2) that would decrease the speed and is not
necessary as this is only being used as an approximation for the
number of bits in x. The correct return value could be written as
"x[2] + (x[3] if x[1] != 1 else 0)".
Since mpf tuples always have an odd mantissa, no check is done
to see if the mantissa is a multiple of 2 (in which case the
result would be too large by 1).
Examples
========
>>> from sympy import log
>>> from sympy.core.evalf import fastlog, bitcount
>>> s, m, e = 0, 5, 1
>>> bc = bitcount(m)
>>> n = [1, -1][s]*m*2**e
>>> n, (log(n)/log(2)).evalf(2), fastlog((s, m, e, bc))
(10, 3.3, 4)
"""
if not x or x == fzero:
return MINUS_INF
return x[2] + x[3]
def pure_complex(v: 'Expr', or_real=False) -> tuple['Number', 'Number'] | None:
"""Return a and b if v matches a + I*b where b is not zero and
a and b are Numbers, else None. If `or_real` is True then 0 will
be returned for `b` if `v` is a real number.
Examples
========
>>> from sympy.core.evalf import pure_complex
>>> from sympy import sqrt, I, S
>>> a, b, surd = S(2), S(3), sqrt(2)
>>> pure_complex(a)
>>> pure_complex(a, or_real=True)
(2, 0)
>>> pure_complex(surd)
>>> pure_complex(a + b*I)
(2, 3)
>>> pure_complex(I)
(0, 1)
"""
h, t = v.as_coeff_Add()
if t:
c, i = t.as_coeff_Mul()
if i is S.ImaginaryUnit:
return h, c
elif or_real:
return h, S.Zero
return None
# I don't know what this is, see function scaled_zero below
SCALED_ZERO_TUP = tTuple[List[int], int, int, int]
@overload
def scaled_zero(mag: SCALED_ZERO_TUP, sign=1) -> MPF_TUP:
...
@overload
def scaled_zero(mag: int, sign=1) -> tTuple[SCALED_ZERO_TUP, int]:
...
def scaled_zero(mag: tUnion[SCALED_ZERO_TUP, int], sign=1) -> \
tUnion[MPF_TUP, tTuple[SCALED_ZERO_TUP, int]]:
"""Return an mpf representing a power of two with magnitude ``mag``
and -1 for precision. Or, if ``mag`` is a scaled_zero tuple, then just
remove the sign from within the list that it was initially wrapped
in.
Examples
========
>>> from sympy.core.evalf import scaled_zero
>>> from sympy import Float
>>> z, p = scaled_zero(100)
>>> z, p
(([0], 1, 100, 1), -1)
>>> ok = scaled_zero(z)
>>> ok
(0, 1, 100, 1)
>>> Float(ok)
1.26765060022823e+30
>>> Float(ok, p)
0.e+30
>>> ok, p = scaled_zero(100, -1)
>>> Float(scaled_zero(ok), p)
-0.e+30
"""
if isinstance(mag, tuple) and len(mag) == 4 and iszero(mag, scaled=True):
return (mag[0][0],) + mag[1:]
elif isinstance(mag, SYMPY_INTS):
if sign not in [-1, 1]:
raise ValueError('sign must be +/-1')
rv, p = mpf_shift(fone, mag), -1
s = 0 if sign == 1 else 1
rv = ([s],) + rv[1:]
return rv, p
else:
raise ValueError('scaled zero expects int or scaled_zero tuple.')
def iszero(mpf: tUnion[MPF_TUP, SCALED_ZERO_TUP, None], scaled=False) -> Optional[bool]:
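# With scaled=False: True when mpf is None/empty or when both the mantissa and
# the bit count are zero. With scaled=True: True only for the special
# scaled-zero tuples produced by scaled_zero() above (a sketch of the
# contract, inferred from the code below).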
if not scaled:
return not mpf or not mpf[1] and not mpf[-1]
return mpf and isinstance(mpf[0], list) and mpf[1] == mpf[-1] == 1
def complex_accuracy(result: TMP_RES) -> tUnion[int, Any]:
"""
Returns relative accuracy of a complex number with given accuracies
for the real and imaginary parts. The relative accuracy is defined
in the complex norm sense as ||z|+|error|| / |z| where error
is equal to (real absolute error) + (imag absolute error)*i.
The full expression for the (logarithmic) error can be approximated
easily by using the max norm to approximate the complex norm.
In the worst case (re and im equal), this is wrong by a factor
sqrt(2), or by log2(sqrt(2)) = 0.5 bit.
"""
if result is S.ComplexInfinity:
return INF
re, im, re_acc, im_acc = result
if not im:
if not re:
return INF
return re_acc
if not re:
return im_acc
re_size = fastlog(re)
im_size = fastlog(im)
absolute_error = max(re_size - re_acc, im_size - im_acc)
relative_error = absolute_error - max(re_size, im_size)
return -relative_error
def get_abs(expr: 'Expr', prec: int, options: OPT_DICT) -> TMP_RES:
result = evalf(expr, prec + 2, options)
if result is S.ComplexInfinity:
return finf, None, prec, None
re, im, re_acc, im_acc = result
if not re:
re, re_acc, im, im_acc = im, im_acc, re, re_acc
if im:
if expr.is_number:
abs_expr, _, acc, _ = evalf(abs(N(expr, prec + 2)),
prec + 2, options)
return abs_expr, None, acc, None
else:
if 'subs' in options:
return libmp.mpc_abs((re, im), prec), None, re_acc, None
return abs(expr), None, prec, None
elif re:
return mpf_abs(re), None, re_acc, None
else:
return None, None, None, None
def get_complex_part(expr: 'Expr', no: int, prec: int, options: OPT_DICT) -> TMP_RES:
"""no = 0 for real part, no = 1 for imaginary part"""
workprec = prec
i = 0
while 1:
res = evalf(expr, workprec, options)
if res is S.ComplexInfinity:
return fnan, None, prec, None
value, accuracy = res[no::2]
# XXX is the last one correct? Consider re((1+I)**2).n()
if (not value) or accuracy >= prec or -value[2] > prec:
return value, None, accuracy, None
workprec += max(30, 2**i)
i += 1
def evalf_abs(expr: 'Abs', prec: int, options: OPT_DICT) -> TMP_RES:
return get_abs(expr.args[0], prec, options)
def evalf_re(expr: 're', prec: int, options: OPT_DICT) -> TMP_RES:
return get_complex_part(expr.args[0], 0, prec, options)
def evalf_im(expr: 'im', prec: int, options: OPT_DICT) -> TMP_RES:
return get_complex_part(expr.args[0], 1, prec, options)
def finalize_complex(re: MPF_TUP, im: MPF_TUP, prec: int) -> TMP_RES:
if re == fzero and im == fzero:
raise ValueError("got complex zero with unknown accuracy")
elif re == fzero:
return None, im, None, prec
elif im == fzero:
return re, None, prec, None
size_re = fastlog(re)
size_im = fastlog(im)
if size_re > size_im:
re_acc = prec
im_acc = prec + min(-(size_re - size_im), 0)
else:
im_acc = prec
re_acc = prec + min(-(size_im - size_re), 0)
return re, im, re_acc, im_acc
def chop_parts(value: TMP_RES, prec: int) -> TMP_RES:
"""
Chop off tiny real or complex parts.
"""
if value is S.ComplexInfinity:
return value
re, im, re_acc, im_acc = value
# Method 1: chop based on absolute value
if re and re not in _infs_nan and (fastlog(re) < -prec + 4):
re, re_acc = None, None
if im and im not in _infs_nan and (fastlog(im) < -prec + 4):
im, im_acc = None, None
# Method 2: chop if inaccurate and relatively small
if re and im:
delta = fastlog(re) - fastlog(im)
if re_acc < 2 and (delta - re_acc <= -prec + 4):
re, re_acc = None, None
if im_acc < 2 and (delta - im_acc >= prec - 4):
im, im_acc = None, None
return re, im, re_acc, im_acc
def check_target(expr: 'Expr', result: TMP_RES, prec: int):
a = complex_accuracy(result)
if a < prec:
raise PrecisionExhausted("Failed to distinguish the expression: \n\n%s\n\n"
"from zero. Try simplifying the input, using chop=True, or providing "
"a higher maxn for evalf" % (expr))
def get_integer_part(expr: 'Expr', no: int, options: OPT_DICT, return_ints=False) -> \
tUnion[TMP_RES, tTuple[int, int]]:
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
from sympy.functions.elementary.complexes import re, im
# The expression is likely less than 2^30 or so
assumed_size = 30
result = evalf(expr, assumed_size, options)
if result is S.ComplexInfinity:
raise ValueError("Cannot get integer part of Complex Infinity")
ire, iim, ire_acc, iim_acc = result
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire) - ire_acc, fastlog(iim) - iim_acc)
elif ire:
gap = fastlog(ire) - ire_acc
elif iim:
gap = fastlog(iim) - iim_acc
else:
# ... or maybe the expression was exactly zero
if return_ints:
return 0, 0
else:
return None, None, None, None
margin = 10
if gap >= -margin:
prec = margin + assumed_size + gap
ire, iim, ire_acc, iim_acc = evalf(
expr, prec, options)
else:
prec = assumed_size
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close).
def calc_part(re_im: 'Expr', nexpr: MPF_TUP):
from .add import Add
_, _, exponent, _ = nexpr
is_int = exponent == 0
nint = int(to_int(nexpr, rnd))
if is_int:
# make sure that we had enough precision to distinguish
# between nint and the re or im part (re_im) of expr that
# was passed to calc_part
ire, iim, ire_acc, iim_acc = evalf(
re_im - nint, 10, options) # don't need much precision
assert not iim
size = -fastlog(ire) + 2 # -ve b/c ire is less than 1
if size > prec:
ire, iim, ire_acc, iim_acc = evalf(
re_im, size, options)
assert not iim
nexpr = ire
nint = int(to_int(nexpr, rnd))
_, _, new_exp, _ = ire
is_int = new_exp == 0
if not is_int:
# if there are subs and they all contain integer re/im parts
# then we can (hopefully) safely substitute them into the
# expression
s = options.get('subs', False)
if s:
doit = True
# use strict=False with as_int because we take
# 2.0 == 2
for v in s.values():
try:
as_int(v, strict=False)
except ValueError:
try:
[as_int(i, strict=False) for i in v.as_real_imag()]
continue
except (ValueError, AttributeError):
doit = False
break
if doit:
re_im = re_im.subs(s)
re_im = Add(re_im, -nint, evaluate=False)
x, _, x_acc, _ = evalf(re_im, 10, options)
try:
check_target(re_im, (x, None, x_acc, None), 3)
except PrecisionExhausted:
if not re_im.equals(0):
raise PrecisionExhausted
x = fzero
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, INF
re_, im_, re_acc, im_acc = None, None, None, None
if ire:
re_, re_acc = calc_part(re(expr, evaluate=False), ire)
if iim:
im_, im_acc = calc_part(im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re_ or fzero)), int(to_int(im_ or fzero))
return re_, im_, re_acc, im_acc
def evalf_ceiling(expr: 'ceiling', prec: int, options: OPT_DICT) -> TMP_RES:
return get_integer_part(expr.args[0], 1, options)
def evalf_floor(expr: 'floor', prec: int, options: OPT_DICT) -> TMP_RES:
return get_integer_part(expr.args[0], -1, options)
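# Rough illustration of the integer-part machinery (assuming sympy's evalf
# dispatch table has already been initialized by a prior evalf call):
#
#     from sympy import E, pi
#     get_integer_part(E + pi, 1, {}, return_ints=True)    # -> (6, 0)
#
# i.e. ceiling(E + pi) is 6; with no = -1 the same call yields the floor, 5.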
def evalf_float(expr: 'Float', prec: int, options: OPT_DICT) -> TMP_RES:
return expr._mpf_, None, prec, None
def evalf_rational(expr: 'Rational', prec: int, options: OPT_DICT) -> TMP_RES:
return from_rational(expr.p, expr.q, prec), None, prec, None
def evalf_integer(expr: 'Integer', prec: int, options: OPT_DICT) -> TMP_RES:
return from_int(expr.p, prec), None, prec, None
#----------------------------------------------------------------------------#
# #
# Arithmetic operations #
# #
#----------------------------------------------------------------------------#
def add_terms(terms: list, prec: int, target_prec: int) -> \
tTuple[tUnion[MPF_TUP, SCALED_ZERO_TUP, None], Optional[int]]:
"""
Helper for evalf_add. Adds a list of (mpfval, accuracy) terms.
Returns
=======
- None, None if there are no non-zero terms;
- terms[0] if there is only 1 term;
- scaled_zero if the sum of the terms produces a zero by cancellation
e.g. mpfs representing 1 and -1 would produce a scaled zero, which needs
special handling since it is not actually zero and is purposely
malformed to ensure that it cannot be used in anything but accuracy
calculations;
- a tuple that is scaled to target_prec that corresponds to the
sum of the terms.
The returned mpf tuple will be normalized to target_prec; the input
prec is used to define the working precision.
XXX explain why this is needed and why one cannot just loop using mpf_add
"""
terms = [t for t in terms if not iszero(t[0])]
if not terms:
return None, None
elif len(terms) == 1:
return terms[0]
# see if any argument is NaN or oo and thus warrants a special return
special = []
from .numbers import Float
for t in terms:
arg = Float._new(t[0], 1)
if arg is S.NaN or arg.is_infinite:
special.append(arg)
if special:
from .add import Add
rv = evalf(Add(*special), prec + 4, {})
return rv[0], rv[2]
working_prec = 2*prec
sum_man, sum_exp = 0, 0
absolute_err: List[int] = []
for x, accuracy in terms:
sign, man, exp, bc = x
if sign:
man = -man
absolute_err.append(bc + exp - accuracy)
delta = exp - sum_exp
if exp >= sum_exp:
# x much larger than existing sum?
# first: quick test
if ((delta > working_prec) and
((not sum_man) or
delta - bitcount(abs(sum_man)) > working_prec)):
sum_man = man
sum_exp = exp
else:
sum_man += (man << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta - bc > working_prec:
if not sum_man:
sum_man, sum_exp = man, exp
else:
sum_man = (sum_man << delta) + man
sum_exp = exp
absolute_error = max(absolute_err)
if not sum_man:
return scaled_zero(absolute_error)
if sum_man < 0:
sum_sign = 1
sum_man = -sum_man
else:
sum_sign = 0
sum_bc = bitcount(sum_man)
sum_accuracy = sum_exp + sum_bc - absolute_error
r = normalize(sum_sign, sum_man, sum_exp, sum_bc, target_prec,
rnd), sum_accuracy
return r
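# Sketch of the term format add_terms consumes: each entry is an (mpf, accuracy)
# pair, e.g.
#
#     add_terms([(from_int(1), 53), (from_int(2), 53)], 60, 53)
#         # -> (mpf tuple for 3, accuracy roughly 53)
#     add_terms([(from_int(1), 53), (mpf_neg(from_int(1)), 53)], 60, 53)
#         # exact cancellation -> a scaled zero (see scaled_zero), not plain fzero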
def evalf_add(v: 'Add', prec: int, options: OPT_DICT) -> TMP_RES:
res = pure_complex(v)
if res:
h, c = res
re, _, re_acc, _ = evalf(h, prec, options)
im, _, im_acc, _ = evalf(c, prec, options)
return re, im, re_acc, im_acc
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
i = 0
target_prec = prec
while 1:
options['maxprec'] = min(oldmaxprec, 2*prec)
terms = [evalf(arg, prec + 10, options) for arg in v.args]
n = terms.count(S.ComplexInfinity)
if n >= 2:
return fnan, None, prec, None
re, re_acc = add_terms(
[a[0::2] for a in terms if isinstance(a, tuple) and a[0]], prec, target_prec)
im, im_acc = add_terms(
[a[1::2] for a in terms if isinstance(a, tuple) and a[1]], prec, target_prec)
if n == 1:
if re in (finf, fninf, fnan) or im in (finf, fninf, fnan):
return fnan, None, prec, None
return S.ComplexInfinity
acc = complex_accuracy((re, im, re_acc, im_acc))
if acc >= target_prec:
if options.get('verbose'):
print("ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc)
break
else:
if (prec - target_prec) > options['maxprec']:
break
prec = prec + max(10 + 2**i, target_prec - acc)
i += 1
if options.get('verbose'):
print("ADD: restarting with prec", prec)
options['maxprec'] = oldmaxprec
if iszero(re, scaled=True):
re = scaled_zero(re)
if iszero(im, scaled=True):
im = scaled_zero(im)
return re, im, re_acc, im_acc
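# The restart loop above matters for sums with heavy cancellation.  A classic
# example is the near-integer exp(pi*sqrt(163)):
#
#     (exp(pi*sqrt(163)) - 640320**3 - 744).evalf()    # about -7.5e-13
#
# The first pass at prec + 10 bits loses nearly all significant bits to
# cancellation, so prec is raised until target_prec accurate bits remain or
# options['maxprec'] is exhausted.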
def evalf_mul(v: 'Mul', prec: int, options: OPT_DICT) -> TMP_RES:
res = pure_complex(v)
if res:
# the only pure complex that is a mul is h*I
_, h = res
im, _, im_acc, _ = evalf(h, prec, options)
return None, im, None, im_acc
args = list(v.args)
# see if any argument is NaN or oo and thus warrants a special return
has_zero = False
special = []
from .numbers import Float
for arg in args:
result = evalf(arg, prec, options)
if result is S.ComplexInfinity:
special.append(result)
continue
if result[0] is None:
if result[1] is None:
has_zero = True
continue
num = Float._new(result[0], 1)
if num is S.NaN:
return fnan, None, prec, None
if num.is_infinite:
special.append(num)
if special:
if has_zero:
return fnan, None, prec, None
from .mul import Mul
return evalf(Mul(*special), prec + 4, {})
if has_zero:
return None, None, None, None
# With guard digits, multiplication in the real case does not destroy
# accuracy. This is also true in the complex case when considering the
# total accuracy; however accuracy for the real or imaginary parts
# separately may be lower.
acc = prec
# XXX: big overestimate
working_prec = prec + len(args) + 5
# Empty product is 1
start = man, exp, bc = MPZ(1), 0, 1
# First, we multiply all pure real or pure imaginary numbers.
# direction tells us that the result should be multiplied by
# I**direction; all other numbers get put into complex_factors
# to be multiplied out after the first phase.
last = len(args)
direction = 0
args.append(S.One)
complex_factors = []
for i, arg in enumerate(args):
if i != last and pure_complex(arg):
args[-1] = (args[-1]*arg).expand()
continue
elif i == last and arg is S.One:
continue
re, im, re_acc, im_acc = evalf(arg, working_prec, options)
if re and im:
complex_factors.append((re, im, re_acc, im_acc))
continue
elif re:
(s, m, e, b), w_acc = re, re_acc
elif im:
(s, m, e, b), w_acc = im, im_acc
direction += 1
else:
return None, None, None, None
direction += 2*s
man *= m
exp += e
bc += b
while bc > 3*working_prec:
man >>= working_prec
exp += working_prec
bc -= working_prec
acc = min(acc, w_acc)
sign = (direction & 2) >> 1
if not complex_factors:
v = normalize(sign, man, exp, bitcount(man), prec, rnd)
# multiply by i
if direction & 1:
return None, v, None, acc
else:
return v, None, acc, None
else:
# initialize with the first term
if (man, exp, bc) != start:
# there was a real part; give it an imaginary part
re, im = (sign, man, exp, bitcount(man)), (0, MPZ(0), 0, 0)
i0 = 0
else:
# there is no real part to start (other than the starting 1)
wre, wim, wre_acc, wim_acc = complex_factors[0]
acc = min(acc,
complex_accuracy((wre, wim, wre_acc, wim_acc)))
re = wre
im = wim
i0 = 1
for wre, wim, wre_acc, wim_acc in complex_factors[i0:]:
# acc is the overall accuracy of the product; we aren't
# computing exact accuracies of the product.
acc = min(acc,
complex_accuracy((wre, wim, wre_acc, wim_acc)))
use_prec = working_prec
A = mpf_mul(re, wre, use_prec)
B = mpf_mul(mpf_neg(im), wim, use_prec)
C = mpf_mul(re, wim, use_prec)
D = mpf_mul(im, wre, use_prec)
re = mpf_add(A, B, use_prec)
im = mpf_add(C, D, use_prec)
if options.get('verbose'):
print("MUL: wanted", prec, "accurate bits, got", acc)
# multiply by I
if direction & 1:
re, im = mpf_neg(im), re
return re, im, acc, acc
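# Sketch of the two phases above: in (2*pi*I).evalf(), the factors 2 and pi go
# through the pure real/imaginary phase and the lone factor of I only bumps
# ``direction``, so there are no complex_factors and the result comes back as
# (None, mpf for 2*pi, None, acc).  Factors with both real and imaginary parts
# end up in complex_factors and are combined in the second phase with explicit
# mpf_mul/mpf_add calls.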
def evalf_pow(v: 'Pow', prec: int, options) -> TMP_RES:
target_prec = prec
base, exp = v.args
# We handle x**n separately. This has two purposes: 1) it is much
# faster, because we avoid calling evalf on the exponent, and 2) it
# allows better handling of real/imaginary parts that are exactly zero
if exp.is_Integer:
p: int = exp.p # type: ignore
# Exact
if not p:
return fone, None, prec, None
# Exponentiation by p magnifies relative error by |p|, so the
# base must be evaluated with increased precision if p is large
prec += int(math.log(abs(p), 2))
result = evalf(base, prec + 5, options)
if result is S.ComplexInfinity:
if p < 0:
return None, None, None, None
return result
re, im, re_acc, im_acc = result
# Real to integer power
if re and not im:
return mpf_pow_int(re, p, target_prec), None, target_prec, None
# (x*I)**n = I**n * x**n
if im and not re:
z = mpf_pow_int(im, p, target_prec)
case = p % 4
if case == 0:
return z, None, target_prec, None
if case == 1:
return None, z, None, target_prec
if case == 2:
return mpf_neg(z), None, target_prec, None
if case == 3:
return None, mpf_neg(z), None, target_prec
# Zero raised to an integer power
if not re:
if p < 0:
return S.ComplexInfinity
return None, None, None, None
# General complex number to arbitrary integer power
re, im = libmp.mpc_pow_int((re, im), p, prec)
# Assumes full accuracy in input
return finalize_complex(re, im, target_prec)
result = evalf(base, prec + 5, options)
if result is S.ComplexInfinity:
if exp.is_Rational:
if exp < 0:
return None, None, None, None
return result
raise NotImplementedError
# Pure square root
if exp is S.Half:
xre, xim, _, _ = result
# General complex square root
if xim:
re, im = libmp.mpc_sqrt((xre or fzero, xim), prec)
return finalize_complex(re, im, prec)
if not xre:
return None, None, None, None
# Square root of a negative real number
if mpf_lt(xre, fzero):
return None, mpf_sqrt(mpf_neg(xre), prec), None, prec
# Positive square root
return mpf_sqrt(xre, prec), None, prec, None
# We first evaluate the exponent to find its magnitude
# This determines the working precision that must be used
prec += 10
result = evalf(exp, prec, options)
if result is S.ComplexInfinity:
return fnan, None, prec, None
yre, yim, _, _ = result
# Special cases: x**0
if not (yre or yim):
return fone, None, prec, None
ysize = fastlog(yre)
# Restart if too big
# XXX: prec + ysize might exceed maxprec
if ysize > 5:
prec += ysize
yre, yim, _, _ = evalf(exp, prec, options)
# Pure exponential function; no need to evalf the base
if base is S.Exp1:
if yim:
re, im = libmp.mpc_exp((yre or fzero, yim), prec)
return finalize_complex(re, im, target_prec)
return mpf_exp(yre, target_prec), None, target_prec, None
xre, xim, _, _ = evalf(base, prec + 5, options)
# 0**y
if not (xre or xim):
if yim:
return fnan, None, prec, None
if yre[0] == 1: # y < 0
return S.ComplexInfinity
return None, None, None, None
# (real ** complex) or (complex ** complex)
if yim:
re, im = libmp.mpc_pow(
(xre or fzero, xim or fzero), (yre or fzero, yim),
target_prec)
return finalize_complex(re, im, target_prec)
# complex ** real
if xim:
re, im = libmp.mpc_pow_mpf((xre or fzero, xim), yre, target_prec)
return finalize_complex(re, im, target_prec)
# negative ** real
elif mpf_lt(xre, fzero):
re, im = libmp.mpc_pow_mpf((xre, fzero), yre, target_prec)
return finalize_complex(re, im, target_prec)
# positive ** real
else:
return mpf_pow(xre, yre, target_prec), None, target_prec, None
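# A few concrete paths through evalf_pow (sketch):
#   * an exact integer exponent, e.g. x**2, is handled with mpf_pow_int on the
#     numerical base, with extra precision proportional to log2(|p|);
#   * sqrt of a negative real, e.g. sqrt(-4), takes the "square root of a
#     negative real number" branch and returns (None, mpf for 2, None, prec);
#   * a zero base with a negative integer exponent returns S.ComplexInfinity.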
#----------------------------------------------------------------------------#
# #
# Special functions #
# #
#----------------------------------------------------------------------------#
def evalf_exp(expr: 'exp', prec: int, options: OPT_DICT) -> TMP_RES:
from .power import Pow
return evalf_pow(Pow(S.Exp1, expr.exp, evaluate=False), prec, options)
def evalf_trig(v: 'Expr', prec: int, options: OPT_DICT) -> TMP_RES:
"""
This function handles sin and cos of complex arguments.
TODO: should also handle tan of complex arguments.
"""
from sympy.functions.elementary.trigonometric import cos, sin
if isinstance(v, cos):
func = mpf_cos
elif isinstance(v, sin):
func = mpf_sin
else:
raise NotImplementedError
arg = v.args[0]
# 20 extra bits is possibly overkill. It does make the need
# to restart very unlikely
xprec = prec + 20
re, im, re_acc, im_acc = evalf(arg, xprec, options)
if im:
if 'subs' in options:
v = v.subs(options['subs'])
return evalf(v._eval_evalf(prec), prec, options)
if not re:
if isinstance(v, cos):
return fone, None, prec, None
elif isinstance(v, sin):
return None, None, None, None
else:
raise NotImplementedError
# For trigonometric functions, we are interested in the
# fixed-point (absolute) accuracy of the argument.
xsize = fastlog(re)
# Magnitude <= 1.0. OK to compute directly, because there is no
# danger of hitting the first root of cos (with sin, magnitude
# <= 2.0 would actually be ok)
if xsize < 1:
return func(re, prec, rnd), None, prec, None
# Very large
if xsize >= 10:
xprec = prec + xsize
re, im, re_acc, im_acc = evalf(arg, xprec, options)
# Need to repeat in case the argument is very close to a
# multiple of pi (or pi/2), hitting close to a root
while 1:
y = func(re, prec, rnd)
ysize = fastlog(y)
gap = -ysize
accuracy = (xprec - xsize) - gap
if accuracy < prec:
if options.get('verbose'):
print("SIN/COS", accuracy, "wanted", prec, "gap", gap)
print(to_str(y, 10))
if xprec > options.get('maxprec', DEFAULT_MAXPREC):
return y, None, accuracy, None
xprec += gap
re, im, re_acc, im_acc = evalf(arg, xprec, options)
continue
else:
return y, None, prec, None
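# Example of the large-argument handling above: for sin(10**20) the argument
# has fastlog of about 67, so xprec is raised to roughly prec + 67 bits before
# mpf_sin is applied; if the argument then turns out to sit very close to a
# root of sin/cos, the while-loop keeps adding the observed gap to xprec until
# ``accuracy >= prec`` or maxprec is exceeded.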
def evalf_log(expr: 'log', prec: int, options: OPT_DICT) -> TMP_RES:
if len(expr.args)>1:
expr = expr.doit()
return evalf(expr, prec, options)
arg = expr.args[0]
workprec = prec + 10
result = evalf(arg, workprec, options)
if result is S.ComplexInfinity:
return result
xre, xim, xacc, _ = result
# evalf can return NoneTypes if chop=True
# issue 18516, 19623
if xre is xim is None:
# Rather than constructing the mpf tuple for -inf by hand, we set the
# argument to zero here and let mpmath handle the log of 0 below;
# returning -inf directly would be better.
xre = fzero
if xim:
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import log
# XXX: use get_abs etc instead
re = evalf_log(
log(Abs(arg, evaluate=False), evaluate=False), prec, options)
im = mpf_atan2(xim, xre or fzero, prec)
return re[0], im, re[2], prec
imaginary_term = (mpf_cmp(xre, fzero) < 0)
re = mpf_log(mpf_abs(xre), prec, rnd)
size = fastlog(re)
if prec - size > workprec and re != fzero:
from .add import Add
# We actually need to compute 1+x accurately, not x
add = Add(S.NegativeOne, arg, evaluate=False)
xre, xim, _, _ = evalf_add(add, prec, options)
prec2 = workprec - fastlog(xre)
# xre is now x - 1 so we add 1 back here to calculate x
re = mpf_log(mpf_abs(mpf_add(xre, fone, prec2)), prec, rnd)
re_acc = prec
if imaginary_term:
return re, mpf_pi(prec), re_acc, prec
else:
return re, None, re_acc, None
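# The "compute 1+x accurately" branch above matters for arguments just above 1,
# e.g. log(1 + 2**-30) at standard precision: the directly computed logarithm
# is of size ~2**-30, so prec - size exceeds workprec and the argument minus 1
# is re-evaluated with evalf_add at a correspondingly higher working precision
# before mpf_log is applied again.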
def evalf_atan(v: 'atan', prec: int, options: OPT_DICT) -> TMP_RES:
arg = v.args[0]
xre, xim, reacc, imacc = evalf(arg, prec + 5, options)
if xre is xim is None:
return (None,)*4
if xim:
raise NotImplementedError
return mpf_atan(xre, prec, rnd), None, prec, None
def evalf_subs(prec: int, subs: dict) -> dict:
""" Change all Float entries in `subs` to have precision prec. """
newsubs = {}
for a, b in subs.items():
b = S(b)
if b.is_Float:
b = b._eval_evalf(prec)
newsubs[a] = b
return newsubs
def evalf_piecewise(expr: 'Expr', prec: int, options: OPT_DICT) -> TMP_RES:
from .numbers import Float, Integer
if 'subs' in options:
expr = expr.subs(evalf_subs(prec, options['subs']))
newopts = options.copy()
del newopts['subs']
if hasattr(expr, 'func'):
return evalf(expr, prec, newopts)
if isinstance(expr, float):
return evalf(Float(expr), prec, newopts)
if isinstance(expr, int):
return evalf(Integer(expr), prec, newopts)
# We still have undefined symbols
raise NotImplementedError
def evalf_alg_num(a: 'AlgebraicNumber', prec: int, options: OPT_DICT) -> TMP_RES:
return evalf(a.to_root(), prec, options)
#----------------------------------------------------------------------------#
# #
# High-level operations #
# #
#----------------------------------------------------------------------------#
def as_mpmath(x: Any, prec: int, options: OPT_DICT) -> tUnion[mpc, mpf]:
from .numbers import Infinity, NegativeInfinity, Zero
x = sympify(x)
if isinstance(x, Zero) or x == 0:
return mpf(0)
if isinstance(x, Infinity):
return mpf('inf')
if isinstance(x, NegativeInfinity):
return mpf('-inf')
# XXX
result = evalf(x, prec, options)
return quad_to_mpmath(result)
def do_integral(expr: 'Integral', prec: int, options: OPT_DICT) -> TMP_RES:
func = expr.args[0]
x, xlow, xhigh = expr.args[1]
if xlow == xhigh:
xlow = xhigh = 0
elif x not in func.free_symbols:
# only the difference in limits matters in this case
# so if there is a symbol in common that will cancel
# out when taking the difference, then use that
# difference
if xhigh.free_symbols & xlow.free_symbols:
diff = xhigh - xlow
if diff.is_number:
xlow, xhigh = 0, diff
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
options['maxprec'] = min(oldmaxprec, 2*prec)
with workprec(prec + 5):
xlow = as_mpmath(xlow, prec + 15, options)
xhigh = as_mpmath(xhigh, prec + 15, options)
# Integration is like summation, and we can phone home from
# the integrand function to update accuracy summation style
# Note that this accuracy estimate is crude, since it fails to
# account for the variable quadrature weights, but it is better
# than nothing
from sympy.functions.elementary.trigonometric import cos, sin
from .symbol import Wild
have_part = [False, False]
max_real_term: tUnion[float, int] = MINUS_INF
max_imag_term: tUnion[float, int] = MINUS_INF
def f(t: 'Expr') -> tUnion[mpc, mpf]:
nonlocal max_real_term, max_imag_term
re, im, re_acc, im_acc = evalf(func, mp.prec, {'subs': {x: t}})
have_part[0] = re or have_part[0]
have_part[1] = im or have_part[1]
max_real_term = max(max_real_term, fastlog(re))
max_imag_term = max(max_imag_term, fastlog(im))
if im:
return mpc(re or fzero, im)
return mpf(re or fzero)
if options.get('quad') == 'osc':
A = Wild('A', exclude=[x])
B = Wild('B', exclude=[x])
D = Wild('D')
m = func.match(cos(A*x + B)*D)
if not m:
m = func.match(sin(A*x + B)*D)
if not m:
raise ValueError("An integrand of the form sin(A*x+B)*f(x) "
"or cos(A*x+B)*f(x) is required for oscillatory quadrature")
period = as_mpmath(2*S.Pi/m[A], prec + 15, options)
result = quadosc(f, [xlow, xhigh], period=period)
# XXX: quadosc does not do error detection yet
quadrature_error = MINUS_INF
else:
result, quadrature_err = quadts(f, [xlow, xhigh], error=1)
quadrature_error = fastlog(quadrature_err._mpf_)
options['maxprec'] = oldmaxprec
if have_part[0]:
re: Optional[MPF_TUP] = result.real._mpf_
re_acc: Optional[int]
if re == fzero:
re_s, re_acc = scaled_zero(int(-max(prec, max_real_term, quadrature_error)))
re = scaled_zero(re_s) # handled ok in evalf_integral
else:
re_acc = int(-max(max_real_term - fastlog(re) - prec, quadrature_error))
else:
re, re_acc = None, None
if have_part[1]:
im: Optional[MPF_TUP] = result.imag._mpf_
im_acc: Optional[int]
if im == fzero:
im_s, im_acc = scaled_zero(int(-max(prec, max_imag_term, quadrature_error)))
im = scaled_zero(im_s) # handled ok in evalf_integral
else:
im_acc = int(-max(max_imag_term - fastlog(im) - prec, quadrature_error))
else:
im, im_acc = None, None
result = re, im, re_acc, im_acc
return result
def evalf_integral(expr: 'Integral', prec: int, options: OPT_DICT) -> TMP_RES:
limits = expr.limits
if len(limits) != 1 or len(limits[0]) != 3:
raise NotImplementedError
workprec = prec
i = 0
maxprec = options.get('maxprec', INF)
while 1:
result = do_integral(expr, workprec, options)
accuracy = complex_accuracy(result)
if accuracy >= prec: # achieved desired precision
break
if workprec >= maxprec: # can't increase accuracy any more
break
if accuracy == -1:
# the answer may really be zero, or we may simply not have used enough
# precision yet; double the working precision so that maxprec is
# reached without too many iterations.
workprec *= 2
else:
workprec += max(prec, 2**i)
workprec = min(workprec, maxprec)
i += 1
return result
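# Sketch of the adaptive loop at work:
#
#     from sympy import Integral, exp, oo
#     from sympy.abc import x
#     Integral(exp(-x**2), (x, -oo, oo)).evalf()   # -> 1.77245385090552 (sqrt(pi))
#
# do_integral is retried with increased (or doubled) working precision until
# complex_accuracy(result) reaches the requested prec or maxprec is exceeded.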
def check_convergence(numer: 'Expr', denom: 'Expr', n: 'Symbol') -> tTuple[int, Any, Any]:
"""
Returns
=======
(h, g, p) where
-- h is:
> 0 for convergence of rate 1/factorial(n)**h
< 0 for divergence of rate factorial(n)**(-h)
= 0 for geometric or polynomial convergence or divergence
-- abs(g) is:
> 1 for geometric convergence of rate 1/abs(g)**n
< 1 for geometric divergence of rate abs(1/g)**n
= 1 for polynomial convergence or divergence
(g < 0 indicates an alternating series)
-- p is:
> 1 for polynomial convergence of rate 1/n**p
<= 1 for polynomial divergence of rate n**(-p)
"""
from sympy.polys.polytools import Poly
npol = Poly(numer, n)
dpol = Poly(denom, n)
p = npol.degree()
q = dpol.degree()
rate = q - p
if rate:
return rate, None, None
constant = dpol.LC() / npol.LC()
if abs(constant) != 1:
return rate, constant, None
if npol.degree() == dpol.degree() == 0:
return rate, constant, 0
pc = npol.all_coeffs()[1]
qc = dpol.all_coeffs()[1]
return rate, constant, (qc - pc)/dpol.LC()
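# Worked example: for the general term 1/factorial(n), hypersimp (used by
# hypsum below) gives the term ratio 1/(n + 1), so numer = 1 (degree 0) and
# denom = n + 1 (degree 1).  check_convergence then returns (1, None, None):
# convergence at rate 1/factorial(n), which selects direct summation in hypsum.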
def hypsum(expr: 'Expr', n: 'Symbol', start: int, prec: int) -> mpf:
"""
Sum a rapidly convergent infinite hypergeometric series with
given general term, e.g. ``hypsum(1/factorial(n), n, 0, prec)``
computes E to ``prec`` bits.  The quotient between successive terms
must be a quotient of integer polynomials.
"""
from .numbers import Float
from sympy.simplify.simplify import hypersimp
if prec == float('inf'):
raise NotImplementedError('does not support inf prec')
if start:
expr = expr.subs(n, n + start)
hs = hypersimp(expr, n)
if hs is None:
raise NotImplementedError("a hypergeometric series is required")
num, den = hs.as_numer_denom()
func1 = lambdify(n, num)
func2 = lambdify(n, den)
h, g, p = check_convergence(num, den, n)
if h < 0:
raise ValueError("Sum diverges like (n!)^%i" % (-h))
term = expr.subs(n, 0)
if not term.is_Rational:
raise NotImplementedError("Non rational term functionality is not implemented.")
# Direct summation if geometric or faster
if h > 0 or (h == 0 and abs(g) > 1):
term = (MPZ(term.p) << prec) // term.q
s = term
k = 1
while abs(term) > 5:
term *= MPZ(func1(k - 1))
term //= MPZ(func2(k - 1))
s += term
k += 1
return from_man_exp(s, -prec)
else:
alt = g < 0
if abs(g) < 1:
raise ValueError("Sum diverges like (%i)^n" % abs(1/g))
if p < 1 or (p == 1 and not alt):
raise ValueError("Sum diverges like n^%i" % (-p))
# We have polynomial convergence: use Richardson extrapolation
vold = None
ndig = prec_to_dps(prec)
while True:
# Need to use at least quad precision because a lot of cancellation
# might occur in the extrapolation process; we check the answer to
# make sure that the desired precision has been reached, too.
prec2 = 4*prec
term0 = (MPZ(term.p) << prec2) // term.q
def summand(k, _term=[term0]):
if k:
k = int(k)
_term[0] *= MPZ(func1(k - 1))
_term[0] //= MPZ(func2(k - 1))
return make_mpf(from_man_exp(_term[0], -prec2))
with workprec(prec):
v = nsum(summand, [0, mpmath_inf], method='richardson')
vf = Float(v, ndig)
if vold is not None and vold == vf:
break
prec += prec # double precision each time
vold = vf
return v._mpf_
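# Rough usage sketch (prec is in bits):
#
#     from sympy import factorial, Symbol
#     n = Symbol('n')
#     hypsum(1/factorial(n), n, 0, 53)    # mpf tuple for E, good to ~53 bits
#
# Series with only polynomial convergence (h == 0, |g| == 1, p > 1) are summed
# through the Richardson-extrapolation branch instead of direct summation.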
def evalf_prod(expr: 'Product', prec: int, options: OPT_DICT) -> TMP_RES:
if all((l[1] - l[2]).is_Integer for l in expr.limits):
result = evalf(expr.doit(), prec=prec, options=options)
else:
from sympy.concrete.summations import Sum
result = evalf(expr.rewrite(Sum), prec=prec, options=options)
return result
def evalf_sum(expr: 'Sum', prec: int, options: OPT_DICT) -> TMP_RES:
from .numbers import Float
if 'subs' in options:
expr = expr.subs(options['subs'])
func = expr.function
limits = expr.limits
if len(limits) != 1 or len(limits[0]) != 3:
raise NotImplementedError
if func.is_zero:
return None, None, prec, None
prec2 = prec + 10
try:
n, a, b = limits[0]
if b is not S.Infinity or a is S.NegativeInfinity or a != int(a):
raise NotImplementedError
# Use fast hypergeometric summation if possible
v = hypsum(func, n, int(a), prec2)
delta = prec - fastlog(v)
if fastlog(v) < -10:
v = hypsum(func, n, int(a), delta)
return v, None, min(prec, delta), None
except NotImplementedError:
# Euler-Maclaurin summation for general series
eps = Float(2.0)**(-prec)
for i in range(1, 5):
m = n = 2**i * prec
s, err = expr.euler_maclaurin(m=m, n=n, eps=eps,
eval_integral=False)
err = err.evalf()
if err is S.NaN:
raise NotImplementedError
if err <= eps:
break
err = fastlog(evalf(abs(err), 20, options)[0])
re, im, re_acc, im_acc = evalf(s, prec2, options)
if re_acc is None:
re_acc = -err
if im_acc is None:
im_acc = -err
return re, im, re_acc, im_acc
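# Example of the fast path above: Sum(1/factorial(k), (k, 0, oo)).evalf() is
# handled by hypsum and gives E = 2.71828182845905.  Sums whose term ratio is
# not a rational function of the index (so hypersimp fails) fall back to the
# Euler-Maclaurin branch, where the error estimate supplies re_acc/im_acc.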
#----------------------------------------------------------------------------#
# #
# Symbolic interface #
# #
#----------------------------------------------------------------------------#
def evalf_symbol(x: 'Expr', prec: int, options: OPT_DICT) -> TMP_RES:
val = options['subs'][x]
if isinstance(val, mpf):
if not val:
return None, None, None, None
return val._mpf_, None, prec, None
else:
if '_cache' not in options:
options['_cache'] = {}
cache = options['_cache']
cached, cached_prec = cache.get(x, (None, MINUS_INF))
if cached_prec >= prec:
return cached
v = evalf(sympify(val), prec, options)
cache[x] = (v, prec)
return v
evalf_table: tDict[Type['Expr'], Callable[['Expr', int, OPT_DICT], TMP_RES]] = {}
def _create_evalf_table():
global evalf_table
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from .add import Add
from .mul import Mul
from .numbers import Exp1, Float, Half, ImaginaryUnit, Integer, NaN, NegativeOne, One, Pi, Rational, \
Zero, ComplexInfinity, AlgebraicNumber
from .power import Pow
from .symbol import Dummy, Symbol
from sympy.functions.elementary.complexes import Abs, im, re
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import atan, cos, sin
from sympy.integrals.integrals import Integral
evalf_table = {
Symbol: evalf_symbol,
Dummy: evalf_symbol,
Float: evalf_float,
Rational: evalf_rational,
Integer: evalf_integer,
Zero: lambda x, prec, options: (None, None, prec, None),
One: lambda x, prec, options: (fone, None, prec, None),
Half: lambda x, prec, options: (fhalf, None, prec, None),
Pi: lambda x, prec, options: (mpf_pi(prec), None, prec, None),
Exp1: lambda x, prec, options: (mpf_e(prec), None, prec, None),
ImaginaryUnit: lambda x, prec, options: (None, fone, None, prec),
NegativeOne: lambda x, prec, options: (fnone, None, prec, None),
ComplexInfinity: lambda x, prec, options: S.ComplexInfinity,
NaN: lambda x, prec, options: (fnan, None, prec, None),
exp: evalf_exp,
cos: evalf_trig,
sin: evalf_trig,
Add: evalf_add,
Mul: evalf_mul,
Pow: evalf_pow,
log: evalf_log,
atan: evalf_atan,
Abs: evalf_abs,
re: evalf_re,
im: evalf_im,
floor: evalf_floor,
ceiling: evalf_ceiling,
Integral: evalf_integral,
Sum: evalf_sum,
Product: evalf_prod,
Piecewise: evalf_piecewise,
AlgebraicNumber: evalf_alg_num,
}
def evalf(x: 'Expr', prec: int, options: OPT_DICT) -> TMP_RES:
"""
Evaluate the ``Expr`` instance, ``x``
to a binary precision of ``prec``. This
function is supposed to be used internally.
Parameters
==========
x : Expr
The formula to evaluate to a float.
prec : int
The binary precision that the output should have.
options : dict
A dictionary with the same entries as
``EvalfMixin.evalf`` and in addition,
``maxprec`` which is the maximum working precision.
Returns
=======
An optional tuple, ``(re, im, re_acc, im_acc)``
which are the real, imaginary, real accuracy
and imaginary accuracy respectively. ``re`` is
an mpf value tuple and so is ``im``. ``re_acc``
and ``im_acc`` are ints.
NB: all these return values can be ``None``.
If all values are ``None``, then that represents 0.
Note that 0 is also represented as ``fzero = (0, 0, 0, 0)``.
"""
from sympy.functions.elementary.complexes import re as re_, im as im_
try:
rf = evalf_table[type(x)]
r = rf(x, prec, options)
except KeyError:
# Fall back to ordinary evalf if possible
if 'subs' in options:
x = x.subs(evalf_subs(prec, options['subs']))
xe = x._eval_evalf(prec)
if xe is None:
raise NotImplementedError
as_real_imag = getattr(xe, "as_real_imag", None)
if as_real_imag is None:
raise NotImplementedError # e.g. FiniteSet(-1.0, 1.0).evalf()
re, im = as_real_imag()
if re.has(re_) or im.has(im_):
raise NotImplementedError
if re == 0:
re = None
reprec = None
elif re.is_number:
re = re._to_mpmath(prec, allow_ints=False)._mpf_
reprec = prec
else:
raise NotImplementedError
if im == 0:
im = None
imprec = None
elif im.is_number:
im = im._to_mpmath(prec, allow_ints=False)._mpf_
imprec = prec
else:
raise NotImplementedError
r = re, im, reprec, imprec
if options.get("verbose"):
print("### input", x)
print("### output", to_str(r[0] or fzero, 50) if isinstance(r, tuple) else r)
print("### raw", r) # r[0], r[2]
print()
chop = options.get('chop', False)
if chop:
if chop is True:
chop_prec = prec
else:
# convert (approximately) from given tolerance;
# the formula here will make 1e-i round to 0 for
# i in the range +/-27 while 2e-i will not be chopped
chop_prec = int(round(-3.321*math.log10(chop) + 2.5))
if chop_prec == 3:
chop_prec -= 1
r = chop_parts(r, chop_prec)
if options.get("strict"):
check_target(x, r, prec)
return r
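# Illustration of the quad format returned by evalf, assuming the dispatch
# table has been populated (it is created lazily by EvalfMixin.evalf):
#
#     from sympy import Rational
#     evalf(Rational(3, 2), 53, {})     # -> ((0, 3, -1, 2), None, 53, None)
#     evalf(S.ImaginaryUnit, 53, {})    # -> (None, (0, 1, 0, 1), None, 53)
#
# Purely real results carry None in the imaginary slots and vice versa; an
# all-None quad stands for an exact zero.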
def quad_to_mpmath(q):
"""Turn the quad returned by ``evalf`` into an ``mpf`` or ``mpc``. """
if q is S.ComplexInfinity:
raise NotImplementedError
re, im, _, _ = q
if im:
if not re:
re = fzero
return make_mpc((re, im))
elif re:
return make_mpf(re)
else:
return make_mpf(fzero)
class EvalfMixin:
"""Mixin class adding evalf capability."""
__slots__ = () # type: tTuple[str, ...]
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""
Evaluate the given formula to an accuracy of *n* digits.
Parameters
==========
subs : dict, optional
Substitute numerical values for symbols, e.g.
``subs={x:3, y:1+pi}``. The substitutions must be given as a
dictionary.
maxn : int, optional
Allow a maximum temporary working precision of maxn digits.
chop : bool or number, optional
Specifies how to replace tiny real or imaginary parts in
subresults by exact zeros.
When ``True`` the chop value defaults to standard precision.
Otherwise the chop value is used to determine the
magnitude of "small" for purposes of chopping.
>>> from sympy import N
>>> x = 1e-4
>>> N(x, chop=True)
0.000100000000000000
>>> N(x, chop=1e-5)
0.000100000000000000
>>> N(x, chop=1e-4)
0
strict : bool, optional
Raise ``PrecisionExhausted`` if any subresult fails to
evaluate to full accuracy, given the available maxprec.
quad : str, optional
Choose algorithm for numerical quadrature. By default,
tanh-sinh quadrature is used. For oscillatory
integrals on an infinite interval, try ``quad='osc'``.
verbose : bool, optional
Print debug information.
Notes
=====
When Floats are naively substituted into an expression,
precision errors may adversely affect the result. For example,
adding 1e16 (a Float) to 1 will truncate to 1e16; if 1e16 is
then subtracted, the result will be 0.
That is exactly what happens in the following:
>>> from sympy.abc import x, y, z
>>> values = {x: 1e16, y: 1, z: 1e16}
>>> (x + y - z).subs(values)
0
Using the subs argument for evalf is the accurate way to
evaluate such an expression:
>>> (x + y - z).evalf(subs=values)
1.00000000000000
"""
from .numbers import Float, Number
n = n if n is not None else 15
if subs and is_sequence(subs):
raise TypeError('subs must be given as a dictionary')
# for the sake of Sage, which doesn't like evalf(1)
if n == 1 and isinstance(self, Number):
from .expr import _mag
rv = self.evalf(2, subs, maxn, chop, strict, quad, verbose)
m = _mag(rv)
rv = rv.round(1 - m)
return rv
if not evalf_table:
_create_evalf_table()
prec = dps_to_prec(n)
options = {'maxprec': max(prec, int(maxn*LG10)), 'chop': chop,
'strict': strict, 'verbose': verbose}
if subs is not None:
options['subs'] = subs
if quad is not None:
options['quad'] = quad
try:
result = evalf(self, prec + 4, options)
except NotImplementedError:
# Fall back to the ordinary evalf
if hasattr(self, 'subs') and subs is not None: # issue 20291
v = self.subs(subs)._eval_evalf(prec)
else:
v = self._eval_evalf(prec)
if v is None:
return self
elif not v.is_number:
return v
try:
# If the result is numerical, normalize it
result = evalf(v, prec, options)
except NotImplementedError:
# Probably contains symbols or unknown functions
return v
if result is S.ComplexInfinity:
return result
re, im, re_acc, im_acc = result
if re is S.NaN or im is S.NaN:
return S.NaN
if re:
p = max(min(prec, re_acc), 1)
re = Float._new(re, p)
else:
re = S.Zero
if im:
p = max(min(prec, im_acc), 1)
im = Float._new(im, p)
return re + im*S.ImaginaryUnit
else:
return re
n = evalf
def _evalf(self, prec):
"""Helper for evalf. Does the same thing but takes binary precision"""
r = self._eval_evalf(prec)
if r is None:
r = self
return r
def _eval_evalf(self, prec):
return
def _to_mpmath(self, prec, allow_ints=True):
# mpmath functions accept ints as input
errmsg = "cannot convert to mpmath number"
if allow_ints and self.is_Integer:
return self.p
if hasattr(self, '_as_mpf_val'):
return make_mpf(self._as_mpf_val(prec))
try:
result = evalf(self, prec, {})
return quad_to_mpmath(result)
except NotImplementedError:
v = self._eval_evalf(prec)
if v is None:
raise ValueError(errmsg)
if v.is_Float:
return make_mpf(v._mpf_)
# Number + Number*I is also fine
re, im = v.as_real_imag()
if allow_ints and re.is_Integer:
re = from_int(re.p)
elif re.is_Float:
re = re._mpf_
else:
raise ValueError(errmsg)
if allow_ints and im.is_Integer:
im = from_int(im.p)
elif im.is_Float:
im = im._mpf_
else:
raise ValueError(errmsg)
return make_mpc((re, im))
def N(x, n=15, **options):
r"""
Calls x.evalf(n, \*\*options).
Explanations
============
Both .n() and N() are equivalent to .evalf(); use the one that you like better.
See also the docstring of .evalf() for information on the options.
Examples
========
>>> from sympy import Sum, oo, N
>>> from sympy.abc import k
>>> Sum(1/k**k, (k, 1, oo))
Sum(k**(-k), (k, 1, oo))
>>> N(_, 4)
1.291
"""
# by using rational=True, any evaluation of a string
# will be done using exact values for the Floats
return sympify(x, rational=True).evalf(n, **options)
def _evalf_with_bounded_error(x: 'Expr', eps: 'Optional[Expr]' = None,
m: int = 0,
options: Optional[OPT_DICT] = None) -> TMP_RES:
"""
Evaluate *x* to within a bounded absolute error.
Parameters
==========
x : Expr
The quantity to be evaluated.
eps : Expr, None, optional (default=None)
Positive real upper bound on the acceptable error.
m : int, optional (default=0)
If *eps* is None, then use 2**(-m) as the upper bound on the error.
options: OPT_DICT
As in the ``evalf`` function.
Returns
=======
A tuple ``(re, im, re_acc, im_acc)``, as returned by ``evalf``.
See Also
========
evalf
"""
if eps is not None:
if not (eps.is_Rational or eps.is_Float) or not eps > 0:
raise ValueError("eps must be positive")
r, _, _, _ = evalf(1/eps, 1, {})
m = fastlog(r)
c, d, _, _ = evalf(x, 1, {})
# Note: If x = a + b*I, then |a| <= 2|c| and |b| <= 2|d|, with equality
# only in the zero case.
# If a is non-zero, then |c| = 2**nc for some integer nc, and c has
# bitcount 1. Therefore 2**fastlog(c) = 2**(nc+1) = 2|c| is an upper bound
# on |a|. Likewise for b and d.
nr, ni = fastlog(c), fastlog(d)
n = max(nr, ni) + 1
# If x is 0, then n is MINUS_INF, and p will be 1. Otherwise,
# n - 1 bits get us past the integer parts of a and b, and +1 accounts for
# the factor of <= sqrt(2) that is |x|/max(|a|, |b|).
p = max(1, m + n + 1)
options = options or {}
return evalf(x, p, options)
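# Rough usage sketch: to evaluate pi with absolute error below 2**-10,
#
#     _evalf_with_bounded_error(S.Pi, m=10)
#
# returns a quad whose real part is within 2**-10 of pi.  Passing
# eps=Rational(1, 1000) instead derives m from the bit size of 1/eps
# (about 10 here), then calls evalf at the corresponding precision.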