licenses (sequence, lengths 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2–8) | text (string, lengths 25–67.1M) | package_name (string, lengths 2–41) | repo (string, lengths 33–86)
---|---|---|---|---|---|---|---|---|
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 2330 | using DataStructures
struct SpaceIndex
value::Int
end
struct Spaces
indices::IntDisjointSets # union-find data structure
dimensions::Dict{Int, Int}
polyvars::Dict{Int, Vector{SpaceVariable}}
end
Spaces() = Spaces(IntDisjointSets(0), Dict{Int, Int}(),
Dict{Int, Vector{SpaceVariable}}())
function new_space(spaces::Spaces)
return SpaceIndex(push!(spaces.indices))
end
function new_space(spaces::Spaces,
dim::Int)
space_index = new_space(spaces)
spaces.dimensions[space_index.value] = dim
return space_index
end
function new_space(spaces::Spaces,
polyvars::Vector{SpaceVariable})
space_index = new_space(spaces)
spaces.dimensions[space_index.value] = length(polyvars)
spaces.polyvars[space_index.value] = polyvars
return space_index
end
function merge_property(d::Dict, root, key1, key2, name)
value = nothing
if key1 in keys(d)
v1 = d[key1]
if key2 in keys(d) && v1 != d[key2]
error("Sets lie on the same spaces but have different $name")
end
value = v1
elseif key2 in keys(d)
value = d[key2]
end
if value !== nothing
d[root] = value
end
end
function merge_spaces(spaces::Spaces, a::SpaceIndex, b::SpaceIndex)
root_a = find_root!(spaces.indices, a.value)
root_b = find_root!(spaces.indices, b.value)
root = root_union!(spaces.indices, root_a, root_b)
# the properties may not be set at `a.value` even when they are set at `root_a.value`
merge_property(spaces.dimensions, root, root_a, root_b, "dimension")
merge_property(spaces.polyvars, root, root_a, root_b,
"polynomial variables")
return SpaceIndex(root)
end
function space_dimension(spaces::Spaces, si::SpaceIndex)
idx = find_root!(spaces.indices, si.value)
if !haskey(spaces.dimensions, idx)
error("Missing dimension information, use Ellipsoid(dimension=...) or PolySet(dimension=...)")
end
return spaces.dimensions[idx]
end
function space_polyvars(spaces::Spaces, si::SpaceIndex)
idx = find_root!(spaces.indices, si.value)
if !haskey(spaces.polyvars, idx)
dim = space_dimension(spaces, si)
DynamicPolynomials.@polyvar x[1:dim]
spaces.polyvars[idx] = x
end
return spaces.polyvars[idx]
end
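# Minimal usage sketch (hypothetical values, using only the functions
# defined above): merging a space of unknown dimension with one of known
# dimension propagates the dimension through the union-find structure.
#
#     spaces = Spaces()
#     a = new_space(spaces, 2)        # dimension known to be 2
#     b = new_space(spaces)           # dimension unknown
#     c = merge_spaces(spaces, a, b)
#     space_dimension(spaces, c)      # returns 2, inherited from `a`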
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 3385 | export boundary_point, tangent_cone
struct BoundaryPoint{S} <: SymbolicVariable
set::S
symbol::Symbol
end
struct TangentCone{S} <: SymbolicVariable
set::S
symbol::Symbol
end
boundary_point(S, s::Symbol) = BoundaryPoint(S, s)
tangent_cone(S, s::Symbol) = TangentCone(S, s)
function tangent_cone(S, b::BoundaryPoint)
@assert S === b.set
tangent_cone(S, b.symbol)
end
variablify(t::BoundaryPoint) = BoundaryPoint(variablify(t.set), t.symbol)
need_variablify(t::TangentCone) = need_variablify(t.set)
variablify(t::TangentCone) = TangentCone(variablify(t.set), t.symbol)
create_spaces(b::BoundaryPoint, spaces::Spaces) = create_spaces(b.set, spaces)
create_spaces(t::TangentCone, spaces::Spaces) = create_spaces(t.set, spaces)
function create_spaces(c::MembershipConstraint, spaces::Spaces)
sub = create_spaces(c.member, spaces)
sup = create_spaces(c.set, spaces)
return merge_spaces(spaces, sub, sup)
end
clear_spaces(b::BoundaryPoint) = clear_spaces(b.set)
clear_spaces(t::TangentCone) = clear_spaces(t.set)
function clear_spaces(c::MembershipConstraint)
clear_spaces(c.member)
clear_spaces(c.set)
end
Sets.perspective_variable(b::BoundaryPoint) = Sets.perspective_variable(b.set)
Sets.perspective_variable(t::TangentCone) = Sets.perspective_variable(t.set)
function Sets.perspective_variable(c::MembershipConstraint)
return synchronize_perspective(
Sets.perspective_variable(c.member),
Sets.perspective_variable(c.set)
)
end
function set_space(
space::Space,
::MembershipConstraint{
<:Sets.LinearImage{<:BoundaryPoint},
<:Sets.LinearImage{<:TangentCone}}
)
return set_space(space, DualSpace)
end
using LinearAlgebra
function linear_algebraic_surface(set::Sets.PolarOf{<:Sets.Ellipsoid}, A, E)
Q = set.set.Q
return psd_constraint(Symmetric(-A * Q * E' - E * Q * A'))
end
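# Interpretation sketch (the standard Lyapunov reading, stated as an
# assumption rather than taken from any package documentation): the
# constraint A Q E' + E Q A' ⪯ 0 imposed above is the Q-side (dual)
# Lyapunov inequality for the matrix pair (E, A); for E = I it reduces
# to the classical A Q + Q A' ⪯ 0 certifying invariance under ẋ = A x.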
function linear_algebraic_surface(set::Sets.PolarOf{<:Sets.ConvexPolySet}, A, E)
v = MP.variables(set.set.p)
DynamicPolynomials.@polyvar z[1:size(E, 1)]
Ez = E' * z
return JuMP.build_constraint(
error,
-dot(A' * z, [d(v => Ez) for d in MP.differentiate(set.set.p, v)]),
SOSCone(),
)
end
function JuMP.build_constraint(
_error::Function,
member::Sets.LinearImage{BoundaryPoint{S}},
set::Sets.LinearImage{TangentCone{S}}
) where S <: Sets.PolarOf{<:Union{Sets.Ellipsoid, Sets.ConvexPolySet}}
_set = member.set.set
@assert set.set.set === _set
return linear_algebraic_surface(_set, member.A, set.A)
end
function add_linear_algebraic_surface_domain(
model, set::Sets.Ellipsoid, domain, A, E)
Q = set.Q
P = Symmetric(-A * Q * E' - E * Q * A')
return _add_constraint_or_not(model, psd_in_domain(model, P, E' \ domain))
end
function add_linear_algebraic_surface(model, set::Sets.PolarOf{<:Sets.Piecewise}, A, E)
for (i, si) in enumerate(set.set.sets)
add_linear_algebraic_surface_domain(
model, si, set.set.pieces[i], A, E
)
end
end
function JuMP.add_constraint(
model::JuMP.Model,
constraint::MembershipConstraint{
<:Sets.LinearImage{<:BoundaryPoint},
<:Sets.LinearImage{<:TangentCone}
}
)
_set = constraint.member.set.set
@assert constraint.set.set.set === _set
return add_linear_algebraic_surface(model, _set, constraint.member.A, constraint.set.A)
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 4710 | # Efficient implementation of a' * Q * b that avoids unnecessary type
# promotion as well as unnecessary allocations.
function quad_form(a::AbstractVector{<:Real},
Q::SymMatrix{<:Real},
b::AbstractVector{<:Real})
n = length(a)
@assert n == LinearAlgebra.checksquare(Q)
@assert n == length(b)
out = zero(typeof(zero(eltype(a)) * zero(eltype(Q)) * zero(eltype(b))))
k = 0
for j in 1:n
α = a[j]
β = b[j]
for i in 1:(j-1)
k += 1
out += a[i] * Q.Q[k] * β
out += α * Q.Q[k] * b[i]
end
k += 1
out += a[j] * Q.Q[k] * b[j]
end
return out
end
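# Sanity sketch (hypothetical values): `SymMatrix` stores the upper
# triangle in packed column-major order, so Q.Q = [1, 2, 3] encodes the
# matrix [1 2; 2 3] and the loop above matches the dense product:
#
#     Q = SymMatrix([1.0, 2.0, 3.0], 2)
#     quad_form([1.0, 0.0], Q, [0.0, 1.0])  # 2.0, i.e. [1 0] * [1 2; 2 3] * [0, 1]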
function quad_form(a::AbstractVector,
Q::AbstractMatrix,
b::AbstractVector)
(n, m) = size(Q)
n == length(a) || throw(DimensionMismatch())
m == length(b) || throw(DimensionMismatch())
U = MA.promote_operation(*, eltype(a), eltype(Q), eltype(b))
out = zero(MA.promote_operation(+, U, U))
for j in 1:m
for i in 1:n
out = MA.add_mul!!(out, a[i], Q[i, j], b[j])
end
end
return out
end
# Same as quad_form(a, Q, a)
function quad_form(Q::Symmetric,
a::AbstractVector)
return quad_form(a, Q, a)
end
function quad_form(Q::Symmetric{JuMP.VariableRef}, a::AbstractVector{<:Real})
return quad_form(a, Q, a)
end
# We have x' Q x and we want y' Q y where y is obtained by substituting
# A * new_vars for vars in x. We want to compute the matrix M such that
# y = M z where z is the vector of monomials of degree d of new_vars
# Then we will have y' Q y = z' M' Q M z
struct GramTransformation{T, MT <: MP.AbstractMonomial,
MVT <: AbstractVector{MT}}
M::Matrix{T}
monos::MVT
end
function apply_transformation(p::SumOfSquares.GramMatrix,
t::GramTransformation)
new_n = length(t.monos)
new_Q = [quad_form(t.M[:, i], p.Q, t.M[:, j]) for j in 1:new_n for i in 1:j]
return GramMatrix(SymMatrix(new_Q, new_n), t.monos)
end
function transformation(old_monos, A::AbstractMatrix, new_vars, d)
new_monos = MP.monomials(new_vars, d)
new_n = length(new_monos)
M = zeros(eltype(A), length(old_monos), new_n)
mapped_vars = A * new_vars
# Cache the result of mapped_vars[i]^n
powers = [Union{Nothing, eltype(mapped_vars)}[mapped_vars[i]]
for i in eachindex(mapped_vars)]
# Compute mapped_vars[i]^n by "Power by Squaring" and cache it in `powers`
function _power(i, n)
@assert n > 0
while n > length(powers[i])
push!(powers[i], nothing)
end
if powers[i][n] === nothing
if isodd(n)
powers[i][n] = mapped_vars[i] * _power(i, n - 1)
else
p_2 = _power(i, div(n, 2))
powers[i][n] = p_2 * p_2
end
end
return powers[i][n]::eltype(mapped_vars)
end
function _map(mono::MP.AbstractMonomial)
exps = MP.exponents(mono)
length(exps) == length(mapped_vars) || throw(ArgumentError("A monomial has fewer variables than `new_vars`"))
cur = one(eltype(mapped_vars))
for i in eachindex(exps)
if exps[i] > 0
cur *= _power(i, exps[i])
end
end
return cur
end
for i in eachindex(old_monos)
y = _map(old_monos[i])
j = 1
for term in MP.terms(y)
mono = MP.monomial(term)
while j <= length(new_monos) && mono > new_monos[j]
j += 1
end
M[i, j] = MP.coefficient(term)
end
end
return GramTransformation(M, new_monos)
end
# computes p ∘ A or more precisely p(variables(p) => A * new_vars)
function apply_matrix(p::SumOfSquares.GramMatrix{T, <:MonomialBasis},
A::AbstractMatrix, new_vars, d) where T
return apply_transformation(p, transformation(p.basis.monomials, A, new_vars, d))
end
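# Usage sketch (mirroring this package's `apply_matrix` testset):
#
#     DynamicPolynomials.@polyvar x[1:2] y[1:2]
#     q = GramMatrix([6 5 4; 5 3 2; 4 2 1], MP.monomials(x, 2))
#     qB = apply_matrix(q, [2.0 3.0; 4.0 5.0], y, 2)
#     # MP.polynomial(qB) == MP.polynomial(q)(x => [2.0 3.0; 4.0 5.0] * y)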
# Build, without adding it to a model, a constraint enforcing `Q ⪰ 0`.
function psd_constraint(Q::Symmetric)
n = LinearAlgebra.checksquare(Q)
q = [Q[i, j] for j in 1:n for i in 1:j]
# For n == 0, it will create no constraint, for n == 1, it will simply
# be a Nonnegatives constraint and for n == 2 it will be a rotated SOC.
set = SumOfSquares.matrix_cone(MOI.PositiveSemidefiniteConeTriangle, n)
return PolyJuMP.bridgeable(JuMP.build_constraint(error, q, set),
JuMP.moi_function_type(typeof(q)), typeof(set))
end
function psd_constraint(model, Q::Symmetric)
return JuMP.add_constraint(model, psd_constraint(Q))
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
|
[
"MIT"
] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 17977 | abstract type AbstractVariable <: JuMP.AbstractVariable end
abstract type HintPoint end
# It will not really be the center; the center for z = 0 is the same as for <h, x> = 0
struct CenterPoint{T} <: HintPoint
h::Vector{T}
end
struct InteriorPoint{T} <: HintPoint
h::Vector{T}
end
_β(model, ::InteriorPoint) = @variable(model)
_b(model, h::InteriorPoint) = @variable(model, [1:length(h.h)])
function polar_perspective_ellipsoid(ell, point::HintPoint, z::SpaceVariable,
x::Vector{SpaceVariable})
y = [z; x]
H = Sets._householder(point.h)
p = y' * H * Sets._perspective_cat(ell) * H * y
return Sets.perspective_dual(Sets.Householder(ell, p, point.h, z, x))
end
function polar_perspective_ellipsoid(model, Q::Symmetric{JuMP.VariableRef},
point::CenterPoint, z, x)
psd_constraint(model, Q)
ell = Sets.Ellipsoid(Q)
return polar_perspective_ellipsoid(ell, point, z, x)
end
function polar_perspective_ellipsoid(model, Q::Symmetric{JuMP.VariableRef},
point::InteriorPoint, z, x)
n = LinearAlgebra.checksquare(Q)
@assert n == length(point.h)
β = @variable(model, base_name="β")
b = @variable(model, [1:length(point.h)], base_name="b")
psd_constraint(model, Symmetric([β+1 b'; b Q]))
ell = Sets.ShiftedEllipsoid(Q, b, β)
return polar_perspective_ellipsoid(ell, point, z, x)
end
function perspective_dual_polyset(set, point::HintPoint, z::SpaceVariable,
x::Vector{SpaceVariable})
y = [z; x]
H = Sets._householder(point.h)
p = Sets.perspective_gauge0(set)(y => H * y)
return Sets.perspective_dual(Sets.Householder(set, p, point.h, z, x))
end
# For `CenterPoint`, `q` should be non-perspective, need refactoring
function perspective_dual_polyset(degree, q, point::InteriorPoint, z, x)
set = Sets.ConvexPolynomialSet(degree, q, z, x)
perspective_dual_polyset(set, point, z, x)
end
### Polytope ###
struct Polytope <: AbstractVariable
symmetric::Bool
dimension::Union{Nothing, Int}
piecewise::Union{Polyhedra.Rep, Nothing}
end
function Polytope(;
symmetric::Bool=false,
dimension::Union{Int, Nothing}=nothing,
piecewise::Union{Polyhedra.Rep, Nothing}=nothing,
)
function update_dim(object, dim_fun)
if object !== nothing
d = dim_fun(object)
if dimension === nothing
dimension = d
elseif dimension != d
throw(DimensionMismatch())
end
end
end
update_dim(piecewise, Polyhedra.fulldim)
return Polytope(symmetric, dimension, piecewise)
end
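# Usage sketch (hypothetical `model`, by analogy with this package's
# `Ellipsoid` tests):
#
#     @variable(model, S, Polytope(symmetric=true, dimension=2))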
Sets.space_variables(::Polytope) = nothing
function variable_set(model::JuMP.AbstractModel, ell::Polytope, space::Space,
space_dimension, space_polyvars)
n = space_dimension
if ell.symmetric
if ell.piecewise === nothing
set = Sets.PolarPoint(@variable(model, [1:n], base_name = "a"))
else
hashyperplanes(ell.piecewise) && error("hyperplanes not supported for piecewise")
a = @variable(model, [1:nhalfspaces(ell.piecewise), 1:n], base_name="a")
sets = [Sets.PolarPoint(a[i, :]) for i in 1:nhalfspaces(ell.piecewise)]
set = Sets.Piecewise(sets, ell.piecewise)
for i in eachindex(set.graph)
for (j, v) in set.graph[i]
if i < j # The constraints are the same for (i, j) and (j, i)
λ = @variable(model, base_name="λc[$i,$j]")
# Ensures continuity
@constraint(model, sets[i].a - sets[j].a .== λ * v)
# Ensures convexity
# We need to ensure that `dot(sets[i].a, x)` is maximal
# over the other pieces when x ∈ `pieces[i]`; see Proposition 5 of [R21].
#
# [R21] Raković, S. V.
# *Control Minkowski–Lyapunov functions*
# Automatica, Elsevier BV, 2021, 128, 109598
#
# In fact, we only need this to hold locally to prove
# convexity at each point, so we can just check every
# neighbor.
# Because <ai, x> = <aj, x> by the previous constraint,
# we can check the gradient.
@constraint(model, dot(sets[i].a, v) <= dot(sets[j].a, v), base_name="conv[$i,$j]")
end
end
end
end
if space == PrimalSpace
return set
else
@assert space == DualSpace
return Polyhedra.polar(set)
end
else
error("Non-symmetric polytope not supported yet")
end
end
function JuMP.value(h::Sets.PolarPoint)
return Sets.PolarPoint(JuMP.value.(h.a))
end
### Ellipsoid ###
struct Ellipsoid <: AbstractVariable
point::Union{Nothing, HintPoint}
symmetric::Bool
dimension::Union{Nothing, Int}
guaranteed_psd::Bool # Is it already guaranteed to be PSD? E.g., by nth_root
superset::Union{Sets.Ellipsoid, Nothing}
piecewise::Union{Polyhedra.Rep, Nothing}
end
function Ellipsoid(; point::Union{Nothing, HintPoint}=nothing,
symmetric::Bool=false,
dimension::Union{Int, Nothing}=nothing,
superset::Union{Sets.Ellipsoid, Nothing}=nothing,
piecewise::Union{Polyhedra.Rep, Nothing}=nothing)
function update_dim(object, dim_fun)
if object !== nothing
d = dim_fun(object)
if dimension === nothing
dimension = d
elseif dimension != d
throw(DimensionMismatch())
end
end
end
update_dim(point, point -> length(point.h))
update_dim(superset, Sets.dimension)
update_dim(piecewise, Polyhedra.fulldim)
return Ellipsoid(point, symmetric, dimension, false, superset, piecewise)
end
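# Usage sketch (as in this package's tests; `model` and the polytope `□`
# are assumed to exist): a symmetric ellipsoid variable constrained
# inside `□`, maximizing a volume proxy:
#
#     @variable(model, ◯, Ellipsoid(symmetric=true, dimension=2))
#     @constraint(model, ◯ ⊆ □)
#     @objective(model, Max, nth_root(volume(◯)))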
Sets.space_variables(::Ellipsoid) = nothing
function variable_set(model::JuMP.AbstractModel, ell::Ellipsoid, space::Space,
space_dimension, space_polyvars)
n = space_dimension
function new_Q()
# TODO: we should use a constrained variable instead in case direct mode is used.
Q = @variable(model, [1:n, 1:n], Symmetric, base_name="Q")
if !ell.guaranteed_psd
psd_constraint(model, Q)
end
return Q
end
if ell.symmetric
function new_piece()
if space == PrimalSpace
if ell.superset !== nothing
Q = Symmetric(new_Q() + ell.superset.Q)
else
Q = new_Q()
end
return Sets.Ellipsoid(Q)
else
ell.superset === nothing || error("superset not supported in dual space")
@assert space == DualSpace
return Sets.Ellipsoid(new_Q())
end
end
if ell.piecewise === nothing
set = new_piece()
else
hashyperplanes(ell.piecewise) && error("hyperplanes not supported for piecewise")
sets = [new_piece() for i in 1:nhalfspaces(ell.piecewise)]
set = Sets.Piecewise(sets, ell.piecewise)
DynamicPolynomials.@polyvar x[1:n]
q = [quad_form(set.Q, x) for set in sets]
for i in eachindex(set.graph)
for (j, v) in set.graph[i]
if i < j # The constraints are the same for (i, j) and (j, i)
@constraint(model, q[i] == q[j], domain = @set x'v == 0)
# v corresponds to `-n_ij` in LCSS paper
Δ = sets[i].Q * v - sets[j].Q * v
inter = set.pieces[i] ∩ set.pieces[j]
h = HalfSpace(Δ, zero(eltype(Δ)))
@constraint(model, inter ⊆ h)
end
end
end
end
if space == PrimalSpace
return set
else
return Polyhedra.polar(set)
end
else
ell.superset === nothing || error("superset not supported for non-symmetric Ellipsoid")
ell.piecewise === nothing || error("piecewise not supported for non-symmetric Ellipsoid")
if space == PrimalSpace
error("Non-symmetric ellipsoid non implemented yet, use `Ellipsoid(symmetric=true)`.")
else
@assert space == DualSpace
if ell.point === nothing
throw(ArgumentError("Specify a point for nonsymmetric ellipsoid, e.g. `Ellipsoid(point=InteriorPoint([1.0, 0.0]))"))
end
return polar_perspective_ellipsoid(model, new_Q(), ell.point,
data(model).perspective_polyvar,
space_polyvars)
end
end
end
function JuMP.value(ell::Sets.Ellipsoid)
return Sets.Ellipsoid(Symmetric(JuMP.value.(ell.Q)))
end
### PolySet ###
struct PolySet <: AbstractVariable
point::Union{Nothing, HintPoint}
symmetric::Bool
degree::Int
dimension::Union{Nothing, Int}
convex::Bool
variables::Union{Nothing, Vector{SpaceVariable}}
superset::Union{Sets.PolySet, Nothing}
basis::Type
end
function PolySet(; point::Union{Nothing, HintPoint}=nothing,
symmetric::Bool=false,
degree::Union{Int, Nothing}=nothing,
dimension::Union{Int, Nothing}=nothing,
convex::Bool=false,
variables::Union{Vector{SpaceVariable}, Nothing}=nothing,
superset::Union{Sets.PolySet, Nothing}=nothing,
basis::Type=MultivariateBases.MonomialBasis)
if degree === nothing
error("Degree of PolySet not specified, use PolySet(degree=..., ...)")
end
if isodd(degree)
throw(ArgumentError("Degree of PolySet not even"))
end
if dimension === nothing
if point !== nothing
dimension = length(point.h)
end
end
if superset !== nothing
if dimension === nothing
dimension = Sets.dimension(superset)
elseif dimension != Sets.dimension(superset)
throw(DimensionMismatch())
end
if variables === nothing
variables = Sets.space_variables(superset)
elseif variables != Sets.space_variables(superset)
error("Space variables set does not correspond to superset space variables.")
end
end
return PolySet(point, symmetric, degree, dimension, convex, variables, superset, basis)
end
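# Usage sketch (as in this package's tests): a convex homogeneous
# quartic sublevel-set variable:
#
#     @variable(model, ◯, PolySet(symmetric=true, degree=4, dimension=2,
#                                 convex=true))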
Sets.space_variables(p::PolySet) = p.variables
function constrain_convex(model, p, vars)
hessian = MP.differentiate(p, vars, 2)
# We do not just do `@constraint(model, p in SOSConvex())` as we would
# like to have access to the PSD matrix of variables for the det volume heuristic
y = [MP.similar_variable(eltype(hessian), gensym()) for i in 1:LinearAlgebra.checksquare(hessian)]
q = dot(y, hessian * y)
multipartite = SumOfSquares.Certificate.NewtonDegreeBounds(tuple(y,))
X = SumOfSquares.Certificate.monomials_half_newton_polytope(MP.monomials(q), multipartite)
# If `X` is empty, we will need the following bridge
JuMP.add_bridge(model, SumOfSquares.Bridges.Constraint.EmptyBridge)
# If `length(X)` is 2, we will need the following bridge
JuMP.add_bridge(model, SumOfSquares.Bridges.Constraint.PositiveSemidefinite2x2Bridge)
set = SumOfSquares.matrix_cone(MOI.PositiveSemidefiniteConeTriangle,
length(X))
Q = @variable(model, [1:MOI.dimension(set)])
@constraint(model, Q in set)
s = SumOfSquares.build_gram_matrix(
Q, MonomialBasis(X), MOI.PositiveSemidefiniteConeTriangle, Float64)
@constraint(model, q == s)
return MultivariateMoments.value_matrix(s)
end
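# Worked example (hand-computed, for intuition only): for p = x₁⁴ + x₂⁴
# the constrained form is q = ⟨y, ∇²p(x) y⟩ = 12x₁²y₁² + 12x₂²y₂², which
# is already a sum of squares, so the constraint is feasible; for a
# nonconvex p such as x₁⁴ - 3x₁²x₂² + x₂⁴ the Hessian form is indefinite
# and no such Gram matrix exists.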
function variable_set(model::JuMP.AbstractModel, set::PolySet, space::Space,
space_dimension, space_polyvars)
n = space_dimension
d = data(model)
# Generate all monomials of degree `degree`; we don't want monomials of
# lower degree as the polynomial is homogeneous
@assert iseven(set.degree)
if set.symmetric
monos = MP.monomials(space_polyvars, div(set.degree, 2))
else
monos = MP.monomials(lift_space_variables(d, space_polyvars),
div(set.degree, 2))
end
basis = MultivariateBases.basis_covering_monomials(set.basis, monos)
# TODO If `set.convex` and `set.symmetric`, no need for the poly to be SOS, see Lemma 6.33 of [BPT12]
p = @variable(model, variable_type=SOSPoly(basis))
if set.convex
set.superset === nothing || error("superset not supported for convex PolySet")
if set.symmetric
convexity_proof = constrain_convex(model, p, space_polyvars)
if space == PrimalSpace
return Sets.ConvexPolySet(set.degree, p, convexity_proof)
else
@assert space == DualSpace
return Polyhedra.polar(Sets.ConvexPolySet(set.degree, p, convexity_proof))
end
else
constrain_convex(model, MP.subs(p, d.perspective_polyvar => 1),
space_polyvars)
if space == PrimalSpace
error("Non-symmetric PolySet in PrimalSpace not implemented yet")
else
@assert space == DualSpace
if set.point === nothing
throw(ArgumentError("Specify a point for nonsymmetric polyset, e.g. `PolySet(point=InteriorPoint([1.0, 0.0]))"))
end
return perspective_dual_polyset(set.degree, p, set.point, d.perspective_polyvar, space_polyvars)
end
end
else
if set.symmetric
if space == PrimalSpace
if set.superset !== nothing
p = SetProg.SumOfSquares.gram_operate(+, set.superset.p, p)
end
return Sets.PolySet(set.degree, p)
else
error("Non-convex PolySet not supported in $space")
end
else
error("Non-convex nonsymmetric PolySet not implemented yet.")
end
end
end
_value(convexity_proof::Nothing) = nothing
function _value(convexity_proof::MultivariateMoments.SymMatrix)
return MultivariateMoments.SymMatrix(JuMP.value.(convexity_proof.Q),
convexity_proof.n)
end
function JuMP.value(set::Sets.PolySet)
return Sets.PolySet(set.degree, JuMP.value(set.p))
end
function JuMP.value(set::Sets.ConvexPolySet)
return Sets.ConvexPolySet(set.degree, JuMP.value(set.p), _value(set.convexity_proof))
end
function JuMP.value(set::Sets.Polar)
return Polyhedra.polar(JuMP.value(Polyhedra.polar(set)))
end
function JuMP.value(set::Sets.PerspectiveDual)
return Sets.perspective_dual(JuMP.value(Sets.perspective_dual(set)))
end
function JuMP.value(set::Sets.Householder)
return Sets.Householder(JuMP.value(set.set), JuMP.value(set.p), set.h,
set.z, set.x)
end
function JuMP.value(set::Sets.ShiftedEllipsoid)
return Sets.ShiftedEllipsoid(Symmetric(JuMP.value.(set.Q)),
JuMP.value.(set.b), JuMP.value(set.β))
end
function JuMP.value(set::Sets.ConvexPolynomialSet)
return Sets.ConvexPolynomialSet(set.degree, JuMP.value(set.q), set.z, set.x)
end
function JuMP.value(set::Sets.Piecewise)
return Sets.Piecewise(JuMP.value.(set.sets), set.polytope, set.pieces, set.graph)
end
### SetVariableRef ###
mutable struct SetVariableRef{M <: JuMP.AbstractModel,
S <: AbstractVariable} <: JuMP.AbstractVariableRef
model::M
set::S
name::String
# `variable` is typically `Sets.AbstractSet{JuMP.VariableRef}` but it can
# also be `Sets.AbstractSet{JuMP.AffExpr}` with `PolySet(superset = ...)`.
variable::Union{Nothing, Sets.AbstractSet}
space_index::Union{Nothing, SpaceIndex}
end
JuMP.name(vref::SetVariableRef) = vref.name
function JuMP.build_variable(_error::Function, info::JuMP.VariableInfo, set::AbstractVariable)
@assert !info.has_lb && !info.has_ub && !info.has_fix && !info.binary && !info.integer && !info.has_start
return set
end
function JuMP.add_variable(model::JuMP.AbstractModel, set::AbstractVariable, name::String)
vref = SetVariableRef(model, set, name, nothing, nothing)
d = data(model)
@assert d.state == Modeling
push!(d.variables, vref)
return vref
end
JuMP.value(vref::SetVariableRef) = JuMP.value(vref.variable)
function clear_spaces(vref::SetVariableRef)
vref.space_index = nothing
end
Sets.space_variables(::SetVariableRef) = nothing
Sets.perspective_variable(::SetVariableRef) = nothing
function create_spaces(vref::SetVariableRef, spaces::Spaces)
if vref.space_index === nothing
if Sets.space_variables(vref.set) === nothing
if vref.set.dimension === nothing
vref.space_index = new_space(spaces)
else
vref.space_index = new_space(spaces, vref.set.dimension)
end
else
vref.space_index = new_space(spaces, Sets.space_variables(vref.set))
end
end
return vref.space_index
end
space_index(vref::SetVariableRef) = vref.space_index
function load(model::JuMP.AbstractModel, vref::SetVariableRef)
d = data(model)
vref.variable = variable_set(model, vref.set, d.space,
space_dimension(d.spaces, vref.space_index),
space_polyvars(d.spaces, vref.space_index))
end
variablify(p::Sets.Projection) = Polyhedra.project(variablify(p.set), p.indices)
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 7290 | module Sets
using LinearAlgebra
using RecipesBase
import MultivariatePolynomials as MP
import DynamicPolynomials
import MultivariateBases as MB
const VariableOrder =
DynamicPolynomials.Commutative{DynamicPolynomials.CreationOrder}
const MonomialOrder = MP.Graded{MP.LexOrder}
const SpaceVariable = DynamicPolynomials.Variable{VariableOrder,MonomialOrder}
const MonoBasis = MB.MonomialBasis{
DynamicPolynomials.Monomial{VariableOrder,MonomialOrder},
DynamicPolynomials.MonomialVector{VariableOrder,MonomialOrder},
}
using Polyhedra
abstract type AbstractSet{T} end
"""
struct Polar{T, S<:AbstractSet{T}} <: AbstractSet{T}
set::S
end
The polar of the set `set`.
"""
struct Polar{T, S<:AbstractSet{T}} <: AbstractSet{T}
set::S
end
const PolarOf{S} = Polar{<:Any, S}
const PolarOrNot{S} = Union{S, PolarOf{S}}
"""
polar(set::AbstractSet)
Return the polar of `set`.
"""
Polyhedra.polar(set::AbstractSet) = Polar(set)
Polyhedra.polar(set::Polar) = set.set
function print_support_function(set::Polar; kws...)
print("h(S, x) =")
_print_gauge_function(polar(set); kws...)
end
"""
polar_representation(set::AbstractSet)
Return a representation of the same set but in the polar space of the current
representation of `set`.
"""
function polar_representation end
"""
gauge1(set::AbstractSet)
Function `f(x)` such that `set` is the 1-sublevel set of `f`, i.e.
`{ x | f(x) ≤ 1 }`.
"""
function gauge1 end
"""
struct PerspectiveDual{T, S <: AbstractSet{T}} <: AbstractSet{T}
set::S
end
Set determined by the dual of the perspective cone of `set`.
"""
struct PerspectiveDual{T, S <: AbstractSet{T}} <: AbstractSet{T}
set::S
end
const PerspectiveDualOf{S} = PerspectiveDual{<:Any, S}
const PerspectiveDualOrPolarOrNot{S} = Union{PerspectiveDualOf{S}, PolarOrNot{S}}
"""
perspective_dual(set::AbstractSet)
Return the set determined by the dual of the perspective cone of `set`.
"""
perspective_dual(set::AbstractSet) = PerspectiveDual(set)
perspective_dual(set::PerspectiveDual) = set.set
function scaling_function(set::PerspectiveDual)
@assert length(space_variables(set)) == 2
vars = [perspective_variable(set); space_variables(set)]
# z is a halfspace of the primal so a ray of the dual
z = [1.0, 0.0, 0.0]
in_set(Δ::Vector) = perspective_gauge0(set.set)(vars => z + Δ) < 0
@assert in_set(zeros(3))
return (Δz, Δx, Δy) -> begin
Δ = [Δz, Δx, Δy]
_in_set(λ::Real) = in_set(Δ * λ)
λ = 1.0
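# Exponential search: double λ until z + λΔ leaves the set.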
while _in_set(λ)
if λ > 1e10
error("Error in plotting : the `InteriorPoint` seems to be on the boundary")
end
λ *= 2
end
λmin = 0.0
λmax = λ
# Binary search. Invariant: in_set(λmin) and !in_set(λmax)
while abs(λmin - λmax) > 1e-8
λ = (λmin + λmax) / 2
if _in_set(λ)
λmin = λ
else
λmax = λ
end
end
λ = (λmin + λmax) / 2
return 1 / λ
end
end
# TODO rename space_dimension to avoid confusion with Polyhedra.dimension
"""
dimension(set::AbstractSet)
Return the dimension of the space where the set `set` is defined.
"""
dimension(set::AbstractSet) = length(space_variables(set))
function dimension(set::Union{Polar, PerspectiveDual})
return dimension(set.set)
end
"""
space_variables(set::AbstractSet)
Return the variables of the space where the set `set` is defined or `nothing`
if none are used.
"""
function space_variables end
function space_variables(set::Union{Polar, PerspectiveDual})
return space_variables(set.set)
end
function perspective_variable(set::Union{Polar, PerspectiveDual})
return perspective_variable(set.set)
end
function perspective_gauge0 end
function perspective_gauge1 end
convexity_proof(set::Union{Polar, PerspectiveDual}) = convexity_proof(set.set)
struct UnknownSet{T} <: AbstractSet{T} end
include("transformations.jl")
struct Piecewise{T, S<:AbstractSet{T}, U, Po <: Polyhedra.Polyhedron{U}, Pi} <: AbstractSet{T}
sets::Vector{S}
polytope::Po
pieces::Vector{Pi}
# TODO add adjacency graph in Polyhedra
graph::Vector{Vector{Tuple{Int, Vector{U}}}}
end
function Piecewise(sets::Vector{<:AbstractSet}, polytope::Polyhedra.Polyhedron{U}) where U
@assert length(sets) == nhalfspaces(polytope)
points = [Set(incidentpointindices(polytope, hidx)) for hidx in eachindex(halfspaces(polytope))]
graph = [Tuple{Int, Vector{U}}[] for i in eachindex(halfspaces(polytope))]
if !(Polyhedra.origin(Polyhedra.pointtype(polytope), Polyhedra.fulldim(polytope)) in polytope)
error("The origin is not in the polytope")
end
for (i, hi) in enumerate(halfspaces(polytope))
hi.β > 0 || error("The origin is not in the polytope")
vi = hi.a / hi.β
for (j, hj) in enumerate(halfspaces(polytope))
hi.β > 0 || error("The origin is not in the polytope")
vj = hj.a / hj.β
i == j && break
if length(points[i] ∩ points[j]) ≥ fulldim(polytope) - 1
push!(graph[i], (j, vj - vi))
push!(graph[j], (i, vi - vj))
end
end
end
function piece(i, h)
# Needs to be a polyhedron as `detecthlinearity` needs a solver in `add_constraint_inclusion_domain`
return Polyhedra.polyhedron(hrep([HalfSpace(edge[2], zero(U)) for edge in graph[i]]),
Polyhedra.DefaultLibrary{U}(Polyhedra.default_solver(polytope)))
end
pieces = [piece(i, h) for (i, h) in enumerate(halfspaces(polytope))]
return Piecewise(sets, polytope, pieces, graph)
end
dimension(set::Piecewise) = Polyhedra.fulldim(set.polytope)
space_variables(set::Piecewise) = space_variables(set.sets[1])
function scaling_function(set::Piecewise)
g = scaling_function.(set.sets)
return (x, y) -> begin
v = [x, y]
i = findfirst(piece -> v in piece, set.pieces)
return g[i](x, y)
end
end
function zero_eliminate(set::Piecewise, I)
_elim(p) = Polyhedra.fixandeliminate(p, I, zeros(Polyhedra.coefficient_type(p), length(I)))
J = setdiff(1:dimension(set), I)
return Piecewise(
[zero_eliminate(s, I) for s in set.sets],
_elim(set.polytope),
_elim.(set.pieces),
map(set.graph) do adj
map(adj) do iv
iv[1], iv[2][J]
end
end
)
end
function _print_gauge_function(set::Piecewise; digits=6)
DynamicPolynomials.@polyvar x[1:2]
println()
for (set, piece) in zip(set.sets, set.pieces)
print(" ")
_print_gauge_function(set, digits=digits)
print(" if ")
for (i, h) in enumerate(halfspaces(piece))
if i > 1
print(", ")
end
a = -h.a
if count(!iszero, a) == 1
a /= abs(sum(a)) # Simplify printing
end
if digits !== nothing
a = round.(a, digits=digits)
end
print(a'x)
print(" ≥ 0")
end
println()
end
end
include("polytope.jl")
include("ellipsoids.jl")
include("polynomials.jl")
include("recipe.jl")
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 4310 | abstract type AbstractEllipsoid{T} <: AbstractSet{T} end
dimension(ell::AbstractEllipsoid) = LinearAlgebra.checksquare(ell.Q)
struct HyperSphere <: AbstractEllipsoid{Bool}
dim::Int
end
dimension(sphere::HyperSphere) = sphere.dim
Polyhedra.polar(sphere::HyperSphere) = sphere
"""
struct Ellipsoid{T} <: AbstractEllipsoid{T}
Q::Symmetric{T, Matrix{T}}
end
"""
struct Ellipsoid{T} <: AbstractEllipsoid{T}
Q::Symmetric{T, Matrix{T}}
end
ellipsoid(ell::Ellipsoid) = ell
function Polyhedra.project(ell::Ellipsoid, I)
return project(polar_representation(ell), I)
end
convexity_proof(ell::Ellipsoid) = ell.Q
function scaling_function(ell::Ellipsoid)
@assert dimension(ell) == 2
Q = ell.Q
return (x, y) -> begin
val = x^2 * Q[1, 1] + 2x*y * Q[1, 2] + y^2 * Q[2, 2]
if -1e-8 < val < 0
# `sqrt` would error
return zero(float(val))
end
return sqrt(val)
end
end
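# e.g. (hand-computed) for the unit disc Q = I the gauge is the
# Euclidean norm:
#
#     g = scaling_function(Ellipsoid(Symmetric([1.0 0.0; 0.0 1.0])))
#     g(3.0, 4.0)  # 5.0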
function ellipsoid(ell::PolarOf{<:Ellipsoid})
ellipsoid(polar_representation(ell))
end
function polar_representation(ell::PolarOf{<:Ellipsoid})
Ellipsoid(inv(ell.set.Q))
end
function polar_representation(ell::Ellipsoid)
polar(Ellipsoid(inv(ell.Q)))
end
function zero_eliminate(ell::Ellipsoid, I)
J = setdiff(1:dimension(ell), I)
return Ellipsoid(Symmetric(ell.Q[J, J]))
end
function _print_gauge_function(ell::Ellipsoid; digits=6)
DynamicPolynomials.@polyvar x[1:2]
print(" ")
Q = ell.Q
if digits !== nothing
Q = round.(Q, digits=digits)
end
println(x' * Q * x)
end
struct LiftedEllipsoid{T}
P::Matrix{T}
end
dimension(ell::LiftedEllipsoid) = LinearAlgebra.checksquare(ell.P) - 1
function perspective_variable(ell::Union{Ellipsoid, LiftedEllipsoid})
return nothing
end
function space_variables(ell::Union{Ellipsoid, LiftedEllipsoid})
return nothing
end
function LiftedEllipsoid(t::Translation{<:Ellipsoid})
ell = t.set
md = ell.Q * t.c
δ = t.c' * md - 1
d = -md
D = ell.Q
return LiftedEllipsoid(_perspective_cat(D, d, δ))
end
function Bbβλ(P)
B, b, β = _perspective_split(P)
λ = dot(b, B \ b) - β
@assert λ >= 0
B, b, β, λ
end
function ellipsoid(ell::LiftedEllipsoid)
# P is
# λ * [c'Qc-1  -c'Q
#      -Qc       Q]
# Let P be [β b'; b B]
# We have
# β = λ c'Qc - λ
# b = -λ Qc <=> Q^{-1/2}b = -λ Q^{1/2} c
# hence
# λ c'Qc = β + λ
# λ^2 c'Qc = b'Q^{-1}b = λ b'B^{-1}b <=> λ c'Qc = b'B^{-1}b
# Hence λ = b'B^{-1}b - β
B, b, β, λ = Bbβλ(ell.P)
c = -(B \ b)
Q = B / λ
Translation(Ellipsoid(Symmetric(Q)), c)
end
function _perspective_split(P::AbstractMatrix)
n = LinearAlgebra.checksquare(P) - 1
ix = 1 .+ (1:n)
return P[ix, ix], P[1, ix], P[1, 1]
end
function _perspective_cat(D::AbstractMatrix, d::AbstractVector, δ)
return [δ d'
d D]
end
_perspective_cat(ell::Ellipsoid) = _perspective_cat(ell.Q, zeros(size(ell.Q, 1)), -1.0)
"""
struct ShiftedEllipsoid{T}
Q::Symmetric{T, Matrix{T}}
b::Vector{T}
β::T
end
Set ``\\{\\, x \\mid x^\\top Q x + 2 b^\\top x + \\beta \\le 0 \\,\\}``.
"""
struct ShiftedEllipsoid{T} <: AbstractEllipsoid{T}
Q::Symmetric{T, Matrix{T}}
b::Vector{T}
β::T
end
_perspective_cat(q::ShiftedEllipsoid) = _perspective_cat(q.Q, q.b, q.β)
convexity_proof(ell::ShiftedEllipsoid) = ell.Q
function LiftedEllipsoid(qc::HouseDualOf{<:AbstractEllipsoid})
return LiftedEllipsoid(inv(_perspective_cat(perspective_dual(qc))))
end
function ellipsoid(qc::HouseDualOf{<:AbstractEllipsoid})
return ellipsoid(LiftedEllipsoid(qc))
end
function Polyhedra.project(ell::HouseDualOf{<:AbstractEllipsoid},
I::AbstractVector)
return project(ellipsoid(ell), I)
end
function PerspectiveInteriorEllipsoid(ell::LiftedEllipsoid)
Pd = inv(ell.P)
H = _householder(h[state])
HPdH = H * Pd * H
# HPdH is not like a solution that would be obtained by solving the
# program since the λ computed for unlifting it may not be one.
# Therefore, the S-procedure's λ for the constraints will be different.
B, b, β, λ = Bbβλ(HPdH)
ps[state] = y' * H * _perspective_cat(B/λ, b/λ, β/λ) * H * y
error("TODO: LiftedEllipsoid -> PerspectiveInteriorEllipsoid")
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 3706 | using Polyhedra
using SumOfSquares
"""
struct PolySet{T, B, U} <: AbstractSet{U}
degree::Int
p::GramMatrix{T, B, U}
end
Set ``\\{\\, x \\mid p(x) \\le 1 \\,\\}`` where `p` is a homogeneous polynomial
of degree `degree`.
"""
struct PolySet{T, B, U} <: AbstractSet{U}
degree::Int
p::GramMatrix{T, B, U}
end
function space_variables(set::PolySet)
return MP.variables(set.p)
end
"""
struct ConvexPolySet{T, B, U} <: AbstractSet{U}
degree::Int
p::GramMatrix{T, B, U}
convexity_proof::Union{Nothing, SumOfSquares.SymMatrix{T}} # may be nothing after applying LinearMap
end
Set ``\\{\\, x \\mid p(x) \\le 1 \\,\\}`` where `p` is a homogeneous polynomial
of degree `degree`.
"""
struct ConvexPolySet{T, B, U} <: AbstractSet{U}
degree::Int
p::GramMatrix{T, B, U}
convexity_proof::Union{Nothing, SumOfSquares.SymMatrix{T}} # may be nothing after applying LinearMap
end
function ConvexPolySet(
degree::Int,
p::GramMatrix{T, B, U},
convexity_proof::SumOfSquares.SymMatrix{T}) where {T, B, U}
return ConvexPolySet{T, B, U}(degree, p, convexity_proof)
end
function ConvexPolySet(
degree::Int,
p::GramMatrix{S},
convexity_proof::SumOfSquares.SymMatrix{T}) where {S, T}
U = promote_type(S, T)
_convert(mat) = SumOfSquares.SymMatrix(convert(Vector{U}, mat.Q), mat.n)
return ConvexPolySet(
degree, GramMatrix(_convert(p.Q), p.basis), _convert(convexity_proof))
end
function space_variables(set::ConvexPolySet)
return MP.variables(set.p)
end
function dimension(set::ConvexPolySet)
return MP.nvariables(set.p)
end
function gauge1(set::ConvexPolySet)
return set.p
end
function zero_eliminate(set::ConvexPolySet, I)
vars = space_variables(set)[I]
K = findall(mono -> all(var -> iszero(MP.degree(mono, var)), vars),
set.p.basis.monomials)
Q = SumOfSquares.square_getindex(set.p.Q, K)
monos = set.p.basis.monomials[K]
J = setdiff(1:dimension(set), I)
monos = DynamicPolynomials.MonomialVector(
monos.vars[J],
Vector{Int}[z[J] for z in monos.Z]
)
p = SumOfSquares.GramMatrix(Q, MB.MonomialBasis(monos))
return ConvexPolySet(set.degree, p, nothing)
end
convexity_proof(set::ConvexPolySet) = set.convexity_proof
function scaling_function(set::Union{PolySet, ConvexPolySet})
@assert dimension(set) == 2
# We convert the GramMatrix to a polynomial to avoid having to do the
# conversion for every substitution.
p = MP.polynomial(set.p)
vars = MP.variables(p)
@assert length(vars) == 2
vx, vy = vars
return (x, y) -> p(vx => x, vy => y)^(1 / set.degree)
end
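# e.g. (hand-computed) if MP.polynomial(set.p) is x⁴ + y⁴ and
# set.degree == 4, the returned function is (x, y) -> (x⁴ + y⁴)^(1/4),
# which equals 1 exactly on the boundary of the unit sublevel set.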
function _print_gauge_function(set::ConvexPolySet; digits=6)
print(" ")
p = MP.polynomial(set.p)
if digits !== nothing
p = round(p, digits=digits)
end
println(p)
end
"""
struct ConvexPolynomialSet{T, B, U} <: AbstractSet{U}
degree::Int
q::GramMatrix{T, B, U}
z::SpaceVariable
x::Vector{SpaceVariable}
end
Set ``\\{\\, (z, x) \\mid p(z, x) \\le 0 \\,\\}`` or
``H \\{\\, (z, x) \\mid q(z, x) \\le z^{\\texttt{degree}} \\,\\}`` where `p` and
`q` are homogeneous polynomials of degree `degree` and `H` is a householder
matrix.
"""
struct ConvexPolynomialSet{T, B, U} <: AbstractSet{U}
degree::Int
q::GramMatrix{T, B, U}
z::SpaceVariable
x::Vector{SpaceVariable}
end
perspective_gauge0(set) = set.q - set.z^set.degree
perspective_variable(set::ConvexPolynomialSet) = set.z
space_variables(set::ConvexPolynomialSet) = set.x
function gauge1(set::ConvexPolynomialSet{T}) where T
return MP.subs(set.q, perspective_variable(set) => one(T))
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 502 | struct PolarPoint{T} <: AbstractSet{T}
a::Vector{T}
end
dimension(h::PolarPoint) = length(h.a)
space_variables(::PolarPoint) = nothing
function scaling_function(h::PolarPoint)
@assert dimension(h) == 2
return (x, y) -> begin
return h.a[1] * x + h.a[2] * y
end
end
function _print_gauge_function(h::PolarPoint; digits=6)
DynamicPolynomials.@polyvar x[1:2]
print(" ")
a = h.a
if digits !== nothing
a = round.(a, digits=6)
end
println(x' * a)
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 3707 | """
primal_contour(f::Function, npoints::Int)
Return `npoints` points with equally spaced angles of the 1-sublevel set of the
homogeneous function `f(x, y)`.
"""
function primal_contour(f::Function, npoints::Int)
x = Vector{Float64}(undef, npoints)
y = Vector{Float64}(undef, npoints)
for (i, α) in enumerate(range(0, stop=2π - 2π/npoints, length=npoints))
x0 = cos(α)
y0 = sin(α)
r = f(x0, y0)
# f is homogeneous so f(x0/r, y0/r) = 1
x[i] = x0 / r
y[i] = y0 / r
end
return x, y
end
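# e.g. (up to floating-point noise) `primal_contour(hypot, 4)` samples
# the unit circle at angles 0, π/2, π and 3π/2 and returns
# ([1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]).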
"""
dual_contour(f::Function, nhalfspaces::Int, T::Type)
Return a polytope of `nhalfspaces` halfspaces defined by normal vectors of
equally spaced angles for the polar of the 1-sublevel set of the homogeneous
function `f(x, y)`.
"""
function dual_contour(f::Function, nhalfspaces::Int, ::Type{T},
point::Vector{T} = [0.0, 0.0],
x_axis::Vector{T} = [1.0, 0.0],
y_axis::Vector{T} = [0.0, 1.0],
cone = false) where T
h = hrep(Polyhedra.HyperPlane{T, Vector{T}}[],
Polyhedra.HalfSpace{T, Vector{T}}[], d=length(x_axis))
for α in range(0, stop=2π - 2π/nhalfspaces, length=nhalfspaces)
ray = x_axis * cos(α) + y_axis * sin(α)
λ = f(ray...)
# We have f(ray/λ) = 1 so the halfspace is
# (point + ray / λ) ⋅ x ≤ 1 for non-cone
# (point + ray / λ) ⋅ x ≥ 0 for cone
a = point + ray / λ
intersect!(h, HalfSpace(cone ? -a : a, cone ? zero(T) : one(T)))
end
return polyhedron(h, Polyhedra.DefaultLibrary{T}(Polyhedra.OppositeMockOptimizer))
end
function Polyhedra.planar_contour(sphere::HyperSphere; npoints=64)
@assert dimension(sphere) == 2
return primal_contour((x, y) -> sqrt(x^2 + y^2), npoints)
end
function Polyhedra.planar_contour(ell::PerspectiveDualOrPolarOrNot{<:AbstractEllipsoid};
kws...)
return Polyhedra.planar_contour(ellipsoid(ell); kws...)
end
function Polyhedra.planar_contour(set::Union{Ellipsoid, PolySet, ConvexPolySet,
Piecewise};
npoints=64)
return primal_contour(scaling_function(set), npoints)
end
function Polyhedra.planar_contour(set::PolarOf{<:Union{Piecewise{U}, ConvexPolySet{T, B, U}}};
npoints=64) where {T, B, U}
return Polyhedra.planar_contour(dual_contour(scaling_function(Polyhedra.polar(set)),
npoints, U))
end
function Polyhedra.planar_contour(set::PerspectiveDual{T, <:Householder};
npoints=64) where T
@assert dimension(set) == 2
# z is a halfspace of the primal so a ray of the dual
z = [1.0, 0.0, 0.0]
h1, h2 = set.set.h
# a is a ray of the primal so a halfspace of the dual
a = [1, h1, h2]
b = [h1, -1, 0]
@assert abs(dot(a, b)) < 1e-8
c = [h2 / (1 + h1^2), h1*h2 / (1 + h1^2), -1]
@assert abs(dot(b, c)) < 1e-8
@assert abs(dot(a, c)) < 1e-8
polyhedron = dual_contour(scaling_function(set), npoints, T,
z, b, c, true)
# We fix z to 1.0 and eliminate it, this is cheap for H-rep
return Polyhedra.planar_contour(fixandeliminate(polyhedron, 1, 1.0))
end
function Polyhedra.planar_contour(t::Translation; kws...)
@assert dimension(t) == 2
x, y = Polyhedra.planar_contour(t.set; kws...)
return x .+ t.c[1], y .+ t.c[2]
end
@recipe function f(set::AbstractSet; npoints=64)
@assert dimension(set) == 2
seriestype --> :shape
legend --> false
Polyhedra.planar_contour(set; npoints=npoints)
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 2663 | struct Projection{S, I}
set::S
indices::I
end
Polyhedra.project(set, indices) = Projection(set, indices)
function space_variables(p::Projection)
vars = space_variables(p.set)
if vars === nothing
return nothing
else
return vars[p.indices]
end
end
dimension(p::Projection) = length(p.indices)
# A^{-1} * S
struct LinearImage{S, T, MT <: AbstractMatrix{T}} <: AbstractSet{T}
set::S
A::MT
end
perspective_variable(li::LinearImage) = perspective_variable(li.set)
space_variables(::LinearImage) = nothing
dimension(li::LinearImage) = size(li.A, 1)
# A^{-1} * S
struct LinearPreImage{S, T, MT <: AbstractMatrix{T}} <: AbstractSet{T}
set::S
A::MT
end
# S + c
struct Translation{S, T, VT <: AbstractVector{T}} <: AbstractSet{T}
set::S
c::VT
end
dimension(t::Translation) = length(t.c)
space_variables(t::Translation) = space_variables(t.set)
function Polyhedra.project(t::Translation, I)
return Translation(Polyhedra.project(t.set, I), t.c[I])
end
_perspective_cat(x::AbstractVector, z) = [z; x]
_perspective_split(xz::AbstractVector) = xz[2:end], xz[1]
"""
householder(x)
Householder reflection
```math
I - 2 v v^T / (v^T v)
```
It is symmetric and orthogonal.
"""
function householder(x)
y = copy(x)
t = LinearAlgebra.reflector!(y)
v = _perspective_cat(_perspective_split(y)[1], 1)
I - t * v * v'
end
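# Sanity sketch (hand-checked, hypothetical values): the result is a
# reflection, hence symmetric and involutive, and it maps `x` onto the
# first axis up to sign:
#
#     H = householder([1.0, 2.0, 2.0])
#     H ≈ H'              # symmetric
#     H * H ≈ I           # orthogonal involution
#     abs.(H * [1.0, 2.0, 2.0]) ≈ [3.0, 0.0, 0.0]  # norm(x) == 3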
_householder(h) = householder(_perspective_cat(h, 1)) # We add 1, for perspective variable z
struct Householder{T, S <: AbstractSet{T}, U} <: AbstractSet{T}
set::S
p::DynamicPolynomials.Polynomial{VariableOrder,MonomialOrder,U}
h::Vector{Float64}
z::SpaceVariable
x::Vector{SpaceVariable}
end
perspective_gauge0(set::Householder) = set.p
perspective_variable(set::Householder) = set.z
space_variables(set::Householder) = set.x
convexity_proof(set::Householder) = convexity_proof(set.set)
const HouseDualOf{S, T, U} = PerspectiveDualOf{Householder{T, S, U}}
function Polyhedra.project(set::Polar{T}, I) where T
return Polyhedra.polar(zero_eliminate(Polyhedra.polar(set), setdiff(1:dimension(set), I)))
end
function zero_eliminate(set::Householder{T}, I) where T
J = setdiff(1:dimension(set), I)
p = MP.subs(set.p, set.x[I] => zeros(T, length(I)))
return Householder(UnknownSet{T}(), p, set.h[J], set.z, set.x[J])
end
function Polyhedra.project(set::PerspectiveDual, I)
return perspective_dual(zero_eliminate(perspective_dual(set),
setdiff(1:dimension(set), I)))
end
function _perspective_cat(set::Householder)
H = _householder(set.h)
return H * _perspective_cat(set.set) * H
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 1846 | using Test, LinearAlgebra
using DynamicPolynomials
using SetProg
using Polyhedra
@testset "L1 heuristic" begin
@polyvar x y
p = 2x^2*y + 3x + 4y^2 - 5x^4*y^2 + 6x^2
@test SetProg.rectangle_integrate(p, [2, 3]) ≈ -672
Q = [6 5 4
5 3 2
4 2 1]
q = SetProg.GramMatrix(Q, SetProg.MP.monomials([x, y], 2))
@test SetProg.rectangle_integrate(polynomial(q), [2, 3]) ≈ 3465.6
@test SetProg.rectangle_integrate(polynomial(q), [1, 1]) ≈ 472/45
subset = SetProg.Sets.PolySet(4, q)
v_rep(a, b) = polyhedron(convexhull([a, b], [-a, -b], [-a, b], [a, -b]))
h_rep(a, b) = polyhedron(HalfSpace([1, 0], a) ∩ HalfSpace([-1, 0], a) ∩
HalfSpace([0, 1], b) ∩ HalfSpace([ 0, -1], b))
v_square = v_rep(1.0, 1.0)
h_square = h_rep(1.0, 1.0)
for square in [v_square, h_square]
set = SetProg.Sets.Piecewise([subset, subset, subset, subset], square)
@test SetProg.l1_integral(set, nothing) ≈ 472/45
end
Q = [1 2
2 3]
@test SetProg.rectangle_integrate(polynomial(Q, [x, y]), [2, 3]) ≈ 248
@test SetProg.rectangle_integrate(polynomial(Q, [x, y]), [1, 1]) ≈ 16/3
ell = SetProg.Sets.Ellipsoid(Symmetric(Q))
for square in [v_square, h_square]
set = SetProg.Sets.Piecewise([ell, ell, ell, ell], square)
@test SetProg.l1_integral(set, nothing) ≈ 16/3
end
Δ = polyhedron(convexhull([0.0, 0.0], [1.0, 0.0], [1.0, 0.5]))
Q = [0.0 0.5
0.5 0.0]
ell = SetProg.Sets.Ellipsoid(Symmetric(Q))
@test SetProg.l1_integral(ell, Δ) ≈ 1/32
Q = [1.0 0.0
0.0 0.0]
ell = SetProg.Sets.Ellipsoid(Symmetric(Q))
@test SetProg.l1_integral(ell, Δ) ≈ 1/8
Q = [0.0 0.0
0.0 1.0]
ell = SetProg.Sets.Ellipsoid(Symmetric(Q))
@test SetProg.l1_integral(ell, Δ) ≈ 1/96
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 1679 | using Test
using LinearAlgebra
using DynamicPolynomials
using SetProg, SetProg.Sets
@testset "Sets" begin
@testset "ConvexPolySet" begin
@polyvar x y
P = SetProg.SumOfSquares.SymMatrix(Float64[1, 2, 3], 2)
Q = SetProg.SumOfSquares.SymMatrix(BigInt[2, 3, 4], 2)
basis = SetProg.Sets.MonoBasis(monomial_vector([x, y]))
q = SetProg.Sets.ConvexPolySet(2, SetProg.SumOfSquares.GramMatrix(P, basis), Q)
@test q isa SetProg.Sets.ConvexPolySet{BigFloat}
end
@testset "zero_eliminate" begin
@polyvar x y z
p = SetProg.GramMatrix{Float64}((i, j) -> convert(Float64, 8 - (i + j)),
monomial_vector([x, y, z]))
set = Sets.ConvexPolySet(2, p, nothing)
el = Sets.zero_eliminate(set, 1:2)
@test el.p.Q == 6ones(1, 1)
el = Sets.zero_eliminate(set, 3:3)
@test el.p.Q == [4 3; 3 2]
el = Sets.zero_eliminate(set, 2:2)
@test el.p.Q == [6 4; 4 2]
@testset "Householder" begin
p = SetProg.GramMatrix{Float64}((i, j) -> convert(Float64, 6 - (i + j)),
monomial_vector([x, y]))
set = SetProg.perspective_dual_polyset(2, p, SetProg.InteriorPoint(zeros(2)), z, [x, y])
@test set.set.p == 2x^2 + 6x*y + 4y^2 - z^2
@test set.set.h == zeros(2)
@test set.set.x == [x, y]
@test Sets.gauge1(set.set.set) == 2x^2 + 6x*y + 4y^2
set2 = Sets.project(set, [2])
@test set2.set.p == 4y^2 - z^2
@test set2.set.h == zeros(1)
@test set2.set.x == [y]
end
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 4704 | using LinearAlgebra
using Test
using SetProg
using Polyhedra
using JuMP
@testset "Algebraic" begin
□ = polyhedron(HalfSpace([1, 0], 1.0) ∩ HalfSpace([-1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([0, -1], 1))
@testset "Ellipsoid" begin
@testset "John" begin
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
# Q = [1 0
#      0 1]
# t = √det(Q) = 1
# solution layout: [Q11, Q12, Q22, t]
MOI.Utilities.set_mock_optimize!(mock, mock -> MOI.Utilities.mock_optimize!(mock, [1.0, 0.0, 1.0, 1.0]));
@variable(model, ◯, Ellipsoid(symmetric=true, dimension=2))
cref = @constraint(model, ◯ ⊆ □)
@objective(model, Max, nth_root(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) == 1.0
@test JuMP.value(◯) isa SetProg.Sets.PolarEllipsoid
@test JuMP.value(◯).Q == Symmetric([1.0 0.0; 0.0 1.0])
end
@testset "Löwner" begin
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
# Q = [√2 0
#      0  √2]
# t = √det(Q) = 2
# solution layout: [Q11, Q12, Q22, t]
MOI.Utilities.set_mock_optimize!(mock, mock -> MOI.Utilities.mock_optimize!(mock, [0.5, 0.0, 0.5, 0.5]));
@variable(model, ◯, Ellipsoid(symmetric=true, dimension=2))
cref = @constraint(model, □ ⊆ ◯)
@objective(model, Min, nth_root(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) == 0.5
@test JuMP.value(◯) isa SetProg.Sets.Ellipsoid
@test JuMP.value(◯).Q == Symmetric([0.5 0.0; 0.0 0.5])
end
end
@testset "Quartic" begin
@testset "Inner" begin
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
# The PSD matrix for the variable is 3 x 3 so 3 * (3+1) / 2 = 6
# The PSD matrix for the convexity is 6 x 6 so 6 * (6+1) / 2 = 21
# entries
# 1 variable for t
# hence 28 variables
MOI.Utilities.set_mock_optimize!(mock, mock -> MOI.Utilities.mock_optimize!(mock, ones(28)))
@variable(model, ◯, PolySet(symmetric=true, degree=4, dimension=2,
convex=true))
cref = @constraint(model, ◯ ⊆ □)
@objective(model, Max, nth_root(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) == 1.0
@test JuMP.value(◯) isa SetProg.Sets.PolarPolySet{Float64}
@test JuMP.value(◯).degree == 4
x, y = SetProg.data(model).polyvars
@test polynomial(JuMP.value(◯).p) == x^4 + 2x^3*y + 3x^2*y^2 + 2x*y^3 + y^4
@test JuMP.value(◯).convexity_proof.n == 6
@test JuMP.value(◯).convexity_proof.Q == ones(21)
@test JuMP.objective_value(model) == 1.0
end
@testset "Outer" begin
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
MOI.Utilities.set_mock_optimize!(mock, mock -> MOI.Utilities.mock_optimize!(mock, ones(28)))
@variable(model, ◯, PolySet(symmetric=true, degree=4, dimension=2,
convex=true))
cref = @constraint(model, □ ⊆ ◯)
@objective(model, Min, nth_root(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) == 1.0
@test JuMP.value(◯) isa SetProg.Sets.PolySet{Float64}
@test JuMP.value(◯).degree == 4
x, y = SetProg.data(model).polyvars
@test polynomial(JuMP.value(◯).p) == x^4 + 2x^3*y + 3x^2*y^2 + 2x*y^3 + y^4
@test JuMP.value(◯).convexity_proof.n == 6
@test JuMP.value(◯).convexity_proof.Q == ones(21)
end
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 747 | using Test
using SetProg
@testset "apply_matrix" begin
SetProg.DynamicPolynomials.@polyvar x[1:2]
SetProg.DynamicPolynomials.@polyvar y[1:2]
SetProg.DynamicPolynomials.@polyvar z[1:1]
Q = [6 5 4
5 3 2
4 2 1]
q = SetProg.GramMatrix(Q, SetProg.MP.monomials(x, 2))
p = SetProg.MP.polynomial(q)
@testset "2x2 Float64" begin
B = [2.0 3.0
4.0 5.0]
qB = SetProg.apply_matrix(q, B, y, 2)
@test qB isa SetProg.GramMatrix{Float64}
@test qB == p(x => B * y)
end
@testset "2x1 Int" begin
A = reshape([2, 3], 2, 1)
qA = SetProg.apply_matrix(q, A, z, 2)
@test qA isa SetProg.GramMatrix{Int}
@test qA == p(x => A * z)
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 673 | using SetProg
using Polyhedra
using JuMP
const MOI = JuMP.MOI
□ = polyhedron(HalfSpace([1, 0], 1.0) ∩ HalfSpace([-1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([0, -1], 1))
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
# Q = [1 0
#      0 1]
# t = √det(Q) = 1
# solution layout: [Q11, Q12, Q22, t]
MOI.Utilities.set_mock_optimize!(mock, mock -> MOI.Utilities.mock_optimize!(mock, [1.0, 0.0, 1.0, 1.0]));
@variable(model, ◯, PolySet(degree=4, dimension=2, convex=true))
cref = @constraint(model, ◯ ⊆ □)
@objective(model, Max, nth_root(volume(◯)))
SetProg.optimize!(model)
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 3285 | using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
using JuMP
@testset "Controlled invariant" begin
config = MOI.Test.Config()
@testset "Ellipsoid" begin
# Q = [1 0
#      0 1]
# t = √det(Q) = 1
Q = [1.0, -1/4, 1.0]
t = √15/4
@testset "Homogeneous" begin
Tests.ci_ell_homogeneous_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; t])),
config)
end
@testset "Non-homogeneous" begin
@testset "Ellipsoid" begin
β = -1.0
b = [0.0, 0.0]
Tests.ci_ell_nonhomogeneous_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; β; b; t])),
config)
end
@testset "PolySet" begin
β = -1.0
b = [0.0, 0.0]
Q = [1.0, -0.5, 1.0]
Tests.ci_quad_nonhomogeneous_test(bridged_mock(mock -> begin
#  Q[3]  Q[2]  b[2]
#   .    Q[1]  b[1]
#   .     .    β+1
MOI.Utilities.mock_optimize!(mock, [Q; b; β+1; 2Q])
end),
config)
end
end
@testset "Piecewise" begin
@testset "Diamond/2D cross-polytope" begin
Q1 = [1.0, -0.25, 1.0]
Q2 = [1.0, -1.0, 1.0]
Tests.ci_piecewise_semiell_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(
mock,
[Q1; Q2; Q2; Q1; collect(1:MOI.get(mock, MOI.NumberOfVariables()) - 12)])),
config)
end
@testset "mci" begin
Q1 = [1.0, -1.0, 1.0]
Q2 = [1.0, 0.0, 0.0]
Q3 = [0.25, 0.5, 1.0]
# 32-bit is failing: https://github.com/blegat/SetProg.jl/runs/4384813153?check_suite_focus=true
if Sys.WORD_SIZE != 32
Tests.ci_piecewise_semiell_mci_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(
mock,
[Q3; Q2; Q3; Q2; Q1; Q1; zeros(MOI.get(mock, MOI.NumberOfVariables()) - 18)],
)),
config,
)
end
end
end
end
@testset "Quartic" begin
# The PSD matrix for the variable is 3 x 3 so 3 * (3+1) / 2 = 6
# The PSD matrix for the convexity is 6 x 6 so 6 * (6+1) / 2 = 21
# entries
# 1 variable for t
# hence 28 variables
ci_quartic_α = -1/8
ci_quartic_β = 1/4
ci_quartic_hess = 6 * [2.0, ci_quartic_α, 2.0, ci_quartic_α, 2.0,
2.0, 2.0, ci_quartic_α, ci_quartic_α, 2.0]
sol = [1.0; ci_quartic_α; 6 - 2ci_quartic_β; ci_quartic_β; ci_quartic_α; 1.0;
ci_quartic_hess]
Tests.ci_quartic_homogeneous_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, sol)),
config)
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 1908 | using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
using JuMP
@testset "Invariant" begin
config = MOI.Test.Config()
@testset "Maximal" begin
# Q = [1 0
#      0 1]
# t = √det(Q) = 1
Q = [1.0, 0.0, 1.0]
t = 1.0
@testset "Homogeneous" begin
@testset "Ellipsoid" begin
Tests.maximal_invariant_ell_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; t])),
config)
end
@testset "Convex Quadratic" begin
Tests.maximal_convex_invariant_quad_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [1.0, 0.0, 1.0, 2.0, 0.0, 2.0, 2.0])),
config)
end
end
end
@testset "Minimal" begin
# Q = [0.5 0
#      0   0.5]
# t = √det(Q) = 0.5
Q = [0.5, 0.0, 0.5]
t = 0.5
@testset "Homogeneous" begin
@testset "Ellipsoid" begin
Tests.minimal_invariant_ell_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; t])),
config)
end
@testset "Quadratic" begin
@testset "Non-convex" begin
Tests.minimal_invariant_quad_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [0.5, 0.0, 0.5])),
config)
end
@testset "Convex" begin
Tests.minimal_convex_invariant_quad_homogeneous_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [[0.5, 0.0, 0.5]; 2Q; 2t])),
config)
end
end
end
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
["MIT"] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 179 | include("Tests/Tests.jl")
include("utilities.jl")
using Test, JuMP
include("square.jl")
include("invariant.jl")
include("controlled_invariant.jl")
include("switched_system.jl")
include("solver_preamble.jl")
using MosekTools
optimizer_constructor = optimizer_with_attributes(Mosek.Optimizer, "QUIET" => true)
config = MOI.Test.Config(atol=1e-3, rtol=1e-3)
@testset "Mosek" begin
@testset "Square" begin
Tests.square_test(optimizer_constructor, config)
end
@testset "Invariant in Square" begin
Tests.invariant_test(optimizer_constructor, config)
end
@testset "Controlled Invariant in Square" begin
Tests.ci_test(optimizer_constructor, config)
end
@testset "Switched System" begin
Tests.switched_system_test(optimizer_constructor, config)
end
end
using LinearAlgebra
using Test
using RecipesBase
using DynamicPolynomials
using SetProg, SetProg.Sets
RecipesBase.is_key_supported(k::Symbol) = true # Plots normally defines this
function recipe(set, npoints)
result = RecipesBase.apply_recipe(Dict{Symbol, Any}(:npoints => npoints), set)
return result[1].args
end
function recipe_test(set, exp_x, exp_y, npoints=4)
x, y = recipe(set, npoints)
@test x ≈ exp_x
@test y ≈ exp_y
end
@testset "Recipe" begin
Q = [1.0 0.0; 0.0 1.0]
@testset "Ellipsoid" begin
@testset "Circle" begin
for circle in [Sets.HyperSphere(2),
Sets.Ellipsoid(Symmetric(Q))]
recipe_test(circle,
[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0])
recipe_test(Sets.polar(circle),
[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0])
end
end
@testset "Shifted Circle" begin
for circle in [Sets.HyperSphere(2),
Sets.Ellipsoid(Symmetric(Q))]
shifted = Sets.Translation(circle, [1.0, 2.0])
recipe_test(shifted, [2.0, 1.0, 0.0, 1.0], [2.0, 3.0, 2.0, 1.0])
end
end
@testset "Scaled circle" begin
scaled_circle = Sets.Ellipsoid(Symmetric(2Q))
recipe_test(scaled_circle,
[1/√2, 0.0, -1/√2, 0.0], [0.0, 1/√2, 0.0, -1/√2])
recipe_test(Sets.polar(scaled_circle),
[√2, 0.0, -√2, 0.0], [0.0, √2, 0.0, -√2])
end
end
@testset "Polynomial" begin
@polyvar x y z
@testset "Circle" begin
p = SetProg.GramMatrix{Float64}((i, j) -> convert(Float64, i == j),
monomial_vector([x, y]))
circle = Sets.PolySet(2, p)
recipe_test(circle,
[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0])
end
@testset "Convex Circle" begin
p = SetProg.GramMatrix{Float64}((i, j) -> convert(Float64, i == j),
monomial_vector([x, y]))
circle = Sets.ConvexPolySet(2, p, nothing)
recipe_test(circle,
[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0])
recipe_test(Sets.polar(circle),
[-1.0, -1.0, 1.0, 1.0, -1.0],
[-1.0, 1.0, 1.0, -1.0, -1.0])
end
@testset "Scaled circle" begin
p = SetProg.GramMatrix{Float64}((i, j) -> 2convert(Float64, i == j),
monomial_vector([x, y]))
circle = Sets.ConvexPolySet(2, p, nothing)
recipe_test(circle,
[1/√2, 0.0, -1/√2, 0.0], [0.0, 1/√2, 0.0, -1/√2])
recipe_test(Sets.polar(circle),
[-√2, -√2, √2, √2, -√2],
[-√2, √2, √2, -√2, -√2])
end
@testset "Non-homogeneous Circle" begin
@testset "Basic" begin
q = SetProg.GramMatrix(Float64[0 0 0
0 1 0
0 0 1], monomial_vector([z, y, x]))
shifted_circle = SetProg.perspective_dual_polyset(2, q, SetProg.InteriorPoint(zeros(2)), z, [x, y])
recipe_test(shifted_circle,
[-1.0, -1.0, 1.0, 1.0, -1.0],
[-1.0, 1.0, 1.0, -1.0, -1.0])
end
@testset "Scaled" begin
q = SetProg.GramMatrix(Float64[0 0 0
0 2 0
0 0 2], monomial_vector([z, y, x]))
shifted_circle = SetProg.perspective_dual_polyset(2, q, SetProg.InteriorPoint(zeros(2)), z, [x, y])
recipe_test(shifted_circle,
[-√2, -√2, √2, √2, -√2],
[-√2, √2, √2, -√2, -√2])
end
@testset "z-Scaled" begin
# z: -1/2 + 1 = 1/2
q = SetProg.GramMatrix([1/2 0 0
0 1 0
0 0 1], monomial_vector([z, y, x]))
shifted_circle = SetProg.perspective_dual_polyset(2, q, SetProg.InteriorPoint(zeros(2)), z, [x, y])
recipe_test(shifted_circle,
[-√2, -√2, √2, √2, -√2],
[-√2, √2, √2, -√2, -√2])
end
end
end
@testset "Piecewise" begin
polytope = polyhedron(HalfSpace([1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([-1, 1], 1) ∩ HalfSpace([-1, -1], 1) ∩ HalfSpace([1, -1], 1), lib)
Q_1 = [1.0 0.0
0.0 0.0]
Q_2 = [0.0 0.0
0.0 1.0]
Q_3 = [1.0 0.0
0.0 1.0]
Q_4 = [1.0 1.0
1.0 1.0]
Q_5 = [ 1.0 -0.5
-0.5 1.0]
set = Sets.Piecewise(Sets.Ellipsoid.(Symmetric.([Q_1, Q_2, Q_3, Q_4, Q_5])), polytope)
recipe_test(
set,
[1.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, -1.0]
)
recipe_test(
set,
[1.0, 1.0, 0.0, -1/√2, -1.0, -0.5, -0.0, 1/√3],
[0.0, 1.0, 1.0, 1/√2, 0.0, -0.5, -1.0, -1/√3],
8
)
α = 0.4142135623730951
β = 0.732050807568877
recipe_test(
Sets.polar(set),
[-1.0, -1.0, -α, 0.0, 1.0, 1.0, β, -1.0],
[-1.0, α, 1.0, 1.0, 0.0, -β, -1.0, -1.0],
8)
end
end
import GLPK
const lp_solver = GLPK.Optimizer
import Polyhedra
const lib = Polyhedra.DefaultLibrary{Float64}(lp_solver)
include("Sets.jl")
include("apply.jl")
include("variables.jl")
include("L1_heuristic.jl")
include("recipes.jl")
include("mock_tests.jl")
# In `solver_tests.jl` we want to load the preamble only once but if we load
# several times, say from `csdp_tests.jl` in the REPL, it may be because we modified
# the tests in between, so we want to reload them
if !(@isdefined shared_preamble) || !shared_preamble
include("Tests/Tests.jl")
using Test, JuMP
end
import Pkg
using Test
function solver_test(name::Symbol)
if string(name) in keys(Pkg.installed())
ok = false
try
@eval import $name
ok = true
catch e
@warn("The solver $name cannot be imported, run `] build $name`.")
end
if ok
@testset "$name" begin
include("$(lowercase(string(name)))_tests.jl")
end
end
else
@warn("The solver $name is not installed, run `] add $name`.")
end
end
include("solver_preamble.jl")
shared_preamble = true
# LP solvers
#solver_test(:GLPK)
# SOCP solvers
#solver_test(:ECOS)
# SDP solvers (SOC is reformulated into SDP)
#solver_test(:CSDP)
#solver_test(:SDPA)
# SDP+SOC solvers
solver_test(:Mosek)
#solver_test(:SeDuMi)
#solver_test(:SCS)
# If we re-run `solver_tests.jl`, it may be because we changed `Tests/Tests.jl`.
shared_preamble = false
nothing # Show nothing when `include` is called from REPL
using LinearAlgebra, Test
using DynamicPolynomials
using SetProg, SetProg.Sets
const MOI = SetProg.JuMP.MOI
@testset "Spaces" begin
B = Symmetric([1.0 0.0; 0.0 1.0])
b = [0.0, 0.0]
β = 1.0
h = [0.0, 0.0]
@polyvar x y z
dual = Sets.PerspectiveInteriorCone(B, b, β, [z, x, y], h)
◯ = Sets.perspective_dual(dual)
mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
model = JuMP.direct_model(mock);
Q = [1.0, 0.0, 1.0]
t = 1.0
# The difference will be zero hence we put `zeros(6)`
mock_optimize!(mock) = MOI.Utilities.mock_optimize!(mock, [Q; β; b; zeros(6); t])
MOI.Utilities.set_mock_optimize!(mock, mock_optimize!)
@variable(model, ◯◯, Ellipsoid(point=SetProg.InteriorPoint(h)))
cref = @constraint(model, ◯◯ ⊆ ◯)
@objective(model, Max, nth_root(volume(◯◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) == t
@test JuMP.value(◯◯).p == ◯.p
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
|
[
"MIT"
] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 4292 | using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
using JuMP
const quartic_inner_poly = [3.1518541833100864, -0.1617384194869734]
const quartic_inner_obj = 6.447419478140056
const quartic_inner_α = 5.6567546886722795
const quartic_inner_convexity = [12.0, 0.0, quartic_inner_α, 0.0, quartic_inner_poly[1]+2quartic_inner_poly[2],
quartic_inner_α, 8.48516455194103, 0.0, 0.0, 12.0]
const quartic_outer_β = 0.30177574048813055
const quartic_outer_γ = 0.5936049698923986
const quartic_outer_λ = -0.09857757888257276
const quartic_outer_obj = 1.611854896946893
const quartic_outer_α = 0.7928996242545062
const quartic_outer_convexity = [3.621308885857567, 0.0, quartic_outer_α, 0.0, 0.08578956499151169,
quartic_outer_α, 1.5, 0.0, 0.0, 3.6212933687704307]
@testset "Square" begin
config = MOI.Test.Config()
@testset "Ellipsoid" begin
@testset "John" begin
# Q = [1 0
# 0 1]
# t = √det(Q) = 1
Q = [1.0, 0.0, 1.0]
t = 1.0
@testset "Homogeneous" begin
Tests.john_homogeneous_square_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; t])),
config)
end
@testset "Non-homogeneous" begin
@testset "Ellipsoid" begin
β = -1.0
b = [0.0, 0.0]
Tests.john_nonhomogeneous_ell_square_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [Q; β; b; t])),
config)
end
@testset "PolySet" begin
β = 1.0
b = [0.0, 0.0]
Tests.john_nonhomogeneous_quad_square_test(bridged_mock(mock -> begin
# Q[3] Q[2] b[2]
# . Q[1] b[1]
# . . β-1
MOI.Utilities.mock_optimize!(mock, [Q[3]; Q[2]; Q[1]; b[2]; b[1]; β-1; 2Q])
end), config)
end
end
@testset "Piecewise" begin
Q1 = ones(3)
Q2 = [1.0, -1.0, 1.0]
Tests.piecewise_semiell_inner_homogeneous_◇_square_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(
mock,
[Q1; Q2; Q2; Q1])),
config)
Q = [1.0, 0.0, 1.0]
Tests.piecewise_semiell_inner_homogeneous_□_square_test(
bridged_mock(mock -> MOI.Utilities.mock_optimize!(
mock,
[Q; Q; Q; Q])),
config)
end
end
@testset "Löwner" begin
# Q = [√2 0
# 0 √2]
# t = √det(Q) = 2
# mock solution layout: Q11  Q12  Q22  t
Tests.löwner_homogeneous_square_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [0.5, 0.0, 0.5, 0.5])),
config)
end
end
@testset "Quartic" begin
@testset "Inner" begin
# The PSD matrix for the variable is 3 x 3 so 3 * (3+1) / 2 = 6
# The PSD matrix for the convexity is 4 x 4 so 4 * (5+1) / 2 = 10
# entries
# 1 variable for t
# hence 17 variables
sol = [1.0; 0.0; quartic_inner_poly; 0.0; 1.0;
quartic_inner_convexity; quartic_inner_obj]
Tests.quartic_inner_homogeneous_square_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, sol)),
config)
end
@testset "Outer" begin
sol = [quartic_outer_β; 0.0; quartic_outer_γ; quartic_outer_λ; 0.0; quartic_outer_β;
quartic_outer_convexity; quartic_outer_obj]
Tests.quartic_outer_homogeneous_square_test(bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, sol)),
config)
end
end
end
| SetProg | https://github.com/blegat/SetProg.jl.git |
|
[
"MIT"
] | 0.4.0 | 7fe53b00c86f4bd820ea7d8d97dcb3cf3287e0eb | code | 3168 | using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
import DynamicPolynomials
using JuMP
@testset "Switched System" begin
config = MOI.Test.Config()
@testset "Ellipsoid" begin
@testset "Feasible" begin
optimize!(mock) = MOIU.mock_optimize!(
mock, MOI.OPTIMAL, (MOI.FEASIBLE_POINT, zeros(3)))
Tests.feasible_switched_system_ell_test(bridged_mock(optimize!), config)
end
@testset "Infeasible" begin
optimize!(mock) = MOIU.mock_optimize!(
mock, MOI.INFEASIBLE, MOI.NO_SOLUTION, MOI.INFEASIBILITY_CERTIFICATE,
(MOI.VectorOfVariables, SetProg.SumOfSquares.PositiveSemidefinite2x2ConeTriangle) => [[0.00282108, 0.0, 0.00282108]],
(MOI.VectorAffineFunction{Float64}, SetProg.SumOfSquares.PositiveSemidefinite2x2ConeTriangle) => [[0.00117187, 0.0, 2.82044], [2.82044, 0.0, 0.00117187]]
)
Tests.infeasible_switched_system_ell_test(bridged_mock(optimize!), config)
end
end
@testset "Quadratic" begin
@testset "Feasible" begin
optimize!(mock) = MOIU.mock_optimize!(
mock, MOI.OPTIMAL, (MOI.FEASIBLE_POINT, zeros(3)))
Tests.feasible_switched_system_quad_test(bridged_mock(optimize!), config)
end
# Blocked by a bug in MockOptimizer which does not implement support for MomentMatrixAttribute correctly
# @testset "Infeasible" begin
# function optimize!(mock)
# MOI.set(mock, MOI.TerminationStatus(), MOI.INFEASIBLE)
# MOI.set(mock, MOI.PrimalStatus(), MOI.NO_SOLUTION)
# MOI.set(mock, MOI.DualStatus(), MOI.INFEASIBILITY_CERTIFICATE)
# DynamicPolynomials.@polyvar x[1:2]
# for (F, S) in MOI.get(mock, MOI.ListOfConstraints())
# if F == MOI.VectorAffineFunction{Float64}
# cis = MOI.get(mock, MOI.ListOfConstraintIndices{F, S}())
# function _moment_matrix(q)
# SetProg.SumOfSquares.build_moment_matrix(q, monomial_vector(x))
# end
# MOI.set(mock, SetProg.SumOfSquares.MomentMatrixAttribute(),
# cis[1], _moment_matrix([0.00336272, 0.0, 5.64559]))
# MOI.set(mock, SetProg.SumOfSquares.MomentMatrixAttribute(),
# cis[2], _moment_matrix([5.64559, 0.0, 0.00336272]))
# end
# end
# end
# Tests.infeasible_switched_system_quad_test(bridged_mock(optimize!), config)
# end
end
@testset "Quartic" begin
@testset "Feasible" begin
α = 11.814054544955727
optimize!(mock) = MOIU.mock_optimize!(
mock, MOI.OPTIMAL, (MOI.FEASIBLE_POINT, [α, 0.0, 0.0, -α, -0.0, α]))
Tests.feasible_switched_system_quartic_test(bridged_mock(optimize!), config)
end
@testset "Infeasible" begin
# Blocked for the same reason as quad
end
end
end
using Test
using SetProg
function bridged_mock(mock_optimize!::Function...;
# We use a `UniversalFallback` to add support for SOSCone, etc., so that
# we don't have to set the variables created by the SOS
# bridges
model = MOIU.UniversalFallback(MOI.Utilities.Model{Float64}()))
mock = MOI.Utilities.MockOptimizer(model)
bridged = MOI.Bridges.full_bridge_optimizer(mock, Float64)
MOI.Utilities.set_mock_optimize!(mock, mock_optimize!...)
return bridged
end
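# Usage sketch (illustrative, not part of the original file): tests feed the mock
# the primal solution a real solver would return, e.g. for the homogeneous John
# ellipsoid square test with solution [Q11, Q12, Q22, t]:
#     optimizer = bridged_mock(mock -> MOI.Utilities.mock_optimize!(mock, [1.0, 0.0, 1.0, 1.0]))
#     Tests.square_test(optimizer, MOI.Test.Config())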
using Test
using SetProg
@testset "Variables" begin
@testset "Ellipsoid" begin
@testset "Dimension" begin
model = Model()
@variable(model, S, Ellipsoid())
err = ErrorException("Missing dimension information, use Ellipsoid(dimension=...) or PolySet(dimension=...)")
@test_throws err JuMP.optimize!(model)
end
for d in 1:3
@test Ellipsoid(point=SetProg.InteriorPoint(ones(d))).dimension == d
end
@testset "Missing Point" begin
model = Model()
@variable(model, E, Ellipsoid(dimension=2))
@objective(model, Max, nth_root(volume(E))) # Force dual space
err = ArgumentError("Specify a point for nonsymmetric ellipsoid, e.g. `Ellipsoid(point=InteriorPoint([1.0, 0.0]))")
@test_throws err JuMP.optimize!(model)
end
end
@testset "PolySet" begin
err = ErrorException("Degree of PolySet not specified, use PolySet(degree=..., ...)")
@test_throws err PolySet(dimension=1)
@test_throws ArgumentError("Degree of PolySet not even") PolySet(degree=1)
@testset "Dimension" begin
model = Model()
@variable(model, S, PolySet(degree=2))
err = ErrorException("Missing dimension information, use Ellipsoid(dimension=...) or PolySet(dimension=...)")
@test_throws err JuMP.optimize!(model)
end
#@testset "Convex" begin
# model = Model()
# @variable(model, E, PolySet(degree=2, dimension=2))
# @objective(model, Max, nth_root(volume(E)))
# err = ErrorException("Cannot optimize volume of non-convex polynomial sublevel set. Use PolySet(convex=true, ...)")
# @test_throws err begin
# JuMP.optimize!(model)
# end
#end
end
end
module Tests
using JuMP
import GLPK
const lp_solver = optimizer_with_attributes(GLPK.Optimizer, "presolve" => GLPK.GLP_ON)
import Polyhedra
const lib = Polyhedra.DefaultLibrary{Float64}(lp_solver)
include("utilities.jl")
include("square.jl")
include("invariant.jl")
include("controlled_invariant.jl")
include("switched_system.jl")
end
using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
const MP = MultivariatePolynomials
import DynamicPolynomials
using JuMP
function ci_square_test(optimizer, config::MOI.Test.Config,
inner::Bool, variable::SetProg.AbstractVariable,
metric::Function, objective_value, set_test, nvars=nothing)
model = _model(optimizer)
□ = polyhedron(HalfSpace([1, 0], 1.0) ∩ HalfSpace([-1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([0, -1], 1))
@variable(model, ◯, variable)
if inner
cref = @constraint(model, ◯ ⊆ □)
else
cref = @constraint(model, □ ⊆ ◯)
end
Δt = 0.5
A = [1.0 Δt]
E = [1.0 0.0]
if variable.symmetric
@constraint(model, A * ◯ ⊆ E * ◯)
else
@constraint(model, A * ◯ ⊆ E * ◯, S_procedure_scaling = 1.0)
end
@objective(model, inner ? MOI.MAX_SENSE : MOI.MIN_SENSE,
metric(volume(◯)))
SetProg.optimize!(model)
if nvars !== nothing
@test nvars == num_variables(model)
end
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) ≈ objective_value atol=config.atol rtol=config.rtol
set_test(JuMP.value(◯))
end
function ci_ell_homogeneous_test(optimizer, config)
ci_square_test(optimizer, config, true,
Ellipsoid(symmetric=true, dimension=2), nth_root, √15/4,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.Ellipsoid{Float64}}
@test Sets.polar(◯).Q ≈ Symmetric([1.0 -1/4; -1/4 1.0]) atol=config.atol rtol=config.rtol
end)
end
function ci_ell_nonhomogeneous_test(optimizer, config)
ci_square_test(optimizer, config, true,
Ellipsoid(point=SetProg.InteriorPoint([0.0, 0.0])),
nth_root, √15/4,
◯ -> begin
@test ◯ isa Sets.PerspectiveDual{Float64, Sets.Householder{Float64, Sets.ShiftedEllipsoid{Float64}, Float64}}
z = Sets.perspective_variable(◯)
x, y = Sets.space_variables(◯)
◯_dual = Sets.perspective_dual(◯)
@test ◯_dual.p ≈ -z^2 + x^2 - x*y/2 + y^2 atol=config.atol rtol=config.rtol
@test ◯_dual.set.Q ≈ Symmetric([1.0 -1/4; -1/4 1.0]) atol=config.atol rtol=config.rtol
@test ◯_dual.set.b ≈ [0.0, 0.0] atol=config.atol rtol=config.rtol
@test ◯_dual.set.β ≈ -1.0 atol=config.atol rtol=config.rtol
@test Sets._householder(◯_dual.h) ≈ [-1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0] atol=config.atol rtol=config.rtol
end)
end
function ci_piecewise_semiell_homogeneous_test(optimizer, config)
ci_square_test(
optimizer, config, true,
Ellipsoid(symmetric=true, piecewise=◇),
set -> L1_heuristic(set), 19 / 6,
p◯ -> begin
@test p◯ isa Sets.Polar
◯ = p◯.set
@test ◯ isa Sets.Piecewise{Float64, Sets.Ellipsoid{Float64}}
@test length(◯.sets) == 4
Q1 = Symmetric([ 1.0 -0.25
-0.25 1.0])
Q2 = Symmetric([ 1.0 -1.0
-1.0 1.0])
_test_piece(◯, [-0.5, -0.5], Q1, config)
_test_piece(◯, [0.5, -0.5], Q2, config)
_test_piece(◯, [-0.5, 0.5], Q2, config)
_test_piece(◯, [0.5, 0.5], Q1, config)
end,
24,
)
end
function ci_piecewise_semiell_mci_homogeneous_test(optimizer, config)
polar_mci = polyhedron(convexhull([1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0], [1.0, 0.5], [-1.0, -0.5]), lib)
ci_square_test(
optimizer, config, true,
Ellipsoid(symmetric=true, piecewise=polar_mci),
set -> L1_heuristic(set), 2.9909434642487316,
p◯ -> begin
@test p◯ isa Sets.Polar
◯ = p◯.set
@test ◯ isa Sets.Piecewise{Float64, Sets.Ellipsoid{Float64}}
@test length(◯.sets) == 6
Q1 = Symmetric([ 1.0 -1.0
-1.0 1.0])
Q2 = Symmetric([ 1.0 0.0
0.0 0.0])
Q3 = Symmetric([ 0.25 0.5
0.5 1.0])
_test_piece(◯, [1.5, 0.5], Q2, config)
_test_piece(◯, [0.5, -0.5], Q1, config)
_test_piece(◯, [0.5, 0.75], Q3, config)
_test_piece(◯, [-0.5, 0.5], Q1, config)
_test_piece(◯, [-1.5, -0.5], Q2, config)
_test_piece(◯, [-1, -0.75], Q3, config)
end,
25,
)
end
function ci_quad_nonhomogeneous_test(optimizer, config)
ci_square_test(optimizer, config, true,
PolySet(degree=2, convex=true, point=SetProg.InteriorPoint([0.0, 0.0])),
set -> L1_heuristic(set, [1.0, 1.0]), 8/3,
◯ -> begin
@test ◯ isa Sets.PerspectiveDual{Float64, Sets.Householder{Float64, Sets.ConvexPolynomialSet{Float64, SetProg.Sets.MonoBasis, Float64}, Float64}}
z = Sets.perspective_variable(◯)
x, y = Sets.space_variables(◯)
◯_dual = Sets.perspective_dual(◯)
# The coefficient of `x*y` does not influence the volume
# and with the values of the other parameters, it should
# simply be in the interval [-2, -0.5].
α = MP.coefficient(◯_dual.p, x*y)
@test α ≥ -2 - 2config.atol - config.rtol
@test α ≤ -0.5 + 0.5config.atol + config.rtol
@test ◯_dual.p ≈ -z^2 + x^2 + α*x*y + y^2 atol=config.atol rtol=config.rtol
end)
end
function ci_quartic_homogeneous_test(optimizer, config)
ci_square_test(optimizer, config, true,
PolySet(symmetric=true, degree=4, convex=true),
set -> L1_heuristic(set, [1.0, 1.0]),
0.4,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.ConvexPolySet{Float64, SetProg.Sets.MonoBasis, Float64}}
@test Sets.polar(◯).degree == 4
x, y = variables(Sets.polar(◯).p)
α = MP.coefficient(Sets.polar(◯).p, x^3*y) / 2
q = x^4 + 2α*x^3*y + 6x^2*y^2 + 2α*x*y^3 + y^4
@test all(eigvals(Matrix(Sets.polar(◯).p.Q)) .≥ -config.atol)
@test polynomial(Sets.polar(◯).p) ≈ q atol=config.atol rtol=config.rtol
convexity_proof = Sets.convexity_proof(◯)
@test convexity_proof.n == 4
hess = 6 * [2.0, α, 2.0, α, 2.0,
2.0, 2.0, α, α, 2.0]
Hess = SetProg.SumOfSquares.MultivariateMoments.SymMatrix(hess, 4)
@test all(eigvals(Matrix(Hess)) .≥ -config.atol)
@test convexity_proof.Q ≈ hess atol=config.atol rtol=config.rtol
end)
end
const ci_tests = Dict(
"ci_ell_homogeneous" =>
ci_ell_homogeneous_test,
"ci_ell_nonhomogeneous" =>
ci_ell_nonhomogeneous_test,
"ci_piecewise_semiell_homogeneous" =>
ci_piecewise_semiell_homogeneous_test,
"ci_piecewise_semiell_mci_homogeneous" =>
ci_piecewise_semiell_mci_homogeneous_test,
"ci_quad_nonhomogeneous" =>
ci_quad_nonhomogeneous_test,
"ci_quartic_homogeneous" =>
ci_quartic_homogeneous_test
)
@test_suite ci
using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
import DynamicPolynomials
using JuMP
function invariant_square_test(optimizer, config::MOI.Test.Config,
inner::Bool, variable::SetProg.AbstractVariable,
metric::Function, objective_value, set_test)
model = _model(optimizer)
□ = polyhedron(HalfSpace([1, 0], 1.0) ∩ HalfSpace([-1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([0, -1], 1), lib)
@variable(model, ◯, variable)
if inner
cref = @constraint(model, ◯ ⊆ □)
else
cref = @constraint(model, □ ⊆ ◯)
end
A = [0.0 -1.0
1.0 0.0]
if variable.symmetric
@constraint(model, A * ◯ ⊆ ◯)
else
@constraint(model, A * ◯ ⊆ ◯, S_procedure_scaling = 1.0)
end
@objective(model, inner ? MOI.MAX_SENSE : MOI.MIN_SENSE,
metric(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) ≈ objective_value atol=config.atol rtol=config.rtol
return set_test(JuMP.value(◯))
end
function maximal_invariant_ell_homogeneous_test(optimizer, config)
invariant_square_test(
optimizer, config, true, Ellipsoid(symmetric=true, dimension=2),
nth_root, 1.0,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.Ellipsoid{Float64}}
@test Sets.polar(◯).Q ≈ Symmetric([1.0 0.0; 0.0 1.0]) atol=config.atol rtol=config.rtol
end)
end
function maximal_convex_invariant_quad_homogeneous_test(optimizer, config)
invariant_square_test(
optimizer, config, true,
PolySet(degree=2, convex=true, symmetric=true),
nth_root, 2.0,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.ConvexPolySet{Float64, SetProg.Sets.MonoBasis, Float64}}
x, y = Sets.space_variables(◯)
◯_polar = Sets.polar(◯)
@test ◯_polar.p ≈ x^2 + y^2 atol=config.atol rtol=config.rtol
end)
end
function minimal_invariant_ell_homogeneous_test(optimizer, config)
invariant_square_test(
optimizer, config, false, Ellipsoid(symmetric=true, dimension=2),
nth_root, 0.5,
◯ -> begin
@test ◯ isa Sets.Ellipsoid{Float64}
@test ◯.Q ≈ Symmetric([0.5 0.0; 0.0 0.5]) atol=config.atol rtol=config.rtol
end)
end
function minimal_invariant_quad_homogeneous_test(optimizer, config)
invariant_square_test(
optimizer, config, false,
PolySet(degree=2, symmetric=true),
set -> L1_heuristic(set, ones(2)), 4/3,
◯ -> begin
@test ◯ isa Sets.PolySet{Float64, SetProg.Sets.MonoBasis}
x, y = Sets.space_variables(◯)
@test ◯.p ≈ 0.5x^2 + 0.5y^2 atol=config.atol rtol=config.rtol
end)
end
function minimal_convex_invariant_quad_homogeneous_test(optimizer, config)
invariant_square_test(
optimizer, config, false,
PolySet(degree=2, convex=true, symmetric=true),
nth_root, 1.0,
◯ -> begin
@test ◯ isa Sets.ConvexPolySet{Float64, SetProg.Sets.MonoBasis}
x, y = Sets.space_variables(◯)
@test ◯.p ≈ 0.5x^2 + 0.5y^2 atol=config.atol rtol=config.rtol
end)
end
const invariant_tests = Dict("maximal_invariant_ell_homogeneous_test" => maximal_invariant_ell_homogeneous_test,
"maximal_convex_invariant_quad_homogeneous_test" => maximal_convex_invariant_quad_homogeneous_test,
"minimal_invariant_ell_homogeneous_test" => minimal_invariant_ell_homogeneous_test,
"minimal_invariant_quad_homogeneous_test" => minimal_invariant_quad_homogeneous_test,
"minimal_convex_invariant_quad_homogeneous_test" => minimal_convex_invariant_quad_homogeneous_test)
@test_suite invariant
using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
import DynamicPolynomials
using JuMP
const □ = polyhedron(HalfSpace([1, 0], 1.0) ∩ HalfSpace([-1, 0], 1) ∩ HalfSpace([0, 1], 1) ∩ HalfSpace([0, -1], 1))
const ◇ = polyhedron(convexhull([1.0, 0], [0, 1], [-1, 0], [0, -1]), lib)
function square_test(optimizer, config::MOI.Test.Config,
inner::Bool, variable::SetProg.AbstractVariable,
metric::Function, objective_value, set_test)
model = _model(optimizer)
@variable(model, ◯, variable)
if inner
cref = @constraint(model, ◯ ⊆ □)
else
cref = @constraint(model, □ ⊆ ◯)
end
@objective(model, inner ? MOI.MAX_SENSE : MOI.MIN_SENSE,
metric(volume(◯)))
SetProg.optimize!(model)
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.objective_sense(model) == MOI.MAX_SENSE
@test JuMP.objective_value(model) ≈ objective_value atol=config.atol rtol=config.rtol
set_test(JuMP.value(◯))
end
function john_homogeneous_square_test(optimizer, config)
square_test(optimizer, config, true,
Ellipsoid(symmetric=true, dimension=2),
nth_root, 1.0,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.Ellipsoid{Float64}}
@test Sets.polar(◯).Q ≈ Symmetric([1.0 0.0; 0.0 1.0]) atol=config.atol rtol=config.rtol
end)
end
function john_nonhomogeneous_ell_square_test(optimizer, config)
square_test(optimizer, config, true,
Ellipsoid(point=SetProg.InteriorPoint([0.0, 0.0])),
nth_root, 1.0,
◯ -> begin
@test ◯ isa Sets.PerspectiveDual{Float64, Sets.Householder{Float64, Sets.ShiftedEllipsoid{Float64}, Float64}}
z = Sets.perspective_variable(◯)
x, y = Sets.space_variables(◯)
◯_dual = Sets.perspective_dual(◯)
@test ◯_dual.p ≈ -z^2 + x^2 + y^2 atol=config.atol rtol=config.rtol
@test Sets._householder(◯_dual.h) ≈ [-1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0] atol=config.atol rtol=config.rtol
@test ◯_dual.set.Q ≈ Symmetric([1.0 0.0; 0.0 1.0]) atol=config.atol rtol=config.rtol
@test ◯_dual.set.b ≈ [0.0, 0.0] atol=config.atol rtol=config.rtol
@test ◯_dual.set.β ≈ -1.0 atol=config.atol rtol=config.rtol
end)
end
function john_nonhomogeneous_quad_square_test(optimizer, config)
square_test(optimizer, config, true,
PolySet(degree=2, convex=true, point=SetProg.InteriorPoint([0.0, 0.0])),
set -> L1_heuristic(set, [1.0, 1.0]),
8/3,
◯ -> begin
@test ◯ isa Sets.PerspectiveDual{Float64, Sets.Householder{Float64, Sets.ConvexPolynomialSet{Float64, SetProg.Sets.MonoBasis, Float64}, Float64}}
z = Sets.perspective_variable(◯)
x, y = Sets.space_variables(◯)
◯_dual = Sets.perspective_dual(◯)
@test ◯_dual.p ≈ -z^2 + x^2 + y^2 atol=config.atol rtol=config.rtol
end)
end
function löwner_homogeneous_square_test(optimizer, config)
square_test(optimizer, config, false,
Ellipsoid(symmetric=true, dimension=2),
nth_root, 0.5,
◯ -> begin
@test ◯ isa Sets.Ellipsoid
@test ◯.Q ≈ Symmetric([0.5 0.0
0.0 0.5]) atol=config.atol rtol=config.rtol
end)
end
function piecewise_semiell_inner_homogeneous_◇_square_test(optimizer, config)
square_test(optimizer, config, true,
Ellipsoid(symmetric=true, piecewise=◇),
set -> L1_heuristic(set), 4,
p◯ -> begin
@test p◯ isa Sets.Polar
◯ = p◯.set
@test ◯ isa Sets.Piecewise{Float64, Sets.Ellipsoid{Float64}}
@test length(◯.sets) == 4
Q1 = Symmetric([ 1.0 1.0
1.0 1.0])
Q2 = Symmetric([ 1.0 -1.0
-1.0 1.0])
_test_piece(◯, [-0.5, -0.5], Q1, config)
_test_piece(◯, [0.5, -0.5], Q2, config)
_test_piece(◯, [-0.5, 0.5], Q2, config)
_test_piece(◯, [0.5, 0.5], Q1, config)
end)
end
function piecewise_semiell_inner_homogeneous_□_square_test(optimizer, config)
square_test(optimizer, config, true,
Ellipsoid(symmetric=true, piecewise=□),
set -> L1_heuristic(set), 8/3,
p◯ -> begin
@test p◯ isa Sets.Polar
◯ = p◯.set
@test ◯ isa Sets.Piecewise{Float64, Sets.Ellipsoid{Float64}}
@test length(◯.sets) == 4
Q = Symmetric([1.0 0.0
0.0 1.0])
@test ◯.sets[1].Q ≈ Q atol=config.atol rtol=config.rtol
@test ◯.sets[2].Q ≈ Q atol=config.atol rtol=config.rtol
@test ◯.sets[3].Q ≈ Q atol=config.atol rtol=config.rtol
@test ◯.sets[4].Q ≈ Q atol=config.atol rtol=config.rtol
end)
end
const quartic_inner_poly = [3.1518541833100864, -0.1617384194869734]
const quartic_inner_obj = 6.447419478140056
const quartic_inner_α = 5.6567546886722795
const quartic_inner_convexity = [12.0, 0.0, quartic_inner_α, 0.0, quartic_inner_poly[1]+2quartic_inner_poly[2],
quartic_inner_α, 8.48516455194103, 0.0, 0.0, 12.0]
function quartic_inner_homogeneous_square_test(optimizer, config)
square_test(optimizer, config, true,
PolySet(symmetric=true, degree=4, dimension=2, convex=true),
nth_root, quartic_inner_obj,
◯ -> begin
@test ◯ isa Sets.Polar{Float64, Sets.ConvexPolySet{Float64, SetProg.Sets.MonoBasis, Float64}}
◯_polar = Sets.polar(◯)
@test ◯_polar.degree == 4
x, y = variables(◯_polar.p)
@test polynomial(◯_polar.p) ≈ x^4 + quartic_inner_convexity[5]*x^2*y^2 + y^4 atol=config.atol rtol=config.rtol
convexity_proof = Sets.convexity_proof(◯)
@test convexity_proof.n == 4
@test convexity_proof.Q ≈ quartic_inner_convexity atol=config.atol rtol=config.rtol
end)
end
const quartic_outer_β = 0.30177574048813055
const quartic_outer_γ = 0.5936049698923986
const quartic_outer_λ = -0.09857757888257276
const quartic_outer_obj = 1.611854896946893
const quartic_outer_α = 0.7928996242545062
const quartic_outer_convexity = [3.621308885857567, 0.0, quartic_outer_α, 0.0, 0.08578956499151169,
quartic_outer_α, 1.5, 0.0, 0.0, 3.6212933687704307]
function quartic_outer_homogeneous_square_test(optimizer, config)
square_test(optimizer, config, false,
PolySet(symmetric=true, degree=4, dimension=2, convex=true),
nth_root,
quartic_outer_obj,
◯ -> begin
@test ◯ isa Sets.ConvexPolySet{Float64, SetProg.Sets.MonoBasis, Float64}
@test ◯.degree == 4
x, y = variables(◯.p)
@test polynomial(◯.p) ≈ quartic_outer_β*x^4 + (quartic_outer_γ+2quartic_outer_λ)*x^2*y^2 + quartic_outer_β*y^4 atol=config.atol rtol=config.rtol
convexity_proof = Sets.convexity_proof(◯)
@test convexity_proof.n == 4
@test convexity_proof.Q ≈ quartic_outer_convexity atol=config.atol rtol=config.rtol
end)
end
const square_tests = Dict(
"john_homogeneous_square" =>
john_homogeneous_square_test,
"john_nonhomogeneous_ell_square" =>
john_nonhomogeneous_ell_square_test,
"john_nonhomogeneous_quad_square" =>
john_nonhomogeneous_quad_square_test,
"löwner_homogeneous_square" =>
löwner_homogeneous_square_test,
"piecewise_semiell_inner_homogeneous_◇_square" =>
piecewise_semiell_inner_homogeneous_◇_square_test,
"piecewise_semiell_inner_homogeneous_□_square" =>
piecewise_semiell_inner_homogeneous_□_square_test,
"quartic_inner_homogeneous_square" =>
quartic_inner_homogeneous_square_test,
"quartic_outer_homogeneous_square" =>
quartic_outer_homogeneous_square_test
)
@test_suite square
using LinearAlgebra
using Test
using SetProg, SetProg.Sets
using Polyhedra
using MultivariatePolynomials
using DynamicPolynomials
using JuMP
function switched_system_test(optimizer, config::MOI.Test.Config,
variable::SetProg.AbstractVariable, γ,
feasible, objective_value, set_test, dual_test)
model = _model(optimizer)
@variable(model, ◯, variable)
# See https://github.com/blegat/SwitchOnSafety.jl/blob/master/examples/LPJ17e43.ipynb
A1 = [1 0
1 0] / γ
A2 = [0 1
0 -1] / γ
cref1 = @constraint(model, A1 * ◯ ⊆ ◯)
cref2 = @constraint(model, A2 * ◯ ⊆ ◯)
if objective_value !== nothing
@objective(model, MOI.MAX_SENSE, L1_heuristic(volume(◯), ones(2)))
end
SetProg.optimize!(model)
if objective_value === nothing
@test JuMP.objective_sense(model) == MOI.FEASIBILITY_SENSE
else
@test JuMP.objective_sense(model) == MOI.MIN_SENSE
end
if feasible
@test JuMP.termination_status(model) == MOI.OPTIMAL
@test JuMP.primal_status(model) == MOI.FEASIBLE_POINT
if objective_value !== nothing
@test JuMP.objective_value(model) ≈ objective_value atol=config.atol rtol=config.rtol
end
return set_test(JuMP.value(◯))
else
#@test JuMP.termination_status(model) == MOI.INFEASIBLE # Mosek returns SLOW_PROGRESS
@test JuMP.dual_status(model) == MOI.INFEASIBILITY_CERTIFICATE
return dual_test(cref1, cref2)
end
end
function feasible_switched_system_ell_test(optimizer, config, ε=1e-3)
Q = [1.0 0.0
0.0 1.0]
superset = SetProg.Sets.Ellipsoid(Symmetric(Q))
switched_system_test(
optimizer, config,
Ellipsoid(symmetric=true, superset=superset),
√2 + ε, true, 4/3,
◯ -> begin
@test ◯ isa Sets.Ellipsoid{Float64}
@test ◯.Q ≈ Q atol=config.atol rtol=config.rtol
end,
(cref1, cref2) -> begin end)
end
function infeasible_switched_system_ell_test(optimizer, config, ε=1e-3)
Q = [1.0 0.0
0.0 1.0]
superset = SetProg.Sets.Ellipsoid(Symmetric(Q))
switched_system_test(
optimizer, config,
Ellipsoid(symmetric=true, superset=superset),
√2 - ε, false, NaN,
◯ -> begin end,
(cref1, cref2) -> begin
function ispsd(q)
Q = [q[1] q[2]; q[2] q[3]]
return all(eigvals(Q) .≥ -config.atol)
end
@test ispsd(JuMP.dual(cref1))
@test ispsd(JuMP.dual(cref2))
end)
end
function superset(x, d)
q = SetProg.SumOfSquares.GramMatrix(SetProg.SumOfSquares.SOSDecomposition(x.^d))
return SetProg.Sets.PolySet(2d, q)
end
function feasible_switched_system_quad_test(optimizer, config, ε=1e-3)
@polyvar x[1:2]
switched_system_test(
optimizer, config,
PolySet(symmetric=true, degree=2, superset=superset(x, 1)),
√2 + ε, true, 8/3,
◯ -> begin
@test ◯ isa Sets.PolySet{Float64, SetProg.Sets.MonoBasis}
@test polynomial(◯.p) ≈ x[1]^2 + x[2]^2 atol=config.atol rtol=config.rtol
end,
(cref1, cref2) -> begin end)
end
function infeasible_switched_system_quad_test(optimizer, config, ε=1e-3)
@polyvar x[1:2]
switched_system_test(
optimizer, config,
PolySet(symmetric=true, degree=2, superset=superset(x, 1)),
√2 - ε, false, NaN,
◯ -> begin end,
(cref1, cref2) -> begin
function ispsd(M)
return all(eigvals(Matrix(M.Q)) .≥ -config.atol)
end
@test ispsd(SetProg.SumOfSquares.moment_matrix(cref1))
@test ispsd(SetProg.SumOfSquares.moment_matrix(cref2))
end)
end
function feasible_switched_system_quartic_test(optimizer, config, ε=1e-2)
@polyvar x[1:2]
switched_system_test(
optimizer, config,
PolySet(symmetric=true, degree=4, superset=superset(x, 2)),
1.0 + ε, true, 10.001105454190741,
◯ -> begin
@test ◯ isa Sets.PolySet{Float64, SetProg.Sets.MonoBasis}
α = 11.814054544955727
@test polynomial(◯.p) ≈ (α+1) * x[1]^4 - 2α * x[1]^2*x[2]^2 + (α+1) * x[2]^4 atol=config.atol rtol=config.rtol
end,
(cref1, cref2) -> begin end)
end
function infeasible_switched_system_quartic_test(optimizer, config, ε=2e-1)
@polyvar x[1:2]
switched_system_test(
optimizer, config,
PolySet(symmetric=true, degree=4, superset=superset(x, 2)),
1.0 - ε, false, nothing,
◯ -> begin end,
(cref1, cref2) -> begin
function ispsd(M)
return all(eigvals(Matrix(M.Q)) .≥ -config.atol)
end
@test ispsd(SetProg.SumOfSquares.moment_matrix(cref1))
@test ispsd(SetProg.SumOfSquares.moment_matrix(cref2))
end)
end
const switched_system_tests = Dict("feasible_switched_system_ell" => feasible_switched_system_ell_test,
"infeasible_switched_system_ell" => infeasible_switched_system_ell_test,
"feasible_switched_system_quad" => feasible_switched_system_quad_test,
"infeasible_switched_system_quad" => infeasible_switched_system_quad_test,
"feasible_switched_system_quartic" => feasible_switched_system_quartic_test,
"infeasible_switched_system_quartic" => infeasible_switched_system_quartic_test)
@test_suite switched_system
using Test, JuMP
function _model(optimizer::MOI.AbstractOptimizer)
MOI.empty!(optimizer)
return direct_model(optimizer)
end
function _model(optimizer_constructor)
return Model(optimizer_constructor)
end
#"""
# @test_suite setname subsets
#
#Defines a function `setname_test(model, config, exclude)` that runs the tests
#defined in the dictionary `setname_tests` with the model `model` and config
#`config` except the tests whose dictionary key is in `exclude`. If `subsets` is
#`true`, then each test in fact runs multiple tests, hence the `exclude` argument
#is passed along, as it can also contain tests to be excluded from these subsets of
#tests.
#"""
macro test_suite(setname, subsets=false)
testname = Symbol(string(setname) * "_test")
testdict = Symbol(string(testname) * "s")
if subsets
runtest = :( f(model, config, exclude) )
else
runtest = :( f(model, config) )
end
esc(:(
function $testname(model,
config::$MOI.Test.Config,
exclude::Vector{String} = String[])
for (name,f) in $testdict
if name in exclude
continue
end
@testset "$name" begin
$runtest
end
end
end
))
end
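# For example, `@test_suite square` (used in `square.jl`) defines a function
# `square_test(model, config, exclude)` that runs every test in the
# `square_tests` dictionary.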
# Utilities for building the mock `optimize!` from the solution of a solver
# Variables primal values for inner bridged model
function print_value(v, atol)
i = round(v)
if isapprox(v, i, atol=atol)
print(float(i))
else
print(v)
end
end
function inner_variable_value(model, atol=1e-4)
inner = backend(model)
println("optimize!(mock) = MOIU.mock_optimize!(mock,")
println(JuMP.termination_status(model))
if JuMP.primal_status(model) != MOI.NO_SOLUTION
values = MOI.get(inner, MOI.VariablePrimal(),
MOI.get(inner, MOI.ListOfVariableIndices()))
print("(MOI.FEASIBLE_POINT, [")
for (i, v) in enumerate(values)
if i > 1
print(", ")
end
print_value(v, atol)
end
print("])")
else
print("MOI.NO_SOLUTION")
end
println(",")
if JuMP.dual_status(model) != MOI.NO_SOLUTION
for (F, S) in MOI.get(inner, MOI.ListOfConstraints())
print("($F, $S) => [")
for ci in MOI.get(inner, MOI.ListOfConstraintIndices{F, S}())
print(MOI.get(inner, MOI.ConstraintDual(), ci))
print(", ")
end
println("])")
end
end
println(")")
end
# Constraint dual values for inner bridged model
# The order of the pieces might vary, so it's more robust
# to find the index of a piece using a known point in its interior
function _test_piece(set, point, Q, config)
is_in = [point in piece for piece in set.pieces]
@test count(is_in) == 1
i = findfirst(is_in)
@test set.sets[i].Q ≈ Q atol=config.atol rtol=config.rtol
return
end
# SetProg
| **Documentation** | **Build Status** | **Social** |
|:-----------------:|:----------------:|:----------:|
| [![][docs-stable-img]][docs-stable-url] | [![Build Status][build-img]][build-url] | [![Gitter][gitter-img]][gitter-url] |
| [![][docs-latest-img]][docs-latest-url] | [![Codecov branch][codecov-img]][codecov-url] | [<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/a/af/Discourse_logo.png/799px-Discourse_logo.png" width="64">][discourse-url] |
JuMP extension for Set Programming: optimization with set variables and inclusion/containment constraints. This package allows the formulation of a mathematical program involving set variables and inclusion/membership constraints in addition to classical variables and constraints supported by JuMP.
## Documentation
- [**STABLE**][docs-stable-url] — **most recently tagged version of the documentation.**
- [**LATEST**][docs-latest-url] — *in-development version of the documentation.*
## Variables
The variables can either be
* a `Polytope`;
* an `Ellipsoid`, or a piecewise semi-ellipsoid;
* a `PolySet`, that is the 1-sublevel set of a polynomial of degree `2d`.
```julia
@variable model S Polytope(piecewise=p) # polytope defined over the pieces defined by `p`
@variable model S Ellipsoid()
@variable model S Ellipsoid(piecewise=p) # piecewise semi-ellipsoid defined over the pieces defined by `p`
@variable model S PolySet(d) # 1-sublevel set of a polynomial of degree 2d
@variable model S PolySet(d, convex=true) # Convex 1-sublevel set of a polynomial of degree 2d
@variable model S PolySet(d, symmetric=true) # 1-sublevel set of a polynomial of degree 2d symmetric around the origin
@variable model S PolySet(d, symmetric=true, point=SetProg.CenterPoint([1, 0])) # 1-sublevel set of a polynomial of degree 2d symmetric around the [1, 0]
```
## Expressions
The following operations are allowed:
| Operation | Description |
|-----------|-------------------------------|
| A\*S | Linear mapping |
But more operations are planned to be added:
| Operation | Description |
|-----------|-------------------------------|
| S + x | Translation of `S` by `x` |
| S1 + S2 | Minkowski sum |
| S1 ∩ S2 | Intersection of `S1` and `S2` |
| S1 ∪ S2 | Union of `S1` and `S2` |
| polar(S) | Polar of S |
## Constraints
The following constraints are implemented:
| Operation | Description |
|-----------|--------------------------|
| x ∈ S | `x` is contained in `S` |
| S1 ⊆ S2 | `S1` is included in `S2` |
| S1 ⊇ S2 | `S2` is included in `S1` |
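For instance, a membership constraint can be imposed with `@constraint` directly. This is a minimal sketch, assuming a set variable `S` and an `optimizer_constructor` as set up in the examples below:
```julia
using SetProg
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(symmetric=true, dimension=2))
@constraint(model, [0.5, 0.25] ∈ S) # the point [0.5, 0.25] must belong to S
```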
## Examples
Consider a polytope
```julia
using Polyhedra
diamond = HalfSpace([1, 1], 1) ∩ HalfSpace([-1, -1], 1) ∩ HalfSpace([1, -1], 1) ∩ HalfSpace([-1, 1], 1)
simplex = HalfSpace([1, 1], 1) ∩ HalfSpace([-1, 0], 0) ∩ HalfSpace([0, -1], 0)
```
Pick an SDP solver (see [here](https://www.juliaopt.org/JuMP.jl/stable/installation/#Getting-Solvers-1) for a list)
```julia
using CSDP # Optimizer
optimizer_constructor = CSDP.Optimizer
```
To compute the maximal symmetric ellipsoid contained in the polytope `diamond` defined above (i.e. [Löwner-John ellipsoid](https://github.com/rdeits/LoewnerJohnEllipsoids.jl)):
```julia
using SetProg
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(symmetric=true))
@constraint(model, S ⊆ diamond)
@objective(model, Max, nth_root(volume(S)))
optimize!(model)
```
We specify in the example that the ellipsoid is symmetric around the origin to
simplify the computation: the solver does not need to look for the center, so
the SDP problem that needs to be solved has a smaller size.
We can visualize the result with [Plots](http://juliaplots.org/) as follows:
```julia
using Plots
plot(polyhedron(diamond), ratio=1)
plot!(value(S))
```
To compute the maximal ellipsoid contained in `simplex`, we don't need to specify
the center, but we do need a point in the interior of the ellipsoid. The SDP
formulation used will then determine the center and shape of the ellipsoid
simultaneously in the same SDP. For the interior point, we take the Chebyshev
center of the simplex (which can be found by solving an LP). This is the center of
the sphere of maximal volume in the simplex, so one might rightly guess that it is
in the interior of the maximal ellipsoid contained in the simplex.
```julia
using SetProg
cheby_center, cheby_radius = chebyshevcenter(simplex, optimizer_constructor)
interior_point = SetProg.InteriorPoint(cheby_center)
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(point=interior_point))
@constraint(model, S ⊆ simplex)
@objective(model, Max, nth_root(volume(S)))
optimize!(model)
```
We now visualize the result:
```julia
using Plots
plot(polyhedron(simplex), ratio=1)
plot!(value(S))
```
To compute the maximal invariant set contained in a polytope (*not yet implemented*):
```julia
using SetProg
model = Model(optimizer_constructor)
@variable(model, S, Polytope())
@constraint(model, S ⊆ diamond)
@constraint(model, A*S ⊆ S) # Invariance constraint
@objective(model, Max, volume(S))
optimize!(model)
```
To compute the maximal invariant ellipsoid contained in the polytope `diamond` defined above:
```julia
using SetProg
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(symmetric=true))
@constraint(model, S ⊆ diamond)
@constraint(model, A*S ⊆ S) # Invariance constraint
@objective(model, Max, nth_root(volume(S)))
optimize!(model)
```
To compute the maximal algebraic-invariant ellipsoid (i.e. `AS ⊆ ES`) contained in the polytope `diamond` defined above:
```julia
using SetProg
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(symmetric=true))
@constraint(model, S ⊆ diamond)
@constraint(model, A*S ⊆ E*S) # Invariance constraint
@objective(model, Max, L1_heuristic(volume(S), ones(Polyhedra.fulldim(P))))
optimize!(model)
```
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-stable-url]: https://blegat.github.io/SetProg.jl/stable
[docs-latest-url]: https://blegat.github.io/SetProg.jl/dev
[build-img]: https://github.com/blegat/SetProg.jl/workflows/CI/badge.svg?branch=master
[build-url]: https://github.com/blegat/SetProg.jl/actions?query=workflow%3ACI
[codecov-img]: http://codecov.io/github/blegat/SetProg.jl/coverage.svg?branch=master
[codecov-url]: http://codecov.io/github/blegat/SetProg.jl?branch=master
[gitter-url]: https://gitter.im/JuliaPolyhedra/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link
[gitter-img]: https://badges.gitter.im/JuliaPolyhedra/Lobby.svg
[discourse-url]: https://discourse.julialang.org/c/domain/opt
# SetProg
[SetProg](https://github.com/blegat/SetProg.jl) is a JuMP extension for Set Programming:
optimization with set variables and inclusion/containment constraints.
This package allows the formulation of a mathematical program involving both classical variables and constraints supported by JuMP and set variables and constraints.
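As a quick, minimal sketch (mirroring the README example and assuming an SDP solver is available as `optimizer_constructor`), the maximal symmetric ellipsoid inscribed in a polytope can be computed with:
```julia
using SetProg, Polyhedra
diamond = polyhedron(HalfSpace([1, 1], 1) ∩ HalfSpace([-1, -1], 1) ∩ HalfSpace([1, -1], 1) ∩ HalfSpace([-1, 1], 1))
model = Model(optimizer_constructor)
@variable(model, S, Ellipsoid(symmetric=true))
@constraint(model, S ⊆ diamond)
@objective(model, Max, nth_root(volume(S)))
optimize!(model)
```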
using Documenter, LoopTools
makedocs(;
modules=[LoopTools],
authors="Feng-Kun Guo",
repo="https://github.com/fkguo/LoopTools.jl/blob/{commit}{path}#L{line}",
sitename="LoopTools.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://fkguo.github.io/LoopTools.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Definitions" => "definitions.md",
"Example" => "example.md"
],
)
deploydocs(;
repo="github.com/fkguo/LoopTools.jl.git",
target = "build",
deps = nothing,
make = nothing,
branch = "gh-pages"
# push_preview=true,
)
# The 1-point one-loop integrals
const aa0 = 1; const aa00 = 4
const acoef = (:aa0, :aa00)
@doc raw"""
A0i(id, m^2)
one-point tensor coefficient for `id`
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int d^D q \frac{\{1, g_{μν} \} }{q^2-m^2}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε```.
Special cases:
| `id` | Int | Description |
|:---|:---:|:---|
| `aa0` |`1` | scalar one-point one-loop function, i.e., `A0(m^2)` |
| `aa00` | `4` | coefficient of ``g_{μν}`` |
"""
function A0i end
# somehow here the returning type must be ComplexF64 is I use the original LoopTools_jll.
# otherwise there would be a EXCEPTION_ACCESS_VIOLATION to crash julia
function A0i(id, msq::Real)
_a0i = ccall((:a0i_, libooptools), ComplexF64,
(Ref{Int64}, Ref{Float64}),
id, msq)
return real(_a0i) # we know this must be real
end
A0i(id, msq::Complex) = ccall((:a0ic_, libooptools), ComplexF64,
(Ref{Int64}, Ref{ComplexF64}),
id, msq)
@doc raw"""
A0(m^2)
the scalar one-point one-loop function
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int d^D q \frac{1}{q^2-m^2}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
"""
A0(msq::Real) = A0i(aa0, msq)
# ccall((:a0_, libooptools), ComplexF64, (Ref{Float64},), msq)
A0(msq) = ccall((:a0c_, libooptools), ComplexF64,
(Ref{ComplexF64},),
msq)
A00(msq) = A0i(aa00, msq)
"""
Aget(m^2; val_only = false)
the finite piece of all one-point tensor coefficients.
See also [`aget`](@ref).
* `val_only = false`, return a `NamedTuple`; otherwise return an `NTuple`.
""" Aget
@doc raw"""
aget(msq)
return all one-point coefficients; each one is characterized by three numbers,
with the later two coefficients of ``ε^{-1}`` and ``ε^{-2}``, respectively.
See also [`Aget`](@ref) and [`agetsym`](@ref).
""" aget
@doc raw"""
aput!(res::Vector{ComplexF64}, msq)
return all one-point coefficients to the preallocated array `res` of length 6.
See also [`aget`](@ref) and [`agetsym`](@ref).
""" aput!
_define_get('A', 1, 2)
function aget(msq::Real)
ccall((:aput_, libooptools), Cvoid,
(Ptr{Vector{ComplexF64}}, Ref{Float64}, Csize_t),
_Ares_, msq, 6)
_Ares_
end
# slower than `Aget`
Agetnocache(msq) = NamedTuple{(:aa0, :aa00)}((A0i(aa0, msq), A0i(aa00, msq)) )
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 3646 | # The 2-point one-loop integrals
const bb0 = 1; const bb1 = 4; const bb00 = 7
const bb11 = 10; const bb001 = 13; const bb111 = 16
const dbb0 = 19; const dbb1 = 22; const dbb00 = 25;
const dbb11 = 28; const dbb001 = 31
const bcoef = (:bb0, :bb1, :bb00, :bb11, :bb001, :bb111, :dbb0, :dbb1,
:dbb00, :dbb11, :dbb001)
@doc raw"""
B0i(id, p^2, m1^2, m2^2)
two-point tensor coefficient for `id`
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{({\rm numerator})\, d^D q }{(q^2-m_1^2)\left[(q+p)^2-m_2^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
Special cases:
| `id` | Int | Description |
|:---|:---:|:---|
| `bb0` (`dbb0`) |`1` (`19`) | (derivative of) scalar two-point one-loop function |
| `bb1` (`dbb1`) | `4` (`22`) | (derivative of) coefficient of ``p_{μ}`` |
| `bb00` (`dbb00`) | `7` (`25`) | (derivative of) coefficient of ``g_{μν}`` |
| `bb11` (`dbb11`) | `10` (`28`) | (derivative of) coefficient of ``p_μ p_ν`` |
| `bb001` (`dbb001`) | `13` (`31`) | (derivative of) coefficient of ``g_{μν}p_ρ`` |
| `bb111` | `16` | coefficient of ``p_μ p_ν p_ρ`` |
Functions `B0`, `B1`, `B00`, `B11`, `B001` and `B111` are defined.
"""
function B0i(id, psq::Real, m1sq::Real, m2sq::Real)
ccall((:b0i_, libooptools), ComplexF64,
(Ref{Int64}, Ref{Float64}, Ref{Float64}, Ref{Float64}),
id, psq, m1sq, m2sq)
end
function B0i(id, psq, m1sq, m2sq)
ccall((:b0ic_, libooptools), ComplexF64,
(Ref{Int64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}),
id, psq, m1sq, m2sq)
end
@doc raw"""
B0(p^2, m1^2, m2^2)
the scalar two-point one-loop function
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{d^D q }{(q^2-m_1^2)\left[(q+p)^2-m_2^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
""" B0
"Coefficients of two-point tensor loop integral. See `B0i`." B00, B11, B001, B111, DB0, DB1, DB00, DB11
for f in (:B0, :B1, :B00, :B11, :B001, :B111, :DB0, :DB1, :DB00, :DB11)
ff = lowercase(string("$(f)_"))
@eval function ($f)(psq::Real, m1sq::Real, m2sq::Real)
ccall(($ff, libooptools), ComplexF64,
(Ref{Float64}, Ref{Float64}, Ref{Float64}),
psq, m1sq, m2sq)
end
end
for f in (:B0, :B1, :B00, :B11, :B001, :B111, :DB0, :DB1, :DB00, :DB11)
ff = lowercase(string("$(f)c_"))
@eval function ($f)(psq, m1sq, m2sq)
ccall(($ff, libooptools), ComplexF64,
(Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}),
psq, m1sq, m2sq)
end
end
"""
Bget(p^2, m1^2, m2^2; val_only = false)
return the finite piece of all two-point coefficients.
See also [`bget`](@ref).
* `val_only = false`, return a `NamedTuple`; otherwise return an `NTuple`.
""" Bget
@doc raw"""
bget(p^2, m1^2, m2^2)
return all two-point coefficients; each one is characterized by three numbers,
with the later two coefficients of ``ε^{-1}`` and ``ε^{-2}``, respectively.
See also [`Bget`](@ref) and [`bgetsym`](@ref).
""" bget
@doc raw"""
bput!(res::Vector{ComplexF64}, p^2, m1^2, m2^2)
return all two-point coefficients to the preallocated array `res` of length 33.
See also [`bget`](@ref) and [`bgetsym`](@ref).
""" bput!
# define bget and Bget
_define_get('B', 3, 11)
function bget(p::Real, m1::Real, m2::Real)
ccall((:bput_, libooptools), Cvoid,
(Ptr{Vector{ComplexF64}}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Csize_t),
_Bres_, p, m1, m2, 33)
_Bres_
end
# Bgetnocache
let _str_ = join(["B0i($i, psq, m1sq, m2sq)," for i in bcoef])
Meta.parse(string("Bgetnocache(psq, m1sq, m2sq) = NamedTuple{bcoef}((", _str_[1:end-1], "))")) |> eval
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 4312 | # The 3-point one-loop integrals
const cc0 = 1; const cc1 = 4; const cc2 = 7;
const cc00 = 10; const cc11 = 13; const cc12 = 16; const cc22 = 19;
const cc001 = 22; const cc002 = 25; const cc111 = 28; const cc112 = 31; const cc122 = 34; const cc222 = 37;
const cc0000 = 40; const cc0011 = 43; const cc0012 = 46; const cc0022 = 49;
const cc1111 = 52; const cc1112 = 55; const cc1122 = 58; const cc1222 = 61; const cc2222 = 64
const ccoef = (:cc0, :cc1, :cc2, :cc00, :cc11, :cc12, :cc22, :cc001, :cc002, :cc111,
:cc112, :cc122, :cc222, :cc0000, :cc0011, :cc0012, :cc0022, :cc1111,
:cc1112, :cc1122, :cc1222, :cc2222)
@doc raw"""
C0i(id, p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2)
three-point tensor coefficient for `id`
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{({\rm numerator})\, d^D q }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
Special cases:
| `id` | Int | Description |
|:---|:---:|:---|
| `cc0` |`1` | scalar three-point one-loop function |
| `cc1` | `4` | coefficient of ``k_{1μ}`` |
| `cc2` | `7` | coefficient of ``k_{2μ}`` |
| `cc00` | `10` | coefficient of ``g_{μν}`` |
| `cc11` | `13` | coefficient of ``k_{1μ} k_{1ν}`` |
| `cc12` | `16` | coefficient of ``k_{1μ} k_{2ν}`` |
| `cc22` | `19` | coefficient of ``k_{2μ} k_{2ν}`` |
| `...` | `...` | `...` |
| `cc2222` | `64` | coefficient of ``k_{2μ} k_{2ν} k_{2ρ} k_{2σ}`` |
where $k_{1,2}$ are related to $p_{1,2}$ by
``k_{j} = \sum_{i=1}^j p_i``.
"""
function C0i(id, p1sq::Real, p2sq::Real, p3sq::Real, m1sq::Real, m2sq::Real, m3sq::Real)
ccall((:c0i_, libooptools), ComplexF64,
(Ref{Int64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}),
id, p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
end
function C0i(id, p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
ccall((:c0ic_, libooptools), ComplexF64,
(Ref{Int64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64},
Ref{ComplexF64}, Ref{ComplexF64}),
id, p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
end
@doc raw"""
C0(p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2)
the scalar three-point one-loop function
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{d^D q }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
"""
function C0(p1sq::Real, p2sq::Real, p3sq::Real, m1sq::Real, m2sq::Real, m3sq::Real)
ccall((:c0_, libooptools), ComplexF64,
(Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}),
p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
end
function C0(p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
ccall((:c0c_, libooptools), ComplexF64,
(Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64},
Ref{ComplexF64}, Ref{ComplexF64}),
p1sq, p2sq, p3sq, m1sq, m2sq, m3sq)
end
"""
Cget(p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2; val_only = false)
return the finite piece of all three-point coefficients.
See also [`cget`](@ref).
* `val_only = false`, return a `NamedTuple`; otherwise return an `NTuple`.
""" Cget
@doc raw"""
cget(p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2)
return all three-point coefficients; each one is characterized by three numbers,
with the later two coefficients of ``ε^{-1}`` and ``ε^{-2}``, respectively.
See also [`Cget`](@ref) and [`cgetsym`](@ref).
""" cget
@doc raw"""
cput!(res::Vector{ComplexF64}, p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2)
return all three-point coefficients to the preallocated array `res` of length 66.
See also [`cget`](@ref) and [`cgetsym`](@ref).
""" cput!
_define_get('C', 6, 22)
function cget(p1sq::Real, p2sq::Real, p3sq::Real, m1sq::Real, m2sq::Real, m3sq::Real)
ccall((:cput_, libooptools), Cvoid,
(Ptr{Vector{ComplexF64}}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Csize_t),
_Cres_, p1sq, p2sq, p3sq, m1sq, m2sq, m3sq, 66)
_Cres_
end
# Cgetnocache
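# Builds, at include time, a method equivalent to
#   Cgetnocache(p1, p2, p1p2, m1, m2, m3) =
#       NamedTuple{ccoef}((C0i(cc0, p1, p2, p1p2, m1, m2, m3), ..., C0i(cc2222, p1, p2, p1p2, m1, m2, m3)))
# i.e. every coefficient is recomputed through `C0i`, bypassing the internal cache used by `cget`.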
let _str_ = join(["C0i($i, p1,p2,p1p2,m1,m2,m3)," for i in ccoef])
Meta.parse(
string("Cgetnocache(p1, p2, p1p2, m1, m2, m3) = NamedTuple{ccoef}((",
_str_[1:end-1], "))")
) |> eval
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 6298 | # The 4-point one-loop integrals
const dd0 = 1; const dd1 = 4; const dd2 = 7; const dd3 = 10;
const dd00 = 13; const dd11 = 16; const dd12 = 19; const dd13 = 22; const dd22 = 25; const dd23 = 28;
const dd33 = 31; const dd001 = 34; const dd002 = 37;
const dd003 = 40; const dd111 = 43; const dd112 = 46; const dd113 = 49;
const dd122 = 52; const dd123 = 55; const dd133 = 58;
const dd222 = 61; const dd223 = 64; const dd233 = 67;
const dd333 = 70; const dd0000 = 73; const dd0011 = 76; const dd0012 = 79;
const dd0013 = 82; const dd0022 = 85; const dd0023 = 88;
const dd0033 = 91; const dd1111 = 94; const dd1112 = 97;
const dd1113 = 100; const dd1122 = 103; const dd1123 = 106; const dd1133 = 109;
const dd1222 = 112; const dd1223 = 115; const dd1233 = 118;
const dd1333 = 121; const dd2222 = 124; const dd2223 = 127;
const dd2233 = 130; const dd2333 = 133; const dd3333 = 136; const dd00001 = 139;
const dd00002 = 142; const dd00003 = 145; const dd00111 = 148;
const dd00112 = 151; const dd00113 = 154; const dd00122 = 157;
const dd00123 = 160; const dd00133 = 163; const dd00222 = 166; const dd00223 = 169;
const dd00233 = 172; const dd00333 = 175; const dd11111 = 178;
const dd11112 = 181; const dd11113 = 184; const dd11122 = 187;
const dd11123 = 190; const dd11133 = 193; const dd11222 = 196; const dd11223 = 199;
const dd11233 = 202; const dd11333 = 205; const dd12222 = 208;
const dd12223 = 211; const dd12233 = 214; const dd12333 = 217;
const dd13333 = 220; const dd22222 = 223; const dd22223 = 226; const dd22233 = 229;
const dd22333 = 232; const dd23333 = 235; const dd33333 = 238
const dcoef = (:dd0, :dd1, :dd2, :dd3, :dd00, :dd11, :dd12, :dd13, :dd22, :dd23,
:dd33, :dd001, :dd002, :dd003, :dd111, :dd112, :dd113, :dd122, :dd123, :dd133,
:dd222, :dd223, :dd233, :dd333, :dd0000, :dd0011, :dd0012, :dd0013, :dd0022,
:dd0023, :dd0033, :dd1111, :dd1112, :dd1113, :dd1122, :dd1123, :dd1133, :dd1222,
:dd1223, :dd1233, :dd1333, :dd2222, :dd2223, :dd2233, :dd2333, :dd3333, :dd00001,
:dd00002, :dd00003, :dd00111, :dd00112, :dd00113, :dd00122,
:dd00123, :dd00133, :dd00222, :dd00223, :dd00233, :dd00333, :dd11111,
:dd11112, :dd11113, :dd11122, :dd11123, :dd11133, :dd11222, :dd11223,
:dd11233, :dd11333, :dd12222, :dd12223, :dd12233, :dd12333,
:dd13333, :dd22222, :dd22223, :dd22233, :dd22333, :dd23333, :dd33333)
@doc raw"""
D0i(id, p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2, m1^2, m2^2, m3^2, m4^2)
four-point tensor coefficient for `id`
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{({\rm numerator})\, d^D q }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right] \left[(q+p_1+p_2+p_3)^2-m_4^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
Special cases:
| `id` | Int | Description |
|:---|:---:|:---|
| `dd0` |`1` | scalar four-point one-loop function |
| `dd1` | `4` | coefficient of ``k_{1μ}`` |
| `dd2` | `7` | coefficient of ``k_{2μ}`` |
| `dd3` | `10` | coefficient of ``k_{3μ}`` |
| `dd00` | `13` | coefficient of ``g_{μν}`` |
| `...` | `...` | `...` |
| `dd33333` | `238` | coefficient of ``k_{3μ} k_{3ν} k_{3ρ} k_{3σ} k_{3λ}`` |
where $k_{1,2,3}$ are related to the external momenta $p_{1,2,3}$ by
``k_{j} = \sum_{i=1}^j p_i``.
"""
function D0i(id, p1sq::Real, p2sq::Real, p3sq::Real, p4sq::Real, p12sq::Real, p23sq::Real,
m1sq::Real, m2sq::Real, m3sq::Real, m4sq::Real)
ccall((:d0i_, libooptools), ComplexF64,
(Ref{Int64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}),
id, p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq)
end
function D0i(id, p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq)
ccall((:d0ic_, libooptools), ComplexF64,
(Ref{Int64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64},
Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}),
id, p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq)
end
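# Illustrative usage (a sketch; the kinematic inputs below are arbitrary test values):
#   D0i(dd00, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 1.0, 1.0, 1.0, 1.0)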
@doc raw"""
D0(p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2, m1^2, m2^2, m3^2, m4^2)
the scalar four-point one-loop function
```math
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int
\frac{d^D q }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right] \left[(q+p_1+p_2+p_3)^2-m_4^2\right]}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
"""
D0(p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq) = D0i(dd0,
p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq)
"""
Dget(p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2,
m1^2, m2^2, m3^2, m4^2; val_only = false)
return the finite piece of all four-point coefficients.
See also [`dget`](@ref).
* If `val_only = false` (the default), a `NamedTuple` is returned; if `val_only = true`, a plain `NTuple` of values is returned.
""" Dget
@doc raw"""
dget(p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2, m1^2, m2^2, m3^2, m4^2)
return all four-point coefficients; each one is characterized by three numbers,
with the latter two being the coefficients of ``ε^{-1}`` and ``ε^{-2}``, respectively.
See also [`Dget`](@ref) and [`dgetsym`](@ref).
""" dget
@doc raw"""
dput!(res::Vector{ComplexF64}, p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2,
m1^2, m2^2, m3^2, m4^2)
write all four-point coefficients into the preallocated array `res` of length 240 and return it.
See also [`dget`](@ref) and [`dgetsym`](@ref).
""" dput!
_define_get('D', 10, 80)
function dget(p1sq::Real, p2sq::Real, p3sq::Real, p4sq::Real, p12sq::Real, p23sq::Real,
m1sq::Real, m2sq::Real, m3sq::Real, m4sq::Real)
ccall((:dput_, libooptools), Cvoid,
(Ptr{Vector{ComplexF64}}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Csize_t),
_Dres_, p1sq, p2sq, p3sq, p4sq, p12sq, p23sq, m1sq, m2sq, m3sq, m4sq, 240)
_Dres_
end
# Dgetnocache
let _str_ = join(["D0i($i, p1,p2,p3,p4,p12,p23,m1,m2,m3,m4)," for i in dcoef])
Meta.parse(string("Dgetnocache(p1,p2,p3,p4,p12,p23,m1,m2,m3,m4) = NamedTuple{dcoef}((",
_str_[1:end-1], "))") ) |> eval
end | LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 7567 | # The 5-point one-loop integrals
const ee0 = 1; const ee1 = 4; const ee2 = 7; const ee3 = 10; const ee4 = 13;
const ee00 = 16; const ee11 = 19; const ee12 = 22; const ee13 = 25; const ee14 = 28;
const ee22 = 31; const ee23 = 34; const ee24 = 37; const ee33 = 40; const ee34 = 43;
const ee44 = 46; const ee001 = 49; const ee002 = 52; const ee003 = 55; const ee004 = 58;
const ee111 = 61; const ee112 = 64; const ee113 = 67; const ee114 = 70; const ee122 = 73;
const ee123 = 76; const ee124 = 79; const ee133 = 82; const ee134 = 85; const ee144 = 88;
const ee222 = 91; const ee223 = 94; const ee224 = 97; const ee233 = 100; const ee234 = 103;
const ee244 = 106; const ee333 = 109; const ee334 = 112; const ee344 = 115; const ee444 = 118;
const ee0000 = 121; const ee0011 = 124; const ee0012 = 127; const ee0013 = 130;
const ee0014 = 133; const ee0022 = 136; const ee0023 = 139; const ee0024 = 142;
const ee0033 = 145; const ee0034 = 148; const ee0044 = 151; const ee1111 = 154;
const ee1112 = 157; const ee1113 = 160; const ee1114 = 163; const ee1122 = 166;
const ee1123 = 169; const ee1124 = 172; const ee1133 = 175; const ee1134 = 178;
const ee1144 = 181; const ee1222 = 184; const ee1223 = 187; const ee1224 = 190;
const ee1233 = 193; const ee1234 = 196; const ee1244 = 199; const ee1333 = 202;
const ee1334 = 205; const ee1344 = 208; const ee1444 = 211; const ee2222 = 214;
const ee2223 = 217; const ee2224 = 220; const ee2233 = 223; const ee2234 = 226;
const ee2244 = 229; const ee2333 = 232; const ee2334 = 235; const ee2344 = 238;
const ee2444 = 241; const ee3333 = 244; const ee3334 = 247; const ee3344 = 250;
const ee3444 = 253; const ee4444 = 256
const ecoef = (:ee0, :ee1, :ee2, :ee3, :ee4, :ee00, :ee11, :ee12, :ee13, :ee14,
:ee22, :ee23, :ee24, :ee33, :ee34, :ee44, :ee001, :ee002, :ee003, :ee004,
:ee111, :ee112, :ee113, :ee114, :ee122, :ee123, :ee124, :ee133, :ee134, :ee144,
:ee222, :ee223, :ee224, :ee233, :ee234, :ee244, :ee333, :ee334, :ee344, :ee444,
:ee0000, :ee0011, :ee0012, :ee0013, :ee0014, :ee0022, :ee0023, :ee0024,
:ee0033, :ee0034, :ee0044, :ee1111, :ee1112, :ee1113, :ee1114, :ee1122,
:ee1123, :ee1124, :ee1133, :ee1134, :ee1144, :ee1222, :ee1223, :ee1224,
:ee1233, :ee1234, :ee1244, :ee1333, :ee1334, :ee1344, :ee1444, :ee2222,
:ee2223, :ee2224, :ee2233, :ee2234, :ee2244, :ee2333, :ee2334, :ee2344,
:ee2444, :ee3333, :ee3334, :ee3344, :ee3444, :ee4444 )
@doc raw"""
E0i(id, p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2, (p2+p3)^2, (p3+p4)^2, (p4+p5)^2,
(p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2)
five-point tensor coefficient for `id`
```math
\begin{aligned}
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int d^D q &
\frac{({\rm numerator}) }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right] \left[(q+p_1+p_2+p_3)^2-m_4^2\right]} \\
&\times \frac1{\left[(q+p_1+p_2+p_3+p_4)^2-m_5^2\right]}
\end{aligned}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
Special cases:
| `id` | Int | Description |
|:---|:---:|:---|
| `ee0` |`1` | scalar five-point one-loop function |
| `ee1` | `4` | coefficient of ``k_{1μ}`` |
| `ee2` | `7` | coefficient of ``k_{2μ}`` |
| `ee3` | `10` | coefficient of ``k_{3μ}`` |
| `ee4` | `13` | coefficient of ``k_{4μ}`` |
| `ee00` | `16` | coefficient of ``g_{μν}`` |
| `...` | `...` | `...` |
| `ee4444` | `256` | coefficient of ``k_{4μ} k_{4ν} k_{4ρ} k_{4σ}`` |
where $k_{1,2,3,4}$ are related to the external momenta $p_{1,2,3,4}$ by
``k_{j} = \sum_{i=1}^j p_i``.
"""
function E0i(id, p1sq::Real, p2sq::Real, p3sq::Real, p4sq::Real, p5sq::Real, p12sq::Real,
p23sq::Real, p34sq::Real, p45sq::Real, p51sq::Real, m1sq::Real, m2sq::Real,
m3sq::Real, m4sq::Real, m5sq::Real)
ccall((:e0i_, libooptools), ComplexF64,
(Ref{Int64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}),
id, p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq,
p34sq, p45sq, p51sq, m1sq, m2sq, m3sq, m4sq, m5sq)
end
function E0i(id, p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq, p34sq, p45sq, p51sq,
m1sq, m2sq, m3sq, m4sq, m5sq)
ccall((:e0ic_, libooptools), ComplexF64,
(Ref{Int64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64},
Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64},
Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}, Ref{ComplexF64}),
id, p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq, p34sq, p45sq, p51sq, m1sq, m2sq, m3sq, m4sq, m5sq)
end
@doc raw"""
E0(p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2, (p2+p3)^2, (p3+p4)^2, (p4+p5)^2,
(p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2)
the scalar five-point one-loop function
```math
\begin{aligned}
\frac{μ^{4-D}}{iπ^{D/2} r_Γ} \int d^D q &
\frac{1 }{(q^2-m_1^2)\left[(q+p_1)^2-m_2^2\right]
\left[(q+p_1+p_2)^2-m_3^2\right] \left[(q+p_1+p_2+p_3)^2-m_4^2\right]} \\
&\times \frac1{\left[(q+p_1+p_2+p_3+p_4)^2-m_5^2\right]}
\end{aligned}
```
with ``r_Γ = \frac{Γ^2(1-ε)Γ(1+ε)}{Γ(1-2ε)}``, ``D=4-2ε``.
"""
E0(p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq, p34sq, p45sq, p51sq, m1sq, m2sq,
m3sq, m4sq, m5sq) = E0i(ee0, p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq, p34sq,
p45sq, p51sq, m1sq, m2sq, m3sq, m4sq, m5sq)
"""
Eget(p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2, (p2+p3)^2, (p3+p4)^2, (p4+p5)^2,
(p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2; val_only = false)
return the finite piece of all five-point coefficients.
See also [`eget`](@ref).
* If `val_only = false` (the default), a `NamedTuple` is returned; if `val_only = true`, a plain `NTuple` of values is returned.
""" Eget
@doc raw"""
eget(p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2, (p2+p3)^2, (p3+p4)^2, (p4+p5)^2,
(p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2)
return all five-point coefficients; each one is characterized by three numbers,
with the latter two being the coefficients of ``ε^{-1}`` and ``ε^{-2}``, respectively.
See also [`Eget`](@ref) and [`egetsym`](@ref).
""" eget
@doc raw"""
eput!(res::Vector{ComplexF64}, p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2,
(p2+p3)^2, (p3+p4)^2, (p4+p5)^2, (p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2)
write all five-point coefficients into the preallocated array `res` of length 258 and return it.
See also [`eget`](@ref) and [`egetsym`](@ref).
""" eput!
_define_get('E', 15, 86)
# For all-real arguments, the `eget` method generated by `_define_get` crashes Julia,
# so it is written out explicitly below; this does not happen in the other cases
# (reason unknown). `_Eres_` itself is still defined via `_define_get`.
function eget(p1sq::Real, p2sq::Real, p3sq::Real, p4sq::Real, p5sq::Real, p12sq::Real,
p23sq::Real, p34sq::Real, p45sq::Real, p51sq::Real, m1sq::Real, m2sq::Real,
m3sq::Real, m4sq::Real, m5sq::Real)
ccall((:eput_, libooptools), Cvoid,
(Ptr{Vector{ComplexF64}}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64},
Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Ref{Float64}, Csize_t),
_Eres_, p1sq, p2sq, p3sq, p4sq, p5sq, p12sq, p23sq, p34sq, p45sq, p51sq,
m1sq, m2sq, m3sq, m4sq, m5sq, 258)
_Eres_
end
# Egetnocache
let _str_ = join(["E0i($i, p1,p2,p3,p4,p5,p12,p23,p34,p45,p51,m1,m2,m3,m4,m5)," for i in ecoef])
Meta.parse(string("Egetnocache(p1,p2,p3,p4,p5,p12,p23,p34,p45,p51,m1,m2,m3,m4,m5) = NamedTuple{ecoef}((",
_str_[1:end-1], "))") ) |> eval
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 430 | @doc """
Li2(x)
return the dilogarithm of ``x``.
"""
Li2(x) = ccall((:li2c_, libooptools), ComplexF64, (Ref{ComplexF64},), x)
Li2(x::Real) = ccall((:li2_, libooptools), ComplexF64, (Ref{Float64},), x)
@doc """
Li2omx(x)
return the dilogarithm of ``1-x``.
"""
Li2omx(x) = ccall((:li2omxc_, libooptools), ComplexF64, (Ref{ComplexF64},), x)
Li2omx(x::Real) = ccall((:li2omx_, libooptools), ComplexF64, (Ref{Float64},), x) | LoopTools | https://github.com/fkguo/LoopTools.jl.git |
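# Sanity check (a sketch; value approximate): Li2(0.5) should be close to
# π^2/12 - log(2)^2/2 ≈ 0.5822405, returned as a ComplexF64 with (numerically) zero imaginary part.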
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 2617 | __precompile__(true)
module LoopTools
export A0, A0i, A00, Aget, aget, Agetnocache, Li2, Li2omx,
B0, B0i, B1, B00, B11, B001, B111, DB0, DB1, DB00, DB11, Bget, bget, Bgetnocache,
C0, C0i, Cget, cget, Cgetnocache, aput!, bput!, cput!, dput!, eput!,
D0, D0i, Dget, dget, Dgetnocache, E0, E0i, Eget, eget, Egetnocache
export aa0, bb0, bb1, bb00, bb11, bb001, bb111
export dbb0, dbb1, dbb00, dbb11, dbb001
export cc0, cc1, cc2, cc00, cc11, cc12, cc22, cc001, cc002, cc111, cc112, cc122
export cc222, cc0000, cc0011, cc0012, cc0022, cc1111, cc1112, cc1122, cc1222, cc2222
export dd0, dd1, dd2, dd3, dd00, dd11, dd12, dd13, dd22, dd23, dd33, dd001, dd002, dd003
export dd111, dd112, dd113, dd122, dd123, dd133, dd222, dd223, dd233, dd333
export dd0000, dd0011, dd0012, dd0013, dd0022, dd0023, dd0033, dd1111, dd1112, dd1113,
dd1122, dd1123, dd1133, dd1222, dd1223, dd1233, dd1333, dd2222, dd2223, dd2233, dd2333,
dd3333, dd00001, dd00002, dd00003, dd00111, dd00112, dd00113, dd00122, dd00123, dd00133,
dd00222, dd00223, dd00233, dd00333, dd11111, dd11112, dd11113, dd11122, dd11123, dd11133,
dd11222, dd11223, dd11233, dd11333, dd12222, dd12223, dd12233, dd12333, dd13333, dd22222,
dd22223, dd22233, dd22333, dd23333, dd33333
export ee0, ee1, ee2, ee3, ee4, ee00, ee11, ee12, ee13, ee14, ee22, ee23, ee24, ee33, ee34, ee44,
ee001, ee002, ee003, ee004, ee111, ee112, ee113, ee114, ee122, ee123, ee124, ee133, ee134, ee144,
ee222, ee223, ee224, ee233, ee234, ee244, ee333, ee334, ee344, ee444, ee0000, ee0011, ee0012, ee0013,
ee0014, ee0022, ee0023, ee0024, ee0033, ee0034, ee0044, ee1111, ee1112, ee1113, ee1114, ee1122,
ee1123, ee1124, ee1133, ee1134, ee1144, ee1222, ee1223, ee1224, ee1233, ee1234, ee1244, ee1333,
ee1334, ee1344, ee1444, ee2222, ee2223, ee2224, ee2233, ee2234, ee2244, ee2333, ee2334, ee2344,
ee2444, ee3333, ee3334, ee3344, ee3444, ee4444
export libooptools
export ltexi, clearcache, markcache, restorecache, getcmpbits, setcmpbits,
getversionkey, setversionkey, getdebugkey, setdebugkey, getmaxdev, setmaxdev,
setdebugrange, getwarndigits, setwarndigits, geterrdigits, seterrdigits, getdelta,
setdelta, getmudim, setmudim, getlambda, setlambda, getminmass, setminmass,
getuvdiv, setuvdiv, getzeroeps, setzeroeps, getdiffeps, setdiffeps, getepsi
export acoef, bcoef, ccoef, dcoef, ecoef
export DRResult, ε, agetsym, bgetsym, cgetsym, dgetsym, egetsym
using LoopTools_jll
include("init.jl")
include("meta.jl")
include("A.jl")
include("B.jl")
include("C.jl")
include("D.jl")
include("E.jl")
include("Li2.jl")
include("utils.jl")
include("sym.jl")
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 93 |
# initialize LoopTools
function __init__()
ccall((:ltini_, libooptools), Cvoid, ())
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 2200 |
# E functions have 15 arguments, so this list is sufficient (note that the letter `e` is skipped)
const _for_args = "a,b,c,d,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t"
# internal function, to define the Lget and lget functions
# L: 'A', 'B', 'C', 'D', 'E' for 1 to 5 point integrals
# args: the arguments of these integrals
# narg: No. of arguments; 1 for A, 3 for B, 6 for C, 10 for D, 15 for E
# ncoef: No. of tensor coefficients; 2 for A, 11 for B, 22 for C, 80 for D, 86 for E
function _define_get(L, narg::Int, ncoef::Int)
_args = _for_args[1:(2narg-1)]
_size = 3ncoef
_l = lowercase(L)
_str_ = join(["Ref{ComplexF64}," for _ in 1:narg])
_str2_ = join(["_a[$(3i-2)]," for i in 1:ncoef])
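# `_str_` contributes one `Ref{ComplexF64}` per integral argument to the ccall signature;
# `_str2_` picks the finite part (every third entry) out of the result buffer `_Lres_`.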
# define lget functions
Meta.parse(
string("const _$(L)res_ = Vector{ComplexF64}(undef, $(_size));",
"function $(_l)get($(_args));",
"ccall((:$(_l)putc_, libooptools), Cvoid,",
"(Ptr{Vector{ComplexF64}},", _str_, "Csize_t),",
"_$(L)res_,", _args, ", $(_size) ); _$(L)res_; end")
) |> eval
# define Lget functions
Meta.parse(
string("function $(L)get($(_args); val_only = false);",
"_a = $(_l)get($(_args));",
"val_only ? (", _str2_[1:end-1], ") : ",
"NamedTuple{$(_l)coef}((", _str2_[1:end-1], "));",
"end" )
) |> eval
# define lput! functions
Meta.parse(
string("function $(_l)put!(res::Vector{ComplexF64}, $(_args));",
"""@assert length(res)==$(3ncoef) "length should be $(3ncoef)";""",
"ccall((:$(_l)putc_, libooptools), Cvoid,",
"(Ptr{Vector{ComplexF64}},", _str_, "Csize_t),",
"res,", _args, ", $(_size) ); res; end")
) |> eval
end
# prototype; much shorter using metaprogramming as above
# function Bget(psq, m1sq, m2sq)
# NamedTuple{bcoef}(
# (B0i(bb0, psq, m1sq, m2sq), B0i(bb1, psq, m1sq, m2sq), B0i(bb00, psq, m1sq, m2sq),
# B0i(bb11, psq, m1sq, m2sq), B0i(bb001, psq, m1sq, m2sq), B0i(bb111, psq, m1sq, m2sq),
# B0i(dbb0, psq, m1sq, m2sq), B0i(dbb1, psq, m1sq, m2sq), B0i(dbb00, psq, m1sq, m2sq),
# B0i(dbb11, psq, m1sq, m2sq), B0i(dbb001, psq, m1sq, m2sq) )
# )
# end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 1995 | using SymbolicUtils: @syms, @rule, @acrule, _iszero, Chain, Postwalk
@syms ε::Real
_r1 = @rule ~x::_iszero / ~y => 0
_r2 = @acrule ~x::_iszero + ~y => ~y
_simplify = Postwalk(Chain([_r1, _r2, _r2]))
@doc raw"""
DRResult(c0, c1, c2)
assemble the coefficients into the symbolic expression ``c_0 + c_1/ε + c_2/ε^2``, corresponding to the `Mathematica` function of the same name.
`1/ε` corresponds to `DR1eps` in the `Mathematica` version.
"""
DRResult(c0, c1, c2) = _simplify(c0 + c1/ε + c2/ε^2)
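# For example (a sketch): DRResult(1.0, 2.0, 0.0) simplifies to the symbolic
# expression 1.0 + 2.0 / ε, the exactly-zero ε^-2 term being dropped by `_simplify`.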
function _round(x; kws...)
abs(imag(x)) < sqrt(eps(1.0)) && (x = real(x))
return round(x; kws...)
end
function _define_getsym(L, narg::Int, ncoef::Int)
l = lowercase(L)
_args = _for_args[1:(2narg-1)]
Meta.parse(
string("function $(l)getsym($(_args));",
"_$(L)res = $(l)get($(_args));",
"_b = _round.(_$(L)res, digits=6);",
"NamedTuple{$(l)coef}(DRResult(_b[3i-2], _b[3i-1], _b[3i]) for i = 1:$(ncoef));",
"end" )
) |> eval
end
_define_getsym('A', 1, 2)
_define_getsym('B', 3, 11)
_define_getsym('C', 6, 22)
_define_getsym('D', 10, 80)
_define_getsym('E', 15, 86)
@doc raw"""
agetsym(msq)
return all one-point coefficients with terms of ``ε^{-1}`` and ``ε^{-2}`` kept symbolically.
""" agetsym
@doc raw"""
bgetsym(p^2, m1^2, m2^2)
return all two-point coefficients with terms of ``ε^{-1}`` and ``ε^{-2}`` kept symbolically.
""" bgetsym
@doc raw"""
cgetsym(p1^2, p2^2, (p1+p2)^2, m1^2, m2^2, m3^2)
return all three-point coefficients with terms of ``ε^{-1}`` and ``ε^{-2}`` kept symbolically.
""" cgetsym
@doc raw"""
dgetsym(p1^2, p2^2, p3^2, p4^2, (p1+p2)^2, (p2+p3)^2, m1^2, m2^2, m3^2, m4^2)
return all four-point coefficients with terms of ``ε^{-1}`` and ``ε^{-2}`` kept symbolically.
""" dgetsym
@doc raw"""
egetsym(p1^2, p2^2, p3^2, p4^2, p5^2, (p1+p2)^2, (p2+p3)^2, (p3+p4)^2, (p4+p5)^2,
(p5+p1)^2, m1^2, m2^2, m3^2, m4^2, m5^2)
return all five-point coefficients with terms of ``ε^{-1}`` and ``ε^{-2}`` kept symbolically.
""" egetsym | LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 3915 | for f in (:ltexi, :clearcache, :markcache, :restorecache)
ff = string("$(f)_")
@eval ($f)() = ccall(($ff, libooptools), Cvoid, ())
end
for f in (:getversionkey, :getcmpbits, :getdebugkey, :getwarndigits, :geterrdigits,
:getepsi)
ff = string("$(f)_")
@eval ($f)() = ccall(($ff, libooptools), Int64, ())
end
for f in (:setversionkey, :setcmpbits, :setdebugkey, :setwarndigits, :seterrdigits)
ff = string("$(f)_")
@eval ($f)(b::Int) = ccall(($ff, libooptools), Cvoid, (Ref{Int64},), b)
end
for f in (:getmaxdev, :getdelta, :getmudim, :getlambda, :getminmass, :getuvdiv,
:getzeroeps, :getdiffeps)
ff = string("$(f)_")
@eval ($f)() = ccall(($ff, libooptools), Float64, ())
end
for f in (:setmaxdev, :setdelta, :setmudim, :setlambda, :setminmass, :setuvdiv,
:setzeroeps, :setdiffeps)
ff = string("$(f)_")
@eval ($f)(b::Real) = ccall(($ff, libooptools), Cvoid, (Ref{Float64},), b)
end
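# Typical configuration sketch (illustrative values, not defaults):
#   setmudim(2.0)    # change the dim.-reg. scale (see getmudim/setmudim)
#   setlambda(0.0)   # IR treatment: return the finite piece in dim. reg.
#   setdelta(0.0)    # finite part of the UV divergence (MS-bar)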
"""
setdebugrange(f::Int, t::Int)
The integrals are listed in the output with a unique serial number. If the list of integrals becomes too long, one can select only a range of serial numbers for viewing.
"""
setdebugrange(f::Int, t::Int) = ccall((:setdebugrange_, libooptools), Cvoid,
(Ref{Int64}, Ref{Int64}),
f, t )
"""
ltexi()
summary of errors.
""" ltexi
"""
clearcache()
remove all integrals from the cache.
""" clearcache
"""
markcache()
mark the current cache pointers.
""" markcache
"""
restorecache()
restore cache pointers marked by `markcache()`.
""" restorecache
"""
getcmpbits()
the number of bits for the cache-lookup precision. The default is 62 for double precision, 64 for quadruple precision.
""" getcmpbits
"""
setcmpbits(b::Int)
set the number of bits for cache-lookup precision.
""" setcmpbits
"""
getversionkey()
return the version key; the loop integrals are implemented using different versions (Denner, `FF`). For more information, see Section 1.3.10 of the `LoopTools` manual.
""" getversionkey
"""
setversionkey(k::Int)
set the version key to choose the implementations of various functions and whether checking is performed.
""" setversionkey
"""
getmaxdev()
setmaxdev(b::Real)
get/set the tolerance for the deviation between different implementations of the same function before a warning is issued. The default value is `1e-12`.
""" getmaxdev, setmaxdev
"""
getdebugkey()
setdebugkey(k::Int)
turn debugging output on or off via the debug key; `getdebugkey()` returns the current value.
""" getdebugkey, setdebugkey
@doc raw"""
getzeroeps()
setzeroeps(b::Real)
A given quantity `x` is tested to be zero if `|x|` is smaller than `getzeroeps()`. Default value: ``10^{-22}``.
""" getzeroeps, setzeroeps
@doc raw"""
getdiffeps()
setdiffeps(b::Real)
Two quantities are tested for equality if the absolute value of their difference is smaller than `getdiffeps()`. Default value: ``10^{-12}``.
""" getdiffeps, setdiffeps
@doc raw"""
getdelta()
setdelta(Δ::Real)
`getdelta()` returns the value of ``\Delta``, the finite piece of UV divergence,
defaulting to `0` (the ``\overline{\rm MS}`` scheme);
change by `setdelta(Δ)`.
""" getdelta, setdelta
@doc raw"""
getlambda()
setlambda(λ::Real)
`getlambda()` returns the value of ``\lambda^2`` used for regularizing the IR divergence.
It is positive for photon-mass regularization; ``0``, ``-1`` and ``-2`` give the
finite piece, coefficients of the ``ε^{-1}`` and ``ε^{-2}``, respectively.
Change by `setlambda(λ)`.
""" getlambda, setlambda
@doc raw"""
getmudim()
setmudim(μ::Real)
The dimensional-regularization scale squared, ``μ^2`` (`mudim` in the `LoopTools` manual); the default value is `1.0`.
""" getmudim, setmudim
@doc raw"""
getminmass()
setminmass(m::Real)
get/set ``m_{\rm min}^2``, used for regularizing collinear divergences.
""" getminmass, setminmass
@doc raw"""
getuvdiv()
setuvdiv(x::Real)
Switch on (`x=1`, default) or off (`x=0`) the UV divergence part of ``1/ε``.
""" getuvdiv, setuvdiv | LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | code | 91 | using LoopTools
using Test
@testset "LoopTools.jl" begin
# Write your tests here.
end
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | docs | 554 | # LoopTools
<!-- [](https://fkguo.github.io/LoopTools.jl/stable) -->
[](https://fkguo.github.io/LoopTools.jl/dev)
[](https://github.com/fkguo/LoopTools.jl/actions)
This is a julia wrapper of functions in the [LoopTools package](http://www.feynarts.de/looptools/) for computing one-loop integrals.
Install by `] add https://github.com/fkguo/LoopTools.jl.git`.
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | docs | 5157 | # Definitions
Copied from the [`LoopTools` manual](https://github.com/fkguo/LoopTools.jl/blob/master/docs/LT215Guide.pdf).
For details about the tensor reduction of one-loop integrals, see *A. Denner, S. Dittmaier, Reduction schemes for one-loop tensor integrals* [[hep-ph/0509141]](https://inspirehep.net/literature/692154).
## General one-loop integral
Consider a general one-loop integral shown as follows:

The tensor loop integral is defined as
```math
T_{\mu_1\mu_2...\mu_P}^N = \frac{\mu^{4-D}}{i\pi^{D/2} r_\Gamma} \int d^D q
\frac{ q_{\mu_1}q_{\mu_2}...q_{\mu_P} }{(q^2-m_1^2)\left[(q+k_1)^2-m_2^2\right] \cdots
\left[(q+k_{N-1})^2-m_N^2\right]}
```
with ``r_\Gamma = \frac{\Gamma^2(1-\varepsilon) \Gamma(1+\varepsilon)}{\Gamma(1-2\varepsilon)}``, ``D=4-2\varepsilon``.
The momentum flow is shown in the figure. We have
```math
\begin{aligned}
& p_1 = k_1, \quad p_2 = k_2 - k_1, \quad \ldots, \quad p_N = k_N - k_{N-1}, \\
& k_1 = p_1, \quad k_2 = p_1 + p_2, \quad \ldots, \quad k_N = \sum_{i = 1}^N p_i .
\end{aligned}
```
## Tensor coefficients
The 1-point, 2-point, 3-point, 4-point and 5-point scalar loop integrals are denoted by `A0`, `B0`, `C0`, `D0` and `E0`, respectively.
The 1-loop tensor integral can be decomposed as
```math
\begin{aligned}
B_\mu & = k_{1\mu} B_1, \\
B_{\mu\nu} & = g_{\mu\nu} B_{00} + k_{1\mu} k_{1\nu} B_{11}, \\
C_\mu &= k_{1\mu} C_1 + k_{2\mu} C_2 \\
C_{\mu\nu} &= g_{\mu\nu} C_{00} + \sum_{i,j=1}^2 k_{i\mu}k_{j\nu} C_{ij}, \\
C_{\mu\nu\rho} &= \sum_{i=1}^2 \left(g_{\mu\nu}k_{i\rho} + g_{\nu\rho} k_{i\mu} + g_{\mu\rho} k_{i\nu} \right) C_{00i} + \sum_{i,j,\ell=1}^2 k_{i\mu}k_{j\nu}k_{\ell \rho} C_{ij\ell}, \\
D_\mu &= \sum_{i=1}^3 k_{i\mu} D_i ,\\
D_{\mu\nu} &= g_{\mu\nu} D_{00} + \sum_{i,j=1}^3 k_{i\mu}k_{j\nu} D_{ij}, \\
D_{\mu\nu\rho} &= \sum_{i=1}^3 \left(g_{\mu\nu}k_{i\rho} + g_{\nu\rho} k_{i\mu} + g_{\mu\rho} k_{i\nu} \right) D_{00i} + \sum_{i,j,\ell=1}^3 k_{i\mu}k_{j\nu}k_{\ell \rho} D_{ij\ell}, \\
D_{\mu\nu\rho\sigma} &= \left( g_{\mu\nu}g_{\rho\sigma} + g_{\mu\rho}g_{\nu\sigma} + g_{\mu\sigma}g_{\nu\rho} \right) D_{0000} + \sum_{i,j,\ell,m=1}^3 k_{i\mu}k_{j\nu}k_{\ell \rho}k_{m\sigma} D_{ij\ell m} \\
&\phantom{=} + \sum_{i,j=1}^3 \left(g_{\mu\nu}k_{i\rho}k_{j\sigma} + g_{\nu\rho} k_{i\mu}k_{j\sigma} + g_{\mu\rho} k_{i\nu}k_{j\sigma}
+ g_{\mu\sigma}k_{i\nu}k_{j\rho} + g_{\nu\sigma}k_{i\mu}k_{j\rho} + g_{\rho\sigma}k_{i\mu}k_{j\nu} \right) D_{00ij} . \\
\end{aligned}
```
## Conventions for the momenta
Consider the triangle diagram:

The 3-point loop function can be written either in terms of the external momenta as
```math
C\left(p_1^2, p_2^2, (p_1+p_2)^2, m_1^2, m_2^2, m_3^2 \right),
```
or in terms of the momenta $k_i$ as
```math
C\left(k_1^2, (k_1-k_2)^2, k_2^2, m_1^2, m_2^2, m_3^2 \right).
```
The coefficients computed are the coefficients of the momenta $k_i$, e.g., $C_1$ and $C_{112}$ are the coefficients of $k_{1\mu}$ and $k_{1\mu}k_{1\nu}k_{2\rho}$, respectively. The advantage of this basis is that the tensor-coefficient functions are totally symmetric in the indices.
## UV, IR and collinear divergences
### UV divergence
Using dimensional regularization, the UV divergence has the form ``1/\varepsilon -\gamma_E+\log(4\pi)``
with $\varepsilon = (4-D)/2$.
The 1-point scalar loop reads
```math
A_0(m^2) = m^2 \left[ \Delta + \log\left( \frac{\mu^2}{m^2} \right) + 1 \right],
```
where $\Delta = 1/\varepsilon -\gamma_E+\log(4\pi)$.
In `LoopTools`, the UV divergence is kept as the ``1/\varepsilon`` part, and the finite part of this
combination is set as ``\Delta``, which is $0$ (default value in `LoopTools`) in the ``\overline{\rm MS}`` scheme.
Setting ``\Delta=-2`` reproduces the one-loop functions of constrained differential renormalization
in [Nucl.Phys.B 537 (1999) 561](https://inspirehep.net/literature/472202).
A change of ``\Delta`` can be absorbed by a change of the dim. reg. scale ``\mu``:
``\mu_{\rm new}^2 = e^\Delta\mu_{\rm old}^2``.
Relevant functions: [`getdelta/setdelta`](@ref getdelta) and [`getmudim/setmudim`](@ref getmudim).
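As a quick numerical illustration of this trade-off (a sketch: the values are arbitrary, and we assume `setmudim` sets ``\mu^2`` as in the `LoopTools` manual), ``A_0(1)`` evaluated with ``\Delta=0`` at ``\mu^2=e`` should agree with ``\Delta=1`` at ``\mu^2=1``:
```julia
using LoopTools
setdelta(0.0); setmudim(exp(1.0))
a1 = A0(1.0)            # Δ + log(μ²/m²) + 1 = 0 + 1 + 1
setdelta(1.0); setmudim(1.0)
a2 = A0(1.0)            # 1 + 0 + 1
a1 ≈ a2                 # expected to hold up to rounding
```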
### IR divergence
The treatment of the IR divergence is controlled by the parameter ``\lambda^2``.
- ``\lambda^2>0``: photon-mass regularization.
- In dim. reg., ``\lambda^2=0`` gives the finite piece of the result,
and ``\lambda^2=-1`` and ``\lambda^2=-2`` return the coefficients of ``1/\varepsilon``
and ``1/\varepsilon^2``, respectively.
Relevant functions: [`getlambda/setlambda`](@ref getlambda).
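For instance (a sketch; the kinematics below are meant to mimic a massless line exchanged between two on-shell legs, but any integral can be probed this way), the pole structure can be read off by re-evaluating the same integral with different ``\lambda^2`` settings:
```julia
using LoopTools
setlambda(0.0)   # finite piece in dim. reg.
c_fin  = C0(1.0, 10.0, 1.0, 0.0, 1.0, 1.0)
setlambda(-1.0)  # coefficient of 1/ε (zero if the integral is IR finite)
c_pole = C0(1.0, 10.0, 1.0, 0.0, 1.0, 1.0)
```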
For ``\lambda^2\leq0``, the ``\varepsilon^{-1}`` component contains both the UV and IR divergences.
The UV part can be switched on and off by `setuvdiv(1)` and `setuvdiv(0)`, respectively.
Relevant functions: [`getuvdiv/setuvdiv`](@ref getuvdiv).
### Collinear divergence
For the collinear divergence, there is a parameter ``m_{\rm min}^2``;
all arguments smaller than that are set to 0 to detect the existence of collinear divergence.
If a collinear divergence is detected, ``m_{\rm min}^2`` is then substituted back for those ``p_i^2``.
Relevant functions: [`getminmass/setminmass`](@ref getminmass).
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | docs | 1769 |
# Examples

Let us compute the 3-point loop integral:
```math
C\left(p_1^2, p_2^2, (p_1+p_2)^2, m_1^2, m_2^2, m_3^2 \right)
```
with some hadron masses as input.
Tensor-coefficients:
```@example 1
using LoopTools
using PrettyPrinting, BenchmarkTools
(@btime Cget(1.87^2, 2.9^2, 5.28^2, 4.36^2, 2.01^2, 0.89^2) ) |> pprint
```
Here we used `Cget` (with an upper-case first letter), which returns only the finite piece. If `val_only = true` (the default is `false`), then the numerical values are given as an `NTuple`. If we want also the coefficients of ``ε^{-1}`` and ``ε^{-2}``, we need `cget` (lower case) or `cgetsym`, see below.
Scalar integral:
```@example 1
# or simply using `C0`
C0i(cc0, 1.87^2, 2.9^2, 5.28^2, 4.36^2, 2.01^2, 0.89^2)
```
Preallocate a vector to receive results from `bput!`:
```@example 1
const res = Vector{ComplexF64}(undef, 33)
@btime bput!(res, 1, 0.1, 0.1)
pprint(res)
```
The same result can be obtained using `bget(1, 0.1, 0.1)`, which takes only `p^2, m1^2, m2^2` as arguments; the preallocated array is already defined inside the package as `LoopTools._Bres_` (`LoopTools._Cres_` for the three-point loop, and so on).
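Note that `bget` and its relatives return this internal buffer itself, so a subsequent call overwrites earlier results; `copy` the returned vector if you need to keep it (a sketch):
```julia
r1 = copy(bget(1, 0.1, 0.1))
r2 = bget(4, 0.2, 0.2)   # reuses the same buffer; r1 is unaffected thanks to the copy
```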
Writing out the ``\varepsilon^{-1}`` (corresponding to `DR1eps` in the `Mathematica` version) terms explicitly:
```@example 1
bgetsym(1, 0.1, 0.1) |> pprint
```
```@example 1
Bget(1, 0.1, 0.1, val_only = true)
```
Real and imaginary parts of a three-point scalar integral showing
the effect of the Landau singularity:
```@example 1
using Plots; default(frame=:box, minorticks=4)
c0(x) = -C0(1.87^2, x^2, 5.28^2, (4.36-0.05im)^2, 2.01^2, 0.89^2 )
plot(2.5:0.001:3.4, x-> real(c0(x)), label="Re" )
plot!(2.5:0.001:3.4, x-> imag(c0(x)), label="Im" )
``` | LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.1.2 | 17a7dec09490b2857341327e7a39e28a651a371f | docs | 328 | ```@meta
CurrentModule = LoopTools
```
# LoopTools.jl
For more information about the loop integrals, see the [`LoopTools` manual](https://github.com/fkguo/LoopTools.jl/blob/master/docs/LT215Guide.pdf).
[The LoopTools Visitor Center](http://www.feynarts.de/looptools/)
```@index
```
```@autodocs
Modules = [LoopTools]
```
| LoopTools | https://github.com/fkguo/LoopTools.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 386 | using Documenter, ErrorfreeArithmetic
makedocs(
modules = [ErrorfreeArithmetic],
format = :html,
sitename = "ErrorfreeArithmetic",
pages = [
"Package" => "index.md",
"Errorfree Arithmetic" => "intro.md",
"Basic usage" => "usage.md"
]
)
deploydocs(
repo = "github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git",
target = "build"
)
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 1265 | module ErrorfreeArithmetic
export SysFloat,
# error-free transformations
two_sum, two_diff, two_prod, two_square,
three_sum, three_diff, three_prod,
four_sum, four_diff,
ad_minus_bc, two_fma, three_fma, two_muladd,
# error-free transformations with magnitude sorted arguments
two_hilo_sum, two_lohi_sum, two_hilo_diff, two_lohi_diff,
three_hilo_sum, three_lohi_sum, three_hilo_diff, three_lohi_diff,
four_hilo_sum, four_lohi_sum, four_hilo_diff, four_lohi_diff,
# least-error transformations, as close to error-free as possible
two_div,
two_inv, two_sqrt,
# error-free remainders
div_rem, sqrt_rem
using Base: IEEEFloat
"""
SysFloat
SysFloats are floating point types with processor fma support.
"""
const SysFloat = Union{Float64, Float32}
"""
FloatWithFMA
Floats with FMA support fused multiply-add, fma(x,y,z)
""" FloatWithFMA
if isdefined(Main, :DoubleFloats)
const FloatWithFMA = Union{Float64, Float32, Float16, Double64, Double32, Double16}
else
const FloatWithFMA = Union{Float64, Float32, Float16}
end
include("errorfree.jl")
include("errorfree_tuples.jl")
include("leasterror.jl")
include("remainder.jl")
end # ErrorfreeArithmetic
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 1383 | #=
from "Error-free transformations in real and complex floating point arithmetics"
by Stef Graillat and Valérie Ménissier-Morain
2007 International Symposium on Nonlinear Theory and its Applications
NOLTA'07, Vancouver, Canada, September 16-19, 2007
=#
@inline function two_sum(a::Complex{T}, b::Complex{T}) where {T<:Real}
hi1, lo1 = two_sum(a.re, b.re)
hi2, lo2 = two_sum(a.im, b.im)
hi = Complex{T}(hi1, hi2)
lo = Complex{T}(lo1, lo2)
return hi, lo
end
@inline function two_diff(a::Complex{T}, b::Complex{T}) where {T<:Real}
hi1, lo1 = two_diff(a.re, b.re)
hi2, lo2 = two_diff(a.im, b.im)
hi = Complex{T}(hi1, hi2)
lo = Complex{T}(lo1, lo2)
return hi, lo
end
#=
from "Error-free transformations in real and complex floating point arithmetics"
by Stef Graillat and Valérie Ménissier-Morain
2007 International Symposium on Nonlinear Theory and its Applications
NOLTA'07, Vancouver, Canada, September 16-19, 2007
=#
@inline function four_prod(a::Complex{T}, b::Complex{T}) where {T<:Real}
hi1, lo1 = two_prod(a.re, b.re)
hi2, lo2 = two_prod(a.im, b.im)
hi3, lo3 = two_prod(a.re, b.im)
hi4, lo4 = two_prod(a.im, b.re)
hi5, lo5 = two_sum(hi1, -hi2)
hi6, lo6 = two_sum(hi3, hi4)
p = Complex{T}(hi5, hi6)
q = Complex{T}(lo5, lo6)
r = Complex{T}(lo1, lo3)
s = Complex{T}(-lo2, lo4)
return p,q,r,s
end
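# Note: the four components satisfy a*b == p + q + r + s when the sum is carried
# out exactly (e.g. in higher precision); p is the rounded product, while the
# others collect the rounding errors of the sums and products above.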
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 10348 | """
two_sum(a, b)
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_sum(a::T, b::T) where {T<:Real}
hi = a + b
v = hi - a
lo = (a - (hi - v)) + (b - v)
return hi, lo
end
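# Example: two_sum(1.0, 1e-17) == (1.0, 1e-17) — the addend that plain `+`
# would round away is recovered exactly in the low-order component.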
"""
three_sum(a, b, c)
Computes `hi = fl(a+b+c)` and `md = err(a+b+c), lo = err(md)`.
"""
function three_sum(a::T,b::T,c::T) where {T}
t0, t1 = two_sum(a, b)
hi, t2 = two_sum(t0, c)
md, lo = two_sum(t1, t2)
hi, md = two_hilo_sum(hi,md)
return hi, md, lo
end
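# `magnitude_maxtomin` is not defined elsewhere in this package; the following is
# a minimal sketch of the intended helper (sort three values by descending magnitude).
function magnitude_maxtomin(a::T, b::T, c::T) where {T}
    abs(a) < abs(b) && ((a, b) = (b, a))
    abs(a) < abs(c) && ((a, c) = (c, a))
    abs(b) < abs(c) && ((b, c) = (c, b))
    return a, b, c
end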
function three_sum_magnitudes(a::T, b::T, c::T) where {T}
a, b, c = magnitude_maxtomin(a, b, c)
b, c = two_hilo_sum(b, c)
a, b = two_hilo_sum(a, b)
b, c = two_hilo_sum(b, c)
a, b = two_hilo_sum(a, b)
return a,b,c
end
"""
two_sum(a, b, c)
Computes `hi = fl(a+b+c)` and `lo = err(a+b+c)`.
"""
function two_sum(a::T,b::T,c::T) where {T}
hi1, lo1 = two_sum(b, c)
hi2, lo2 = two_sum(a, hi1)
lo12 = lo1 + lo2
hi, lo = two_hilo_sum(hi2, lo12)
return hi, lo
end
"""
four_sum(a, b, c, d)
Computes `hi = fl(a+b+c+d)` and `hm = err(a+b+c+d), ml = err(hm), lo = err(ml)`.
"""
function four_sum(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_sum(a, b)
t2, t3 = two_sum(c, d)
hi, t4 = two_sum(t0, t2)
t5, lo = two_sum(t1, t3)
hm, ml = two_sum(t4, t5)
ml, lo = two_hilo_sum(ml, lo)
hm, ml = two_hilo_sum(hm, ml)
hi, hm = two_hilo_sum(hi,hm)
return hi, hm, ml, lo
end
function vec_sum(x0::T, x1::T, x2::T, x3::T) where {T}
s3 = x3
s2, e3 = two_sum(x2, s3)
s1, e2 = two_sum(x1, s2)
s0, e1 = two_sum(x0, s1)
return s0,e1,e2,e3
end
function vsum_errbranch(x::NTuple{4,T}) where {T}
y = zeros(T, 4)
r = zeros(T, 4)
e = zeros(T, 4)
j = 1
e[1] = x[1]
for i = 1:2
r[i], t = two_sum(e[i], x[i+1])
if t !== zero(T)
y[j] = r[i]
e[i+1] = t
j += 1
else
e[i+1] = r[i]
end
end
y[j], y[j+1] = two_sum(e[3], x[4])
return y
end
function foursum(x1::T, x2::T, x3::T, x4::T) where {T}
a1, a2 = two_sum(x1, x2)
b1, b2 = two_sum(x3, x4)
c1, c2 = two_sum(a1, b1)
d1, d2 = two_sum(a2, b2)
e1to4 = vec_sum(c1,c2,d1,d2)
y = vsum_errbranch(e1to4)
return (y...,)
end
"""
three_sum(a, b, c, d)
Computes `hi = fl(a+b+c+d)` and `md = err(a+b+c+d), lo = err(md)`.
"""
function three_sum(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_sum(a , b)
t0, t2 = two_sum(t0, c)
hi, t3 = two_sum(t0, d)
t0, t1 = two_sum(t1, t2)
hm, t2 = two_sum(t0, t3) # here, t0 >= t3
ml = t1 + t2
return hi, hm, ml
end
"""
two_sum(a, b, c, d)
Computes `hi = fl(a+b+c+d)` and `lo = err(a+b+c+d)`.
"""
function two_sum(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_sum(a , b)
t0, t2 = two_sum(t0, c)
hi, t3 = two_sum(t0, d)
t0, t1 = two_sum(t1, t2)
lo = t0 + t3
return hi, lo
end
"""
five_sum(a, b, c, d, e)
Computes `s = fl(a+b+c+d+e)` and
`e1 = err(a+b+c+d+e), e2 = err(e1), e3 = err(e2), e4 = err(e3)`.
"""
function five_sum(v::T, w::T, x::T, y::T, z::T) where {T}
t0, t4 = two_sum(y, z)
t0, t3 = two_sum(x, t0)
t0, t2 = two_sum(w, t0)
a, t1 = two_sum(v, t0)
t0, t3 = two_sum(t3, t4)
t0, t2 = two_sum(t2, t0)
b, t1 = two_sum(t1, t0)
t0, t2 = two_sum(t2, t3)
c, t1 = two_sum(t1, t0)
d, e = two_sum(t1, t2)
return a, b, c, d, e
end
"""
two_diff(a, b)
Computes `s = fl(a-b)` and `e = err(a-b)`.
"""
@inline function two_diff(a::T, b::T) where {T}
hi = a - b
a1 = hi + b
b1 = hi - a1
lo = (a - a1) - (b + b1)
return hi, lo
end
"""
three_diff(a, b, c)
Computes `s = fl(a-b-c)` and `e1 = err(a-b-c), e2 = err(e1)`.
"""
function three_diff(a::T,b::T,c::T) where {T}
s, t = two_diff(-b, c)
x, u = two_sum(a, s)
y, z = two_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
four_diff(a, b, c, d)
Computes `hi = fl(a-b-c-d)` and `hm = err(a-b-c-d), ml = err(hm), lo = err(ml)`.
"""
function four_diff(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_diff(a , b)
t0, t2 = two_diff(t0, c)
hi, t3 = two_diff(t0, d)
t0, t1 = two_sum(t1, t2)
hm, t2 = two_sum(t0, t3) # here, t0 >= t3
ml, lo = two_sum(t1, t2)
return hi, hm, ml, lo
end
"""
two_square(a)
Computes `hi = fl(a*a)` and `lo = fl(err(a*a))`.
"""
@inline function two_square(a::T) where {T}
hi = a * a
lo = fma(a, a, -hi)
hi, lo
end
"""
two_prod(a, b)
Computes `hi = fl(a*b)` and `lo = fl(err(a*b))`.
"""
@inline function two_prod(a::T, b::T) where {T}
hi = a * b
lo = fma(a, b, -hi)
hi, lo
end
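# Example: two_prod(nextfloat(1.0), nextfloat(1.0)) == (1.0 + 2.0^-51, 2.0^-104):
# the exact square 1 + 2^-51 + 2^-104 splits into the rounded product and its residual.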
"""
three_prod(a, b, c)
Computes `hi = fl(a*b*c)` and `md = err(a*b*c), lo = err(md)`.
"""
function three_prod(a::T, b::T, c::T) where {T}
abhi, ablo = two_prod(a, b)
hi, abhiclo = two_prod(abhi, c)
ablochi, abloclo = two_prod(ablo, c)
md, lo, tmp = three_sum(ablochi, abhiclo, abloclo)
return hi, md, lo
end
#=
three_fma algorithm from
Sylvie Boldo and Jean-Michel Muller
Some Functions Computable with a Fused-mac
=#
"""
three_fma(a, b, c)
Computes `hi = fl(fma(a,b,c))` and `md = fl(err(fma(a,b,c))), lo = fl(err(md))`.
"""
function three_fma(a::T, b::T, c::T) where {T}
hi = fma(a, b, c)
hi0, lo0 = two_prod(a, b)
hi1, lo1 = two_sum(c, lo0)
hi2, lo2 = two_sum(hi0, hi1)
y = ((hi2 - hi) + lo2)
md, lo = two_hilo_sum(y, lo1)
return hi, md, lo
end
"""
two_fma(a, b, c)
Computes `hi = fl(fma(a,b,c))` and `lo = fl(err(fma(a,b,c)))`.
"""
function two_fma(a::T, b::T, c::T) where {T}
hi = fma(a, b, c)
hi0, lo0 = two_prod(a, b)
hi1, lo1 = two_sum(c, lo0)
hi2, lo2 = two_sum(hi0, hi1)
lo2 += lo1
lo = (hi2 - hi) + lo2
return hi, lo
end
"""
two_muladd(a, b, c)
Computes `hi = fl(muladd(a,b,c))` and `lo = fl(err(muladd(a,b,c)))`.
"""
function two_muladd(a::T, b::T, c::T) where {T}
hi = fma(a, b, c)
c_minus_hi = c - hi
lo = muladd(a, b, c_minus_hi)
return hi, lo
end
# with arguments sorted by magnitude
"""
two_hilo_sum(a, b)
*unchecked* requirement `|a| ≥ |b|`
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_hilo_sum(a::T, b::T) where {T}
hi = a + b
lo = b - (hi - a)
return hi, lo
end
"""
two_lohi_sum(a, b)
*unchecked* requirement `|b| ≥ |a|`
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_lohi_sum(a::T, b::T) where {T}
hi = b + a
lo = a - (hi - b)
return hi, lo
end
"""
two_hilo_diff(a, b)
*unchecked* requirement `|a| ≥ |b|`
Computes `hi = fl(a-b)` and `lo = err(a-b)`.
"""
@inline function two_hilo_diff(a::T, b::T) where {T}
hi = a - b
lo = (a - hi) - b
hi, lo
end
"""
two_lohi_diff(a, b)
*unchecked* requirement `|b| ≥ |a|`
Computes `hi = fl(a-b)` and `lo = err(a-b)`.
"""
@inline function two_lohi_diff(a::T, b::T) where {T}
hi = b - a
lo = (b - hi) - a
hi, lo
end
"""
three_hilo_sum(a, b, c)
*unchecked* requirement `|a| ≥ |b| ≥ |c|`
Computes `x = fl(a+b+c)` and `y = err(a+b+c), z = err(y)`.
"""
function three_hilo_sum(a::T,b::T,c::T) where {T}
s, t = two_hilo_sum(b, c)
x, u = two_hilo_sum(a, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
three_lohi_sum(a, b, c)
*unchecked* requirement `|c| ≥ |b| ≥ |a|`
Computes `x = fl(a+b+c)` and `y = err(a+b+c), z = err(y)`.
"""
function three_lohi_sum(a::T,b::T,c::T) where {T}
s, t = two_hilo_sum(b, a)
x, u = two_hilo_sum(c, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
three_hilo_diff(a, b, c)
*unchecked* requirement `|a| ≥ |b| ≥ |c|`
Computes `x = fl(a-b-c)` and `y = err(a-b-c), z = err(y)`.
"""
function three_hilo_diff(a::T,b::T,c::T) where {T}
s, t = two_hilo_diff(-b, c)
x, u = two_hilo_sum(a, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
three_lohi_diff(a, b, c)
*unchecked* requirement `|c| ≥ |b| ≥ |a|`
Computes `x = fl(a-b-c)` and `y = err(a-b-c), z = err(y)`.
"""
function three_lohi_diff(c::T,b::T,a::T) where {T}
s, t = two_hilo_diff(-b, c)
x, u = two_hilo_sum(a, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
four_hilo_sum(a, b, c, d)
*unchecked* requirement `|a| ≥ |b| ≥ |c| ≥ |d|`
Computes `hi = fl(a+b+c+d)` and `hm = err(a+b+c+d), ml = err(hm), lo = err(ml)`.
"""
function four_hilo_sum(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_hilo_sum(a , b)
t0, t2 = two_hilo_sum(t0, c)
hi, t3 = two_hilo_sum(t0, d)
t0, t1 = two_hilo_sum(t1, t2)
hm, t2 = two_hilo_sum(t0, t3) # here, t0 >= t3
ml, lo = two_hilo_sum(t1, t2)
return hi, hm, ml, lo
end
"""
four_lohi_sum(a, b, c, d)
*unchecked* requirement `|d| ≥ |c| ≥ |b| ≥ |a|`
Computes `hi = fl(a+b+c+d)` and `hm = err(a+b+c+d), ml = err(hm), lo = err(ml)`.
"""
function four_lohi_sum(d::T,c::T,b::T,a::T) where {T}
t0, t1 = two_hilo_sum(a , b)
t0, t2 = two_hilo_sum(t0, c)
hi, t3 = two_hilo_sum(t0, d)
t0, t1 = two_hilo_sum(t1, t2)
hm, t2 = two_hilo_sum(t0, t3)
ml, lo = two_hilo_sum(t1, t2)
return hi, hm, ml, lo
end
"""
four_hilo_diff(a, b, c, d)
*unchecked* requirement `|a| ≥ |b| ≥ |c| ≥ |d|`
Computes `hi = fl(a-b-c-d)` and `hm = err(a-b-c-d), ml = err(hm), lo = err(ml)`.
"""
function four_hilo_diff(a::T,b::T,c::T,d::T) where {T}
t0, t1 = two_hilo_diff(a, b)
t0, t2 = two_hilo_diff(t0, c)
hi, t3 = two_hilo_diff(t0, d)
t0, t1 = two_hilo_sum(t1, t2)
hm, t2 = two_hilo_sum(t0, t3) # here, t0 >= t3
ml, lo = two_hilo_sum(t1, t2)
return hi, hm, ml, lo
end
"""
four_lohi_diff(a, b, c, d)
*unchecked* requirement `|d| ≥ |c| ≥ |b| ≥ |a|`
Computes `hi = fl(a-b-c-d)` and `hm = err(a-b-c-d), ml = err(hm), lo = err(ml)`.
"""
function four_lohi_diff(d::T,c::T,b::T,a::T) where {T}
t0, t1 = two_hilo_diff(a, b)
t0, t2 = two_hilo_diff(t0, c)
hi, t3 = two_hilo_diff(t0, d)
t0, t1 = two_hilo_sum(t1, t2)
hm, t2 = two_hilo_sum(t0, t3) # here, t0 >= t3
ml, lo = two_hilo_sum(t1, t2)
return hi, hm, ml, lo
end
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 4911 | """
two_sum(a, b)
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_sum(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .+ b
v = hi .- a
lo = (a .- (hi .- v)) .+ (b .- v)
return hi, lo
end
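# Example (elementwise over a 2-tuple):
#   two_sum((1.0, 2.0), (1e-17, 2e-17)) == ((1.0, 2.0), (1e-17, 2e-17))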
"""
two_sum(a, b, c)
Computes `hi = fl(a+b+c)` and `lo = err(a+b+c)`.
"""
function two_sum(a::T,b::T,c::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
s, t = two_sum(b, c)
hi, u = two_sum(a, s)
lo = u .+ t
hi, lo = two_hilo_sum(hi, lo)
return hi, lo
end
"""
three_sum(a, b, c, d)
Computes `hi = fl(a+b+c+d)` and `md = err(a+b+c+d), lo = err(md)`.
"""
function three_sum(a::T,b::T,c::T,d::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
t0, t1 = two_sum(a , b)
t0, t2 = two_sum(t0, c)
hi, t3 = two_sum(t0, d)
t0, t1 = two_sum(t1, t2)
hm, t2 = two_sum(t0, t3) # here, t0 >= t3
ml = t1 .+ t2
return hi, hm, ml
end
"""
two_sum(a, b, c, d)
Computes `hi = fl(a+b+c+d)` and `lo = err(a+b+c+d)`.
"""
function two_sum(a::T,b::T,c::T,d::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
t0, t1 = two_sum(a , b)
t0, t2 = two_sum(t0, c)
hi, t3 = two_sum(t0, d)
t0, t1 = two_sum(t1, t2)
lo = t0 .+ t3
return hi, lo
end
"""
two_diff(a, b)
Computes `s = fl(a-b)` and `e = err(a-b)`.
"""
@inline function two_diff(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .- b
a1 = hi .+ b
b1 = hi .- a1
lo = (a .- a1) .- (b .+ b1)
return hi, lo
end
"""
three_diff(a, b, c)
Computes `s = fl(a-b-c)` and `e1 = err(a-b-c), e2 = err(e1)`.
"""
function three_diff(a::T,b::T,c::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
negb = (-).(b)
s, t = two_diff(negb, c)
x, u = two_sum(a, s)
y, z = two_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
two_square(a)
Computes `s = fl(a*a)` and `e = err(a*a)`.
"""
@inline function two_square(a::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .* a
neghi = (-).(hi)
lo = (fma).(a, a, neghi)
hi, lo
end
"""
two_prod(a, b)
Computes `s = fl(a*b)` and `e = err(a*b)`.
"""
@inline function two_prod(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .* b
neghi = (-).(hi)
lo = (fma).(a, b, neghi)
hi, lo
end
#=
three_fma algorithm from
Sylvie Boldo and Jean-Michel Muller
Some Functions Computable with a Fused-mac
=#
"""
three_fma(a, b, c)
Computes `s = fl(fma(a,b,c))` and `e1 = err(fma(a,b,c)), e2 = err(e1)`.
"""
function three_fma(a::T, b::T, c::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
x = (fma).(a, b, c)
y, z = two_prod(a, b)
t, z = two_sum(c, z)
t, u = two_sum(y, t)
y = ((t .- x) .+ u)
y, z = two_hilo_sum(y, z)
infs = isinf.(x)
if any(infs)
ys = [y...]
zs = [z...]
ys[infs] .= zero(F)
zs[infs] .= zero(F)
y = (ys...,)
z = (zs...,)
end
return x, y, z
end
# with arguments sorted by magnitude
"""
two_hilo_sum(a, b)
*unchecked* requirement `|a| ≥ |b|`
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_hilo_sum(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .+ b
lo = b .- (hi .- a)
return hi, lo
end
"""
two_lohi_sum(a, b)
*unchecked* requirement `|b| ≥ |a|`
Computes `hi = fl(a+b)` and `lo = err(a+b)`.
"""
@inline function two_lohi_sum(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = b .+ a
lo = a .- (hi .- b)
return hi, lo
end
"""
two_hilo_diff(a, b)
*unchecked* requirement `|a| ≥ |b|`
Computes `hi = fl(a-b)` and `lo = err(a-b)`.
"""
@inline function two_hilo_diff(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = a .- b
lo = (a .- hi) .- b
hi, lo
end
"""
two_lohi_diff(a, b)
*unchecked* requirement `|b| ≥ |a|`
Computes `hi = fl(a-b)` and `lo = err(a-b)`.
"""
@inline function two_lohi_diff(a::T, b::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
hi = b .- a
lo = (b .- hi) .- a
hi, lo
end
"""
three_hilo_diff(a, b, c)
*unchecked* requirement `|a| ≥ |b| ≥ |c|`
Computes `x = fl(a-b-c)` and `y = err(a-b-c), z = err(y)`.
"""
function three_hilo_diff(a::T,b::T,c::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
negb = (-).(b)
s, t = two_hilo_diff(negb, c)
x, u = two_hilo_sum(a, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
"""
three_lohi_diff(a, b, c)
*unchecked* requirement `|c| ≥ |b| ≥ |a|`
Computes `x = fl(a-b-c)` and `y = err(a-b-c), z = err(y)`.
"""
function three_lohi_diff(c::T,b::T,a::T) where {N, F<:Base.IEEEFloat, T<:NTuple{N,F}}
negb = (-).(b)
s, t = two_hilo_diff(negb, c)
x, u = two_hilo_sum(a, s)
y, z = two_hilo_sum(u, t)
x, y = two_hilo_sum(x, y)
return x, y, z
end
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 2157 | const two27 = Float64(1<<27)
const two12 = Float32(1<<12)
const two6 = Float16(1<<6)
"""
extractscalar
Split a value relative to an integral power of 2:
`hi` is the high-order part of `value`, `lo` is the residual part, and
`hi + lo == value` exactly.
"""
for (T,P) in ((:Float64, 53), (:Float32, 24), (:Float16, 11))
@eval begin
@inline function extractscalar(value::$T)
powoftwo = ldexp(one($T), ($P - exponent(value)) >> 1) # P == precision(T)
return extractscalar(powoftwo, value)
end
end
end
function extractscalar(powoftwo::T, value::T) where {T}
hi = (powoftwo + value) - powoftwo
lo = value - hi
return hi, lo
end
"""
two_cube(a)
Computes `s = fl(a*a*a)` and `e1 = err(a*a*a)`.
"""
function two_cube(value::T) where {T}
hi, lo = extractscalar(value)
hhh = three_prod(hi, hi, hi)
hhl = three_prod(hi, hi, 3*lo)
hll = three_prod(hi, 3*lo, lo)
lll = three_prod(lo, lo, lo)
himh = four_sum(hhh[1], hhl[1], hhh[2], hll[1])
mllo = four_sum(hhl[2], hll[2], lll[1], lll[2])
hilo = four_sum(himh[1], himh[2], mllo[1], mllo[2])
return hilo[1], hilo[2]
end
"""
three_cube(a)
Computes `s = fl(a*a*a)` and `e1 = err(a*a*a), e2 = err(e1)`.
"""
function three_cube(value::T) where {T}
hi, lo = extractscalar(value)
hhh = three_prod(hi, hi, hi)
hhl = three_prod(hi, hi, 3*lo)
hll = three_prod(hi, 3*lo, lo)
lll = three_prod(lo, lo, lo)
himh = four_sum(hhh[1], hhl[1], hhh[2], hll[1])
mllo = four_sum(hhl[2], hll[2], lll[1], lll[2])
hilo = four_sum(himh[1], himh[2], mllo[1], mllo[2])
return hilo[1], hilo[2], hilo[3]
end
"""
four_cube(a)
Computes `s = fl(a*a*a)` and `e1 = err(a*a*a), e2 = err(e1), e3 = err(e2)`.
"""
function four_cube(value::T) where {T}
hi, lo = extractscalar(value)
hhh = three_prod(hi, hi, hi)
hhl = three_prod(hi, hi, 3*lo)
hll = three_prod(hi, 3*lo, lo)
lll = three_prod(lo, lo, lo)
himh = four_sum(hhh[1], hhl[1], hhh[2], hll[1])
mllo = four_sum(hhl[2], hll[2], lll[1], lll[2])
hilo = four_sum(himh[1], himh[2], mllo[1], mllo[2])
return hilo
end
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 1337 | @inline function two_inv(b::T) where {T}
hi = inv(b)
lo = fma(-hi, b, one(T))
lo /= b
return hi, lo
end
@inline function two_div(a::T, b::T) where {T}
hi = a / b
lo = fma(-hi, b, a)
lo /= b
return hi, lo
end
@inline function two_sqrt(a::T) where {T}
hi = sqrt(a)
lo = fma(-hi, hi, a)
lo /= 2
lo /= hi
return hi, lo
end
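# Example: hi, lo = two_sqrt(2.0) gives hi == sqrt(2.0); hi + lo (in exact
# arithmetic) is a closer approximation to √2 than hi alone. This is a
# least-error, not strictly error-free, transformation (see the note below).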
"""
ad_minus_bc(a, b, c, d)
Computes the determinant of a 2x2 matrix.
"""
function ad_minus_bc(a::T, b::T, c::T, d::T) where {T}
adhi, adlo = two_prod(a,d)
bchi, bclo = two_prod(b,c)
return four_sum(adhi, adlo, -bchi, -bclo)
end
#=
"Concerning the division, the elementary rounding error is
generally not a floating point number, so it cannot be computed
exactly. Hence we cannot expect to obtain an error
free transformation for the division. ...
This means that the computed approximation is as good as
we can expect in the working precision."
-- http://perso.ens-lyon.fr/nicolas.louvet/LaLo05.pdf
While the sqrt algorithm is not strictly an errorfree transformation,
it is known to be reliable and is recommended for general use.
"Augmented precision square roots, 2-D norms and
discussion on correctly rounding x*sqrt(x^2 + y^2)"
by Nicolas Brisebarre, Mioara Joldes, Erik Martin-Dorel,
Jean-Michel Muller, Peter Kornerup
=#
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 733 | """
div_rem(x, y)
Computes `quotient = fl(x/y)` and `remainder = x - quotient*y` such that `(x-remainder) / y = quotient`.
"""
@inline function div_rem(x::T, y::T) where {T}
quotient = x / y
remainder = fma(-y, quotient, x)
return quotient, remainder
end
"""
sqrt_rem(x)
Computes `root = fl(sqrt(x))` and `remainder = x - root*root` such that `sqrt(x-remainder) = root`.
"""
@inline function sqrt_rem(x::T) where {T}
root = sqrt(x)
remainder = fma(-root, root, x)
return root, remainder
end
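# Example: root, rem = sqrt_rem(2.0) gives root == sqrt(2.0) and
# rem == 2 - root*root computed exactly by the fma (a small negative number here,
# since fl(√2) lies slightly above √2).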
#=
Those remainders are exact.
For more informations, applications and additional references, see the introduction of
Sylvie Boldo and Jean-Michel Muller
Some Functions Computable with a Fused-mac
=#
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | code | 4199 | using ErrorfreeArithmetic
using Test
Base.BigFloat(x::T) where {T} = Base.convert(BigFloat, x)
function hilo(::Type{T}, x::BigFloat) where {T}
hi = T(x)
lo = T(x - hi)
return hi, lo
end
function himdlo(::Type{T}, x::BigFloat) where {T}
hi = T(x)
md = T(x - hi)
lo = T(x - hi - md)
return hi, md, lo
end
function calc_two_sum(a::T, b::T) where {T}
aa, bb = BigFloat(a), BigFloat(b)
ab = aa + bb
return hilo(T, ab)
end
function calc_two_diff(a::T, b::T) where {T}
aa, bb = BigFloat(a), BigFloat(b)
ab = aa - bb
return hilo(T, ab)
end
function calc_two_prod(a::T, b::T) where {T}
aa, bb = BigFloat(a), BigFloat(b)
ab = aa * bb
return hilo(T, ab)
end
function calc_two_square(a::T) where {T}
aa = BigFloat(a)
ab = aa * aa
return hilo(T, ab)
end
function calc_two_cube(a::T) where {T}
aa = BigFloat(a)
ab = aa * aa * aa
return hilo(T, ab)
end
function calc_two_inv(a::T) where {T}
aa = BigFloat(a)
ab = inv(aa)
return hilo(T, ab)
end
function calc_two_div(a::T, b::T) where {T}
aa, bb = BigFloat(a), BigFloat(b)
ab = aa / bb
return hilo(T, ab)
end
function calc_two_sqrt(a::T) where {T}
aa = BigFloat(a)
ab = sqrt(aa)
return hilo(T, ab)
end
function calc_two_cbrt(a::T) where {T}
aa = BigFloat(a)
ab = cbrt(aa)
return hilo(T, ab)
end
function calc_three_sum(a::T, b::T, c::T) where {T}
aa, bb, cc = BigFloat(a), BigFloat(b), BigFloat(c)
abc = aa + bb + cc
return himdlo(T, abc)
end
function calc_three_diff(a::T, b::T, c::T) where {T}
aa, bb, cc = BigFloat(a), BigFloat(b), BigFloat(c)
abc = aa - bb - cc
return himdlo(T, abc)
end
function calc_three_prod(a::T, b::T, c::T) where {T}
aa, bb, cc = BigFloat(a), BigFloat(b), BigFloat(c)
abc = aa * bb * cc
return himdlo(T, abc)
end
function isclosest(lo::T, low::T) where {T}
lo === low || ((abs(lo - low) <= abs(lo - nextfloat(low))) && (abs(lo - low) <= abs(lo - prevfloat(low))))
end
function test_two_sum(a::T, b::T) where {T}
hi, lo = two_sum(a, b)
high, low = calc_two_sum(a, b)
hi === high && lo === low
end
function test_two_diff(a::T, b::T) where {T}
hi, lo = two_diff(a, b)
high, low = calc_two_diff(a, b)
hi === high && lo === low
end
function test_two_square(a::T) where {T}
hi, lo = two_square(a)
high, low = calc_two_square(a)
hi === high && lo === low
end
function test_two_cube(a::T) where {T}
hi, lo = two_cube(a)
high, low = calc_two_cube(a)
hi === high && lo === low
end
function test_two_prod(a::T, b::T) where {T}
hi, lo = two_prod(a, b)
high, low = calc_two_prod(a, b)
hi === high && lo === low
end
function test_two_sqrt(a::T) where {T}
hi, lo = two_sqrt(a)
high, low = calc_two_sqrt(a)
hi === high && isclosest(lo, low)
end
#=
function test_two_cbrt(a::T) where {T}
hi, lo = two_cbrt(a)
high, low = calc_two_cbrt(a)
hi === high && isclosest(lo, low)
end
=#
function test_two_inv(a::T) where {T}
hi, lo = two_inv(a)
high, low = calc_two_inv(a)
hi === high && isclosest(lo, low)
end
function test_two_div(a::T, b::T) where {T}
hi, lo = two_div(a, b)
high, low = calc_two_div(a, b)
hi === high && isclosest(lo, low)
end
function test_three_sum(a::T, b::T, c::T) where {T}
hi, md, lo = three_sum(a, b, c)
high, mid, low = calc_three_sum(a, b, c)
hi === high && md === mid && lo === low
end
function test_three_diff(a::T, b::T, c::T) where {T}
hi, md, lo = three_diff(a, b, c)
high, mid, low = calc_three_diff(a, b, c)
hi === high && md === mid && lo === low
end
function test_three_prod(a::T, b::T, c::T) where {T}
hi, md, lo = three_prod(a, b, c)
high, mid, low = calc_three_prod(a, b, c)
hi === high && md === mid && lo === low
end
a = sqrt(2.0)
b = sqrt(987654.0)
c = cbrt(456.125)
@test test_two_sum(a, b)
@test test_two_diff(a, b)
@test test_two_square(b)
@test test_two_prod(a, b)
@test test_two_inv(b)
@test test_two_div(a, b)
@test test_two_sqrt(b)
@test test_three_sum(a, b, c)
@test test_three_diff(a, b, c)
@test test_three_prod(a, b, c)
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.5.2 | d6863c556f1142a061532e79f611aa46be201686 | docs | 4287 | # ErrorfreeArithmetic.jl
Error-free transformations are used to get results with extra accuracy.
#### Copyright © 2016-2020 by Jeffrey Sarnoff. Released under the MIT License.
-----
[](https://travis-ci.org/JeffreySarnoff/ErrorfreeArithmetic.jl)
-----
## exports
- The number that begins a function name always matches the number of values returned.
- The values returned are of descending magnitude and non-overlapping when added.
- The number that begins a function name often matches the number of arguments expected.
- `two_inv` and `two_sqrt` are single-argument functions returning two values.
*These are error-free transformations.*
- `two_sum`, `two_diff`, `two_prod`
- `two_square`, `two_cube`
- `three_sum`, `three_diff`, `three_prod`
- `two_fma`, `three_fma`
- `four_sum`, `four_diff`
*These are error-free transformations with magnitude sorted arguments.*
- `two_hilo_sum`, `two_lohi_sum`
- `two_hilo_diff`, `two_lohi_diff`
- `three_hilo_sum`, `three_lohi_sum`
- `three_hilo_diff`, `three_lohi_diff`
- `four_hilo_sum`, `four_lohi_sum`
- `four_hilo_diff`, `four_lohi_diff`
*These are least-error transformations, as close to error-free as possible.*
- `two_inv`, `two_sqrt`
- `two_div`
### naming
The routines named with the prefix `two_` return a two-tuple holding `(high_order_part, low_order_part)`.
Those named with the prefix `three_` return a three-tuple holding `(high_part, mid_part, low_part)`.
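For example, a minimal sketch (the exact `lo` below is what IEEE Float64 arithmetic yields):

```julia
using ErrorfreeArithmetic

hi, lo = two_sum(1.0, 2.0^-53)   # (1.0, 1.1102230246251565e-16)
hi == 1.0 + 2.0^-53              # true: hi is the correctly rounded sum
hi + lo == hi                    # true: lo holds the part hi cannot represent
```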
## introduction
Error-free transformations return a tuple of the nominal result and the residual from the result (the left-over part).
Error-free addition sums two floating point values (a, b) and returns two floating point values (hi, lo) such that:
* `(+)(a, b) == hi`
* `|hi| > |lo|` and `(+)(hi, lo) == hi` *(the significant bits of `hi` and `lo` do not overlap)*
Here is how it is done:
```julia
function add_errorfree(a::T, b::T) where T<:Union{Float64, Float32}
a_plus_b_hipart = a + b
b_asthe_summand = a_plus_b_hipart - a
a_plus_b_lopart = (a - (a_plus_b_hipart - b_asthe_summand)) + (b - b_asthe_summand)
return a_plus_b_hipart, a_plus_b_lopart
end
a = Float32(1/golden^2) # 0.3819_6602f0
b = Float32(pi^3) # 31.0062_7700f0
a_plus_b = a + b # 31.3882_4300f0
hi, lo = add_errorfree(a,b) # (31.3882_4300f0, 3.8743_0270f-7)
a_plus_b == hi # true
abs(hi) > abs(lo) && hi + lo == hi # true
```
The `lo` part is a portion of the accurate value; it is (most of) the residuum that the `hi` part could not represent.
The `hi` part runs out of significant bits before all of the accurate value is represented. We can see this:
```julia
a = Float32(1/golden^2) # 0.3819_6602f0
b = Float32(pi^3) # 31.0062_7700f0
hi, lo = add_errorfree(a,b) # (31.3882_4300f0, 3.8743_0270f-7)
a_plus_b_accurate = BigFloat(a) + BigFloat(b)
lo_accurate = Float32(a_plus_b_accurate - hi)
lo == lo_accurate # true
```
## use
This package is intended to be used in the support of other work.
All routines expect Float64 or Float32 or Float16 values.
## references
Takeshi Ogita, Siegfried M. Rump, and Shin'ichi Oishi
Accurate Sum and Dot Product
SIAM J. Sci. Comput., 26(6), 1955–1988.
Published online: 25 July 2006
[DOI: 10.1137/030601818](http://dx.doi.org/10.1137/030601818)
Stef Graillat, Valérie Ménissier-Morain
Error-Free Transformations in Real and Complex Floating Point Arithmetic
International Symposium on Nonlinear Theory and its Applications (NOLTA'07), Sep 2007, Vancouver, Canada.
Proceedings of International Symposium on Nonlinear Theory and its Applications (NOLTA'07), pp.341-344.
https://hal.archives-ouvertes.fr/hal-01306229
Sylvie Boldo, Stef Graillat, and Jean-Michel Muller
On the robustness of the 2Sum and Fast2Sum algorithms
ACM Transactions on Mathematical Software, Association for Computing Machinery, 2017
https://hal.inria.fr/ensl-01310023
| ErrorfreeArithmetic | https://github.com/JeffreySarnoff/ErrorfreeArithmetic.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 217 | using Documenter, LSODA
makedocs(
modules=[LSODA],
doctest = false
)
deploydocs(
deps = Deps.pip("mkdocs","python-markdown-math"),
repo = "github.com/rveltz/LSODA.jl.git",
julia = "release"
)
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 846 | __precompile__()
module LSODA
using Compat, DiffEqBase
import DiffEqBase: solve
using LinearAlgebra, Printf
using LSODA_jll
const warnkeywords =
(:save_idxs, :d_discontinuities, :isoutofdomain, :unstable_check,
:calck, :progress, :timeseries_steps, :dense,
:dtmin, :dtmax,
:internalnorm, :gamma, :beta1, :beta2, :qmax, :qmin, :qoldinit)
function __init__()
global warnlist = Set(warnkeywords)
end
abstract type LSODAAlgorithm <: DiffEqBase.AbstractODEAlgorithm end
struct lsoda <: LSODAAlgorithm end
SciMLBase.alg_order(alg::lsoda) = 12
export lsoda, lsoda_0, lsoda_opt_t, lsoda_context_t, lsoda_prepare, lsoda_reset, lsoda_free, lsoda_evolve!, UserFunctionAndData
export LSODAAlgorithm, solve
include("types_and_consts.jl")
include("handle.jl")
include("solver.jl")
include("common.jl")
end # module
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 6948 | @noinline function old_cfunction(f, r, a)
ccall(:jl_function_ptr, Ptr{Cvoid}, (Any, Any, Any), f, r, a)
end
## Common Interface Solve Functions
mutable struct CommonFunction{F,P}
func::F
p::P
neq::Cint
end
function commonfun(t::T1,y::T2,yp::T3,comfun::CommonFunction) where {T1,T2,T3}
y_ = unsafe_wrap(Array,y,comfun.neq)
ydot_ = unsafe_wrap(Array,yp,comfun.neq)
comfun.func(ydot_,y_,comfun.p,t)
return Int32(0)
end
function DiffEqBase.__solve(
prob::DiffEqBase.AbstractODEProblem{uType,tupType,isinplace},
alg::LSODAAlgorithm,
timeseries=[],ts=[],ks=[];
verbose=true,
abstol=1/10^6,reltol=1/10^3,
tstops=Float64[],
saveat=Float64[], maxiter=Int(1e5),
callback=nothing,
timeseries_errors=true,
save_everystep=isempty(saveat),
save_start = save_everystep || isempty(saveat) || typeof(saveat) <: Number ? true : prob.tspan[1] in saveat,
userdata=nothing,
alias_u0=false,
kwargs...) where {uType,tupType,isinplace}
tType = eltype(tupType)
if verbose
warned = !isempty(kwargs) && check_keywords(alg, kwargs, warnlist)
if !(typeof(prob.f) <: DiffEqBase.AbstractParameterizedFunction)
if DiffEqBase.has_tgrad(prob.f)
@warn("Explicit t-gradient given to this stiff solver is ignored.")
warned = true
end
if DiffEqBase.has_jac(prob.f)
@warn("Explicit Jacobian given to this stiff solver is ignored.")
warned = true
end
end
warned && warn_compat()
end
if prob.f.mass_matrix != I
error("This solver is not able to use mass matrices.")
end
if callback !== nothing || :callback in keys(prob.kwargs)
error("LSODA is not compatible with callbacks.")
end
tspan = prob.tspan
t0 = tspan[1]
T = tspan[end]
if typeof(saveat) <: Number
if (tspan[1]:saveat:tspan[end])[end] == tspan[end]
saveat_vec = convert(Vector{tType},collect(tType,tspan[1]+saveat:saveat:tspan[end]))
else
saveat_vec = convert(Vector{tType},collect(tType,tspan[1]+saveat:saveat:(tspan[end]-saveat)))
end
else
saveat_vec = convert(Vector{tType},collect(saveat))
end
if !isempty(saveat_vec) && saveat_vec[end] == tspan[2]
pop!(saveat_vec)
end
if !isempty(saveat_vec) && saveat_vec[1] == tspan[1]
save_ts = sort(unique([saveat_vec;T]))
else
save_ts = sort(unique([t0;saveat_vec;T]))
end
if T < save_ts[end]
error("Final saving timepoint is past the solving timespan")
end
if t0 > save_ts[1]
error("First saving timepoint is before the solving timespan")
end
if !isempty(tstops)
error("tstops is not supported for this solver. Please use saveat instead")
end
if typeof(prob.u0) <: Number
u0 = [prob.u0]
else
if alias_u0
u0 = vec(prob.u0)
else
u0 = vec(deepcopy(prob.u0))
end
end
sizeu = size(prob.u0)
### Fix the more general function to the LSODA-allowed style
if !isinplace && (typeof(prob.u0)<:Vector{Float64} || typeof(prob.u0)<:Number)
f! = (du,u,p,t) -> (du[:] = prob.f(u,p,t); nothing)
elseif !isinplace && typeof(prob.u0)<:AbstractArray
f! = (du,u,p,t) -> (du[:] = vec(prob.f(reshape(u,sizeu),p,t)); nothing)
elseif typeof(prob.u0)<:Vector{Float64}
f! = prob.f
else # Then it's an in-place function on an abstract array
f! = (du,u,p,t) -> (prob.f(reshape(du,sizeu),reshape(u,sizeu),p,t); nothing)
end
ures = Vector{Float64}[]
push!(ures,u0)
utmp = copy(u0)
utmp2= copy(u0)
ttmp = [t0]
t = [t0]
t2 = [t0]
save_start ? ts = [t0] : ts = typeof(t0)[]
neq = Int32(length(u0))
comfun = CommonFunction(f!,prob.p,neq)
atol = ones(Float64,neq)
rtol = ones(Float64,neq)
if typeof(abstol) == Float64
atol *= abstol
else
atol = copy(abstol)
end
if typeof(reltol) == Float64
rtol *= reltol
else
rtol = copy(reltol)
end
GC.@preserve comfun atol rtol begin
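# keep a global reference to comfun so the object backing the C callback
# pointer stays rooted while the solver runs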
global ___ref = comfun
opt = lsoda_opt_t(mxstep = maxiter)
opt.ixpr = 0
opt.rtol = pointer(rtol)
opt.atol = pointer(atol)
if save_everystep
itask_tmp = 2
else
itask_tmp = 1
end
opt.itask = itask_tmp
function get_cfunction(comfun::T) where T
@cfunction commonfun Cint (Cdouble, Ptr{Cdouble}, Ptr{Cdouble}, Ref{T})
end
fex_c = get_cfunction(comfun)
ctx = lsoda_context_t()
ctx.function_ = fex_c
ctx.neq = neq
ctx.state = 1
ctx.data = pointer_from_objref(comfun)
ch = ContextHandle(ctx)
lsoda_prepare(ctx,opt)
for k in 2:length(save_ts)
ttmp[1] = save_ts[k]
if t[1] < ttmp[1]
while t[1] < ttmp[1]
lsoda(ctx, utmp, t, ttmp[1])
if t[1] > ttmp[1] # overstepped, interpolate back
t2[1] = t[1] # save step values
copyto!(utmp2,utmp) # save step values
opt.itask = 1 # change to interpolating
lsoda(ctx, utmp, t, ttmp[1])
opt.itask = itask_tmp
push!(ures, copy(utmp))
push!(ts, t[1])
# don't overstep the last timestep
if k != length(save_ts) && save_ts[k+1] > t2[1]
push!(ures, copy(utmp2))
push!(ts, t2[1])
end
copyto!(utmp, utmp2)
t[1] = t2[1]
else
push!(ures, copy(utmp))
push!(ts,t[1])
end
end
else
t2[1] = t[1] # save step values
copyto!(utmp2, utmp) # save step values
opt.itask = 1 # change to interpolating
lsoda(ctx, utmp, t, ttmp[1])
opt.itask = itask_tmp
push!(ures, copy(utmp))
push!(ts, t[1])
if k != length(save_ts) && save_ts[k+1] > t2[1] # don't overstep the last timestep
push!(ures,copy(utmp2))
push!(ts,t2[1])
end
copyto!(utmp,utmp2)
t[1] = t2[1]
end
end
### Finishing Routine
timeseries = uType[]
save_start ? start_idx = 1 : start_idx = 2
if typeof(prob.u0)<:Number
for i=start_idx:length(ures)
push!(timeseries,ures[i][1])
end
else
for i=start_idx:length(ures)
push!(timeseries,reshape(ures[i],sizeu))
end
end
lsoda_free(ch)
global ___ref = nothing
end
DiffEqBase.build_solution(prob, alg, ts, timeseries,
timeseries_errors = timeseries_errors,
retcode = ReturnCode.Success)
end
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 730 | abstract type AbstractLSODAHandle end
mutable struct ContextHandle <: AbstractLSODAHandle
ctx::lsoda_context_t
freed::Bool
function (::Type{ContextHandle})(ctx::lsoda_context_t)
h = new(ctx,false)
finalizer(release_handle,h)
return h
end
end
release_handle(ch::ContextHandle) = lsoda_free(ch)
function lsoda_free(ch::ContextHandle)
if !ch.freed
lsoda_free(ch.ctx)
ch.freed = true
end
nothing
end
# Now wrap the rest of the APIs for convenience
lsoda_reset(ch::ContextHandle) = lsoda_reset(ch.ctx)
lsoda_prepare(ch::ContextHandle,opt::lsoda_opt_t) = lsoda_prepare(ch.ctx,opt)
lsoda(ch::ContextHandle,y::Vector,t::Vector{Float64},tout) = lsoda(ch.ctx,y,t,tout)
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 3780 | function lsodafun(t::T1, y::T2, yp::T3, userfun::UserFunctionAndData) where {T1, T2, T3}
y_ = unsafe_wrap(Array,y,userfun.neq)
ydot_ = unsafe_wrap(Array,yp,userfun.neq)
userfun.func(t, y_, ydot_,userfun.data)
return Int32(0)
end
function lsoda_0(f::Function, y0::Vector{Float64}, tspan::Vector{Float64}; userdata::Any=nothing, reltol::Union{Float64,Vector}=1e-4, abstol::Union{Float64,Vector}=1e-10)
neq = Int32(length(y0))
userfun = UserFunctionAndData(f, userdata,neq)
atol = ones(Float64,neq)
rtol = ones(Float64,neq)
if typeof(abstol) == Float64
atol *= abstol
else
atol = copy(abstol)
end
if typeof(reltol) == Float64
rtol *= reltol
else
rtol = copy(reltol)
end
t = Float64[tspan[1]]
tout = Float64[tspan[2]]
y = copy(y0)
#
opt = lsoda_opt_t()
opt.ixpr = 0
opt.rtol = pointer(rtol)
opt.atol = pointer(atol)
opt.itask = 1
#
ctx = lsoda_context_t()
ctx.function_ = @cfunction($lsodafun,Cint,(Cdouble,Ptr{Cdouble},Ptr{Cdouble},Ref{UserFunctionAndData})).ptr
ctx.neq = neq
ctx.state = 1
ctx.data = pointer_from_objref(userfun)
lsoda_prepare(ctx,opt)
for i=1:12
lsoda(ctx,y,t,tout[1])
@assert (ctx.state >0) string("LSODA error istate = ", ctx.state)
@printf("at t = %12.4e y= %14.6e %14.6e %14.6e\n",t[1],y[1], y[2], y[3])
tout[1] *= 10.0E0
end
end
"""
lsoda(f::Function, y0::Vector{Float64}, tspan::Vector{Float64}; userdata::Any=nothing, reltol::Union{Float64,Vector}=1e-4, abstol::Union{Float64,Vector}=1e-10)
Solves a set of ordinary differential equations using the LSODA algorithm. The vector field, encoded in an in-place `f::Function`, needs to have the self-explanatory arguments `f(t, y, ydot, data)`.
"""
function lsoda(f::Function, y0::Vector{Float64}, tspan::Vector{Float64}; userdata::Any=nothing, reltol::Union{Float64,Vector}=1e-4, abstol::Union{Float64,Vector}=1e-10,nbsteps = 10000)
neq = Int32(length(y0))
userfun = UserFunctionAndData(f, userdata, neq)
atol = ones(Float64,neq)
rtol = ones(Float64,neq)
yres = zeros(length(tspan), length(y0))
if typeof(abstol) == Float64
atol *= abstol
else
atol = copy(abstol)
end
if typeof(reltol) == Float64
rtol *= reltol
else
rtol = copy(reltol)
end
t = Float64[tspan[1]]
tout = Float64[tspan[2]]
y = copy(y0)
opt = lsoda_opt_t()
opt.mxstep = nbsteps
opt.ixpr = 0
opt.rtol = pointer(rtol)
opt.atol = pointer(atol)
opt.itask = 1
ctx_ptr = lsoda_context_t()
ctx_ptr.function_ = @cfunction($lsodafun,Cint,(Cdouble,Ptr{Cdouble},Ptr{Cdouble},Ref{UserFunctionAndData})).ptr
ctx_ptr.neq = neq
ctx_ptr.state = 1
ctx_ptr.data = pointer_from_objref(userfun)
lsoda_prepare(ctx_ptr,opt)
yres[1,:] = y0
for k in 2:length(tspan)
tout[1] = tspan[k]
lsoda(ctx_ptr,y,t,tout[1])
@assert (ctx_ptr.state >0) string("LSODA error istate = ", ctx_ptr.state, ", error = ",unsafe_string(ctx_ptr.error))
yres[k,:] = copy(y)
end
lsoda_free(ctx_ptr)
return yres
end
"""
lsoda_evolve!(ctx::lsoda_context_t,y::Vector{Float64},tspan::Vector{Float64})
Solves a set of ordinary differential equations using the LSODA algorithm and the context variable `ctx`. This avoids re-allocating `ctx`. You have to be careful to remember the current time or this function will return an error.
"""
function lsoda_evolve!(ctx::lsoda_context_t,y::Vector{Float64},tspan::Vector{Float64})
@assert ctx.neq == length(y)
# if userdata !== nothing
# # this functionality is not working yet
# # ctx.data.data = userdata
# # unsafe_pointer_to_objref(ctx.data).data = userdata
# end
t = Float64[tspan[1]]
tout = Float64[tspan[2]]
lsoda(ctx,y,t,tout[1])
@assert (ctx.state >0) string("LSODA error istate = ", ctx.state)
end
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 3052 | using Parameters, Compat
@with_kw mutable struct lsoda_common_t
yh::Ptr{Ptr{Cdouble}}= C_NULL
wm::Ptr{Ptr{Cdouble}}= C_NULL
ewt::Ptr{Cdouble}= C_NULL
savf::Ptr{Cdouble}= C_NULL
acor::Ptr{Cdouble}= C_NULL
ipvt::Ptr{Cint}= C_NULL
memory::Ptr{Nothing}= C_NULL
h::Cdouble = 0.
hu::Cdouble = 0.
rc::Cdouble = 0.
tn::Cdouble = 0.
tsw::Cdouble = 0.
pdnorm::Cdouble = 0.
crate::Cdouble = 0.
el::NTuple{14,Cdouble} = ntuple(x->Cdouble(0), 14)
elco::NTuple{13,NTuple{14,Cdouble}} = ntuple(x->ntuple(x->Cdouble(0), 14),13)
tesco::NTuple{13,NTuple{4,Cdouble}} = ntuple(x->ntuple(x->Cdouble(0), 4),13)
hold::Cdouble = 0.
rmax::Cdouble = 0.
pdest::Cdouble = 0.
pdlast::Cdouble = 0.
ialth::Cint = 0
ipup::Cint = 0
nslp::Cint = 0
icount::Cint = 0
irflag::Cint = 0
imxer::Cint = 0
illin::Cint = 0
nhnil::Cint = 0
nslast::Cint = 0
jcur::Cint = 0
meth::Cint = 0
mused::Cint = 0
nq::Cint = 0
nst::Cint = 0
ncf::Cint = 0
nfe::Cint = 0
nje::Cint = 0
nqu::Cint = 0
miter::Cint = 0
end
@with_kw mutable struct lsoda_opt_t
ixpr::Cint = 0
mxstep::Cint = 0
mxhnil::Cint = 0
mxordn::Cint = 0
mxords::Cint = 0
tcrit::Cdouble = 0.
h0::Cdouble = 0.
hmax::Cdouble = 0.
hmin::Cdouble = 0.
hmxi::Cdouble = 0.
itask::Cint = 0
rtol::Ptr{Cdouble} = C_NULL
atol::Ptr{Cdouble} = C_NULL
end
const _lsoda_f = Ptr{Nothing}
@with_kw mutable struct lsoda_context_t
function_::_lsoda_f = C_NULL
data::Ptr{Nothing} = C_NULL
neq::Cint = 0
state::Cint = 0
error::Cstring = C_NULL
common::Ptr{lsoda_common_t} = C_NULL
opt::Ptr{lsoda_opt_t} = C_NULL
end
const lsoda_context_t_ptr = Ptr{lsoda_context_t}
mutable struct UserFunctionAndData
func::Function
data::Any
neq::Cint
UserFunctionAndData(func::Function, data::Any, neq::Cint) = new(func, data, neq)
end
# UserFunctionAndData(func::Function) = func
# UserFunctionAndData(func::Function, data::Nothing) = func
# UserFunctionAndData(func::Function, data::Nothing, neq::Cint) = func
function lsoda_prepare(ctx::lsoda_context_t,opt::lsoda_opt_t)
return ccall((:lsoda_prepare,liblsoda),Cint,
(Ref{lsoda_context_t},Ref{lsoda_opt_t}),
Ref(ctx),Ref(opt))
end
function lsoda(ctx::lsoda_context_t,y::Vector,t::Vector{Float64},tout)
return ccall((:lsoda,liblsoda),Cint,
(Ref{lsoda_context_t},Ref{Cdouble},Ref{Cdouble},Cdouble),
Ref(ctx),y,t,tout[1])
end
function lsoda_reset(ctx::lsoda_context_t)
ccall((:lsoda_reset,liblsoda),Nothing,(Ref{lsoda_context_t},),Ref(ctx))
end
# written to wrap lsoda_free from the C library but never used in practice as
# lsoda_context_t variables are handled on Julia's side
function lsoda_free(ctx::lsoda_context_t)
ccall((:lsoda_free,liblsoda),Nothing,(Ref{lsoda_context_t},),Ref(ctx))
nothing
end
function lsoda_free(ctx::Ref{lsoda_context_t})
ccall((:lsoda_free,liblsoda),Nothing,(Ref{lsoda_context_t},),(ctx))
nothing
end
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 233 | using LSODA
using Test
# write your own tests here
println("--> test1 =============")
include("test1.jl")
println("\n--> test2 =============")
include("test2.jl")
println("\n--> test common =============")
include("test_common.jl")
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 1196 | using LSODA, Printf
function fex(t::T1, y::T2, ydot::T3, data::T4) where {T1, T2, T3, T4}
x = unsafe_wrap(Array,y,neq)
xdot = unsafe_wrap(Array,ydot,neq)
xdot[1]=1.0E4 * x[2] * x[3] - .04E0 * x[1]
xdot[3]=3.0E7 * x[2] * x[2]
xdot[2]= -xdot[1] - xdot[3]
return Int32(0)
end
fex_c = @cfunction(fex,Cint,(Cdouble,Ptr{Cdouble},Ptr{Cdouble},Ptr{Nothing}))
const atol = Array{Float64}(undef,3)
const rtol = Array{Float64}(undef,3)
t = Array{Float64}(undef,1)
tout = Array{Float64}(undef,1)
const y = Array{Float64}(undef,3)
const neq = Int32(3)
y[1] = 1.0E0
y[2] = 0.0E0
y[3] = 0.0E0
t[1] = 0.0E0
tout[1] = 0.4E0
opt = lsoda_opt_t()
opt.ixpr = 0
opt.rtol = pointer(rtol)
opt.atol = pointer(atol)
opt.itask = 1
rtol[1] = 1.0E-4
rtol[2] = 1.0E-4
rtol[3] = 1.0E-4
atol[1] = 1.0E-6
atol[2] = 1.0E-10
atol[3] = 1.0E-6
ctx = lsoda_context_t()
ctx.function_ = fex_c
ctx.neq = neq
ctx.state = 1
lsoda_prepare(ctx,opt)
@time for i=1:12
lsoda(ctx,y,t,tout[1])
Printf.@printf("at t = %12.4e y= %14.6e %14.6e %14.6e\n",t[1],y[1], y[2], y[3])
if (ctx.state <= 0)
error("error istate = ", ctx.state)
end
tout[1] *= 10.0E0
end
println("Done!")
lsoda_free(ctx)
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 1286 | using LSODA
function rhs!(t, x, ydot, data)
ydot[1]=1.0E4 * x[2] * x[3] - .04E0 * x[1]
ydot[3]=3.0E7 * x[2] * x[2]
ydot[2]=-ydot[1] - ydot[3]
nothing
end
y0 = [1.,0.,0.]
println("\n####################################\n--> Use of a old wrapper for speed comparison")
tspan = [0., 0.4]
@time LSODA.lsoda_0(rhs!, y0, tspan, reltol= 1e-4,abstol = Vector([1.e-6,1.e-10,1.e-6]))
@time LSODA.lsoda_0(rhs!, y0, tspan, reltol= 1e-4,abstol = Vector([1.e-6,1.e-10,1.e-6]))
# case with a vector
println("\n####################################\n--> Use of a vector of times where output is required")
tspan = [4 .* 10.0^k for k=-1:10]
res = LSODA.lsoda(rhs!, y0, tspan, reltol= 1e-4,abstol = Vector([1.e-6,1.e-10,1.e-6]))
res = @time LSODA.lsoda(rhs!, y0, tspan, reltol= 1e-4,abstol = Vector([1.e-6,1.e-10,1.e-6]))
println(res)
# #case where we don't have to declare a new context
println("\n####################################\n--> Use of a lsoda_evolve!")
# y0 = [1.,0.,0.]
# tspan = [4.*10.0^k for k=-1:10]
# ctx, _ = LSODA.lsoda(rhs!, y0, tspan[1:2], reltol= 1e-4,abstol = Vector([1.e-6,1.e-10,1.e-6]))
# @time for k=2:length(tspan)
# LSODA.lsoda_evolve!(ctx,y0,tspan[k-1:k])
# @printf("at t = %12.4e y= %14.6e %14.6e %14.6e\n",tspan[k],y0[1], y0[2], y0[3])
# end
# lsoda_free(ctx)
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | code | 1040 | using LSODA, ODEProblemLibrary, Test
import ODEProblemLibrary: prob_ode_linear, prob_ode_2Dlinear
prob = prob_ode_linear
sol = solve(prob,lsoda(),saveat=[1/2])
@test sol.t == [1/2,1]
prob = prob_ode_2Dlinear
sol = solve(prob,lsoda(),saveat=[1/2])
@test sol.t == [1/2,1]
sol = solve(prob,lsoda(),saveat=1/10)
@test sol.t == collect(0:1/10:1)
prob = prob_ode_linear
sol = solve(prob,lsoda())
sol = solve(prob,lsoda(),save_everystep=true,saveat=[1/2])
@test 1/2 ∈ sol.t
prob = prob_ode_2Dlinear
sol = solve(prob,lsoda(),save_everystep=true,saveat=[1/2])
@test 1/2 ∈ sol.t
sol = solve(prob,lsoda(),save_everystep=true,saveat=1/2)
@test 1/2 ∈ sol.t
sol = solve(prob,lsoda(),save_everystep=true,saveat=[1/10,1/5,3/10])#,2/5,1/2,3/5,7/10])
@test 1/10 ∈ sol.t
@test 1/5 ∈ sol.t
@test 3/10 ∈ sol.t
sol = solve(prob,lsoda(),save_everystep=true,saveat=1/10)
for i in 2:length(sol.t)
@test sol.t[i] > sol.t[i-1]
end
for k in 0:1/10:1
@test k ∈ sol.t
end
sol = solve(prob,lsoda(),save_start=false,saveat=1/10)
@test sol.t[1] == 0.1
@test sol.u[1] != prob.u0
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | docs | 4362 | [](https://github.com/rveltz/LSODA.jl/actions/workflows/CI.yml)
[](https://coveralls.io/github/rveltz/LSODA.jl?branch=master)
[](https://rveltz.github.io/LSODA.jl/stable)
[](https://rveltz.github.io/LSODA.jl/latest)
# LSODA.jl
## Introduction
**LSODA.jl** is a Julia package that interfaces to the [liblsoda](https://github.com/sdwfrost/liblsoda) library, developed by [Simon Frost](http://www.vet.cam.ac.uk/directory/[email protected]) ([@sdwfrost](http://github.com/sdwfrost)), thereby providing a way to use the LSODA algorithm from Linda Petzold and Alan Hindmarsh from [Julia](http://julialang.org/). **[Clang.jl](https://github.com/ihnorton/Clang.jl)** has been used to write the library and **[Sundials.jl](https://github.com/JuliaDiffEq/Sundials.jl)** was an inspiring source.
## Installation
To install this package, run the command `add LSODA`.
## Simplified Functions
To solve an ODE, one can call the simplified solver:
```julia
function rhs!(t, x, ydot, data)
ydot[1]=1.0E4 * x[2] * x[3] - .04E0 * x[1]
ydot[3]=3.0E7 * x[2] * x[2]
ydot[2]=-ydot[1] - ydot[3]
nothing
end
y0 = [1.,0.,0.]
tspan = [0., 0.4]
res = lsoda(rhs!, y0, tspan, reltol= 1e-4, abstol = Vector([1.e-6,1.e-10,1.e-6]))
```
To reproduce the test example from liblsoda, one can use:
```julia
lsoda_0(rhs!, y0, tspan, reltol= 1e-4, abstol = Vector([1.e-6,1.e-10,1.e-6]))
```
This should give the following.
```
at t = 4.0000e-01 y= 9.851712e-01 3.386380e-05 1.479493e-02
at t = 4.0000e+00 y= 9.055333e-01 2.240655e-05 9.444430e-02
at t = 4.0000e+01 y= 7.158403e-01 9.186334e-06 2.841505e-01
at t = 4.0000e+02 y= 4.505250e-01 3.222964e-06 5.494717e-01
at t = 4.0000e+03 y= 1.831976e-01 8.941774e-07 8.168016e-01
at t = 4.0000e+04 y= 3.898729e-02 1.621940e-07 9.610125e-01
at t = 4.0000e+05 y= 4.936362e-03 1.984221e-08 9.950636e-01
at t = 4.0000e+06 y= 5.161832e-04 2.065786e-09 9.994838e-01
at t = 4.0000e+07 y= 5.179811e-05 2.072030e-10 9.999482e-01
at t = 4.0000e+08 y= 5.283524e-06 2.113420e-11 9.999947e-01
at t = 4.0000e+09 y= 4.658945e-07 1.863579e-12 9.999995e-01
at t = 4.0000e+10 y= 1.423392e-08 5.693574e-14 1.000000e+00
```
## JuliaDiffEq Common Interface
The functionality of LSODA.jl can be accessed through the JuliaDiffEq common interface. To do this, you build a problem object like:
```julia
using LSODA, DiffEqBase
function rhs!(du, u, p, t)
du[1]=1.0E4 * u[2] * u[3] - .04E0 * u[1]
du[3]=3.0E7 * u[2] * u[2]
du[2]=-du[1] - du[3]
nothing
end
y0 = [1.,0.,0.]
tspan = (0., 0.4)
prob = ODEProblem(rhs!,y0,tspan)
```
This problem is solved by LSODA using the `lsoda()` algorithm in the common `solve` command as follows:
```julia
sol = solve(prob,lsoda())
```
Many keyword arguments can be used to control the solver, its tolerances, and its output formats. For more information, please see the [DifferentialEquations.jl documentation](https://juliadiffeq.github.io/DiffEqDocs.jl/latest/).
## Citing
If using the algorithm through the DifferentialEquations.jl common interface,
please cite:
```
@article{rackauckas2017differentialequations,
title={Differentialequations. jl--a performant and feature-rich ecosystem for solving differential equations in julia},
author={Rackauckas, Christopher and Nie, Qing},
journal={Journal of Open Research Software},
volume={5},
number={1},
year={2017},
publisher={Ubiquity Press}
}
```
For the original algorithm, please cite:
- Alan Hindmarsh, ODEPACK, a Systematized Collection of ODE Solvers,
in Scientific Computing, edited by Robert Stepleman, Elsevier, 1983,
ISBN13: 978-0444866073, LC: Q172.S35.
- K Radhakrishnan, Alan Hindmarsh, Description and Use of LSODE, the Livermore
Solver for Ordinary Differential Equations, Technical report UCRL-ID-113855,
Lawrence Livermore National Laboratory, December 1993.
- Linda Petzold, Automatic Selection of Methods for Solving Stiff and Nonstiff
Systems of Ordinary Differential Equations, SIAM J. Sci. and Stat. Comput.,
4(1), 136–148.
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.7.5 | 49c00ced68a8033b30fb1009b8dea7871aa9e928 | docs | 2444 | # LSODA.jl: the LSODA algorithm for solving ordinary differential equations
```@contents
```
## Introduction
**LSODA.jl** is a Julia package that interfaces to the [liblsoda](https://github.com/sdwfrost/liblsoda) library, developed by [Simon Frost](http://www.vet.cam.ac.uk/directory/[email protected]) ([@sdwfrost](http://github.com/sdwfrost)), thereby providing a way to use the LSODA algorithm from Linda Petzold and Alan Hindmarsh from [Julia](http://julialang.org/). **[Clang.jl](https://github.com/ihnorton/Clang.jl)** has been used to write the library and **[Sundials.jl](https://github.com/JuliaDiffEq/Sundials.jl)** was an inspiring source.
## Installation
To install this package, run the command `Pkg.clone("https://github.com/rveltz/LSODA.jl.git")`
## Example
We first need to load the library.
```julia
using LSODA
```
We next need to define a function that provides the derivatives `ydot` given the time `t`, the current state `y`, and optionally some additional data `data`. Note that this function modifies `ydot` in-place and returns `nothing`.
```julia
function rhs!(t, y, ydot, data)
ydot[1]=1.0E4 * y[2] * y[3] - .04E0 * y[1]
ydot[3]=3.0E7 * y[2] * y[2]
ydot[2]=-ydot[1] - ydot[3]
nothing
end
```
The model can be solved by providing an initial condition for the state variables, and a time span over which to simulate.
```julia
y0 = [1.,0.,0.]
tspan = [0., 0.4]
res = lsoda(rhs!, y0, tspan, reltol= 1e-4, abstol = Vector([1.e-6,1.e-10,1.e-6]))
```
This should give the following.
```
at t = 4.0000e-01 y= 9.851712e-01 3.386380e-05 1.479493e-02
at t = 4.0000e+00 y= 9.055333e-01 2.240655e-05 9.444430e-02
at t = 4.0000e+01 y= 7.158403e-01 9.186334e-06 2.841505e-01
at t = 4.0000e+02 y= 4.505250e-01 3.222964e-06 5.494717e-01
at t = 4.0000e+03 y= 1.831976e-01 8.941774e-07 8.168016e-01
at t = 4.0000e+04 y= 3.898729e-02 1.621940e-07 9.610125e-01
at t = 4.0000e+05 y= 4.936362e-03 1.984221e-08 9.950636e-01
at t = 4.0000e+06 y= 5.161832e-04 2.065786e-09 9.994838e-01
at t = 4.0000e+07 y= 5.179811e-05 2.072030e-10 9.999482e-01
at t = 4.0000e+08 y= 5.283524e-06 2.113420e-11 9.999947e-01
at t = 4.0000e+09 y= 4.658945e-07 1.863579e-12 9.999995e-01
at t = 4.0000e+10 y= 1.423392e-08 5.693574e-14 1.000000e+00
```
## Application programming interface
### Functions
```@docs
lsoda
```
```@docs
lsoda_evolve!
```
| LSODA | https://github.com/rveltz/LSODA.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 395 | using Documenter, WiSER
makedocs(
format = Documenter.HTML(),
sitename = "WiSER.jl",
authors = "Hua Zhou, Chris German",
clean = true,
debug = true,
pages = [
"index.md",
"model_fitting.md",
"simulation.md"
]
)
deploydocs(
repo = "github.com/OpenMendel/WiSER.jl.git",
target = "build",
deps = nothing,
make = nothing
)
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 16855 | module WiSER
using Base: kwarg_decl
using DataFrames, Tables, LinearAlgebra, MathOptInterface
using Printf, Reexport, Statistics, CategoricalArrays
import LinearAlgebra: BlasReal, copytri!
import DataFrames: DataFrame
using Random
import Random: GLOBAL_RNG
const MOI = MathOptInterface
@reexport using Ipopt
@reexport using NLopt
@reexport using StatsModels
@reexport using Distributions
export
# types
WSVarLmmObs,
WSVarLmmModel,
# functions
coef,
coefnames,
coeftable,
confint,
DataFrame,
fit!,
init_ls!,
init_mom!,
nlsv_obj!,
nclusters,
nobs,
rand!,
respdists,
rvarlmm,
rvarlmm!,
stderror,
update_res!,
update_wtmat!,
vcov
"""
WSVarLmmObs
WSVarLmmObs(y, X, Z, W)
A realization of within-subject variance linear mixed model data instance.
# Positional Arguments:
`y`: the response vector
`X`: the mean fixed effects covariate matrix
`Z`: the random location effects covariate matrix
`W`: the within-subject variance fixed effects covariate matrix
"""
struct WSVarLmmObs{T <: BlasReal}
# data
y :: Vector{T} # response
Xt :: Matrix{T} # X should include a column of 1's
Zt :: Matrix{T} # Random effect covars
Wt :: Matrix{T} # Covariates that affect WS variability
# working arrays
obj :: Vector{T} # instance objective value
∇β :: Vector{T} # gradient
∇τ :: Vector{T}
∇Lγ :: Matrix{T}
Hββ :: Matrix{T} # hessian
Hττ :: Matrix{T}
HτLγ :: Matrix{T}
HLγLγ :: Matrix{T}
res :: Vector{T} # residual vector
res2 :: Vector{T} # residual vector.^2
resnrm2 :: Vector{T} # sum of residual squares
expwτ :: Vector{T} # hold exp.(W * τ)
ztz :: Matrix{T} # Z'Z
ztres :: Vector{T} # Z'res
zlltzt_dg :: Vector{T}
storage_n1 :: Vector{T}
storage_p1 :: Vector{T}
storage_q1 :: Vector{T}
storage_pn :: Matrix{T}
storage_qn :: Matrix{T}
storage_ln :: Matrix{T}
storage_pp :: Matrix{T}
storage_qq :: Matrix{T}
storage_qp :: Matrix{T}
storage_q◺n :: Matrix{T}
# Woodbury structure for weight matrix Vinv = Dinv - U * U'
Dinv :: Vector{T}
Ut :: Matrix{T}
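# note: Ut is q × n, so U * U' is the rank-q correction contributed by the random effects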
# for weighted objective eval
rt_Dinv_r :: Vector{T}
rt_UUt_r :: Vector{T}
rt_U :: Matrix{T}
Dinv_r :: Vector{T}
rt_UUt :: Matrix{T}
Zt_Dinv_r :: Vector{T}
rt_UUt_Z :: Matrix{T}
diagUUt_Dinv :: Vector{T}
Dinv_Z_L :: Matrix{T}
UUt_Z_L :: Matrix{T}
Ut_D_U :: Matrix{T}
Zt_Dinv_Z :: Matrix{T}
Lt_Zt_Dinv_Z_L :: Matrix{T}
Zt_UUt_Z :: Matrix{T}
Lt_Zt_UUt_Z_L :: Matrix{T}
# for gradient wrt τ
diagDVRV :: Vector{T}
# for gradient wrt Lγ
Zt_Dinv :: Matrix{T}
Zt_UUt_rrt_Dinv_Z :: Matrix{T}
Zt_UUt_rrt_UUt_Z :: Matrix{T}
Zt_UUt :: Matrix{T}
Lt_Zt_Dinv_r :: Vector{T}
Zt_Vinv_r :: Vector{T}
# for Hessian wrt τ
Wt_D_Dinv :: Matrix{T}
sqrtDinv_UUt :: Vector{T}
Ut_kr_Ut :: Matrix{T}
Wt_D_Ut_kr_Utt :: Matrix{T}
Wt_D_sqrtdiagDinv_UUt :: Matrix{T}
# for Hessian wrt Lγ
Zt_Vinv_Z :: Matrix{T}
Zt_Vinv :: Matrix{T}
end
function WSVarLmmObs(
y::AbstractVector{T},
X::AbstractMatrix{T},
Z::AbstractMatrix{T},
W::AbstractMatrix{T}
) where T <: BlasReal
n, p, q, l = size(X, 1), size(X, 2), size(Z, 2), size(W, 2)
q◺ = ◺(q)
# working arrays
obj = Vector{T}(undef, 1)
∇β = Vector{T}(undef, p)
∇τ = Vector{T}(undef, l)
∇Lγ = Matrix{T}(undef, q, q)
Hββ = Matrix{T}(undef, p, p)
Hττ = Matrix{T}(undef, l, l)
HτLγ = Matrix{T}(undef, l, q◺)
HLγLγ = Matrix{T}(undef, q◺, q◺)
res = Vector{T}(undef, n)
res2 = Vector{T}(undef, n)
resnrm2 = Vector{T}(undef, 1)
expwτ = Vector{T}(undef, n)
ztz = transpose(Z) * Z
ztres = Vector{T}(undef, q)
zlltzt_dg = Vector{T}(undef, n)
storage_n1 = Vector{T}(undef, n)
storage_p1 = Vector{T}(undef, p)
storage_q1 = Vector{T}(undef, q)
storage_pn = Matrix{T}(undef, p, n)
storage_qn = Matrix{T}(undef, q, n)
storage_ln = Matrix{T}(undef, l, n)
storage_pp = Matrix{T}(undef, p, p)
storage_qq = Matrix{T}(undef, q, q)
storage_qp = Matrix{T}(undef, q, p)
storage_q◺n = Matrix{T}(undef, q◺, n)
# added for weighted estimating equations
Dinv = Vector{T}(undef, n) # stores diag(exp(-wτ_0))
Ut = Matrix{T}(undef, q, n)
# denote r as the residual vector and D as diagonal(exp(Wτ))
# added for weighted estimating equations
rt_Dinv_r = Vector{T}(undef, 1)
rt_UUt_r = Vector{T}(undef, 1)
rt_U = Matrix{T}(undef, 1, q)
Dinv_r = Vector{T}(undef, n)
rt_UUt = Matrix{T}(undef, 1, n)
Zt_Dinv_r = Vector{T}(undef, q)
rt_UUt_Z = Matrix{T}(undef, 1, q)
diagUUt_Dinv = Vector{T}(undef, n)
Dinv_Z_L = Matrix{T}(undef, n, q)
UUt_Z_L = Matrix{T}(undef, n, q)
Ut_D_U = Matrix{T}(undef, q, q)
Zt_Dinv_Z = Matrix{T}(undef, q, q)
Lt_Zt_Dinv_Z_L = Matrix{T}(undef, q, q)
Zt_UUt_Z = Matrix{T}(undef, q, q)
Lt_Zt_UUt_Z_L = Matrix{T}(undef, q, q)
# for gradient wrt τ
diagDVRV = Vector{T}(undef, n)
# for gradient wrt Lγ
Zt_Dinv = Matrix{T}(undef, q, n)
Zt_UUt_rrt_Dinv_Z = Matrix{T}(undef, q, q)
Zt_UUt_rrt_UUt_Z = Matrix{T}(undef, q, q)
Zt_UUt = Matrix{T}(undef, q, n)
Lt_Zt_Dinv_r = Vector{T}(undef, q)
Zt_Vinv_r = Vector{T}(undef, q)
# for Hessian wrt τ
Wt_D_Dinv = Matrix{T}(undef, l, n)
sqrtDinv_UUt = Vector{T}(undef, n)
Ut_kr_Ut = Matrix{T}(undef, abs2(q), n)
Wt_D_Ut_kr_Utt = Matrix{T}(undef, l, abs2(q))
Wt_D_sqrtdiagDinv_UUt = Matrix{T}(undef, l, n)
# for Hessian wrt Lγ
Zt_Vinv_Z = Matrix{T}(undef, q, q)
Zt_Vinv = Matrix{T}(undef, q, n)
# constructor
WSVarLmmObs{T}(
y, transpose(X), transpose(Z), transpose(W), obj,
∇β, ∇τ, ∇Lγ,
Hββ, Hττ, HτLγ, HLγLγ,
res, res2, resnrm2, expwτ, ztz, ztres, zlltzt_dg,
storage_n1, storage_p1, storage_q1,
storage_pn, storage_qn, storage_ln,
storage_pp, storage_qq, storage_qp, storage_q◺n,
Dinv, Ut, rt_Dinv_r, rt_UUt_r, rt_U, Dinv_r,
rt_UUt, Zt_Dinv_r, rt_UUt_Z, diagUUt_Dinv,
Dinv_Z_L, UUt_Z_L, Ut_D_U, Zt_Dinv_Z,
Lt_Zt_Dinv_Z_L, Zt_UUt_Z, Lt_Zt_UUt_Z_L,
diagDVRV,
Zt_Dinv, Zt_UUt_rrt_Dinv_Z, Zt_UUt_rrt_UUt_Z,
Zt_UUt, Lt_Zt_Dinv_r, Zt_Vinv_r, Wt_D_Dinv,
sqrtDinv_UUt, Ut_kr_Ut, Wt_D_Ut_kr_Utt,
Wt_D_sqrtdiagDinv_UUt, Zt_Vinv_Z, Zt_Vinv)
end
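# Illustrative construction (a sketch with hypothetical dimensions: n = 10
# observations, p = 3 mean covariates, q = 2 random effects, l = 3 WS-variance
# covariates):
#   obs = WSVarLmmObs(randn(10), randn(10, 3), randn(10, 2), randn(10, 3))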
"""
WSVarLmmModel
Within-subject variance linear mixed model, which contains a vector of
`WSVarLmmObs` as data, model parameters, and working arrays.
WSVarLmmModel(obsvec; obswts, meannames, renames, wsvarnames)
# Positional arguments
- `obsvec`: Vector of WSVarLmmObs
# Keyword arguments
- `obswts`: Subject-level vector of observation weights, with the same length as `obsvec`.
- `meannames`: Names of the mean fixed effects covariates
- `renames`: Names of the random location effects covariates
- `wsvarnames`: Names of the ws variance fixed effects covariates
"""
struct WSVarLmmModel{T <: BlasReal} <: MOI.AbstractNLPEvaluator
# data
data :: Vector{WSVarLmmObs{T}}
respname :: String
meannames :: Vector{String} # names of mean fixed effect variables
renames :: Vector{String} # names of random location effect variables
wsvarnames :: Vector{String} # names of ws var fixed effect variables
meanformula :: FormulaTerm
reformula :: FormulaTerm
wsvarformula :: FormulaTerm
ids :: Union{Vector{<:AbstractString}, Vector{<:Int}} # IDs of individuals/clusters in order
obswts :: Vector{T} # individual/cluster weights
# dimenions
p :: Int # number of mean parameters in linear regression
q :: Int # number of random effects
l :: Int # number of parameters for modeling WS variability
m :: Int # number of individuals/clusters
nis :: Vector{Int} # number of observations per cluster
nsum :: Int # number of observations (summed across individuals)
# sufficient statistics
xtx :: Matrix{T} # sum_i Xi'Xi
xty :: Vector{T} # sum_i Xi'yi
wtw :: Matrix{T} # sum_i Wi'Wi
ztz2 :: Matrix{T} # sum_i Zi'Zi ⊗ Zi'Zi
ztz2od :: Matrix{T} # sum_i (Zi'Zi ⊗ Zi'Zi - (Zi' ⊙ Zi')(Zi' ⊙ Zi')')
# parameters
β :: Vector{T} # p-vector of mean regression coefficients
τ :: Vector{T} # l-vector of WS variability regression coefficients
Lγ :: Matrix{T} # q by q lower triangular Cholesky factor of cov(γ)
Σγ :: Matrix{T} # q by q covariance matrix of γ
# working arrays
∇β :: Vector{T}
∇τ :: Vector{T}
∇Lγ :: Matrix{T}
∇Σγ :: Vector{T}
Hββ :: Matrix{T}
Hττ :: Matrix{T}
HτLγ :: Matrix{T}
HLγLγ :: Matrix{T}
HΣγΣγ :: Matrix{T}
# weighted NLS or unweighted NLS
iswtnls :: Vector{Bool}
# multi-threading or not
ismthrd :: Vector{Bool}
# model has been fit or not
isfitted :: Vector{Bool}
# for sandwich estimator
ψ :: Vector{T}
Ainv :: Matrix{T}
B :: Matrix{T}
vcov :: Matrix{T}
end
function WSVarLmmModel(
obsvec :: Vector{WSVarLmmObs{T}};
obswts :: Vector = [],
respname :: String = "y",
meannames :: Vector{String} = ["x$i" for i in 1:size(obsvec[1].Xt, 1)],
renames :: Vector{String} = ["z$i" for i in 1:size(obsvec[1].Zt, 1)],
wsvarnames :: Vector{String} = ["w$i" for i in 1:size(obsvec[1].Wt, 1)],
meanformula :: FormulaTerm = FormulaTerm(term(Symbol(respname)),
sum(term.(Symbol.(meannames)))),
reformula :: FormulaTerm = FormulaTerm(term(Symbol(respname)),
sum(term.(Symbol.(renames)))),
wsvarformula:: FormulaTerm = FormulaTerm(term(Symbol(respname)),
sum(term.(Symbol.(wsvarnames)))),
ids :: Union{Vector{<:AbstractString}, Vector{Int}} = collect(1:length(obsvec))
) where T <: BlasReal
# dimensions
p = size(obsvec[1].Xt, 1)
q = size(obsvec[1].Zt, 1)
l = size(obsvec[1].Wt, 1)
m = length(obsvec)
nis = map(o -> length(o.y), obsvec)
nsum = sum(nis)
q◺ = ◺(q)
# sufficient statistics
xtx = zeros(T, p, p)
xty = zeros(T, p)
wtw = zeros(T, l, l)
ztz2 = zeros(T, abs2(q), abs2(q))
ztz2od = zeros(T, abs2(q), abs2(q))
for obs in obsvec
# accumulate Xi'Xi
BLAS.syrk!('U', 'N', T(1), obs.Xt, T(1), xtx)
# accumulate Xi'yi
BLAS.gemv!('N', T(1), obs.Xt, obs.y, T(1), xty)
# accumulate Wi' * Wi
BLAS.syrk!('U', 'N', T(1), obs.Wt, T(1), wtw)
# accumulate Zi'Zi ⊗ Zi'Zi
kron_axpy!(obs.ztz, obs.ztz, ztz2)
# accumulate (Zi' ⊙ Zi')(Zi' ⊙ Zi')'
# Ut_kr_Ut used as scratch space to store Zi' ⊙ Zi'
kr_axpy!(obs.Zt, obs.Zt, fill!(obs.Ut_kr_Ut, 0))
BLAS.syrk!('U', 'N', T(1), obs.Ut_kr_Ut, T(1), ztz2od)
end
ztz2od .= ztz2 .- ztz2od
copytri!( xtx, 'U')
copytri!( wtw, 'U')
copytri!( ztz2, 'U')
copytri!(ztz2od, 'U')
# parameters
β = Vector{T}(undef, p)
τ = Vector{T}(undef, l)
Lγ = Matrix{T}(undef, q, q)
Σγ = Matrix{T}(undef, q, q)
# gradients
∇β = Vector{T}(undef, p)
∇τ = Vector{T}(undef, l)
∇Lγ = Matrix{T}(undef, q, q)
∇Σγ = Vector{T}(undef, abs2(q))
Hββ = Matrix{T}(undef, p, p)
Hττ = Matrix{T}(undef, l, l)
HτLγ = Matrix{T}(undef, l, q◺)
HLγLγ = Matrix{T}(undef, q◺, q◺)
HΣγΣγ = Matrix{T}(undef, abs2(q), abs2(q))
# weighted NLS fitting or not
iswtnls = [false]
# multi-threading or not
ismthrd = [false]
# has been fit or not
isfitted = [false]
# sandwich estimator
ψ = Vector{T}(undef, p + q◺ + l)
Ainv = Matrix{T}(undef, p + q◺ + l, p + q◺ + l)
B = Matrix{T}(undef, p + q◺ + l, p + q◺ + l)
vcov = Matrix{T}(undef, p + q◺ + l, p + q◺ + l)
# constructor
WSVarLmmModel{T}(
obsvec, respname, meannames, renames, wsvarnames,
meanformula, reformula, wsvarformula,
ids, obswts, p, q, l, m, nis, nsum,
xtx, xty, wtw, ztz2, ztz2od,
β, τ, Lγ, Σγ,
∇β, ∇τ, ∇Lγ, ∇Σγ,
Hββ, Hττ, HτLγ, HLγLγ, HΣγΣγ,
iswtnls, ismthrd, isfitted,
ψ, Ainv, B, vcov)
end
coefnames(m::WSVarLmmModel) = [m.meannames; m.wsvarnames]
coef(m::WSVarLmmModel) = [m.β; m.τ]
nobs(m::WSVarLmmModel) = m.nsum
nclusters(m::WSVarLmmModel) = m.m
stderror(m::WSVarLmmModel) = [sqrt(m.vcov[i, i]) for i in 1:(m.p + m.l)]
vcov(m::WSVarLmmModel) = m.vcov # include variance parts of Lγ?
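# Wald-type confidence intervals based on the sandwich standard errors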
confint(m::WSVarLmmModel, level::Real) = hcat(coef(m), coef(m)) +
stderror(m) * quantile(Normal(), (1. - level) / 2.) * [1. -1.]
confint(m::WSVarLmmModel) = confint(m, 0.95)
function getformula(m::WSVarLmmModel, s::String)
frmla = s == "mean" ? m.meanformula : s == "var" ?
m.wsvarformula : s == "re" ? m.reformula : nothing
rhs = length(terms(frmla.rhs)) == 1 ?
string(term(frmla.rhs)) : join(frmla.rhs, " + ")
return m.respname * " ~ " * rhs
end
function coeftable(m::WSVarLmmModel)
mstder = stderror(m)
mcoefs = coef(m)
wald = mcoefs ./ mstder
pvals = 2 * Distributions.ccdf.(Normal(), abs.(wald))
StatsModels.CoefTable(hcat(mcoefs, mstder, wald, pvals),
["Estimate", "Std. Error", "Z", "Pr(>|Z|)"],
coefnames(m), 4, 3)
end
function Base.show(io::IO, m::WSVarLmmModel)
if !m.isfitted[1]
@warn("The model has not been fit.")
return nothing
end
println(io)
println(io, "Within-subject variance estimation by robust regression (WiSER)")
println(io)
println(io, "Mean Formula:")
println(io, getformula(m, "mean"))
println(io, "Random Effects Formula:")
println(io, getformula(m, "re"))
println(io, "Within-Subject Variance Formula:")
println(io, getformula(m, "var"))
println(io)
println(io, "Number of individuals/clusters: $(m.m)")
println(io, "Total observations: $(m.nsum)")
println(io)
println(io, "Fixed-effects parameters:")
show(io, coeftable(m))
println(io)
println(io, "Random effects covariance matrix Σγ:")
Base.print_matrix(IOContext(io, :compact => true), [m.renames m.Σγ])
println(io)
println(io)
end
function config_solver(solver::MOI.AbstractOptimizer, solver_config::Dict)
for (key, val) in solver_config
MOI.set(solver, MOI.RawOptimizerAttribute(key), val)
end
end
include("nls.jl")
include("initialization.jl")
include("fit.jl")
include("df.jl")
include("rand.jl")
include("multivariate_calculus.jl")
include("sandwich.jl")
end
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 7067 | """
DataFrame(m::WSVarLmmModel)
Convert the data in `WSVarLmmModel` to a `DataFrame`.
"""
function DataFrame(m::WSVarLmmModel)
p, q, l, n = m.p, m.q, m.l, m.nsum
# preallocate arrays
id = Vector{eltype(m.ids)}(undef, n)
y = Matrix{Float64}(undef, n, 1)
X = Matrix{Float64}(undef, n, p)
Z = Matrix{Float64}(undef, n, q)
W = Matrix{Float64}(undef, n, l)
addweight = !isempty(m.obswts)
if addweight
weights = Vector{Float64}(undef, n)
end
#get names to reconstruct Dataframe properly
meannames = map(x -> join(x[2:end], ": "), (split.(m.meannames, ": ")))
renames = map(x -> join(x[2:end], ": "), (split.(m.renames, ": ")))
wsvarnames = map(x -> join(x[2:end], ": "), (split.(m.wsvarnames, ": ")))
# gather data
offset = 1
for (i, vlmmobs) in enumerate(m.data)
ni = length(vlmmobs.y)
rangei = offset:(offset + ni - 1)
id[rangei] .= m.ids[i]
y[rangei] = vlmmobs.y
X[rangei, :] = transpose(vlmmobs.Xt)
Z[rangei, :] = transpose(vlmmobs.Zt)
W[rangei, :] = transpose(vlmmobs.Wt)
if addweight
weights[rangei] .= m.obswts[i]
end
offset += ni
end
# Skip repeated variables in X, Z, W and don't copy interaction terms
Xcols = .![occursin("&", meanname) for meanname in meannames]
Zcols = .![(zname in meannames || occursin("&", zname)) for zname in renames]
Wcols = .![(wname in meannames || wname in renames ||
occursin("&", wname)) for wname in wsvarnames]
# Construct DataFrame
df = hcat(DataFrame(id = id),
DataFrame(y, [m.respname]),
DataFrame(X[:, Xcols], meannames[Xcols]),
DataFrame(Z[:, Zcols], renames[Zcols]),
DataFrame(W[:, Wcols], wsvarnames[Wcols]))
if addweight
df[!, :obswts] = weights
end
allnames = union(meannames, renames, wsvarnames)
filter!(x -> !occursin("&", x), allnames)
# Create columns for factored variables (for reusing original formula and GWAS)
isfactor = occursin.(": ", allnames)
splitnames = map(x -> split(x, ": ")[1], allnames)
for name in unique(splitnames)
numfactors = findall(isequal.(name, splitnames))
if (length(numfactors) == 1 && .!(any(isfactor[numfactors])))
continue
end
factors = map(x -> join(split(x, ": ")[2:end], ": "),
allnames[numfactors])
valholder = repeat([""], n)
for factor in factors
dfname = join([name, factor], ": ")
valholder[df[!, dfname] .== 1] .= factor
end
valholder[valholder .== ""] .= "Reference"
df[!, name] = valholder
df[!, name] = levels!(CategoricalArray(df[!, name]),
["Reference"; factors]);
end
df
end
"""
WSVarLmmModel(meanformula::FormulaTerm, reformula::FormulaTerm,
wsvarformula::FormulaTerm, idvar::Union{String, Symbol}, tbl)
Constructor of `WSVarLmmModel` from a Tables.jl compatible source.
# Positional arguments
- `meanformula`: formula for the mean fixed effects β (variables in X matrix).
- `reformula`: formula for the mean random effects γ (variables in Z matrix).
- `wsvarformula`: formula for the within-subject variance effects τ (variables in W matrix).
- `idvar`: id variable for groupings.
- `tbl:` data table holding all of the data for the model. It can be a
`DataFrame` or column-based table such as an `IndexedTable` from JuliaDB.
# Keyword arguments
- `wtvar`: variable name corresponding to the observation weights in the datatable.
# Example
```
vlmm3 = WSVarLmmModel(@formula(y ~ 1 + x2 + x3 + x4 + x5),
@formula(y ~ 1 + z2 + z3), @formula(y ~ 1 + w2 + w3 + w4 + w5), "id", df)
```
"""
WSVarLmmModel(
meanformula :: FormulaTerm,
reformula :: FormulaTerm,
wsvarformula :: FormulaTerm,
idvar :: Union{String, Symbol},
tbl;
wtvar :: Union{String, Symbol} = ""
) = WSVarLmmModel(meanformula, reformula, wsvarformula,
idvar, columntable(tbl); wtvar = wtvar)
function WSVarLmmModel(
meanformula :: FormulaTerm,
reformula :: FormulaTerm,
wsvarformula :: FormulaTerm,
idvar :: Union{String, Symbol},
tbl :: T;
wtvar :: Union{String, Symbol} = ""
) where T <: Tables.ColumnTable
idvar = Symbol(idvar)
iswtvar = !isempty(string(wtvar))
# keep original non-schema applied formulas for model object
orig_meanformla = deepcopy(meanformula)
orig_reformla = deepcopy(reformula)
orig_wsvarformla = deepcopy(wsvarformula)
function varlmmobs(tab)
y, X = modelcols(meanformula, tab)
Z = modelmatrix(reformula, tab)
W = modelmatrix(wsvarformula, tab)
return WSVarLmmObs(y, X, Z, W)
end
# collect all terms to perform dropping properly
if iswtvar
alltermformula = meanformula.lhs ~ sum(term.(union(terms(meanformula.rhs),
terms(reformula.rhs), terms(wsvarformula.rhs)))) +
term(idvar) + term(Symbol(wtvar))
else
alltermformula = meanformula.lhs ~ sum(term.(union(terms(meanformula.rhs),
terms(reformula.rhs), terms(wsvarformula.rhs)))) + term(idvar)
end
tbl, _ = StatsModels.missing_omit(tbl, alltermformula)
# apply df-wide schema
meanformula = apply_schema(meanformula, schema(meanformula, tbl))#, ydict))
reformula = apply_schema(reformula, schema(reformula, tbl))#, ydict))
wsvarformula = apply_schema(wsvarformula, schema(wsvarformula, tbl))#, ydict))
# variable names
respname = string(meanformula.lhs)
meannames = StatsModels.coefnames(meanformula.rhs)
# either array{Names} or string of one variable
meannames = typeof(meannames) <: Array ? ["β$i: " for i in 1:length(meannames)] .*
meannames : ["β1: " * meannames]
renames = StatsModels.coefnames(reformula.rhs)
renames = typeof(renames) <: Array ? ["γ$i: " for i in 1:length(renames)] .*
renames : ["γ1: " * renames]
wsvarnames = StatsModels.coefnames(wsvarformula.rhs)
wsvarnames = typeof(wsvarnames) <: Array ? ["τ$i: " for i in 1:length(wsvarnames)] .*
wsvarnames : ["τ1: " * wsvarnames]
# build grouped dataframe and record ID order
gdf = DataFrames.groupby(DataFrame(tbl, copycols = false), idvar)
ids = map(x -> x[1], keys(gdf))
if isempty(string(wtvar))
wts = []
else
cnames = Tables.columnnames(tbl)
wtvar = Symbol(wtvar)
wtvar in cnames ||
error("weight variable $wtvar not in datatable $tbl")
wts = combine(gdf, wtvar => first)[!, 2]
end
obsvec = combine(varlmmobs, gdf)[!, 2]
varlmm = WSVarLmmModel(obsvec, respname = respname,
meannames = meannames, renames = renames,
wsvarnames = wsvarnames, ids = ids, obswts = wts,
meanformula = orig_meanformla, reformula = orig_reformla,
wsvarformula = orig_wsvarformla)
return varlmm
end | WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 8309 | """
fit!(m::WSVarLmmModel,
solver=IpoptSolver(print_level=0, mehrotra_algorithm="yes", max_iter=100);
init=init_ls!(m), runs = 2)
Fit a `WSVarLmmModel` object using a weighted NLS method.
# Positional arguments
- `m::WSVarLmmModel`: Model to fit.
- `solver`: Nonlinear programming solver to use. Common choices include:
- `Ipopt.IpoptSolver(print_level=0, mehrotra_algorithm="yes", warm_start_init_point="yes", max_iter=100)`.
- `Ipopt.IpoptSolver(print_level=0, watchdog_shortened_iter_trigger=3, max_iter=100)`.
- `Ipopt.IpoptSolver(print_level=0, max_iter=100)`.
- `KNITRO.KnitroSolver(outlev=3)`. (Knitro is commercial software)
- `NLopt.NLoptSolver(algorithm=:LD_MMA, maxeval=4000)`.
- `NLopt.NLoptSolver(algorithm=:LD_LBFGS, maxeval=4000)`.
# Keyword arguments
- `init`: Initialization strategy. `fit!` will use `m.τ` and `m.Lγ` to set the
weight matrices `Vi` and solve the weighted NLS to obtain an
estimate for `m.β`, `m.τ`, and `m.Lγ`. Choices for `init` include
- `init_ls!(m)` (default): initialize by the least squares analytical solution.
- `init_mom!(m)`: initialize by the unweighted NLS (MoM).
- `m`: initilize from user supplied values in `m.τ` and `m.Lγ`.
- `runs::Integer`: Number of weighted NLS runs; default is 2. Each run will use the
newest `m.τ` and `m.Lγ` to update the weight matrices `Vi` and solve the
new weighted NLS.
- `parallel::Bool`: Multi-threading or not. Default is `false`.
- `verbose::Bool`: Verbose display or not, Default is `true`.
"""
function fit!(
m :: WSVarLmmModel{T},
solver :: MOI.AbstractOptimizer = Ipopt.Optimizer();
solver_config::Dict =
Dict("print_level" => 0,
"mehrotra_algorithm" => "yes",
"warm_start_init_point" => "yes",
"max_iter" => 100),
init :: WSVarLmmModel{T} = init_ls!(m),
runs :: Integer = 2,
parallel :: Bool = false,
verbose :: Bool = true,
throw_on_failure :: Bool = false,
) where {T<:BlasReal}
solvertype = typeof(solver)
solvertype <: Ipopt.Optimizer ||
@warn("Optimizer object is $solvertype, `solver_config` may need to be defined.")
# Pass options to solver
config_solver(solver, solver_config)
# set up NLP optimization problem
npar = m.l + ◺(m.q)
MOI.empty!(solver)
lb = T[]
ub = T[]
NLPBlock = MOI.NLPBlockData(
MOI.NLPBoundsPair.(lb, ub), m, true
)
par0 = Vector{T}(undef, npar)
modelpar_to_optimpar!(par0, m)
solver_pars = MOI.add_variables(solver, npar)
for i in 1:npar
MOI.set(solver, MOI.VariablePrimalStart(), solver_pars[i], par0[i])
end
MOI.set(solver, MOI.NLPBlock(), NLPBlock)
MOI.set(solver, MOI.ObjectiveSense(), MOI.MIN_SENSE)
# optimize weighted NLS
m.iswtnls[1] = true
m.ismthrd[1] = parallel
xsol = similar(par0)
βprev, τprev, Lγprev = similar(m.β), similar(m.τ), similar(m.Lγ)
for run in 1:runs
copyto!(βprev, m.β); copyto!(τprev, m.τ), copyto!(Lγprev, m.Lγ)
tic = time() # start timing
# update Vi, then β and residuals with WLS
update_wtmat!(m)
# update τ and Lγ by WNLS
modelpar_to_optimpar!(par0, m)
for i in 1:npar
MOI.set(solver, MOI.VariablePrimalStart(), solver_pars[i], par0[i])
end
MOI.optimize!(solver)
toc = time()
optstat = MOI.get(solver, MOI.TerminationStatus())
optstat in (MOI.LOCALLY_SOLVED, MOI.ALMOST_LOCALLY_SOLVED) ||
(throw_on_failure ? error("Optimization unsuccessful; got $optstat") : @warn("Optimization unsuccessful; got $optstat"))
# Get solver solution values
fill!(xsol, zero(T))
for i in eachindex(xsol)
xsol[i] = MOI.get(solver, MOI.VariablePrimal(), MOI.VariableIndex(i))
end
optimpar_to_modelpar!(m, xsol)
verbose && @printf(
"run = %d, ‖Δβ‖ = %f, ‖Δτ‖ = %f, ‖ΔL‖ = %f, status = %s, time(s) = %f\n",
run,
norm(m.β - βprev ),
norm(m.τ - τprev ),
norm(m.Lγ - Lγprev),
optstat,
toc - tic)
end
m.isfitted[1] = true
# refresh objective, gradient, and Hessian
mul!(m.Σγ, m.Lγ, transpose(m.Lγ))
nlsv_obj!(m, true, true, false)
# sandwich estimator
sandwich!(m)
m
end
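# Illustrative usage (a sketch; `vlmm` is a hypothetical WSVarLmmModel):
#   fit!(vlmm)                                    # LS init, Ipopt, 2 weighted NLS runs
#   fit!(vlmm, init = init_mom!(vlmm), runs = 3)  # MoM init, 3 runs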
"""
modelpar_to_optimpar!(par, m)
Translate model parameters in `m` to optimization variables in `par`.
"""
function modelpar_to_optimpar!(
par :: AbstractVector{T},
m :: WSVarLmmModel{T}
) where {T<:BlasReal}
q, l = m.q, m.l
# τ
copyto!(par, m.τ)
# Lγ
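# diagonal entries are mapped through log so the optimization over Lγ is
# unconstrained; optimpar_to_modelpar! applies exp to invert this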
offset = l + 1
@inbounds for j in 1:q, i in j:q
par[offset] = i == j ? log(max(m.Lγ[i, j], floatmin(T))) : m.Lγ[i, j]
offset += 1
end
par
end
"""
optimpar_to_modelpar!(m, par)
Translate optimization variables in `par` to the model parameters in `m`.
"""
function optimpar_to_modelpar!(
m :: WSVarLmmModel{T},
par :: Vector{T}
) where {T<:BlasReal}
q, l = m.q, m.l
# τ
copyto!(m.τ, 1, par, 1, l)
# Lγ
fill!(m.Lγ, 0)
offset = l + 1
@inbounds for j in 1:q, i in j:q
m.Lγ[i, j] = i == j ? exp(par[offset]) : par[offset]
offset += 1
end
m
end
function MOI.initialize(
m :: WSVarLmmModel,
requested_features :: Vector{Symbol}
)
for feat in requested_features
if !(feat in MOI.features_available(m))
error("Unsupported feature $feat")
end
end
end
MOI.features_available(m::WSVarLmmModel) = [:Grad, :Hess]
function MOI.eval_objective(
m :: WSVarLmmModel{T},
par :: AbstractVector{T}
) where {T<:BlasReal}
optimpar_to_modelpar!(m, par)
nlsv_obj!(m, false, false, false)
end
function MOI.eval_objective_gradient(
m :: WSVarLmmModel{T},
grad :: Vector{T},
par :: AbstractVector{T}
) where {T<:BlasReal}
q, l = m.q, m.l
optimpar_to_modelpar!(m, par)
obj = nlsv_obj!(m, true, false, false)
# gradient wrt τ
copyto!(grad, m.∇τ)
# gradient wrt Lγ
offset = l + 1
@inbounds for j in 1:q, i in j:q # traverse lower triangular of Lγ[i, j]
grad[offset] = m.∇Lγ[i, j]
i == j && (grad[offset] *= m.Lγ[i, j])
offset += 1
end
end
function MOI.eval_constraint(
m :: WSVarLmmModel,
g :: Vector{T},
par :: AbstractVector{T}
) where {T<:BlasReal}
return nothing
end
function MOI.hessian_lagrangian_structure(m::WSVarLmmModel)
# our Hessian is a dense matrix, work on the upper triangular part
npar = m.l + ◺(m.q)
arr1 = Vector{Int}(undef, ◺(npar))
arr2 = Vector{Int}(undef, ◺(npar))
idx = 1
@inbounds for j in 1:npar, i in 1:j
arr1[idx] = i
arr2[idx] = j
idx += 1
end
return collect(zip(arr1, arr2))
end
function MOI.eval_hessian_lagrangian(
m :: WSVarLmmModel,
H :: AbstractVector{T},
par :: AbstractVector{T},
σ :: T,
μ :: AbstractVector{T}
) where {T<:BlasReal}
q, l = m.q, m.l
# refresh obj, gradient, and hessian
optimpar_to_modelpar!(m, par)
nlsv_obj!(m, true, true, false)
# Hττ
idx = 1
@inbounds for j in 1:l, i in 1:j
H[idx] = m.Hττ[i, j]
idx += 1
end
j = 1 # index columns of HτLγ and HLγLγ
@inbounds for j2 in 1:q, j1 in j2:q # traverse lower triangular of Lγ[j1, j2]
# HτLγ
for i in 1:l # i index rows of HτLγ
H[idx] = m.HτLγ[i, j]
j1 == j2 && (H[idx] *= m.Lγ[j1, j2])
idx += 1
end
# HLγLγ
i = 1 # index rows of HLγLγ
for i2 in 1:q, i1 in i2:q # traverse lower triangular of Lγ[i1, i2]
i > j && break # skip lower triangular of HLγLγ
H[idx] = m.HLγLγ[i, j]
# different diagonal entries of Lγ
i1 == i2 && (H[idx] *= m.Lγ[i1, i2])
j1 == j2 && (H[idx] *= m.Lγ[j1, j2])
# same diagonal entry of Lγ
i1 == i2 == j1 == j2 && (H[idx] += m.∇Lγ[j1, j2] * m.Lγ[j1, j2])
idx += 1
i += 1
end
j += 1
end
lmul!(σ, H)
end
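# Second-order chain rule for the log-diagonal parametrization: with
# θ = log Lγ[j, j],
#     ∂²f/∂θ² = Lγ[j, j]² ⋅ ∂²f/∂Lγ[j, j]² + Lγ[j, j] ⋅ ∂f/∂Lγ[j, j],
# which accounts for the two diagonal scalings above and for the extra
# `m.∇Lγ[j1, j2] * m.Lγ[j1, j2]` term when all four indices coincide.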
"""
init_ls!(m::WSVarLmmModel; gniters::Integer = 5)
Initialize parameters of a `WSVarLmmModel` object from least squares estimate.
`m.β` is initialized to be `inv(sum(xi'xi)) * sum(xi'yi)`.
`m.Σγ` is initialized to be `inv(sum(zi'zi⊗zi'zi)) * sum(zi'ri⊗zi'ri)`.
`m.τ` is initialized to be `inv(sum(wi'wi)) * sum(wi'log(abs2(ri)))`.
If `gniters > 0`, run `gniters` Gauss-Newton iterations to improve τ.
A positive `noise_level` adds Gaussian noise (drawn from `rng`) to the accumulated
Σγ quantities, to help ensure a full-rank initial estimate.
"""
function init_ls!(
m :: WSVarLmmModel{T};
gniters :: Integer = 5,
noise_level :: T = zero(T),
rng = Random.GLOBAL_RNG
) where T <: BlasReal
# dimensions
q, l = m.q, m.l
# LS estimate for β
_, info = LAPACK.potrf!('U', copyto!(m.Hββ, m.xtx))
info > 0 && throw("design matrix X is rank deficient")
LAPACK.potrs!('U', m.Hββ, copyto!(m.β, m.xty))
# refresh residuals
update_res!(m)
    # accumulate quantities for initializing Σγ and τ
fill!(m.∇τ , 0) # to accumulate Wi' * log(ri.^2)
fill!(m.∇Σγ, 0) # to accumulate Zi'ri ⊗ Zi'ri
fill!(m.Lγ , 0) # scratch space to accumulate Zi'diag(r) diag(r)Zi
for obs in m.data
n = length(obs.y)
# storage_n1 = log(diag(rr'))
map!(r2 -> log(max(r2, floatmin(T))), obs.storage_n1, obs.res2)
# accumulate Wi' * log(ri.^2)
BLAS.gemv!('N', T(1), obs.Wt, obs.storage_n1, T(1), m.∇τ)
# accumulate Zi'ri ⊗ Zi'ri
kron_axpy!(obs.ztres, obs.ztres, m.∇Σγ)
# storage_qn = Zi'diag(r)
copyto!(obs.storage_qn, obs.Zt)
@inbounds for j in 1:n, i in 1:q
obs.storage_qn[i, j] *= obs.res[j]
end
        # accumulate vec(Zi'diag(r) diag(r)Zi)
BLAS.syrk!('U', 'N', T(1), obs.storage_qn, T(1), m.Lγ)
end
    if noise_level != zero(T) # add random noise to help ensure a full-rank Σγ estimate
m.Lγ .+= randn!(rng, m.∇Lγ) .* noise_level
end
copytri!(m.Lγ, 'U')
# LS estimate for Σγ
_, info = LAPACK.potrf!('U', copyto!(m.HΣγΣγ, m.ztz2od))
info > 0 && throw("design matrix Z is rank defficient")
# sum_i (Zi'ri ⊗ Zi'ri - vec(Zi'diag(r) diag(r)Zi))
@inbounds for i in eachindex(m.∇Σγ)
m.∇Σγ[i] -= m.Lγ[i]
end
LAPACK.potrs!('U', m.HΣγΣγ, m.∇Σγ)
_, info = LAPACK.potrf!('L', copyto!(m.Lγ, m.∇Σγ))
# make upper triangular of Lγ zero
@inbounds for j in 2:q, i in 1:j-1
m.Lγ[i, j] = 0
end
# Σγ is singular; set columns L[:, info:end] = 0
if info > 0
@warn("starting Lγ is rank deficient")
@inbounds for j in info:q, i in j:q
m.Lγ[i, j] = 0
end
end
# regress log(ri .* ri) on Wi to initialize τ
_, info = LAPACK.potrf!('U', copyto!(m.Hττ, m.wtw))
info > 0 && throw("design matrix W is singular")
LAPACK.potrs!('U', m.Hττ, copyto!(m.τ, m.∇τ))
# quick return if no GN iterations requested
gniters == 0 && (return m)
# Gauss-Newton iterations to improve τ
# NLS responses: obs.storage_n1 = res^2 - diag(Z L Lt Zt)
for obs in m.data
n = length(obs.y)
# storage_qn = Lγ' * Zt
mul!(obs.storage_qn, transpose(m.Lγ), obs.Zt)
# storage_n1 = diag(rr' - Z * L * L' * Zt)
@inbounds for j in 1:n
obs.storage_n1[j] = obs.res2[j]
for i in 1:q
obs.storage_n1[j] -= abs2(obs.storage_qn[i, j])
end
end
end
# Gauss-Newton iterations
for iter in 1:gniters
# accumulate ∇ and FIM
fill!(m.∇τ, 0)
fill!(m.Hττ, 0)
for obs in m.data
n = length(obs.y)
mul!(obs.expwτ, transpose(obs.Wt), m.τ)
obs.expwτ .= exp.(obs.expwτ)
# storage_ln = Wt * diag(expwτ)
copyto!(obs.storage_ln, obs.Wt)
@inbounds for j in 1:n, i in 1:l
obs.storage_ln[i, j] *= obs.expwτ[j]
end
# ∇i = Wt * diag(expwτ) * (ypseudo - expwτ)
# expwτ = ypseudo - expwτ
obs.expwτ .= obs.storage_n1 .- obs.expwτ
BLAS.gemv!('N', T(1), obs.storage_ln, obs.expwτ, T(1), m.∇τ)
# Hi = Wt * diag(expwτ) * diag(expwτ) * W
BLAS.syrk!('U', 'N', T(1), obs.storage_ln, T(1), m.Hττ)
end
# Gauss-Newton update
LAPACK.potrf!('U', m.Hττ)
LAPACK.potrs!('U', m.Hττ, m.∇τ) # newton direction
m.τ .+= m.∇τ
end
mul!(m.Σγ, m.Lγ, transpose(m.Lγ))
m
end
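# Typical initialization flow (hedged sketch; `m` is a WSVarLmmModel built from data):
#
#     init_ls!(m)                       # LS start for β, Σγ, τ (+ Gauss-Newton on τ)
#     init_mom!(m, Ipopt.Optimizer())   # refine τ and Lγ by unweighted NLS (below)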
# """
# init_mom!(m::WSVarLmmModel, solver; init = init_ls!(m), parallel = false)
# Initialize `τ` and `Lγ` of a `VarLmmModel` object by method of moment (MoM)
# using residulas in `m.obs[i].res`. It involves solving an unweighted NLS problem.
# # Position arguments
# - `m`: A `WSVarLmmModel` object.
# - `solver`: NLP solver. Default is `IpoptSolver(print_level=0, mehrotra_algorithm="yes",
# warm_start_init_point="yes", max_iter=100)`.
# # Keyword arguments
# - `init`: Initlizer for the NLS problem. Default is `init_ls!(m)`. If `init=m`,
# then it uses the values provided in `m.τ` and `m.Lγ` as starting point.
# - `parallel::Bool`: Multi-threading. Default is `false`.
# """
# function init_mom!(
# m :: WSVarLmmModel{T},
# solver = Ipopt.IpoptSolver(print_level = 0, mehrotra_algorithm = "yes",
# warm_start_init_point = "yes",
# warm_start_bound_push = 1e-6, max_iter = 100);
# init :: WSVarLmmModel = init_ls!(m),
# parallel :: Bool = false
# ) where T <: BlasReal
# # set up NLP optimization problem
# npar = m.l + ◺(m.q)
# optm = MathProgBase.NonlinearModel(solver)
# lb = fill(-Inf, npar)
# ub = fill( Inf, npar)
# MathProgBase.loadproblem!(optm, npar, 0, lb, ub, Float64[], Float64[], :Min, m)
# # optimize unweighted obj function (MoM estimator)
# m.iswtnls[1] = false
# m.ismthrd[1] = parallel
# par0 = zeros(npar)
# modelpar_to_optimpar!(par0, m)
# MathProgBase.setwarmstart!(optm, par0)
# MathProgBase.optimize!(optm)
# optimpar_to_modelpar!(m, MathProgBase.getsolution(optm))
# optstat = MathProgBase.status(optm)
# optstat == :Optimal ||
# @warn("Optimization unsuccesful; got $optstat")
# mul!(m.Σγ, m.Lγ, transpose(m.Lγ))
# m
# end
"""
init_mom!(m::WSVarLmmModel, solver; init = init_ls!(m), parallel = false)
Initialize `τ` and `Lγ` of a `VarLmmModel` object by method of moment (MoM)
using residulas in `m.obs[i].res`. It involves solving an unweighted NLS problem.
# Position arguments
- `m`: A `WSVarLmmModel` object.
- `solver`: NLP solver. Default is `IpoptSolver(print_level=0, mehrotra_algorithm="yes",
warm_start_init_point="yes", max_iter=100)`.
# Keyword arguments
- `init`: Initlizer for the NLS problem. Default is `init_ls!(m)`. If `init=m`,
then it uses the values provided in `m.τ` and `m.Lγ` as starting point.
- `parallel::Bool`: Multi-threading. Default is `false`.
"""
function init_mom!(
m :: WSVarLmmModel{T},
solver :: MOI.AbstractOptimizer = Ipopt.Optimizer();
solver_config::Dict =
Dict("print_level" => 0,
"mehrotra_algorithm" => "yes",
"warm_start_init_point" => "yes",
"max_iter" => 100),
init :: WSVarLmmModel = init_ls!(m),
parallel :: Bool = false
) where T <: BlasReal
# Pass options to solver
config_solver(solver, solver_config)
# set up NLP optimization problem
npar = m.l + ◺(m.q)
MOI.empty!(solver)
lb = T[]
ub = T[]
NLPBlock = MOI.NLPBlockData(
MOI.NLPBoundsPair.(lb, ub), m, true
)
par0 = Vector{T}(undef, npar)
modelpar_to_optimpar!(par0, m)
solver_pars = MOI.add_variables(solver, npar)
for i in 1:npar
MOI.set(solver, MOI.VariablePrimalStart(), solver_pars[i], par0[i])
end
MOI.set(solver, MOI.NLPBlock(), NLPBlock)
MOI.set(solver, MOI.ObjectiveSense(), MOI.MIN_SENSE)
# optimize unweighted obj function (MoM estimator)
m.iswtnls[1] = false
m.ismthrd[1] = parallel
MOI.optimize!(solver)
# output
optstat = MOI.get(solver, MOI.TerminationStatus())
optstat in (MOI.LOCALLY_SOLVED, MOI.ALMOST_LOCALLY_SOLVED) ||
@warn("Optimization unsuccessful; got $optstat")
xsol = similar(par0)
for i in eachindex(xsol)
xsol[i] = MOI.get(solver, MOI.VariablePrimal(), MOI.VariableIndex(i))
end
optimpar_to_modelpar!(m, xsol)
mul!(m.Σγ, m.Lγ, transpose(m.Lγ))
m
end
export ◺, commutation, CopyMatrix, kron_axpy!, kr_axpy!, kr, mul!, vech,
Ct_At_kron_A_KC, Ct_At_kron_A_KC!,
Ct_A_kron_B_C, Ct_A_kron_B_C!,
Ct_A_kr_B, Ct_A_kr_B!
import LinearAlgebra: mul!
"""
◺(n::Integer)
Triangular number `n * (n + 1) / 2`.
"""
@inline ◺(n::Integer) = (n * (n + 1)) >> 1
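# For example, ◺(3) == 6: the number of lower-triangular entries of a 3×3 matrix,
# and hence the length of `vech` of a 3×3 matrix.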
"""
kron_axpy!(A, X, Y)
Overwrite `Y` with `A ⊗ X + Y`. Same as `Y += kron(A, X)` but
more memory efficient.
"""
function kron_axpy!(
A::AbstractVecOrMat{T},
X::AbstractVecOrMat{T},
Y::AbstractVecOrMat{T}
) where T <: Real
m, n = size(A, 1), size(A, 2)
p, q = size(X, 1), size(X, 2)
@assert size(Y, 1) == m * p
@assert size(Y, 2) == n * q
@inbounds for j in 1:n
coffset = (j - 1) * q
for i in 1:m
a = A[i, j]
roffset = (i - 1) * p
for l in 1:q
r = roffset + 1
c = coffset + l
for k in 1:p
Y[r, c] += a * X[k, l]
r += 1
end
end
end
end
Y
end
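# Equivalence sketch (small hypothetical matrices):
#
#     A, X = randn(2, 3), randn(4, 5)
#     Y = zeros(2 * 4, 3 * 5)
#     kron_axpy!(A, X, Y)   # in place; never materializes kron(A, X)
#     Y == kron(A, X)       # true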
"""
kr_axpy!(A, X, Y)
Overwrite `Y` with `A ⊙ X + Y`, where `⊙` stands for the Khatri-Rao (columnwise
Kronecker) product. `A` and `X` need to have same number of columns.
"""
function kr_axpy!(
A::AbstractVecOrMat{T},
X::AbstractVecOrMat{T},
Y::AbstractVecOrMat{T}
) where T <: Real
@assert size(A, 2) == size(X, 2) == size(Y, 2)
m, n, p = size(A, 1), size(A, 2), size(X, 1)
@inbounds for j in 1:n
r = 1
for i in 1:m
aij = A[i, j]
for k in 1:p
Y[r, j] += aij * X[k, j]
r += 1
end
end
end
Y
end
kr(A::AbstractVecOrMat{T}, X::AbstractVecOrMat{T}) where T <: Real =
kr_axpy!(A, X, zeros(T, size(A, 1) * size(X, 1), size(A, 2)))
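# Khatri-Rao sketch: for A of size m×n and X of size p×n, `kr(A, X)` is the
# (m * p)×n matrix whose j-th column is kron(A[:, j], X[:, j]).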
struct CopyMatrix <: AbstractMatrix{Int}
n::Int
end
Base.size(C::CopyMatrix) = (abs2(C.n), (C.n * (C.n + 1)) >> 1)
Base.IndexStyle(::Type{<:CopyMatrix}) = IndexCartesian()
function Base.getindex(C::CopyMatrix, i::Int, j::Int)
r, c = CartesianIndices((1:C.n, 1:C.n))[i].I
if r ≥ c && j == (c - 1) * C.n - ((c - 2) * (c - 1)) >> 1 + r - c + 1
return 1
else
return 0
end
end
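# Semantics sketch: with C = CopyMatrix(n) and any n×n matrix M,
#     C * vech(LowerTriangular(M)) == vec(LowerTriangular(M))
#     C' * vec(M) == vech(M)
# i.e. C scatters the ◺(n) lower-triangular entries into an n²-vector.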
"""
mul!(result, A, C::CopyMatrix)
Right-multiplying a matrix `A` by a copying matrix is equivalent to keeping
the columns of `A` corresponding to the lower triangular indices.
"""
function LinearAlgebra.mul!(
result :: AbstractVecOrMat,
A :: AbstractVecOrMat,
C :: CopyMatrix
)
n = isqrt(size(A, 2))
m = size(A, 1)
@assert size(result, 1) == m
@assert size(result, 2) == (n * (n + 1)) >> 1
ac, rc = 0, 0
@inbounds for j in 1:n, i in 1:n
ac += 1
i < j && continue
rc += 1
for k in 1:m
result[k, rc] = A[k, ac]
end
end
result
end
"""
mul!(result, Ct::Transpose{Int, CopyMatrix}, A)
Left-multiplying a matrix `A` by transpose of a copying matrix is equivalent to
keeping the rows of `A` corresponding to the lower triangular indices.
"""
LinearAlgebra.mul!(
result :: AbstractVecOrMat,
Ct :: Transpose{Int, CopyMatrix},
A :: AbstractVecOrMat
) = mul!(transpose(result), transpose(A), Ct.parent)
"""
vech!(v::AbstractVector, A::AbstractVecOrMat)
Overwrite vector `v` by the entries from lower triangular part of `A`.
"""
function vech!(v::AbstractVector, A::AbstractVecOrMat)
m, n = size(A, 1), size(A, 2)
idx = 1
@inbounds for j in 1:n, i in j:m
v[idx] = A[i, j]
idx += 1
end
v
end
function commutation(m::Integer, n::Integer)
K = zeros(Int, m * n, m * n)
colK = 1
@inbounds for j in 1:n, i in 1:m
rowK = n * (i - 1) + j
K[rowK, colK] = 1
colK += 1
end
K
end
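# The commutation matrix K = commutation(m, n) satisfies K * vec(A) == vec(A')
# for any m×n matrix A.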
"""
vech(A::AbstractVecOrMat) -> AbstractVector
Return the entries from lower triangular part of `A` as a vector.
"""
function vech(A::AbstractVecOrMat)
m, n = size(A, 1), size(A, 2)
vech!(similar(A, n * m - (n * (n - 1)) >> 1), A)
end
"""
Ct_At_kron_A_KC!(H, A, B)
Overwrite `H` by `H + C'(A'⊗A)KC`, where `K` is the commutation matrix and
`C` is the copying matrix.
"""
function Ct_At_kron_A_KC!(H::AbstractMatrix, A::AbstractMatrix)
q = size(A, 1)
@assert size(A, 2) == q
@assert size(H, 1) == size(H, 2) == (q * (q + 1)) >> 1
j = 1
@inbounds for w in 1:q, s in w:q
i = 1
for r in 1:q, v in r:q
H[i, j] += A[s, r] * A[v, w]
i += 1
end
j += 1
end
H
end
function Ct_At_kron_A_KC(A)
n◺ = ◺(size(A, 1))
H = zeros(eltype(A), n◺, n◺)
Ct_At_kron_A_KC!(H, A)
end
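# Identity sketch: with Cq = CopyMatrix(q) and Kqq = commutation(q, q),
#     Ct_At_kron_A_KC(A) == Cq' * kron(A', A) * Kqq * Cq,
# computed here without forming any Kronecker product.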
"""
Ct_A_kron_B_C!(H, A, B)
Overwrite `H` by `H + C'(A⊗B)C`, where `C` is the copying matrix.
"""
function Ct_A_kron_B_C!(
H::AbstractMatrix,
A::AbstractMatrix,
B::AbstractMatrix,
)
q = size(A, 1)
@assert size(A, 2) == size(B, 1) == size(B, 2) == q
j = 1
@inbounds for s in 1:q, w in s:q
i = 1
for r in 1:q, v in r:q
H[i, j] += A[r, s] * B[v, w]
i += 1
end
j += 1
end
H
end
function Ct_A_kron_B_C(A, B)
n◺ = ◺(size(A, 1))
H = zeros(eltype(A), n◺, n◺)
Ct_A_kron_B_C!(H, A, B)
end
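# Identity sketch: Ct_A_kron_B_C(A, B) == Cq' * kron(A, B) * Cq with Cq = CopyMatrix(q).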
"""
Ct_A_kr_B!(H, A, B)
Overwrite `H` by `H + C'(A⊙B)`, where `C` is the copying matrix and `⊙` is the
Khatri-Rao (column-wise Kronecker) product.
"""
function Ct_A_kr_B!(H, A, B)
@assert size(A) == size(B)
(q, n) = size(A)
@inbounds for c in 1:n
r = 1
for ia in 1:q
a = A[ia, c]
for ib in ia:q
H[r, c] += a * B[ib, c]
r += 1
end
end
end
H
end
function Ct_A_kr_B(A, B)
@assert size(A) == size(B)
(q, n) = size(A)
H = zeros(eltype(A), ◺(q), n)
Ct_A_kr_B!(H, A, B)
end
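# Identity sketch: Ct_A_kr_B(A, B) == CopyMatrix(q)' * kr(A, B) for q×n inputs A and B.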
"""
nlsv_obj!(obs::WSVarLmmObs, β, τ, Lγ, needgrad::Bool)
nlsv_obj!(m::WSVarLmmModel; needgrad::Bool)
Evaluate the nonlinear least squares (NLS) criterion for variance estimation at
the given data and parameter values. Gradient is calculated if `needgrad=true`.
Expected Hessian is calculated if `needhess=true`. If `updateres=true`, update
mean level residuals first. If `m.iswtnls[1]=true`, evaluate the weighted
`nlsv_obj!()` function. `update_wtmat!(m)` should be called to update the
weight matrix components prior to using the weighted version of `nlsv_obj!()`.
"""
function nlsv_obj!(
obs :: WSVarLmmObs{T},
β :: Vector{T},
τ :: Vector{T},
Lγ :: Matrix{T}, # must be lower triangular
:: Val{false}, # un-weighted fitting
needgrad :: Bool = true,
needhess :: Bool = true,
updateres :: Bool = false
) where T <: BlasReal
(q, n) = size(obs.Zt)
l = size(obs.Wt, 1)
###########
# objective
###########
# update the residual vector ri = y_i - Xi β
updateres && update_res!(obs, β)
# obs.storage_qq = (Z' * Z) * L
copyto!(obs.storage_qq, obs.ztz)
BLAS.trmm!('R', 'L', 'N', 'N', T(1), Lγ, obs.storage_qq)
# ∇Lγ = (Z' * Z) * L * L' * (Z' * Z) for now, needed for gradient later
needgrad && BLAS.syrk!('U', 'N', T(1), obs.storage_qq, T(0), obs.∇Lγ)
# HLγLγ = C'(L'Z'Z ⊗ Z'ZL)KC for now, needed for hessian later
needhess && Ct_At_kron_A_KC!(fill!(obs.HLγLγ, 0), obs.storage_qq)
# obs.storage_qq = L' * (Z' * Z) * L
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.storage_qq)
# storage_qn = L' * Z'
copyto!(obs.storage_qn, obs.Zt)
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.storage_qn)
# storage_q◺n = Cq' * (L'Z' ⊙ Z'), needed for hessian later
needhess && Ct_A_kr_B!(fill!(obs.storage_q◺n, 0), obs.storage_qn, obs.Zt)
# storage_q1 = L' * Z' * res
copyto!(obs.storage_q1, obs.ztres)
BLAS.trmv!('L', 'T', 'N', Lγ, obs.storage_q1)
# update W * τ
mul!(obs.expwτ, transpose(obs.Wt), τ)
obs.obj[1] = (1//2) * (abs2(norm(obs.storage_qq)) + abs2(obs.resnrm2[1]))
obs.obj[1] -= abs2(norm(obs.storage_q1))
map!(exp, obs.expwτ, obs.expwτ)
@inbounds for j in 1:n
obs.obj[1] += (1//2) * abs2(obs.expwτ[j])
obs.zlltzt_dg[j] = 0
for i in 1:q
obs.zlltzt_dg[j] += abs2(obs.storage_qn[i, j])
end
obs.obj[1] += obs.expwτ[j] * (obs.zlltzt_dg[j] - obs.res2[j])
end
###########
# gradient
###########
if needgrad
# wrt τ
@inbounds for j in 1:n
Rjj = obs.res2[j] - obs.expwτ[j] - obs.zlltzt_dg[j]
obs.storage_n1[j] = Rjj * obs.expwτ[j]
end
BLAS.gemv!('N', T(-1), obs.Wt, obs.storage_n1, T(0), obs.∇τ)
# wrt Lγ
# ∇Lγ = (Z' * R * Z) * Lγ
# obs.storage_qn = obs.Zt * Diagonal(sqrt.(obs.expwτ))
@inbounds for j in 1:n
sqrtej = sqrt(obs.expwτ[j])
for i in 1:q
obs.storage_qn[i, j] = sqrtej * obs.Zt[i, j]
end
end
# ∇Lγ = (Z' * Z) * L * L' * (Z' * Z) was computed earlier
# ∇Lγ += storage_qn * storage_qn' - ztres * ztres'
BLAS.syrk!('U', 'N', T(1), obs.storage_qn, T(1), obs.∇Lγ)
BLAS.syrk!('U', 'N', T(-1), obs.ztres, T(1), obs.∇Lγ)
copytri!(obs.∇Lγ, 'U')
# so far ∇Lγ holds ∇Σγ, now ∇Lγ = ∇Σγ * Lγ
# obs.∇Lγ = obs.∇Σγ, collect all then multiply by Lγ at model level for ∇Lγ.
# BLAS.trmm!('R', 'L', 'N', 'N', T(2), Lγ, obs.∇Lγ)
end
###########
# hessian
###########
if needhess
# Hττ = W' * Diagonal(expwτ.^2) * W
# storage_ln = W' * Diagonal(expwτ)
@inbounds for j in 1:n
ej = obs.expwτ[j]
for i in 1:l
obs.storage_ln[i, j] = ej * obs.Wt[i, j]
end
end
BLAS.syrk!('U', 'N', T(1), obs.storage_ln, T(0), obs.Hττ)
copytri!(obs.Hττ, 'U')
# HτLγ = 2 W' * Diagonal(expwτ) * (L'Z' ⊙ Z')' * Cq
# storage_ln = W' * Diagonal(expwτ) was computed above
# storage_q◺n = Cq' * (L'Z' ⊙ Z') was computed earlier
BLAS.gemm!('N', 'T', T(2), obs.storage_ln, obs.storage_q◺n, T(0), obs.HτLγ)
# HLγLγ = 2 [ C'(L'Z'ZL ⊗ Z'Z)C + C'(L'Z'Z ⊗ Z'ZL)KC ]
        # HLγLγ = C'(L'Z'Z ⊗ Z'ZL)KC was calculated earlier
Ct_A_kron_B_C!(obs.HLγLγ, obs.storage_qq, obs.ztz)
lmul!(2, obs.HLγLγ)
end
obs.obj[1]
end
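# Unweighted NLS criterion evaluated above: with
#     Rᵢ = rᵢrᵢ' - Z Lγ Lγ' Z' - diag(exp.(Wᵢ τ)),
# the objective is fᵢ = (1/2) ‖Rᵢ‖²_F, expanded so that only q×q blocks and
# length-nᵢ vectors are ever formed (no nᵢ×nᵢ matrix is materialized).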
function nlsv_obj!(
obs :: WSVarLmmObs{T},
β :: Vector{T},
τ :: Vector{T},
Lγ :: Matrix{T}, # must be lower triangular
:: Val{true}, # weighted fitting
needgrad :: Bool = true,
needhess :: Bool = true,
updateres :: Bool = false,
) where T <: BlasReal
(q, n) = size(obs.Zt)
l = size(obs.Wt, 1)
# update the residual vector ri = y_i - Xi β
updateres && update_res!(obs, β)
# Precompute and fill necessary objects
if needgrad
fill!(obs.∇τ, 0)
end
if needhess
fill!(obs.Hττ, 0)
fill!(obs.HτLγ, 0)
fill!(obs.storage_q◺n, 0)
end
# obs.expwτ = exp.(Wτ)
mul!(obs.expwτ, transpose(obs.Wt), τ)
# for ∇Lγ need storage_n1 = diag(exp.(0.5Wτ))
obs.expwτ .= exp.(obs.expwτ)
needgrad && (obs.storage_n1 .= sqrt.(obs.expwτ))
# terms to compute and store
mul!(obs.Lt_Zt_Dinv_r, transpose(Lγ), obs.Zt_Dinv_r)
mul!(obs.Dinv_Z_L, transpose(obs.Zt_Dinv), Lγ)
mul!(obs.UUt_Z_L, transpose(obs.Zt_UUt), Lγ)
# Lt_Zt_Dinv_Z_L = Zt_Dinv_Z * L
mul!(obs.Lt_Zt_Dinv_Z_L, obs.Zt_Dinv_Z, Lγ)
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.Lt_Zt_Dinv_Z_L)
# Lt_Zt_UUt_Z_L = Zt_UUt_Z_L
mul!(obs.Lt_Zt_UUt_Z_L, obs.Zt_UUt_Z, Lγ)
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.Lt_Zt_UUt_Z_L)
    # Evaluate objective function and precompute diag(Vinv*R*Vinv) for ∇τ
#############
# objective #
#############
# objective function sums
obs.obj[1] = zero(T)
obs.obj[1] += abs2(obs.rt_Dinv_r[1]) #1
obs.obj[1] -= 2 * obs.rt_UUt_r[1] * obs.rt_Dinv_r[1] #2
obs.obj[1] += abs2(obs.rt_UUt_r[1]) #7
# calculate Ut * D * U
fill!(obs.Ut_D_U, 0)
@inbounds for k in 1:n, j in 1:q, i in j:q
obs.Ut_D_U[i, j] += obs.Ut[j, k] * obs.Ut[i, k] * obs.expwτ[k]
end
copytri!(obs.Ut_D_U, 'L')
if needgrad
# storage_qn = Ut_D_U_Ut
mul!(obs.storage_qn, obs.Ut_D_U, obs.Ut)
fill!(obs.diagDVRV, 0)
@inbounds for j in 1:n, i in 1:q #avx speeds up
obs.diagDVRV[j] -= obs.storage_qn[i, j] * obs.Ut[i, j] * obs.expwτ[j] #τ 8
end
end
@inbounds @simd for j in 1:n
obs.obj[1] += (-2 * abs2(obs.Dinv_r[j]) #3
+ 4 * obs.rt_UUt[j] * obs.Dinv_r[j] #4
- 2 * abs2(obs.rt_UUt[j]) # 8
+ abs2(obs.Dinv[j]) * obs.expwτ[j] #11
- 2 * obs.diagUUt_Dinv[j] * obs.expwτ[j]) * obs.expwτ[j] #12
end
if needgrad
@inbounds @simd for j in 1:n
obs.diagDVRV[j] += (abs2(obs.Dinv_r[j]) #τ 1
- 2 * obs.Dinv_r[j] * obs.rt_UUt[j] #τ 2
- abs2(obs.Dinv[j]) * obs.expwτ[j] #τ 3
+ 2 * obs.diagUUt_Dinv[j] * obs.expwτ[j] #τ 4
+ abs2(obs.rt_UUt[j])) * obs.expwτ[j] #τ 7
end
end
@inbounds for j in 1:q #j-i looping for memory access
for i in 1:n
obs.obj[1] += 2 * (abs2(obs.Dinv_Z_L[i, j]) # 13
- obs.UUt_Z_L[i, j] * obs.Dinv_Z_L[i, j] # 14
- obs.Dinv_Z_L[i, j] * obs.UUt_Z_L[i, j] # 16
+ abs2(obs.UUt_Z_L[i, j])) * obs.expwτ[i]# 17
end
end
if needgrad
@inbounds for j in 1:q
for i in 1:n
obs.diagDVRV[i] += (-abs2(obs.Dinv_Z_L[i, j]) #τ 5
+ 2 * obs.Dinv_Z_L[i, j] * obs.UUt_Z_L[i, j] #τ 6
- abs2(obs.UUt_Z_L[i, j])) * obs.expwτ[i] #τ 9
end
end
end
#obs.storage_qq = L' * Z' * UU' * rr' * Dinv * Z * L
copyto!(obs.storage_qq, obs.Zt_UUt_rrt_Dinv_Z)
BLAS.trmm!('L', 'L', 'T', 'N', one(T), Lγ, obs.storage_qq)
BLAS.trmm!('R', 'L', 'N', 'N', one(T), Lγ, obs.storage_qq) #for #9
#use ∇Lγ as temporary storage
#obs.∇Lγ = L' Zt_UUt_rrt_UUt_Z * L
copyto!(obs.∇Lγ, obs.Zt_UUt_rrt_UUt_Z)
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.∇Lγ)
BLAS.trmm!('R', 'L', 'N', 'N', T(1), Lγ, obs.∇Lγ) #for #10
@inbounds for j in 1:q
obs.obj[1] -= 2 * abs2(obs.Lt_Zt_Dinv_r[j]) #5
obs.obj[1] += 2 * obs.storage_qq[j, j] #9
obs.obj[1] -= 2 * obs.∇Lγ[j, j] # 10
for i in 1:q
obs.obj[1] += obs.Ut_D_U[i, j]^2 #15
obs.obj[1] += 2 * obs.rt_UUt_Z[i] * Lγ[i, j] * obs.Lt_Zt_Dinv_r[j] #6
obs.obj[1] += abs2(obs.Lt_Zt_Dinv_Z_L[i, j]) #18
obs.obj[1] -= 2 * obs.Lt_Zt_Dinv_Z_L[i, j] * obs.Lt_Zt_UUt_Z_L[i,j] #19
obs.obj[1] += abs2(obs.Lt_Zt_UUt_Z_L[i, j]) #20
end
end
obs.obj[1] *= (1//2)
############
# Gradient #
############
if needgrad
#wrt τ
#∇τ = -W' * diag(D * Vinv * R * Vinv)
BLAS.gemv!('N', T(-1), obs.Wt, obs.diagDVRV, T(0), obs.∇τ)
#wrt Lγ
# ∇Lγ = -2(Z' * Vinv * R * Vinv * Z * L
# obs.storage_qq = (Z' * Vinv * Z) * L
copyto!(obs.storage_qq, obs.Zt_Vinv_Z)
BLAS.trmm!('R', 'L', 'N', 'N', T(1), Lγ, obs.storage_qq)
#∇Lγ = Z' * Vinv * Z * L * L' * Z' * Vinv * Z
BLAS.syrk!('U', 'N', T(1), obs.storage_qq, T(0), obs.∇Lγ)
# obs.storage_qn = Zt_Vinv * sqrt.(D)
# storage_n1 = diag(exp^{1/2 Wt})
@inbounds for j in 1:n, i in 1:q
obs.storage_qn[i, j] = obs.storage_n1[j] * obs.Zt_Vinv[i, j]
end
# obs.∇Lγ += Zt_Vinv_D_Vinv_Z
BLAS.syrk!('U', 'N', T(1), obs.storage_qn, T(1), obs.∇Lγ)
# obs.∇Lγ += Zt_Vinv_rrt_Vinv_Z
BLAS.syr!('U', T(-1), obs.Zt_Vinv_r, obs.∇Lγ)
copytri!(obs.∇Lγ, 'U')
# obs.∇Lγ = obs.∇Σγ, collect all then multiply by Lγ at model level for ∇Lγ.
end
###########
# Hessian #
###########
if needhess
#wrt τ
# Hττ = W' * D * Vinv .* Vinv * D * W
# storage_ln = Wt * D
@inbounds @simd for j in 1:n
for i in 1:l
obs.storage_ln[i, j] = obs.Wt[i, j] * obs.expwτ[j]
end
end
@inbounds for j in 1:n, i in 1:l
obs.Wt_D_Dinv[i, j] = obs.Dinv[j] * obs.storage_ln[i, j]
obs.Wt_D_sqrtdiagDinv_UUt[i, j] = obs.storage_ln[i, j] * obs.sqrtDinv_UUt[j]
end
BLAS.syrk!('U', 'N', T(1), obs.Wt_D_Dinv, T(0), obs.Hττ) #first term
BLAS.syrk!('U', 'N', T(-2), obs.Wt_D_sqrtdiagDinv_UUt, T(1), obs.Hττ) #second term
mul!(obs.Wt_D_Ut_kr_Utt, obs.storage_ln, transpose(obs.Ut_kr_Ut))
BLAS.syrk!('U', 'N', T(1), obs.Wt_D_Ut_kr_Utt, T(1), obs.Hττ) #third term
copytri!(obs.Hττ, 'U')
#wrt HτLγ
# HτLγ = 2 W' * Diagonal(expwτ) * (L'Z'(V^-1) ⊙ Z'(V^-1))' * Cq
# storage_ln = W' * Diagonal(expwτ) was computed above
# storage_q◺n = Cq' * (L'Z'(V^-1) ⊙ Z'(V^-1))
copyto!(obs.storage_qn, obs.Zt_Vinv)
BLAS.trmm!('L', 'L', 'T', 'N', one(T), Lγ, obs.storage_qn)
Ct_A_kr_B!(obs.storage_q◺n, obs.storage_qn, obs.Zt_Vinv)
BLAS.gemm!('N', 'T', T(2), obs.storage_ln, obs.storage_q◺n, zero(T), obs.HτLγ)
#wrt HLγLγ
# HLγLγ = 2 [ C'(L'Z'(V^-1)ZL ⊗ Z'(V^-1)Z)C + C'(L'Z'(V^-1)Z ⊗ Z'(V^-1)ZL)KC ]
# obs.storage_qq = (Z' (V^-1) Z) * L
copyto!(obs.storage_qq, obs.Zt_Vinv_Z)
BLAS.trmm!('R', 'L', 'N', 'N', T(1), Lγ, obs.storage_qq)
# HLγLγ = C'(L'Z' (V^-1) Z ⊗ Z' (V^-1) ZL)KC first
Ct_At_kron_A_KC!(fill!(obs.HLγLγ, 0), obs.storage_qq)
# obs.storage_qq = L' * (Z' (V^-1) Z) * L
BLAS.trmm!('L', 'L', 'T', 'N', T(1), Lγ, obs.storage_qq)
Ct_A_kron_B_C!(obs.HLγLγ, obs.storage_qq, obs.Zt_Vinv_Z)
lmul!(2, obs.HLγLγ)
end
obs.obj[1]
end
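# Weighted NLS criterion evaluated above: with Rᵢ as in the unweighted case and
# the fixed weight Vᵢ⁻¹ = Dᵢ⁻¹ - UᵢUᵢ' from `update_wtmat!`, the objective is
#     fᵢ = (1/2) tr(Vᵢ⁻¹ Rᵢ Vᵢ⁻¹ Rᵢ),
# expanded term by term (the numbered comments) to avoid any nᵢ×nᵢ intermediate.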
"""
nlsv_obj!(m::WSVarLmmModel, needgrad::Bool, needhess:Bool, updateres::Bool)
Calculate the objective function of a `WSVarLmmModel` object and optionally the
gradient and hessian.
"""
function nlsv_obj!(
m :: WSVarLmmModel{T},
needgrad :: Bool = true,
needhess :: Bool = true,
updateres :: Bool = false
) where T <: BlasReal
# accumulate obj and gradient
obj = zero(T)
if needgrad
fill!( m.∇β, 0)
fill!( m.∇τ, 0)
fill!( m.∇Lγ, 0)
end
if needhess
fill!(m.Hττ , 0)
fill!(m.HτLγ , 0)
fill!(m.HLγLγ, 0)
end
if m.ismthrd[1]
Threads.@threads for obs in m.data
nlsv_obj!(obs, m.β, m.τ, m.Lγ, Val(m.iswtnls[1]),
needgrad, needhess, updateres)
end
for obs in 1:length(m.data)
wtobs = isempty(m.obswts) ? one(T) : T(m.obswts[obs])
obj += wtobs * m.data[obs].obj[1]
if needgrad
BLAS.axpy!(wtobs, m.data[obs].∇τ , m.∇τ )
BLAS.axpy!(wtobs, m.data[obs].∇Lγ , m.∇Lγ)
end
if needhess
BLAS.axpy!(wtobs, m.data[obs].Hττ , m.Hττ )
BLAS.axpy!(wtobs, m.data[obs].HτLγ , m.HτLγ )
BLAS.axpy!(wtobs, m.data[obs].HLγLγ, m.HLγLγ)
end
end
else
for obs in 1:length(m.data)
wtobs = isempty(m.obswts) ? one(T) : T(m.obswts[obs])
obj += wtobs * nlsv_obj!(m.data[obs], m.β, m.τ, m.Lγ,
Val(m.iswtnls[1]), needgrad, needhess, updateres)
if needgrad
BLAS.axpy!(wtobs, m.data[obs].∇τ , m.∇τ )
BLAS.axpy!(wtobs, m.data[obs].∇Lγ , m.∇Lγ)
end
if needhess
BLAS.axpy!(wtobs, m.data[obs].Hττ , m.Hττ )
BLAS.axpy!(wtobs, m.data[obs].HτLγ , m.HτLγ )
BLAS.axpy!(wtobs, m.data[obs].HLγLγ, m.HLγLγ)
end
end
end
# multiply m.∇Lγ by Lγ once instead of at observation level
needgrad && BLAS.trmm!('R', 'L', 'N', 'N', T(2), m.Lγ, m.∇Lγ)
obj
end
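# Gradient identity used above: with Σγ = Lγ Lγ', the chain rule gives
#     ∇Lγ = (∇Σγ + ∇Σγ') Lγ = 2 ∇Σγ Lγ   (∇Σγ symmetric),
# so each observation accumulates ∇Σγ and a single trmm! applies the 2·Lγ factor.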
"""
update_res!(obs, β)
Update the residual vector of `obs::WSVarLmmObs` according to `β`.
"""
function update_res!(
obs :: WSVarLmmObs{T},
β :: Vector{T}
) where T <: BlasReal
BLAS.gemv!('T', T(-1), obs.Xt, β, T(1), copyto!(obs.res, obs.y))
obs.res2 .= abs2.(obs.res)
obs.resnrm2[1] = sum(obs.res2)
mul!(obs.ztres, obs.Zt, obs.res)
obs.res
end
"""
update_res!(m::WSVarLmmModel)
Update residual vector of each observation in `m` according to `m.β`.
"""
function update_res!(m::WSVarLmmModel{T}) where T <: BlasReal
for obs in m.data
update_res!(obs, m.β)
end
nothing
end
"""
update_wtmat!(m::WSVarLmmModel)
Update the observation weight matrix according to the parameter values
`m.τ` and `m.Lγ`. Update `m.β` by WLS and update the residuals accordingly.
Precompute and store various objects needed to evaluate the objective function,
gradient, and Hessian. At return,
- `m.data[i].∇β = Xi' inv(Vi) ri`
- `m.data[i].Hββ = Xi' inv(Vi) Xi`
- `m.∇β = sum_i Xi' inv(Vi) ri`
- `m.Hββ = sum_i Xi' inv(Vi) Xi`
"""
function update_wtmat!(m::WSVarLmmModel{T}) where T <: BlasReal
p, q = m.p, m.q
# update Dinv and U such that Vinv = Dinv - U * U'
# accumulate quantities for updating β
fill!(m.Hββ, 0)
fill!(m.∇β , 0)
for (idx, obs) in enumerate(m.data)
# Form Dinv - UU^T (Woodbury structure of Vi inverse)
n = length(obs.y)
# Step 1: assemble Iq + Lt Zt diag(e^{-η}) Z L
# storage_qn = Lt Zt Diagonal(e^{-0.5η})
# storage_pn = Xt Diagonal(e^{-0.5η})
# storage_n1 = Diagonal(e^{-0.5η}) * y
copyto!(obs.storage_qn, obs.Zt)
BLAS.trmm!('L', 'L', 'T', 'N', T(1), m.Lγ, obs.storage_qn)
mul!(obs.expwτ, transpose(obs.Wt), m.τ)
@inbounds for j in 1:n
invsqrtj = exp(-(1//2)obs.expwτ[j])
obs.Dinv[j] = abs2(invsqrtj)
for i in 1:q
obs.storage_qn[i, j] *= invsqrtj
end
for i in 1:p
obs.storage_pn[i, j] = invsqrtj * obs.Xt[i, j]
end
obs.storage_n1[j] = invsqrtj * obs.y[j]
end
# storage_qq = Iq + Lt Zt diag(e^{-η}) Z L
BLAS.syrk!('U', 'N', T(1), obs.storage_qn, T(0), obs.storage_qq)
@inbounds for i in 1:q
obs.storage_qq[i, i] += 1
end
# Step 2: Cholesky: (Iq + Lt Zt diag(e^{-η}) Z L) = R'R
LAPACK.potrf!('U', obs.storage_qq)
# storage_qn = inv(R') Lt Zt diag(exp(-0.5η))
BLAS.trsm!('L', 'U', 'T', 'N', T(1), obs.storage_qq, obs.storage_qn)
# storage_qp = inv(R') Lt Zt diag(exp(-η)) X = U' X
mul!(obs.storage_qp, obs.storage_qn, transpose(obs.storage_pn))
# Step 3: accumulate X' Vinv X = X' Dinv X - X' U U' X
BLAS.syrk!('U', 'N', T(1), obs.storage_pn, T(0), obs.Hββ)
BLAS.syrk!('U', 'T', T(-1), obs.storage_qp, T(1), obs.Hββ)
copytri!(obs.Hββ, 'U')
wtobs = isempty(m.obswts) ? one(T) : T(m.obswts[idx])
BLAS.axpy!(wtobs, obs.Hββ, m.Hββ)
# Step 4: accumulate X' Vinv y = X' Dinv y - X' U U' y
mul!(obs.∇β, obs.storage_pn, obs.storage_n1)
mul!(obs.storage_q1, obs.storage_qn, obs.storage_n1)
BLAS.gemv!('T', T(-1), obs.storage_qp, obs.storage_q1, T(1), obs.∇β)
BLAS.axpy!(wtobs, obs.∇β, m.∇β)
end
# update β by WLS
copytri!(m.Hββ, 'U')
copyto!(m.data[1].storage_pp, m.Hββ) # m.data[1].storage_pp as scratch space
_, info = LAPACK.potrf!('U', m.data[1].storage_pp)
info > 0 && throw("sum_i Xi' Vi^{-1} Xi is not positive definite")
LAPACK.potrs!('U', m.data[1].storage_pp, copyto!(m.β, m.∇β))
# update residuals according to new β
update_res!(m)
# precompute quantities for obj, grad and Hess evaluations
for obs in m.data
n = length(obs.y)
# update obs.∇β = X' Vinv (y - Xβ) = X' Vinv y - X' Vinv X β
BLAS.gemv!('N', T(-1), obs.Hββ, m.β, T(1), obs.∇β)
        # Update: Dinv_r, rt_Dinv_r, Ut = inv(R') Lt Zt diag(exp(-η))
        # storage_qn = inv(R') Lt Zt diag(exp(-0.5η)) from 1st loop
copyto!(obs.Ut, obs.storage_qn)
obs.rt_Dinv_r[1] = 0
@inbounds for j in 1:n
obs.Dinv_r[j] = obs.Dinv[j] * obs.res[j]
obs.rt_Dinv_r[1] += obs.Dinv[j] * obs.res2[j]
invsqrtj = sqrt(obs.Dinv[j])
for i in 1:q
obs.Ut[i, j] *= invsqrtj
end
end
# Zt_Dinv_r
mul!(obs.Zt_Dinv_r, obs.Zt, obs.Dinv_r)
# rt_U and rt_UUt_r
mul!(obs.rt_U, transpose(obs.res), transpose(obs.Ut))
obs.rt_UUt_r[1] = abs2(norm(obs.rt_U))
# rt_UUt
mul!(obs.rt_UUt, obs.rt_U, obs.Ut)
# rt_UUt_Z
mul!(obs.rt_UUt_Z, obs.rt_UUt, transpose(obs.Zt))
# Zt_Dinv_r
mul!(obs.Zt_Dinv_r, obs.Zt, obs.Dinv_r)
# storage_qq = Ut * Z
mul!(obs.storage_qq, obs.Ut, transpose(obs.Zt))
mul!(obs.Zt_UUt, transpose(obs.storage_qq), obs.Ut)
# storage_qn = Zt * Dinv
fill!(obs.diagUUt_Dinv, 0)
@inbounds for j in 1:n
invj = obs.Dinv[j]
for i in 1:q
obs.diagUUt_Dinv[j] += abs2(obs.Ut[i, j])
obs.storage_qn[i, j] = obs.Zt[i, j] * invj
end
obs.diagUUt_Dinv[j] *= invj
end
copyto!(obs.Zt_Dinv, obs.storage_qn)
# obs.Zt_Vinv = Z' * Dinv
copyto!(obs.Zt_Vinv, obs.storage_qn)
mul!(obs.Zt_Dinv_Z, obs.storage_qn, transpose(obs.Zt))
# storage_qq = Zt * U
mul!(obs.storage_qq, obs.Zt, transpose(obs.Ut))
BLAS.syrk!('U', 'N', T(1), obs.storage_qq, T(0), obs.Zt_UUt_Z)
copytri!(obs.Zt_UUt_Z, 'U')
# for gradient wrt Lγ
mul!(obs.Zt_UUt_rrt_Dinv_Z, transpose(obs.rt_UUt_Z), transpose(obs.Zt_Dinv_r))
BLAS.syrk!('U', 'T', T(1), obs.rt_UUt_Z, T(0), obs.Zt_UUt_rrt_UUt_Z)
copytri!(obs.Zt_UUt_rrt_UUt_Z, 'U')
# for Hessian wrt τ
map!(sqrt, obs.sqrtDinv_UUt, obs.diagUUt_Dinv)
fill!(obs.Ut_kr_Ut, 0)
kr_axpy!(obs.Ut, obs.Ut, obs.Ut_kr_Ut)
# From earlier, obs.Zt_Vinv = Z' * Dinv, continue forming it
BLAS.axpy!(T(-1), obs.Zt_UUt, obs.Zt_Vinv)
copyto!(obs.Zt_Vinv_Z, obs.Zt_Dinv_Z)
BLAS.axpy!(T(-1), obs.Zt_UUt_Z, obs.Zt_Vinv_Z)
mul!(obs.Zt_Vinv_r, obs.Zt_Vinv, obs.res)
end
nothing
end
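# Woodbury structure exploited above: with D = diag(exp.(W * τ)) and Σγ = Lγ Lγ',
#     V = D + Z Σγ Z',
#     V⁻¹ = D⁻¹ - D⁻¹ Z Lγ (I + Lγ' Z' D⁻¹ Z Lγ)⁻¹ Lγ' Z' D⁻¹ = D⁻¹ - U U',
# where U = D⁻¹ Z Lγ R⁻¹ and R'R is the Cholesky factor formed in the first loop.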
rand!(m :: WSVarLmmModel; kwargs...) = rand!(GLOBAL_RNG, m; kwargs...)
rvarlmm(Xs::Array{Matrix}, Zs::Array{Matrix},
Ws::Array{Matrix}, β::Vector, τ::Vector; kwargs...) =
rvarlmm(GLOBAL_RNG, Xs, Zs, Ws, β, τ; kwargs...)
rvarlmm!(meanformula::FormulaTerm, reformula::FormulaTerm,
wsvarformula::FormulaTerm, idvar::Union{Symbol, String},
datatable::DataFrame, β::Vector, τ::Vector; kwargs...) =
rvarlmm!(GLOBAL_RNG, meanformula, reformula,
wsvarformula, idvar,datatable, β, τ; kwargs...)
rvarlmm(X, Z, W, β, τ, Lγω, Lγ,
lγω, γω, z, respdist; kwargs...) = rvarlmm(GLOBAL_RNG, X, Z, W, β, τ, Lγω, Lγ,
lγω, γω, z, respdist; kwargs...)
eval_respdist(μy, vy, respdist; kwargs...) =
eval_respdist(GLOBAL_RNG, μy, vy, respdist; kwargs...)
"""
rand!(m::WSVarLmmModel; respdist = MvNormal, γωdist = MvNormal, Σγω = [], kwargs...)
Replaces the responses `m.data[i].y` with a simulated response based on:
- The data in the model object's data `X, Z, W` matrices.
- The parameter values in the model.
- The conditional distribution of the response given the random effects.
- The distribution of the random effects.
- If simulating from `MvTDist`, you must specify the degrees of freedom via `df = x`.
"""
function rand!(rng::AbstractRNG,
m :: WSVarLmmModel;
respdist = MvNormal,
γωdist = MvNormal,
Σγω = [],
kwargs...)
q = m.q
isempty(Σγω) ? Σγω = [m.Lγ * transpose(m.Lγ) zeros(q);
zeros(1, q) 0.0] : Σγω
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
γω = Vector{Float64}(undef, q + 1)
z = similar(γω)
for ob in m.data
copyto!(ob.y, rvarlmm(rng, transpose(ob.Xt),
transpose(ob.Zt), transpose(ob.Wt),
m.β, m.τ, Lγω, Lγ, lγω, γω, z,
respdist; kwargs...))
end
end
"""
rvarlmm(Xs::Array{Matrix}, Zs::Array{Matrix},
Ws::Array{Matrix}, β::Vector, τ::Vector;
respdist = MvNormal, Σγ=[], Σγω=[])
Generate a simulated response from the `WSVarLmmModel` based on:
- `Xs`: array of each clusters `X`: mean fixed effects covariates
- `Zs`: array of each clusters `Z`: random location effects covariates
- `Ws`: array of each clusters `W`: within-subject variance fixed effects covariates
- `β`: mean fixed effects vector
- `τ`: within-subject variance fixed effects vector
- `respdist`: the distribution for response. Default is MvNormal.
- `Σγ`: random location effects covariance matrix.
- `Σγω`: joint random location and random scale effects covariance matrix (if generating from full model)
Output is an array of simulated responses that match the ordering in `Xs, Zs, Ws`.
-----------
rvarlmm!(meanformula::FormulaTerm, reformula::FormulaTerm,
wsvarformula::FormulaTerm, idvar::Union{Symbol, String},
datatable, β::Vector, τ::Vector; respdist = MvNormal, Σγ=[], Σγω=[],
respname::Symbol = :y)
Generate a simulated response from the VarLMM model based on a DataFrame `datatable`.
Note: **the datatable MUST be ordered by the grouping variable for it to generate in the correct order.**
This can be checked via `datatable == sort(datatable, idvar)`. The response is based on:
- `meanformula`: represents the formula for the mean fixed effects `β` (variables in X matrix)
- `reformula`: represents the formula for the mean random effects γ (variables in Z matrix)
- `wsvarformula`: represents the formula for the within-subject variance fixed effects τ (variables in W matrix)
- `idvar`: the id variable for groupings.
- `datatable`: DataFrame for the model. For this function it **must be in order**.
- `β`: mean fixed effects vector
- `τ`: within-subject variance fixed effects vector
- `respdist`: the distribution for response. Default is MvNormal.
- `Σγ`: random location effects covariance matrix.
- `Σγω`: joint random location and random scale effects covariance matrix (if generating from full model)
- `respname`: symbol representing the simulated response variable name.
`rvarlmm!()` produces a column in the datatable representing the new response.
It also returns the modified datatable.
The datatable **must** be ordered by the ID variable for generated responses to match.
"""
function rvarlmm(rng::AbstractRNG, Xs::Array{Matrix{T}}, Zs::Array{Matrix{T}},
Ws::Array{Matrix{T}}, β::Vector{T},
τ::Vector{T}; respdist = MvNormal, Σγ=[], Σγω=[],
kwargs...,
) where T <: BlasReal
@assert length(Xs) == length(Zs) == length(Ws) "Number of provided X, Z, and W matrices do not match"
    isempty(Σγ) && isempty(Σγω) && error("Neither the covariance matrix for γ nor the covariance matrix for (γ, ω) has been specified. One must be.")
q = size(Zs[1], 2)
# Get Cholesky Factor
isempty(Σγω) ? Σγω = [Σγ zeros(q); zeros(1, q) 0.0] : Σγω
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
γω = Vector{Float64}(undef, q + 1)
z = similar(γω)
y = map(i -> rvarlmm(rng, Xs[i], Zs[i], Ws[i], β, τ, Lγω, Lγ,
lγω, γω, z, respdist),
1:length(Xs))
return y
end
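# Usage sketch (hypothetical dimensions; `I` from LinearAlgebra):
#
#     Xs = [randn(5, 3) for _ in 1:10]   # mean covariates per cluster
#     Zs = [randn(5, 2) for _ in 1:10]   # random-effects covariates
#     Ws = [randn(5, 3) for _ in 1:10]   # WS-variance covariates
#     ys = rvarlmm(Xs, Zs, Ws, zeros(3), zeros(3); Σγ = Matrix(1.0I, 2, 2))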
function rvarlmm!(rng::AbstractRNG, meanformula::FormulaTerm, reformula::FormulaTerm,
wsvarformula::FormulaTerm, idvar::Union{Symbol, String},
datatable::DataFrame, β::Vector{T}, τ::Vector{T}; respdist = MvNormal, Σγ=[], Σγω=[],
respname::Symbol = :y, kwargs...
) where T <: BlasReal
    isempty(Σγ) && isempty(Σγω) && error("Neither the covariance matrix for γ nor the covariance matrix for (γ, ω) has been specified. One must be.")
if typeof(idvar) <: String
idvar = Symbol(idvar)
end
function rvarlmmob(rng, f1, f2, f3, subdata,
β, τ, Lγω, Lγ, lγω, γω, z, respdist)
X = modelmatrix(f1, subdata)
Z = modelmatrix(f2, subdata)
W = modelmatrix(f3, subdata)
return rvarlmm(rng, X, Z, W, β, τ, Lγω, Lγ, lγω, γω, z, respdist)
end
#apply df-wide schema
meanformula = apply_schema(meanformula, schema(meanformula, datatable))
reformula = apply_schema(reformula, schema(reformula, datatable))
wsvarformula = apply_schema(wsvarformula, schema(wsvarformula, datatable))
q = length(StatsModels.coefnames(reformula.rhs))
# Get Cholesky Factor
isempty(Σγω) ? Σγω = [Σγ zeros(q); zeros(1, q) 0.0] : Σγω
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
γω = Vector{Float64}(undef, q + 1)
z = similar(γω)
# Need sortperm of groupby var...
# if typeof(datatable) <: IndexedTable
# y = JuliaDB.groupby(x -> rvarlmmob(meanformula, reformula, wsvarformula,
# x, β, τ, Lγω, Lγ, lγω, γω, z, respdist, kwargs...), datatable, idvar) |>
# x -> column(x, 2) |> x -> vcat(x...)
# datatable = JuliaDB.transform(datatable, respname => y)
# else
# y = JuliaDB.groupby(x -> rvarlmmob(meanformula, reformula, wsvarformula,
# x, β, τ, Lγω, Lγ, lγω, γω, z, respdist, kwargs...), table(datatable), idvar) |>
# x -> column(x, 2) |> x -> vcat(x...)
y = combine(x -> rvarlmmob(rng, meanformula, reformula, wsvarformula,
x, β, τ, Lγω, Lγ, lγω, γω, z, respdist, kwargs...),
groupby(datatable, idvar)) |>
x -> x[!, 2]
datatable[!, respname] = y
# end
return datatable
end
function rvarlmm(rng::AbstractRNG, X, Z, W, β, τ, Lγω, Lγ,
lγω, γω, z, respdist; kwargs...)
q = size(Lγ, 1)
mul!(γω, Lγω, Distributions.rand!(rng, Normal(), z))
# generate y
μy = X * β + Z * γω[1:q]
@views vy = exp.(W * τ .+ dot(γω[1:q], lγω) .+ γω[end])
y = eval_respdist(rng, μy, vy, respdist; kwargs...)
return y
end
function eval_respdist(rng::AbstractRNG, μy, vy, respdist; df = [])
if respdist == MvNormal || respdist == Normal
return rand(rng, MvNormal(μy, Diagonal(vy)))
elseif respdist == MvTDist
isempty(df) ? error("degree of freedom for MvTDist not specified, use 'df' = x.") :
return rand(rng, MvTDist(df, μy, Matrix(Diagonal(vy))))
elseif respdist == Gamma
θparam = vy ./ μy
αparam = abs2.(μy) ./ vy
all(θparam .> 0) && all(αparam .> 0) ||
error("The current parameter/data does not allow for Gamma to be used. α, θ params must be > 0.")
return map((α, θ) -> rand(rng, Gamma(α, θ)), αparam, θparam)
elseif respdist == InverseGaussian
λparam = μy.^3 ./ vy
return map((μ , λ) -> rand(rng, InverseGaussian(μ , λ)), μy, λparam)
elseif respdist == InverseGamma
αparam = (abs2.(μy) .- 2) ./ vy
θparam = μy .* (αparam .- 1)
all(θparam .> 0) && all(αparam .> 0) ||
error("The current parameter/data does not allow for InverseGamma to be used. α, θ params must be > 0.")
return map((α, θ) -> rand(rng, InverseGamma(α, θ)), αparam, θparam)
elseif respdist == Uniform
bparams = μy .+ 0.5sqrt.(12vy)
aparams = 2μy .- bparams
        return map((a, b) -> rand(rng, Uniform(a, b)), aparams, bparams) # Uniform(a, b) matches mean μy and variance vy
else
error("Response distribution $respdist is not valid. Run respdists() to see available options.")
end
end
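# Moment matching used above, e.g. for Gamma: given mean μ and variance v, take
# shape α = μ² / v and scale θ = v / μ, so that αθ = μ and αθ² = v; the other
# branches solve the analogous two-moment equations for their distributions.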
respdists() = [:MvNormal, :MvTDist, :Gamma, :InverseGaussian, :InverseGamma, :Uniform]
"""
sandwich!(m::WSVarLmmModel)
Calculate the sandwich estimator of the asymptotic covariance of the parameters,
based on values `m.Hββ`, `m.Hττ`, `m.HτLγ`, `m.HLγLγ`, `m.data[i].∇β`,
`m.data[i].∇τ`, and `m.data[i].∇Lγ`. `m.vcov` is updated by the sandwich
estimator and returned.
"""
function sandwich!(m::WSVarLmmModel{T}) where T <: BlasReal
p, q, l = m.p, m.q, m.l
minv = inv(m.m)
# form A matrix in the sandwich formula
fill!(m.Ainv, 0)
m.Ainv[ 1:p, 1:p ] = m.Hββ
m.Ainv[ (p + 1):(p + l), (p + 1):(p + l)] = m.Hττ
m.Ainv[ (p + 1):(p + l), (p + l + 1):end ] = m.HτLγ
m.Ainv[(p + l + 1):end, (p + l + 1):end ] = m.HLγLγ
copytri!(m.Ainv, 'U')
lmul!(minv, m.Ainv)
# form B matrix in the sandwich formula
fill!(m.B, 0)
for obs in m.data
copyto!(m.ψ, 1 , obs.∇β)
copyto!(m.ψ, p + 1, obs.∇τ)
offset = p + l + 1
@inbounds for j in 1:q, i in j:q
m.ψ[offset] = obs.∇Lγ[i, j]
offset += 1
end
BLAS.syr!('U', T(1), m.ψ, m.B)
end
copytri!(m.B, 'U')
lmul!(minv, m.B)
# calculuate A (pseudo)-inverse
Aeval, Aevec = eigen(Symmetric(m.Ainv))
@inbounds for j in 1:size(m.Ainv, 2)
ej = Aeval[j]
invsqrtej = ej > sqrt(eps(T)) ? inv(sqrt(ej)) : T(0)
for i in 1:size(m.Ainv, 1)
Aevec[i, j] *= invsqrtej
end
end
mul!(m.Ainv, Aevec, transpose(Aevec))
# calculate vcov
mul!(Aevec , m.Ainv, m.B ) # use Avec as scratch space
mul!(m.vcov, Aevec, m.Ainv)
m.vcov .*= minv
end
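# Sandwich formula computed above: with A the averaged Hessian blocks and
# B = (1/m) Σᵢ ψᵢψᵢ' the averaged outer products of the per-cluster scores,
#     Ĉov ≈ (1/m) A⁻¹ B A⁻¹   (A⁻¹ is a pseudo-inverse via eigendecomposition).
# A common follow-up (hedged sketch; `diag` from LinearAlgebra), with entries
# ordered as (β, τ, vech(Lγ)):
#
#     stderrs = sqrt.(diag(m.vcov))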
module DFTest
# Test for df.jl and rand.jl functions
using DataFrames, Tables, LinearAlgebra, Random, Test, WiSER
Random.seed!(123)
rng = MersenneTwister(123)
t = columntable((
id = [1; 1; 2; 3; 3; 3; 4],
y = [missing; randn(rng, 6)],
x1 = ones(7),
x2 = randn(rng, 7),
x3 = randn(rng, 7),
z1 = ones(7),
z2 = randn(rng, 7),
w1 = ones(7),
w2 = randn(rng, 7),
w3 = randn(rng, 7),
obswts = [2.0; 2.0; 1.0; 0.5; 0.5; 0.5; 1.0]))
df = DataFrame(t)
f1 = @formula(y ~ x1 + x2 + x3)
f2 = @formula(y ~ z1 + z2)
f3 = @formula(y ~ w1 + w2 + w3)
vlma = WSVarLmmModel(f1, f2, f3, :id, t; wtvar = :obswts)
dfa = DataFrame(vlma)
vlmb = WSVarLmmModel(f1, f2, f3, :id, df; wtvar = :obswts)
dfb = DataFrame(vlmb)
β = zeros(3)
τ = zeros(3)
Σγ = Matrix{Float64}(I, 2, 2)
y = [randn(rng, 2) for i in 1:3]
Xs = [randn(rng, 2, 3) for i in 1:3]
Ws = [randn(rng, 2, 3) for i in 1:3]
Zs = [randn(rng, 2, 2) for i in 1:3]
@testset "WSVarLmmModel Constructor" begin
@test dfa == dfb == DataFrames.dropmissing(df)
end
@testset "Simulating Response" begin
@test rvarlmm!(rng, f1, f2, f3, :id, df, β, τ;
Σγ = Σγ, respname = :response)[1, :response] ≈ -3.7193661312903674
@test rvarlmm(rng, Xs, Zs, Ws, β, τ; Σγ = Σγ)[1][1] ≈ 2.3445518865974098
@test "response" in names(df)
vlma.β .= 0
vlma.τ .= 0
vlma.Lγ .= [1. 0; 0 1.]
ytest = vlma.data[1].y[1]
WiSER.rand!(rng, vlma)
@test vlma.data[1].y[1] != ytest
ytest = vlma.data[1].y[1]
WiSER.rand!(rng, vlma; respdist = MvNormal)
@test vlma.data[1].y[1] != ytest
ytest = vlma.data[1].y[1]
WiSER.rand!(rng, vlma; respdist = MvTDist, df = 10)
@test vlma.data[1].y[1] != ytest
ytest = vlma.data[1].y[1]
vlma.β[1] = 30.
WiSER.rand!(rng, vlma; respdist = Gamma)
@test vlma.data[1].y[1] != ytest
ytest = vlma.data[1].y[1]
WiSER.rand!(rng, vlma; respdist = InverseGaussian)
@test vlma.data[1].y[1] != ytest
ytest = vlma.data[1].y[1]
WiSER.rand!(rng, vlma; respdist = InverseGamma)
@test vlma.data[1].y[1] != ytest
end
end
module MVCalculusTest
using BenchmarkTools, LinearAlgebra, Random, Test, WiSER
@testset "kron_axpy!" begin
Random.seed!(123)
m, n = 100, 50
p, q = 30, 20
A = randn(m, n)
X = randn(p, q)
Y1 = zeros(m * p, n * q)
kron_axpy!(A, X, Y1)
@test Y1 == kron(A, X)
bm = @benchmark kron_axpy!($A, $X, $Y1) setup=(fill!($Y1, 0))
display(bm); println()
@test allocs(bm) == 0
Y2 = zeros(n * p, m * q)
kron_axpy!(transpose(A), X, Y2)
@test Y2 == kron(transpose(A), X)
bm = @benchmark kron_axpy!($(transpose(A)), $X, $Y2) setup=(fill!($Y2, 0))
display(bm); println()
@test allocs(bm) == 0
Y3 = zeros(m * q, n * p)
kron_axpy!(A, transpose(X), Y3)
@test Y3 == kron(A, transpose(X))
bm = @benchmark kron_axpy!($A, $(transpose(X)), $Y3) setup=(fill!($Y3, 0))
display(bm); println()
@test allocs(bm) == 0
Y4 = zeros(n * q, m * p)
kron_axpy!(transpose(A), transpose(X), Y4)
@test Y4 == kron(transpose(A), transpose(X))
bm = @benchmark kron_axpy!($(transpose(A)), $(transpose(X)), $Y4) setup=(fill!($Y4, 0))
display(bm); println()
@test allocs(bm) == 0
end
@testset "kr_axpy!" begin
Random.seed!(123)
m, n, p = 100, 50, 30
A = randn(m, n)
X = randn(p, n)
Y1 = zeros(m * p, n)
kr_axpy!(A, X, Y1)
@test Y1 == reshape([A[i1, j] * X[i2, j] for i2 in 1:p, i1 in 1:m, j in 1:n], m * p, n)
bm = @benchmark kr_axpy!($A, $X, $Y1) setup=(fill!($Y1, 0))
display(bm); println()
@test allocs(bm) == 0
end
@testset "CopyMatrix" begin
Random.seed!(123)
n = 50
C = CopyMatrix(n)
@test size(C) == (n * n, (n * (n + 1)) >> 1)
# display(C); println()
# display(Matrix{Float64}(C)); println()
M = randn(n, n)
@test vec(LowerTriangular(M)) == C * vech(LowerTriangular(M))
@test vec(M)' * C == vech(M)'
@test C' * vec(M) == vech(M)
result1 = similar(vec(M)' * C)
bm = @benchmark mul!($result1, $(transpose(vec(M))), $C)
display(bm); println()
@test allocs(bm) == 0
result2 = similar(C' * vec(M))
bm = @benchmark mul!($result2, $(transpose(C)), $(vec(M)))
display(bm); println()
@test_skip allocs(bm) == 0
end
@testset "Ct_At_kron_A_KC" begin
Random.seed!(123)
q = 5
q◺ = WiSER.◺(q)
A = randn(q, q); A = A'A * LowerTriangular(randn(q, q))
H = Ct_At_kron_A_KC(A)
Cq = CopyMatrix(q)
Kqq = commutation(q, q)
@test size(H) == (q◺, q◺)
@test issymmetric(H)
@test H == Cq' * kron(A', A) * Kqq * Cq
bm = @benchmark Ct_At_kron_A_KC!($H, $A) setup=(fill!($H, 0))
display(bm); println()
@test allocs(bm) == 0
end
@testset "Ct_A_kron_B_C" begin
Random.seed!(123)
q = 5
q◺ = WiSER.◺(q)
A = randn(q, q); A = A'A
B = randn(q, q); B = B'B
H = Ct_A_kron_B_C(A, B)
Cq = CopyMatrix(q)
@test size(H) == (q◺, q◺)
@test issymmetric(H)
@test all(eigvals(Symmetric(H)) .≥ 0)
@test H == Cq' * kron(A, B) * Cq
bm = @benchmark Ct_A_kron_B_C!($H, $A, $B) setup=(fill!($H, 0))
display(bm); println()
@test allocs(bm) == 0
end
@testset "Ct_A_kr_B" begin
Random.seed!(123)
q, n = 5, 10
q◺ = ◺(q)
A = randn(q, n)
B = randn(q, n)
H = Ct_A_kr_B(A, B)
Cq = CopyMatrix(q)
@test size(H) == (q◺, n)
@test H == Cq' * kr(A, B)
bm = @benchmark Ct_A_kr_B!($H, $A, $B) setup=(fill!($H, 0))
display(bm); println()
@test allocs(bm) == 0
end
end
# Test on a student T (conditional distribution of Y), gamma (distribution of γ),
# inv-gamma (distribution of ω) simulation data example
module MvtGammaInvgammaTest
using Distributions, InteractiveUtils, LinearAlgebra
using Random, Roots, SpecialFunctions, Test, WiSER
@info "MvT Gamma InvGamma Test"
@info "generate data"
Random.seed!(1234)
rng = MersenneTwister(1234)
# dimensions
m = 1000 # number of individuals
ns = rand(rng, 20:20, m) # numbers of observations per individual
p = 5 # number of fixed effects, including intercept
q = 3 # number of random effects, including intercept
l = 5 # number of WS variance covariates, including intercept
obsvec = Vector{WSVarLmmObs{Float64}}(undef, m)
# true parameter values
βtrue = [ 0.1; 6.5; -3.5; 1.0; 5 ]
τtrue = [-1.5; 1.5; -0.5; 0.0; 0.0]
Σγ = [1.5 0.5 0.3;
0.5 1.0 0.2;
0.3 0.2 0.5];
δγω = [0.0; 0.0; 0.0] # for easier generation of (γ, ω)
σω = 1.0
Σγω = [Σγ δγω; δγω' σω]
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
# parameters for Gamma random deviate ω
# If X ∼ Gamma(α, θ), then E[-ln(X)] = - ψ(α) - ln(θ), Var[-ln(X)] = ψ'(α).
# ωi = log(inv(X)) = - log(X)
# We want Var[ωi] = ψ'(α) = σω and E[ωi] = - ψ(α) - ln(θ) = 0
ωα = Roots.find_zero(x -> trigamma(x) - σω, 1)
ωα > 1 || error("ωα needs to be >1 for the existence of mean of inverse-gamma")
ωθ = exp(-digamma(ωα))
# parameters for Gamma random deviate γ
# If X ∼ Gamma(α, θ), then E[X] = αθ, Var[X] = αθ^2.
# We want Var[X] = 1 and don't care about mean (will shift it to 0)
γα = 4 # shape parameter
γθ = sqrt(inv(γα)) # scale parameter
# degree of freedom for t
ν = 6
# generate data
γ = Vector{Float64}(undef, q)
z = similar(γ) # hold vector of iid std normal
for i in 1:m
# first column intercept, remaining entries iid std normal
X = Matrix{Float64}(undef, ns[i], p)
X[:, 1] .= 1
@views randn!(rng, X[:, 2:p])
# first column intercept, remaining entries iid std normal
Z = Matrix{Float64}(undef, ns[i], q)
Z[:, 1] .= 1
@views randn!(rng, Z[:, 2:q])
# first column intercept, remaining entries iid std normal
W = Matrix{Float64}(undef, ns[i], l)
W[:, 1] .= 1
@views randn!(rng, W[:, 2:l])
# generate ω ∼ log-inv-gamma(ωα, ωθ)
ω = -log(rand(rng, Gamma(ωα, ωθ)))
# generate random effects: γ = Lγ * z
# z is iid Gamma with variance 1 and shifted to have mean 0
Distributions.rand!(rng, Gamma(γα, γθ), z)
z .-= γα * γθ # shift to have mean 0
mul!(γ, Lγ, z)
# generate y from t distribution (ν, μy, σ2ϵ)
μy = X * βtrue + Z * γ
σ2ϵ = W * τtrue .+ dot(γ, lγω) .+ ω
ysd = exp.(0.5 .* (σ2ϵ))
# note: variance of T(ν) is ν / (ν - 2)
y = μy + sqrt(((ν - 2) / ν)) .* ysd .* rand(rng, TDist(ν), ns[i])
# form a VarLmmObs instance
obsvec[i] = WSVarLmmObs(y, X, Z, W)
end
# form WSVarLmmModel
vlmm = WSVarLmmModel(obsvec);
@testset "fit! (start from LS fit)" begin
println(); println(); println()
for solver in [
# KNITRO.KnitroSolver(outlev=3), # outlev 0-6
Ipopt.Optimizer()
# Ipopt.IpoptSolver(print_level=0, watchdog_shortened_iter_trigger=3, max_iter=100),
# Ipopt.IpoptSolver(print_level = 0)
# Ipopt.IpoptSolver(print_level = 3, hessian_approximation = "limited-memory"),
        # Ipopt.IpoptSolver(print_level = 3, obj_scaling_factor = 1 / m) # unstable
# Ipopt.IpoptSolver(print_level = 5, mu_strategy = "adaptive") # same speed
# NLopt.NLoptSolver(algorithm = :LD_SLSQP, maxeval = 4000)
# NLopt.NLoptSolver(algorithm = :LD_MMA, maxeval = 4000),
# NLopt.NLoptSolver(algorithm = :LD_LBFGS, maxeval = 4000)
# NLopt.NLoptSolver(algorithm = :LN_BOBYQA, ftol_rel = 1e-12, ftol_abs = 1e-8, maxeval = 10000)
]
println("----------")
@show solver
println("----------")
@info "init_ls!"
@time init_ls!(vlmm)
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
@info "unweighted obj/gradient/hessian at init_ls"
@show nlsv_obj!(vlmm, true, true, true)
# println("∇β")
# display(vlmm.∇β); println()
# println("∇τ")
# display(vlmm.∇τ); println()
# println("∇Lγ")
# display(vlmm.∇Lγ); println()
# println("Hββ")
# display(vlmm.Hββ); println()
# println("Hττ")
# display(vlmm.Hττ); println()
# println("HτLγ")
# display(vlmm.HτLγ); println()
# println("HLγLγ")
# display(vlmm.HLγLγ); println()
# println("∇:")
# ∇ = [vlmm.∇τ; vech(vlmm.∇Lγ)]
# display(∇); println()
# println("FIM:")
# FIM = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(FIM); println()
# @show eigvals(Symmetric(FIM))
# @show Symmetric(FIM) \ ∇
@info "init_mom!"
@time init_mom!(vlmm, solver)
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
@info "weighted obj/gradient/hessian at init_ls"
init_ls!(vlmm)
vlmm.iswtnls[1] = true
update_wtmat!(vlmm)
@show nlsv_obj!(vlmm, true, true, true)
# println("∇β")
# display(vlmm.∇β); println()
# println("∇τ")
# display(vlmm.∇τ); println()
# println("∇Lγ")
# display(vlmm.∇Lγ); println()
# println("Hββ")
# display(vlmm.Hββ); println()
# println("Hττ")
# display(vlmm.Hττ); println()
# println("HτLγ")
# display(vlmm.HτLγ); println()
# println("HLγLγ")
# display(vlmm.HLγLγ); println()
println("∇:")
∇ = [vlmm.∇τ; vech(vlmm.∇Lγ)]
display(∇); println()
# println("FIM:")
# FIM = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(FIM); println()
# @show eigvals(Symmetric(FIM))
# @show Symmetric(FIM) \ ∇
@info "WNLS fitting"
@time WiSER.fit!(vlmm, solver, runs=2) # start from init_ls by default
@info "obj at solution"
@show nlsv_obj!(vlmm, true, true)
@info "estimates at solution"
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
# @info "gradient at solution"
# @show vlmm.∇β
# @show vlmm.∇τ
# @show vlmm.∇Lγ
@test sqrt(abs2(norm(vlmm.∇τ)) + abs2(norm(vlmm.∇Lγ))) < 1e-5
# @info "Hessian at solution"
# @show vlmm.Hββ
# @show vlmm.HLγLγ
# @show vlmm.HτLγ
# @show vlmm.Hττ
@info "inference at solution"
show(vlmm)
end
end
end
# Test on a normal (conditional distribution of Y), normal (distribution of γ),
# , lognormal (distribution of ω) simulation data example
module NormalNormalLognormalTest
using BenchmarkTools, InteractiveUtils
using LinearAlgebra, Profile, Random, Test, WiSER
@info "Normal Normal LogNormal Test"
@info "generate data"
Random.seed!(123)
rng = MersenneTwister(123)
# dimensions
m = 1000 # number of individuals
ns = rand(rng, 20:20, m) # numbers of observations per individual
p = 5 # number of fixed effects, including intercept
q = 3 # number of random effects, including intercept
l = 5 # number of WS variance covariates, including intercept
obsvec = Vector{WSVarLmmObs{Float64}}(undef, m)
# true parameter values
βtrue = [ 0.1; 6.5; -3.5; 1.0; 5 ]
τtrue = [-5.5; 1.5; -0.5; 0.0; 0.0]
Σγ = [1.5 0.5 0.3;
0.5 1.0 0.2;
0.3 0.2 0.5];
δγω = [0.2; 0.1; 0.05]
σω = [1.0]
Σγω = [Σγ δγω; δγω' σω]
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
# generate data
γω = Vector{Float64}(undef, q + 1)
z = similar(γω) # hold vector of iid std normal
for i in 1:m
# first column intercept, remaining entries iid std normal
X = Matrix{Float64}(undef, ns[i], p)
X[:, 1] .= 1
@views randn!(rng, X[:, 2:p])
# first column intercept, remaining entries iid std normal
Z = Matrix{Float64}(undef, ns[i], q)
Z[:, 1] .= 1
@views randn!(rng, Z[:, 2:q])
# first column intercept, remaining entries iid std normal
W = Matrix{Float64}(undef, ns[i], l)
W[:, 1] .= 1
@views randn!(rng, W[:, 2:l])
# generate random effects: γω = Lγω * z
mul!(γω, Lγω, randn!(rng, z))
# generate y
μy = X * βtrue + Z * γω[1:q]
@views ysd = exp.(0.5 .* (W * τtrue .+ dot(γω[1:q], lγω) .+ γω[end]))
y = ysd .* randn(rng, ns[i]) .+ μy
# form a VarLmmObs instance
obsvec[i] = WSVarLmmObs(y, X, Z, W)
end
# form VarLmmModel
vlmm = WSVarLmmModel(obsvec);
@testset "fit! (start from LS fit)" begin
println(); println(); println()
for solver in [
# KNITRO.KnitroSolver(outlev=3), # outlev 0-6
Ipopt.Optimizer()
# Ipopt.IpoptSolver(print_level=0, watchdog_shortened_iter_trigger=3, max_iter=100),
# Ipopt.IpoptSolver(print_level = 0)
# Ipopt.IpoptSolver(print_level = 3, hessian_approximation = "limited-memory"),
        # Ipopt.IpoptSolver(print_level = 3, obj_scaling_factor = 1 / m) # less accurate, grad at 10^{-1}
        # Ipopt.IpoptSolver(print_level = 3, mu_strategy = "adaptive") # same speed
# NLopt.NLoptSolver(algorithm = :LD_SLSQP, maxeval = 4000)
# NLopt.NLoptSolver(algorithm = :LD_MMA, maxeval = 4000),
# NLopt.NLoptSolver(algorithm = :LD_LBFGS, maxeval = 4000)
# NLopt.NLoptSolver(algorithm = :LN_BOBYQA, ftol_rel = 1e-12, ftol_abs = 1e-8, maxeval = 10000)
]
println("----------")
@show solver
println("----------")
@info "init_ls!"
@time init_ls!(vlmm)
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
@info "unweighted obj/gradient/hessian at init_ls"
@show nlsv_obj!(vlmm, true, true, true)
# println("∇β")
# display(vlmm.∇β); println()
# println("∇τ")
# display(vlmm.∇τ); println()
# println("∇Lγ")
# display(vlmm.∇Lγ); println()
# println("Hββ")
# display(vlmm.Hββ); println()
# println("Hττ")
# display(vlmm.Hττ); println()
# println("HτLγ")
# display(vlmm.HτLγ); println()
# println("HLγLγ")
# display(vlmm.HLγLγ); println()
# println("∇:")
# ∇ = [vlmm.∇τ; vech(vlmm.∇Lγ)]
# display(∇); println()
# println("FIM:")
# FIM = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(FIM); println()
# @show eigvals(Symmetric(FIM))
# @show Symmetric(FIM) \ ∇
@info "init_mom!"
@time init_mom!(vlmm, solver)
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
@info "weighted obj/gradient/hessian at init_ls"
init_ls!(vlmm)
vlmm.iswtnls[1] = true
update_wtmat!(vlmm)
@show nlsv_obj!(vlmm, true, true, true)
# println("∇β")
# display(vlmm.∇β); println()
# println("∇τ")
# display(vlmm.∇τ); println()
# println("∇Lγ")
# display(vlmm.∇Lγ); println()
# println("Hββ")
# display(vlmm.Hββ); println()
# println("Hττ")
# display(vlmm.Hττ); println()
# println("HτLγ")
# display(vlmm.HτLγ); println()
# println("HLγLγ")
# display(vlmm.HLγLγ); println()
println("∇:")
∇ = [vlmm.∇τ; vech(vlmm.∇Lγ)]
display(∇); println()
# println("FIM:")
# FIM = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(FIM); println()
# @show eigvals(Symmetric(FIM))
# @show Symmetric(FIM) \ ∇
@info "WNLS fitting"
@time WiSER.fit!(vlmm, solver, runs=2)
@info "obj at solution"
@show nlsv_obj!(vlmm, true, true)
@info "estimates at solution"
println("β")
display([βtrue vlmm.β]); println()
println("τ")
display([τtrue vlmm.τ]); println()
println("Lγω")
display(vlmm.Lγ); println()
display(Lγ); println()
# @info "gradient at solution"
# @show vlmm.∇β
# @show vlmm.∇τ
# @show vlmm.∇Lγ
@test sqrt(abs2(norm(vlmm.∇τ)) + abs2(norm(vlmm.∇Lγ))) < 1e-5
# @info "Hessian at solution"
# @show vlmm.Hββ
# @show vlmm.HLγLγ
# @show vlmm.HτLγ
# @show vlmm.Hττ
@info "inference at solution"
show(vlmm)
end
end
end
module PerfTest
using BenchmarkTools, InteractiveUtils
using LinearAlgebra, Profile, Random, Test, WiSER
@info "generate data"
Random.seed!(123)
# dimensions
m = 6000 # number of individuals
ns = rand(5:11, m) # numbers of observations per individual
p = 5 # number of fixed effects, including intercept
q = 3 # number of random effects, including intercept
l = 5 # number of WS variance covariates, including intercept
obsvec = Vector{WSVarLmmObs{Float64}}(undef, m)
# true parameter values
βtrue = [0.1; 6.5; -3.5; 1.0; 5]
τtrue = [-1.5; 1.5; -0.5; zeros(l - 3)]
Σγ = Matrix(Diagonal([2.0; 1.2; rand(q - 2)])) # full rank case
δγω = [0.2; 0.1; rand(q - 2) ./ 10]
σω = [1.0]
Σγω = [Σγ δγω; δγω' σω]
Lγω = cholesky(Symmetric(Σγω), check = false).L
Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
# generate data
γω = Vector{Float64}(undef, q + 1)
z = similar(γω) # hold vector of iid std normal
for i in 1:m
# first column intercept, remaining entries iid std normal
X = Matrix{Float64}(undef, ns[i], p)
X[:, 1] .= 1
@views randn!(X[:, 2:p])
# first column intercept, remaining entries iid std normal
Z = Matrix{Float64}(undef, ns[i], q)
Z[:, 1] .= 1
@views randn!(Z[:, 2:q])
# first column intercept, remaining entries iid std normal
W = Matrix{Float64}(undef, ns[i], l)
W[:, 1] .= 1
@views randn!(W[:, 2:l])
# generate random effects: γω = Lγω * z
mul!(γω, Lγω, randn!(z))
# generate y
μy = X * βtrue + Z * γω[1:q]
@views ysd = exp.(0.5 .* (W * τtrue .+ dot(γω[1:q], lγω) .+ γω[end]))
y = ysd .* randn(ns[i]) .+ μy
# form a VarLmmObs instance
obsvec[i] = WSVarLmmObs(y, X, Z, W)
end
# form VarLmmModel
vlmm = WSVarLmmModel(obsvec);
@testset "init_ls!" begin
# least squares starting point
init_ls!(vlmm)
@show vlmm.β
@show vlmm.τ
@show vlmm.Lγ
# @info "type stability"
# @code_warntype init_ls!(vlmm)
@info "benchmark"
bm = @benchmark init_ls!($vlmm, gniters=5)
display(bm); println()
@test_skip allocs(bm) == 0
# @info "profile"
# Profile.clear()
# @profile @btime init_ls!(vlmm; gniters=5)
# Profile.print(format=:flat)
end
@testset "update_wtmat!" begin
# set parameter values to be the truth
copy!(vlmm.β, βtrue)
copy!(vlmm.τ, τtrue)
vlmm.τ[1] = τtrue[1] + 0.5(abs2(lω) + abs2(norm(lγω + Lγ'lγω)))
vlmm.Lγ .= Lγ
@show vlmm.β
@show vlmm.τ
@show vlmm.Lγ
# update weight matrix
update_wtmat!(vlmm)
@show vlmm.β
@show vlmm.∇β
# @show vlmm.Hββ
# @info "type stability"
# @code_warntype update_wtmat!(vlmm)
@info "benchmark"
bm = @benchmark update_wtmat!($vlmm)
display(bm); println()
@test allocs(bm) == 0
# @info "profile"
# Profile.clear()
# @profile @btime update_wtmat!(vlmm)
# Profile.print(format=:flat)
end
@testset "nlsv_obj! (unweighted)" begin
# set parameter values to be the truth
copy!(vlmm.β, βtrue)
copy!(vlmm.τ, τtrue)
vlmm.τ[1] = τtrue[1] + 0.5(abs2(lω) + abs2(norm(lγω + Lγ'lγω)))
vlmm.Lγ .= Lγ
@show vlmm.β
@show vlmm.τ
@show vlmm.Lγ
# evaluate objective (at truth)
@info "obj/grad/hessian at true parameter values"
vlmm.iswtnls[1] = false
@show nlsv_obj!(vlmm, true, true, true)
@show vlmm.∇β
@show vlmm.∇τ
@show vlmm.∇Lγ
H = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(H); println()
@test norm(H - transpose(H)) / norm(H) < 1e-8
@test all(eigvals(Symmetric(H)) .≥ 0)
# @info "type stability"
# @code_warntype nlsv_obj!(vlmm.data[1], vlmm.β, vlmm.τ, vlmm.Lγ, true)
# @code_warntype nlsv_obj!(vlmm, true)
@info "benchmark"
# bm = @benchmark nlsv_obj!($vlmm.data[1], $vlmm.β, $vlmm.τ, $vlmm.Lγ, true)
# display(bm)
# @test allocs(bm) == 0
bm = @benchmark nlsv_obj!($vlmm, true, true, true)
display(bm); println()
@test allocs(bm) == 0
# @info "profile"
# Profile.clear()
# @profile @btime nlsv_obj!($vlmm, true, true)
# Profile.print(format=:flat)
end
@testset "nlsv_obj! (weighted)" begin
# set parameter values to be the truth
copy!(vlmm.β, βtrue)
copy!(vlmm.τ, τtrue)
vlmm.τ[1] = τtrue[1] + 0.5(abs2(lω) + abs2(norm(lγω + Lγ'lγω)))
vlmm.Lγ .= Lγ
@show vlmm.β
@show vlmm.τ
@show vlmm.Lγ
# update_wtmat then evaluate at the truth
update_wtmat!(vlmm)
vlmm.iswtnls[1] = true
@show nlsv_obj!(vlmm, true, true, true)
@show vlmm.∇β
@show vlmm.∇τ
@show vlmm.∇Lγ
H = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(H); println()
@test norm(H - transpose(H)) / norm(H) < 1e-8
@test all(eigvals(Symmetric(H)) .≥ 0)
# @info "type stability"
# @code_warntype nlsv_obj!(vlmm.data[1], vlmm.β, vlmm.τ, vlmm.Lγ, true)
# @code_warntype nlsv_obj!(vlmm, true)
@info "benchmark"
bm = @benchmark nlsv_obj!($vlmm, true, true, true)
display(bm); println()
@test allocs(bm) == 0
# @info "profile"
# Profile.clear()
# @profile @btime nlsv_obj!($vlmm, true, true, true)
# Profile.print(format=:flat)
end
@testset "nlsv_obj! (weighted) - parallel" begin
# set parameter values to be the truth
copy!(vlmm.β, βtrue)
copy!(vlmm.τ, τtrue)
vlmm.τ[1] = τtrue[1] + 0.5(abs2(lω) + abs2(norm(lγω + Lγ'lγω)))
vlmm.Lγ .= Lγ
@show vlmm.β
@show vlmm.τ
@show vlmm.Lγ
# update_wtmat then evaluate at the truth
update_wtmat!(vlmm)
vlmm.iswtnls[1] = true
vlmm.ismthrd[1] = true
@show nlsv_obj!(vlmm, true, true, true)
@show vlmm.∇β
@show vlmm.∇τ
@show vlmm.∇Lγ
H = [vlmm.Hττ vlmm.HτLγ; vlmm.HτLγ' vlmm.HLγLγ]
# display(H); println()
@test norm(H - transpose(H)) / norm(H) < 1e-8
@test all(eigvals(Symmetric(H)) .≥ 0)
# @info "type stability"
# @code_warntype nlsv_obj!(vlmm.data[1], vlmm.β, vlmm.τ, vlmm.Lγ, true)
# @code_warntype nlsv_obj!(vlmm, true)
@info "benchmark"
vlmm.iswtnls[1] = true
vlmm.ismthrd[1] = true
bm = @benchmark nlsv_obj!($vlmm, true, true, true)
display(bm); println()
# @info "profile"
# Profile.clear()
# @profile @btime nlsv_obj!($vlmm, true, true, true)
# Profile.print(format=:flat)
end
end | WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 233 | module PkgTest
include("df_test.jl")
include("normal_normal_lognormal_test.jl")
# include("mvt_gamma_invgamma_test.jl")
# include("mvcalc_test.jl")
# include("perf_test.jl")
# include("startingpoint_test.jl")
end # PkgTest module
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | code | 4912 | # This test reproduces a case, where all starting strategies failed to
# produce a reasonable estimate. Estimated τ diverges.
module DebugTest
using DataFrames, Distributions, Ipopt, LinearAlgebra, Random, Test, WiSER
function createvlmm(t, k, j)
p = 5 # number of fixed effects, including intercept
q = 2 # number of random effects, including intercept
l = 5 # number of WS variance covariates, including intercept
q◺ = ◺(q)
# true parameter values
global βtrue = [0.1; 6.5; -3.5; 1.0; 5]
global τtrue = [0.0; 0.5; -0.2; 0.5; 0.0]
Σγ = Matrix(Diagonal([2.0; 1.2]))
δγω = [0.2; 0.1]
σω = [1.0]
Σγω = [Σγ δγω; δγω' σω]
Lγω = cholesky(Symmetric(Σγω), check = false).L
global Lγ = Lγω[1:q, 1:q]
lγω = Lγω[q + 1, 1:q]
lω = Lγω[q + 1, q + 1]
vechLγ = vech(Lγ)
# generate data
γω = Vector{Float64}(undef, q + 1)
z = similar(γω) # hold vector of iid std normal
intervals = zeros(p + l, 2) #hold intervals
    curcoverage = zeros(p + l) # hold current coverage results
trueparams = [βtrue; τtrue] #hold true parameters
#simulation parameters
samplesizes = collect(1000:1000:6000)
ns = [10; 25; 50; 100; 1000]
m = samplesizes[t]
ni = ns[k] # number of observations per individual
obsvec = Vector{WSVarLmmObs{Float64}}(undef, m)
println("rep $j obs per person $ni samplesize $m")
Random.seed!(j + 100000k + 1000t)
for i in 1:m
# first column intercept, remaining entries iid std normal
X = Matrix{Float64}(undef, ni, p)
X[:, 1] .= 1
@views Distributions.rand!(Normal(), X[:, 2:p])
# first column intercept, remaining entries iid std normal
Z = Matrix{Float64}(undef, ni, q)
Z[:, 1] .= 1
@views Distributions.rand!(Normal(), Z[:, 2:q])
# first column intercept, remaining entries iid std normal
W = Matrix{Float64}(undef, ni, l)
W[:, 1] .= 1
@views Distributions.rand!(Normal(), W[:, 2:l])
# generate random effects: γω = Lγω * z
mul!(γω, Lγω, Distributions.rand!(Normal(), z))
# generate y
μy = X * βtrue + Z * γω[1:q]
@views vy = exp.(W * τtrue .+ dot(γω[1:q], lγω) .+ γω[end])
y = rand(MvNormal(μy, Diagonal(vy)))
# form a VarLmmObs instance
obsvec[i] = WSVarLmmObs(y, X, Z, W)
end
# form VarLmmModel
vlmm = WSVarLmmModel(obsvec);
return vlmm
end
vlmm1 = createvlmm(1, 1, 203) # trouble case
# vlmm1 = createvlmm(2, 1, 233)
### If you revert back to initializing the intercept only, these cases will also fail:
# ts = [2, 2, 2, 4, 1, 3, 2, 1, 1, 2]
# ks = [1, 1, 1, 1, 2, 1, 2, 1, 1, 1]
# js = [35, 53, 109, 148, 168, 174, 100, 7, 203, 233]
# solver = Ipopt.IpoptSolver(print_level=0)
solver = Ipopt.IpoptSolver(print_level=0, mehrotra_algorithm="yes", max_iter=100)
# solver = NLopt.NLoptSolver(algorithm = :LN_BOBYQA,
# ftol_rel = 1e-12, ftol_abs = 1e-8, maxeval = 10000)
# solver = NLopt.NLoptSolver(algorithm = :LD_SLSQP, maxeval = 4000)
# solver = NLopt.NLoptSolver(algorithm = :LD_MMA, maxeval = 4000)
# solver = NLopt.NLoptSolver(algorithm = :LD_LBFGS, maxeval = 4000)
@info "starting point by LS init_ls!"
WiSER.init_ls!(vlmm1)
println("β"); display(vlmm1.β); println()
println("τ"); display(vlmm1.τ); println()
println("Lγ"); display(vlmm1.Lγ); println()
@show norm(vlmm1.β - βtrue)
@show norm(vlmm1.τ[2:end] - τtrue[2:end])
@show norm(vlmm1.Lγ - Lγ)
@info "unweighted NLS fit (start from LS)"
@time init_mom!(vlmm1, solver)
println("β"); display(vlmm1.β); println()
println("τ"); display(vlmm1.τ); println()
println("Lγ"); display(vlmm1.Lγ); println()
@show norm(vlmm1.β - βtrue)
@show norm(vlmm1.τ[2:end] - τtrue[2:end])
@show norm(vlmm1.Lγ - Lγ)
# @info "weighted NLS fit (start from true τ and LS β, Σγ)"
# init_ls!(vlmm1)
# # vlmm1.τ .= [-0.1; 0.5; -0.2; 0.5; 0.0]
# # vlmm1.τ .= [0.27, 0.14, -0.04, 0.13, -0.02] # LS (x5 works)
# # vlmm1.τ .= [2.09, 0.13, -0.019, 0.15, -0.0007] # log-LS (x5 works)
# fit!(vlmm1, solver; init = vlmm1, runs = 2)
# println("β"); display(vlmm1.β); println()
# println("τ"); display(vlmm1.τ); println()
# println("Lγ"); display(vlmm1.Lγ); println()
# show(vlmm1)
@info "weighted fit (start from LS)"
@time fit!(vlmm1, solver; init = init_ls!(vlmm1), runs = 2)
println("β"); display(vlmm1.β); println()
println("τ"); display(vlmm1.τ); println()
println("Lγ"); display(vlmm1.Lγ); println()
@show norm(vlmm1.β - βtrue)
@show norm(vlmm1.τ[2:end] - τtrue[2:end])
@show norm(vlmm1.Lγ - Lγ)
@info "weighted fit (start from MoM)"
@time fit!(vlmm1, solver; init = init_mom!(vlmm1, solver), runs = 2)
println("β"); display(vlmm1.β); println()
println("τ"); display(vlmm1.τ); println()
println("Lγ"); display(vlmm1.Lγ); println()
@show norm(vlmm1.β - βtrue)
@show norm(vlmm1.τ[2:end] - τtrue[2:end])
@show norm(vlmm1.Lγ - Lγ)
end
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | docs | 2249 | # WiSER.jl
| **Documentation** | **Build Status** | **Code Coverage** |
|-------------------|------------------|--------------------|
| [](https://openmendel.github.io/WiSER.jl/stable) [](https://openmendel.github.io/WiSER.jl/dev/) | [](https://github.com/OpenMendel/WiSER.jl/actions/workflows/ci.yml) | [](https://codecov.io/gh/OpenMendel/WiSER.jl) |
WiSER stands for **wi**thin-**s**ubject variance **e**stimation by **r**obust regression. It is a regression approach for estimating the effects of predictors on the within-subject variation in a longitudinal setting.
WiSER.jl requires Julia v1.0 or later. See the documentation for usage.
This package is registered in the default Julia package registry and can be installed through the standard package installation procedure, e.g., by running the following code in the Julia REPL.
```julia
using Pkg
pkg"add WiSER"
```
## Citation
The methods and applications of this software package are detailed in the following publication:
*German CA, Sinsheimer JS, Zhou J, Zhou H. WiSER: Robust and scalable estimation and inference of within-subject variances from intensive longitudinal data. Biometrics. 2021 Jun 18:10.1111/biom.13506. doi: 10.1111/biom.13506. Epub ahead of print. PMID: 34142722; PMCID: PMC8683571.*
If you use [OpenMendel](https://openmendel.github.io) analysis packages in your research, please cite the following reference in the resulting publications:
*Zhou H, Sinsheimer JS, Bates DM, Chu BB, German CA, Ji SS, Keys KL, Kim J, Ko S, Mosher GD, Papp JC, Sobel EM, Zhai J, Zhou JJ, Lange K. OPENMENDEL: a cooperative programming project for statistical genetics. Hum Genet. 2020 Jan;139(1):61-71. doi: 10.1007/s00439-019-02001-z. Epub 2019 Mar 26. PMID: 30915546; PMCID: [PMC6763373](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6763373/).*
## Acknowledgments
This project has been supported by the National Institutes of Health under awards R01GM053275, R01HG006139, R25GM103774, and 1R25HG011845.
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | docs | 7479 | # WiSER.jl Introduction
`WiSER.jl` implements a regression method for modeling the within-subject variability of a longitudinal measurement. It stands for **wi**thin-**s**ubject variance **e**stimation by robust **r**egression.
This package requires Julia v1.0 or later, which can be obtained from https://julialang.org/downloads/ or by building Julia from the sources in the https://github.com/JuliaLang/julia repository.
The package has not yet been registered and must be installed using the repository location. Start Julia and use the ] key to switch to the package manager REPL
```julia
(@v1.5) Pkg> add https://github.com/OpenMendel/WiSER.jl
```
Use the backspace key to return to the Julia REPL.
## Model
WiSER was created to efficiently estimate effects of covariates on within-subject (WS) variability in longitudinal data. The following graphic depicts the motivation for WiSER and what it can model.

The figure above displays systolic blood pressure (SBP) measured for two patients followed over 40 visits. At baseline, we see a difference in both the mean and the variability of SBP between the two patients. After the 20th visit, patient 1 goes on blood pressure medication, and the mean and WS variance of their SBP more closely match patient 2's. It can be of clinical importance to model which factors are associated with these baseline differences in mean and WS variance, as well as how being on medication (a time-varying covariate) affects these measures. WiSER is able to simultaneously model the effects of (time-invariant and time-varying) covariates on the mean and within-subject variability of longitudinal traits.
The mean fixed effects are estimated in $\boldsymbol{\beta}$, the within-subject variance fixed effects are estimated by $\boldsymbol{\tau}$, and the random effects covariance matrix is estimated in $\boldsymbol{\Sigma}_{\boldsymbol{\gamma}}$.
## Model Details
In addition to mean levels, it can be important to model factors influencing within-subject variability of longitudinal outcomes. We utilize a modified linear mixed effects model that allows for within-subject variability to be modeled through covariates. It is motivated by a Mixed Effects Multiple Location Scale Model introduced by [Dzubar et al. (2020)](https://link.springer.com/article/10.3758/s13428-019-01322-1), but WiSER dispenses with the normal assumptions and is much faster than the likelihood method implemented in the [MixWILD](https://reach-lab.github.io/MixWildGUI/) software.
The procedure assumes the following model for the data:
Data:
- ``y_{ij}`` longitudinal response of subject ``i`` at time ``j``
- ``\textbf{x}_{ij}`` mean fixed effects covariates of subject ``i`` at time ``j``
- ``\textbf{z}_{ij}`` random (location) effects covariates of subject ``i`` at time ``j``
- ``\textbf{w}_{ij}`` within-subject variance fixed effects covariates of subject ``i`` at time ``j``
Parameters:
- ``\boldsymbol{\beta}`` mean fixed effects
- ``\boldsymbol{\tau}`` within-subject variance fixed effects
- ``\boldsymbol{\boldsymbol{\gamma}_i}`` random location effects of subject ``i``
- ``\boldsymbol{\Sigma}_{\boldsymbol{\gamma}}`` random (location) effects covariance matrix
- ``\omega_i`` random scale effect of subject ``i``
- ``\sigma_\omega^2`` variance of random scale effect
- ``\boldsymbol{\Sigma}_{\boldsymbol{\gamma} \omega}`` joint random effects covariance matrix
Other:
- ``\mathcal{D(a, b)}`` unspecified distribution with mean ``a`` and variance ``b``
- ``\epsilon_{ij}`` error term of subject ``i`` and time ``j`` capturing within-subject variability
The longitudinal data are modeled via:
```math
\begin{aligned}
y_{ij} &=& \textbf{x}_{ij}^T\boldsymbol{\beta} + \textbf{z}_{ij}^T\boldsymbol{\gamma}_i + \epsilon_{ij}, \quad \epsilon_{ij} \sim \mathcal{D}(0, \sigma_{\epsilon_{ij}}^2), \\
\boldsymbol{\gamma_i} &=& (\gamma_{i1}, \gamma_{i2}, \cdots, \gamma_{iq})^T \sim \mathcal{D}(\mathbf{0}_{q}, \boldsymbol{\Sigma}_{\boldsymbol{\gamma}}),
\end{aligned}
```
where
```math
\begin{aligned}
\sigma_{\epsilon_{ij}}^2 = \exp (\textbf{w}_{ij}^T \boldsymbol{\tau} + \boldsymbol{\ell}_{\boldsymbol{\gamma} \omega}^T \boldsymbol{\gamma_i} + \omega_i), \quad \omega_i \sim \mathcal{D}(0, \sigma_\omega^2)
\end{aligned}
```
represents the within-subject variance with $\boldsymbol{\ell}_{\gamma \omega}^T$ coming from the Cholesky factor of the covariance matrix of the joint distribution of random effects ($\boldsymbol{\gamma}_i$, $\omega_i$).
The joint distribution of random effects is
```math
\begin{aligned}
\begin{pmatrix}
\boldsymbol{\gamma_i} \\ \omega_i
\end{pmatrix} \sim \mathcal{D}(\mathbf{0}_{q+1}, \boldsymbol{\Sigma}_{\boldsymbol{\gamma} \omega})
\end{aligned}
```
and denote the Cholesky decomposition of the covariance matrix $\boldsymbol{\Sigma_{\gamma w}}$ as
```math
\begin{aligned}
\boldsymbol{\Sigma}_{\boldsymbol{\gamma} \omega} &=& \begin{pmatrix}
\boldsymbol{\Sigma}_{\boldsymbol{\gamma}} & \boldsymbol{\sigma}_{\boldsymbol{\gamma} \omega} \\
\boldsymbol{\sigma}_{\boldsymbol{\gamma} \omega}^T & \sigma_\omega^2
\end{pmatrix} = \textbf{L} \textbf{L}^T, \quad
\textbf{L} = \begin{pmatrix}
\textbf{L}_{\boldsymbol{\gamma}} & \mathbf{0} \\
\boldsymbol{\ell}_{\boldsymbol{\gamma} \omega}^T & \ell_{\omega}
\end{pmatrix},
\end{aligned}
```
where $\textbf{L}_{\boldsymbol{\gamma}}$ is a $q \times q$ lower triangular matrix with positive diagonal entries and $\ell_{\omega} > 0$. The elements of $\boldsymbol{\Sigma}_{\boldsymbol{\gamma} \omega}$ can be expressed in terms of the Cholesky factors as:
```math
\begin{aligned}
\boldsymbol{\Sigma}_{\boldsymbol{\gamma}} &=& \textbf{L}_{\boldsymbol{\gamma}} \textbf{L}_{\boldsymbol{\gamma}}^T \\
\boldsymbol{\sigma}_{\boldsymbol{\gamma} \omega} &=& \textbf{L}_{\boldsymbol{\gamma}} \boldsymbol{\ell}_{\boldsymbol{\gamma} \omega} \\
\sigma_\omega^2 &=& \boldsymbol{\ell}_{\boldsymbol{\gamma} \omega}^T \boldsymbol{\ell}_{\boldsymbol{\gamma} \omega} + \ell_{\omega}^2
\end{aligned}
```
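These identities are easy to verify numerically. Below is a minimal, self-contained sketch; the variable names and example values are our own illustrations, not part of WiSER's API:
```julia
using LinearAlgebra
q   = 2
Σγ  = [2.0 0.0; 0.0 1.2]          # random location effects covariance
σγω = [0.2, 0.1]                  # location-scale effects covariance
σω² = 1.0                         # random scale effect variance
Σγω = [Σγ σγω; σγω' σω²]          # joint covariance matrix
L   = cholesky(Symmetric(Σγω)).L  # lower Cholesky factor
Lγ  = L[1:q, 1:q]
ℓγω = L[q + 1, 1:q]
ℓω  = L[q + 1, q + 1]
Σγ  ≈ Lγ * Lγ'                    # true
σγω ≈ Lγ * ℓγω                    # true
σω² ≈ ℓγω' * ℓγω + ℓω^2           # true
```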
Dzubar et al. assume all unspecified distributions above are normal. Our estimation procedure is robust and only requires the stated means and variances of those random variables to hold. Their MixWILD software fits the model through maximum likelihood, which requires computationally intensive numerical integration.
We have derived a computationally efficient and statistically robust method for obtaining estimates of $\boldsymbol{\beta}$, $\boldsymbol{\tau}$, and $\boldsymbol{\Sigma_\gamma}$. The mean fixed effects $\boldsymbol{\beta}$ are estimated by weighted least squares, while the variance components $\boldsymbol{\tau}$ and $\boldsymbol{\Sigma_\gamma}$ are estimated via a weighted nonlinear least squares approach motivated by the method of moments. WiSER does not estimate any parameters associated with the random scale effect $\omega_i$ or any association between $\boldsymbol{\gamma}_i$ and $\omega_i$. These are treated as nuisance parameters that get absorbed into the intercept of $\boldsymbol{\tau}$.
**NOTE**: When the true data has a random scale effect with non-zero variance $\sigma^2_\omega$, WiSER's estimates of $\boldsymbol{\beta}$, non-intercept values of $\boldsymbol{\tau}$, and $\boldsymbol{\Sigma_\gamma}$ are consistent. In this case, the intercept of $\boldsymbol{\tau}$ absorbs effects from $\sigma^2_\omega$.
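To make the fitting flow concrete, here is a sketch using helper functions that appear in the package's test suite (`init_ls!`, `update_wtmat!`). It assumes a constructed `WSVarLmmModel` named `vlmm` and only illustrates the steps that `fit!` automates; it is not the literal implementation:
```julia
using WiSER
# assumes `vlmm::WSVarLmmModel` has been constructed from data
init_ls!(vlmm)      # crude least squares starting values for β, τ, Lγ
update_wtmat!(vlmm) # form weight matrices from the current variance estimates
# `fit!` then alternates weighted least squares for β with weighted nonlinear
# least squares for (τ, Σγ), refreshing the weights after each run
WiSER.fit!(vlmm, runs = 2)
```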
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | docs | 37986 | # Model Fitting
`WiSER.jl` implements a regression method for modeling the within-subject variability of a longitudinal measurement. It stands for **wi**thin-**s**ubject variance **e**stimation by robust **r**egression.
Here we cover model construction and parameter estimation using WiSER.
```julia
using CSV, DataFrames, WiSER
```
## Example data
The example dataset, `sbp.csv`, is contained in the `data` folder of the package. It is a simulated dataset with 500 individuals, each having 9 to 11 observations. The outcome, systolic blood pressure (SBP), is a function of the other covariates. Below we read in the data as a `DataFrame` using the [CSV package](https://juliadata.github.io/CSV.jl). WiSER.jl can take other data table objects that comply with the `Tables.jl` format, such as `IndexedTables` from the [JuliaDB](https://github.com/JuliaData/JuliaDB.jl) package.
```julia
filepath = normpath(joinpath(dirname(pathof(WiSER)), "../data/"))
df = DataFrame(CSV.File(filepath * "sbp.csv"));
```
```jldoctest
5011×8 DataFrame
Row │ id sbp agegroup gender bmi meds bmi_std obswt
│ Int64 Float64 Float64 String Float64 String Float64 Float64
──────┼────────────────────────────────────────────────────────────────────────
1 │ 1 159.586 3.0 Male 23.1336 NoMeds -1.57733 4.0
2 │ 1 161.849 3.0 Male 26.5885 NoMeds 1.29927 4.0
3 │ 1 160.484 3.0 Male 24.8428 NoMeds -0.154204 4.0
4 │ 1 161.134 3.0 Male 24.9289 NoMeds -0.0825105 4.0
5 │ 1 165.443 3.0 Male 24.8057 NoMeds -0.185105 4.0
6 │ 1 160.053 3.0 Male 24.1583 NoMeds -0.72415 4.0
7 │ 1 162.1 3.0 Male 25.2543 NoMeds 0.188379 4.0
8 │ 1 163.153 3.0 Male 24.3951 NoMeds -0.527037 4.0
⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮
5005 │ 500 155.672 3.0 Female 24.4651 NoMeds -0.468741 3.0
5006 │ 500 148.389 3.0 Female 25.8129 NoMeds 0.653514 3.0
5007 │ 500 152.491 3.0 Female 24.5818 NoMeds -0.371555 3.0
5008 │ 500 153.844 3.0 Female 25.721 NoMeds 0.57693 3.0
5009 │ 500 150.164 3.0 Female 24.3545 NoMeds -0.560843 3.0
5010 │ 500 150.248 3.0 Female 23.8532 NoMeds -0.978159 3.0
5011 │ 500 152.433 3.0 Female 26.1232 NoMeds 0.911814 3.0
```
## Formulate model
First we will create a `WSVarLmmModel` object from the dataframe.
The `WSVarLmmModel()` function takes the following arguments:
- `meanformula`: the formula for the mean fixed effects β (variables in X matrix).
- `reformula`: the formula for the mean random effects γ (variables in Z matrix).
- `wsvarformula`: the formula for the within-subject variance fixed effects τ (variables in W matrix).
- `idvar`: the id variable for groupings.
- `tbl`: the datatable holding all of the data for the model. Can be a `DataFrame` or various types of tables that comply with `Tables.jl` formatting, such as an `IndexedTable`.
- `wtvar`: Optional argument of variable name holding subject-level weights in the `tbl`.
For documentation of the `WSVarLmmModel` constructor, type `?WSVarLmmModel` in the Julia REPL.
```@docs
WSVarLmmModel
```
We will model `sbp` as a function of `agegroup`, `gender`, `bmi_std`, and `meds`, where `bmi_std` is the centered and scaled `bmi`. The following commands fit the model below:
$\text{sbp}_{ij} = \beta_0 + \beta_1 \cdot \text{agegroup}_{ij} + \beta_2 \cdot \text{gender}_{i} + \beta_3 \cdot \text{bmi}_{ij} + \gamma_{i0} + \gamma_{i1} \cdot \text{bmi} + \epsilon_{ij}$
``\epsilon_{ij}`` has mean 0 and variance ``\sigma^2_{\epsilon_{ij}}``
``\gamma_{i} = (\gamma_{i0}, \gamma_{i1})`` has mean **0** and variance ``\Sigma_\gamma``
$\sigma^2_{\epsilon_{ij}} = \exp(\tau_0 + \tau_1 \cdot \text{agegroup}_{ij} + \tau_2 \cdot \text{gender}_{i} + \tau_3 \cdot \text{bmi}_{ij})$
```julia
vlmm = WSVarLmmModel(
@formula(sbp ~ 1 + agegroup + gender + bmi_std + meds),
@formula(sbp ~ 1 + bmi_std),
@formula(sbp ~ 1 + agegroup + meds + bmi_std),
:id, df);
```
The `vlmm` object now holds the data in the form required by the model formulated above. We can now use the `fit!()` function to fit the model.
## Fit model
Main arguments of the `fit!()` function are:
* `m::WSVarLmmModel`: The model to fit.
* `solver`: Non-linear programming solver to be used.
* `runs::Integer`: Number of weighted nonlinear least squares runs. Default is 2.
For complete documentation, type `?fit!` in the Julia REPL.
```@docs
fit!
```
```julia
WiSER.fit!(vlmm)
```
******************************************************************************
This program contains Ipopt, a library for large-scale nonlinear optimization.
Ipopt is released as open source code under the Eclipse Public License (EPL).
For more information visit https://github.com/coin-or/Ipopt
******************************************************************************
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.201188
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.080523
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi_std 0.296424 0.0139071 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.5212 0.393792 -6.40 <1e-09
τ2: agegroup 1.50759 0.135456 11.13 <1e-28
τ3: meds: OnMeds -0.435225 0.0621076 -7.01 <1e-11
τ4: bmi_std 0.0052695 0.0224039 0.24 0.8140
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00196 0.0181387
"γ2: bmi_std" 0.0181387 0.000549357
The estimated coefficients and random effects covariance parameters can be retrieved by
```julia
coef(vlmm)
```
9-element Vector{Float64}:
106.3082866175766
14.984423626293006
10.074886642511672
0.29642385700569635
-10.110677648545401
-2.5211956122840613
1.5075882029989467
-0.43522497609297117
0.005269501831413771
or individually
```julia
vlmm.β
```
5-element Vector{Float64}:
106.3082866175766
14.984423626293006
10.074886642511672
0.29642385700569635
-10.110677648545401
```julia
vlmm.τ
```
4-element Vector{Float64}:
-2.5211956122840613
1.5075882029989467
-0.43522497609297117
0.005269501831413771
```julia
vlmm.Σγ
```
2×2 Matrix{Float64}:
1.00196 0.0181387
0.0181387 0.000549357
The variance-covariance matrix of the estimated parameters (β, τ, Lγ) can be retrieved by
```julia
vlmm.vcov
```
12×12 Matrix{Float64}:
0.0206899 -0.00753187 -0.00618382 … -0.000123531 0.0644858
-0.00753187 0.00400999 0.000152994 4.07896e-5 -0.0194226
-0.00618382 0.000152994 0.0100558 4.35497e-5 -0.0299542
5.60981e-5 -4.80751e-5 0.000108448 8.06623e-6 0.00149567
-0.00311952 -0.000362412 0.00122535 -7.1571e-5 0.0168424
-0.00652959 0.00207365 0.00276734 … 0.00217472 -1.70443
0.00229271 -0.000743467 -0.000951293 -0.000740359 0.58213
-0.000719608 0.000263081 0.000294779 0.000197117 -0.152908
3.10756e-5 1.70391e-5 -0.00011849 -5.50781e-5 0.0266044
0.000166021 -3.24178e-6 -0.00011537 9.0954e-6 -0.00139559
-0.000123531 4.07896e-5 4.35497e-5 … 7.84536e-5 -0.0244586
0.0644858 -0.0194226 -0.0299542 -0.0244586 19.1312
Confidence intervals for $\boldsymbol{\beta}, \boldsymbol{\tau}$ can be obtained by `confint`. By default it returns 95% confidence intervals ($\alpha$ level = 0.05).
```julia
confint(vlmm)
```
9×2 Matrix{Float64}:
106.026 106.59
14.8603 15.1085
9.87834 10.2714
0.269167 0.323681
-10.3516 -9.86976
-3.29301 -1.74938
1.2421 1.77308
-0.556954 -0.313496
-0.0386413 0.0491803
```julia
# 90% confidence interval
confint(vlmm, 0.1)
```
9×2 Matrix{Float64}:
106.29 106.326
14.9765 14.9924
10.0623 10.0875
0.294676 0.298171
-10.1261 -10.0952
-2.57068 -2.47171
1.49057 1.52461
-0.44303 -0.42742
0.0024542 0.00808481
**Note**: The default solver for WiSER.jl is:
`Ipopt.IpoptSolver(print_level=0, mehrotra_algorithm = "yes", warm_start_init_point="yes", max_iter=100)`
This was chosen as it is a free, open-source solver, and these options typically reduce line searches and lead to much faster fitting than other configurations. However, it can be a bit more unstable. Below are tips to help improve estimation if the fit seems far off or fails. Switching the solver options, or removing them and using the base Ipopt solver `Ipopt.IpoptSolver(max_iter=100)`, can take longer to converge but is usually a bit more stable.
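For example, switching to the plainer configuration is a one-liner (assuming the fitted `vlmm` object from above):

```julia
WiSER.fit!(vlmm, Ipopt.IpoptSolver(max_iter = 100))
```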
## Tips for improving estimation
`fit!` may fail due to various reasons. Often it indicates ill-conditioned data or an inadequate model. Following strategies may improve the fit.
### Standardize continuous predictors
In the above example, we used the standardized `bmi`. If we use the original `bmi` variable, the estimates of τ are unstable, as reflected by the large standard errors.
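For reference, a standardized column can be built along these lines — a sketch only; the exact transform behind the example data's `bmi_std` is an assumption:

```julia
using Statistics
# center and scale to zero mean and unit standard deviation
df.bmi_std = (df.bmi .- mean(df.bmi)) ./ std(df.bmi)
```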
```julia
# using unscaled bmi causes ill-conditioning
vlmm_bmi = WSVarLmmModel(
@formula(sbp ~ 1 + agegroup + gender + bmi + meds),
@formula(sbp ~ 1 + bmi),
@formula(sbp ~ 1 + agegroup + meds + bmi),
:id, df);
WiSER.fit!(vlmm_bmi)
```
run = 1, ‖Δβ‖ = 0.208950, ‖Δτ‖ = 0.445610, ‖ΔL‖ = 2.027674, status = Optimal, time(s) = 0.079164
run = 2, ‖Δβ‖ = 0.032012, ‖Δτ‖ = 0.014061, ‖ΔL‖ = 0.780198, status = Optimal, time(s) = 0.125981
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi + meds
Random Effects Formula:
sbp ~ 1 + bmi
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
────────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
────────────────────────────────────────────────────────────
β1: (Intercept) 100.131 0.319906 313.00 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi 0.246808 0.0115793 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.63101 17.2804 -0.15 0.8790
τ2: agegroup 1.50759 5.69286 0.26 0.7911
τ3: meds: OnMeds -0.435225 1.37021 -0.32 0.7508
τ4: bmi 0.00438748 0.0281074 0.16 0.8760
────────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 0.484542 0.00557087
"γ2: bmi" 0.00557087 0.000380843
### Increase `runs`
Increasing `runs` (default is 2) takes more computing resources but can be useful for getting more precise estimates. If we set `runs=3` when using the original `bmi` (ill-conditioned), the estimates of τ are more accurate. The estimate of Σγ is still off, though.
```julia
# improve estimates from ill-conditioned data by more runs
WiSER.fit!(vlmm_bmi, runs=3)
```
run = 1, ‖Δβ‖ = 0.208950, ‖Δτ‖ = 0.445610, ‖ΔL‖ = 2.027674, status = Optimal, time(s) = 0.085767
run = 2, ‖Δβ‖ = 0.032012, ‖Δτ‖ = 0.014061, ‖ΔL‖ = 0.780198, status = Optimal, time(s) = 0.129032
run = 3, ‖Δβ‖ = 0.008059, ‖Δτ‖ = 0.000678, ‖ΔL‖ = 0.083976, status = Optimal, time(s) = 0.154331
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi + meds
Random Effects Formula:
sbp ~ 1 + bmi
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
────────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
────────────────────────────────────────────────────────────
β1: (Intercept) 100.139 0.315745 317.15 <1e-99
β2: agegroup 14.9839 0.0633172 236.65 <1e-99
β3: gender: Male 10.0753 0.10027 100.48 <1e-99
β4: bmi 0.246528 0.0114083 21.61 <1e-99
β5: meds: OnMeds -10.1109 0.122778 -82.35 <1e-99
τ1: (Intercept) -2.63079 0.453424 -5.80 <1e-08
τ2: agegroup 1.5079 0.0253371 59.51 <1e-99
τ3: meds: OnMeds -0.435791 0.051245 -8.50 <1e-16
τ4: bmi 0.00436541 0.0178825 0.24 0.8071
────────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 0.377439 0.00949012
"γ2: bmi" 0.00949012 0.000238614
### Try different nonlinear programming (NLP) solvers
A different solver may remedy the issue. By default, `WiSER.jl` uses the [Ipopt](https://github.com/jump-dev/Ipopt.jl) solver, but it can use any solver that supports [MathProgBase.jl](https://github.com/JuliaOpt/MathProgBase.jl). Check the documentation of `fit!` for commonly used NLP solvers. In our experience, [Knitro.jl](https://github.com/JuliaOpt/KNITRO.jl) works best, but it is commercial software.
```julia
# watchdog_shortened_iter_trigger option in IPOPT can sometimes be more robust to numerical issues
WiSER.fit!(vlmm, Ipopt.IpoptSolver(print_level=0, watchdog_shortened_iter_trigger=3, max_iter=100))
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.081864
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.068715
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi_std 0.296424 0.0139071 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.5212 0.393792 -6.40 <1e-09
τ2: agegroup 1.50759 0.135456 11.13 <1e-28
τ3: meds: OnMeds -0.435225 0.0621076 -7.01 <1e-11
τ4: bmi_std 0.0052695 0.0224039 0.24 0.8140
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00196 0.0181387
"γ2: bmi_std" 0.0181387 0.000549357
```julia
# print Ipopt iterates for diagnostics
WiSER.fit!(vlmm, Ipopt.IpoptSolver(print_level=5, mehrotra_algorithm="yes", warm_start_init_point="yes"))
```
This is Ipopt version 3.13.4, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).
Number of nonzeros in equality constraint Jacobian...: 0
Number of nonzeros in inequality constraint Jacobian.: 0
Number of nonzeros in Lagrangian Hessian.............: 28
Total number of variables............................: 7
variables with only lower bounds: 0
variables with lower and upper bounds: 0
variables with only upper bounds: 0
Total number of equality constraints.................: 0
Total number of inequality constraints...............: 0
inequality constraints with only lower bounds: 0
inequality constraints with lower and upper bounds: 0
inequality constraints with only upper bounds: 0
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 2.8331778e+04 0.00e+00 1.00e+02 0.0 0.00e+00 - 0.00e+00 0.00e+00 0
1 2.8314110e+04 0.00e+00 2.49e+01 -11.0 3.08e-01 - 1.00e+00 1.00e+00f 1
2 2.8312135e+04 0.00e+00 3.32e+00 -11.0 2.63e-01 - 1.00e+00 1.00e+00f 1
3 2.8311752e+04 0.00e+00 1.36e+00 -11.0 2.08e-01 - 1.00e+00 1.00e+00f 1
4 2.8311700e+04 0.00e+00 3.34e-01 -11.0 1.25e-01 - 1.00e+00 1.00e+00f 1
5 2.8311697e+04 0.00e+00 2.79e-02 -11.0 3.98e-02 - 1.00e+00 1.00e+00f 1
6 2.8311697e+04 0.00e+00 2.40e-04 -11.0 3.48e-03 - 1.00e+00 1.00e+00f 1
7 2.8311697e+04 0.00e+00 5.47e-06 -11.0 2.47e-05 - 1.00e+00 1.00e+00f 1
8 2.8311697e+04 0.00e+00 9.63e-08 -11.0 1.20e-08 - 1.00e+00 1.00e+00f 1
9 2.8311697e+04 0.00e+00 5.21e-09 -11.0 1.61e-10 - 1.00e+00 1.00e+00f 1
Number of Iterations....: 9
(scaled) (unscaled)
Objective...............: 1.6226171160602307e+04 2.8311697021847336e+04
Dual infeasibility......: 5.2103428765066918e-09 9.0910940997446232e-09
Constraint violation....: 0.0000000000000000e+00 0.0000000000000000e+00
Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00
Overall NLP error.......: 5.2103428765066918e-09 9.0910940997446232e-09
Number of objective function evaluations = 10
Number of objective gradient evaluations = 10
Number of equality constraint evaluations = 0
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 0
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 9
Total CPU secs in IPOPT (w/o function evaluations) = 0.008
Total CPU secs in NLP function evaluations = 0.057
EXIT: Optimal Solution Found.
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.081372
This is Ipopt version 3.13.4, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).
Number of nonzeros in equality constraint Jacobian...: 0
Number of nonzeros in inequality constraint Jacobian.: 0
Number of nonzeros in Lagrangian Hessian.............: 28
Total number of variables............................: 7
variables with only lower bounds: 0
variables with lower and upper bounds: 0
variables with only upper bounds: 0
Total number of equality constraints.................: 0
Total number of inequality constraints...............: 0
inequality constraints with only lower bounds: 0
inequality constraints with lower and upper bounds: 0
inequality constraints with only upper bounds: 0
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 2.7170793e+04 0.00e+00 1.52e+01 0.0 0.00e+00 - 0.00e+00 0.00e+00 0
1 2.7170194e+04 0.00e+00 3.80e+00 -11.0 3.15e-01 - 1.00e+00 1.00e+00f 1
2 2.7170055e+04 0.00e+00 1.91e+00 -11.0 3.07e-01 - 1.00e+00 1.00e+00f 1
3 2.7170020e+04 0.00e+00 9.10e-01 -11.0 2.86e-01 - 1.00e+00 1.00e+00f 1
4 2.7170013e+04 0.00e+00 3.93e-01 -11.0 2.47e-01 - 1.00e+00 1.00e+00f 1
5 2.7170011e+04 0.00e+00 1.35e-01 -11.0 1.82e-01 - 1.00e+00 1.00e+00f 1
6 2.7170011e+04 0.00e+00 2.58e-02 -11.0 9.30e-02 - 1.00e+00 1.00e+00f 1
7 2.7170011e+04 0.00e+00 1.16e-03 -11.0 2.12e-02 - 1.00e+00 1.00e+00f 1
8 2.7170011e+04 0.00e+00 9.88e-06 -11.0 9.61e-04 - 1.00e+00 1.00e+00f 1
9 2.7170011e+04 0.00e+00 2.64e-07 -11.0 2.10e-06 - 1.00e+00 1.00e+00f 1
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
10 2.7170011e+04 0.00e+00 4.68e-09 -11.0 6.64e-09 - 1.00e+00 1.00e+00f 1
Number of Iterations....: 10
(scaled) (unscaled)
Objective...............: 2.7170011141755771e+04 2.7170011141755771e+04
Dual infeasibility......: 4.6827675070915120e-09 4.6827675070915120e-09
Constraint violation....: 0.0000000000000000e+00 0.0000000000000000e+00
Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00
Overall NLP error.......: 4.6827675070915120e-09 4.6827675070915120e-09
Number of objective function evaluations = 11
Number of objective gradient evaluations = 11
Number of equality constraint evaluations = 0
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 0
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 10
Total CPU secs in IPOPT (w/o function evaluations) = 0.008
Total CPU secs in NLP function evaluations = 0.057
EXIT: Optimal Solution Found.
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.068690
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi_std 0.296424 0.0139071 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.5212 0.393792 -6.40 <1e-09
τ2: agegroup 1.50759 0.135456 11.13 <1e-28
τ3: meds: OnMeds -0.435225 0.0621076 -7.01 <1e-11
τ4: bmi_std 0.0052695 0.0224039 0.24 0.8140
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00196 0.0181387
"γ2: bmi_std" 0.0181387 0.000549357
```julia
# use Knitro (require installation of Knitro software and Knitro.jl)
# Using KNITRO
# WiSER.fit!(vlmm, KNITRO.KnitroSolver(outlev=3));
```
```julia
# use NLopt
WiSER.fit!(vlmm, NLopt.NLoptSolver(algorithm=:LD_MMA, maxeval=4000))
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.162196, ‖ΔL‖ = 0.100050, status = Optimal, time(s) = 0.148150
run = 2, ‖Δβ‖ = 0.005248, ‖Δτ‖ = 0.008747, ‖ΔL‖ = 0.001335, status = Optimal, time(s) = 0.052991
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633238 236.63 <1e-99
β3: gender: Male 10.0749 0.100277 100.47 <1e-99
β4: bmi_std 0.296421 0.0139114 21.31 <1e-99
β5: meds: OnMeds -10.1106 0.122912 -82.26 <1e-99
τ1: (Intercept) -2.53263 0.102707 -24.66 <1e-99
τ2: agegroup 1.51161 0.038887 38.87 <1e-99
τ3: meds: OnMeds -0.435901 0.0524849 -8.31 <1e-16
τ4: bmi_std 0.0057698 0.0218516 0.26 0.7917
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00228 0.0179118
"γ2: bmi_std" 0.0179118 0.00441744
Using a different solver can even help without the need to standardize predictors. If we use the NLopt solver with the `LD_MMA` algorithm on the model where `bmi` is not standardized, we don't see heavily inflated standard errors.
```julia
# Using other solvers can work without standardizing
WiSER.fit!(vlmm_bmi, NLopt.NLoptSolver(algorithm=:LD_MMA, maxeval=4000))
```
run = 1, ‖Δβ‖ = 0.208950, ‖Δτ‖ = 0.143776, ‖ΔL‖ = 1.528229, status = Optimal, time(s) = 0.604965
run = 2, ‖Δβ‖ = 0.026830, ‖Δτ‖ = 0.000125, ‖ΔL‖ = 0.000257, status = Optimal, time(s) = 0.046570
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi + meds
Random Effects Formula:
sbp ~ 1 + bmi
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 100.126 0.323755 309.26 <1e-99
β2: agegroup 14.9849 0.0633317 236.61 <1e-99
β3: gender: Male 10.0748 0.10029 100.46 <1e-99
β4: bmi 0.246967 0.0117298 21.05 <1e-97
β5: meds: OnMeds -10.1094 0.122977 -82.21 <1e-99
τ1: (Intercept) -3.01501 0.811039 -3.72 0.0002
τ2: agegroup 1.50948 0.0468194 32.24 <1e-99
τ3: meds: OnMeds -0.426979 0.0519209 -8.22 <1e-15
τ4: bmi 0.0192299 0.0368267 0.52 0.6016
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 3.89413 -0.127903
"γ2: bmi" -0.127903 0.00561294
### Try different starting points
Initialization matters as well. By default, `fit!` uses a crude least squares estimate as the starting point. We can also try a method of moments estimate or user-supplied values.
```julia
# MoM starting point
WiSER.fit!(vlmm, init = init_mom!(vlmm))
```
run = 1, ‖Δβ‖ = 0.036245, ‖Δτ‖ = 0.188207, ‖ΔL‖ = 0.127483, status = Optimal, time(s) = 0.062208
run = 2, ‖Δβ‖ = 0.006798, ‖Δτ‖ = 0.009128, ‖ΔL‖ = 0.050049, status = Optimal, time(s) = 0.059064
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
────────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
────────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.143831 739.12 <1e-99
β2: agegroup 14.9846 0.063327 236.62 <1e-99
β3: gender: Male 10.0747 0.100282 100.46 <1e-99
β4: bmi_std 0.296596 0.013989 21.20 <1e-99
β5: meds: OnMeds -10.1107 0.122973 -82.22 <1e-99
τ1: (Intercept) -2.52233 0.218068 -11.57 <1e-30
τ2: agegroup 1.5079 0.0759423 19.86 <1e-87
τ3: meds: OnMeds -0.434951 0.0549139 -7.92 <1e-14
τ4: bmi_std 0.00527178 0.0220323 0.24 0.8109
────────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00193 0.0180064
"γ2: bmi_std" 0.0180064 0.000967577
```julia
# user-supplied starting point in vlmm.β, vlmm.τ, vlmm.Lγ
vlmm.β .= [106.0; 15.0; 10.0; 0.3; -10.0]
vlmm.τ .= [-2.5; 1.5; -0.5; 0.0]
vlmm.Lγ .= [1.0 0.0; 0.0 0.0]
fit!(vlmm, init = vlmm)
```
run = 1, ‖Δβ‖ = 0.337743, ‖Δτ‖ = 0.069850, ‖ΔL‖ = 0.017323, status = Optimal, time(s) = 0.078268
run = 2, ‖Δβ‖ = 0.003050, ‖Δτ‖ = 0.004463, ‖ΔL‖ = 0.001185, status = Optimal, time(s) = 0.104889
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
────────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
────────────────────────────────────────────────────────────
β1: (Intercept) 106.309 0.143859 738.98 <1e-99
β2: agegroup 14.984 0.0633192 236.64 <1e-99
β3: gender: Male 10.0754 0.100275 100.48 <1e-99
β4: bmi_std 0.296078 0.0136905 21.63 <1e-99
β5: meds: OnMeds -10.1108 0.122807 -82.33 <1e-99
τ1: (Intercept) -2.52144 0.0576657 -43.73 <1e-99
τ2: agegroup 1.50787 0.0253351 59.52 <1e-99
τ3: meds: OnMeds -0.436135 0.0512042 -8.52 <1e-16
τ4: bmi_std 0.00525556 0.0214765 0.24 0.8067
────────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00203 0.0184422
"γ2: bmi_std" 0.0184422 0.000339423
## Additional Features
WiSER.jl has additional features that may benefit some users. These include parallelizaiton and observation weights.
### Parallelization
By default, WiSER.jl runs objective function evaluations serially rather than in parallel. In many cases (a small number of individuals, or relatively few observations per individual) it is faster not to parallelize, as the overhead of setting up evaluations on multiple threads exceeds the cost of the evaluations themselves. However, with large numbers of observations per individual, or many individuals, it can be faster to parallelize.
In order to allow for parallelization, the Julia environment variable `JULIA_NUM_THREADS` should be set to a value greater than 1. This must be set before Julia launches and can be done in a couple of ways:
- Setting a default number of threads for Julia to launch with in a `.bash_profile` file by adding a line `export JULIA_NUM_THREADS=X`, where `X` is the number of threads you wish to make the default.
- Before launching julia in the terminal, export the variable as done below:
```
$ export JULIA_NUM_THREADS=X
$ julia
```
This is different from the number of threads used by BLAS commands. To check the number of threads available for parallelization, run the following:
```julia
Threads.nthreads()
```
4
We see there are 4 threads available.
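As an aside, the BLAS thread count is a separate setting; a minimal sketch of querying and setting it via `LinearAlgebra` (the `get_num_threads` query assumes Julia ≥ 1.6):

```julia
using LinearAlgebra
BLAS.get_num_threads()  # query the BLAS thread count
BLAS.set_num_threads(4) # change it, independently of JULIA_NUM_THREADS
```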
To parallelize the objective function evaluations in WiSER, simply add the keyword argument `parallel = true` to the `fit!()` function.
```julia
WiSER.fit!(vlmm, parallel = true)
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.237454
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.158717
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi_std 0.296424 0.0139071 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.5212 0.393792 -6.40 <1e-09
τ2: agegroup 1.50759 0.135456 11.13 <1e-28
τ3: meds: OnMeds -0.435225 0.0621076 -7.01 <1e-11
τ4: bmi_std 0.0052695 0.0224039 0.24 0.8140
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00196 0.0181387
"γ2: bmi_std" 0.0181387 0.000549357
We can see slight timing differences at this sample size:
```julia
@time WiSER.fit!(vlmm, parallel = false);
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.065098
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.070394
0.141617 seconds (417 allocations: 38.531 KiB)
```julia
@time WiSER.fit!(vlmm, parallel = true);
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.150904
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.164348
0.325590 seconds (1.95 k allocations: 174.953 KiB)
### Observation Weights
It can be useful for some users to fit WiSER with observation weights. We have implemented this feature, which can be used via the `wtvar` keyword in the model constructor. Note: weights are per-individual, i.e., within each individual all observations share the same weight.
In the example data, the dataframe has a column `obswt`, corresponding to observation weights for each individual.
```julia
vlmm_wts = WSVarLmmModel(
@formula(sbp ~ 1 + agegroup + gender + bmi_std + meds),
@formula(sbp ~ 1 + bmi_std),
@formula(sbp ~ 1 + agegroup + meds + bmi_std),
:id, df; wtvar = :obswt);
@time WiSER.fit!(vlmm_wts)
```
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.158033, ‖ΔL‖ = 0.102058, status = Optimal, time(s) = 0.062162
run = 2, ‖Δβ‖ = 0.006134, ‖Δτ‖ = 0.007594, ‖ΔL‖ = 0.056873, status = Optimal, time(s) = 0.078738
0.146753 seconds (447 allocations: 40.500 KiB)
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
────────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
────────────────────────────────────────────────────────────
β1: (Intercept) 106.309 0.143971 738.41 <1e-99
β2: agegroup 14.9841 0.0633336 236.59 <1e-99
β3: gender: Male 10.0748 0.100288 100.46 <1e-99
β4: bmi_std 0.296066 0.0139064 21.29 <1e-99
β5: meds: OnMeds -10.1101 0.122602 -82.46 <1e-99
τ1: (Intercept) -2.51639 0.267541 -9.41 <1e-20
τ2: agegroup 1.50717 0.0914489 16.48 <1e-60
τ3: meds: OnMeds -0.445596 0.0366911 -12.14 <1e-33
τ4: bmi_std 0.00634263 0.00812327 0.78 0.4349
────────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.05449 0.0279852
"γ2: bmi_std" 0.0279852 0.000792437
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | docs | 7657 | # Simulating responses
The `rvarlmm()` and `rvarlmm!()` functions can be used to generate a response from user-supplied data and parameters. The `rand!()` command can be used to overwrite the response in a `WSVarLmmModel` object based on the parameters and an optional user-supplied distribution.
The `rand!(m::WSVarLmmModel; respdist = MvNormal, γωdist = MvNormal, Σγω = [], kwargs...)` function replaces the responses `m.data[i].y` with a simulated response based on:
- The data in the model object's data `X, Z, W` matrices.
- The parameter values in the model.
- The conditional distribution of the response given the random effects.
- The distribution of the random effects.
- If simulating from the multivariate t distribution (`MvTDist`), you must specify the degrees of freedom via `df = x`.
The `rvarlmm()` function takes arrays of the data matrices and generates a simulated response from the VarLMM model based on:
- `Xs`: array of each clusters `X`: mean fixed effects covariates
- `Zs`: array of each clusters `Z`: random location effects covariates
- `Ws`: array of each clusters `W`: within-subject variance fixed effects covariates
- `β`: mean fixed effects vector
- `τ`: within-subject variance fixed effects vector
- `respdist`: the distribution for response. Default is MvNormal.
- `Σγ`: random location effects covariance matrix.
- `Σγω`: joint random location and random scale effects covariance matrix (if generating from full model).
- If simulating from the multivariate t distribution (`MvTDist`), you must specify the degrees of freedom via `df = x`.
The `rvarlmm!()` function can be used to generate a simulated response from the VarLMM model based on a dataframe, placing the generated response into the data table under the `respname` field.
Note: **the dataframe MUST be ordered by the grouping variable for the response to be generated in the correct order.**
This can be checked via `dataframe == sort(dataframe, idvar)`. The response is based on:
- `meanformula`: represents the formula for the mean fixed effects `β` (variables in X matrix)
- `reformula`: represents the formula for the mean random effects γ (variables in Z matrix)
- `wsvarformula`: represents the formula for the within-subject variance fixed effects τ (variables in W matrix)
- `idvar`: the id variable for groupings.
- `dataframe`: the dataframe holding all of the data for the model. For this function it **must be in order**.
- `β`: mean fixed effects vector
- `τ`: within-subject variance fixed effects vector
- `respdist`: the distribution for response. Default is MvNormal.
- `Σγ`: random location effects covariance matrix.
- `Σγω`: joint random location and random scale effects covariance matrix (if generating from full model)
- `respname`: symbol representing the simulated response variable name.
- If simulating from the multivariate t distribution (`MvTDist`), you must specify the degrees of freedom via `df = x`.
For both functions, only one of the Σγ or Σγω matrices has to be specified. Σγ can be used to specify that the generative model will not include a random scale component; an example supplying Σγω appears at the end of this section. `rvarlmm` outputs `ys`: an array of responses `y` that matches the order of the data arrays (`Xs, Zs, Ws`).
We can start by loading the packages and data and fitting a model.
```julia
using CSV, DataFrames, Random, WiSER
filepath = normpath(joinpath(dirname(pathof(WiSER)), "../data/"))
df = DataFrame(CSV.File(filepath * "sbp.csv"))
vlmm = WSVarLmmModel(
@formula(sbp ~ 1 + agegroup + gender + bmi_std + meds),
@formula(sbp ~ 1 + bmi_std),
@formula(sbp ~ 1 + agegroup + meds + bmi_std),
:id, df);
WiSER.fit!(vlmm)
```
******************************************************************************
This program contains Ipopt, a library for large-scale nonlinear optimization.
Ipopt is released as open source code under the Eclipse Public License (EPL).
For more information visit https://github.com/coin-or/Ipopt
******************************************************************************
run = 1, ‖Δβ‖ = 0.037311, ‖Δτ‖ = 0.166678, ‖ΔL‖ = 0.100999, status = Optimal, time(s) = 0.215811
run = 2, ‖Δβ‖ = 0.005220, ‖Δτ‖ = 0.006748, ‖ΔL‖ = 0.048735, status = Optimal, time(s) = 0.083461
Within-subject variance estimation by robust regression (WiSER)
Mean Formula:
sbp ~ 1 + agegroup + gender + bmi_std + meds
Random Effects Formula:
sbp ~ 1 + bmi_std
Within-Subject Variance Formula:
sbp ~ 1 + agegroup + meds + bmi_std
Number of individuals/clusters: 500
Total observations: 5011
Fixed-effects parameters:
───────────────────────────────────────────────────────────
Estimate Std. Error Z Pr(>|Z|)
───────────────────────────────────────────────────────────
β1: (Intercept) 106.308 0.14384 739.07 <1e-99
β2: agegroup 14.9844 0.0633245 236.63 <1e-99
β3: gender: Male 10.0749 0.100279 100.47 <1e-99
β4: bmi_std 0.296424 0.0139071 21.31 <1e-99
β5: meds: OnMeds -10.1107 0.122918 -82.26 <1e-99
τ1: (Intercept) -2.5212 0.393792 -6.40 <1e-09
τ2: agegroup 1.50759 0.135456 11.13 <1e-28
τ3: meds: OnMeds -0.435225 0.0621076 -7.01 <1e-11
τ4: bmi_std 0.0052695 0.0224039 0.24 0.8140
───────────────────────────────────────────────────────────
Random effects covariance matrix Σγ:
"γ1: (Intercept)" 1.00196 0.0181387
"γ2: bmi_std" 0.0181387 0.000549357
Once the model has been fit, we can overwrite the response variable, simulating a new response based on the model's current parameters. This is done by calling the `rand!()` function on the model object. Here we simulate from a multivariate normal distribution for $y$.
```julia
yoriginal = copy(vlmm.data[1].y)
Random.seed!(123)
WiSER.rand!(vlmm; respdist = MvNormal)
[yoriginal vlmm.data[1].y]
```
9×2 Matrix{Float64}:
159.586 163.223
161.849 161.898
160.484 160.667
161.134 165.167
165.443 162.258
160.053 163.019
162.1 162.065
163.153 161.422
166.675 160.552
Other response distributions are also implemented. To get a list of the available distributions, use `respdists()`:
```julia
respdists()
```
6-element Vector{Symbol}:
:MvNormal
:MvTDist
:Gamma
:InverseGaussian
:InverseGamma
:Uniform
```julia
WiSER.rand!(vlmm; respdist = InverseGamma)
vlmm.data[1].y
```
9-element Vector{Float64}:
163.1357991396798
165.1430739396795
162.08532565729797
159.6822122192519
161.57688777672846
164.58912923247945
157.98192168704628
164.12521616207954
163.73003300792996
We can also simulate a response variable from a dataframe and a formula.
If you don't want to overwrite the response variable in the dataframe, you can use the optional keyword argument `respname` to specify the variable name under which to save the simulated response.
```julia
df = DataFrame(id = [1; 1; 2; 3; 3; 3; 4], y = randn(7),
x2 = randn(7), x3 = randn(7), z2 = randn(7), w2 = randn(7), w3 = randn(7))
f1 = @formula(y ~ 1 + x2 + x3)
f2 = @formula(y ~ 1 + z2)
f3 = @formula(y ~ 1 + w2 + w3)
β = zeros(3)
τ = zeros(3)
Σγ = [1. 0.; 0. 1.]
rvarlmm!(f1, f2, f3, :id, df, β, τ;
Σγ = Σγ, respname = :response)
[df[!, :y] df[!, :response]]
```
7×2 Matrix{Float64}:
-0.474718 0.0945213
-0.615475 2.06463
-0.577114 3.7559
-1.14809 1.66547
-0.53171 -0.250939
1.26381 -0.400644
-1.32798 -1.67888
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.6 | 84b26e39bce597a45909a57b86d21751a628c723 | docs | 589 | # Instructions for modifying the WiSER.jl package documentation
Changes to the package documentation are made by modifying the Jupyter notebooks in this folder. Follow the steps below.
1. In a terminal, within the `WiSER/docs/src/notebooks` folder, start Julia.
2. To exactly reproduce the notebooks, activate the environment via `import Pkg; Pkg.activate()` (see the sketch after these steps).
3. Work on the notebooks for any documentation changes and improvements.
4. To save updated documentation, use `File -> Download as -> Markdown (.md)` in the Jupyter notebook and place the downloaded Markdown (.md) files in the `WiSER/docs/src` folder.
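For reference, the first two steps might look like the following sketch (paths assume a local clone of the repository):
```
cd WiSER/docs/src/notebooks
julia
julia> import Pkg; Pkg.activate()
```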
| WiSER | https://github.com/OpenMendel/WiSER.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 462 | using Documenter, SigmaRidgeRegression
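# Documenter.jl build script: generate and deploy the package documentation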
makedocs(;
modules=[SigmaRidgeRegression],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
],
repo="https://github.com/nignatiadis/SigmaRidgeRegression.jl/blob/{commit}{path}#L{line}",
sitename="SigmaRidgeRegression.jl",
authors="Nikos Ignatiadis <[email protected]>",
assets=String[],
)
deploydocs(;
repo="github.com/nignatiadis/SigmaRidgeRegression.jl",
)
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 6189 | using Pkg
Pkg.activate(@__DIR__)
using RCall
using SigmaRidgeRegression
using StatsBase
using Plots
using Statistics
using Random
using MLJ
using Tables
using LaTeXTabulars
using LaTeXStrings
using BenchmarkTools
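# Load the CLL multi-omics data (drug response, methylation, RNA) from the MOFAdata R package via RCall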
R"""
data("CLL_data", package="MOFAdata")
# use methylation data, gene expression data and drug responses as predictors
CLL_data <- CLL_data[1:3]
CLL_data <- lapply(CLL_data,t)
ngr <- sapply(CLL_data,ncol)
CLL_data <- Reduce(cbind, CLL_data)
#only include patient samples profiles in all three omics
CLL_data2 <- CLL_data[apply(CLL_data,1, function(p) !any(is.na(p))),]
dim(CLL_data2)
# prepare design matrix and response
X <- CLL_data2[,!grepl("D_002", colnames(CLL_data))]
y <- rowMeans(CLL_data2[,grepl("D_002", colnames(CLL_data))])
annot <- rep(1:3, times = ngr-c(5,0,0)) # group annotations to drugs, meth and RNA
ngr_prime <- ngr-c(5,0,0)
"""
# run with seed from Velten & Huber
R"""
set.seed(9876)
foldid <- sample(rep(seq(10), length=nrow(X)))
"""
@rget foldid
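# Convert the R fold assignments into (train, test) index pairs for MLJ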
resample_ids = [ ( findall(foldid .!= k), findall(foldid .== k) ) for k in 1:10 ]
@rget X
@rget y
@rget ngr_prime
groups = GroupedFeatures(Int.(ngr_prime))
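# Build a noise-augmented design: row-resampled drug features plus pure Gaussian noise as two extra groups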
Random.seed!(1)
rand_idx = sample(1:121, 121, replace=false)
Xdrug_resample = X[rand_idx, 1:305]
Xgaussian = randn(121, 100)
Xnoise = [X Xdrug_resample Xgaussian]
groups_noise = GroupedFeatures([Int.(ngr_prime); 305; 100])
X_table = MLJ.table(X);
X_plus_noise = MLJ.table(Xnoise);
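# Fit a tuned ridge-type model on (X, y), benchmark the fitting time, and
# return one table row: name, tuning parameter, selected penalties per group,
# time, and cross-validated RMSE.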
function single_table_line(X, y, resampling, _model, model_name; tuning_name=nothing, sigdigits=3)
ridge_machine = machine(_model, X, y)
fit!(ridge_machine)
best_param = ridge_machine.report.best_param
if isnothing(tuning_name)
tuning_string =""
else
best_param = round(best_param; sigdigits=sigdigits)
tuning_string = L"%$(tuning_name) = %$(best_param)"
end
λs = round.(deepcopy(ridge_machine.report.best_λs), sigdigits=sigdigits)
#if isa(_model.ridge, SingleGroupRidgeRegressor)
# λs = fill(λs[1], 3)
#end
ridge_benchmark = @benchmark fit!(machine($(_model), $(X), $(y)))
time_loo = round(mean(ridge_benchmark.times)/(1e9), sigdigits=sigdigits)
eval_ridge = evaluate!(ridge_machine, resampling=resampling, measure=rms)
_rmse = round(eval_ridge.measurement[1], sigdigits=sigdigits)
#[model_name, tuning_string, λs, _rmse, time_loo]
[model_name, tuning_string, λs..., time_loo, _rmse]
end
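# Set up the four estimators (single ridge, σ-ridge, multi ridge, group lasso),
# each with a variant for the noise-augmented design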
single_ridge = SingleGroupRidgeRegressor(decomposition = :woodbury, groups=groups, λ=1.0, center=true, scale=true)
loo_single_ridge = LooRidgeRegressor(ridge = deepcopy(single_ridge))
single_ridge_noise = SingleGroupRidgeRegressor(decomposition = :woodbury, groups=groups_noise, λ=1.0, center=true, scale=true)
loo_single_ridge_noise = LooRidgeRegressor(ridge = deepcopy(single_ridge_noise))
sigma_ridge = SigmaRidgeRegressor(groups=groups, decomposition = :woodbury, σ=0.01, center=true, scale=true)
loo_sigmaridge = LooRidgeRegressor(ridge=deepcopy(sigma_ridge), tuning=SigmaRidgeRegression.DefaultTuning(scale=:linear, param_min_ratio=0.001))
sigma_ridge_noise = SigmaRidgeRegressor(groups=groups_noise, decomposition = :woodbury, σ=0.01, center=true, scale=true)
loo_sigmaridge_noise = LooRidgeRegressor(ridge=deepcopy(sigma_ridge_noise), tuning=SigmaRidgeRegression.DefaultTuning(scale=:linear, param_min_ratio=0.001))
multi_ridge = MultiGroupRidgeRegressor(groups; decomposition = :woodbury, center=true, scale=true)
loo_multi_ridge = LooRidgeRegressor(ridge = deepcopy(multi_ridge), rng=MersenneTwister(1))
multi_ridge_noise = MultiGroupRidgeRegressor(groups_noise; decomposition = :woodbury, center=true, scale=true)
loo_multi_ridge_noise = LooRidgeRegressor(ridge = deepcopy(multi_ridge_noise), rng=MersenneTwister(1))
# alternative resampling: CV(nfolds=5, shuffle=true, rng=1)
glasso = GroupLassoRegressor(groups=groups, decomposition = :woodbury, center=true, scale=true)
cv_glasso = TunedRidgeRegressor(ridge=deepcopy(glasso), resampling= Holdout(shuffle=true, rng=1), tuning=DefaultTuning(param_min_ratio=1e-5))
glasso_noise = GroupLassoRegressor(groups=groups_noise, decomposition = :woodbury, center=true, scale=true)
cv_glasso_noise = TunedRidgeRegressor(ridge=deepcopy(glasso_noise), resampling= Holdout(shuffle=true, rng=1), tuning=DefaultTuning(param_min_ratio=1e-5))
line_single_ridge = single_table_line(X_table, y, resample_ids, loo_single_ridge, L"\textbf{Single Ridge}")
line_single_ridge_noise = single_table_line(X_plus_noise, y, resample_ids, loo_single_ridge_noise, L"\textbf{Single Ridge}")
line_sigma_ridge = single_table_line(X_table, y, resample_ids, loo_sigmaridge, L"$\sigmacv$\textbf{-Ridge}"; tuning_name=L"\sigmacv")
line_sigma_ridge_noise = single_table_line(X_plus_noise, y, resample_ids, loo_sigmaridge_noise, L"$\sigmacv$\textbf{-Ridge}"; tuning_name=L"\sigmacv")
line_multi_ridge = single_table_line(X_table, y, resample_ids, loo_multi_ridge, L"\textbf{Multi Ridge}")
line_multi_ridge_noise = single_table_line(X_plus_noise, y, resample_ids, loo_multi_ridge_noise, L"\textbf{Multi Ridge}")
line_glasso = single_table_line(X_table, y, resample_ids, cv_glasso, L"\textbf{Group Lasso}", tuning_name=L"$\widehat{\lambda}^{gl}$")
line_glasso_noise = single_table_line(X_plus_noise, y, resample_ids, cv_glasso_noise, L"\textbf{Group Lasso}", tuning_name=L"$\widehat{\lambda}^{gl}$")
tbl_spec = Tabular("lllllll")
line1 = ["", "Tuning", L"$\widehat{\lambda}_{\text{Drugs}}$", L"\widehat{\lambda}_{\text{Methyl}}", L"\widehat{\lambda}_{\text{RNA}}", "Time (s)", "10-fold RMSE"]
lines = [line1, Rule(), line_sigma_ridge, line_single_ridge, line_multi_ridge, line_glasso]
latex_tabular("cll_analysis.tex", tbl_spec, lines)
tbl_spec2 = Tabular("lllllllll")
line1 = ["", "Tuning", L"$\widehat{\lambda}_{\text{Drugs}}$",
L"\widehat{\lambda}_{\text{Methyl}}",
L"\widehat{\lambda}_{\text{RNA}}",
L"\widehat{\lambda}_{\text{Noise1}}",
L"\widehat{\lambda}_{\text{Noise2}}",
"Time (s)", "10-fold RMSE"]
lines = [line1, Rule(), line_sigma_ridge_noise, line_single_ridge_noise, line_multi_ridge_noise, line_glasso_noise]
latex_tabular("cll_analysis_noise.tex", tbl_spec2, lines)
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 8415 | using Pkg
Pkg.activate(@__DIR__)
using Pkg.Artifacts
using CSV
using DataFrames
using StatsBase
using SigmaRidgeRegression
using LaTeXStrings
using LinearAlgebra
using MLJ
using Random
using ColorSchemes
using Plots
using StatsPlots
using PGFPlotsX
#------------------------------------------------------------------
# Code that generated the Artifact file Artifacts.toml
#------------------------------------------------------------------
#using ArtifactUtils
#add_artifact!(
# "Artifacts.toml",
# "YearPredictionMSD",
# "https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip",
# force=true,
# )
#--------------------------------------------------------------------------------------
# Command below may take a while since it will automatically download the Million Song
# Dataset from the UCI repository (around 400 MB).
#--------------------------------------------------------------------------------------
msd_filepath = joinpath(artifact"YearPredictionMSD", "YearPredictionMSD.txt")
msd = CSV.File(msd_filepath, header=false) |> DataFrame
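# Map the 90 raw timbre columns into grouped features: 12 means, 12 standard
# deviations, 66 covariances, and 66 derived correlations.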
function feature_map(X) #; noisegroups=10, noisefeatures=50)
mean_features = X[:, 1:12]
var_features = X[:, 13:24]
sd_features = sqrt.(var_features)
#cv_features = sd_features ./ abs.(mean_features)
cov_features = X[:, 25:90]
cor_features = zeros(size(cov_features))
cnt = 0
for offset=1:11
for i=1:(12-offset)
cnt = cnt + 1
cor_features[:,cnt] = cov_features[:,cnt] ./ sqrt.(var_features[:,i] .* var_features[:,i+offset])
end
end
    # noise_features = randn(size(X,1), noisegroups*noisefeatures)
grp = GroupedFeatures([12, 12, 66, 66]) #, fill(noisefeatures, noisegroups)))
(MLJ.table([mean_features sd_features cov_features cor_features]), grp)
end
Y = Float64.(msd[:, 1])
X, groups = feature_map(Matrix(msd[:, 2:91]))
train_idx = 1:463_715
test_idx = (1:51_630) .+ 463_715
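# Estimators: single ridge, σ-ridge, and multi ridge (all LOO-tuned),
# plus the group lasso (holdout-tuned)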
single_ridge = SingleGroupRidgeRegressor(decomposition = :cholesky, groups=groups, λ=1.0, center=true, scale=true)
loo_single_ridge = LooRidgeRegressor(ridge = deepcopy(single_ridge))
sigma_ridge = SigmaRidgeRegressor(groups=groups, decomposition = :cholesky, σ=0.01, center=true, scale=true)
loo_sigmaridge = LooRidgeRegressor(ridge=deepcopy(sigma_ridge), tuning=SigmaRidgeRegression.DefaultTuning(scale=:linear, param_min_ratio=0.001))
multi_ridge = MultiGroupRidgeRegressor(groups; decomposition = :cholesky, center=true, scale=true)
loo_multi_ridge = LooRidgeRegressor(ridge = deepcopy(multi_ridge), rng=MersenneTwister(1))
glasso = GroupLassoRegressor(groups=groups, decomposition = :cholesky, center=true, scale=true)
holdout_glasso = TunedRidgeRegressor(ridge=deepcopy(glasso), resampling= Holdout(shuffle=true, rng=1), tuning=DefaultTuning(param_min_ratio=1e-5))
ns_subsample = [200; 500; 1000]
n_montecarlo = 20
Random.seed!(1)
mse_array = Array{Float64}(undef, length(ns_subsample), n_montecarlo, 4)
time_array = Array{Float64}(undef, length(ns_subsample), n_montecarlo, 4)
λs_array = Array{Any}(undef, length(ns_subsample), n_montecarlo, 4)
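# For each subsample size, repeatedly draw a random training subsample, fit
# each method, and record test MSE, wall-clock time, and selected penalties.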
for (k, n_subsample) in enumerate(ns_subsample)
for j in Base.OneTo(n_montecarlo)
train_idx_subsample = sample(train_idx, n_subsample, replace=false)
resampling_idx = [(train_idx_subsample, test_idx)]
for (i,mach) in enumerate([loo_sigmaridge, loo_single_ridge, loo_multi_ridge, holdout_glasso])
time_array[k,j,i] = @elapsed begin
_mach = machine(mach, X, Y)
_eval = evaluate!(_mach, resampling=resampling_idx, measure=l2)
end
mse_array[k,j,i] = _eval.measurement[1]
λs_array[k,j,i] = deepcopy(_mach.report.best_λs)
end
end
end
pgfplotsx()
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amsmath}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amssymb}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{bm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\blambda}{\bm{\lambda}}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\risk}[1]{\bm{R}(#1)}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage[bbgreekl]{mathbbol}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\sigmacv}{\bbsigma}")
_orange = RGB{Float64}(0.933027,0.665164,0.198652)
_orange2 = RGB{Float64}(0.85004,0.540122,0.136212)
method_names = [L"\sigmacv-\textrm{Ridge}" L"\textrm{Single Ridge}" L"\textrm{Multi Ridge}" L"\textrm{Group Lasso}"]
_thickness_scaling = 1.8
mse_plot = plot(
dotplot(method_names, mse_array[1,:,:],
title=L"n\;=\;%$(ns_subsample[1])",
frame = :box, grid=nothing,
yguide = "Mean squared error",
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
thickness_scaling = _thickness_scaling,
ylim = (88,141),
color=_orange),
dotplot(method_names, mse_array[2,:,:],
title=L"n\;=\;%$(ns_subsample[2])",
frame = :box, grid=nothing,
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
ylim = (88,141),
thickness_scaling = _thickness_scaling,
color=_orange),
dotplot(method_names, mse_array[3,:,:],
title=L"n\;=\;%$(ns_subsample[3])",
frame = :box, grid=nothing,
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
thickness_scaling = _thickness_scaling,
ylim = (88,141),
color=_orange), size=(1650,400), layout=(1,3))
savefig(mse_plot, "one_million_songs_mse.pdf")
time_plot = plot(
dotplot(method_names, time_array[1,:,:],
title=L"n\;=\;%$(ns_subsample[1])",
frame = :box, grid=nothing,
yguide = "Time (seconds)",
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
ylim = (-0.5,10.5),
thickness_scaling = _thickness_scaling,
color=_orange),
dotplot(method_names, time_array[2,:,:],
title=L"n\;=\;%$(ns_subsample[2])",
frame = :box, grid=nothing,
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
thickness_scaling = _thickness_scaling,
ylim = (-0.5,10.5),
color=_orange),
dotplot(method_names, time_array[3,:,:],
title=L"n\;=\;%$(ns_subsample[3])",
frame = :box, grid=nothing,
label=nothing,
markerstrokecolor=_orange2,
markerstrokewidth=0.5,
thickness_scaling = _thickness_scaling,
ylim = (-0.5,10.5),
color=_orange), size=(1650,400), layout=(1,3))
savefig(time_plot, "one_million_songs_time.pdf")
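# Truncate the selected penalties at 20 so a few extreme values do not dominate the dot plots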
_trunc = 20
λs_mean = min.(getindex.(λs_array,1), 20)
λs_std = min.(getindex.(λs_array,2), 20)
λs_cov = min.(getindex.(λs_array,3), 20)
λs_cor = min.(getindex.(λs_array,4), 20)
λs_sigma_ridge = [λs_mean[1,:,1] λs_std[1,:,1] λs_cov[1,:,1] λs_cor[1,:,1]]
feature_names = ["mean" "std" "cov" "cor"]
λs_names = [L"\hat{\lambda}_{\textrm{%$n}}" for n in feature_names]
λ_plot_params = (frame = :box, grid=nothing,
label="",
markerstrokecolor=:purple,
markerstrokewidth=0.5,
markercolor= RGB{Float64}(205/256,153/256,255/256),
thickness_scaling = 1.7,
ylim = (-0.9,_trunc + 0.8))
#λ_yguide=L"\min\{\widehat{\lambda},20\}",
method_names_short = [L"\sigmacv-\textrm{Ridge}" L"\textrm{Single}" L"\textrm{Multi}" L"\textrm{GLasso}"]
plot(
dotplot(method_names_short, λs_mean[1,:,:]; yguide=L"\min\{\widehat{\lambda},20\}", title=λs_names[1]),
dotplot(method_names_short, λs_std[1,:,:], title=λs_names[2]),
dotplot(method_names_short, λs_cov[1,:,:], title=λs_names[3]),
dotplot(method_names_short, λs_cor[1,:,:], title=λs_names[4]),
size=(1500,270), layout=(1,4); λ_plot_params...)
savefig("one_million_song_lambdas_n200.pdf")
plot(
dotplot(method_names_short, λs_mean[2,:,:]; yguide=L"\min\{\widehat{\lambda},20\}"),
dotplot(method_names_short, λs_std[2,:,:]),
dotplot(method_names_short, λs_cov[2,:,:]),
dotplot(method_names_short, λs_cor[2,:,:]),
size=(1500,260), layout=(1,4); λ_plot_params...)
savefig("one_million_song_lambdas_n1000.pdf")
plot(
dotplot(method_names_short, λs_mean[3,:,:]; yguide=L"\min\{\widehat{\lambda},20\}"),
dotplot(method_names_short, λs_std[3,:,:]),
dotplot(method_names_short, λs_cov[3,:,:]),
dotplot(method_names_short, λs_cor[3,:,:]),
size=(1500,260), layout=(1,4); λ_plot_params...)
savefig("one_million_song_lambdas_n5000.pdf")
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 4278 | using Pkg
Pkg.activate(@__DIR__)
using SigmaRidgeRegression
using LinearAlgebra
using StatsBase
using Plots
using Statistics
using MLJ
using LaTeXStrings
using Random
using PGFPlotsX
pgfplotsx()
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amsmath}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amssymb}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{bm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage[bbgreekl]{mathbbol}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\sigmacv}{\bbsigma}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\loo}{\operatorname{CV}^*}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\blambda}{\bm{\lambda}}")
main_cols = [:purple :green :grey]# [:black :purple :green]
Random.seed!(1)
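# Simulation setup: n = 400 observations, three groups of 25 features with
# increasing signal strength, and noise level σ = 4.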
σ = 4.0
grp = GroupedFeatures([25;25;25])
n = 400
p = grp.p
#ρ = 0.7
#Σ = [ρ^(abs(i-j)) for i=1:p,j=1:p]
#Σ_chol = cholesky(Σ)
X = randn(n, p)# * Σ_chol.UL
αs = sqrt.([4.0;8.0;12.0])
# range(2.0, 2.75, 3.5; length=3) # 1.0:5.0
β = random_betas(grp, αs)
group_summary(grp, β, norm)
sum(abs2, β)
Y = X*β .+ σ .* randn(n)
loo_ridge = LooRidgeRegressor(ridge=SingleGroupRidgeRegressor(λ=1.0))
loo_ridge_machine = machine(loo_ridge, MLJ.table(X), Y)
loo_sigma_ridge = LooRidgeRegressor(ridge=SigmaRidgeRegressor(groups=grp), tuning=SigmaRidgeRegression.DefaultTuning(scale=:linear, param_min_ratio=0.001))
loo_sigmaridge_machine = machine(loo_sigma_ridge, MLJ.table(X), Y)
fit!(loo_ridge_machine)
fit!(loo_sigmaridge_machine)
σs = loo_sigmaridge_machine.report.params
loo_sigmaridge_machine.report.loos
param_subset = loo_sigmaridge_machine.report.params .<= 6.2
λs = Matrix(hcat(loo_sigmaridge_machine.report.λs...)')
λs_subset = λs[param_subset, :]
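# Refit the σ-ridge at each grid value of σ to trace the coefficient paths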
βs_list = [fit!(machine(SigmaRidgeRegressor(groups=grp, σ=σ), MLJ.table(X),Y)).fitresult.coef for σ in σs]
βs = Matrix(hcat(βs_list...)')
plot(loo_sigmaridge_machine.report.params[param_subset], λs_subset)
plot(loo_sigmaridge_machine.report.params, loo_sigmaridge_machine.report.loos)
loo_λ = loo_ridge_machine.report.best_param
#with gr
#plot(sqrt.(σs_squared1), mypath1.loos)
pl_left = plot(loo_sigmaridge_machine.report.params[param_subset], λs_subset,
legend=:topleft, linecolor=main_cols,
linestyle=[ :dot :dashdot :dash], xlab=L"\sigmacv",
ylab=L"\widehat{\lambda}(\sigmacv)",
background_color_legend = :transparent,
foreground_color_legend = :transparent,
grid = nothing, frame=:box,
label=["Group 1" "Group 2" "Group 3"],
thickness_scaling = 1.8,
size=(550,400))
savefig(pl_left, "intro_figure_left.tikz")
pl_right = plot(loo_sigmaridge_machine.report.params[param_subset], loo_sigmaridge_machine.report.loos[param_subset], color=:darkblue,
grid = nothing, frame=:box,
background_color_legend = :transparent,
foreground_color_legend = :transparent,
xlab=L"\sigmacv", ylab=L"\loo(\sigmacv)", label=nothing,
thickness_scaling = 1.8,
size=(550,400), ylim=(18.2,19.2), yticks=[18.4;18.6;18.8;19.0])
savefig(pl_right, "intro_figure_right.tikz")
pl_left_tree = plot(loo_sigmaridge_machine.report.params, λs,
ylim = (0,15), legend=:topleft, color=main_cols,
linestyle=[ :dot :dashdot :dash],
background_color_legend = :transparent,
foreground_color_legend = :transparent,
grid = nothing, frame=:box,
xlab=L"\sigmacv",
ylab= L"\widehat{\lambda}_k(\sigmacv)",
label=["Group 1" "Group 2" "Group 3"],
thickness_scaling = 1.8,
size=(550,400))
savefig(pl_left_tree, "christmas_tree_left.tikz")
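# Repeat each group's color and linestyle across its 25 coefficients for the path plot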
cols_rep = hcat([fill(col, 1, 25) for col in [:purple; :green; :grey]]...)
ltype_rep = hcat([fill(lt, 1, 25) for lt in [:solid; :dash; :dot]]...)
pl_right_tree = plot(σs, βs, alpha=0.6,
linewidth=0.8,
label="",
grid = nothing, frame=:box,
ylab= L"\widehat{w}_j(\widehat{\blambda}(\sigmacv))",
xlab=L"\sigmacv",
color=cols_rep, ylim=(-2.1,2.1),
thickness_scaling = 1.8,
size=(550,400))
savefig(pl_right_tree, "christmas_tree_right.tikz")
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |