licenses (sequence, lengths 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2–8) | text (string, lengths 25–67.1M) | package_name (string, lengths 2–41) | repo (string, lengths 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1378 |
using BesselK, BenchmarkTools, Printf
include("shared.jl")
const PAIRS = ((0.5, 1.0, "half-integer"),
(1.0, 1.0, "whole integer"),
(3.001, 1.0, "near-integer order"),
(3.001, 8.0, "near-integer order, borderline arg"),
(1.85, 1.0, "small order"),
(1.85, 8.0, "small order, borderline arg"),
(1.85, 14.0, "intermediate arg"),
(1.85, 29.0, "large intermediate arg"),
(1.85, 35.0, "large argument"))
for (j, (v, x, descriptor)) in enumerate(PAIRS)
t_us = (@belapsed BesselK._besselk($v, $x) samples=1_000)*1e9 # our code
t_am = (@belapsed BesselK.besselk($v, $x) samples=1_000)*1e9 # AMOS
t_us_d = (@belapsed adbesselkdv($v, $x) samples=1_000)*1e9 # our code
t_am_d = (@belapsed fdbesselkdv($v, $x) samples=1_000)*1e9 # AMOS
t_us_d2 = (@belapsed adbesselkdvdv($v, $x) samples=1_000)*1e9 # our code
t_am_d2 = (@belapsed fdbesselkdvdv($v, $x) samples=1_000)*1e9 # AMOS
if j != length(PAIRS)
@printf "(%1.3f, %1.0f) & %1.0f & %1.0f & %1.0f & %1.0f & %1.0f & %1.0f & %s \\\\\n" v x t_us t_am t_us_d t_am_d t_us_d2 t_am_d2 descriptor
else
@printf "(%1.3f, %1.0f) & %1.0f & %1.0f & %1.0f & %1.0f & %1.0f & %1.0f & %s\n" v x t_us t_am t_us_d t_am_d t_us_d2 t_am_d2 descriptor
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1557 |
using LinearAlgebra, BesselK, BenchmarkTools, Printf, StaticArrays
include("shared.jl")
const GRIDN = 24
const GRID1D = range(0.0, 1.0, length=GRIDN)
const PTS = map(x->SVector{2,Float64}(x[1], x[2]),
vec(collect.(Iterators.product(GRID1D, GRID1D))))
function checkcovmat(fn, p, v)
_p = pv(1.0, p, v)
M = assemble_matrix(fn, PTS, _p)
em = eigmin(M)
Mf = cholesky!(M, check=false)
if issuccess(Mf)
ld = logdet(Mf)
else
ld = NaN
end
(issuccess(Mf) ? "S" : "F", em, ld)
end
const VRANGE = (0.4, 1.25, 3.5)
const PRANGE = (0.01, 1.0, 100.0)
# For now, I might even keep the timing in seconds.
for (j, (v, p)) in enumerate(Iterators.product(VRANGE, PRANGE))
_p = pv(1.0, p, v)
# test 1: assembly time.
t_u = @belapsed assemble_matrix(matern_us, $PTS, $_p) samples=8
t_a = @belapsed assemble_matrix(matern_amos, $PTS, $_p) samples=8
# test 2: Cholesky success or failure:
(s_u, em_u, ld_u) = checkcovmat(matern_us, p, v)
(s_a, em_a, ld_a) = checkcovmat(matern_amos, p, v)
# PRINTING, column order:
# (p,v) pair
# times (us, amos)
# eigmins (us, amos)
# eigmin difference
# logdets (us, amos)
# logdet difference
if j != length(VRANGE)*length(PRANGE)
@printf "(%1.3f, %1.2f) & %1.1e & %1.1e & %1.2e & %1.2e & %1.2e & %1.2e & %1.2e & %1.2e \\\\\n" p v t_u t_a em_u em_a abs(em_u-em_a) ld_u ld_a abs(ld_u-ld_a)
else
@printf "(%1.3f, %1.2f) & %1.1e & %1.1e & %1.2e & %1.2e & %1.2e & %1.2e & %1.2e & %1.2e\n" p v t_u t_a em_u em_a abs(em_u-em_a) ld_u ld_a abs(ld_u-ld_a)
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1504 |
using LinearAlgebra
include("shared.jl")
const GRIDN = 24
function checkcovmat(fn, p, v; display_small_piece=false)
grid1d = range(0.0, 1.0, length=GRIDN)
pts = vec(collect.(Iterators.product(grid1d, grid1d)))
M = assemble_matrix(fn, pts, [1.0, p, v])
em = eigmin(M)
if display_small_piece # just a debugging thing, really.
println("Principle 5x5 minor:")
display(M[1:5, 1:5])
end
Mf = cholesky!(M, check=false)
(issuccess(Mf), em)
end
translate_result(res) = res ? :SUCCESS : :FAILURE
const VRANGE = (0.4, 0.55, 0.755, 0.99, 1.01, 1.55, 2.05, 3.05, 4.05)
const PRANGE = (0.01, 0.1, 1.0, 10.0, 100.0, 1000.0)
for (v, p) in Iterators.product(VRANGE, PRANGE)
println("\n(v,p) = ($v, $p):")
print("AMOS:")
amos_succ = false
us_succ = false
(amos_em, us_em) = (0.0, 0.0)
try
(amos_succ, amos_em) = checkcovmat(matern_amos, p, v)
println("$(translate_result(amos_succ)), $amos_em")
catch
println("FAILURE (exception thrown)")
end
print("US: ")
try
(us_succ, us_em) = checkcovmat(matern_us, p, v)
println("$(translate_result(us_succ)), $us_em")
catch
println("FAILURE (exception thrown)")
end
println("Eigmin difference: $(abs(amos_em-us_em))")
println("Eigmin rtol: $(abs(amos_em-us_em)/abs(amos_em))")
if amos_succ && !us_succ
println("######################")
println("!!!!WE FAILED WHERE AMOS SUCCEEDED, INVESTIGATE!!!")
println("######################")
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1163 |
using BenchmarkTools, SpecialFunctions, FiniteDifferences, ForwardDiff, StaticArrays
include("shared.jl")
FDFAST(fn, x) = (fn(x+h)-fn(x))/h # fixed-step forward difference; h comes from shared.jl
const FD2 = central_fdm(2, 1)
dbesselk_fdfast(v, x) = FDFAST(_v->besselk(_v, x), v)
dbesselk_fd2(v, x) = FD2(_v->besselk(_v, x), v)
dbesselk_ad(v, x) = ForwardDiff.derivative(_v->_besselk(_v, x), v)
#=
# And note zero allocations for the AD version. Pretty good. And faster by
# significant margins---like a factor of five---than even the most reckless FD.
for (v, x) in Iterators.product((0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.25, 3.0, 4.25),
(1e-4, 0.25, 2.5, 5.0, 7.5, 12.0, 25.0))
println("(v,x) = ($v, $x):")
print("FDFAST:")
@btime dbesselk_fdfast($v, $x)
print("FD2: ")
@btime dbesselk_fd2($v, $x)
print("AD: ")
@btime dbesselk_ad($v, $x)
print("\n\n")
end
=#
# matern function:
const oo = @SVector ones(2)
const zz = @SVector zeros(2)
@inline pv(v) = @SVector [1.1, 1.1, v]
function matern_d3_ad(v)
ForwardDiff.derivative(_v->BesselK.matern(oo, zz, pv(_v)), v)
end
function matern_d3_fd(v)
FDFAST(_v->BesselK.matern(oo, zz, pv(_v)), v)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1392 |
# Observations:
#
# By and large, we really smoke AMOS in speed. Especially for large arg.
# Assembling a reasonably large matrix (1024 x 1024) gets done in about half the
# time. Which may seem like nothing, but a factor of two never hurts...
using BenchmarkTools, SpecialFunctions, StaticArrays
include("../besk.jl")
include("shared.jl")
const NUs = (0.1, 0.25, 0.5, 0.75, 0.999, 1.0, 1.001, 1.25, 1.5, 1.75, 2.0,
2.05, 2.75, 3.1, 3.8, 4.3, 4.9)
const TINY_X = 1e-4
const SMALL_X = 0.25
const MID_X = 7.5
const LARGE_X = 20.0
const Xs = (1e-4, 0.25, 5.0, 7.5, 8.5, 14.0, 17.0, 29.0, 31.0, 50.0)
const PTS = [rand(3).*10.0 for _ in 1:1024]
const PRMS = @SVector [1.0, 1.0, 1.25]
print("\n\n")
println("##################")
println("HEAT ONE: pointwise timings.")
println("##################")
print("\n\n")
for (v, x) in Iterators.product(NUs, Xs)
println("(v,x) = ($v, $x):")
print("Timing for AMOS:")
@btime SpecialFunctions.besselk($v, $x)
print("Timing for us:")
try
@btime _besselk($v, $x)
catch
println("FAILURE/ERROR OUT.")
end
print("\n\n")
end
print("\n\n")
println("##################")
println("HEAT TWO: kernel matrix assembly.")
println("##################")
print("\n\n")
println("Timings for AMOS:")
@btime assemble_matrix(matern_amos, $PTS, $PRMS)
println("Timings for US:")
@btime assemble_matrix(matern_us, $PTS, $PRMS)
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1812 |
using LinearAlgebra
include("shared.jl")
const GRIDPTS = let GRIDN = 24
grid1d = range(0.0, 1.0, length=GRIDN)
pts = vec(collect.(Iterators.product(grid1d, grid1d)))
end
const RANDPTS = [rand(2) for _ in 1:(24*24)]
function assemblecovmat(fn, p, pts; display_small_piece=false)
M = assemble_matrix(fn, pts, p)
em = eigmin(M)
if display_small_piece # just a debugging thing, really.
println("Principal 5x5 minor:")
display(M[1:5, 1:5])
end
Mf = cholesky(M, check=false)
(issuccess(Mf), em)
end
translate_result(res) = res ? :SUCCESS : :FAILURE
const VRANGE = (0.4, 0.55, 0.755, 0.99, 1.01, 1.55, 2.05, 3.05, 4.05)
const PRANGE = (0.01, 0.1, 1.0, 10.0, 100.0, 1000.0)
# Cases to look more into:
# ((RAND,GRID), v=(3.05, 4.05), p=0.1)
#
# otherwise, things look good: rtols can be larger than you might expect at
# times, but that appears to be happening when both eigenvalues are exact to
# more or less eps() precision where atol is more relevant anyway.
#
for (case, v, p) in Iterators.product((:GRID, :RAND), VRANGE, PRANGE)
pts = (case == :GRID) ? GRIDPTS : RANDPTS
M_amos = assemble_matrix(matern_amos, pts, (1.0, p, v))
M_us = assemble_matrix(matern_us, pts, (1.0, p, v))
# pointwise checks:
println("($case, v=$v, p=$p):")
println("atol difference: $(maximum(abs, M_amos - M_us))")
println("rtol difference: $(maximum(abs, tolfun.(zip(M_amos, M_us))))")
# eigenvalue checks, assuming successful cholesky:
cholflag = issuccess(cholesky(M_amos, check=false))
if cholflag
ev_amos = eigvals(M_amos)
ev_us = eigvals(M_us)
println("largest eig atol: $(maximum(abs, ev_amos-ev_us))")
println("largest eig rtol: $(maximum(abs, tolfun.(zip(ev_amos, ev_us))))")
else
println("Cholesky for M_amos failed, skipping eigenvalue checks.")
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1696 |
using BenchmarkTools, StaticArrays
include("shared.jl")
const oo = @SVector ones(2)
const zz = @SVector zeros(2)
const TESTING_PARAMS = (@SVector [1.25, 0.25, 0.5],
@SVector [1.25, 5.25, 0.5],
@SVector [1.25, 0.25, 0.75],
@SVector [1.25, 5.25, 0.75],
@SVector [1.25, 0.25, 1.0],
@SVector [1.25, 5.25, 1.0],
@SVector [1.25, 0.25, 1.75],
@SVector [1.25, 5.25, 1.75],
@SVector [1.25, 0.25, 3.0],
@SVector [1.25, 5.25, 3.0],
@SVector [1.25, 0.25, 4.75],
@SVector [1.25, 5.25, 4.75])
for pp in TESTING_PARAMS
println("\n###")
println("Parameters $pp")
println("###\n")
println("Fast finite diff, h=$h:")
@btime matern_fdfast_d1($oo, $zz, $pp)
@btime matern_fdfast_d2($oo, $zz, $pp)
@btime matern_fdfast_d3($oo, $zz, $pp)
println("Adaptive finite diff, order 2:")
@btime matern_fd2_d1($oo, $zz, $pp)
@btime matern_fd2_d2($oo, $zz, $pp)
@btime matern_fd2_d3($oo, $zz, $pp)
println("Adaptive finite diff, order 5:")
@btime matern_fd5_d1($oo, $zz, $pp)
@btime matern_fd5_d2($oo, $zz, $pp)
@btime matern_fd5_d3($oo, $zz, $pp)
println("Complex step, h=$ch:")
try
@btime matern_cstep_d1($oo, $zz, $pp)
@btime matern_cstep_d2($oo, $zz, $pp)
@btime matern_cstep_d3($oo, $zz, $pp)
catch
println("Failure/error. Probably at integer values.")
end
println("Autodiff:")
@btime matern_ad_d1($oo, $zz, $pp)
@btime matern_ad_d2($oo, $zz, $pp)
@btime matern_ad_d3($oo, $zz, $pp)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1133 |
using BesselK, SpecialFunctions, ArbNumerics
include("shared.jl")
function wbesselkxv(v,x)
try
return BesselK.adbesselkxv(v,x)
catch
return NaN
end
end
function rbesselkxv(v,x)
_v = ArbReal(v)
_x = ArbReal(x)
xv = _x^_v
Float64(xv*ArbNumerics.besselk(ArbFloat(v), ArbFloat(x)))
end
abesselkxv(v,x) = (x^v)*SpecialFunctions.besselk(v, x)
const VGRID = range(0.25, 10.0, length=100)
const XGRID = range(0.0, 8.0, length=201)[2:end] # since other impls can't do x=0.
const BASELINE = [rbesselkxv(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const AMOS = [abesselkxv(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const OURSOL = [wbesselkxv(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const TOLS_A = atolfun.(zip(BASELINE, AMOS))
const TOLS_U = atolfun.(zip(BASELINE, OURSOL))
const TOLS_AU = rtolfun.(zip(AMOS, OURSOL))
#=
gnuplot_save_matrix!("../plotdata/atols_amos.csv", TOLS_A, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_ours.csv", TOLS_U, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/rtols_amos_ours.csv", TOLS_AU, VGRID, XGRID)
=#
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1339 |
using SpecialFunctions, ForwardDiff, FiniteDifferences
include("shared.jl")
const BIG_FD = central_fdm(10,1)
const BIG_FD_O2 = central_fdm(10,2)
xvbesselkdv(v,x) = ForwardDiff.derivative(_v->BesselK.adbesselkxv(_v,x), v)
xvbesselkdv2(v,x) = ForwardDiff.derivative(_v->xvbesselkdv(_v,x), v)
beskxv(v,x) = besselk(v,x)*(x^v)
dxvbesselkdv(v, x) = BIG_FD(_v->beskxv(_v,x), v)
dxvbesselkdv2(v, x) = BIG_FD_O2(_v->beskxv(_v,x), v)
fastdxvbesselkdv2(v, x) = (beskxv(v+2e-6, x) - 2*beskxv(v+1e-6, x) + beskxv(v, x))/1e-12
const VGRID = range(0.25, 10.0, length=101) # to avoid integer v.
const XGRID = range(0.0, 50.0, length=201)[2:end] # to avoid zero x.
const BASELINE = [dxvbesselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const OURSOL = [xvbesselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const FASTFD = [fastdxvbesselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const TOLS = atolfun.(zip(BASELINE, OURSOL))
const FDTOLS = atolfun.(zip(BASELINE, FASTFD))
const DIFTOLS = log10.(TOLS) .- log10.(FDTOLS)
gnuplot_save_matrix!("../plotdata/atols_deriv2_xv_fd.csv", FDTOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv2_xv_ad.csv", TOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv2_xv_fdad.csv", DIFTOLS, VGRID, XGRID)
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 999 |
module BesselK
using Bessels
export adbesselk, adbesselkxv, matern
# Here's a work-around since ForwardDiff#481 got merged, making iszero(x)
# check that the value AND partials of x are zero. Conceptually, I'm
# sympathetic that this is the more correct choice. It just doesn't quite work
# for the way this code needs to branch.
_iszero(x) = (0 <= x) & (x <= 0)
include("gamma.jl") # gamma function, for the moment ripped from Bessels.jl
include("besk_ser.jl") # enhanced direct series. The workhorse for small-ish args.
include("besk_as.jl") # asymptotic expansion for large arg.
include("uk_polys.jl") # Uk polynomials, now generated statically ahead of time.
include("besk_asv.jl") # uniform expansion for large order.
include("besk_temme.jl") # Temme recurrence series for small-ish args. For AD.
include("besk.jl") # putting it all together with appropriate branching.
include("matern.jl") # a basic Matern covariance function
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 2125 |
@inline isnearint(v, tol) = abs(v-round(v)) < tol
# Unlike the previous version of this function, this inner function now ASSUMES
# that v isa Dual, and so it only hits branches that are relevant for AD.
function _besselk(v, x, maxit, tol, order)
if abs(x) < 4 && (v < 2.85) && !isnearint(v, 0.01)
return _besselk_ser(v, x, maxit, tol, false)
elseif abs(x) < 8.5
return _besselk_temme(v, x, maxit, tol, false)
elseif abs(x) < 15.0
return _besselk_asv(v, x, Val(12), Val(false))
elseif abs(x) < 30.0
return _besselk_asv(v, x, Val(8), Val(false))
elseif abs(v) > 1.5
return _besselk_asv(v, x, Val(6), Val(false))
else
return _besselk_as(v, x, order)
end
end
# Just has some different cutoffs, which for whatever reason work a bit better.
# At some point this function could be improved a lot, which is part of why I'm
# okay with splitting it like this for the moment.
function _besselkxv(v, x, maxit, tol, order)
if abs(x) < 4 && (v < 5.75) && !isnearint(v, 0.01)
return _besselk_ser(v, x, maxit, tol, true)
elseif abs(x) < 6.0
return _besselk_temme(v, x, maxit, tol, true)
elseif abs(x) < 15.0
return _besselk_asv(v, x, Val(12), Val(true))
elseif abs(x) < 30.0
return _besselk_asv(v, x, Val(8), Val(true))
elseif abs(v) > 1.5
return _besselk_asv(v, x, Val(6), Val(true))
else
return _besselk_as(v, x, order)*exp(v*log(x)) # temporary, until float pows in 1.9.
end
end
adbesselk(v::AbstractFloat, x::AbstractFloat) = Bessels.besselk(v, x)
adbesselk(v, x) = _besselk(v, x, 100, 1e-12, 6)
# TODO (cg 2022/09/09 12:22): with newer julia and/or package versions, I'm
# getting allocations in the second derivative if I'm not careful. So for now
# I'm going back to this, which unfortunately is still NaN at zero, even though
# that value is well-defined. I suppose I could put the limit in if x is zero,
# but I don't love that.
function adbesselkxv(v::AbstractFloat, x::AbstractFloat)
iszero(x) && return _gamma(v)*2^(v-1)
Bessels.besselk(v, x)*(x^v)
end
adbesselkxv(v, x) = _iszero(x) ? _gamma(v)*2^(v-1) : _besselkxv(v, x, 100, 1e-12, 6)
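# A quick usage sketch (values here are arbitrary/illustrative): plain floats
# dispatch to Bessels.jl, while ForwardDiff duals take the branches above.
#   adbesselk(1.5, 2.0)                                  # Bessels.besselk
#   ForwardDiff.derivative(v->adbesselk(v, 2.0), 1.5)    # hits _besselk
#   adbesselkxv(1.5, 0.0)                                # finite limit at zero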
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1137 |
# This is an amalgam of my original asymptotic series expansion and the
# improvements provided by Michael Helton and Oscar Smith in Bessels.jl, where
# this is more or less a pending PR (#48).
# To be replaced with Bessels.SQRT_PID2 when PR is merged.
const SQRT_PID2 = sqrt(pi/2)
# For now, no exponential improvement. It requires the exponential integral
# function, which would either need to be lifted from SpecialFunctions.jl or
# re-implemented. And with an order of, like, 10, this seems to be pretty
# accurate and still faster than the uniform asymptotic expansion.
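# For reference, the series being accumulated is the standard large-argument
# expansion (cf. DLMF 10.40.2):
#   K_v(x) ~ sqrt(pi/(2x)) * exp(-x) * (1 + sum_{j>=1} a_j(v)/x^j),
#   a_j(v) = (4v^2 - 1)(4v^2 - 3^2)...(4v^2 - (2j-1)^2) / (j! * 8^j),
# with j!, x^j, and 8^j tracked incrementally in factj, _z, and eightj below.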
function _besselk_as(v::V, x::T, order) where {V,T}
fv = 4*v*v
_z = x
ser_v = one(T)
floatj = one(T)
ak_numv = fv - floatj
factj = one(T)
twofloatj = one(T)
eightj = T(8)
for _ in 1:order
# add to the series:
term_v = ak_numv/(factj*_z*eightj)
ser_v += term_v
# update ak and _z:
floatj += one(T)
twofloatj += T(2)
factj *= floatj
fourfloatj = twofloatj*twofloatj
ak_numv *= (fv - fourfloatj)
_z *= x
eightj *= T(8)
end
pre_multiply = SQRT_PID2*exp(-x)/sqrt(x)
pre_multiply*ser_v
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 613 |
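# NOTE: the generated function below implements the uniform large-order
# ("Debye"-type) expansion, cf. DLMF 10.41.4:
#   K_v(v*z) ~ sqrt(pi/(2v)) * exp(-v*eta) / (1 + z^2)^(1/4)
#              * sum_{k>=0} (-1)^k * U_k(p) / v^k,
# with p = 1/sqrt(1 + z^2) and eta = sqrt(1 + z^2) + log(z/(1 + sqrt(1 + z^2))).
# The code generation just unrolls the U_k polynomial evaluations, and when the
# modification flag M is set, the exp(v*log(z*v)) factor folds an (x^v)-type
# rescaling into the prefactor.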
@generated function _besselk_vz_asv(v, z, ::Val{N}, ::Val{M}) where{N,M}
quote
ez = sqrt(1+z*z)+log(z/(1+sqrt(1+z*z)))
pz = inv(sqrt(1+z*z))
out = sqrt(pi/(2*v))/sqrt(sqrt(1+z*z))
mulval = M ? exp(v*log(z*v)-v*ez) : exp(-v*ez)
(ser, sgn, _v) = (zero(z), one(z), one(v))
evaled_polys = tuple($([:($(Symbol(:uk_, j, :_poly))(pz)) for j in 0:(N-1)]...))
Base.Cartesian.@nexprs $N j -> begin
ser += sgn*evaled_polys[j]/_v
sgn *= -one(z)
_v *= v
end
mulval*out*ser
end
end
_besselk_asv(v, z, maxorder, modify) = _besselk_vz_asv(v, z/v, maxorder, modify)
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1337 |
function _besselk_ser(v, x, maxit, tol, modify)
T = promote_type(typeof(x), typeof(v))
out = zero(T)
oneT = one(T)
twoT = oneT+oneT
# precompute a handful of things:
xd2 = x/twoT
xd22 = xd2*xd2
half = oneT/twoT
if modify
e2v = exp2(v)
xd2_v = (x^(2*v))/e2v
xd2_nv = e2v
else
lxd2 = log(xd2)
xd2_v = exp(v*lxd2)
xd2_nv = exp(-v*lxd2)
end
gam_v = _gamma(v)
gam_nv = _gamma(-v)
gam_1mv = -gam_nv*v # == gamma(one(T)-v)
gam_1mnv = gam_v*v # == gamma(one(T)+v)
xd2_pow = oneT
fact_k = oneT
floatk = convert(T, 0)
(gpv, gmv) = (gam_1mnv, gam_1mv)
# One final re-compression of a few things:
_t1 = gam_v*xd2_nv*gam_1mv
_t2 = gam_nv*xd2_v*gam_1mnv
# now the loop using Oana's series expansion, with term function manually
# inlined for max speed:
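# For non-integer v, the identity being summed is
#   K_v(x) = (1/2) * sum_{k>=0} ((x^2/4)^k / k!) *
#            ( Γ(v)*(x/2)^(-v)/(1-v)_k + Γ(-v)*(x/2)^(v)/(1+v)_k ),
# with (a)_k the rising factorial (carried in gmv and gpv), and an extra x^v
# folded into xd2_v/xd2_nv when `modify` is set.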
for _j in 0:maxit
t1 = half*xd2_pow
tmp = _t1/(gmv*fact_k)
tmp += _t2/(gpv*fact_k)
term = t1*tmp
out += term
((abs(term) < tol) && _j>5) && return out
# Use the trick that gamma(1+k+1+v) == gamma(1+k+v)*(1+k+v) to skip gamma calls:
(gpv, gmv) = (gpv*(oneT+v+floatk), gmv*(oneT-v+floatk))
xd2_pow *= xd22
fact_k *= (floatk+1)
floatk += T(1)
end
throw(error("$maxit iterations reached without achieving atol $tol."))
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 7251 |
const _GA = MathConstants.γ # because I don't like unicode...
const QUADGAMMA1 = -2.40411380631918857 # ψ^(2)(1)
const HEXGAMMA1 = -24.88626612344087823 # ψ^(4)(1)
# special methods for cosh(mu(v,x))*(x^vf) and sinh(...).
@inline coshmuxv(v, x) = (exp2(v) + exp2(-v)*x^(2*v))/2
@inline sinhmuxv(v, x) = (exp2(v) - exp2(-v)*x^(2*v))/2
const TPCOEF = (1, 0, (pi^2)/6, 0, (7*pi^4)/360, 0, (31*pi^6)/15120)
@inline tp_taylor0(v) = evalpoly(v, TPCOEF) # trig part
const G1COEF = (_GA,
0,
(2*_GA^3 - _GA*pi^2 - 2*QUADGAMMA1)/12,
0,
(12*_GA^5 - 20*(_GA^3)*pi^2 + _GA*pi^4 - 120*(_GA^2)*QUADGAMMA1 + 20*(pi^2)*QUADGAMMA1 - 12*HEXGAMMA1)/1440)
@inline g1_taylor0(v) = -evalpoly(v, G1COEF)
const G2COEF = (1.0,
0,
(_GA^2 - (pi^2)/6)/2,
0,
(60*_GA^4 - 60*(_GA*pi)^2 + pi^4 - 240*_GA*QUADGAMMA1)/1440)
@inline g2_taylor0(v) = evalpoly(v, G2COEF)
const SHCOEF = (1, 0, 1/6, 0, 1/120, 0, 1/5040, 0, 1/362880)
@inline sh_taylor0(v) = evalpoly(v, SHCOEF) # sinh part
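# For reference: TPCOEF is the Maclaurin series of t/sin(t) with the powers of
# pi folded in, so tp_taylor0(v) ≈ pi*v/sin(pi*v); SHCOEF is the series of
# sinh(t)/t; and G1COEF/G2COEF are Taylor expansions (in v, about zero) of
# Temme's gamma-ratio functions, built from the Euler-Mascheroni constant and
# the polygamma values at one defined above.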
# TODO: there is still a problem when v is not zero but z is zero. The NaN might
# be mathematically correct, and it isn't a branch that adbesselkxv hits.
@inline function f0_expansion0(v::V, z, modify) where{V}
_t = tp_taylor0(v)
_g1 = g1_taylor0(v)
_g2 = g2_taylor0(v)
if !modify
mu = v*log(2/z)
_cm = cosh(mu)
_sm = sinh(mu)
_sh = sh_taylor0(mu)*log(2/z)
else
_cm = coshmuxv(v, z)
if _iszero(v)
# Because of the branching here, if v is zero, then I know that z is NOT zero.
mu = v*log(2/z)
_sh = (z^v)*sh_taylor0(mu)*log(2/z)
else
_sh = sinhmuxv(v, z)/v
end
end
_t*(_g1*_cm + _g2*_sh)
end
# EVEN Cheby coefs for computing the gamma pair thing when v is not near zero.
# (but |v| is <= 1/2).
const A2N = (1.843740587300906,
-0.076852840844786,
0.001271927136655,
-0.000004971736704,
-0.000000033126120,
0.000000000242310,
-0.000000000000170,
-0.000000000000001
)
# EVEN Cheby coefs for computing the gamma pair thing when v is not near zero.
# (but |v| is <= 1/2).
const A2Np1 = (-0.283876542276024,
0.001706305071096,
0.000076309597586,
-0.000000865920800,
0.000000001745136,
0.000000000009161,
-0.000000000000034)
# This gives g1 and g2 directly when v is not near zero and completely avoids
# calls to gamma functions.
@inline function temmegammas(v)
twov = one(v)+one(v)
_v = twov*v
(tv_even, tv_odd) = (one(v), _v)
(ser_even, ser_odd) = (A2N[1]/2, A2Np1[1]*tv_odd)
@inbounds for n in 2:7
# get next even value. Note that tv_even is now T_2(_v).
tv_even = twov*_v*tv_odd - tv_even
# add to the even term series:
ser_even += A2N[n]*tv_even
# get the next odd term:
tv_odd = twov*_v*tv_even - tv_odd
# add to the odd term series:
ser_odd += A2Np1[n]*tv_odd
end
# one more term for the evens:
tv_even = twov*_v*tv_odd - tv_even
ser_even += A2N[8]*tv_even
# now return:
(ser_odd/v, ser_even)
end
function temme_pair(v, z, maxit, tol, modify=false)
@assert abs(v) <= 1/2 "This internal routine is only for |v|<=1/2."
# Some very low-level objects:
onez = one(z)
twoz = onez+onez
zd2 = z/twoz
_2dz = twoz/z
# Creating the necessary things to get initial f, p, q:
if abs(v) < 0.001
g1 = g1_taylor0(v)
g2 = g2_taylor0(v)
else
(g1, g2) = temmegammas(v)
end
(gp, gm) = (inv(-(g1*twoz*v - g2*twoz)/twoz), inv((g1*twoz*v + g2*twoz)/twoz))
# p0 and q0 terms, branches for if we're modifying by (x^vf):
if !modify
p0 = (exp(-v*log(zd2))/twoz)*gp
q0 = (exp(v*log(zd2))/twoz)*gm
else
p0 = exp2(v-one(v))*gp
q0 = exp2(-v-one(v))*(z^(2*v))*gm
end
# cosh and sinh terms for f0, branches for if we're modifying by (x^vf):
if !modify
mu = v*log(_2dz)
_cm = cosh(mu)
_sm = sinh(mu)
else
_cm = coshmuxv(v, z)
_sm = sinhmuxv(v, z)
end
# One more branch for f0, which is to check if v is near zero or z is near two.
if _iszero(z) && modify
f0 = one(z)
else
if abs(v) < 0.001
f0 = f0_expansion0(v, z, modify)
elseif abs(z-2)<0.001
_s = sh_taylor0(v*log(_2dz)) # manually plug in mu.
#f0 = (v*pi/sinpi(v))*(g1*cosh(mu) + g2*log(_2dz)*_s)
f0 = (v*pi/sinpi(v))*(g1*_cm + g2*log(_2dz)*_s)
else
# Temme's form is:
#f0 = (v*pi/sinpi(v))*(g1*cosh(mu) + g2*log(_2dz)*sinh(mu)/mu)
# But if I modify to this, I get rid of a near singularity as z->0:
f0 = (v*pi/sinpi(v))*(g1*_cm + g2*_sm/v)
end
end
(_f, _p, _q) = (f0, p0, q0)
# a few other odds and ends for efficient looping:
(ser_kv, ser_kvp1) = (f0, _p)
(factk, _floatk) = (onez, onez)
(v2, _zd4, _z) = (v*v, z*z/(twoz + twoz), z*z/(twoz + twoz))
for k in 1:maxit
_f = (k*_f + _p + _q)/(_floatk^2 - v2)
_p /= (_floatk-v)
_q /= (_floatk+v)
ck = _z/factk
# update term for besselk(v, z).
term_v = ck*_f
ser_kv += term_v
# update term for besselk(v+1, z).
term_vp1 = ck*(_p - k*_f)
ser_kvp1 += term_vp1
if max(abs(term_v), abs(term_vp1)) < tol
if !modify
return (ser_kv, ser_kvp1*_2dz)
else
return (ser_kv, ser_kvp1*2) # note that I'm multiplying by a z, so cancel manually.
end
end
_floatk += onez
factk *= _floatk
_z *= _zd4
end
throw(error("Term tolerance $tol not reached in $maxit iters for (v,z) = ($v, $z)."))
end
# NOTE: In the modified scaling of (x^v)*besselk(v,x), there is a problem for
# integer v: (x^0)*besselk(0,x) is just besselk(0,x). And that is still Inf for
# x=0. Which really breaks the whole strategy of integer derivatives for the
# rescaled Bessel here. BUT: there is a workaround! You don't actually need
# besselk(0, x) for the modified recurrence, so we just throw away that value.
function _besselk_temme(v, z, maxit, tol, mod)
@assert v > -1/2 "This routine does not presently handle the case of v > -1/2."
_p = floor(v)
(v - _p > 1/2) && (_p += one(_p))
vf = v - _p
twov = one(v)+one(v)
_v = vf
kvp2 = zero(v)
(kv, kvp1) = temme_pair(_v, z, maxit, tol, mod)
# check if any recurrence is necessary:
v <= one(v)/twov && return kv
v <= (twov + one(v))/twov && return kvp1
# if it is necessary, perform it:
if mod
# not necessarily the "right" way to handle this, but seems to stop the
# propagation of NaNs in AD.
#
# a slightly different recurrence for (x^v)*besselk(v,x).
if _iszero(z)
# special case for z=0:
for _ in 1:(Int(_p)-1)
kvp2 = twov*(_v+one(v))*kvp1
_v += one(v)
kv = kvp1
kvp1 = kvp2
end
else
z2 = z*z
for _ in 1:(Int(_p)-1)
kvp2 = twov*(_v+one(v))*kvp1 + z2*kv
_v += one(v)
kv = kvp1
kvp1 = kvp2
end
end
else
for _ in 1:(Int(_p)-1)
kvp2 = ((twov*(_v+one(v)))/z)*kvp1 + kv
_v += one(v)
kv = kvp1
kvp1 = kvp2
end
end
kvp2
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 2778 |
#
# The below file is adapted from Bessels.jl
# (https://github.com/JuliaMath/Bessels.jl/blob/master/src/gamma.jl),
# with this particular implementation entirely due to Michael Helton.
# In Bessels.jl, it is hard-typed to only be F64 or F32, as they point out that
# AD of the functions is better handled by rules than by passing it through
# routines directly. This is definitely true, but for this package I don't want
# to put ForwardDiff into the dependency tree, and so I can't manually apply any
# rules. For the moment, I have removed the type restrictions.
#
##############################################################################
########################## Begin file ########################################
##############################################################################
# Adapted from Cephes Mathematical Library (MIT license https://en.smath.com/view/CephesMathLibrary/license) by Stephen L. Moshier
const SQ2PI = 2.5066282746310007
function _gamma(x)
if x > zero(x)
return _gamma_pos(x)
else
isinteger(x) && throw(DomainError(x, "NaN result for non-NaN input."))
xp1 = abs(x) + 1.0
return π / sinpi(xp1) / _gamma_pos(xp1)
end
end
# only a Float64 implementation is provided
function _gamma_pos(x)
isnan(x) && return x
if x > 11.5
return large_gamma(x)
else
return small_gamma(x)
end
end
function large_gamma(x::T) where{T}
isinf(x) && return x
w = inv(x)
s = (
8.333333333333331800504e-2, 3.472222222230075327854e-3, -2.681327161876304418288e-3, -2.294719747873185405699e-4,
7.840334842744753003862e-4, 6.989332260623193171870e-5, -5.950237554056330156018e-4, -2.363848809501759061727e-5,
7.147391378143610789273e-4
)
w = w * evalpoly(w, s) + one(T)
# lose precision on following block
y = exp(x)
# avoid overflow
v = x^(0.5 * x - 0.25)
y = v * (v / y)
return SQ2PI * y * w
end
function small_gamma(x::T) where{T}
P = (
1.000000000000000000009e0, 8.378004301573126728826e-1, 3.629515436640239168939e-1, 1.113062816019361559013e-1,
2.385363243461108252554e-2, 4.092666828394035500949e-3, 4.542931960608009155600e-4, 4.212760487471622013093e-5
)
Q = (
9.999999999999999999908e-1, 4.150160950588455434583e-1, -2.243510905670329164562e-1, -4.633887671244534213831e-2,
2.773706565840072979165e-2, -7.955933682494738320586e-4, -1.237799246653152231188e-3, 2.346584059160635244282e-4,
-1.397148517476170440917e-5
)
z = one(T)
while x >= 3.0
x -= one(T)
z *= x
end
while x < 2.0
z /= x
x += one(T)
end
x -= T(2)
p = evalpoly(x, P)
q = evalpoly(x, Q)
return z * p / q
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 752 |
# A standard Matern covariance function. This is not the best parameterization,
# but it is the most popular, so here we are.
_norm(t) = sqrt(sum(z->z*z, t))
"""
matern(x, y, params)
computes \\mathcal{M}_{ν}(||x-y||), where params = (σ, ρ, ν) and \\mathcal{M}_{ν} is the Matern covariance function, parameterized as
\\mathcal{M}_{ν}(t) = σ^2 2^{1 - ν} Γ(ν)^{-1} (\\sqrt{2 ν} t / ρ)^{ν} \\mathcal{K}_{ν}(\\sqrt{2 ν} t / ρ).
For more information, see Stein (1999), Interpolation of Spatial Data: Some Theory for Kriging.
"""
function matern(x, y, params)
(sg, rho, nu) = (params[1], params[2], params[3])
dist = _norm(x-y)
_iszero(dist) && return sg^2
arg = sqrt(2*nu)*dist/rho
(sg*sg*(2^(1-nu))/_gamma(nu))*adbesselkxv(nu, arg)
end
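# A usage sketch (inputs here are arbitrary/illustrative):
#   matern([0.0, 0.0], [1.0, 1.0], [1.0, 0.5, 1.25])
# evaluates the covariance of two points in R^2 with (σ, ρ, ν) = (1, 0.5, 1.25),
# and since adbesselkxv is AD-friendly, ForwardDiff can differentiate this in
# all three parameters.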
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 9265 |
struct UkPolynomial{N,P}
coef_skip_zeros::P
constant::Float64
end
function UkPolynomial(coefv::Vector{T}) where{T}
# find the first non-zero coefficient, assuming the first coefficient is a
# zero-order constant:
first_nonzero_index = findfirst(!iszero, coefv)
# now take the coefficients that are not zero, which is every other one:
if first_nonzero_index == 1
constant = coefv[1]
next_nonzero_index = findfirst(!iszero, coefv[2:end])
if isnothing(next_nonzero_index)
if length(coefv) > 1
throw(error("These coefficients don't correspond to a U_k polynomial."))
end
return UkPolynomial{0,Nothing}(nothing, coefv[1])
end
first_nonzero_index = next_nonzero_index+1
else
constant = 0.0
end
nzcoef = vcat(zero(T), coefv[first_nonzero_index:2:end])
coef_skip_zeros = length(nzcoef) < 20 ? tuple(nzcoef...) : nzcoef
(N,P) = (first_nonzero_index-1, typeof(coef_skip_zeros))
UkPolynomial{N,P}(coef_skip_zeros, constant)
end
# the case of a simple polynomial with no leading zeros.
(Uk::UkPolynomial{0,P})(x) where{P} = evalpoly(x^2, Uk.coef_skip_zeros)/x + Uk.constant
# the case of a zero-order polynomial:
(Uk::UkPolynomial{0,Nothing})(x) = Uk.constant
# the nontrivial case of a polynomial that DOES have leading zeros:
function (Uk::UkPolynomial{N,P})(x) where{N,P}
pre_multiply = x^(N-2)
pv = evalpoly(x^2, Uk.coef_skip_zeros)
pre_multiply*pv + Uk.constant
end
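# As a concrete example of the storage scheme: uk_1_poly below is built from
# [0.0, 0.125, 0.0, -0.20833...], so the constructor records N = 1 and the
# nonzero (every-other) coefficients, and uk_1_poly(p) evaluates
# 0.125p - 0.20833p^3 via one evalpoly call in p^2 plus a monomial factor.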
const uk_0_poly=UkPolynomial([1.0, ])
const uk_1_poly=UkPolynomial([0.0, 0.125, 0.0, -0.20833333333333334, ])
const uk_2_poly=UkPolynomial([0.0, 0.0, 0.0703125, 0.0, -0.4010416666666667, 0.0, 0.3342013888888889, ])
const uk_3_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0732421875, 0.0, -0.8912109375, 0.0, 1.8464626736111112, 0.0, -1.0258125964506173, ])
const uk_4_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.112152099609375, 0.0, -2.3640869140625, 0.0, 8.78912353515625, 0.0, -11.207002616222995, 0.0, 4.669584423426247, ])
const uk_5_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.22710800170898438, 0.0, -7.368794359479631, 0.0, 42.53499874538846, 0.0, -91.81824154324003, 0.0, 84.63621767460074, 0.0, -28.212072558200244, ])
const uk_6_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5725014209747314, 0.0, -26.491430486951554, 0.0, 218.1905117442116, 0.0, -699.5796273761327, 0.0, 1059.9904525279999, 0.0, -765.2524681411816, 0.0, 212.5701300392171, ])
const uk_7_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.7277275025844574, 0.0, -108.09091978839464, 0.0, 1200.9029132163525, 0.0, -5305.646978613405, 0.0, 11655.393336864536, 0.0, -13586.550006434136, 0.0, 8061.722181737308, 0.0, -1919.4576623184068, ])
const uk_8_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.074042001273483, 0.0, -493.915304773088, 0.0, 7109.514302489364, 0.0, -41192.65496889756, 0.0, 122200.46498301747, 0.0, -203400.17728041555, 0.0, 192547.0012325315, 0.0, -96980.5983886375, 0.0, 20204.29133096615, ])
const uk_9_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 24.380529699556064, 0.0, -2499.830481811209, 0.0, 45218.76898136274, 0.0, -331645.1724845636, 0.0, 1.2683652733216248e6, 0.0, -2.813563226586534e6, 0.0, 3.763271297656404e6, 0.0, -2.998015918538106e6, 0.0, 1.311763614662977e6, 0.0, -242919.18790055133, ])
const uk_10_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 110.01714026924674, 0.0, -13886.089753717039, 0.0, 308186.40461266245, 0.0, -2.785618128086455e6, 0.0, 1.328876716642182e7, 0.0, -3.756717666076335e7, 0.0, 6.634451227472903e7, 0.0, -7.410514821153264e7, 0.0, 5.095260249266463e7, 0.0, -1.970681911843222e7, 0.0, 3.2844698530720375e6, ])
const uk_11_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 551.3358961220206, 0.0, -84005.43360302408, 0.0, 2.24376817792245e6, 0.0, -2.4474062725738734e7, 0.0, 1.420629077975331e8, 0.0, -4.958897842750303e8, 0.0, 1.1068428168230145e9, 0.0, -1.621080552108337e9, 0.0, 1.5535968995705795e9, 0.0, -9.39462359681578e8, 0.0, 3.255730741857656e8, 0.0, -4.932925366450995e7, ])
const uk_12_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3038.0905109223845, 0.0, -549842.3275722886, 0.0, 1.739510755397817e7, 0.0, -2.2510566188941535e8, 0.0, 1.5592798648792577e9, 0.0, -6.563293792619284e9, 0.0, 1.79542137311556e10, 0.0, -3.302659974980072e10, 0.0, 4.128018557975397e10, 0.0, -3.463204338815877e10, 0.0, 1.868820750929582e10, 0.0, -5.866481492051846e9, 0.0, 8.14789096118312e8, ])
const uk_13_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 18257.75547429318, 0.0, -3.871833442572612e6, 0.0, 1.4315787671888906e8, 0.0, -2.167164983223796e9, 0.0, 1.763473060683497e10, 0.0, -8.786707217802325e10, 0.0, 2.879006499061506e11, 0.0, -6.453648692453765e11, 0.0, 1.008158106865382e12, 0.0, -1.098375156081223e12, 0.0, 8.19218669548577e11, 0.0, -3.990961752244664e11, 0.0, 1.1449823773202577e11, 0.0, -1.4679261247695614e10, ])
const uk_14_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 118838.42625678328, 0.0, -2.918838812222081e7, 0.0, 1.247009293512711e9, 0.0, -2.1822927757529232e10, 0.0, 2.0591450323241003e11, 0.0, -1.1965528801961816e12, 0.0, 4.612725780849132e12, 0.0, -1.2320491305598287e13, 0.0, 2.334836404458184e13, 0.0, -3.1667088584785152e13, 0.0, 3.056512551993531e13, 0.0, -2.051689941093443e13, 0.0, 9.109341185239896e12, 0.0, -2.406297900028503e12, 0.0, 2.8646403571767896e11, ])
const uk_15_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 832859.3040162894, 0.0, -2.3455796352225152e8, 0.0, 1.1465754899448242e10, 0.0, -2.2961937296824658e11, 0.0, 2.4850009280340854e12, 0.0, -1.663482472489248e13, 0.0, 7.437312290867914e13, 0.0, -2.3260483118893994e14, 0.0, 5.230548825784446e14, 0.0, -8.574610329828949e14, 0.0, 1.0269551960827622e15, 0.0, -8.894969398810261e14, 0.0, 5.427396649876595e14, 0.0, -2.2134963870252512e14, 0.0, 5.417751075510603e13, 0.0, -6.019723417234003e12, ])
const uk_16_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.252951493434797e6, 0.0, -2.0016469281917765e9, 0.0, 1.1099740513917906e11, 0.0, -2.5215584749128555e12, 0.0, 3.1007436472896465e13, 0.0, -2.3665253045164925e14, 0.0, 1.2126758042503475e15, 0.0, -4.3793258383640155e15, 0.0, 1.1486706978449752e16, 0.0, -2.226822513391114e16, 0.0, 3.213827526858623e16, 0.0, -3.4447226006485136e16, 0.0, 2.705471130619707e16, 0.0, -1.5129826322457674e16, 0.0, 5.705782159023669e15, 0.0, -1.301012723549699e15, 0.0, 1.3552215870309362e14, ])
const uk_17_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0069589531988926e7, 0.0, -1.807822038465807e10, 0.0, 1.1287091454108745e12, 0.0, -2.886383763141477e13, 0.0, 4.000444570430363e14, 0.0, -3.450385511846272e15, 0.0, 2.0064271476309532e16, 0.0, -8.270945651585064e16, 0.0, 2.4960365126160426e17, 0.0, -5.62631788074636e17, 0.0, 9.575335098169137e17, 0.0, -1.233611693196069e18, 0.0, 1.1961991142756303e18, 0.0, -8.592577980317544e17, 0.0, 4.4347954614171885e17, 0.0, -1.5552983504313898e17, 0.0, 3.3192764720355212e16, 0.0, -3.2541926196426675e15, ])
const uk_18_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.2593921650476694e8, 0.0, -1.7228323871735056e11, 0.0, 1.2030115826419195e13, 0.0, -3.4396530474307606e14, 0.0, 5.33510697870884e15, 0.0, -5.160509319348522e16, 0.0, 3.376676249790609e17, 0.0, -1.5736434765189596e18, 0.0, 5.402894876715981e18, 0.0, -1.3970803516443374e19, 0.0, 2.7572829816505184e19, 0.0, -4.1788614446568374e19, 0.0, 4.859942729324835e19, 0.0, -4.301555703831442e19, 0.0, 2.8465212251676553e19, 0.0, -1.3639420410571586e19, 0.0, 4.4702009640123085e18, 0.0, -8.966114215270461e17, 0.0, 8.301957606731907e16, ])
const uk_19_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.836255180230434e9, 0.0, -1.7277040123530002e12, 0.0, 1.3412416915180642e14, 0.0, -4.2619355104269e15, 0.0, 7.351663610930973e16, 0.0, -7.921651119323832e17, 0.0, 5.789887667664653e18, 0.0, -3.0255665989903716e19, 0.0, 1.1707490535797255e20, 0.0, -3.434621399768417e20, 0.0, 7.756704953461136e20, 0.0, -1.3602037772849937e21, 0.0, 1.8571089321463448e21, 0.0, -1.9677247077053117e21, 0.0, 1.601689857369359e21, 0.0, -9.824438427689853e20, 0.0, 4.39279220088871e20, 0.0, -1.3512175034359957e20, 0.0, 2.556380296052923e19, 0.0, -2.242438856186774e18, ])
const uk_20_poly=UkPolynomial([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.646840080706557e10, 0.0, -1.8187262038511043e13, 0.0, 1.5613123930484675e15, 0.0, -5.484033603883292e16, 0.0, 1.0461721131134348e18, 0.0, -1.2483700995047234e19, 0.0, 1.0126774169536592e20, 0.0, -5.891794135069496e20, 0.0, 2.548961114664971e21, 0.0, -8.40591581710835e21, 0.0, 2.1487414815055883e22, 0.0, -4.302534303482378e22, 0.0, 6.7836616429518815e22, 0.0, -8.423222750084318e22, 0.0, 8.194331005435126e22, 0.0, -6.173206302884411e22, 0.0, 3.5284358439034075e22, 0.0, -1.478774352843361e22, 0.0, 4.285296082829493e21, 0.0, -7.671943936729004e20, 0.0, 6.393286613940834e19, ])
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 953 |
using Polynomials
function Uk_polynomials(max_order)
P0 = Polynomial([1.0])
out = [P0]
mul_int = Polynomial([1.0, 0.0, -5.0])
mul_frnt = Polynomial([0.0, 0.0, 1.0, 0.0, -1.0])/2
for j in 1:max_order
Pjm1 = out[end]
Pjm1_int = integrate(mul_int*Pjm1)/8
Pjm1_drv = derivative(Pjm1)
newP = mul_frnt*Pjm1_drv + Pjm1_int - Pjm1_int(0.0)/8
push!(out, newP)
end
out
end
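# The recurrence above is the standard one for Olver's U_k polynomials from the
# uniform asymptotic expansion (cf. DLMF §10.41):
#   U_{k+1}(p) = (1/2) p^2 (1 - p^2) U_k'(p) + (1/8) ∫_0^p (1 - 5t^2) U_k(t) dt,
# starting from U_0(p) = 1.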
open("uk_polys.jl", "w") do out
uk_polys = Uk_polynomials(20)
names = String[]
redirect_stdout(out) do
run(`cat ukpoly.jl`)
println("\n\n\n")
for (j,pj) in enumerate(uk_polys)
c = pj.coeffs
stem = string("uk_", j-1)
pnm = string(stem, "_poly")
push!(names, string(pnm, ","))
str1 = string("const ", pnm, "=UkPolynomial([", map(x->string(x, ", "), c)..., "])")
println(str1)
end
println()
println(string("const UK_POLYS = [", reduce(*, names), "]"))
println()
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1450 |
struct UkPolynomial{N,P}
coef_skip_zeros::P
constant::Float64
end
function UkPolynomial(coefv::Vector{T}) where{T}
# find the first non-zero coefficient, assuming the first coefficient is a
# zero-order constant:
first_nonzero_index = findfirst(!iszero, coefv)
# now take the coefficients that are not zero, which is every other one:
if first_nonzero_index == 1
constant = coefv[1]
next_nonzero_index = findfirst(!iszero, coefv[2:end])
if isnothing(next_nonzero_index)
if length(coefv) > 1
throw(error("These coefficients don't correspond to a U_k polynomial."))
end
return UkPolynomial{0,Nothing}(nothing, coefv[1])
end
first_nonzero_index = next_nonzero_index+1
else
constant = 0.0
end
nzcoef = vcat(zero(T), coefv[first_nonzero_index:2:end])
coef_skip_zeros = length(nzcoef) < 20 ? tuple(nzcoef...) : nzcoef
(N,P) = (first_nonzero_index-1, typeof(coef_skip_zeros))
UkPolynomial{N,P}(coef_skip_zeros, constant)
end
# the case of a simple polynomial with no leading zeros.
(Uk::UkPolynomial{0,P})(x) where{P} = evalpoly(x^2, Uk.coef_skip_zeros)/x + Uk.constant
# the case of a zero-order polynomial:
(Uk::UkPolynomial{0,Nothing})(x) = Uk.constant
# the nontrivial case of a polynomial that DOES have leading zeros:
function (Uk::UkPolynomial{N,P})(x) where{N,P}
pre_multiply = x^(N-2)
pv = evalpoly(x^2, Uk.coef_skip_zeros)
pre_multiply*pv + Uk.constant
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 6683 |
#
# NOTE (cg 2022/09/09 10:48): this is my original implementation of this
# function. But Bessels.jl has been progressing and developing better routines,
# some of which are derived from this code, and so I'm now completely letting
# that package handle the case when v isa AbstractFloat. I leave this code here
# only for people coming from the paper that want to see it.
#
@inline isnearint(v, tol) = abs(v-round(v)) < tol
# TODO (cg 2021/10/22 17:04): The cutoffs chosen here are actually to some
# degree chosen as a balance between pointwise accuracy AND AD derivative
# accuracy. For example, the series gets worse faster for derivatives than it
# does for raw evals. Not entirely sure what to make of that, but here we are.
#
# TODO (cg 2021/10/27 10:28): the weak point is clearly with |z| between, say, 8
# and 15. We only get atols on the order of 1e-12 for that, which is not quite
# good enough to declare total victory. I can only manage to match the speed of
# AMOS in that part of the domain, too.
function _besselk(v, x, maxit=100, tol=1e-12, order=6)
(x >= zero(x)) || throw(DomainError(x, "x needs to be non-negative."))
iszero(x) && return Inf
(real(v) < 0) && return _besselk(-v, x, maxit, tol, order)
# TODO (cg 2021/11/16 16:44): this is not the right way to test if you're
# doing AD. But I don't want to hard-bake the ForwardDiff.Dual type in here.
is_ad = !(v isa AbstractFloat)
# Special cases, now just half-integers:
#
# TODO (cg 2021/12/16 12:41): for the moment, specifically at half
# integer values the second derivatives are more accurate using the direct
# series, even at values in which for direct evaluations v is large enough
# that the series isn't so great. It isn't earth-shattering and the temme one
# is much more expensive (300 ns vs 100 ns on my machine), but in the interest
# of maximum safety I'm switching to this behavior.
#
# What would really be nice is some way if checking if (v isa Dual{T,V,N}
# where T<: Dual). But again, I'm worried about getting too stuck with
# ForwardDiff.
if isinteger(v-1/2)# && (x < 8.5)
if is_ad && (x < 8.5)
return _besselk_ser(v, x, maxit, tol, false)
elseif !is_ad
# Probably don't need the is_ad correction here.
return _besselk_as(v, x, Int(ceil(v)), false)
end
end
#
# General cases:
#
# TODO (cg 2021/11/01 18:03): These branches are not perfect. If you go into
# ./testing/accuracy.jl and track down the largest rtols between this code and
# AMOS, you will be able to fiddle around with what version you use and get a
# better rtol/atol. But I'm at the point where whenever I tweak something like
# that, something else gets worse and makes it a wash. I think for the time
# being I have to stop playing with things.
if abs(x) < 8.5 # (x < 9)
if (v > 2.85) || isnearint(v, 0.01) || ((x > 4) && is_ad)
return _besselk_temme(v,x,maxit,tol,false) # direct series.
else
return _besselk_ser(v,x,maxit,tol,false) # direct series.
end
elseif abs(x) < 15.0
return _besselk_asv(v,x,12) # uniform large order expn.
elseif abs(x) < 30.0
return _besselk_asv(v,x,8)
else
if abs(v) > 1.5 || is_ad
return _besselk_asv(v,x,6)
else
if is_ad
return _besselk_as(v,x,order)
else
return _besselk_as(v,x,order,false)
end
end
end
end
# A very simple wrapper that will use AMOS when possible, but fall back to this
# code. So now you can use AD on this but still get AMOS for direct evals.
#
# TODO (cg 2021/11/05 16:57): what's the most sensible naming thing here?
# Calling it besselk and not exporting it seems reasonable enough, but users
# will obviously want to import it. So not obvious what's best to do here.
function adbesselk(v, x, maxit=100, tol=1e-12, order=5)
if (v isa AbstractFloat) && isinteger(v) && in(typeof(x), (Float32, Float64))
return _besselk_int(v, x)
elseif (v isa AbstractFloat) && isinteger(v-1/2) && in(typeof(x), (Float32, Float64))
return _besselk_halfint(v, x)
elseif v isa AbstractFloat
SpecialFunctions.besselk(v, x)
else
_besselk(v, x, maxit, tol)
end
end
# Not exactly a taylor series, but accurate enough.
#
# TODO (cg 2021/11/10 18:33): an enhancement here would be something that also
# worked for integers. But that seems hard. I did the whole Temme thing because
# integers are hard. But as it turns out, we really need it, because the Temme
# recursion depends on (x^v)*besselk(v,x) ->_{z->0} some finite number. But for
# v=0, which is needed to compute for _all_ integer v, that doesn't hold! An
# expansion like this that is valid for all v, including integer v, but is ALSO
# AD-compatible would take care of that entirely, because the K_{v+1}(x) term in
# the Temme series doesn't need f0.
@inline function besselkxv_t0(v, x)
gv = gamma(v)
_2v = 2^(v-1)
cof = (gv*_2v, zero(v), (_2v/4)*gv/(v-1), zero(v), (_2v/16)*gv/(v*v - 3*v + 2))
evalpoly(x, cof)
end
# Note that the special series cutofs are low compared to the above function. In
# general, the adbesselk* functions that are exported really should be pretty
# near machine precision here or should just fall back to AMOS when possible. It
# turns out that the *xv modifications in the code are really only helpful when
# the argument is pretty small.
function adbesselkxv(v, x, maxit=100, tol=1e-12, order=5)
(iszero(v) && iszero(x)) && return Inf
is_ad = !(v isa AbstractFloat)
xcut = is_ad ? 6.0 : 2.0
if !isinteger(v) && (abs(x) <= 1e-8) # use Taylor at zero.
return besselkxv_t0(v, x)
elseif (x < xcut) && (v < 5.75) && !isnearint(v, 0.01)
return _besselk_ser(v, x, maxit, tol, true)
elseif (x < xcut)
return _besselk_temme(v, x, maxit, tol, true)
elseif is_ad && (x > xcut) && (x < 15.0)
return _besselk_asv(v, x, 12, true)
else
return adbesselk(v, x, maxit, tol, order)*(x^v)
end
end
# Unlike adbesselkxv, this function is pure BesselK.jl, including the cases in
# which special branches for (x^v)*besselk(v,x) don't come up. This is primarily
# used for testing.
function _besselkxv(v, x, maxit=100, tol=1e-12, order=5)
(iszero(v) && iszero(x)) && return Inf
is_ad = !(v isa AbstractFloat)
xcut = is_ad ? 6.0 : 2.0
if !isinteger(v) && (abs(x) <= 1e-8) # use Taylor at zero.
return besselkxv_t0(v, x)
elseif (x < xcut) && (v < 5.75) && !isnearint(v, 0.01)
return _besselk_ser(v, x, maxit, tol, true)
elseif (x < xcut)
return _besselk_temme(v, x, maxit, tol, true)
elseif is_ad && (x > xcut) && (x < 15.0)
return _besselk_asv(v, x, 12, true)
else
return _besselk(v, x, maxit, tol, order)*(x^v)
end
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 3036 |
# A maximally fast upper branch incomplete gamma function when the first
# argument is a non-positive integer.
#
# (_gamma_upper_negative_integer)
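# Concretely, for s = -n with n >= 0, and writing g0 = Γ(0, x):
#   Γ(-n, x) = ( (e^(-x)/x^n) * Σ_{j=0}^{n-1} (-1)^j (n-1-j)! x^j
#                + (-1)^n * g0 ) / n!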
@inline function _g_u_n_i(s::Int64, x, g0, expnx)
n = -s
ser = zero(x)
_x = one(x)
_sgn = copy(_x)
facn = convert(typeof(x), factorial(n))
fac = facn/n
for k in 0:(n-1)
term = _sgn*fac*_x
ser += term
_x *= x
_sgn *= -one(x)
fac /= (n-k-1)
end
((expnx/_x)*ser + _sgn*g0)/facn
end
# The best speed I was able to accomplish with this thing was just to
# compartmentalize it into its own function. Definitely not ideal, but so it is.
#
# TODO (cg 2021/10/26 15:58): get rid of all the remaining factorial calls so
# that this won't literally break for order greater than, like, 19.
function exponential_improvement(v, x, l, m)
onex = one(x)
twox = onex+onex
fv = 4*v*v
ser = zero(x)
_z = x
floatj = onex
ak_num = fv - floatj
factj = onex
twofloatj = onex
eightj = 8
expx = exp(2*x)
expnx = exp(-2*x)
expintx = -expinti(-2*x)
ser = expx*factorial(l-1)*_g_u_n_i(1-l, 2*x, expintx, expnx)/(2*pi)
for j in 1:(m-1)
# add to the series:
s = Int(l-j)
_g = expx*factorial(Int(s-1))*_g_u_n_i(1-s, 2*x, expintx, expnx)/(2*pi)
term = ak_num/(factj*_z*eightj)*_g
ser += term
# update ak and _z:
floatj += onex
twofloatj += twox
factj *= floatj
ak_num *= (fv - twofloatj^2)
_z *= x
eightj *= 8
end
_sgn = isodd(l) ? -one(x) : one(x)
ser*_sgn*twox*cospi(v)
end
function _besselk_as(v, x, order, use_remainder=true, modify=false)
onex = one(x)
twox = onex+onex
fv = 4*v*v
ser = zero(x)
_z = x
ser = onex #zero(x) #onex/_z
floatj = onex
ak_num = fv - floatj
factj = onex
twofloatj = onex
eightj = 8
for j in 1:order
# add to the series:
term = ak_num/(factj*_z*eightj)
ser += term
# update ak and _z:
floatj += onex
twofloatj += twox
factj *= floatj
ak_num *= (fv - twofloatj^2)
_z *= x
eightj *= 8
end
if use_remainder
_rem = exponential_improvement(v, x, Int(order+1), Int(order))
ser += _rem
end
# if you're modifying as (x^v)*besselk(v,x), since the series part is pretty
# stable numerically, what we want to deal with is the (x^v)*exp(-x). That's
# the problem of potentially huge*tiny.
if modify
mulval = exp(v*log(x)-x)
else
mulval = exp(-x)
end
sqrt(pi/(x*twox))*mulval*ser
end
# A refined version of my (CG) base version, thanks to Michael Helton and Oscar Smith (see https://github.com/heltonmc/Bessels.jl/issues/25)
SQRT_PID2(::Type{Float64}) = 1.2533141373155003 # sqrt(pi/2); methods are implicitly const
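# Half-integer orders have a closed form: K_{1/2}(x) = sqrt(pi/(2x))*exp(-x),
# and the loop below climbs orders with the standard recurrence
#   K_{v+1}(x) = (2v/x) K_v(x) + K_{v-1}(x),
# seeded with b0 = K_{-1/2} = K_{1/2}.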
function _besselk_halfint(v::T, x) where{T}
v = abs(v)
invx = inv(x)
b0 = b1 = SQRT_PID2(Float64)*sqrt(invx)*exp(-x)
twodx = 2*invx
_v = T(1/2)
while _v < v
b0, b1 = b1, muladd(b1, twodx*_v, b0)
_v += one(T)
end
b1
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 4735 |
using Test, BenchmarkTools, BesselK, SpecialFunctions, FiniteDifferences, ForwardDiff
const VGRID = range(0.25, 10.0, length=100)
const XGRID = range(0.0, 50.0, length=201)[2:end]
const VX = collect(Iterators.product(VGRID, XGRID))
const REF_FD1 = central_fdm(10,1)
const REF_FD2 = central_fdm(10,2)
atolfun(tru, est) = isnan(est) ? NaN : (isinf(tru) ? 0.0 : abs(tru-est))
besselkxv(v,x) = besselk(v,x)*(x^v)
fd_dbesselk_dv(v, x) = REF_FD1(_v->besselk(_v, x), v)
fd2_dbesselk_dv_dv(v, x) = REF_FD2(_v->besselk(_v, x), v)
fd_dbesselkxv_dv(v, x) = REF_FD1(_v->besselkxv(_v, x), v)
fd2_dbesselkxv_dv_dv(v, x) = REF_FD2(_v->besselkxv(_v, x), v)
ad_dbesselk_dv(v, x) = ForwardDiff.derivative(_v->adbesselk(_v, x), v)
ad2_dbesselk_dv_dv(v, x) = ForwardDiff.derivative(_v->ad_dbesselk_dv(_v, x), v)
ad_dbesselkxv_dv(v, x) = ForwardDiff.derivative(_v->adbesselkxv(_v, x), v)
ad2_dbesselkxv_dv_dv(v, x) = ForwardDiff.derivative(_v->ad_dbesselkxv_dv(_v, x), v)
# direct accuracy:
@testset "direct eval" begin
println("\nDirect evaluations:")
for (ref_fn, cand_fn, case) in ((besselk, adbesselk, :standard),
(besselkxv, adbesselkxv, :rescaled))
amos_ref = map(vx->ref_fn(vx[1], vx[2]), VX)
candidate = map(vx->cand_fn(vx[1], vx[2]), VX)
atols = map(a_c->atolfun(a_c[1], a_c[2]), zip(amos_ref, candidate))
ix = findall(x-> x <= 1000.0, amos_ref)
thresh = case == :standard ? 5e-11 : 2e-12
(maxerr, maxix) = findmax(abs, atols[ix])
(maxerr_v, maxerr_x) = VX[ix][maxix]
println("Case $case:")
println("worst (v,x): ($maxerr_v, $maxerr_x)")
println("Ref value: $(amos_ref[ix][maxix])")
println("Est value: $(candidate[ix][maxix])")
println("Abs error: $(round(maxerr, sigdigits=3))")
@test maxerr < thresh
end
println()
end
# test derivative accuracy:
@testset "first derivative" begin
println("\nFirst derivatives:")
for (ref_fn, cand_fn, case) in ((fd_dbesselk_dv, ad_dbesselk_dv, :standard),
(fd_dbesselkxv_dv, ad_dbesselkxv_dv, :rescaled))
amos_ref = map(vx->ref_fn(vx[1], vx[2]), VX)
candidate = map(vx->cand_fn(vx[1], vx[2]), VX)
atols = map(a_c->atolfun(a_c[1], a_c[2]), zip(amos_ref, candidate))
ix = findall(x-> x <= 1000.0, amos_ref)
thresh = case == :standard ? 4e-9 : 2e-6
(maxerr, maxix) = findmax(abs, atols[ix])
(maxerr_v, maxerr_x) = VX[ix][maxix]
println("Case $case:")
println("worst (v,x): ($maxerr_v, $maxerr_x)")
println("Ref value: $(amos_ref[ix][maxix])")
println("Est value: $(candidate[ix][maxix])")
println("Abs error: $(round(maxerr, sigdigits=3))")
@test maxerr < thresh
end
println()
end
# test second derivative accuracy:
@testset "second derivative" begin
println("\nSecond derivatives:")
for (ref_fn, cand_fn, case) in ((fd2_dbesselk_dv_dv, ad2_dbesselk_dv_dv, :standard),
(fd2_dbesselkxv_dv_dv, ad2_dbesselkxv_dv_dv, :rescaled))
amos_ref = map(vx->ref_fn(vx[1], vx[2]), VX)
candidate = map(vx->cand_fn(vx[1], vx[2]), VX)
atols = map(a_c->atolfun(a_c[1], a_c[2]), zip(amos_ref, candidate))
ix = findall(x-> x <= 100.0, amos_ref)
thresh = case == :standard ? 5e-7 : 5e-6
(maxerr, maxix) = findmax(abs, atols[ix])
(maxerr_v, maxerr_x) = VX[ix][maxix]
println("Case $case:")
println("worst (v,x): ($maxerr_v, $maxerr_x)")
println("Ref value: $(amos_ref[ix][maxix])")
println("Est value: $(candidate[ix][maxix])")
println("Abs error: $(round(maxerr, sigdigits=3))")
@test maxerr < thresh
end
println()
end
# Testing the _xv versions really slows down the test script, and in general
# there are no new routines being exercised there.
@testset "confirm no allocations" begin
VGRID_ALLOC = (0.25, 1.0-1e-8, 1.0, 1.5, 2.1, 3.0, 3.5, 4.8)
XGRID_ALLOC = range(0.0, 50.0, length=11)[2:end]
VX_ALLOC = collect(Iterators.product(VGRID_ALLOC, XGRID_ALLOC))
ad_alloc_test(v,x) = @ballocated ad_dbesselk_dv($v,$x) samples=1
ad2_alloc_test(v,x) = @ballocated ad2_dbesselk_dv_dv($v,$x) samples=1
ad_alloc_test_xv(v,x) = @ballocated ad_dbesselkxv_dv($v,$x) samples=1
ad2_alloc_test_xv(v,x) = @ballocated ad2_dbesselkxv_dv_dv($v,$x) samples=1
ad_allocs = map(vx->ad_alloc_test(vx[1], vx[2]), VX_ALLOC)
ad2_allocs = map(vx->ad2_alloc_test(vx[1], vx[2]), VX_ALLOC)
ad_allocs_xv = map(vx->ad_alloc_test_xv(vx[1], vx[2]), VX_ALLOC)
ad2_allocs_xv = map(vx->ad2_alloc_test_xv(vx[1], vx[2]), VX_ALLOC)
@test all(iszero, ad_allocs)
@test all(iszero, ad2_allocs)
@test all(iszero, ad_allocs_xv)
@test all(iszero, ad2_allocs_xv)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | docs | 7541 |
# BesselK.jl
[build-latest-img]: https://github.com/cgeoga/BesselK.jl/workflows/CI/badge.svg
[build-url]: https://github.com/cgeoga/BesselK.jl/actions?query=workflow
[![][build-latest-img]][build-url]
This package implements one function: the modified second-kind Bessel function
Kᵥ(x). It is designed specifically to be automatically differentiable **with
ForwardDiff.jl**, including providing derivatives with respect to the order
parameter `v` **that are fast and non-allocating in the entire domain for both
first and second order**.
Derivatives with respect to ν are significantly faster than any finite
differencing method, including the most naive fixed-step minimum-order method,
and in almost all of the domain are meaningfully more accurate. Particularly
near the origin you should expect to gain at least 3-5 digits. Second
derivatives are even more dramatic, both in terms of the speedup and accuracy
gains, now commonly giving 10+ more digits of accuracy.
As a happy accident/side-effect, if you're willing to give up the last couple
digits of accuracy, you could also use `ForwardDiff.jl` on this code for
derivatives with respect to argument for an order-of-magnitude speedup. In some
casual testing the argument-derivative errors with this code are never worse
than `1e-12`, and they turn 1.4 μs with allocations into 140 ns without any
allocations.
In order to avoid naming conflicts with other packages, this package exports
three functions:
* `matern`: the Matern covariance function in its most common parameterization.
See the docstrings for more info.
* `adbesselk`: Gives Kᵥ(x), using `Bessels.jl` if applicable and our more
specialized order-AD codes otherwise.
* `adbesselkxv`: Gives Kᵥ(x)*(x^v), using `Bessels.jl` if applicable and our
more specialized order-AD codes otherwise.
Here is a very basic demo:
```julia
using ForwardDiff, SpecialFunctions, BesselK
(v, x) = (1.1, 2.1)
# For regular evaluations, you get what you're used to getting:
@assert isapprox(besselk(v, x), adbesselk(v, x))
@assert isapprox((x^v)*besselk(v, x), adbesselkxv(v, x))
# But now you also get good (and fast!) derivatives:
@show ForwardDiff.derivative(_v->adbesselk(_v, x), v) # good to go.
@show ForwardDiff.derivative(_v->adbesselkxv(_v, x), v) # good to go.
```
# A note to people coming here from the paper
You'll see that this repo defines a great deal of specific derivative functions
in the files in `./paperscripts`. **This is only because we specifically tested
those quantities in the paper**. If you're just here to fit a Matern covariance
function, then you should **not** be doing that. Your code, at least in the
simplest case, should probably look more like this:
```julia
using ForwardDiff, BesselK, LinearAlgebra
function my_covariance_function(loc1, loc2, params)
... # your awesome covariance function, presumably using adbesselk somewhere.
end
const my_data = ... # load in your data
const my_locations = ... # load in your locations
# Create your likelihood and use ForwardDiff for the grad and Hessian:
function nll(params)
K = cholesky!(Symmetric([my_covariance_function(x, y, params)
for x in my_locations, y in my_locations]))
0.5*(logdet(K) + dot(my_data, K\my_data))
end
nllg(params) = ForwardDiff.gradient(nll, params)
nllh(params) = ForwardDiff.hessian(nll, params)
my_mle = some_optimizer(init_params, nll, nllg, nllh, ...)
```
Or something like that. You of course do not *have* to do it this way, and could
manually implement the gradient and Hessian of the likelihood after manually
creating derivatives of the covariance function itself (see
`./example/matern.jl` for a demo of that), and manual implementations,
particularly for the Hessian, will be faster if they are thoughtful enough. But
what I mean to emphasize here is that in general you should *not* be doing
manual chain rule or derivative computations of your covariance function itself.
Let the AD handle that for you and enjoy the power that Julia's composability
offers.
# Limitations
For the moment there are two primary limitations:
* **AD compatibility with `ForwardDiff.jl` only**. The issue here is that in one
  particular case I use a different function branch depending on whether one is
  taking a derivative with respect to `v` or just evaluating `besselk(v, x)`. The
  way that is currently checked in the code is with `if (v isa AbstractFloat)`,
  which may not work properly for other AD methods.
* **Only derivatives up to the second are checked and confirmed accurate.** The
code uses a large number of local polynomial expansions at slightly hairy
values of internal intermediate functions, and so at some sufficiently high
level of derivative those local polynomials won't give accurate partial
information.
# Also consider: `Bessels.jl`
This software package was written with the pretty specific goal of computing
derivatives of Kᵥ(x) with respect to the order using `ForwardDiff.jl`. While it
is in general a bit faster than AMOS, we give up a few digits of accuracy here
and there in the interest of better and faster derivatives. If you just want the
fastest possible Kᵥ(x) for floating point order and argument (as in, you don't
need to do AD), then you would probably be better off using
[`Bessels.jl`](https://github.com/heltonmc/Bessels.jl).
This code now uses `Bessels.jl` whenever possible, so now the only question is
really about whether you need AD. If you need AD with respect to order, use this
package. If you don't, then this package offers nothing beyond what `Bessels.jl`
does.
# Implementation details
See the reference for an entire paper discussing the implementation. But in a
word, this code uses several routines to evaluate Kᵥ accurately on different
parts of the domain, and has to use some non-standard techniques to maintain AD
compatibility and correctness. When `v` is an integer or half-integer, for
example, a lot of additional work is required.
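
As a small illustration (a hypothetical snippet, not taken from the package's
documentation), order derivatives remain well-defined even exactly at integer
and half-integer orders, which is where that additional work pays off:

```julia
using ForwardDiff, BesselK

f(v) = adbesselk(v, 2.0)
ForwardDiff.derivative(f, 1.0)        # exactly at an integer order
ForwardDiff.derivative(f, 0.5)        # exactly at a half-integer order
ForwardDiff.derivative(f, 1.0 - 1e-8) # very close to an integer order
```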
The code is also pretty well-optimized, and you can benchmark for yourself or
look at the paper to see that in several cases the `ForwardDiff.jl`-generated
derivatives are faster than a single call to `SpecialFunctions.besselk`. To
achieve this performance, particularly for second derivatives, some work was
required to make sure that all of the function calls are non-allocating, which
means switching from raw `Tuple`s to `Polynomial` types in places where the
polynomials are large enough and things like that. Again this arguably makes the
code look a bit disorganized or inconsistent, but to my knowledge it is all
necessary. If somebody looking at the source finds a simplification, I would
love to see it, either in terms of an issue or a PR or an email or a patch file
or anything.
# Citation
If you use this package in your research that gets compiled into some kind of
report/article/poster/etc, please cite [this paper](https://arxiv.org/abs/2201.00090):
```
@misc{GMSS_2022,
title={Fitting Mat\'ern Smoothness Parameters Using Automatic Differentiation},
author={Christopher J. Geoga and Oana Marin and Michel Schanen and Michael L. Stein},
year={2022},
journal={Statistics and Computing}
}
```
While this package ostensibly only covers a single function, putting all of this
together and making it this fast and accurate was really a lot of work. I would
*really* appreciate you citing this paper if this package was useful in your
research. Like, for example, if you used this package to fit a Matern smoothness
parameter with second order optimization methods.
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | docs | 1526 |
This folder is a sort of haphazard collection of scripts that we used in the
paper. Not all of them ended up providing results that went directly into the
paper, and a lot of them were also absorbed into the extensive testing in the
package itself. But we include them all here for the curious people who would
like to obtain results from the paper. Of course, as the versions of BesselK.jl
change the exact numbers here will also change a bit, although if I did
everything right the first tagged release of this code (or maybe the initial
commit) should be almost the exact source that was used to generate the results
in v1 of the paper.
Should anything come up that you'd like to discuss, please don't hesitate to
contact me. You can find my email addresses on my website, which you can find by
googling my name (Chris Geoga).
Some misc notes:
-- I would _not_ suggest using the fitting scripts in `./demo/` as the basis of
your own code for estimating parameters. You could certainly do much worse and
it does leverage my generic go-to package `GPMaxlik.jl`, which has a ton of nice
features (not that I'm biased). But I'd sooner suggest looking at the example
files in that repo as a template.
-- I've re-organized the code a bit and some code lives in `../examples/`. I've
done my best to make sure that all of these tests still run as they should after
the re-org, but if something doesn't, please open an issue or email me or
something. It's probably just an accident that I will be able to resolve
immediately.
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 650 | using Documenter
using ExactOptimalTransport
makedocs(;
modules=[ExactOptimalTransport],
repo="https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl/blob/{commit}{path}#L{line}",
sitename="ExactOptimalTransport.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://juliaoptimaltransport.github.io/ExactOptimalTransport.jl",
assets=String[],
),
pages=["Home" => "index.md"],
strict=true,
checkdocs=:exports,
)
deploydocs(;
repo="github.com/JuliaOptimalTransport/ExactOptimalTransport.jl",
push_preview=true,
devbranch="main",
)
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 430 | module ExactOptimalTransport
using Distances
using MathOptInterface
using Distributions
using FillArrays
using PDMats
using QuadGK
using StatsBase: StatsBase
using LinearAlgebra
using SparseArrays
export emd, emd2
export ot_cost, ot_plan, wasserstein, squared2wasserstein
export discretemeasure
const MOI = MathOptInterface
include("distances/bures.jl")
include("utils.jl")
include("exact.jl")
include("wasserstein.jl")
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 14122 | """
ot_plan(c, μ, ν; kwargs...)
Compute the optimal transport plan for the Monge-Kantorovich problem with source and target
marginals `μ` and `ν` and cost `c`.
The optimal transport plan solves
```math
\\inf_{\\gamma \\in \\Pi(\\mu, \\nu)} \\int c(x, y) \\, \\mathrm{d}\\gamma(x, y)
```
where ``\\Pi(\\mu, \\nu)`` denotes the couplings of ``\\mu`` and ``\\nu``.
See also: [`ot_cost`](@ref)
"""
function ot_plan end
"""
ot_cost(c, μ, ν; kwargs...)
Compute the optimal transport cost for the Monge-Kantorovich problem with source and target
marginals `μ` and `ν` and cost `c`.
The optimal transport cost is the scalar value
```math
\\inf_{\\gamma \\in \\Pi(\\mu, \\nu)} \\int c(x, y) \\, \\mathrm{d}\\gamma(x, y)
```
where ``\\Pi(\\mu, \\nu)`` denotes the couplings of ``\\mu`` and ``\\nu``.
See also: [`ot_plan`](@ref)
"""
function ot_cost end
#############
# Discrete OT
#############
"""
emd(μ, ν, C, optimizer)
Compute the optimal transport plan `γ` for the Monge-Kantorovich problem with source
histogram `μ`, target histogram `ν`, and cost matrix `C` of size `(length(μ), length(ν))`
which solves
```math
\\inf_{γ ∈ Π(μ, ν)} \\langle γ, C \\rangle.
```
The corresponding linear programming problem is solved with the user-provided `optimizer`.
Possible choices are `Tulip.Optimizer()` and `Clp.Optimizer()` in the `Tulip` and `Clp`
packages, respectively.
"""
function emd(μ, ν, C, model::MOI.ModelLike)
# check size of cost matrix
nμ = length(μ)
nν = length(ν)
size(C) == (nμ, nν) || error("cost matrix `C` must be of size `(length(μ), length(ν))`")
nC = length(C)
# define variables
x = MOI.add_variables(model, nC)
xmat = reshape(x, nμ, nν)
# define objective function
T = float(eltype(C))
zero_T = zero(T)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(float.(vec(C)), x), zero_T),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
# add non-negativity constraints
for xi in x
MOI.add_constraint(model, xi, MOI.GreaterThan(zero_T))
end
# add constraints for source
for (xrow, μi) in zip(eachrow(xmat), μ)
f = MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(one(μi), xi) for xi in xrow], zero(μi)
)
MOI.add_constraint(model, f, MOI.EqualTo(μi))
end
# add constraints for target
for (xcol, νi) in zip(eachcol(xmat), ν)
f = MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm(one(νi), xi) for xi in xcol], zero(νi)
)
MOI.add_constraint(model, f, MOI.EqualTo(νi))
end
# compute optimal solution
MOI.optimize!(model)
status = MOI.get(model, MOI.TerminationStatus())
status === MOI.OPTIMAL || error("failed to compute optimal transport plan: ", status)
p = MOI.get(model, MOI.VariablePrimal(), x)
γ = reshape(p, nμ, nν)
return γ
end
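# A minimal usage sketch, kept as a comment (the `Tulip` optimizer is an
# assumption here; any MOI-compatible LP solver works):
#
#     using Tulip
#     μ = fill(1/4, 4)
#     ν = fill(1/5, 5)
#     C = [abs2(i - j) for i in 1:4, j in 1:5]
#     γ = emd(μ, ν, C, Tulip.Optimizer())
#     # vec(sum(γ; dims=2)) ≈ μ and vec(sum(γ; dims=1)) ≈ ν at the optimum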
"""
emd2(μ, ν, C, optimizer; plan=nothing)
Compute the optimal transport cost (a scalar) for the Monge-Kantorovich problem with source
histogram `μ`, target histogram `ν`, and cost matrix `C` of size `(length(μ), length(ν))`
which is given by
```math
\\inf_{γ ∈ Π(μ, ν)} \\langle γ, C \\rangle.
```
The corresponding linear programming problem is solved with the user-provided `optimizer`.
Possible choices are `Tulip.Optimizer()` and `Clp.Optimizer()` in the `Tulip` and `Clp`
packages, respectively.
A pre-computed optimal transport `plan` may be provided.
"""
function emd2(μ, ν, C, optimizer; plan=nothing)
γ = if plan === nothing
# compute optimal transport plan
emd(μ, ν, C, optimizer)
else
# check dimensions
size(C) == (length(μ), length(ν)) ||
error("cost matrix `C` must be of size `(length(μ), length(ν))`")
size(plan) == size(C) || error(
"optimal transport plan `plan` and cost matrix `C` must be of the same size",
)
plan
end
return dot(γ, C)
end
###################################
# Semidiscrete and continuous 1D OT
###################################
"""
ot_plan(c, μ::ContinuousUnivariateDistribution, ν::UnivariateDistribution)
Compute the optimal transport plan for the Monge-Kantorovich problem with univariate
distributions `μ` and `ν` as source and target marginals and cost function `c` of
the form ``c(x, y) = h(|x - y|)`` where ``h`` is a convex function.
In this setting, the optimal transport plan is the Monge map
```math
T = F_\\nu^{-1} \\circ F_\\mu
```
where ``F_\\mu`` is the cumulative distribution function of `μ` and ``F_\\nu^{-1}`` is the
quantile function of `ν`.
See also: [`ot_cost`](@ref), [`emd`](@ref)
"""
function ot_plan(c, μ::ContinuousUnivariateDistribution, ν::UnivariateDistribution)
# Use T instead of γ to indicate that this is a Monge map.
T(x) = quantile(ν, cdf(μ, x))
return T
end
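# Comment-only example: for μ = Normal(0, 1) and ν = Normal(3, 2) the map
# T(x) = quantile(ν, cdf(μ, x)) reduces to the affine map x -> 3 + 2x, the
# well-known optimal map between two Gaussians under convex costs.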
"""
ot_cost(
c, μ::ContinuousUnivariateDistribution, ν::UnivariateDistribution; plan=nothing
)
Compute the optimal transport cost for the Monge-Kantorovich problem with univariate
distributions `μ` and `ν` as source and target marginals and cost function `c` of
the form ``c(x, y) = h(|x - y|)`` where ``h`` is a convex function.
In this setting, the optimal transport cost can be computed as
```math
\\int_0^1 c(F_\\mu^{-1}(x), F_\\nu^{-1}(x)) \\mathrm{d}x
```
where ``F_\\mu^{-1}`` and ``F_\\nu^{-1}`` are the quantile functions of `μ` and `ν`,
respectively.
A pre-computed optimal transport `plan` may be provided.
See also: [`ot_plan`](@ref), [`emd2`](@ref)
"""
function ot_cost(
c, μ::ContinuousUnivariateDistribution, ν::UnivariateDistribution; plan=nothing
)
cost, _ = if plan === nothing
quadgk(0, 1) do q
return c(quantile(μ, q), quantile(ν, q))
end
else
quadgk(0, 1) do q
x = quantile(μ, q)
return c(x, plan(x))
end
end
return cost
end
################
# Discrete 1D OT
################
# internal iterator for discrete one-dimensional OT problems
# it returns tuples that consist of the indices of the source and target histograms
# and the optimal flow between the corresponding points
struct Discrete1DOTIterator{T,M,N}
mu::M
nu::N
end
# histograms `μ` and `ν` are expected to be iterators of the histograms where the
# corresponding support is sorted
function Discrete1DOTIterator(μ, ν)
T = Base.promote_eltype(μ, ν)
return Discrete1DOTIterator{T,typeof(μ),typeof(ν)}(μ, ν)
end
Base.IteratorEltype(::Type{<:Discrete1DOTIterator}) = Base.HasEltype()
Base.eltype(::Type{<:Discrete1DOTIterator{T}}) where {T} = Tuple{Int,Int,T}
Base.length(d::Discrete1DOTIterator) = length(d.mu) + length(d.nu) - 1
# we iterate through the source and target histograms
function Base.iterate(
d::Discrete1DOTIterator{T}, (i, j, μnext, νnext)=(1, 1, iterate(d.mu), iterate(d.nu))
) where {T}
# if we are done with iterating through the source and/or target histogram,
# iteration is stopped
if μnext === nothing || νnext === nothing
return nothing
end
# unpack next values and states of the source and target histograms
μiter, μstate = μnext
νiter, νstate = νnext
# compute next value of the iterator: indices of source and target histograms
# and optimal flow between the corresponding points
min_iter, max_iter = minmax(μiter, νiter)
iter = (i, j, min_iter)
# compute next state of the iterator
diff = max_iter - min_iter
state = if μiter < max_iter
# move forward in the source histogram
(i + 1, j, iterate(d.mu, μstate), (diff, νstate))
else
# move forward in the target histogram
(i, j + 1, (diff, μstate), iterate(d.nu, νstate))
end
return iter, state
end
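# Illustration of the greedy pairing implemented above (comment only): for the
# sorted histograms μ = [0.4, 0.6] and ν = [0.5, 0.5] the iterator yields
# (1, 1, 0.4) -> (2, 1, 0.1) -> (2, 2, 0.5), i.e. the classical north-west
# corner construction, which is optimal in 1D for convex costs.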
"""
ot_plan(c, μ::DiscreteNonParametric, ν::DiscreteNonParametric)
Compute the optimal transport plan for the Monge-Kantorovich problem with univariate
discrete distributions `μ` and `ν` as source and target marginals and cost function `c`
of the form ``c(x, y) = h(|x - y|)`` where ``h`` is a convex function.
In this setting, the optimal transport plan can be computed analytically. It is returned as
a sparse matrix.
See also: [`ot_cost`](@ref), [`emd`](@ref)
"""
function ot_plan(_, μ::DiscreteNonParametric, ν::DiscreteNonParametric)
# Unpack the probabilities of the two distributions
# Note: support of `DiscreteNonParametric` is sorted
μprobs = probs(μ)
νprobs = probs(ν)
T = Base.promote_eltype(μprobs, νprobs)
return if μprobs isa FillArrays.AbstractFill &&
νprobs isa FillArrays.AbstractFill &&
length(μprobs) == length(νprobs)
# Special case: discrete uniform distributions of the same "size"
k = length(μprobs)
sparse(1:k, 1:k, T(first(μprobs)), k, k)
else
# Generic case
# Create the iterator
iter = Discrete1DOTIterator(μprobs, νprobs)
# create arrays for the indices of the two histograms and the optimal flow between the
# corresponding points
n = length(iter)
I = Vector{Int}(undef, n)
J = Vector{Int}(undef, n)
W = Vector{T}(undef, n)
# compute the sparse optimal transport plan
@inbounds for (idx, (i, j, w)) in enumerate(iter)
I[idx] = i
J[idx] = j
W[idx] = w
end
sparse(I, J, W, length(μprobs), length(νprobs))
end
end
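# Usage sketch (comment only, hypothetical values):
#
#     μ = DiscreteNonParametric([0.0, 1.0], [0.3, 0.7])
#     ν = DiscreteNonParametric([0.0, 2.0], [0.5, 0.5])
#     γ = ot_plan(euclidean, μ, ν)
#     # γ is a 2×2 SparseMatrixCSC with row sums probs(μ) and column sums probs(ν)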
"""
ot_cost(
c, μ::DiscreteNonParametric, ν::DiscreteNonParametric; plan=nothing
)
Compute the optimal transport cost for the Monge-Kantorovich problem with discrete
univariate distributions `μ` and `ν` as source and target marginals and cost function `c`
of the form ``c(x, y) = h(|x - y|)`` where ``h`` is a convex function.
In this setting, the optimal transport cost can be computed analytically.
A pre-computed optimal transport `plan` may be provided.
See also: [`ot_plan`](@ref), [`emd2`](@ref)
"""
function ot_cost(c, μ::DiscreteNonParametric, ν::DiscreteNonParametric; plan=nothing)
# Extract support and probabilities of discrete distributions
# Note: support of `DiscreteNonParametric` is sorted
μsupport = support(μ)
νsupport = support(ν)
μprobs = probs(μ)
νprobs = probs(ν)
return if μprobs isa FillArrays.AbstractFill &&
νprobs isa FillArrays.AbstractFill &&
length(μprobs) == length(νprobs)
# Special case: discrete uniform distributions of the same "size"
# In this case we always just compute `sum(c.(μsupport .- νsupport))` and scale it
# We use pairwise summation and avoid allocations
# (https://github.com/JuliaLang/julia/pull/31020)
T = Base.promote_eltype(μprobs, νprobs)
T(first(μprobs)) *
sum(Broadcast.instantiate(Broadcast.broadcasted(c, μsupport, νsupport)))
else
# Generic case
_ot_cost(c, μsupport, μprobs, νsupport, νprobs, plan)
end
end
# compute cost from scratch if no plan is provided
function _ot_cost(c, μsupport, μprobs, νsupport, νprobs, ::Nothing)
# create the iterator
iter = Discrete1DOTIterator(μprobs, νprobs)
# compute the cost
return sum(w * c(μsupport[i], νsupport[j]) for (i, j, w) in iter)
end
# if a sparse plan is provided, we just iterate through the non-zero entries
function _ot_cost(c, μsupport, _, νsupport, _, plan::SparseMatrixCSC)
# extract non-zero flows
I, J, W = findnz(plan)
# compute the cost
return sum(w * c(μsupport[i], νsupport[j]) for (i, j, w) in zip(I, J, W))
end
# fallback: compute cost matrix (probably often faster to compute cost from scratch)
function _ot_cost(c, μsupport, _, νsupport, _, plan)
return dot(plan, StatsBase.pairwise(c, μsupport, νsupport))
end
################
# OT Gaussians
################
"""
ot_cost(::SqEuclidean, μ::MvNormal, ν::MvNormal)
Compute the squared 2-Wasserstein distance between normal distributions `μ` and `ν` as
source and target marginals.
In this setting, the optimal transport cost can be computed as
```math
W_2^2(\\mu, \\nu) = \\|m_\\mu - m_\\nu \\|^2 + \\mathcal{B}(\\Sigma_\\mu, \\Sigma_\\nu)^2,
```
where ``\\mu = \\mathcal{N}(m_\\mu, \\Sigma_\\mu)``,
``\\nu = \\mathcal{N}(m_\\nu, \\Sigma_\\nu)``, and ``\\mathcal{B}`` is the Bures metric.
See also: [`ot_plan`](@ref), [`emd2`](@ref)
"""
function ot_cost(::SqEuclidean, μ::MvNormal, ν::MvNormal)
return sqeuclidean(μ.μ, ν.μ) + sqbures(μ.Σ, ν.Σ)
end
"""
ot_cost(::SqEuclidean, μ::Normal, ν::Normal)
Compute the squared 2-Wasserstein distance between univariate normal distributions `μ` and
`ν` as source and target marginals.
See also: [`ot_plan`](@ref), [`emd2`](@ref)
"""
function ot_cost(::SqEuclidean, μ::Normal, ν::Normal)
return (μ.μ - ν.μ)^2 + (μ.σ - ν.σ)^2
end
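# Comment-only sanity check: for μ = Normal(0, 1) and ν = Normal(3, 2) the
# formula above gives 3^2 + (1 - 2)^2 = 10.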
"""
ot_plan(::SqEuclidean, μ::MvNormal, ν::MvNormal)
Compute the optimal transport plan for the Monge-Kantorovich problem with multivariate
normal distributions `μ` and `ν` as source and target marginals and cost function
``c(x, y) = \\|x - y\\|_2^2``.
In this setting, for ``\\mu = \\mathcal{N}(m_\\mu, \\Sigma_\\mu)`` and
``\\nu = \\mathcal{N}(m_\\nu, \\Sigma_\\nu)``, the optimal transport plan is the Monge
map
```math
T \\colon x \\mapsto m_\\nu
+ \\Sigma_\\mu^{-1/2}
{\\big(\\Sigma_\\mu^{1/2} \\Sigma_\\nu \\Sigma_\\mu^{1/2}\\big)}^{1/2}\\Sigma_\\mu^{-1/2}
(x - m_\\mu).
```
See also: [`ot_cost`](@ref), [`emd`](@ref)
"""
function ot_plan(::SqEuclidean, μ::MvNormal, ν::MvNormal)
Σμsqrt = μ.Σ^(-1 / 2)
A = Σμsqrt * sqrt(_gaussian_ot_A(μ.Σ, ν.Σ)) * Σμsqrt
mμ = μ.μ
mν = ν.μ
T(x) = mν + A * (x - mμ)
return T
end
"""
ot_plan(::SqEuclidean, μ::Normal, ν::Normal)
Compute the optimal transport plan for the Monge-Kantorovich problem with
normal distributions `μ` and `ν` as source and target marginals and cost function
``c(x, y) = \\|x - y\\|_2^2``.
See also: [`ot_cost`](@ref), [`emd`](@ref)
"""
function ot_plan(::SqEuclidean, μ::Normal, ν::Normal)
mμ = μ.μ
mν = ν.μ
a = ν.σ / μ.σ
T(x) = mν + a * (x - mμ)
return T
end
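# Comment-only example: ot_plan(SqEuclidean(), Normal(0, 1), Normal(3, 2))
# returns x -> 3 + 2x, which pushes Normal(0, 1) forward onto Normal(3, 2).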
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 2086 | struct FiniteDiscreteMeasure{X<:AbstractVector,P<:AbstractVector}
support::X
p::P
function FiniteDiscreteMeasure{X,P}(support::X, p::P) where {X,P}
length(support) == length(p) || error("length of `support` and `p` must be equal")
isprobvec(p) || error("`p` must be a probability vector")
return new{X,P}(support, p)
end
end
"""
discretemeasure(
support::AbstractVector,
probs::AbstractVector{<:Real}=FillArrays.Fill(inv(length(support)), length(support)),
)
Construct a finite discrete probability measure with `support` and corresponding
`probabilities`. If the probability vector argument is not passed, then
equal probability is assigned to each entry in the support.
# Examples
```julia
using KernelFunctions
# rows correspond to samples
μ = discretemeasure(RowVecs(rand(7,3)), normalize!(rand(10),1))
# columns correspond to samples, each with equal probability
ν = discretemeasure(ColVecs(rand(3,12)))
```
!!! note
If `support` is a 1D vector, the constructed measure will be sorted,
e.g. for `mu = discretemeasure([3, 1, 2],[0.5, 0.2, 0.3])`, then
`mu.support` will be `[1, 2, 3]` and `mu.p` will be `[0.2, 0.3, 0.5]`.
Also, avoid passing 1D distributions as `RowVecs(rand(3))` or `[[1],[3],[4]]`,
since this will be dispatched to the multivariate case instead
of the univariate case for which the algorithm is more efficient.
!!! warning
This function and in particular its return values are not stable and might be changed in future releases.
"""
function discretemeasure(
support::AbstractVector{<:Real},
probs::AbstractVector{<:Real}=Fill(inv(length(support)), length(support)),
)
return DiscreteNonParametric(support, probs)
end
function discretemeasure(
support::AbstractVector,
probs::AbstractVector{<:Real}=Fill(inv(length(support)), length(support)),
)
return FiniteDiscreteMeasure{typeof(support),typeof(probs)}(support, probs)
end
Distributions.support(d::FiniteDiscreteMeasure) = d.support
Distributions.probs(d::FiniteDiscreteMeasure) = d.p
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 1602 | """
wasserstein(μ, ν; metric=Euclidean(), p=Val(1), kwargs...)
Compute the `p`-Wasserstein distance with respect to the `metric` between measures `μ` and
`ν`.
Order `p` can be provided as a scalar of type `Real` or as a parameter of a value type
`Val(p)`. For certain combinations of `metric` and `p`, such as `metric=Euclidean()` and
`p=Val(2)`, the computations are more efficient if `p` is specified as a value type. The
remaining keyword arguments are forwarded to [`ot_cost`](@ref).
See also: [`squared2wasserstein`](@ref), [`ot_cost`](@ref)
"""
function wasserstein(μ, ν; metric=Euclidean(), p::Union{Real,Val}=Val(1), kwargs...)
cost = ot_cost(p2distance(metric, p), μ, ν; kwargs...)
return prt(cost, p)
end
# compute the cost function corresponding to a metric and exponent `p`
p2distance(metric, ::Val{1}) = metric
p2distance(metric, ::Val{P}) where {P} = (x, y) -> metric(x, y)^P
p2distance(d::Euclidean, ::Val{2}) = SqEuclidean(d.thresh)
p2distance(metric, p) = (x, y) -> metric(x, y)^p
# compute the `p` root
prt(x, ::Val{1}) = x
prt(x, ::Val{2}) = sqrt(x)
prt(x, ::Val{3}) = cbrt(x)
prt(x, ::Val{P}) where {P} = x^(1 / P)
prt(x, p) = x^(1 / p)
"""
squared2wasserstein(μ, ν; metric=Euclidean(), kwargs...)
Compute the squared 2-Wasserstein distance with respect to the `metric` between measures `μ`
and `ν`.
The remaining keyword arguments are forwarded to [`ot_cost`](@ref).
See also: [`wasserstein`](@ref), [`ot_cost`](@ref)
"""
function squared2wasserstein(μ, ν; metric=Euclidean(), kwargs...)
return ot_cost(p2distance(metric, Val(2)), μ, ν; kwargs...)
end
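# Usage sketch (comment only):
#
#     using Distributions
#     wasserstein(Normal(0, 1), Normal(3, 2); p=Val(2)) # ≈ sqrt(10)
#     squared2wasserstein(Normal(0, 1), Normal(3, 2))   # ≈ 10
#
# since the squared 2-Wasserstein distance between these Gaussians is
# 3^2 + (1 - 2)^2 = 10.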
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 2277 | # Code from @devmotion
# https://github.com/devmotion/\
# CalibrationErrorsDistributions.jl/blob/main/src/distances/bures.jl
"""
tr_sqrt(A::AbstractMatrix)
Compute ``\\operatorname{tr}\\big(A^{1/2}\\big)``.
"""
tr_sqrt(A::AbstractMatrix) = LinearAlgebra.tr(sqrt(A))
tr_sqrt(A::PDMats.PDMat) = tr_sqrt(A.mat)
tr_sqrt(A::PDMats.PDiagMat) = sum(sqrt, A.diag)
tr_sqrt(A::PDMats.ScalMat) = A.dim * sqrt(A.value)
"""
_gaussian_ot_A(A::AbstractMatrix, B::AbstractMatrix)
Compute
```math
A^{1/2} B A^{1/2}.
```
"""
function _gaussian_ot_A(A::AbstractMatrix, B::AbstractMatrix)
sqrt_A = sqrt(A)
return sqrt_A * B * sqrt_A
end
function _gaussian_ot_A(A::PDMats.PDiagMat, B::AbstractMatrix)
return sqrt.(A.diag) .* B .* sqrt.(A.diag')
end
function _gaussian_ot_A(A::StridedMatrix, B::PDMats.PDMat)
return PDMats.X_A_Xt(B, sqrt(A))
end
_gaussian_ot_A(A::PDMats.PDMat, B::PDMats.PDMat) = _gaussian_ot_A(A.mat, B)
_gaussian_ot_A(A::AbstractMatrix, B::PDMats.PDiagMat) = _gaussian_ot_A(B, A)
_gaussian_ot_A(A::PDMats.PDMat, B::StridedMatrix) = _gaussian_ot_A(B, A)
"""
sqbures(A::AbstractMatrix, B::AbstractMatrix)
Compute the squared Bures metric
```math
\\operatorname{tr}(A) + \\operatorname{tr}(B)
- \\operatorname{tr}\\Big({\\big(A^{1/2} B A^{1/2}\\big)}^{1/2}\\Big).
```
"""
function sqbures(A::AbstractMatrix, B::AbstractMatrix)
return LinearAlgebra.tr(A) + LinearAlgebra.tr(B) - 2 * tr_sqrt(_gaussian_ot_A(A, B))
end
# diagonal matrix
function sqbures(A::PDMats.PDiagMat, B::PDMats.PDiagMat)
if !(A.dim == B.dim)
throw(ArgumentError("matrices must have the same dimensions."))
end
return sum(zip(A.diag, B.diag)) do (x, y)
abs2(sqrt(x) - sqrt(y))
end
end
# scaled identity matrix
function sqbures(A::PDMats.ScalMat, B::AbstractMatrix)
return LinearAlgebra.tr(A) + LinearAlgebra.tr(B) - 2 * sqrt(A.value) * tr_sqrt(B)
end
sqbures(A::AbstractMatrix, B::PDMats.ScalMat) = sqbures(B, A)
sqbures(A::PDMats.ScalMat, B::PDMats.ScalMat) = A.dim * abs2(sqrt(A.value) - sqrt(B.value))
# combinations
function sqbures(A::PDMats.PDiagMat, B::PDMats.ScalMat)
sqrt_B = sqrt(B.value)
return sum(A.diag) do x
abs2(sqrt(x) - sqrt_B)
end
end
sqbures(A::PDMats.ScalMat, B::PDMats.PDiagMat) = sqbures(B, A)
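# Comment-only consistency check: the specialized methods agree with the dense
# formula. E.g. for A = ScalMat(2, 4.0) and B = PDiagMat([1.0, 9.0]),
# sqbures(A, B) = (1 - 2)^2 + (3 - 2)^2 = 2, matching
# tr(A) + tr(B) - 2 tr((A^{1/2} B A^{1/2})^{1/2}) = 8 + 10 - 2 * (2 + 6) = 2.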
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 718 | # Code from @devmotion
# https://github.com/devmotion/\
# CalibrationErrorsDistributions.jl/blob/main/src/distances/bures.jl
using ExactOptimalTransport
using LinearAlgebra
using Random
using PDMats
@testset "bures.jl" begin
function _sqbures(A, B)
sqrt_A = sqrt(A)
return tr(A) + tr(B) - 2 * tr(sqrt(sqrt_A * B * sqrt_A'))
end
function rand_matrices(n)
A = randn(n, n)
B = A' * A + I
return B, PDMat(B), PDiagMat(diag(B)), ScalMat(n, B[1])
end
for (x, y) in Iterators.product(rand_matrices(10), rand_matrices(10))
xfull = Matrix(x)
yfull = Matrix(y)
@test ExactOptimalTransport.sqbures(x, y) ≈ _sqbures(xfull, yfull)
end
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 9227 | using ExactOptimalTransport
using Distances
using FillArrays
using GLPK
using PythonOT: PythonOT
using Tulip
using MathOptInterface
using Distributions
using HCubature
using LinearAlgebra
using Random
using SparseArrays
const MOI = MathOptInterface
const POT = PythonOT
Random.seed!(100)
@testset "exact.jl" begin
@testset "Earth-Movers Distance" begin
M = 200
N = 250
μ = normalize!(rand(M), 1)
ν = normalize!(rand(N), 1)
@testset "example" begin
# create random cost matrix
C = pairwise(SqEuclidean(), rand(1, M), rand(1, N); dims=2)
# compute optimal transport map and cost with POT
pot_P = POT.emd(μ, ν, C)
pot_cost = POT.emd2(μ, ν, C)
# compute optimal transport map and cost with Tulip and GLPK
for T in (Tulip.Optimizer, GLPK.Optimizer)
lp = T()
P = emd(μ, ν, C, lp)
@test size(C) == size(P)
@test MOI.get(lp, MOI.TerminationStatus()) == MOI.OPTIMAL
@test maximum(abs, P .- pot_P) < 1e-2
lp = T()
cost = emd2(μ, ν, C, lp)
@test dot(C, P) ≈ cost atol = 1e-5
@test MOI.get(lp, MOI.TerminationStatus()) == MOI.OPTIMAL
@test cost ≈ pot_cost atol = 1e-5
end
end
@testset "pre-computed plan" begin
# create random cost matrix
C = pairwise(SqEuclidean(), rand(1, M), rand(1, N); dims=2)
# compute optimal transport map with Tulip and GLPK
for T in (Tulip.Optimizer, GLPK.Optimizer)
P = emd(μ, ν, C, T())
# do not use μ and ν to ensure that provided map is used
cost = emd2(similar(μ), similar(ν), C, T(); plan=P)
@test cost ≈ emd2(μ, ν, C, T())
end
end
# https://github.com/JuliaOptimalTransport/OptimalTransport.jl/issues/71
@testset "cost matrix with integers" begin
C = pairwise(SqEuclidean(), rand(1:10, 1, M), rand(1:10, 1, N); dims=2)
emd2(μ, ν, C, Tulip.Optimizer())
end
end
@testset "1D Optimal Transport for Convex Cost" begin
@testset "continuous distributions" begin
# two normal distributions (has analytical solution)
μ = Normal(randn(), 1 + rand())
ν = Normal(randn(), 1 + rand())
# compute OT plan
γ = ot_plan(sqeuclidean, μ, ν)
for x in randn(10)
@test γ(x) ≈ invlogcdf(ν, logcdf(μ, x))
end
# compute OT cost
c = ot_cost(sqeuclidean, μ, ν)
@test c ≈ (mean(μ) - mean(ν))^2 + (std(μ) - std(ν))^2
# do not use ν to ensure that the provided plan is used
@test ot_cost(sqeuclidean, μ, Normal(randn(), rand()); plan=γ) ≈ c
end
@testset "semidiscrete case" begin
μ = Normal(randn(), rand())
νprobs = normalize!(rand(30), 1)
ν = Categorical(νprobs)
# compute OT plan
γ = ot_plan(euclidean, μ, ν)
for x in randn(10)
@test γ(x) ≈ invlogcdf(ν, logcdf(μ, x))
end
# compute OT cost, without and with provided plan
# do not use ν in the second case to ensure that the provided plan is used
c = ot_cost(euclidean, μ, ν)
@test ot_cost(euclidean, μ, Categorical(reverse(νprobs)); plan=γ) ≈ c
# check that OT cost is consistent with OT cost of a discretization
m = 500
xs = rand(μ, m)
μdiscrete = fill(1 / m, m)
C = pairwise(Euclidean(), xs', (1:length(νprobs))'; dims=2)
for optimizer in (Tulip.Optimizer(), GLPK.Optimizer())
c2 = emd2(μdiscrete, νprobs, C, optimizer)
@test c2 ≈ c rtol = 1e-1
end
end
@testset "discrete case" begin
# different random sources and target marginals:
# non-uniform + different size, uniform + different size, uniform + equal size
for (μ, ν) in (
(
DiscreteNonParametric(randn(30), normalize!(rand(30), 1)),
DiscreteNonParametric(randn(50), normalize!(rand(50), 1)),
),
(
DiscreteNonParametric(randn(30), Fill(1 / 30, 30)),
DiscreteNonParametric(randn(50), Fill(1 / 50, 50)),
),
(
DiscreteNonParametric(randn(30), Fill(1 / 30, 30)),
DiscreteNonParametric(randn(30), Fill(1 / 30, 30)),
),
)
# extract support, probabilities, and "size"
μsupport = support(μ)
μprobs = probs(μ)
m = length(μprobs)
νsupport = support(ν)
νprobs = probs(ν)
n = length(νprobs)
# compute OT plan
γ = @inferred(ot_plan(euclidean, μ, ν))
@test γ isa SparseMatrixCSC
@test size(γ) == (m, n)
@test vec(sum(γ; dims=2)) ≈ μ.p
@test vec(sum(γ; dims=1)) ≈ ν.p
# consistency checks
I, J, W = findnz(γ)
@test all(w > zero(w) for w in W)
@test sum(W) ≈ 1
@test sort(unique(I)) == 1:m
@test sort(unique(J)) == 1:n
@test sort(I .+ J) == if μprobs isa Fill && νprobs isa Fill && m == n
# Optimized version for special case (discrete uniform + equal size)
2:2:(m + n)
else
# Generic case (not optimized)
2:(m + n)
end
# compute OT cost
c = @inferred(ot_cost(euclidean, μ, ν))
# compare with computation with explicit cost matrix
# DiscreteNonParametric sorts the support automatically, here we have to sort
# manually
C = pairwise(Euclidean(), μsupport', νsupport'; dims=2)
for optimizer in (Tulip.Optimizer(), GLPK.Optimizer())
c2 = emd2(μprobs, νprobs, C, optimizer)
@test c2 ≈ c rtol = 1e-5
end
# compare with POT
# disabled currently since https://github.com/PythonOT/POT/issues/169 causes bounds
# error
# @test γ ≈ POT.emd_1d(μ.support, ν.support; a=μ.p, b=μ.p, metric="euclidean")
# @test c ≈ POT.emd2_1d(μ.support, ν.support; a=μ.p, b=μ.p, metric="euclidean")
# do not use the probabilities of μ and ν to ensure that the provided plan is
# used
μ2 = DiscreteNonParametric(μsupport, reverse(μprobs))
ν2 = DiscreteNonParametric(νsupport, reverse(νprobs))
c2 = @inferred(ot_cost(euclidean, μ2, ν2; plan=γ))
@test c2 ≈ c
c2 = @inferred(ot_cost(euclidean, μ2, ν2; plan=Matrix(γ)))
@test c2 ≈ c
end
end
end
@testset "Multivariate Gaussians" begin
@testset "translation with constant covariance" begin
m = randn(100)
τ = rand(100)
Σ = Matrix(Hermitian(rand(100, 100) + 100I))
μ = MvNormal(m, Σ)
ν = MvNormal(m .+ τ, Σ)
@test ot_cost(SqEuclidean(), μ, ν) ≈ norm(τ)^2
x = rand(100, 10)
T = ot_plan(SqEuclidean(), μ, ν)
@test pdf(ν, mapslices(T, x; dims=1)) ≈ pdf(μ, x)
end
@testset "comparison to grid approximation" begin
μ = MvNormal([0, 0], [1 0; 0 2])
ν = MvNormal([10, 10], [2 0; 0 1])
# Constructing circular grid approximation
# Angular grid step
θ = collect(0:0.2:(2π))
θx = cos.(θ)
θy = sin.(θ)
# Radius grid step
δ = collect(0:0.2:1)
μsupp = [0.0 0.0]
νsupp = [10.0 10.0]
for i in δ[2:end]
a = [θx .* i θy .* i * 2]
b = [θx .* i * 2 θy .* i] .+ [10 10]
μsupp = vcat(μsupp, a)
νsupp = vcat(νsupp, b)
end
# Create discretized distribution
μprobs = normalize!(pdf(μ, μsupp'), 1)
νprobs = normalize!(pdf(ν, νsupp'), 1)
C = pairwise(SqEuclidean(), μsupp', νsupp'; dims=2)
for optimizer in (Tulip.Optimizer(), GLPK.Optimizer())
@test emd2(μprobs, νprobs, C, optimizer) ≈ ot_cost(SqEuclidean(), μ, ν) rtol =
1e-3
end
# Use hcubature integration to perform ``\\int c(x,T(x)) d\\mu``
T = ot_plan(SqEuclidean(), μ, ν)
c_hcubature, _ = hcubature([-10, -10], [10, 10]) do x
return sqeuclidean(x, T(x)) * pdf(μ, x)
end
@test ot_cost(SqEuclidean(), μ, ν) ≈ c_hcubature rtol = 1e-3
end
end
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 411 | using ExactOptimalTransport
using SafeTestsets
using Test
@testset "ExactOptimalTransport" begin
@safetestset "Utilities" begin
include("utils.jl")
end
@safetestset "Exact OT" begin
include("exact.jl")
end
@safetestset "Wasserstein distance" begin
include("wasserstein.jl")
end
@safetestset "Bures distance" begin
include("bures.jl")
end
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 1948 | using ExactOptimalTransport
using LinearAlgebra
using Random
using Test
using Distributions
Random.seed!(100)
@testset "utils.jl" begin
@testset "FiniteDiscreteMeasure" begin
@testset "Univariate Finite Discrete Measure" begin
n = 100
m = 80
μsupp = rand(n)
νsupp = rand(m)
μprobs = normalize!(rand(n), 1)
μ = ExactOptimalTransport.discretemeasure(μsupp, μprobs)
ν = ExactOptimalTransport.discretemeasure(νsupp)
# check if the vectors are indeed probabilities
@test isprobvec(μ.p)
@test isprobvec(probs(μ))
@test ν.p == ones(m) ./ m
@test probs(ν) == ones(m) ./ m
# check if it assigns to DiscreteNonParametric when Vector/Matrix is 1D
@test μ isa DiscreteNonParametric
@test ν isa DiscreteNonParametric
# check if support is correctly assigned
@test sort(μsupp) == μ.support
@test sort(μsupp) == support(μ)
@test sort(vec(νsupp)) == ν.support
@test sort(vec(νsupp)) == support(ν)
end
@testset "Multivariate Finite Discrete Measure" begin
n = 10
m = 3
μsupp = [rand(m) for i in 1:n]
νsupp = [rand(m) for i in 1:n]
μprobs = normalize!(rand(n), 1)
μ = ExactOptimalTransport.discretemeasure(μsupp, μprobs)
ν = ExactOptimalTransport.discretemeasure(νsupp)
# check if the vectors are indeed probabilities
@test isprobvec(μ.p)
@test isprobvec(probs(μ))
@test ν.p == ones(n) ./ n
@test probs(ν) == ones(n) ./ n
# check if support is correctly assigned
@test μsupp == μ.support
@test μsupp == support(μ)
@test νsupp == ν.support
@test νsupp == support(ν)
end
end
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | code | 3012 | using ExactOptimalTransport
using Distances
using Distributions
using Random
using Test
Random.seed!(100)
@testset "wasserstein.jl" begin
@testset "p2distance" begin
for metric in (Euclidean(), Euclidean(0.01), TotalVariation())
@test ExactOptimalTransport.p2distance(metric, Val(1)) === metric
end
@test ExactOptimalTransport.p2distance(Euclidean(), Val(2)) == SqEuclidean()
@test ExactOptimalTransport.p2distance(Euclidean(0.01), Val(2)) == SqEuclidean(0.01)
p = randexp()
x = randn(10)
y = randn(10)
for metric in (Euclidean(), TotalVariation())
for _p in (p, Val(p))
pmetric = ExactOptimalTransport.p2distance(metric, _p)
@test pmetric(x, y) ≈ metric(x, y)^p
end
end
end
@testset "prt" begin
x = randexp()
for p in (1, 2, 3, randexp())
@test ExactOptimalTransport.prt(x, p) ≈ x^(1 / p)
@test ExactOptimalTransport.prt(x, Val(p)) ≈ x^(1 / p)
end
end
@testset "wasserstein" begin
μ = Normal(randn(), randexp())
ν = Normal(randn(), randexp())
for p in (1, 2, 3, randexp()), metric in (Euclidean(), TotalVariation())
for _p in (p, Val(p))
# without additional keyword arguments
w = wasserstein(μ, ν; p=_p, metric=metric)
@test w ≈ ot_cost((x, y) -> metric(x, y)^p, μ, ν)^(1 / p)
# with pre-computed plan (random `ν` ensures that plan is used)
T = ot_plan((x, y) -> metric(x, y)^p, μ, ν)
w2 = wasserstein(μ, Normal(randn(), rand()); p=_p, metric=metric, plan=T)
@test w ≈ w2
end
end
# check that `Euclidean` is the default `metric`
for p in (1, 2, 3, randexp()), _p in (p, Val(p))
w = wasserstein(μ, ν; p=_p)
@test w ≈ wasserstein(μ, ν; p=_p, metric=Euclidean())
end
# check that `Val(1)` is the default `p`
for metric in (Euclidean(), TotalVariation())
w = wasserstein(μ, ν; metric=metric)
@test w ≈ wasserstein(μ, ν; p=Val(1), metric=metric)
end
end
@testset "squared2wasserstein" begin
μ = Normal(randn(), randexp())
ν = Normal(randn(), randexp())
for metric in (Euclidean(), TotalVariation())
# without additional keyword arguments
w = squared2wasserstein(μ, ν; metric=metric)
@test w ≈ ot_cost((x, y) -> metric(x, y)^2, μ, ν)
# with pre-computed plan (random `ν` ensures that plan is used)
T = ot_plan((x, y) -> metric(x, y)^2, μ, ν)
w2 = squared2wasserstein(μ, Normal(randn(), rand()); metric=metric, plan=T)
@test w ≈ w2
end
# check that `Euclidean` is the default `metric`
w = squared2wasserstein(μ, ν)
@test w ≈ squared2wasserstein(μ, ν; metric=Euclidean())
end
end
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | docs | 2779 | # ExactOptimalTransport.jl <a href='https://juliaoptimaltransport.github.io/ExactOptimalTransport.jl/dev'><img src="docs/src/assets/logo.svg" align="right" height="138.5" /></a>
*Solving unregularized optimal transport problems with Julia*
[](https://JuliaOptimalTransport.github.io/ExactOptimalTransport.jl/stable)
[](https://JuliaOptimalTransport.github.io/ExactOptimalTransport.jl/dev)
[](https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl/actions?query=workflow%3ACI+branch%3Amain)
[](https://zenodo.org/badge/latestdoi/402808845)
[](https://codecov.io/gh/JuliaOptimalTransport/ExactOptimalTransport.jl)
[](https://coveralls.io/github/JuliaOptimalTransport/ExactOptimalTransport.jl?branch=main)
[](https://github.com/invenia/BlueStyle)
This package provides some [Julia](https://julialang.org/) implementations of algorithms for solving
unregularized optimal transport (Kantorovich) problems.
## Example
```julia
using ExactOptimalTransport
using Distances
using Tulip
# uniform histograms
μ = fill(1/250, 250)
ν = fill(1/200, 200)
# random cost matrix
C = pairwise(SqEuclidean(), rand(1, 250), rand(1, 200); dims=2)
# compute optimal transport map with Tulip
lp = Tulip.Optimizer()
P = emd(μ, ν, C, lp)
# compute optimal transport cost without recomputing the plan
emd2(μ, ν, C, lp; plan=P)
```
Please see the documentation pages for further information.
## Related packages
- [OptimalTransport.jl](https://github.com/JuliaOptimalTransport/OptimalTransport.jl): Julia implementation of
algorithms for regularized optimal transport problems with GPU support.
- [StochasticOptimalTransport.jl](https://github.com/JuliaOptimalTransport/StochasticOptimalTransport.jl): Julia implementation of stochastic optimization algorithms for large-scale optimal transport.
- [PythonOT.jl](https://github.com/JuliaOptimalTransport/PythonOT.jl): Julia interface for the [Python Optimal Transport (POT) package](https://pythonot.github.io/).
## Contributing
Contributions are more than welcome! Please feel free to submit an issue or pull request in this repository.
## Note
This package was originally part of [OptimalTransport.jl](https://github.com/JuliaOptimalTransport/OptimalTransport.jl).
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.5 | aca11e5cbf419be6778707f4ddc90d486bc79e92 | docs | 1191 | # ExactOptimalTransport.jl
ExactOptimalTransport.jl is a Julia package for solving the unregularized
optimal transport (Kantorovich) problems.
```@docs
emd
emd2
```
```@docs
ot_plan
ot_plan(::Any, ::ExactOptimalTransport.ContinuousUnivariateDistribution, ::ExactOptimalTransport.UnivariateDistribution)
ot_plan(::Any, ::ExactOptimalTransport.DiscreteNonParametric, ::ExactOptimalTransport.DiscreteNonParametric)
ot_plan(::ExactOptimalTransport.SqEuclidean, ::ExactOptimalTransport.Normal, ::ExactOptimalTransport.Normal)
ot_plan(::ExactOptimalTransport.SqEuclidean, ::ExactOptimalTransport.MvNormal, ::ExactOptimalTransport.MvNormal)
```
```@docs
ot_cost
ot_cost(::Any, ::ExactOptimalTransport.ContinuousUnivariateDistribution, ::ExactOptimalTransport.UnivariateDistribution)
ot_cost(::Any, ::ExactOptimalTransport.DiscreteNonParametric, ::ExactOptimalTransport.DiscreteNonParametric)
ot_cost(::ExactOptimalTransport.SqEuclidean, ::ExactOptimalTransport.Normal, ::ExactOptimalTransport.Normal)
ot_cost(::ExactOptimalTransport.SqEuclidean, ::ExactOptimalTransport.MvNormal, ::ExactOptimalTransport.MvNormal)
```
```@docs
wasserstein
squared2wasserstein
```
```@docs
discretemeasure
```
| ExactOptimalTransport | https://github.com/JuliaOptimalTransport/ExactOptimalTransport.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | code | 1097 | using BenchmarkTools
using SVDSketch
using Images
using Arpack
using LinearAlgebra
function readimage()
# Read image
img = Array{Float64}(channelview(load("image1.jpg")))
p, m, n = size(img)
A = m > n ? [img[1, :, :] img[2, :, :] img[3, :, :]] : [img[1, :, :]; img[2, :, :]; img[3, :, :]]
return A
end
tol = 0.1
function checkrank(normA, S)
r = length(S)
errqb = sqrt.(1 .- cumsum(S.^2) ./ normA)
rT = searchsortedfirst(errqb, tol, rev=true)
println("Initial rank=$r, Truncated rank=$rT")
end
function benchimage(A)
m, n = size(A)
b = max(convert(Integer, floor(min(m, n) / 100)), 20)
normA = sum(abs2, A)
println("farPCA, P=1")
display(@benchmark checkrank($normA, svdsketch($A, $tol, blocksize=$b, poweriter=1)[2]))
# checkrank(normA, svdsketch(A, tol, blocksize=b, poweriter=1))
println("farPCA, P=5")
display(@benchmark checkrank($normA, svdsketch($A, $tol, blocksize=$b, poweriter=5)[2]))
println("svds")
display(@benchmark checkrank($normA, svds($A, nsv=427)[1].S))
end
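# Typical driver (hypothetical):
# benchimage(readimage())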
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | code | 1175 | # Note: MKL must be imported after MKLSparse. There's an issue about this in MKLSparse's repo.
using MKLSparse
using MKL
using BenchmarkTools
using SVDSketch
using Arpack
using LinearAlgebra
using SparseArrays
using Scanf
function readmatrix()
cnt = 948464
I = Vector{Int64}(undef, cnt)
J = Vector{Int64}(undef, cnt)
V = Vector{Float64}(undef, cnt)
open("SNAP.dat", "r") do io
for i=1:cnt
r, I[i], J[i], V[i] = @scanf(io, "%d %d %lf", Int64, Int64, Float64)
end
end
return sparse(I, J, V)
end
tol = 0.5
function checkrank(normA, S)
r = length(S)
errqb = sqrt.(1 .- cumsum(S.^2) ./ normA)
rT = searchsortedfirst(errqb, tol, rev=true)
println("Initial rank=$r, Truncated rank=$rT")
end
function benchsparse(A)
m, n = size(A)
b = max(convert(Integer, floor(min(m, n) / 100)), 20)
normA = sum(abs2, A)
println("farPCA, P=1")
display(@benchmark checkrank($normA, svdsketch($A, $tol, blocksize=$b, poweriter=1)[2]))
println("farPCA, P=5")
display(@benchmark checkrank($normA, svdsketch($A, $tol, blocksize=$b, poweriter=5)[2]))
end
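# Typical driver (hypothetical):
# benchsparse(readmatrix())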
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | code | 296 | push!(LOAD_PATH, "../src/")
using Documenter, SVDSketch
makedocs(
format = Documenter.HTML(
canonical = "https://blog.zhupengfei.com.cn/SVDSketch.jl/stable/",
),
sitename="SVDSketch.jl"
)
deploydocs(
repo = "github.com/zhaowenlan1779/SVDSketch.jl.git",
)
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | code | 5391 | module SVDSketch
export svdsketch
using LinearAlgebra: BlasFloat, Hermitian, eigen!, SVD, tr, mul!, BLAS
using ElasticArrays
using Random: randn!
eps(T) = Base.eps(T)
eps(::Type{Complex{T}}) where {T} = eps(T)
function eigSVD(A)
transposed = false
if size(A, 1) < size(A, 2)
A = A'
transposed = true
end
D, V = eigen!(Hermitian(A' * A))
D = real(D)
# Eliminate negative & very small eigenvalues
idx = searchsortedfirst(D, eps(eltype(A))^(1/2))
if idx > length(D)
error("SVDSketch: Could not find eigenvalue for eigSVD. Your `tol` may be too small.")
end
D, V = D[idx:end], V[:, idx:end]
S = sqrt.(D)
U = A * (V ./ S')
if transposed
return V, S, U
else
return U, S, V
end
end
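# eigSVD (above) computes an economy SVD of A through the eigendecomposition of
# the smaller Gram matrix: if A'A = V * Diagonal(D) * V', then S = sqrt.(D) and
# U = A * V * Diagonal(S)^-1, so A ≈ U * Diagonal(S) * V'. Eigenvalues below
# eps^(1/2) are dropped, which is why the returned rank can be smaller than
# min(size(A)).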
@doc raw"""
svdsketch(A[, tol]; [maxrank, blocksize, maxiter, poweriter]) -> (U, S, V, apxerror)
Returns the singular value decomposition (SVD) of a low-rank matrix sketch of ``A``.
The matrix sketch only reflects the most important features of ``A`` (up to a tolerance),
which enables faster calculation of the SVD of large matrices compared to using `svds`.
The sketch of ``A`` satisfies that ``\|U \Sigma V^T - A\|_F / \|A\|_F \leq \text{tol}``.
The default value for `tol` is `eps(eltype(A))^(1/4)`.
In addition to the SVD, the vector `apxerror` is returned, whose entries represent the
relative approximation error in each iteration, ``\|U \Sigma V^T - A\|_F / \|A\|_F``.
The length of `apxerror` is equal to the number of iterations, and `apxerror[end]` is
the relative approximation error of the output.
# Options
`maxrank`: The rank of the matrix sketch will not exceed `maxrank`.
The default value is `minimum(size(A))`.
`blocksize`: A larger value reduces the number of needed iterations, but might also
result in the result having higher rank than necessary to achieve convergence.
The default value is `min(max(floor(Integer, 0.1*size(A, 1)), 5), maxrank)`.
`maxiter`: The maximum number of iterations for the algorithm.
The default value is `maxrank ÷ blocksize`.
`poweriter`: The number of power iterations performed within each iteration of the algorithm.
Power iterations improve the orthogonality of the ``U`` and ``V`` outputs.
The default value is 1.
"""
svdsketch(A::AbstractMatrix{<:BlasFloat}, tol=eps(eltype(A))^(1/4); kwargs...) = _svdsketch(A, tol; kwargs...)
function svdsketch(A::AbstractMatrix{T}; kwargs...) where T
Tnew = typeof(zero(T)/sqrt(one(T)))
svdsketch(convert(AbstractMatrix{Tnew}, A); kwargs...)
end
function svdsketch(A::AbstractMatrix{T}, tol; kwargs...) where T
Tnew = typeof(zero(T)/sqrt(one(T)))
svdsketch(convert(AbstractMatrix{Tnew}, A), tol; kwargs...)
end
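# Usage sketch (comment only):
#
#     U, S, V, apxerror = svdsketch(A, 1e-2)
#     # apxerror[end] is the achieved relative Frobenius error, approximately
#     # norm(U * Diagonal(S) * V' - A) / norm(A)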
function _svdsketch(A, tol;
maxrank::Integer = minimum(size(A)),
blocksize::Integer = min(max(floor(Integer, 0.1*size(A, 1)), 5), maxrank),
maxiter::Integer = maxrank ÷ blocksize,
poweriter::Integer = 1)
if blocksize > maxrank
throw(ArgumentError("Block size cannot be larger than max rank"))
end
maxrank = maxrank ÷ blocksize * blocksize
m, n = size(A)
normA = sum(abs2, A)
Z = Matrix{eltype(A)}(undef, 0, 0)
Y = ElasticMatrix{eltype(A)}(undef, m, 0)
sizehint!(Y, m, 10 * blocksize)
W = ElasticMatrix{eltype(A)}(undef, n, 0)
sizehint!(W, n, 10 * blocksize)
WTW = Matrix{eltype(A)}(undef, 0, 0)
w = Matrix{eltype(A)}(undef, n, blocksize)
y = Matrix{eltype(A)}(undef, m, blocksize)
apxerror = Vector{Float64}(undef, maxiter)
# oldblocksize = blocksize
sizeB = 0
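    # Main loop: randomized blocked subspace iteration. Each pass draws a new
    # Gaussian block `w`, runs `poweriter` rounds of multiplication by A'A
    # (with a shift `alpha` and deflation against the previously accepted
    # block `W`), and appends the result to the running factors Y and W. The
    # Gram matrices Z = Y'Y and WTW = W'W are updated incrementally so the
    # relative Frobenius error can be monitored cheaply.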
for i = 1:maxiter
randn!(w)
alpha = 0.0
for j = 1:poweriter
mul!(y, A, w)
if i > 1
x = Z \ (W' * w)
mul!(w, A', y, 1, -alpha)
mul!(w, W, x, -1, 1)
else
mul!(w, A', y, 1, -alpha)
end
w, ss, = eigSVD(w)
if j > 1 && ss[1] > alpha
alpha = (alpha + ss[1]) / 2
end
end
if size(w, 2) == blocksize
mul!(y, A, w)
mul!(w, A', y)
else # eigSVD exited early
y = A * w
w = A' * y
end
if i > 1
ytYtemp = y' * Y
Z = [Z ytYtemp'; ytYtemp y'*y]
wtWtemp = w' * W
WTW = [WTW wtWtemp'; wtWtemp w'*w]
else
Z = y' * y
WTW = w' * w
end
append!(Y, y)
append!(W, w)
sizeB += blocksize
apxerror[i] = sqrt(max(1 - real(tr(Hermitian(Z) \ Hermitian(WTW))) / normA, 0))
if apxerror[i] < tol || sizeB >= maxrank
apxerror = apxerror[1:i]
break
end
# Adjust block size
# if i > 1 && apxerror[i] > apxerror[i - 1] / 2
# blocksize += oldblocksize
# end
if sizeB + blocksize > maxrank
blocksize = maxrank - sizeB
end
if size(w, 2) != blocksize
break
end
end
D, V = eigen!(Hermitian(Z))
d = sqrt.(D)
VS = V ./ d'
mul!(V, Hermitian(WTW), VS)
mul!(WTW, VS', V)
D2, V2 = eigen!(Hermitian(WTW), sortby=λ->-abs(λ))
d = sqrt.(D2)
VS *= V2
Y *= VS
W = W * VS ./ d'
return Y, d, W, apxerror
end
end
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | code | 2733 | using SVDSketch
using Test, LinearAlgebra, SparseArrays, StableRNGs
function testSVDMatch(r1, r2, rank=size(r1.S, 1))
approxEq(a, b) = sum(abs, a .* b) ≈ 1
@test r1.S[1:rank] ≈ r2.S[1:rank]
@testset "singular vectors" begin
for j = 1:rank
@test approxEq(r1.U[:, j], r2.U[:, j])
@test approxEq(r1.V[:, j], r2.V[:, j])
end
end
end
@testset "dense" begin
rng = StableRNG(123)
@testset "real" begin
A = rand(rng, 1:10, 10, 10) # Integer matrix, tests promotion as well
U, S, V, = svdsketch(A)
r2 = svd(A)
testSVDMatch(SVD(U, S, V'), r2)
@test_throws ArgumentError svdsketch(A, blocksize=100)
end
@testset "maxrank" begin
A = rand(rng, 1:10, 10, 10) # Integer matrix, tests promotion as well
U, S, = svdsketch(A, maxrank=3)
@test maximum(size(S)) <= 3
end
@testset "complex" begin
A = rand(rng, 1:10, 10, 10) + rand(rng, 1:10, 10, 10) * im
U, S, V, = svdsketch(A)
r2 = svd(A)
testSVDMatch(SVD(U, S, V'), r2)
@test_throws ArgumentError svdsketch(A, blocksize=100)
end
end
# Following test cases are borrowed from Arpack.jl
@testset "sparse" begin
@testset "real" begin
A = sparse([1, 1, 2, 3, 4], [2, 1, 1, 3, 1], [2.0, -1.0, 6.1, 7.0, 1.5])
U, S, V, = svdsketch(A)
r2 = svd(Array(A))
testSVDMatch(SVD(U, S, V'), r2)
@test_throws ArgumentError svdsketch(A, blocksize=100)
end
@testset "maxrank" begin
A = sparse([1, 1, 2, 3, 4], [2, 1, 1, 3, 1], [2.0, -1.0, 6.1, 7.0, 1.5])
U, S, = svdsketch(A, maxrank=2)
@test maximum(size(S)) <= 2
end
@testset "complex" begin
A = sparse([1, 1, 2, 3, 4], [2, 1, 1, 3, 1], exp.(im*[2.0:2:10;]), 5, 4)
U, S, V, = svdsketch(A, blocksize=1)
r2 = svd(Array(A))
testSVDMatch(SVD(U, S, V'), r2)
end
end
@testset "low rank" begin
rng = StableRNG(123)
@testset "rank $r" for r in [2, 5, 10, 100]
m, n = 3*r, 4*r
FU = qr(randn(rng, Float64, m, r))
U = Matrix(FU.Q)
S = 0.1 .+ sort(rand(rng, r), rev=true)
FV = qr(randn(rng, Float64, n, r))
V = Matrix(FV.Q)
A = U*Diagonal(S)*V'
@testset "blocksize $b" for b in [0, r-1, r+1]
@testset "tol $t" for t in [eps(Float64)^(1/4), eps(Float64)^(1/3), 0.01]
U, S, V, = b == 0 ? svdsketch(A, t) : svdsketch(A, t, blocksize=b)
@test size(S, 1) == r
@test S[1:r] ≈ S
@test U'*U ≈ Matrix{Float64}(I, r, r)
@test V'*V ≈ Matrix{Float64}(I, r, r)
end
end
end
end
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | docs | 547 | # SVDSketch
[](https://github.com/zhaowenlan1779/SVDSketch.jl/actions/workflows/CI.yml?query=branch%3Amain)
This work is led by the [THU-numbda](https://github.com/THU-numbda) group, and based on the [farPCA](https://github.com/THU-numbda/farPCA) project.
This is a Julia implementation of the `svdsketch` function from Matlab, based on
[an improved version](https://github.com/THU-numbda/farPCA/tree/main) of the original algorithm.
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | docs | 297 | # Benchmarks
Please find the binary files at [image1.jpg](https://github.com/zhaowenlan1779/SVDSketch.jl/raw/ca246ea325a7e49ea8c09775db7c356396950296/bench/image1.jpg)
and [SNAP.dat](https://github.com/zhaowenlan1779/SVDSketch.jl/raw/ca246ea325a7e49ea8c09775db7c356396950296/bench/SNAP.dat).
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.2.0 | 60e6efe7f3db70d6a2e10f3fa8535361132b3ced | docs | 238 | # SVDSketch.jl Documentation
This is a Julia implementation of the `svdsketch` function from Matlab, based on
[an improved version](https://github.com/THU-numbda/farPCA/tree/main) of the original algorithm.
```@docs
svdsketch
```
| SVDSketch | https://github.com/zhaowenlan1779/SVDSketch.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 2139 | using Libdl
MKL_FOUND = true
function _find_gcclibdir()
gcc_bin = Sys.which("gcc")
if "/usr/bin" == gcc_bin[1:8]
return _find_gcclibdir_in_system()
else
return _find_gcclibdir_custom()
end
end
function _find_gcclibdir_in_system()
for v in [5,6,7,8,9]
gcclibdir = "/usr/lib/gcc/x86_64-linux-gnu/$v"
if isdir(gcclibdir)
l = Libdl.find_library(joinpath(gcclibdir,"libgomp"))
if l != ""
return gcclibdir
end
end
end
  global MKL_FOUND = false
s = """
libgomp could not be found in system. Try
\$ sudo apt-get install libgomp1
"""
@warn s
end
function _find_gcclibdir_custom()
gcc_bin = Sys.which("gcc")
gcc_root = gcc_bin[1:(end-8)]
gcclibdir = joinpath(gcc_root,"lib64")
if ! isdir(gcclibdir)
error("lib64 directory not found in GCC installation: $gcclibdir")
end
gcclibdir
end
deps_jl = "deps.jl"
if isfile(deps_jl)
rm(deps_jl)
end
mklroot = haskey(ENV,"MKLROOT") ? ENV["MKLROOT"] : ""
if !haskey(ENV,"MKLROOT")
MKL_FOUND = false
s = """
Environment variable MKLROOT not found.
Please install the Intel MKL math library and rebuild.
"""
@warn s
else
@info "MKLROOT found at: $mklroot"
end
mkllibdir = joinpath(mklroot,"lib/intel64")
if ! isdir(mkllibdir)
MKL_FOUND = false
s = """
MKL lib directory not found: $mkllibdir
"""
@warn s
else
@info "MKL libraries found at: $mkllibdir"
end
gcclibdir = haskey(ENV,"GRIDAP_PARDISO_LIBGOMP_DIR") ? ENV["GRIDAP_PARDISO_LIBGOMP_DIR"] : ""
if gcclibdir == ""
gcclibdir = _find_gcclibdir()
if ! isdir(gcclibdir)
MKL_FOUND = false
s = """
GCC lib directory not found: $gcclibdir
"""
@warn s
else
@info "GCC libraries found at: $gcclibdir"
end
else
@info "Skipping search of GCC libraries, using the value of GRIDAP_PARDISO_LIBGOMP_DIR instead"
end
open(deps_jl,"w") do f
println(f, "# This file is automatically generated")
println(f, "# Do not edit")
println(f)
println(f, :(const MKL_FOUND = $MKL_FOUND))
println(f, :(const mklroot = $mklroot))
println(f, :(const mkllibdir = $mkllibdir))
println(f, :(const gcclibdir = $gcclibdir))
end
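# For reference, a sketch of what the generated deps.jl looks like on a typical
# system (the paths below are machine-dependent and shown only as an
# illustration, not as required values):
#
# const MKL_FOUND = true
# const mklroot = "/opt/intel/mkl"
# const mkllibdir = "/opt/intel/mkl/lib/intel64"
# const gcclibdir = "/usr/lib/gcc/x86_64-linux-gnu/9"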
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 448 | using Documenter, GridapPardiso
makedocs(;
modules=[GridapPardiso],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
],
repo="https://github.com/gridap/GridapPardiso.jl/blob/{commit}{path}#L{line}",
sitename="GridapPardiso.jl",
authors="Francesc Verdugo <[email protected]>, Víctor Sande <[email protected]>",
assets=String[],
)
deploydocs(;
repo="github.com/gridap/GridapPardiso.jl",
)
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 2022 | module GridapPardiso
using Libdl
using SparseArrays
using SparseMatricesCSR
using Gridap.Algebra
using Gridap.FESpaces
using Gridap.Helpers
import Gridap.Algebra: LinearSolver
import Gridap.Algebra: SymbolicSetup, NumericalSetup
import Gridap.Algebra: symbolic_setup, numerical_setup, numerical_setup!
import Gridap.Algebra: solve, solve!
export PardisoSolver
export new_pardiso_handle
export new_iparm
export pardiso_data_type
export pardisoinit!
export pardiso!
export pardiso_64!
export pardiso_getdiag!
include("load_mkl.jl")
deps_jl = joinpath(@__DIR__, "..", "deps", "deps.jl")
if !isfile(deps_jl)
s = """
Package GridapPardiso not installed properly.
"""
error(s)
end
include(deps_jl)
const pardisoinit_sym = Ref{Ptr}()
const pardiso_sym = Ref{Ptr}()
const pardiso_64_sym = Ref{Ptr}()
#const pardiso_getenv_sym = Ref{Ptr}()
#const pardiso_setenv_sym = Ref{Ptr}()
const pardiso_getdiag_sym = Ref{Ptr}()
#const pardiso_export_sym = Ref{Ptr}()
#const pardiso_handle_store_sym = Ref{Ptr}()
#const pardiso_handle_restore_sym = Ref{Ptr}()
#const pardiso_handle_delete_sym = Ref{Ptr}()
const MKL_PARDISO_LOADED = Ref(false)
function __init__()
if MKL_FOUND
libmkl = load_mkl_gcc(mkllibdir,gcclibdir)
pardisoinit_sym[] = Libdl.dlsym(libmkl,:pardisoinit)
pardiso_sym[] = Libdl.dlsym(libmkl,:pardiso )
pardiso_64_sym[] = Libdl.dlsym(libmkl,:pardiso_64 )
#pardiso_getenv_sym[] = Libdl.dlsym(libmkl,:pardiso_getenv)
#pardiso_setenv_sym[] = Libdl.dlsym(libmkl,:pardiso_setenv)
pardiso_getdiag_sym[] = Libdl.dlsym(libmkl,:pardiso_getdiag)
#pardiso_export_sym[] = Libdl.dlsym(libmkl,:pardiso_export)
#pardiso_handle_store_sym[] = Libdl.dlsym(libmkl,:pardiso_handle_store)
#pardiso_handle_restore_sym[] = Libdl.dlsym(libmkl,:pardiso_handle_restore)
#pardiso_handle_delete_sym[] = Libdl.dlsym(libmkl,:pardiso_handle_delete)
MKL_PARDISO_LOADED[] = true
end
end
include("bindings.jl")
include("PardisoParameters.jl")
include("LinearSolver.jl")
end # module
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 12512 | #
# Maximum number of factors with identical sparsity structure
# that must be kept in memory at the same time
const maxfct = 1
# Actual matrix for the solution phase. The value must be: 1 <= mnum <= maxfct.
const mnum = 1
# Number of right-hand sides that need to be solved for
const nrhs = 1
const MTYPE_UNKNOWN = 0
new_pardiso_handle() = zeros(Int, 64)
new_iparm() = zeros(Int, 64)
function new_iparm(mtype::Integer)
pt = new_pardiso_handle()
iparm = Vector{Int32}(new_iparm())
pardisoinit!(pt,mtype,iparm)
iparm
end
getptr(S::SparseMatrixCSC) = S.colptr
getptr(S::SparseMatrixCSR) = S.rowptr
getptr(S::SymSparseMatrixCSR) = getptr(S.uppertrian)
getindices(S::SymSparseMatrixCSR) = colvals(S)
getindices(S::SparseMatrixCSC) = rowvals(S)
getindices(S::SparseMatrixCSR) = colvals(S)
hascolmajororder(::Type{<:SymSparseMatrixCSR}) = false
hascolmajororder(a::SymSparseMatrixCSR) = hascolmajororder(SymSparseMatrixCSR)
hascolmajororder(::Type{<:SparseMatrixCSC}) = true
hascolmajororder(a::SparseMatrixCSC) = hascolmajororder(SparseMatrixCSC)
hascolmajororder(a::SparseMatrixCSR) = false
get_pardiso(::Type{<:Int32}) = pardiso!
get_pardiso(::Type{<:Int64}) = pardiso_64!
has_0_based_storage(mat) = false
has_0_based_storage(mat::SparseMatrixCSR{0}) = true
has_0_based_storage(mat::SymSparseMatrixCSR{0}) = true
function get_mtype(mtype,mat::AbstractSparseMatrix{T}) where T
error("Unsupported eltype $(T)")
end
# For the moment we use the matrix type but we could use
# other properties of the matrix as well
function get_mtype(mtype,mat::AbstractSparseMatrix{Float64})
mtype == MTYPE_UNKNOWN ? MTYPE_REAL_NON_SYMMETRIC : mtype
end
function get_mtype(mtype,mat::AbstractSparseMatrix{Complex{Float64}})
mtype == MTYPE_UNKNOWN ? MTYPE_COMPLEX_NON_SYMMETRIC : mtype
end
function get_mtype(mtype,mat::SymSparseMatrixCSR{Bi,Float64} where Bi)
mtype == MTYPE_UNKNOWN ? MTYPE_REAL_SYMMETRIC_INDEFINITE : mtype
end
function get_mtype(mtype,mat::SymSparseMatrixCSR{Bi,Complex{Float64}} where Bi)
mtype == MTYPE_UNKNOWN ? MTYPE_COMPLEX_SYMMETRIC : mtype
end
"""
struct PardisoSolver{Ti} <: LinearSolver
Gridap LinearSolver implementation for Intel Pardiso MKL solver.
Official Intel Pardiso MKL documentation:
https://software.intel.com/en-us/mkl-developer-reference-fortran-intel-mkl-pardiso-parallel-direct-sparse-solver-interface
"""
struct PardisoSolver <: LinearSolver
mtype :: Int
iparm :: Vector{Int32}
msglvl :: Int
"""
function PardisoSolver(
mtype::Integer,
iparm::AbstractVector{<:Integer},
msglvl::Integer)
PardisoSolver inner constructor.
"""
function PardisoSolver(
mtype::Integer,
iparm::AbstractVector{<:Integer},
msglvl::Integer)
@assert length(iparm) == 64
@assert mtype in (MTYPE_UNKNOWN,
MTYPE_REAL_STRUCTURALLY_SYMMETRIC,
MTYPE_REAL_SYMMETRIC_POSITIVE_DEFINITE,
MTYPE_REAL_SYMMETRIC_INDEFINITE,
MTYPE_REAL_NON_SYMMETRIC,
MTYPE_COMPLEX_STRUCTURALLY_SYMMETRIC,
MTYPE_COMPLEX_HERMITIAN_POSITIVE_DEFINITE,
MTYPE_COMPLEX_HERMITIAN_INDEFINITE,
MTYPE_COMPLEX_SYMMETRIC,
MTYPE_COMPLEX_NON_SYMMETRIC
)
new(Int(mtype), Vector{Int32}(iparm), Int(msglvl))
end
end
"""
function PardisoSolver(;
mtype=MTYPE_UNKNOWN,
iparm=new_iparm(mtype),
msglvl=MSGLVL_QUIET)
PardisoSolver outer constructor via optional key-word arguments.
"""
function PardisoSolver(;
mtype=MTYPE_UNKNOWN,
iparm=new_iparm(mtype),
msglvl=MSGLVL_QUIET)
PardisoSolver(mtype,iparm,msglvl)
end
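# Usage sketch (both keyword values below are constants defined in this package;
# with the defaults, the matrix type is inferred later from the matrix itself):
#
# ps = PardisoSolver()
# ps = PardisoSolver(mtype=MTYPE_REAL_SYMMETRIC_POSITIVE_DEFINITE, msglvl=MSGLVL_VERBOSE)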
# mutable needed for the finalizer
mutable struct PardisoSymbolicSetup{T,Ti,A<:AbstractSparseMatrix} <: SymbolicSetup
mtype::Int
iparm::Vector{Ti}
msglvl::Int
eltype::Type{T}
pt::Vector{Int}
mat::A # We need to take ownership
end
function symbolic_setup(ps::PardisoSolver,mat::AbstractSparseMatrix{T,Ti}) where {T,Ti}
pt = new_pardiso_handle()
mtype = get_mtype(ps.mtype,mat)
#pardisoinit!(pt,mtype,ps.iparm) # Warning! This would overwrite iparm
iparm = Vector{Ti}(copy(ps.iparm))
indexing = has_0_based_storage(mat) ? PARDISO_ZERO_BASED_INDEXING : PARDISO_ONE_BASED_INDEXING
iparm[IPARM_ONE_OR_ZERO_BASED_INDEXING] = indexing
msglvl = ps.msglvl
m,n = size(mat)
phase = PHASE_ANALYSIS
f! = get_pardiso(Ti)
err = f!( pt, # Handle to internal data structure. The entries must be set to zero prior to the first call to pardiso
maxfct, # Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
mnum, # Actual matrix for the solution phase. The value must be: 1 <= mnum <= maxfct.
mtype, # Defines the matrix type, which influences the pivoting method
phase, # Controls the execution of the solver (11 == Analysis)
n, # Number of equations in the sparse linear systems of equations
nonzeros(mat), # Contains the non-zero elements of the coefficient matrix A corresponding to the indices in ja
          getptr(mat), # Row (CSR) / column (CSC) pointers of the sparse matrix
          getindices(mat), # Column (CSR) / row (CSC) indices of the stored entries
Vector{Ti}(), # Permutation vector
nrhs, # Number of right-hand sides that need to be solved for
iparm, # This array is used to pass various parameters to Intel MKL PARDISO
msglvl, # Message level information
Vector{T}(), # Array, size (n, nrhs). On entry, contains the right-hand side vector/matrix
Vector{T}()) # Array, size (n, nrhs). If iparm(6)=0 it contains solution vector/matrix X
pardiso_report_error(err)
pss = PardisoSymbolicSetup(mtype,iparm,msglvl,T,pt,mat)
return finalizer(pardiso_finalize, pss)
end
function pardiso_finalize(pss::PardisoSymbolicSetup{T,Ti}) where {T,Ti}
mtype = pss.mtype
pt = pss.pt
iparm = pss.iparm
msglvl = pss.msglvl
mat = pss.mat
m,n = size(mat)
phase = PHASE_RELEASE_INTERNAL_MEMORY
f! = get_pardiso(Ti)
err = f!( pt, # Handle to internal data structure. The entries must be set to zero prior to the first call to pardiso
maxfct, # Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
mnum, # Actual matrix for the solution phase. The value must be: 1 <= mnum <= maxfct.
mtype, # Defines the matrix type, which influences the pivoting method
          phase, # Controls the execution of the solver (0 == Release internal memory)
n, # Number of equations in the sparse linear systems of equations
nonzeros(mat), # Contains the non-zero elements of the coefficient matrix A corresponding to the indices in ja
          getptr(mat), # Row (CSR) / column (CSC) pointers of the sparse matrix
          getindices(mat), # Column (CSR) / row (CSC) indices of the stored entries
Vector{Ti}(), # Permutation vector
nrhs, # Number of right-hand sides that need to be solved for
iparm, # This array is used to pass various parameters to Intel MKL PARDISO
msglvl, # Message level information
Vector{T}(), # Array, size (n, nrhs). On entry, contains the right-hand side vector/matrix
Vector{T}()) # Array, size (n, nrhs). If iparm(6)=0 it contains solution vector/matrix X
pardiso_report_error(err)
end
struct PardisoNumericalSetup{T,Ti} <: NumericalSetup
pss::PardisoSymbolicSetup{T,Ti} # We need to take ownership here
end
function numerical_setup(pss::PardisoSymbolicSetup{T,Ti},mat::AbstractSparseMatrix{T,Ti}) where {T,Ti}
pns = PardisoNumericalSetup(pss)
numerical_setup!(pns,mat)
pns
end
function numerical_setup!(pns::PardisoNumericalSetup{T,Ti},mat::AbstractSparseMatrix{T,Ti}) where {T,Ti}
mtype = pns.pss.mtype
iparm = pns.pss.iparm
msglvl = pns.pss.msglvl
pt = pns.pss.pt
m,n = size(mat)
phase = PHASE_NUMERICAL_FACTORIZATION
f! = get_pardiso(Ti)
err = f!( pt, # Handle to internal data structure. The entries must be set to zero prior to the first call to pardiso
maxfct, # Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
mnum, # Actual matrix for the solution phase. The value must be: 1 <= mnum <= maxfct.
mtype, # Defines the matrix type, which influences the pivoting method
          phase, # Controls the execution of the solver (22 == Numerical factorization)
n, # Number of equations in the sparse linear systems of equations
nonzeros(mat), # Contains the non-zero elements of the coefficient matrix A corresponding to the indices in ja
          getptr(mat), # Row (CSR) / column (CSC) pointers of the sparse matrix
          getindices(mat), # Column (CSR) / row (CSC) indices of the stored entries
Vector{Ti}(), # Permutation vector
nrhs, # Number of right-hand sides that need to be solved for
iparm, # This array is used to pass various parameters to Intel MKL PARDISO
msglvl, # Message level information
Vector{T}(), # Array, size (n, nrhs). On entry, contains the right-hand side vector/matrix
Vector{T}()) # Array, size (n, nrhs). If iparm(6)=0 it contains solution vector/matrix X
pardiso_report_error(err)
end
function solve!(x::AbstractVector{T},pns::PardisoNumericalSetup{T,Ti},b::AbstractVector{T}) where {T,Ti}
mtype = pns.pss.mtype
iparm = pns.pss.iparm
msglvl = pns.pss.msglvl
pt = pns.pss.pt
mat = pns.pss.mat
n = length(x)
@assert n == length(b)
@assert n == size(mat,1)
@assert n == size(mat,2)
phase = PHASE_SOLVE_ITERATIVE_REFINEMENT
set_iparm_transpose!(iparm,mat)
f! = get_pardiso(Ti)
err = f!( pt, # Handle to internal data structure. The entries must be set to zero prior to the first call to pardiso
maxfct, # Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
mnum, # Actual matrix for the solution phase. The value must be: 1 <= mnum <= maxfct.
mtype, # Defines the matrix type, which influences the pivoting method
          phase, # Controls the execution of the solver (33 == Solve with iterative refinement)
n, # Number of equations in the sparse linear systems of equations
nonzeros(mat), # Contains the non-zero elements of the coefficient matrix A corresponding to the indices in ja
          getptr(mat), # Row (CSR) / column (CSC) pointers of the sparse matrix
          getindices(mat), # Column (CSR) / row (CSC) indices of the stored entries
Vector{Ti}(), # Permutation vector
nrhs, # Number of right-hand sides that need to be solved for
iparm, # This array is used to pass various parameters to Intel MKL PARDISO
msglvl, # Message level information
b, # Array, size (n, nrhs). On entry, contains the right-hand side vector/matrix
x) # Array, size (n, nrhs). If iparm(6)=0 it contains solution vector/matrix X
reset_iparm_transpose!(iparm,mat)
pardiso_report_error(err)
x
end
function set_iparm_transpose!(iparm,mat)
if hascolmajororder(mat)
if iparm[IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED] == PARDISO_SOLVE_LINEAR_SYSTEM
iparm[IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED] = PARDISO_SOLVE_TRANSPOSED_SYSTEM
elseif iparm[IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED] == PARDISO_SOLVE_TRANSPOSED_SYSTEM
iparm[IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED] = PARDISO_SOLVE_LINEAR_SYSTEM
else
error(string("GridapPardiso Error: iparm[",
IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED,"] = ",
iparm[IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED],
" not supported."))
end
end
end
function reset_iparm_transpose!(iparm,mat)
set_iparm_transpose!(iparm,mat)
end
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 11530 |
# https://software.intel.com/en-us/mkl-developer-reference-fortran-intel-mkl-pardiso-parameters-in-tabular-form
###############################################################
# MSGLVL: Pardiso verbosity
###############################################################
const MSGLVL_QUIET = 0
const MSGLVL_VERBOSE = 1
###############################################################
# MTYPE: Pardiso matrix type
# This scalar value defines the matrix type. PARDISO supports the following matrices
###############################################################
# Real matrices
const MTYPE_REAL_STRUCTURALLY_SYMMETRIC = 1
const MTYPE_REAL_SYMMETRIC_POSITIVE_DEFINITE = 2
const MTYPE_REAL_SYMMETRIC_INDEFINITE = -2
const MTYPE_REAL_NON_SYMMETRIC = 11
# Complex matrices
const MTYPE_COMPLEX_STRUCTURALLY_SYMMETRIC = 3
const MTYPE_COMPLEX_HERMITIAN_POSITIVE_DEFINITE = 4
const MTYPE_COMPLEX_HERMITIAN_INDEFINITE = -4
const MTYPE_COMPLEX_SYMMETRIC = 6
const MTYPE_COMPLEX_NON_SYMMETRIC = 13
###############################################################
# PHASE: Controls the execution of the solver
#
# Usually it is a two- or three-digit integer.
# The first digit indicates the starting phase of execution and the second digit indicates the ending phase.
# Intel MKL PARDISO has the following phases of execution:
# 1. Phase 1: Fill-reduction analysis and symbolic factorization
# 2. Phase 2: Numerical factorization
# 3. Phase 3: Forward and Backward solve including iterative refinement.
# This phase can be divided into two or three separate substitutions: forward, backward, and diagonal.
# 4. Termination and Memory Release Phase (PHASE ≤ 0)
###############################################################
const PHASE_ANALYSIS = 11
const PHASE_ANALYSIS_NUMERICAL_FACTORIZATION = 12
const PHASE_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT = 13
const PHASE_NUMERICAL_FACTORIZATION = 22
const PHASE_SELECTED_INVERSION = -22
const PHASE_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT = 23
const PHASE_SOLVE_ITERATIVE_REFINEMENT = 33
const PHASE_SOLVE_ITERATIVE_REFINEMENT_FORWARD_SUBSTITUTION = 331
const PHASE_SOLVE_ITERATIVE_REFINEMENT_DIAGONAL_SUBSTITUTION = 332
const PHASE_SOLVE_ITERATIVE_REFINEMENT_BACKWARD_SUBSTITUTION = 333
const PHASE_RELEASE_INTERNAL_MEMORY = 0
const PHASE_RELEASE_ALL_INTERNAL_MEMORY = -1
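# For instance, PHASE_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT = 13
# reads as: start at phase 1 (analysis) and stop after phase 3 (solve).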
"""
pardiso_report_error(code::Int)
Report Pardiso error given its code.
"""
function pardiso_report_error(code::Int)
if code < 0
code == -1 &&
error(string("Pardiso Error (", code, "): ", "Input inconsistent."))
code == -2 &&
error(string("Pardiso Error (", code, "): ", "Not enough memory."))
code == -3 &&
error(string("Pardiso Error (", code, "): ", "Reordering problem."))
code == -4 &&
error(string("Pardiso Error (", code, "): ",
"Zero pivot, numerical fact. or iterative refinement problem."))
code == -5 &&
error(string("Pardiso Error (", code, "): ", "Unclassified (internal) error."))
code == -6 &&
      error(string("Pardiso Error (", code, "): ", "Reordering failed (matrix types 11, 13 only)."))
code == -7 &&
error(string("Pardiso Error (", code, "): ", "Diagonal matrix is singular."))
code == -8 &&
error(string("Pardiso Error (", code, "): ", "32-bit integer overflow problem."))
code == -10 &&
error(string("Pardiso Error (", code, "): ", "Error opening OOC files."))
code == -11 &&
error(string("Pardiso Error (", code, "): ", "Read/write error with OOC files."))
code == -12 &&
error(string("Pardiso Error (", code, "): ", "pardiso_64 called from 32-bit library."))
code == -13 &&
error(string("Pardiso Error (", code, "): ", "Interrupted by the (user-defined) mkl_progress function."))
code == -15 &&
error(string("Pardiso Error (", code, "): ",
"Internal error which can appear for iparm(24)=10 and iparm(13)=1. ",
"Try switch matching off (set iparm(13)=0 and rerun.)."))
error(string("Pardiso Error (", code, "): ", "Unknown error code."))
end
end
###############################################################
# Parameter values for IPARM[12]
# IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED
###############################################################
const PARDISO_SOLVE_LINEAR_SYSTEM = 0 # linear system.
const PARDISO_SOLVE_CONJUGATE_TRANSPOSED_SYSTEM = 1 # conjugate transposed system
const PARDISO_SOLVE_TRANSPOSED_SYSTEM = 2 # transposed system
###############################################################
# Parameter values for IPARM[35]
# IPARM_ONE_OR_ZERO_BASED_INDEXING
###############################################################
const PARDISO_ONE_BASED_INDEXING = 0 # One-based indexing
const PARDISO_ZERO_BASED_INDEXING = 1 # Zero-based indexing
###############################################################
# Iparm parameters
#
# [I] -> input
# [O] -> output
# for iparm[i], where i is:
#
###############################################################
# 1: [I] Use default values
const IPARM_USE_DEFAULT_VALUES = 1
# 2: [I] Fill-in reducing ordering for the input matrix.
const IPARM_FILL_IN_REDUCING_ORDERING = 2
# 3: [-] Reserved. Set to zero.
# 4: [I] Preconditioned CGS/CG.
const IPARM_PRECONDITIONED_CGS_CG = 4
# 5: [I] User permutation.
const IPARM_USER_PERMUTATION = 5
# 6: [I] Write solution on x.
const IPARM_WRITE_SOLUTION_ON_X = 6
# 7: [O] Number of iterative refinement steps performed.
const IPARM_NUMBER_ITERATIVE_REFINEMENT_STEPS = 7
# 8: [I] Iterative refinement step.
const IPARM_ITERATIVE_REFINEMENT_STEP = 8
# 9: [-] Reserved. Set to zero.
# 10: [I] Pivoting perturbation.
const IPARM_PIVOTING_PERTURBATION = 10
# 11: [I] Scaling vectors.
const IPARM_SCALING_VECTORS = 11
# 12: [I] Solve with transposed or conjugate transposed matrix A.
const IPARM_TRANSPOSED_OR_CONJUGATED_TRANSPOSED = 12
# 13: [I] Improved accuracy using (non-) symmetric weighted matching.
const IPARM_NON_SYMMETRIC_WEIGHTED_MATCHING = 13
# 14: [O] Number of perturbed pivots.
const IPARM_NUMBER_OF_PERTURBED_PIVOTS = 14
# 15: [O] Peak memory on symbolic factorization.
const IPARM_PEAK_MEMORY_ON_SYMBOLIC_FACTORIZATION = 15
# 16: [O] Permanent memory on symbolic factorization.
const IPARM_PERMANENT_MEMORY_ON_SYMBOLIC_FACTORIZATION = 16
# 17: [O] Size of factors/Peak memory on numerical factorization and solution.
const IPARM_SIZE_OF_FACTORS_PEAK_MEMORY_ON_NUMERICAL_FACTORIZATION_AND_SOLUTION = 17
# 18: [I/O] Report the number of non-zero elements in the factors.
const IPARM_REPORT_NUMBER_OF_NON_ZEROS_IN_FACTORS = 18
# 19: [I/O] Report number of floating point operations (in 10^6 floating point operations) that are necessary to factor the matrix A.
const IPARM_REPORT_NUMBER_OF_FLOATING_POINT_OPERATIONS = 19
# 20: [O] Report CG/CGS diagnostics.
const IPARM_REPORT_CG_CGS_DIAGNOSTICS = 20
# 21: [I] Pivoting for symmetric indefinite matrices.
const IPARM_PIVOTING_FOR_SYMMETRIC_INDEFINITE_MATRICES = 21
# 22: [O] Inertia: number of positive eigenvalues.
const IPARM_NUMBER_OF_POSITIVE_EIGENVALUES = 22
# 23: [O] Inertia: number of negative eigenvalues.
const IPARM_NUMBER_OF_NEGATIVE_EIGENVALUES = 23
# 24: [I] Parallel factorization control.
const IPARM_PARALLEL_FACTORIZATION_CONTROL = 24
# 25: [I] Parallel forward/backward solve control.
const IPARM_PARALLEL_FORWARD_BACKWARD_SOLVE_CONTROL = 25
# 26: [-] Reserved. Set to zero.
# 27: [I] Matrix checker.
const IPARM_MATRIX_CHECKER = 27
# 28: [I] Single or double precision Intel MKL PARDISO.
const IPARM_SINGLE_OR_DOUBLE_PRECISION = 28
# 29: [-] Reserved. Set to zero.
# 30: [O] Number of zero or negative pivots.
const IPARM_NUMBER_OF_ZERO_OR_NEGATIVE_PIVOTS = 30
# 31: [I] Partial solve and computing selected components of the solution vectors.
const IPARM_PARTIAL_SOLVE_AND_COMPUTING_SELECTED_COMPONENTS = 31
# 32: [-] Reserved. Set to zero.
# 33: [-] Reserved. Set to zero.
# 34: [I] Optimal number of OpenMP threads for conditional numerical reproducibility (CNR) mode.
const IPARM_OPTIMAL_NUMBER_OF_OPENMP_THREADS = 34
# 35: [I] One- or zero-based indexing of columns and rows.
const IPARM_ONE_OR_ZERO_BASED_INDEXING = 35
# 36: [I/O] Schur complement matrix computation control.
const IPARM_SCHUR_COMPLEMENT_MATRIX = 36
# 37: [I] Format for matrix storage.
const IPARM_FORMAT_FOR_MATRIX_STORAGE = 37
# 38: [-] Reserved. Set to zero.
# 39: [-] Enable low rank update to accelerate factorization for multiple matrices with identical structure and similar values.
const IPARM_ENABLE_LOW_RANK = 39
# 40: [-] Reserved. Set to zero.
# 41: [-] Reserved. Set to zero.
# 42: [-] Reserved. Set to zero.
# 43: [-] Control parameter for the computation of the diagonal of inverse matrix.
const IPARM_CONTROL_PARAMETER_FOR_COMPUTATION_OF_THE_DIAGONAL_OF_INVERSE_MATRIX = 43
# 44: [-] Reserved. Set to zero.
# 45: [-] Reserved. Set to zero.
# 46: [-] Reserved. Set to zero.
# 47: [-] Reserved. Set to zero.
# 48: [-] Reserved. Set to zero.
# 49: [-] Reserved. Set to zero.
# 50: [-] Reserved. Set to zero.
# 51: [-] Reserved. Set to zero.
# 52: [-] Reserved. Set to zero.
# 53: [-] Reserved. Set to zero.
# 54: [-] Reserved. Set to zero.
# 55: [-] Reserved. Set to zero.
# 56: [-] Diagonal and pivoting control.
# 57: [-] Reserved. Set to zero.
# 58: [-] Reserved. Set to zero.
# 59: [-] Reserved. Set to zero.
# 60: [I] Intel MKL PARDISO mode.
const IPARM_INTEL_MKL_PARDISO_MODE = 60
# 61: [-] Reserved. Set to zero.
# 62: [-] Reserved. Set to zero.
# 63: [O] Size of the minimum OOC memory for numerical factorization and solution.
const IPARM_SIZE_OF_THE_MINIMUM_OCC_MEMORY = 63
# 64: [-] Reserved. Set to zero.
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 3457 |
macro check_if_loaded()
quote
if ! MKL_PARDISO_LOADED[]
error("MKL pardiso is not properly loaded")
end
end
end
function pardisoinit!(
pt::Vector{Int},
mtype::Integer,
iparm::Vector{Int32})
@check_if_loaded
ccall(
pardisoinit_sym[],
Cvoid, (
Ptr{Int},
Ptr{Int32},
Ptr{Int32}),
pt,
Ref(Int32(mtype)),
iparm)
end
function pardiso!(
pt::Vector{Int},
maxfct::Integer,
mnum::Integer,
mtype::Integer,
phase::Integer,
n::Integer,
a::Vector{T},
ia::Vector{Int32},
ja::Vector{Int32},
perm::Vector{Int32},
nrhs::Integer,
iparm::Vector{Int32},
msglvl::Integer,
b::Vector{T},
x::Vector{T}) where T
@check_if_loaded
@assert T == pardiso_data_type(mtype,iparm)
err = Ref(zero(Int32))
ccall(
pardiso_sym[],
Cvoid, (
Ptr{Int},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Cvoid},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Int32},
Ptr{Cvoid},
Ptr{Cvoid},
Ptr{Int32}),
pt,
Ref(Int32(maxfct)),
Ref(Int32(mnum)),
Ref(Int32(mtype)),
Ref(Int32(phase)),
Ref(Int32(n)),
a,
ia,
ja,
perm,
Ref(Int32(nrhs)),
iparm,
Ref(Int32(msglvl)),
b,
x,
err)
return Int(err[])
end
function pardiso_64!(
pt::Vector{Int},
maxfct::Integer,
mnum::Integer,
mtype::Integer,
phase::Integer,
n::Integer,
a::Vector{T},
ia::Vector{Int64},
ja::Vector{Int64},
perm::Vector{Int64},
nrhs::Integer,
iparm::Vector{Int64},
msglvl::Integer,
b::Vector{T},
x::Vector{T}) where T
@check_if_loaded
@assert T == pardiso_data_type(mtype,iparm)
err = Ref(zero(Int64))
ccall(
pardiso_64_sym[],
Cvoid, (
Ptr{Int},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Cvoid},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Int64},
Ptr{Cvoid},
Ptr{Cvoid},
Ptr{Int64}),
pt,
Ref(Int64(maxfct)),
Ref(Int64(mnum)),
Ref(Int64(mtype)),
Ref(Int64(phase)),
Ref(Int64(n)),
a,
ia,
ja,
perm,
Ref(Int64(nrhs)),
iparm,
Ref(Int64(msglvl)),
b,
x,
err)
return Int(err[])
end
function pardiso_getdiag!(
pt::Vector{Int},
df::Vector{T},
da::Vector{T},
mnum::Integer,
mtype::Integer,
iparm::Vector{<:Integer}) where T
@assert T == pardiso_data_type(mtype,iparm)
pardiso_getdiag!(pt,df,da,mnum)
end
function pardiso_getdiag!(
pt::Vector{Int},
df::Vector{T},
da::Vector{T},
mnum::Integer) where T
@check_if_loaded
err = Ref(zero(Int32))
ccall(
pardiso_getdiag_sym[],
Cvoid,(
Ptr{Int},
Ptr{Cvoid},
Ptr{Cvoid},
Ptr{Int32},
Ptr{Int32}),
pt,
df,
da,
Ref(Int32(mnum)),
err)
return Int(err[])
end
function pardiso_data_type(mtype::Integer,iparm::Vector{<:Integer})
# Rules taken from
# https://software.intel.com/en-us/mkl-developer-reference-fortran-pardiso-data-type
T::DataType = Any
if mtype in (1,2,-2,11)
if iparm[28] == 0
T = Float64
else
T = Float32
end
elseif mtype in (3,6,13,4,-4)
if iparm[28] == 0
T = Complex{Float64}
else
T = Complex{Float32}
end
else
error("Unknown matrix type: mtype = $mtype")
end
T
end
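# Example (sketch; MTYPE_REAL_NON_SYMMETRIC = 11 and new_iparm are defined
# elsewhere in this package):
#
# iparm = Vector{Int32}(new_iparm())
# pardiso_data_type(MTYPE_REAL_NON_SYMMETRIC, iparm) == Float64 # iparm[28] == 0 means double precision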
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 506 |
function load_mkl_gcc(mkllibdir,gcclibdir)
lmkl_intel_lp64 = joinpath(mkllibdir,"libmkl_intel_lp64")
lmkl_gnu_thread = joinpath(mkllibdir,"libmkl_gnu_thread")
lmkl_core = joinpath(mkllibdir,"libmkl_core")
lgomp = joinpath(gcclibdir,"libgomp")
flags = Libdl.RTLD_LAZY | Libdl.RTLD_DEEPBIND | Libdl.RTLD_GLOBAL
Libdl.dlopen(lgomp, flags)
Libdl.dlopen(lmkl_core, flags)
Libdl.dlopen(lmkl_gnu_thread, flags)
libmkl = Libdl.dlopen(lmkl_intel_lp64, flags)
libmkl
end
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 7981 | module LinearSolverTests
using Gridap.Algebra
using GridapPardiso
using Test
using SparseArrays
using SparseMatricesCSR
tol = 1.0e-13
#####################################################
# SparseMatrixCSC
#####################################################
#
# Matrix from Intel MKL Pardiso examples
#
# DATA ia /1,4,6,9,12,14/
# DATA ja
# 1 /1,2, 4,
# 2 1,2,
# 3 3,4,5,
# 4 1, 3,4,
# 5 2, 5/
# DATA a
# 1 / 1.d0,-1.d0, -3.d0,
# 2 -2.d0, 5.d0,
# 3 4.d0, 6.d0, 4.d0,
# 4 -4.d0, 2.d0, 7.d0,
# 5 8.d0, -5.d0/
#####################################################
I_ = [1,1,1,2,2,3,3,3,4,4,4,5,5]
J_ = [1,2,4,1,2,3,4,5,1,3,4,2,5]
V_ = [1,-1,-3,-2,5,4,6,4,-4,2,7,8,-5]
rows = 5
cols = 5
# pardiso!
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSC,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSC,I,J,V,rows, cols)
A = sparse(I,J,V,rows,cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_NON_SYMMETRIC, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
if Int == Int64
# pardiso_64!
I = Vector{Int64}(); J = Vector{Int64}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSC,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSC,I,J,V,rows, cols)
A = sparse(I,J,V,rows, cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_NON_SYMMETRIC, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
end
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Complex{Float64}}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSC,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSC,I,J,V,rows, cols)
A = sparse(I,J,V,rows,cols)
b = ones(Complex{Float64},size(A)[2])
x = similar(b)
ps = PardisoSolver(msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
#####################################################
# SparseMatrixCSR
#####################################################
#
# Matrix from Intel MKL Pardiso examples
#
# DATA ia /1,4,6,9,12,14/
# DATA ja
# 1 /1,2, 4,
# 2 1,2,
# 3 3,4,5,
# 4 1, 3,4,
# 5 2, 5/
# DATA a
# 1 / 1.d0,-1.d0, -3.d0,
# 2 -2.d0, 5.d0,
# 3 4.d0, 6.d0, 4.d0,
# 4 -4.d0, 2.d0, 7.d0,
# 5 8.d0, -5.d0/
#####################################################
I_ = [1,1,1,2,2,3,3,3,4,4,4,5,5]
J_ = [1,2,4,1,2,3,4,5,1,3,4,2,5]
V_ = [1,-1,-3,-2,5,4,6,4,-4,2,7,8,-5]
rows = 5
cols = 5
# pardiso!
for Bi in (0,1)
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSR,I,J,V,rows,cols)
A = sparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_NON_SYMMETRIC, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
if Int == Int64
# pardiso_64!
I = Vector{Int64}(); J = Vector{Int64}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSR,I,J,V,rows,cols)
A = sparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_NON_SYMMETRIC, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
end
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Complex{Float64}}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SparseMatrixCSR,I,J,V,rows,cols)
A = sparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(Complex{Float64},size(A)[2])
x = similar(b)
ps = PardisoSolver(msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
end
#####################################################
# SymSparseMatrixCSR
#####################################################
#
# Matrix from Intel MKL Pardiso examples
#
# ia = (/ 1, 5, 8, 10, 12, 15, 17, 18, 19 /)
# ja = (/ 1, 3, 6, 7, &
# 2, 3, 5, &
# 3, 8, &
# 4, 7, &
# 5, 6, 7, &
# 6, 8, &
# 7, &
# 8 /)
# a = (/ 7.d0, 1.d0, 2.d0, 7.d0, &
# -4.d0, 8.d0, 2.d0, &
# 1.d0, 5.d0, &
# 7.d0, 9.d0, &
# 5.d0, 1.d0, 5.d0, &
# -1.d0, 5.d0, &
# 11.d0, &
# 5.d0 /)
#####################################################
I_ = [1,1,1,1,2,2,2,3,3,4,4,5,5,5,6,6,7,8]
J_ = [1,3,6,7,2,3,5,3,8,4,7,5,6,7,6,8,7,8]
V_ = [7,1,2,7,-4,8,2,1,5,7,9,5,1,5,-1,5,11,5]
rows = 8
cols = 8
# pardiso!
for Bi in (0,1)
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SymSparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SymSparseMatrixCSR,I,J,V,rows,cols)
A = symsparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_SYMMETRIC_INDEFINITE, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
if Int == Int64
# pardiso_64!
I = Vector{Int64}(); J = Vector{Int64}(); V = Vector{Float64}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SymSparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SymSparseMatrixCSR,I,J,V,rows,cols)
A = symsparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(size(A)[2])
x = similar(b)
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_SYMMETRIC_INDEFINITE, msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
end
I = Vector{Int32}(); J = Vector{Int32}(); V = Vector{Complex{Float64}}()
for (ik, jk, vk) in zip(I_,J_,V_)
push_coo!(SymSparseMatrixCSR,I,J,V,ik,jk,vk)
end
finalize_coo!(SymSparseMatrixCSR,I,J,V,rows,cols)
A = symsparsecsr(Val{Bi}(),I,J,V,rows,cols)
b = ones(Complex{Float64},size(A)[2])
x = similar(b)
ps = PardisoSolver(msglvl=GridapPardiso.MSGLVL_VERBOSE)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
@test maximum(abs.(A*x-b)) < tol
test_linear_solver(ps, A, b, x)
end
end
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 1851 | module bingingstests
using GridapPardiso
using Test
using SparseArrays
# Define linear system
mtype = GridapPardiso.MTYPE_REAL_NON_SYMMETRIC
A = sparse([
0. -2 3 0
-2 4 -4 1
-3 5 1 1
1 -3 0 2 ])
n = A.n
b = Float64[1, 3, 2, 5]
x = zeros(Float64,n)
# Create the pardiso internal handler
pt = new_pardiso_handle()
# pardisoinit!
iparm = Vector{Int32}(new_iparm())
pardisoinit!(pt,mtype,iparm)
# pardiso! (solving the transpose of the system above)
maxfct = 1
mnum = 1
phase = GridapPardiso.PHASE_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT
a = A.nzval
ia = Vector{Int32}(A.colptr)
ja = Vector{Int32}(A.rowval)
perm = zeros(Int32,n)
nrhs = 1
msglvl = 0
err = pardiso!(
pt,maxfct,mnum,mtype,phase,n,a,ia,ja,perm,nrhs,iparm,msglvl,b,x)
tol = 1.0e-13
@test err == 0
@test maximum(abs.(A'*x-b)) < tol
# pardiso! (solving the system above)
iparm[12] = 2
err = pardiso!(
pt,maxfct,mnum,mtype,phase,n,a,ia,ja,perm,nrhs,iparm,msglvl,b,x)
@test err == 0
@test maximum(abs.(A*x-b)) < tol
@test pardiso_data_type(mtype,iparm) == Float64
# pardiso_getdiag!
iparm[56] = 1
pt = new_pardiso_handle()
err = pardiso!(
pt,maxfct,mnum,mtype,phase,n,a,ia,ja,perm,nrhs,iparm,msglvl,b,x)
df = zeros(Float64,n)
da = zeros(Float64,n)
err = pardiso_getdiag!(pt,df,da,mnum,mtype,iparm)
@test err == 0
err = pardiso_getdiag!(pt,df,da,mnum)
@test err == 0
if Int == Int64
# pardiso_64! (solving the transpose of the system above)
pt = new_pardiso_handle()
a = A.nzval
ia = A.colptr
ja = A.rowval
perm = zeros(Int64,n)
iparm = Vector{Int64}(new_iparm())
err = pardiso_64!(
pt,maxfct,mnum,mtype,phase,n,a,ia,ja,perm,nrhs,iparm,msglvl,b,x)
@test err == 0
# check the solution of the transposed system
@test maximum(abs.(A'*x-b)) < tol
end
end
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 1255 | module FEMDriver
using Test
using Gridap
using GridapPardiso
using SparseMatricesCSR
tol = 1e-10
domain = (0,1,0,1,0,1)
partition = (10,10,10)
# Simple 2D data for debugging. TODO: remove when fixed.
domain = (0,1,0,1)
partition = (3,3)
model = CartesianDiscreteModel(domain,partition)
order=1
reffe = ReferenceFE(lagrangian,Float64,order)
V = FESpace(model,
reffe,
conformity=:H1,
dirichlet_tags="boundary")
U = TrialFESpace(V)
trian = get_triangulation(model)
dΩ = Measure(trian,2)
a(u,v)=∫(∇(v)⋅∇(u))dΩ
f(x)=x[1]*x[2]
l(v)=∫(v*f)dΩ
# With non-symmetric storage
assem = SparseMatrixAssembler(SparseMatrixCSR{1,Float64,Int},Vector{Float64},U,V)
op = AffineFEOperator(a,l,U,V,assem)
ls = PardisoSolver()
solver = LinearFESolver(ls)
uh = solve(solver,op)
x = get_free_dof_values(uh)
A = get_matrix(op)
b = get_vector(op)
r = A*x - b
@test maximum(abs.(r)) < tol
# With symmetric storage
assem = SparseMatrixAssembler(SymSparseMatrixCSR{1,Float64,Int},Vector{Float64},U,V)
op = AffineFEOperator(a,l,U,V,assem)
ls = PardisoSolver()
solver = LinearFESolver(ls)
uh = solve(solver,op)
x = get_free_dof_values(uh)
A = get_matrix(op)
b = get_vector(op)
r = A*x - b
@test maximum(abs.(r)) < tol
end #module
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | code | 292 | module GridapPardisoTests
using GridapPardiso
using Test
if GridapPardiso.MKL_PARDISO_LOADED[]
@testset "Pardiso bindings" begin include("bindings.jl") end
@testset "Linear solver" begin include("LinearSolver.jl") end
@testset "FEM driver" begin include("femdriver.jl") end
end
end
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | docs | 4799 | # GridapPardiso
[](https://gridap.github.io/GridapPardiso.jl/stable)
[](https://gridap.github.io/GridapPardiso.jl/dev)
[](https://github.com/gridap/GridapPardiso.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/gridap/GridapPardiso.jl)
[Gridap](https://github.com/gridap/Gridap.jl) (Grid-based approximation of partial differential equations in Julia) plugin to use the [Intel Pardiso OneAPI MKL direct sparse solver](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html).
## Basic Usage
```julia
using Gridap
using GridapPardiso
A = sparse([1,2,3,4,5],[1,2,3,4,5],[1.0,2.0,3.0,4.0,5.0])
b = ones(A.n)
x = similar(b)
msglvl = 1
ps = PardisoSolver(mtype=GridapPardiso.MTYPE_REAL_NON_SYMMETRIC, msglvl=msglvl)
ss = symbolic_setup(ps, A)
ns = numerical_setup(ss, A)
solve!(x, ns, b)
```
## Usage in a Finite Element computation
```julia
using Gridap
using GridapPardiso
using SparseMatricesCSR
# Define the FE problem
# -Δu = x*y in (0,1)^3, u = 0 on the boundary.
model = CartesianDiscreteModel((0,1,0,1,0,1), (10,10,10))
order=1
reffe = ReferenceFE(lagrangian,Float64,order)
V = FESpace(model,
reffe,
conformity=:H1,
dirichlet_tags="boundary")
U = TrialFESpace(V)
trian = get_triangulation(model)
dΩ = Measure(trian,2)
a(u,v)=∫(∇(v)⋅∇(u))dΩ
f(x)=x[1]*x[2]
l(v)=∫(v*f)dΩ
assem = SparseMatrixAssembler(SparseMatrixCSR{1,Float64,Int},Vector{Float64},U,V)
op = AffineFEOperator(a,l,U,V,assem)
ls = PardisoSolver()
solver = LinearFESolver(ls)
uh = solve(solver,op)
```
## Installation
**GridapPardiso** itself is installed when you add it to another project and use it.
First, ensure that your system fulfills the requirements (see instructions below). Only after these steps, include it into your project from the Julia REPL with the following commands:
```
pkg> add GridapPardiso
julia> using GridapPardiso
```
If, for any reason, you need to manually build the project (e.g., you added the package in the wrong environment, the build failed, and you have since fixed the environment and want to re-build), run the following commands in the Julia REPL:
```
pkg> add GridapPardiso
pkg> build GridapPardiso
julia> using GridapPardiso
```
### Requirements
**GridapPardiso** requires the following software to be installed on your system:
1. Intel oneAPI MKL library. In particular, **GridapPardiso** relies on the
[Intel Pardiso oneAPI MKL direct sparse solver](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html).
2. GNU C compiler (`gcc`) + GNU `OpenMP` library (`libgomp`).
In order to find 1., the build system of **GridapPardiso** relies on the `MKLROOT` environment variable. This variable must point to the MKL installation directory on your system. [Intel oneAPI MKL](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html) includes the `mklvars.sh` Unix shell script in order to set up this environment variable appropriately. Assuming that `/opt/intel/mkl/` is the Intel MKL installation directory on your system, you have to run this script using the following command (preferably in a script that is executed automatically when a new shell is opened):
```
$ source /opt/intel/mkl/bin/mklvars.sh intel64
```
In order to find 2., there are two alternatives:
* The user may optionally set the `GRIDAP_PARDISO_LIBGOMP_DIR` environment variable. This variable must contain the absolute path to the folder in which the `libgomp` dynamic library file resides on your system.
* The build system tries to do its best to find `libgomp` on the system.
If `GRIDAP_PARDISO_LIBGOMP_DIR` is defined, then the build system follows the first alternative. If not, then it follows the second. Thus, the environment variable has precedence over the default behaviour of trying to find the library automatically.
In general, the user may let the build system to find `libgomp` in the first place. If the build system fails, or it finds an undesired version of `libgomp`, then the environment variable can be used as a fallback solution, e.g., for those systems with a non-standard installation of `libgomp`, and/or several simultaneous installations of `libgomp`.
We note that, in Debian-based Linux OSs, the following commands can be run in order to satisfy requirement 2. (typically executed as sudo):
```
$ apt-get update
$ apt-get install -y gcc libgomp1
```
In such systems, the build system is able to automatically find `libgomp`.
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.7.0 | d5124a3d4803cc1171ece001bc02cb4c7d063468 | docs | 78 | # GridapPardiso.jl
```@index
```
```@autodocs
Modules = [GridapPardiso]
```
| GridapPardiso | https://github.com/gridap/GridapPardiso.jl.git |
|
[
"MIT"
] | 0.1.2 | 8bfd52b3c5eca486cf54fc40be458c2014786661 | code | 4319 | module DependencyWalker
using ObjectFile, Crayons
import Libdl
export Library
struct Library{OH<:Union{ObjectHandle,Missing,Nothing}}
path::String
handle_type::Type{OH}
level::Int
deps::Vector{Library}
# Whether to show libraries whose metadata can't be read. They're mostly
# noisy.
shownothing::Bool
end
function Library(path::String, level::Int = 0; shownothing::Bool=false)
if !isfile(path)
# TODO: should check also if it can be opened?
return Library(path, Missing, level, Library[], shownothing)
end
io = open(path, "r")
oh, deps_names = try
oh = readmeta(io)
# Get the list of needed libraries
oh, keys(find_libraries(oh))
catch
nothing, []
end
close(io)
deps = dependency_tree(deps_names, level; shownothing=shownothing)
return Library(path, typeof(oh), level, deps, shownothing)
end
Library(path::String, oht::Type{T}, level::Int; shownothing::Bool=false) where {T<:Union{Missing,Nothing}} =
Library{T}(path, oht, level, Library[], shownothing)
Library(path::AbstractString, oht, level; shownothing::Bool=false) =
Library(String(path), oht, level; shownothing=shownothing)
# Check if `lib` matches `open_lib`, one of the libraries currently loaded in
# the system. The condition to ignore the case is not quite accurate as this is
# a file-system property rather than an OS one, but in most cases this is
# correct.
@static if Sys.iswindows() || Sys.isapple()
is_library_open(lib, open_lib) = occursin(lowercase(lib), lowercase(open_lib))
else
is_library_open(lib, open_lib) = occursin(lib, open_lib)
end
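# For example, is_library_open("libssh2.so", "/usr/lib/libssh2.so.1") is true on
# every platform; on Windows and macOS the comparison additionally ignores case.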
function dependency_tree(deps_names, level::Int; dlext::String = Libdl.dlext, shownothing::Bool=false)
# Initialise list of dependencies
deps = Library[]
# Get list of already dlopen'ed libraries
open_dls = Libdl.dllist()
for dep in deps_names
if Sys.iswindows() && occursin(r"^api-ms-win-.*\.dll"i, dep)
            # Skip all "api-ms-win-*" libraries on Windows
continue
end
split_dep = split(dep, '.')
# Get rid of the soversion. TODO: this is only for GNU/Linux and
# FreeBSD, no idea what we have to do for the other operating systems.
idx = findlast(isequal(dlext), split_dep)
if !isnothing(idx)
dep = join(split_dep[1:idx], '.')
end
# Get the first dlopen'ed library matching the needed library, if any
idx = findfirst(d -> is_library_open(dep, d), open_dls)
if isnothing(idx)
# Push a missing library to the list
push!(deps, Library(dep, Missing, level + 1; shownothing=shownothing))
else
# Push the found library to the list
push!(deps, Library(open_dls[idx], level + 1; shownothing=shownothing))
end
end
return deps
end
reduce_hash(x::UInt64) = Base.hash_64_32(x)
reduce_hash(x::UInt32) = x
function Base.show(io::IO, lib::Library{T}; shownothing::Bool=false) where {T}
if T === Missing
symbol = "✗"
extra_info = " (NOT FOUND)"
elseif T === Nothing
symbol = "❓"
extra_info = " (COULD NOT READ METADATA)"
else
symbol = "◼"
extra_info = ""
end
    if !(T === Nothing && !(shownothing || lib.shownothing))
if lib.level > 0
println(io)
end
print(io, repeat(" ", lib.level), Crayon(foreground = reduce_hash(hash(lib.path))), symbol,
Crayon(reset=true), " ", lib.path, Crayon(foreground = :yellow), "$(extra_info)")
for dep in lib.deps
show(io, dep)
end
end
end
"""
DependencyWalker lets you walk through the dependencies of a shared library loaded in Julia.
The package exports a single function, `Library`, which takes as only argument the path to the shared library:
```
julia> using DependencyWalker, LibSSH2_jll
julia> LibSSH2_jll.libssh2_path # Path to the libssh2 library
"/home/user/.julia/artifacts/26c7d3a6c17151277018b133ab0034e93ddc3d1e/lib/libssh2.so"
julia> Library(LibSSH2_jll.libssh2_path)
◼ /home/user/.julia/artifacts/26c7d3a6c17151277018b133ab0034e93ddc3d1e/lib/libssh2.so
◼ /usr/bin/../lib/julia/libmbedtls.so.12
◼ /usr/bin/../lib/julia/libmbedx509.so.0
...
```
"""
DependencyWalker
end # module
| DependencyWalker | https://github.com/giordano/DependencyWalker.jl.git |
|
[
"MIT"
] | 0.1.2 | 8bfd52b3c5eca486cf54fc40be458c2014786661 | code | 275 | using DependencyWalker
using Test
using ObjectFile, Pango_jll
@testset "DependencyWalker.jl" begin
pango = Library(Pango_jll.libpango_path)
@show pango
@test pango isa Library{<:ObjectHandle}
@test Library("this does not exist.foo") isa Library{Missing}
end
| DependencyWalker | https://github.com/giordano/DependencyWalker.jl.git |
|
[
"MIT"
] | 0.1.2 | 8bfd52b3c5eca486cf54fc40be458c2014786661 | docs | 2198 | # DependencyWalker
[](https://travis-ci.com/giordano/DependencyWalker.jl)
[](https://cloud.drone.io/giordano/DependencyWalker.jl)
## Introduction
Walk through the dependencies of a shared library loaded in
[Julia](https://julialang.org/), similarly to what [Dependency
Walker](https://en.wikipedia.org/wiki/Dependency_Walker) does with shared
libraries on your system.
## Installation
This package is registered, so you can install it by entering the package
manager mode in the REPL with the `]` key and running the command
```
add DependencyWalker
```
## Usage
The package exports a single function, `Library`, which takes as only argument
the path to the shared library:
```julia
julia> using DependencyWalker, LibSSH2_jll
julia> LibSSH2_jll.libssh2_path # Path to the libssh2 library
"/home/user/.julia/artifacts/26c7d3a6c17151277018b133ab0034e93ddc3d1e/lib/libssh2.so"
julia> Library(LibSSH2_jll.libssh2_path)
◼ /home/user/.julia/artifacts/26c7d3a6c17151277018b133ab0034e93ddc3d1e/lib/libssh2.so
◼ /usr/bin/../lib/julia/libmbedtls.so.12
◼ /usr/bin/../lib/julia/libmbedx509.so.0
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/julia/libmbedcrypto.so.3
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/julia/libmbedcrypto.so.3
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/julia/libmbedx509.so.0
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/julia/libmbedcrypto.so.3
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
◼ /usr/bin/../lib/julia/libmbedcrypto.so.3
◼ /usr/bin/../lib/libc.so.6
◼ /lib64/ld-linux-x86-64.so.2
```
## License
The `DependencyWalker.jl` package is licensed under the MIT "Expat" License.
The original author is Mosè Giordano.
| DependencyWalker | https://github.com/giordano/DependencyWalker.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 274 | module ArrayAllez
include("cache.jl")
include("threads.jl")
include("inplace.jl")
@init @require Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" begin
include("inplace-flux.jl")
include("prod+cumprod.jl")
end
include("inplace-zygote.jl")
include("dropdims.jl")
end
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 1470 |
export broadsum, broadsum!, bsum, bsum!
"""
broadsum(*, A, B, C) = sum(A .* B .* C)
broadsum(f,*, A,B) = sum(f, A .* B)
These aim to work exactly like `sum(broadcast(...))`, but without materialising the broadcast array.
Simplest case works & is fast. Version with `f` is slow.
broadsum(*, A, B; dims=1) = sum(A .* B; dims=1)
broadsum!(Z, *,A,B) = sum!(Z, A .* B)
Similarly immitates `sum!(Z, broadcast(...))` without materialising.
Now uses `LazyArrays.BroadcastArray`. The in-place form is actually a little slower.
"""
bsum(op, As...; dims=:) =
_bsum(dims, identity, Broadcast.broadcasted(op, As...))
bsum(f::Function, op::Function, As...) =
_bsum((:), f, Broadcast.broadcasted(op, As...))
@inline function _bsum(::Colon, fun::Function, bc)
@assert length(bc.args) >= 1
T = Broadcast.combine_eltypes(fun∘bc.f, bc.args)
tot = zero(T)
@simd for I in eachindex(bc)
@inbounds tot += fun(bc[I])
end
tot
end
using LazyArrays: BroadcastArray
_bsum(dims::Union{Int,NTuple}, ::typeof(identity), bc) = sum(BroadcastArray(bc); dims=dims)
bsum!(Z::AbstractArray, op, As...) = sum!(Z, BroadcastArray(op, As...))
const broadsum = bsum
const broadsum! = bsum!
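# Usage sketch:
# A, B = rand(100), rand(100)
# bsum(*, A, B) ≈ sum(A .* B)
# bsum(abs2, +, A, B) ≈ sum(abs2.(A .+ B))
# Z = zeros(1, 10); bsum!(Z, *, rand(5, 10), rand(5, 10)) # like sum!(Z, ...)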
# also dispatch complete sum to method which has better reverse:
# _bsum(::Colon, ::typeof(identity), bc) = sum_(BroadcastArray(bc))
# or not, crashes?
# Base._sum(A::BroadcastArray, ::Colon) = _bsum((:), identity, A.broadcasted)
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 2010 |
export Array_, similar_, copy_
struct CacheKey{T}
name::Symbol
l::Int
c::Int
CacheKey(n::Symbol, A::AT, sz::NTuple{M,Int} = size(A)) where AT <: AbstractArray{T,M} where {T,M} =
new{AT}(n, prod(sz), checksum(sz))
CacheKey(n::Symbol, ::Val{T}, sz::NTuple{M,Int}) where {T,M} = new{Array{T,M}}(n, prod(sz), checksum(sz))
end
checksum(size::NTuple{N,Int}) where N = sum(ntuple(i -> size[i] * i^2, Val(N)))
using LRUCache
const cache = LRU{CacheKey, AbstractArray}(;maxsize = 100) # very crude: fixed size, for now
# function Base.getindex(lru::LRU, key::CacheKey{AT}) where AT
# node = lru.ht[key]
# LRUCache.move_to_front!(lru.q, node)
# return node.v::AT # to improve type stability?
# end
"""
similar_(name, A) ≈ similar(A)
Array_{T}(name, size) ≈ Array{T}(undef, size)
New arrays for intermediate results, drawn from an LRU cache when `length(A) >= 2000`.
The cache's key uses `name::Symbol` as well as type & size to ensure different uses don't collide.
copy_(name, A) = copyto!(similar_(name, A), A)
Just like that.
"""
similar_(A::AbstractArray) = similar_(:array_, A)
function similar_(name::Symbol, A::TA)::TA where TA<:AbstractArray
if length(A) < 2000
similar(A)
else
get(cache, CacheKey(name, A)) do
similar(A)
end
end
end
struct Array_{T} end
@doc @doc(similar_)
Array_(any...) = Array_{Float64}(any...)
Array_{T}(sz::Vararg{Int}) where {T} = Array_{T}(:Array_, sz)
Array_{T}(name::Symbol, sz::Vararg{Int}) where {T} = Array_{T}(name, sz)
Array_{T}(sz::Tuple) where {T} = Array_{T}(:Array_, sz)
function Array_{T}(name::Symbol, sz::NTuple{N,Int}) where {T,N}
key = CacheKey(name, Val(T), sz)
if prod(sz) < 2000
Array{T}(undef, sz);
else
get(cache, key) do
Array{T, N}(undef, sz)
end
end
end
@doc @doc(similar_)
copy_(A::AbstractArray) = copy_(:copy_, A)
copy_(name::Symbol, A::AbstractArray) = copyto!(similar_(name, A), A)
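# Usage sketch (the Symbols are arbitrary tags that keep different call sites
# from sharing a cached buffer):
#
# A = rand(100, 100) # length(A) >= 2000, so these draw from the LRU cache
# B = similar_(:mybuffer, A)
# C = Array_{Float64}(:mybuffer2, 100, 100)
# D = copy_(:mycopy, A)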
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 843 |
export @dropdims
using MacroTools
"""
@dropdims sum(A; dims=1)
Macro which wraps such reductions in `dropdims(...; dims=1)`.
Allows `sum(A; dims=1) do x stuff end`,
and works on whole blocks of code like `@views`.
Does not handle other keywords, like `reduce(...; dims=..., init=...)`.
"""
macro dropdims(ex)
_dropdims(ex)
end
function _dropdims(ex)
out = MacroTools.postwalk(ex) do x
if @capture(x, red_(args__, dims=d_)) || @capture(x, red_(args__; dims=d_))
:( dropdims($x; dims=$d) )
elseif @capture(x, dropdims(red_(args__, dims=d1_); dims=d2_) do z_ body_ end) ||
@capture(x, dropdims(red_(args__; dims=d1_); dims=d2_) do z_ body_ end)
:( dropdims($red($z -> $body, $(args...); dims=$d1); dims=$d2) )
else
x
end
end
esc(out)
end
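# Usage sketch:
# A = ones(3, 4)
# v = @dropdims sum(A; dims=1) # a 4-element Vector, not a 1×4 Matrix
# s = @dropdims maximum(abs, A; dims=2) # any reduction taking dims works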
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 6811 | IVERBOSE && @info "ArrayAllez loaded in-place code for Tracker"
using .Tracker
using .Tracker: track, TrackedArray, TrackedReal, @grad, data, nobacksies # also for prod....jl
"""
It is safe to mutate the forward output of `exp!` and `exp_`,
as they keep a copy for backwards use.
exp!!(A::TrackedArray)
The gradient function of `exp!!` mutates its backward `Δ`, no copies.
Whether or not this is safe is for you to decide.
It tends to lead to Inf problems when used inside `@btime`.
"""
exp!(A::TrackedArray) = track(exp!, A)
exp!!(A::TrackedArray) = track(exp!!, A)
exp_(A::TrackedArray) = track(exp_, A)
exp0(A::TrackedArray) = exp.(A)
@grad function exp_(A::TrackedArray)
expA = exp_(A.data)
expA_copy = copy(expA) # ensures that mutating output won't damage the gradient
expA, Δ -> ( scale!(expA_copy, Δ) ,) # and we can then mutate the copy here... for 1st deriv?
end
@grad function exp!(A::TrackedArray)
expA = exp!(A.data)
expA_copy = copy(expA)
expA, Δ -> ( scale!(expA_copy, Δ) ,)
end
@grad function exp!!(A::TrackedArray)
expA = exp!(A.data)
expA, Δ -> ( scale!(Δ, expA) ,)
end
exp_(name::Symbol, A::TrackedArray) = track(exp_, name, A)
@grad function exp_(name::Symbol, A::TrackedArray)
expA = exp_(name, A.data)
expA_copy = copy_(:exp_copy, expA) # opts in but has a distinct name? NOT safe
expA, Δ -> (nothing, scale!(expA_copy, Δ) ,)
end
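# Usage sketch (Tracker's API; `param` creates a tracked array):
# A = param(rand(3))
# y = exp!(A) # the pullback above closed over its own copy,
# Tracker.data(y) .= 0 # so mutating the forward value is safe here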
"""
For `log!` it is safe to mutate both the input and the forward output,
as the `inv_(A)` needed for the gradient is computed ahead of time.
For `log_` it is safe to mutate the output but not its input.
log!!(A::TrackedArray)
Note that the gradient function of `log!!` mutates its backward `Δ`.
Even less of a good idea than `exp!!` as we must copy `inv_(A)` anyway.
"""
log!(A::TrackedArray) = track(log!, A)
log!!(A::TrackedArray) = track(log!!, A)
log_(A::TrackedArray) = track(log_, A)
log0(A::TrackedArray) = log.(A)
@grad log_(A::TrackedArray) =
log_(A.data), Δ -> ( iscale_(Δ, A.data) ,)
@grad function log!(A::TrackedArray)
invA = inv_(A.data)
log!(A.data), Δ -> ( scale!(invA, Δ) ,)
end
@grad function log!!(A::TrackedArray)
invA = inv_(A.data)
logA = log!(A.data)
logA, Δ -> ( scale!(Δ, invA) ,)
end
log_(name::Symbol, A::TrackedArray) = track(log_, name, A)
@grad function log_(name::Symbol, A::TrackedArray)
invA = inv_(:log_copy, A.data)
logA = log_(name, A.data)
logA, Δ -> (nothing, scale!(invA, Δ) ,)
end
"""
scale!!(A::TrackedArray, b)
This may mutate its backward `Δ`, watch out.
"""
scale!(A::TrackedArray, b) = track(scale!, A, b)
scale!(A::TrackedArray, b::Number) = track(scale!, A, b) # just to avoid ambiguity
scale!!(A::TrackedArray, b) = track(scale!!, A, b) # needed so the scale!! gradients below are reachable
scale!(A::Array, b::TrackedArray) = track(scale!, A, b)
scale_(A::TrackedArray, b) = track(scale_, A, b)
scale_(A::TrackedArray, b::AbstractArray) = track(scale_, A, b) # avoid an ambiguity?
scale0(A::TrackedArray, b) = A .* b
@grad scale_(A::TrackedArray, b::Number) = scale_(A.data, b), Δ -> (scale_(Δ, b), nothing)
@grad scale!(A::TrackedArray, b::Number) = scale!(A.data, b), Δ -> (scale_(Δ, b), nothing)
@grad scale!!(A::TrackedArray, b::Number) = scale!(A.data, b), Δ -> (scale!(Δ, b), nothing)
# @grad scale_(A::TrackedArray, b::TrackedReal) = scale_(A.data, b), Δ -> (scale_(Δ, b), dot(A,Δ))
# @grad scale!(A::TrackedArray, b::TrackedReal) = @error "hmm"
@grad scale_(A::TrackedArray, B::Union{Array, RVector}) = scale_(A.data, B), Δ -> (scale_(Δ, B), nothing)
@grad scale!(A::TrackedArray, B::Union{Array, RVector}) = scale!(A.data, B), Δ -> (scale_(Δ, B), nothing)
@grad scale!!(A::TrackedArray, B::Union{Array, RVector}) = scale!(A.data, B), Δ -> (scale!(Δ, B), nothing)
@grad scale_(A::TrackedArray, B::TrackedArray) =
scale_(A.data, B.data), Δ -> (scale_(Δ, B), sum!(similar(B), scale_(Δ,A)) )
@grad function scale!(A::TrackedArray, B::TrackedArray)
Ac = copy(A.data)
scale!(A.data, B.data), Δ -> (scale_(Δ, B), sum!(similar(B), scale!(Ac,Δ)) )
end
@grad function scale!!(A::TrackedArray, B::TrackedArray)
Ac = copy(A.data)
scale!(A.data, B.data), function(Δ)
∇B = sum!(similar(B), scale!(Ac,Δ))
(scale!(Δ, B), ∇B)
end
end
@grad scale_(A::Array, B::TrackedArray) =
scale_(A, B.data), Δ -> (nothing, sum!(similar(B), scale_(Δ,A)) )
@grad function scale!(A::Array, B::TrackedArray)
Ac = copy(A)
scale!(A, B.data), Δ -> (nothing, sum!(similar(B), scale!(Ac,Δ)) )
end
iscale!(A::TrackedArray, b) = track(iscale!, A, b)
iscale_(A::TrackedArray, b) = track(iscale_, A, b)
iscale0(A::TrackedArray, b) = A ./ b
@grad iscale_(A::TrackedArray, b::Number) = iscale_(A.data, b), Δ -> (iscale_(Δ, b), nothing)
@grad iscale!(A::TrackedArray, b::Number) = iscale!(A.data, b), Δ -> (iscale!(Δ, b), nothing)
# @grad iscale_(A::TrackedArray, b::TrackedReal) = iscale_(A.data, b.data), Δ -> (iscale_(Δ, b), nothing)
# @grad iscale!(A::TrackedArray, b::TrackedReal) = iscale!(A.data, b.data), Δ -> (iscale!(Δ, b), nothing)
@grad iscale_(A::TrackedArray, B::Union{Array, RVector}) = iscale_(A.data, B), Δ -> (iscale_(Δ, B), nothing)
@grad iscale!(A::TrackedArray, B::Union{Array, RVector}) = iscale!(A.data, B), Δ -> (iscale!(Δ, B), nothing)
@grad iscale!!(A::TrackedArray, B::Union{Array, RVector}) = scale!(A.data, inv!(B)), Δ -> (scale!(Δ, B), nothing)
@grad iscale_(A::TrackedArray, B::TrackedArray) = iscale_(A.data, B.data), Δ -> (iscale_(Δ, B), sum!(similar(B), -1 .* Δ .* A ./ B .^2))
# @grad function iscale!(A::TrackedArray, B::TrackedArray)
# Ac = copy(A.data)
# iscale!(A.data, B.data), Δ -> (scale(Δ, B.data), sum!(similar(B), scale!(Ac,Δ)) )
# end
@grad iscale_(A::Array, B::TrackedArray) = iscale_(A, B.data), Δ -> (nothing, sum!(similar(B), -1 .* Δ .* A ./ B .^2))
"""
inv!!(A::TrackedArray)
This may mutate its backward `Δ`, watch out.
"""
inv!(A::TrackedArray, b::Number=1) = track(inv!, A, b)
inv_(A::TrackedArray, b::Number=1) = track(inv_, A, b)
inv!!(A::TrackedArray, b::Number=1) = track(inv!!, A, b)
inv0(A::TrackedArray, b) = 1 ./ A
# @grad inv!(A::TrackedArray, b::Number=1) = inv_(A.data, b), Δ -> (-1 .* Δ .* b .* A.data .^ (-2) , nothing) # one copy
@grad function inv_(A::TrackedArray, b=1)
invA = inv_(A.data, b)
invA_copy = copy(invA) # don't invert twice?
invA, Δ -> (invA_copy .= -1 .* Δ .* b .* invA_copy .^ 2 , nothing) # total one copy
end
# @grad inv!(A::TrackedArray, b::Number=1) = inv!(A.data, b), Δ -> (-1 .* Δ .* b .* A.data .^ 2 , nothing) # one copy
@grad function inv!(A::TrackedArray, b=1)
invA = inv!(A.data, b)
invA_copy = copy(invA) # keep gradient calc safe from downstream mutations
invA, Δ -> (invA_copy .= -1 .* Δ .* b .* invA_copy .^ 2 , nothing) # total one copy
end
@grad inv!!(A::TrackedArray, b::Number=1) = inv!(A.data, b), Δ -> (scale!(Δ,A,A,-b), nothing)
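# --- Added sketch, not part of the original file ---
# A hedged check of the mutation-safety claims in the docstrings above: `exp_`
# saves a copy of its output for the backward pass, so clobbering the forward
# result afterwards does not corrupt the gradient. Assumes Tracker is loaded.
#
#   using Tracker: param, back!, data, grad
#   p = param(randn(3))
#   y = exp_(p)
#   data(y) .= 0               # mutate the forward output
#   back!(sum(y))              # gradient of sum is 1 everywhere, so...
#   grad(p) ≈ exp.(data(p))    # ...the saved copy still gives the right answer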
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 4723 |
using ZygoteRules
using ZygoteRules: @adjoint
# Derived from the Tracker file above by the following edits:
# @grad -> @adjoint
# .data -> (dropped entirely)
# ::TrackedArray -> ::AbstractArray
# Delete every line containing track(
# Delete all docstrings -- @adjoint dislikes them
# Delete inv!!, whatever that was.
@adjoint function exp_(A::AbstractArray)
expA = exp_(A)
expA_copy = copy(expA) # ensures that mutating output won't damage the gradient
expA, Δ -> ( scale!(expA_copy, Δ) ,) # and we can then mutate the copy here... for 1st deriv?
end
@adjoint function exp!(A::AbstractArray)
expA = exp!(A)
expA_copy = copy(expA)
expA, Δ -> ( scale!(expA_copy, Δ) ,)
end
@adjoint function exp!!(A::AbstractArray)
expA = exp!(A)
expA, Δ -> ( scale!(Δ, expA) ,)
end
@adjoint function exp_(name::Symbol, A::AbstractArray)
expA = exp_(name, A)
expA_copy = copy_(:exp_copy, expA) # opts in but has a distinct name? NOT safe
expA, Δ -> (nothing, scale!(expA_copy, Δ) ,)
end
@adjoint log_(A::AbstractArray) =
log_(A), Δ -> ( iscale_(Δ, A) ,)
@adjoint function log!(A::AbstractArray)
invA = inv_(A)
log!(A), Δ -> ( scale!(invA, Δ) ,)
end
@adjoint function log!!(A::AbstractArray)
invA = inv_(A)
logA = log!(A)
logA, Δ -> ( scale!(Δ, invA) ,)
end
@adjoint function log_(name::Symbol, A::AbstractArray)
invA = inv_(:log_copy, A)
logA = log_(name, A)
logA, Δ -> (nothing, scale!(invA, Δ) ,)
end
@adjoint scale_(A::AbstractArray, b::Number) = scale_(A, b), Δ -> (scale_(Δ, b), nothing)
@adjoint scale!(A::AbstractArray, b::Number) = scale!(A, b), Δ -> (scale_(Δ, b), nothing)
@adjoint scale!!(A::AbstractArray, b::Number) = scale!(A, b), Δ -> (scale!(Δ, b), nothing)
# @adjoint scale_(A::AbstractArray, b::TrackedReal) = scale_(A, b), Δ -> (scale_(Δ, b), dot(A,Δ))
# @adjoint scale!(A::AbstractArray, b::TrackedReal) = @error "hmm"
@adjoint scale_(A::AbstractArray, B::Union{Array, RVector}) = scale_(A, B), Δ -> (scale_(Δ, B), nothing)
@adjoint scale!(A::AbstractArray, B::Union{Array, RVector}) = scale!(A, B), Δ -> (scale_(Δ, B), nothing)
@adjoint scale!!(A::AbstractArray, B::Union{Array, RVector}) = scale!(A, B), Δ -> (scale!(Δ, B), nothing)
@adjoint scale_(A::AbstractArray, B::AbstractArray) =
scale_(A, B), Δ -> (scale_(Δ, B), sum!(similar(B), scale_(Δ,A)) )
@adjoint function scale!(A::AbstractArray, B::AbstractArray)
Ac = copy(A)
scale!(A, B), Δ -> (scale_(Δ, B), sum!(similar(B), scale!(Ac,Δ)) )
end
@adjoint function scale!!(A::AbstractArray, B::AbstractArray)
Ac = copy(A)
scale!(A, B), function(Δ)
∇B = sum!(similar(B), scale!(Ac,Δ))
(scale!(Δ, B), ∇B)
end
end
@adjoint scale_(A::Array, B::AbstractArray) =
scale_(A, B), Δ -> (nothing, sum!(similar(B), scale_(Δ,A)) )
@adjoint function scale!(A::Array, B::AbstractArray)
Ac = copy(A)
scale!(A, B), Δ -> (nothing, sum!(similar(B), scale!(Ac,Δ)) )
end
@adjoint iscale_(A::AbstractArray, b::Number) = iscale_(A, b), Δ -> (iscale_(Δ, b), nothing)
@adjoint iscale!(A::AbstractArray, b::Number) = iscale!(A, b), Δ -> (iscale!(Δ, b), nothing)
# @adjoint iscale_(A::AbstractArray, b::TrackedReal) = iscale_(A, b), Δ -> (iscale_(Δ, b), nothing)
# @adjoint iscale!(A::AbstractArray, b::TrackedReal) = iscale!(A, b), Δ -> (iscale!(Δ, b), nothing)
@adjoint iscale_(A::AbstractArray, B::Union{Array, RVector}) = iscale_(A, B), Δ -> (iscale_(Δ, B), nothing)
@adjoint iscale!(A::AbstractArray, B::Union{Array, RVector}) = iscale!(A, B), Δ -> (iscale!(Δ, B), nothing)
@adjoint iscale!!(A::AbstractArray, B::Union{Array, RVector}) = scale!(A, inv!(B)), Δ -> (scale!(Δ, B), nothing)
@adjoint iscale_(A::AbstractArray, B::AbstractArray) = iscale_(A, B), Δ -> (iscale_(Δ, B), sum!(similar(B), -1 .* Δ .* A ./ B .^2))
# @adjoint function iscale!(A::AbstractArray, B::AbstractArray)
# Ac = copy(A)
# iscale!(A, B), Δ -> (scale(Δ, B), sum!(similar(B), scale!(Ac,Δ)) )
# end
@adjoint iscale_(A::Array, B::AbstractArray) = iscale_(A, B), Δ -> (nothing, sum!(similar(B), -1 .* Δ .* A ./ B .^2))
# @adjoint inv!(A::AbstractArray, b::Number=1) = inv_(A, b), Δ -> (-1 .* Δ .* b .* A .^ (-2) , nothing) # one copy
@adjoint function inv_(A::AbstractArray, b=1)
invA = inv_(A, b)
invA_copy = copy(invA) # don't invert twice?
invA, Δ -> (invA_copy .= -1 .* Δ .* b .* invA_copy .^ 2 , nothing) # total one copy
end
# @adjoint inv!(A::AbstractArray, b::Number=1) = inv!(A, b), Δ -> (-1 .* Δ .* b .* A .^ 2 , nothing) # one copy
@adjoint function inv!(A::AbstractArray, b=1)
invA = inv!(A, b)
invA_copy = copy(invA) # keep gradient calc safe from downstream mutations
invA, Δ -> (invA_copy .= -1 .* Δ .* b .* invA_copy .^ 2 , nothing) # total one copy
end
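# --- Added sketch, not part of the original file ---
# Hedged sanity checks for the adjoints above; assumes Zygote is installed:
#
#   using Zygote
#   x = rand(3)
#   Zygote.gradient(z -> sum(exp_(z)), x)[1] ≈ exp.(x)   # via the exp_ adjoint
#   Zygote.gradient(z -> sum(log_(z)), x)[1] ≈ 1 ./ x    # via the log_ adjoint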
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 9985 |
if VERSION <= v"1.1"
const TH_EXP = 100
const TH_INV = 1000
else
const TH_EXP = 5000
const TH_INV = 100_000
end
#========== exp! log! inv! ==========#
export exp0, exp_, exp!, exp!!
export log0, log_, log!, log!!
export inv0, inv_, inv!, inv!!
"""
exp!(A)
exp_(A) = exp!(similar(A), A)
exp0(A) ≈ exp.(A)
Element-wise in-place exponential, and friends.
Multi-threaded when `length(A) >= $TH_EXP`.
Will be handled by `Yeppp` or `AppleAccelerate` or `IntelVectorMath` if you load one of them,
note that `Yeppp` may well be slower.
"""
function exp! end
exp0(A) = similar(A) .= exp.(A) # maps Adjoint -> Adjoint etc
@doc @doc(exp!)
exp_(A) = exp!(similar(A), A)
exp!(A) = exp!(A, A)
exp!!(A) = exp!(A) # differs in gradient
function exp!(B, A)
@assert size(A)==size(B)
if length(A) < TH_EXP
for I in eachindex(A)
@inbounds B[I] = exp1(A[I])
end
else
Threads.@threads for I in eachindex(A)
@inbounds B[I] = exp1(A[I])
end
end
B
end
"""
log!(A)
log_(A) ≈ log!(similar(A), A)
log0(A) = log.(A)
Element-wise in-place natural logarithm, and friends.
Multi-threaded when `length(A) >= $TH_EXP`.
Will be handled by `Yeppp` or `AppleAccelerate` or `IntelVectorMath` if you load one of them.
"""
function log! end
log0(A) = similar(A) .= log.(A)
@doc @doc(log!)
log_(A) = log!(similar(A), A)
log!(A) = log!(A, A)
log!!(A) = log!(A) # differs in gradient
function log!(B, A)
@assert size(A)==size(B)
if length(A) < TH_EXP
for I in eachindex(A)
@inbounds B[I] = log1(A[I])
end
else
Threads.@threads for I in eachindex(A)
@inbounds B[I] = log1(A[I])
end
end
B
end
# These are a little faster than Julia's built-in functions?
exp1(x::Float64) = ccall(:exp, Cdouble, (Cdouble,), x)
exp1(x) = exp(x)
log1(x::Float64) = ccall(:log, Cdouble, (Cdouble,), x)
log1(x) = log(x)
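# --- Added sketch, not part of the original file ---
# Hedged micro-benchmark for the claim above; results vary with the Julia
# version and the system libm:
#
#   using BenchmarkTools
#   x = rand()
#   @btime exp($x)    # Base.exp, pure Julia
#   @btime exp1($x)   # libm via ccall, defined above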
# Versions which use cache
exp_(name::Symbol, A) = exp!(similar_(name, A), A)
log_(name::Symbol, A) = log!(similar_(name, A), A)
"""
inv!(A) ≈ 1 ./ A
inv!(A, b::Number) ≈ b ./ A
And `inv_(A)` which copies, and `inv0(A)` simple broadcasting.
Multi-threaded when `length(A) >= $TH_INV`.
Will be handled by `AppleAccelerate` if you load it.
"""
function inv! end
inv0(A::AbstractArray, b::Number=1) = similar(A) .= b ./ A # maps Adjoint -> Adjoint etc
@doc @doc(inv!)
inv_(A::AbstractArray, b::Number=1) = inv!(similar(A), A, b)
inv_(a::Number) = 1/a # for iscale_
inv!(A::AbstractArray, b::Number=1) = inv!(A, A, b)
inv!(a::Number) = 1/a
function inv!(C::AbstractArray, A::AbstractArray, b::Number=1)
@assert size(A)==size(C)
if length(A) < TH_INV
for I in eachindex(A)
@inbounds C[I] = b / A[I]
end
else
Threads.@threads for I in eachindex(A)
@inbounds C[I] = b / A[I]
end
end
C
end
inv_(name::Symbol, A::AbstractArray, b::Number=1) = inv!(similar_(name, A), A, b)
inv_(name::Symbol, a::Number=1) = 1/a # for iscale_
#========== scale! iscale! ==========#
export scale0, scale_, scale!, scale!!
export iscale0, iscale_, iscale!, iscale!!
using LinearAlgebra: Adjoint, Transpose
const ARVector = Union{Adjoint{<:Any, <:AbstractVector}, Transpose{<:Any, <:AbstractVector}}
const RVector = Union{Adjoint{<:Any, <:Vector}, Transpose{<:Any, <:Vector}}
"""
scale!(A, b::Number) ≈ A .* b
scale!(M, v::Vector) ≈ M .* v # M::Matrix
scale!(M, r::Adjoint) ≈ M .* r # r::RowVector / Transpose etc.
scale!(A, B) ≈ A .* B # A,B same ndims
In-place scaling by a constant or (in the case of a matrix) by a row- or column-vector.
For each of these, there is also `scale_(A, ...)` non-mutating but perhaps accelerated,
and `scale0(A, ...)` simple broadcasting.
"""
function scale! end
using LinearAlgebra
scale0(A::AbstractArray, b) = similar(A) .= A .* b
scale_(A::Array, b::Number) = rmul!(copy(A), b)
scale!(A::Array, b::Number) = rmul!(A, b)
scale!!(A::Array, b) = scale!(A,b) # differs in gradient
scale_(A::RVector, b::Number) = rmul!(copy(A), b) # scale_(::Abstract...) causes flux ambiguities
scale!(A::RVector, b::Number) = rmul!(A, b)
scale!!(A::RVector, b) = scale!(A,b)
@doc @doc(scale!)
scale_(A::Matrix, v::Vector) = lmul!(Diagonal(v), copy(A))
scale!(A::Matrix, v::Vector) = lmul!(Diagonal(v), A)
scale_(A::Matrix, r::RVector) = rmul!(copy(A), Diagonal(transpose(r)))
scale!(A::Matrix, r::RVector) = rmul!(A, Diagonal(transpose(r)))
# scale_(A::AbstractArray{T,N}, B::AbstractArray{T,N}) where {T,N} = similar(A) .= A .* B
# scale!(A::AbstractArray{T,N}, B::AbstractArray{T,N}) where {T,N} = A .= A .* B
function scale!(C::AbstractArray{T,N}, A::AbstractArray{TA,N}, B::AbstractArray{TB,N}) where {T,N, TA, TB}
@assert size(A) == size(B) == size(C)
for i in eachindex(A)
@inbounds C[i] = A[i] * B[i]
end
C
end
scale_(A::AbstractArray{T,N}, B::AbstractArray{T,N}) where {T,N} = scale!(similar(A), A, B)
scale!(A::AbstractArray{T,N}, B::AbstractArray{T,N}) where {T,N} = scale!(A, A, B)
scale_(name::Symbol, A::Array, b::Number) = rmul!(copy_(name, A), b)
scale_(name::Symbol, A::Matrix, v::Vector) = lmul!(Diagonal(v), copy_(name, A))
scale_(name::Symbol, A::Matrix, r::RVector) = rmul!(copy_(name, A), Diagonal(transpose(r)))
scale_(name::Symbol, A::AbstractArray{T,N}, B::AbstractArray{T,N}) where {T,N} =
scale!(similar_(name, A), A, B)
"""
iscale!(A, b::Number) ≈ A ./ b
iscale!(A, v::Vector) ≈ A ./ v # A::Matrix
iscale!(A, r::Adjoint) ≈ A ./ r # r::RowVector / Transpose etc.
iscale!(A, B) ≈ A ./ B
For each of these, there is also `iscale_(A, ...)` non-mutating but perhaps accelerated,
and `iscale0(A, ...)` simple broadcasting.
Finally there is `iscale!!(A, x)` which mutates both arguments, which may be a terrible idea.
"""
function iscale! end
iscale0(A::AbstractArray, b) = similar(A) .= A ./ b
@doc @doc(iscale!)
iscale_(A::AbstractArray, b) = scale_(A, inv_(b))
iscale!(A::AbstractArray, b) = scale!(A, inv_(b))
iscale!!(A::AbstractArray, b) = scale!(A, inv!(b))
function iscale!(C::Array{T,N}, A::Array{TA,N}, B::Array{TB,N}) where {T,N, TA, TB}
@assert size(A) == size(B) == size(C)
for i in eachindex(A)
@inbounds C[i] = A[i] / B[i]
end
C
end
iscale_(A::Array{T,N}, B::Array{T,N}) where {T,N} = iscale!(similar(A), A, B)
iscale!(A::Array{T,N}, B::Array{T,N}) where {T,N} = iscale!(A, A, B)
# On square matrices this is a tie, but on (n,N) ./ N' it is faster.
# The equivalent for iscale!(Matrix, Vector), however, is slower: wrong loop order.
function iscale!(C::Matrix, A::Matrix, r::RVector)
@assert size(A)==size(C)
axes(A,2) == axes(r,2) || throw(DimensionMismatch("size disagreement in iscale?(Matrix,RowVector)"))
@inbounds for j in axes(A,2)
invr = inv(r[j])
for i in axes(A,1)
C[i,j] = A[i,j] * invr
end
end
C
end
iscale_(A::Matrix, r::RVector) = iscale!(similar(A), A, r)
iscale!(A::Matrix, r::RVector) = iscale!(A, A, r)
iscale_(name::Symbol, A::AbstractArray, b) = scale_(name, A, inv_(name, b))
iscale_(name::Symbol, A::Matrix, r::RVector) = iscale!(similar_(name, A), A, r)
iscale_(name::Symbol, A::Array{T,N}, B::Array{T,N}) where {T,N} =
iscale!(similar_(name, A), A, B)
#========== Accelerators ==========#
const CFloat = Union{Float64, Float32}
const CFloatArray{N} = Array{<:CFloat, N}
const CFloatMatrix = Matrix{<:CFloat}
IVERBOSE = false
VEC = ""
function load_note(str)
global VEC
if VEC == ""
@info "ArrayAllez loaded code for $str"
VEC = str
else
@warn "ArrayAllez loaded code for $str, perhaps overwriting $VEC"
VEC *= " then $str"
end
end
using Requires
@init @require Yeppp = "6310b701-1812-5374-a82f-9f6f2d54a40a" begin
using .Yeppp
exp!(B::CFloatArray, A::CFloatArray) = Yeppp.exp!(B, A)
log!(B::CFloatArray, A::CFloatArray) = Yeppp.log!(B, A) # log_(A) calls log!(B,A)
scale_(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} = Yeppp.multiply(A,B)
scale!(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} = Yeppp.multiply!(A,A,B)
IVERBOSE && load_note("Yeppp")
end
@init @require AppleAccelerate = "13e28ba4-7ad8-5781-acae-3021b1ed3924" begin
using .AppleAccelerate
exp!(B::CFloatArray, A::CFloatArray) = AppleAccelerate.exp!(B, A)
log!(B::CFloatArray, A::CFloatArray) = AppleAccelerate.log!(B, A)
inv!(A::CFloatArray) = AppleAccelerate.rec!(A, A)
scale_(A::Vector{T}, B::Vector{T}) where {T<:CFloat} = AppleAccelerate.vmul(A,B)
scale!(A::Vector{T}, B::Vector{T}) where {T<:CFloat} = AppleAccelerate.vmul!(A,A,B)
scale_(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} =
reshape(AppleAccelerate.vmul(vec(A),vec(B)), size(A)) # vmul is literally only vectors
scale!(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} =
begin AppleAccelerate.vmul!(vec(A),vec(A),vec(B)); A end
iscale_(A::Vector{T}, B::Vector{T}) where {T<:CFloat} = AppleAccelerate.vdiv(A,B)
iscale!(A::Vector{T}, B::Vector{T}) where {T<:CFloat} = AppleAccelerate.vdiv!(A,A,B)
iscale_(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} =
reshape(AppleAccelerate.vdiv(vec(A),vec(B)), size(A))
iscale!(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} =
begin AppleAccelerate.vdiv!(vec(A),vec(A),vec(B)); A end
IVERBOSE && load_note("AppleAccelerate")
end
@init @require IntelVectorMath = "c8ce9da6-5d36-5c03-b118-5a70151be7bc" begin
using .IntelVectorMath
exp!(B::CFloatArray, A::CFloatArray) = IVM.exp!(B, A)
log!(B::CFloatArray, A::CFloatArray) = IVM.log!(B, A) # log_(A) calls log!(B,A)
iscale_(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} = IVM.divide(A,B)
iscale!(A::Array{T,N}, B::Array{T,N}) where {T<:CFloat,N} = IVM.divide!(A,A,B)
IVERBOSE && load_note("IntelVectorMath")
end
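# --- Added sketch, not part of the original file ---
# Hedged illustration of the opt-in pattern above: the specialized methods are
# only defined once the optional accelerator package is loaded, e.g.
#
#   using ArrayAllez        # exp!/log!/inv! fall back to plain loops
#   using AppleAccelerate   # on macOS, the @require block above now adds
#   exp!(rand(10))          # vectorized methods, which this call dispatches to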
#========== The End ==========#
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 2340 | # https://github.com/FluxML/Flux.jl/pull/524
Base.prod(xs::TrackedArray; dims=:) = track(prod, xs; dims=dims)
@grad prod(xs; dims=:) = _prod(xs.data, prod(xs.data, dims=dims), dims)
_prod(xd, p, ::Colon) = p, Δ -> (nobacksies(:prod, ∇prod(xd, p, data(Δ)) ),)
_prod(xd, p, dims) = count(iszero, p) == 0 ?
(p, Δ -> (nobacksies(:prod, p ./ xd .* data(Δ) ),)) :
(p, Δ -> (nobacksies(:prod, mapslices(∇prod, xd; dims=dims) .* data(Δ)),))
function ∇prod(x, p=prod(x), Δ=1)
numzero = count(iszero, x)
if numzero == 0
∇ = p ./ x .* Δ
elseif numzero > 1
∇ = zero(x)
else
∇ = ∇prod_one(x, Δ)
end
end
function ∇prod_one(x::Array, Δ)
zloc = findfirst(iszero, x)
∇ = copy(x)
∇[zloc] = 1
nonzero = prod(∇) * Δ
∇ .= 0
∇[zloc] = nonzero
∇
end
∇prod_one(x::AbstractArray, Δ) = ForwardDiff.gradient(y -> prod(y) * Δ, x)
Base.cumsum(xs::TrackedArray; dims=1) = track(cumsum, xs; dims=dims)
@grad cumsum(xs; dims=1) = _cumsum(xs.data, dims)
_cumsum(xd::Array, d) = cumsum(xd; dims=d), Δ -> (reverse(cumsum(reverse(Δ,dims=d),dims=d),dims=d),)
_cumsum(xd::AbstractArray, d) = cumsum(xd; dims=d), Δ -> (mapslices(reverse∘cumsum∘reverse,Δ, dims=d),)
Base.cumprod(xs::TrackedArray; dims=nothing) = track(cumprod, xs; dims=dims)
@grad cumprod(xs; dims=nothing) = _cumprod(xs.data, dims)
_cumprod(xd, ::Nothing, p = cumprod(xd)) = p, Δ -> (nobacksies(:cumprod, ∇cumprod(xd, p, data(Δ)) ),)
_cumprod(xd, d, p = cumprod(xd, dims=d)) = p, Δ -> (nobacksies(:cumprod, ∇cumprod_d(xd, Val(d), p, data(Δ)) ),)
function ∇cumprod(x::Vector, p, Δ)
len = length(x)
z = something(findfirst(iszero, x), len+1)
∇ = zero(x)
@inbounds for i=1:z-1
ixi = 1/x[i]
for k=i:z-1
∇[i] += p[k] * Δ[k] * ixi
end
end
@inbounds if z != len+1
pk = z==1 ? one(p[1]) : p[z-1] # will be prod(x[j] for j=1:k if j!=z)
∇[z] += pk * Δ[z]
for k=(z+1):len
pk *= x[k]
∇[z] += pk * Δ[k]
end
end
∇
end
∇cumprod(x::AbstractVector, p, Δ) = vec(Δ' * ForwardDiff.jacobian(cumprod, x))
@noinline function ∇cumprod_d(x::AbstractArray{T,N}, ::Val{d}, p, Δ) where {T,N,d}
∇ = similar(x)
for i in Iterators.product(ntuple(k -> k==d ? Ref(:) : axes(x,k), Val(N))...)
copyto!(view(∇,i...), ∇cumprod(x[i...], p[i...], Δ[i...]))
end
∇ # roughly mapslices(∇cumprod, x,p,Δ; dims=d) if that existed
end
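# --- Added sketch, not part of the original file ---
# Hedged check: the hand-written ∇cumprod should agree with a ForwardDiff
# vector-Jacobian product, including when the input contains a zero.
#
#   using ForwardDiff
#   x = [2.0, 0.0, 3.0]; p = cumprod(x); Δ = ones(3)
#   ∇cumprod(x, p, Δ) ≈ vec(Δ' * ForwardDiff.jacobian(cumprod, x))  # both [1, 8, 0]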
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 1152 |
"""
This is the replacement for Julia's `@threads` macro proposed in
https://github.com/JuliaLang/julia/pull/35003
"""
macro threads(args...)
na = length(args)
if na != 1
throw(ArgumentError("wrong number of arguments in @threads"))
end
ex = args[1]
if !isa(ex, Expr)
throw(ArgumentError("need an expression argument to @threads"))
end
if ex.head === :for
if ex.args[1] isa Expr && ex.args[1].head === :(=)
return _threadsfor(ex.args[1], ex.args[2])
else
throw(ArgumentError("nested outer loops are not currently supported by @threads"))
end
else
throw(ArgumentError("unrecognized argument to @threads"))
end
end
function _threadsfor(iter_stmt, lbody)
loopvar = iter_stmt.args[1]
iter = iter_stmt.args[2]
rng = gensym(:rng)
out = quote
Base.@sync for $rng in $(Iterators.partition)($iter, $(length)($iter) ÷ $(Base.Threads.nthreads)())
Base.Threads.@spawn begin
Base.@sync for $loopvar in $rng
$lbody
end
end
end
end
esc(out)
end
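# --- Added sketch, not part of the original file ---
# Hedged usage example: the macro partitions the iterator into nthreads()
# chunks and runs each chunk in its own spawned task.
#
#   out = Vector{Float64}(undef, 1000)
#   @threads for i in eachindex(out)
#       out[i] = sqrt(i)   # writes to distinct indices, so no data race
#   end
#   out[4] == 2.0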
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 7924 | using ArrayAllez
using Test
@static if Sys.isapple()
using AppleAccelerate
@info "testing with AppleAccelerate (always done on Apple machines)"
elseif Sys.isunix()
using IntelVectorMath
@info "testing with IntelVectorMath (always done on Linux machines)"
else
@info "testing without AppleAccelerate nor IntelVectorMath, to check fallbacks"
end
@testset "exp/log/inv/scale" begin
@testset "small" begin
m = rand(3,7)
v = randn(3)
r = randn(7)'
@test exp!(copy(m)) ≈ exp_(m) ≈ exp0(m) ≈ exp_(:test, m)
@test exp!(copy(v)) ≈ exp_(v) ≈ exp0(v) ≈ exp_(:test, v)
@test exp!(copy(r)) ≈ exp_(r) ≈ exp0(r) ≈ exp_(:test, r)
@test inv!(copy(m)) ≈ inv_(m) ≈ inv0(m) ≈ inv_(:test, m)
@test inv!(copy(v)) ≈ inv_(v) ≈ inv0(v) ≈ inv_(:test, v)
@test inv!(copy(r)) ≈ inv_(r) ≈ inv0(r) ≈ inv_(:test, r)
@test scale!(copy(m),π) ≈ scale_(m,π) ≈ scale0(m,π)
@test scale!(copy(v),π) ≈ scale_(v,π) ≈ scale0(v,π)
@test scale!(copy(r),π) ≈ scale_(r,π) ≈ scale0(r,π)
@test scale!(copy(m),v) ≈ scale_(m,v) ≈ scale0(m,v)
@test scale!(copy(v),v) ≈ scale_(v,v) ≈ scale0(v,v)
@test scale!(copy(m),r) ≈ scale_(m,r) ≈ scale0(m,r)
@test scale!(copy(r),r) ≈ scale_(r,r) ≈ scale0(r,r)
@test iscale!(copy(m),π) ≈ iscale_(m,π) ≈ iscale0(m,π)
@test iscale!(copy(v),π) ≈ iscale_(v,π) ≈ iscale0(v,π)
@test iscale!(copy(r),π) ≈ iscale_(r,π) ≈ iscale0(r,π)
@test iscale!(copy(m),v) ≈ iscale_(m,v) ≈ iscale0(m,v)
@test iscale!(copy(v),v) ≈ iscale_(v,v) ≈ iscale0(v,v)
@test iscale!(copy(m),r) ≈ iscale_(m,r) ≈ iscale0(m,r)
@test iscale!(copy(r),r) ≈ iscale_(r,r) ≈ iscale0(r,r)
end
@testset "large" begin # needed because some functions switch on threading
m = rand(300,700);
v = randn(300);
r = randn(700)';
@test exp!(copy(m)) ≈ exp_(m) ≈ exp0(m) ≈ exp_(:test, m)
@test exp!(copy(v)) ≈ exp_(v) ≈ exp0(v) ≈ exp_(:test, v)
@test exp!(copy(r)) ≈ exp_(r) ≈ exp0(r) ≈ exp_(:test, r)
@test inv!(copy(m)) ≈ inv_(m) ≈ inv0(m) ≈ inv_(:test, m)
@test inv!(copy(v)) ≈ inv_(v) ≈ inv0(v) ≈ inv_(:test, v)
@test inv!(copy(r)) ≈ inv_(r) ≈ inv0(r) ≈ inv_(:test, r)
end
end
@testset "dropdims" begin
@dropdims begin
a = sum(ones(3,7), dims=2)
b = sum(10 .* randn(2,10); dims=2) do x
trunc(Int, x)
end
end
@test a isa Vector
@test b isa Vector
end
@info "loading Tracker"
using Tracker
using Tracker: TrackedArray, gradcheck, back!, data, grad
gradtest(f, dims...) = gradtest(f, rand.(Float64, dims)...) ## from Flux tests
gradtest(f, xs::AbstractArray...) = gradcheck((xs...) -> sum(sin.(f(xs...))), xs...)
using ForwardDiff
mycheck(f, x) = ForwardDiff.gradient(z -> sum(sin,f(z)), x) ≈ Tracker.gradient(z -> sum(sin,f(z)), x)[1]
@testset "Tracker gradients" begin
@testset "exp + log" begin
@test gradtest(exp0, (2,3))
@test gradtest(sum∘exp_, (2,3))
@test gradtest(sum∘exp!∘copy, (2,3))
p = param(randn(2,3));
back!(sum(exp.(p)))
pg = p.grad
p.grad[:] .= 0;
back!(sum(exp!(p)))
@test p.grad ≈ pg
p.grad[:] .= 0;
back!(sum(exp!!(p)))
@test p.grad ≈ pg
@test gradtest(log0, rand(2,3))
@test gradtest(log_, rand(2,3))
@test gradtest(log!∘copy, rand(2,3))
p = param(rand(2,3));
back!(sum(log.(p)))
pg = p.grad
p.grad[:] .= 0;
back!(sum(log!(p)))
@test p.grad ≈ pg
# p.grad[:] .= 0;
# back!(sum(log!!(p)))
# @test p.grad ≈ pg
@test gradcheck(A -> scale0(A,4) |> sum, rand(2,3))
@test gradcheck(A -> scale_(A,4) |> sum, rand(2,3))
end
@testset "exp + log II" begin # using ForwardDiff, no problem to test exp! etc.
m = rand(3,7)
mycheck(z -> log0(z), m)
mycheck(z -> log_(z), m)
mycheck(z -> log!(z), m)
mycheck(z -> log!!(z), m)
mycheck(z -> exp0(z), m)
mycheck(z -> exp_(z), m)
mycheck(z -> exp!(z), m)
mycheck(z -> exp!!(z), m)
end
@testset "scale + inv" begin
m = rand(3,7)
v = randn(3)
r = randn(7)'
@test gradtest(z -> scale_(z,9), m)
@test gradtest(z -> scale_(z,v), m)
@test gradtest(z -> scale_(z,r), m)
# @test gradtest(z -> iscale_(z,9), m)
# @test gradtest(z -> iscale_(z,v), m)
# @test gradtest(z -> iscale_(z,r), m)
# @test gradcheck(z -> sum(inv_(z)), m)
# @test gradcheck(z -> sum(inv_(z,9)), m)
# @test gradcheck(z -> sum(scale_(m,z)), v) # crash?
# @test gradcheck(z -> sum(scale_(m,z)), r)
#
# @test gradcheck(z -> sum(iscale_(m,z)), v)
# @test gradcheck(z -> sum(iscale_(m,z)), r)
end
@testset "scale + inv II" begin
m = rand(3,7)
v = randn(3)
r = randn(7)'
@test mycheck(z -> scale0(z,9), m)
@test mycheck(z -> scale_(z,9), m)
@test mycheck(z -> scale!(z,9), m)
@test mycheck(z -> scale0(z,v), m)
@test mycheck(z -> scale_(z,v), m)
@test mycheck(z -> scale!(z,v), m)
@test mycheck(z -> scale0(z,r), m)
@test mycheck(z -> scale_(z,r), m)
# @test mycheck(z -> scale!(z,r), m) # ambiguous
@test mycheck(z -> scale0(z,m), m)
# @test mycheck(z -> scale_(z,m), m) # no method
# @test mycheck(z -> scale!(z,m), m)
end
@testset "prod + cumprod" begin # https://github.com/FluxML/Flux.jl/pull/524
@test gradtest(x -> prod(x, dims=(2, 3)), (3,4,5))
@test gradtest(x -> prod(x, dims=1), (3,4,5))
@test gradtest(x -> prod(x, dims=1), (3,))
@test gradtest(x -> prod(x), (3,4,5))
@test gradtest(x -> prod(x), (3,))
rzero(dims...) = (x = rand(dims...); x[2]=0; x)
@test gradtest(x -> prod(x, dims=(2, 3)), rzero(3,4,5))
@test gradtest(x -> prod(x, dims=1), rzero(3,4,5))
@test gradtest(x -> prod(x, dims=1), rzero(3,))
@test gradtest(x -> prod(x), rzero(3,4,5))
@test gradtest(x -> prod(x), rzero(3,))
@test gradtest(x -> cumsum(x, dims=2), (3,4,5))
@test gradtest(x -> cumsum(x, dims=1), (3,))
@test gradtest(x -> cumsum(x), (3,))
@test gradtest(x -> cumprod(x, dims=2), (3,4,5))
@test gradtest(x -> cumprod(x, dims=1), (3,))
@test gradtest(x -> cumprod(x), (3,))
@test gradtest(x -> cumprod(x, dims=2), rzero(3,4,5))
@test gradtest(x -> cumprod(x, dims=1), rzero(3,))
@test gradtest(x -> cumprod(x), rzero(3,))
end
end
#=
@info "loading Zygote"
using Zygote: Zygote
@testset "Zygote gradients" begin
@testset "* left & right" begin
@test Zygote.gradient(*ˡ, 2,3) == (3, nothing)
@test Zygote.gradient(sum∘*ˡ, rand(2,2), ones(2,2)) == ([2 2; 2 2], nothing)
@test Zygote.gradient(*ʳ, 2,3) == (nothing, 2)
@test Zygote.gradient(sum∘*ʳ, ones(2,2), rand(2,2)) == (nothing, [2 2; 2 2])
end
@testset "odot" begin
@test Zygote.gradient(⊙ˡ, 2,3) == (3, nothing)
@test Zygote.gradient(sum∘⊙ˡ, rand(2,2), ones(2,2,2,2)) == ([8 8; 8 8], nothing)
@test Zygote.gradient(⊙ʳ, 2,3) == (nothing, 2)
@test Zygote.gradient(sum∘⊙ʳ, ones(2,2,2), rand(2,2)) == (nothing, [4 4; 4 4])
end
end
=#
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | code | 978 |
#=
On Julia 1.0, it was worth using threaded loop for exp & log above about 100.
But on 1.2 & 1.3, it looks like it only pays above about 5000.
=#
using ArrayAllez, BenchmarkTools
Threads.nthreads()
ArrayAllez.TH_EXP
function exp_t(A)
B = similar(A)
Threads.@threads for I in eachindex(A)
@inbounds B[I] = exp(A[I])
end
B
end
times_exp = []
@time for p in 6:2:14
r = rand(2^p)
t0 = 1e6 * @belapsed exp0($r)
t1 = 1e6 * @belapsed exp_t($r)
t2 = 1e6 * @belapsed exp_($r)
push!(times_exp, (length = 2^p, bcast = t0, thread = t1, lib = t2))
end
times_exp
function log_t(A)
B = similar(A)
Threads.@threads for I in eachindex(A)
@inbounds B[I] = log(A[I])
end
B
end
times_log = []
@time for p in 6:2:14
r = rand(2^p)
t0 = 1e6 * @belapsed log0($r)
t1 = 1e6 * @belapsed log_t($r)
t2 = 1e6 * @belapsed log_($r)
push!(times_log, (length = 2^p, bcast = t0, thread = t1, lib = t2))
end
times_log
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.0.8 | 73a70e4eec7f58bada9e96c64569b671dc659793 | docs | 3130 | # ArrayAllez.jl
[](https://travis-ci.com/mcabbott/ArrayAllez.jl)
[](https://github.com/mcabbott/ArrayAllez.jl/actions?query=workflow%3ACI+branch%3Amaster)
```
] add ArrayAllez
```
### `log! ∘ exp!`
This began as a way to more conveniently choose between [Yeppp!](https://github.com/JuliaMath/Yeppp.jl)
and [AppleAccelerate](https://github.com/JuliaMath/AppleAccelerate.jl)
and [IntelVectorMath](https://github.com/JuliaMath/IntelVectorMath.jl),
without requiring that any by installed.
The fallback version is just a loop, with `@threads` for large enough arrays.
```julia
x = rand(1,100);
y = exp0(x) # precisely = exp.(x)
x ≈ log!(y) # in-place, just a loop
using AppleAccelerate # or using IntelVectorMath, or using Yeppp
y = exp!(x) # with ! mutates
x = log_(y) # with _ copies
```
Besides `log!` and `exp!`, there is also `scale!` which understands rows/columns,
`iscale!` which divides, and `inv!` which is an element-wise inverse.
All have non-mutating versions ending `_` instead of `!`, and simple broadcasted versions ending in `0`.
```julia
m = ones(3,7)
v = rand(3)
r = rand(7)'
scale0(m, 99) # simply m .* 99
scale_(m, v) # like m .* v but using rmul!
iscale!(m, r) # like m ./ r but mutating.
m
```
### `∇`
These commands all make some attempt to define gradients for use with
[Tracker](https://github.com/FluxML/Tracker.jl) ans
[Zygote](https://github.com/FluxML/Zygote.jl), but caveat emptor.
There is also an `exp!!` which mutates both its forward input and its backward gradient,
which may be a terrible idea.
```julia
using Tracker
x = param(randn(5));
y = exp_(x)
Tracker.back!(sum_(exp!(x)))
x.data == y # true
x.grad
```
This package also defines gradients for `prod` (overwriting an incorrect one) and `cumprod`,
as in [this PR](https://github.com/FluxML/Flux.jl/pull/524).
### `Array_`
An experiment with [LRUCache](https://github.com/JuliaCollections/LRUCache.jl) for working space:
```julia
x = rand(2000)' # turns off below this size
copy_(:copy, x)
similar_(:sim, x)
Array_{Float64}(:new, 5,1000) # @btime 200 ns, 32 bytes
inv_(:inv, x) # most of the _ functions can opt-in
```
### `@dropdims`
This macro wraps reductions like `sum(A; dims=...)` in `dropdims()`.
It understands things like this:
```julia
@dropdims sum(10 .* randn(2,10); dims=2) do x
trunc(Int, x)
end
```
### Removed
This package used to provide two functions generalising matrix multiplication. They are now better handled by other packages, as sketched below:
* `TensorCore.boxdot` contracts neighbours: `rand(2,3,5) ⊡ rand(5,7,11) |> size == (2,3,7,11)`
* `NNlib.batched_mul` keeps a batch dimension: `rand(2,3,10) ⊠ rand(3,5,10) |> size == (2,5,10)`
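For reference, a minimal sketch of these replacements (assuming both packages are installed):

```julia
using TensorCore, NNlib

A, B = rand(2,3,5), rand(5,7,11);
size(A ⊡ B) == (2,3,7,11)  # boxdot contracts the neighbouring dimensions

C, D = rand(2,3,10), rand(3,5,10);
size(C ⊠ D) == (2,5,10)    # batched_mul multiplies each matrix slice of the batch
```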
### See Also
* [Vectorize.jl](https://github.com/rprechelt/Vectorize.jl) is a more comprehensive wrapper.
* [Strided.jl](https://github.com/Jutho/Strided.jl) adds `@threads` to broadcasting.
* [LoopVectorization.jl](https://github.com/chriselrod/LoopVectorization.jl) adds AVX black magic.
| ArrayAllez | https://github.com/mcabbott/ArrayAllez.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 571 | using PosteriorStats
using Documenter
DocMeta.setdocmeta!(PosteriorStats, :DocTestSetup, :(using PosteriorStats); recursive=true)
makedocs(;
modules=[PosteriorStats],
repo=Remotes.GitHub("arviz-devs", "PosteriorStats.jl"),
sitename="PosteriorStats.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true", edit_link="main", assets=String[]
),
pages=["Home" => "index.md", "API" => "api.md"],
warnonly=[:footnote, :missing_docs],
)
deploydocs(; repo="github.com/arviz-devs/PosteriorStats.jl.git", devbranch="main")
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 1569 | module PosteriorStats
using Compat: @constprop
using DataInterpolations: DataInterpolations
using Distributions: Distributions
using DocStringExtensions: FIELDS, FUNCTIONNAME, TYPEDEF, TYPEDFIELDS, SIGNATURES
using IteratorInterfaceExtensions: IteratorInterfaceExtensions
using LinearAlgebra: mul!, norm
using LogExpFunctions: LogExpFunctions
using Markdown: @doc_str
using MCMCDiagnosticTools: MCMCDiagnosticTools
using Optim: Optim
using OrderedCollections: OrderedCollections
using PrettyTables: PrettyTables
using Printf: Printf
using PSIS: PSIS, PSISResult, psis, psis!
using Random: Random
using Setfield: Setfield
using Statistics: Statistics
using StatsBase: StatsBase
using Tables: Tables
using TableTraits: TableTraits
# PSIS
export PSIS, PSISResult, psis, psis!
# LOO-CV
export AbstractELPDResult, PSISLOOResult, WAICResult
export elpd_estimates, information_criterion, loo, waic
# Model weighting and comparison
export AbstractModelWeightsMethod, BootstrappedPseudoBMA, PseudoBMA, Stacking, model_weights
export ModelComparisonResult, compare
# Summary statistics
export SummaryStats, summarize
export default_diagnostics, default_stats, default_summary_stats
# Others
export hdi, hdi!, loo_pit, r2_score
const DEFAULT_INTERVAL_PROB = 0.94
const INFORMATION_CRITERION_SCALES = (deviance=-2, log=1, negative_log=-1)
include("utils.jl")
include("hdi.jl")
include("elpdresult.jl")
include("loo.jl")
include("waic.jl")
include("model_weights.jl")
include("compare.jl")
include("loo_pit.jl")
include("r2_score.jl")
include("summarize.jl")
end # module
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 8124 | """
compare(models; kwargs...) -> ModelComparisonResult
Compare models based on their expected log pointwise predictive density (ELPD).
The ELPD is estimated either by Pareto smoothed importance sampling leave-one-out
cross-validation (LOO) or using the widely applicable information criterion (WAIC).
We recommend `loo`. For more of the theory, see this paper by some of the
leading authorities on model comparison: https://dx.doi.org/10.1111/1467-9868.00353
# Arguments
- `models`: a `Tuple`, `NamedTuple`, or `AbstractVector` whose values are either
[`AbstractELPDResult`](@ref) entries or any argument to `elpd_method`.
# Keywords
- `weights_method::AbstractModelWeightsMethod=Stacking()`: the method to be used to weight
the models. See [`model_weights`](@ref) for details
- `elpd_method=loo`: a method that computes an `AbstractELPDResult` from an argument in
`models`.
- `sort::Bool=true`: Whether to sort models by decreasing ELPD.
# Returns
- [`ModelComparisonResult`](@ref): A container for the model comparison results. The
fields contain a similar collection to `models`.
# Examples
Compare the centered and non-centered models of the eight schools problem using the defaults:
[`loo`](@ref) and [`Stacking`](@ref) weights. A custom `myloo` method formats the inputs
as expected by [`loo`](@ref).
```jldoctest compare; filter = [r"└.*"]
julia> using ArviZExampleData
julia> models = (
centered=load_example_data("centered_eight"),
non_centered=load_example_data("non_centered_eight"),
);
julia> function myloo(idata)
log_like = PermutedDimsArray(idata.log_likelihood.obs, (2, 3, 1))
return loo(log_like)
end;
julia> mc = compare(models; elpd_method=myloo)
┌ Warning: 1 parameters had Pareto shape values 0.7 < k ≤ 1. Resulting importance sampling estimates are likely to be unstable.
└ @ PSIS ~/.julia/packages/PSIS/...
ModelComparisonResult with Stacking weights
rank elpd elpd_mcse elpd_diff elpd_diff_mcse weight p ⋯
non_centered 1 -31 1.4 0 0.0 1.0 0.9 ⋯
centered 2 -31 1.4 0.06 0.067 0.0 0.9 ⋯
1 column omitted
julia> mc.weight |> pairs
pairs(::NamedTuple) with 2 entries:
:non_centered => 1.0
:centered => 5.34175e-19
```
Compare the same models from pre-computed PSIS-LOO results and computing
[`BootstrappedPseudoBMA`](@ref) weights:
```jldoctest compare; setup = :(using Random; Random.seed!(23))
julia> elpd_results = mc.elpd_result;
julia> compare(elpd_results; weights_method=BootstrappedPseudoBMA())
ModelComparisonResult with BootstrappedPseudoBMA weights
rank elpd elpd_mcse elpd_diff elpd_diff_mcse weight p ⋯
non_centered 1 -31 1.4 0 0.0 0.52 0.9 ⋯
centered 2 -31 1.4 0.06 0.067 0.48 0.9 ⋯
1 column omitted
```
"""
function compare(
inputs;
weights_method::AbstractModelWeightsMethod=Stacking(),
elpd_method=loo,
model_names=_indices(inputs),
sort::Bool=true,
)
length(model_names) === length(inputs) ||
throw(ArgumentError("Length of `model_names` must match length of `inputs`"))
elpd_results = map(Base.Fix1(_maybe_elpd_results, elpd_method), inputs)
weights = model_weights(weights_method, elpd_results)
perm = _sortperm(elpd_results; by=x -> elpd_estimates(x).elpd, rev=true)
i_elpd_max = first(perm)
elpd_max_i = elpd_estimates(elpd_results[i_elpd_max]; pointwise=true).elpd
elpd_diff_and_mcse = map(elpd_results) do r
elpd_diff_j = similar(elpd_max_i)
# workaround for named dimension packages that check dimension names are exact, for
# cases where dimension names differ
map!(-, elpd_diff_j, elpd_max_i, elpd_estimates(r; pointwise=true).elpd)
return _sum_and_se(elpd_diff_j)
end
elpd_diff = map(first, elpd_diff_and_mcse)
elpd_diff_mcse = map(last, elpd_diff_and_mcse)
rank = _assimilar(elpd_results, (1:length(elpd_results))[perm])
result = ModelComparisonResult(
model_names, rank, elpd_diff, elpd_diff_mcse, weights, elpd_results, weights_method
)
sort || return result
return _permute(result, perm)
end
_maybe_elpd_results(elpd_method, x::AbstractELPDResult; kwargs...) = x
function _maybe_elpd_results(elpd_method, x; kwargs...)
elpd_result = elpd_method(x; kwargs...)
elpd_result isa AbstractELPDResult && return elpd_result
throw(
ErrorException(
"Return value of `elpd_method` must be an `AbstractELPDResult`, not `$(typeof(elpd_result))`.",
),
)
end
"""
ModelComparisonResult
Result of model comparison using ELPD.
This struct implements the Tables and TableTraits interfaces.
Each field returns a collection of the corresponding entry for each model:
$(FIELDS)
"""
struct ModelComparisonResult{E,N,R,W,ER,M}
"Names of the models, if provided."
name::N
"Ranks of the models (ordered by decreasing ELPD)"
rank::R
"ELPD of a model subtracted from the largest ELPD of any model"
elpd_diff::E
"Monte Carlo standard error of the ELPD difference"
elpd_diff_mcse::E
"Model weights computed with `weights_method`"
weight::W
"""`AbstactELPDResult`s for each model, which can be used to access useful stats like
ELPD estimates, pointwise estimates, and Pareto shape values for PSIS-LOO"""
elpd_result::ER
"Method used to compute model weights with [`model_weights`](@ref)"
weights_method::M
end
#### custom tabular show methods
function Base.show(io::IO, mime::MIME"text/plain", r::ModelComparisonResult; kwargs...)
return _show(io, mime, r; kwargs...)
end
function Base.show(io::IO, mime::MIME"text/html", r::ModelComparisonResult; kwargs...)
return _show(io, mime, r; kwargs...)
end
function _show(io::IO, mime::MIME, r::ModelComparisonResult; kwargs...)
row_labels = collect(r.name)
cols = Tables.columnnames(r)[2:end]
table = NamedTuple{cols}(Tables.columntable(r))
weights_method_name = _typename(r.weights_method)
weights = table.weight
digits_weights = ceil(Int, -log10(maximum(weights))) + 1
weight_formatter = PrettyTables.ft_printf(
"%.$(digits_weights)f", findfirst(==(:weight), cols)
)
return _show_prettytable(
io,
mime,
table;
title="ModelComparisonResult with $(weights_method_name) weights",
row_labels,
extra_formatters=(weight_formatter,),
kwargs...,
)
end
function _permute(r::ModelComparisonResult, perm)
return ModelComparisonResult(
(_permute(getfield(r, k), perm) for k in fieldnames(typeof(r))[1:(end - 1)])...,
r.weights_method,
)
end
#### Tables interface as column table
Tables.istable(::Type{<:ModelComparisonResult}) = true
Tables.columnaccess(::Type{<:ModelComparisonResult}) = true
Tables.columns(r::ModelComparisonResult) = r
function Tables.columnnames(::ModelComparisonResult)
return (
:name, :rank, :elpd, :elpd_mcse, :elpd_diff, :elpd_diff_mcse, :weight, :p, :p_mcse
)
end
function Tables.getcolumn(r::ModelComparisonResult, i::Int)
return Tables.getcolumn(r, Tables.columnnames(r)[i])
end
function Tables.getcolumn(r::ModelComparisonResult, nm::Symbol)
nm ∈ fieldnames(typeof(r)) && return getfield(r, nm)
if nm ∈ (:elpd, :elpd_mcse, :p, :p_mcse)
return map(e -> getproperty(elpd_estimates(e), nm), r.elpd_result)
end
throw(ArgumentError("Unrecognized column name $nm"))
end
Tables.rowaccess(::Type{<:ModelComparisonResult}) = true
Tables.rows(r::ModelComparisonResult) = Tables.rows(Tables.columntable(r))
IteratorInterfaceExtensions.isiterable(::ModelComparisonResult) = true
function IteratorInterfaceExtensions.getiterator(r::ModelComparisonResult)
return Tables.datavaluerows(Tables.columntable(r))
end
TableTraits.isiterabletable(::ModelComparisonResult) = true
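# --- Added sketch, not part of the source ---
# Because the Tables interface is implemented above, a `ModelComparisonResult`
# can be handed to any Tables.jl sink. Hedged example, assuming DataFrames is
# installed and `mc` is a result from `compare`:
#
#   using DataFrames
#   df = DataFrame(mc)   # columns: name, rank, elpd, elpd_mcse, elpd_diff, ...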
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 2243 | """
$(TYPEDEF)
An abstract type representing the result of an ELPD computation.
Every subtype stores estimates of both the expected log predictive density (`elpd`) and the
effective number of parameters `p`, as well as standard errors and pointwise estimates of
each, from which other relevant estimates can be computed.
Subtypes implement the following functions:
- [`elpd_estimates`](@ref)
- [`information_criterion`](@ref)
"""
abstract type AbstractELPDResult end
function _show_elpd_estimates(
io::IO, mime::MIME"text/plain", r::AbstractELPDResult; kwargs...
)
estimates = elpd_estimates(r)
table = map(Base.vect, NamedTuple{(:elpd, :elpd_mcse, :p, :p_mcse)}(estimates))
_show_prettytable(io, mime, table; kwargs...)
return nothing
end
"""
    $(FUNCTIONNAME)(result::AbstractELPDResult; pointwise=false) -> (; elpd, elpd_mcse, p, p_mcse)
Return the (E)LPD estimates from the `result`.
"""
function elpd_estimates end
"""
$(FUNCTIONNAME)(elpd, scale::Symbol)
Compute the information criterion for the given `scale` from the `elpd` estimate.
`scale` must be one of `$(keys(INFORMATION_CRITERION_SCALES))`.
See also: [`loo`](@ref), [`waic`](@ref)
"""
function information_criterion(estimates, scale::Symbol)
scale_value = INFORMATION_CRITERION_SCALES[scale]
return scale_value * estimates.elpd
end
"""
$(FUNCTIONNAME)(result::AbstractELPDResult, scale::Symbol; pointwise=false)
Compute information criterion for the given `scale` from the existing ELPD `result`.
`scale` must be one of `$(keys(INFORMATION_CRITERION_SCALES))`.
If `pointwise=true`, then pointwise estimates are returned.
"""
function information_criterion(
result::AbstractELPDResult, scale::Symbol; pointwise::Bool=false
)
return information_criterion(elpd_estimates(result; pointwise), scale)
end
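# --- Added sketch, not part of the source ---
# Hedged example, assuming `result` came from `loo` or `waic`: the supported
# scales are simple rescalings of the ELPD estimate.
#
#   information_criterion(result, :log)           # elpd itself
#   information_criterion(result, :negative_log)  # -elpd
#   information_criterion(result, :deviance)      # -2 * elpd, the classic IC scale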
function _lpd_pointwise(log_likelihood, dims)
ndraws = prod(Base.Fix1(size, log_likelihood), dims)
lpd = LogExpFunctions.logsumexp(log_likelihood; dims)
T = eltype(lpd)
return dropdims(lpd; dims) .- log(T(ndraws))
end
function _elpd_estimates_from_pointwise(pointwise)
elpd, elpd_mcse = _sum_and_se(pointwise.elpd)
p, p_mcse = _sum_and_se(pointwise.p)
return (; elpd, elpd_mcse, p, p_mcse)
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 3692 | """
hdi(samples::AbstractArray{<:Real}; prob=$(DEFAULT_INTERVAL_PROB)) -> (; lower, upper)
Estimate the unimodal highest density interval (HDI) of `samples` for the probability `prob`.
The HDI is the minimum width Bayesian credible interval (BCI). That is, it is the smallest
possible interval containing `(100*prob)`% of the probability mass.[^Hyndman1996]
`samples` is an array of shape `(draws[, chains[, params...]])`. If multiple parameters are
present, then `lower` and `upper` are arrays with the shape `(params...,)`, computed
separately for each marginal.
This implementation uses the algorithm of [^ChenShao1999].
!!! note
Any default value of `prob` is arbitrary. The default value of
`prob=$(DEFAULT_INTERVAL_PROB)` instead of a more common default like `prob=0.95` is
chosen to reminder the user of this arbitrariness.
[^Hyndman1996]: Rob J. Hyndman (1996) Computing and Graphing Highest Density Regions,
Amer. Stat., 50(2): 120-6.
DOI: [10.1080/00031305.1996.10474359](https://doi.org/10.1080/00031305.1996.10474359)
[jstor](https://doi.org/10.2307/2684423).
[^ChenShao1999]: Ming-Hui Chen & Qi-Man Shao (1999)
Monte Carlo Estimation of Bayesian Credible and HPD Intervals,
J Comput. Graph. Stat., 8:1, 69-92.
DOI: [10.1080/10618600.1999.10474802](https://doi.org/10.1080/10618600.1999.10474802)
[jstor](https://doi.org/10.2307/1390921).
# Examples
Here we calculate the 83% HDI for a normal random variable:
```jldoctest hdi; setup = :(using Random; Random.seed!(78))
julia> x = randn(2_000);
julia> hdi(x; prob=0.83) |> pairs
pairs(::NamedTuple) with 2 entries:
:lower => -1.38266
:upper => 1.25982
```
We can also calculate the HDI for a 3-dimensional array of samples:
```jldoctest hdi; setup = :(using Random; Random.seed!(67))
julia> x = randn(1_000, 1, 1) .+ reshape(0:5:10, 1, 1, :);
julia> hdi(x) |> pairs
pairs(::NamedTuple) with 2 entries:
:lower => [-1.9674, 3.0326, 8.0326]
:upper => [1.90028, 6.90028, 11.9003]
```
"""
function hdi(x::AbstractArray{<:Real}; kwargs...)
xcopy = similar(x)
copyto!(xcopy, x)
return hdi!(xcopy; kwargs...)
end
"""
hdi!(samples::AbstractArray{<:Real}; prob=$(DEFAULT_INTERVAL_PROB)) -> (; lower, upper)
A version of [`hdi`](@ref) that sorts `samples` in-place while computing the HDI.
"""
function hdi!(x::AbstractArray{<:Real}; prob::Real=DEFAULT_INTERVAL_PROB)
0 < prob < 1 || throw(DomainError(prob, "HDI `prob` must be in the range `(0, 1)`."))
return _hdi!(x, prob)
end
function _hdi!(x::AbstractVector{<:Real}, prob::Real)
isempty(x) && throw(ArgumentError("HDI cannot be computed for an empty array."))
n = length(x)
interval_length = floor(Int, prob * n) + 1
if any(isnan, x) || interval_length == n
lower, upper = extrema(x)
else
npoints_to_check = n - interval_length + 1
sort!(x)
lower_range = @views x[begin:(begin - 1 + npoints_to_check)]
upper_range = @views x[(begin - 1 + interval_length):end]
lower, upper = argmax(Base.splat(-), zip(lower_range, upper_range))
end
return (; lower, upper)
end
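# Added note (illustrative, not part of the source): for sorted `x` with
# n = 100 and prob = 0.9, interval_length = 91 and npoints_to_check = 10, so the
# search above scans the candidates (x[1], x[91]), (x[2], x[92]), ..., (x[10], x[100])
# and returns the pair with the smallest width `upper - lower`.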
_hdi!(x::AbstractMatrix{<:Real}, prob::Real) = _hdi!(vec(x), prob)
function _hdi!(x::AbstractArray{<:Real}, prob::Real)
ndims(x) > 0 ||
throw(ArgumentError("HDI cannot be computed for a 0-dimensional array."))
axes_out = _param_axes(x)
lower = similar(x, axes_out)
upper = similar(x, axes_out)
for (i, x_slice) in zip(eachindex(lower), _eachparam(x))
lower[i], upper[i] = _hdi!(x_slice, prob)
end
return (; lower, upper)
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 4587 | """
$(SIGNATURES)
Results of Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO).
See also: [`loo`](@ref), [`AbstractELPDResult`](@ref)
$(FIELDS)
"""
struct PSISLOOResult{E,P,R<:PSIS.PSISResult} <: AbstractELPDResult
"Estimates of the expected log pointwise predictive density (ELPD) and effective number of parameters (p)"
estimates::E
"Pointwise estimates"
pointwise::P
"Pareto-smoothed importance sampling (PSIS) results"
psis_result::R
end
function elpd_estimates(r::PSISLOOResult; pointwise::Bool=false)
return pointwise ? r.pointwise : r.estimates
end
function Base.show(io::IO, mime::MIME"text/plain", result::PSISLOOResult; kwargs...)
_show_elpd_estimates(io, mime, result; title="PSISLOOResult with estimates", kwargs...)
println(io)
println(io)
print(io, "and ")
show(io, mime, result.psis_result)
return nothing
end
"""
loo(log_likelihood; reff=nothing, kwargs...) -> PSISLOOResult{<:NamedTuple,<:NamedTuple}
Compute the Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO).
[^Vehtari2017][^LOOFAQ]
`log_likelihood` must be an array of log-likelihood values with shape
`(draws, chains[, params...])`.
# Keywords
- `reff::Union{Real,AbstractArray{<:Real}}`: The relative effective sample size(s) of the
_likelihood_ values. If an array, it must have the same data dimensions as the
corresponding log-likelihood variable. If not provided, then this is estimated using
`MCMCDiagnosticTools.ess`.
- `kwargs`: Remaining keywords are forwarded to [`PSIS.psis`].
See also: [`PSISLOOResult`](@ref), [`waic`](@ref)
[^Vehtari2017]: Vehtari, A., Gelman, A. & Gabry, J.
Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC.
Stat Comput 27, 1413–1432 (2017).
doi: [10.1007/s11222-016-9696-4](https://doi.org/10.1007/s11222-016-9696-4)
arXiv: [1507.04544](https://arxiv.org/abs/1507.04544)
[^LOOFAQ]: Aki Vehtari. Cross-validation FAQ. https://mc-stan.org/loo/articles/online-only/faq.html
# Examples
Manually compute ``R_\\mathrm{eff}`` and calculate PSIS-LOO of a model:
```jldoctest
julia> using ArviZExampleData, MCMCDiagnosticTools
julia> idata = load_example_data("centered_eight");
julia> log_like = PermutedDimsArray(idata.log_likelihood.obs, (:draw, :chain, :school));
julia> reff = ess(log_like; kind=:basic, split_chains=1, relative=true);
julia> loo(log_like; reff)
PSISLOOResult with estimates
elpd elpd_mcse p p_mcse
-31 1.4 0.9 0.34
and PSISResult with 500 draws, 4 chains, and 8 parameters
Pareto shape (k) diagnostic values:
Count Min. ESS
(-Inf, 0.5] good 7 (87.5%) 151
(0.5, 0.7] okay 1 (12.5%) 446
```
"""
loo(ll::AbstractArray; kwargs...) = _loo(ll; kwargs...)
function _psis_loo_setup(log_like, _reff; kwargs...)
if _reff === nothing
# normalize log likelihoods to improve numerical stability of ESS estimate
like = LogExpFunctions.softmax(log_like; dims=(1, 2))
reff = MCMCDiagnosticTools.ess(like; kind=:basic, split_chains=1, relative=true)
else
reff = _reff
end
# smooth importance weights
psis_result = PSIS.psis(-log_like, reff; kwargs...)
return psis_result
end
function _loo(log_like; reff=nothing, kwargs...)
_check_log_likelihood(log_like)
psis_result = _psis_loo_setup(log_like, reff; kwargs...)
return _loo(log_like, psis_result)
end
function _loo(log_like, psis_result, dims=(1, 2))
# compute pointwise estimates
lpd_i = _maybe_scalar(_lpd_pointwise(log_like, dims))
elpd_i, elpd_se_i = map(
_maybe_scalar, _elpd_loo_pointwise_and_se(psis_result, log_like, dims)
)
p_i = lpd_i - elpd_i
pointwise = (;
elpd=elpd_i,
elpd_mcse=elpd_se_i,
p=p_i,
reff=psis_result.reff,
pareto_shape=psis_result.pareto_shape,
)
# combine estimates
estimates = _elpd_estimates_from_pointwise(pointwise)
return PSISLOOResult(estimates, pointwise, psis_result)
end
function _elpd_loo_pointwise_and_se(psis_result::PSIS.PSISResult, log_likelihood, dims)
log_norm = LogExpFunctions.logsumexp(psis_result.log_weights; dims)
log_weights = psis_result.log_weights .- log_norm
elpd_i = _log_mean(log_likelihood, log_weights; dims)
elpd_i_se = _se_log_mean(log_likelihood, log_weights; dims, log_mean=elpd_i)
return (
elpd=_maybe_scalar(dropdims(elpd_i; dims)),
elpd_se=_maybe_scalar(dropdims(elpd_i_se; dims) ./ sqrt.(psis_result.reff)),
)
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 5781 | """
loo_pit(y, y_pred, log_weights; kwargs...) -> Union{Real,AbstractArray}
Compute leave-one-out probability integral transform (LOO-PIT) checks.
# Arguments
- `y`: array of observations with shape `(params...,)`
- `y_pred`: array of posterior predictive samples with shape `(draws, chains, params...)`.
- `log_weights`: array of normalized log LOO importance weights with shape
`(draws, chains, params...)`.
# Keywords
- `is_discrete`: If not provided, then it is set to `true` iff elements of `y` and `y_pred`
are all integer-valued. If `true`, then data are smoothed using [`smooth_data`](@ref) to
make them non-discrete before estimating LOO-PIT values.
- `kwargs`: Remaining keywords are forwarded to `smooth_data` if data is discrete.
# Returns
- `pitvals`: LOO-PIT values with same size as `y`. If `y` is a scalar, then `pitvals` is a
scalar.
LOO-PIT is a marginal posterior predictive check. If ``y_{-i}`` is the array ``y`` of
observations with the ``i``th observation left out, and ``y_i^*`` is a posterior prediction
of the ``i``th observation, then the LOO-PIT value for the ``i``th observation is defined as
```math
P(y_i^* \\le y_i \\mid y_{-i}) = \\int_{-\\infty}^{y_i} p(y_i^* \\mid y_{-i}) \\mathrm{d} y_i^*
```
The LOO posterior predictions and the corresponding observations should have similar
distributions, so if conditional predictive distributions are well-calibrated, then all
LOO-PIT values should be approximately uniformly distributed on ``[0, 1]``.[^Gabry2019]
[^Gabry2019]: Gabry, J., Simpson, D., Vehtari, A., Betancourt, M. & Gelman, A.
Visualization in Bayesian Workflow.
J. R. Stat. Soc. Ser. A Stat. Soc. 182, 389–402 (2019).
doi: [10.1111/rssa.12378](https://doi.org/10.1111/rssa.12378)
arXiv: [1709.01449](https://arxiv.org/abs/1709.01449)
# Examples
Calculate LOO-PIT values using as test quantity the observed values themselves.
```jldoctest loo_pit1
julia> using ArviZExampleData
julia> idata = load_example_data("centered_eight");
julia> y = idata.observed_data.obs;
julia> y_pred = PermutedDimsArray(idata.posterior_predictive.obs, (:draw, :chain, :school));
julia> log_like = PermutedDimsArray(idata.log_likelihood.obs, (:draw, :chain, :school));
julia> log_weights = loo(log_like).psis_result.log_weights;
julia> loo_pit(y, y_pred, log_weights)
╭───────────────────────────────╮
│ 8-element DimArray{Float64,1} │
├───────────────────────────────┴──────────────────────────────────────── dims ┐
↓ school Categorical{String} [Choate, Deerfield, …, St. Paul's, Mt. Hermon] Unordered
└──────────────────────────────────────────────────────────────────────────────┘
"Choate" 0.943511
"Deerfield" 0.63797
"Phillips Andover" 0.316697
"Phillips Exeter" 0.582252
"Hotchkiss" 0.295321
"Lawrenceville" 0.403318
"St. Paul's" 0.902508
"Mt. Hermon" 0.655275
```
Calculate LOO-PIT values using as test quantity the square of the difference between
each observation and `mu`.
```jldoctest loo_pit1
julia> using Statistics
julia> mu = idata.posterior.mu;
julia> T = y .- median(mu);
julia> T_pred = y_pred .- mu;
julia> loo_pit(T .^ 2, T_pred .^ 2, log_weights)
╭───────────────────────────────╮
│ 8-element DimArray{Float64,1} │
├───────────────────────────────┴──────────────────────────────────────── dims ┐
↓ school Categorical{String} [Choate, Deerfield, …, St. Paul's, Mt. Hermon] Unordered
└──────────────────────────────────────────────────────────────────────────────┘
"Choate" 0.873577
"Deerfield" 0.243686
"Phillips Andover" 0.357563
"Phillips Exeter" 0.149908
"Hotchkiss" 0.435094
"Lawrenceville" 0.220627
"St. Paul's" 0.775086
"Mt. Hermon" 0.296706
```
"""
function loo_pit(
y::Union{AbstractArray,Number},
y_pred::AbstractArray,
log_weights::AbstractArray;
is_discrete::Union{Bool,Nothing}=nothing,
kwargs...,
)
sample_dims = (1, 2)
size(y) == size(y_pred)[3:end] ||
throw(ArgumentError("data dimensions of `y` and `y_pred` must have the size"))
size(log_weights) == size(y_pred) ||
throw(ArgumentError("`log_weights` and `y_pred` must have same size"))
_is_discrete = if is_discrete === nothing
all(isinteger, y) && all(isinteger, y_pred)
else
is_discrete
end
if _is_discrete
is_discrete === nothing &&
@warn "All data and predictions are integer-valued. Smoothing data before running `loo_pit`."
y_smooth = smooth_data(y; kwargs...)
y_pred_smooth = smooth_data(y_pred; dims=_otherdims(y_pred, sample_dims), kwargs...)
return _loo_pit(y_smooth, y_pred_smooth, log_weights)
else
return _loo_pit(y, y_pred, log_weights)
end
end
function _loo_pit(y::Number, y_pred, log_weights)
return @views exp.(LogExpFunctions.logsumexp(log_weights[y_pred .≤ y]))
end
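# Added note (not part of the source): with normalized log weights, the scalar
# case above is the importance-weighted empirical CDF of the predictions at `y`,
# i.e. sum(exp, log_weights[y_pred .≤ y]) computed stably via logsumexp.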
function _loo_pit(y::AbstractArray, y_pred, log_weights)
sample_dims = (1, 2)
T = typeof(exp(zero(float(eltype(log_weights)))))
pitvals = similar(y, T)
param_dims = _otherdims(log_weights, sample_dims)
# work around for `eachslices` not supporting multiple dims in older Julia versions
map!(
pitvals,
y,
CartesianIndices(map(Base.Fix1(axes, y_pred), param_dims)),
CartesianIndices(map(Base.Fix1(axes, log_weights), param_dims)),
) do yi, i1, i2
yi_pred = @views y_pred[:, :, i1]
lwi = @views log_weights[:, :, i2]
init = T(-Inf)
sel_iter = Iterators.flatten((
init, (lwi_j for (lwi_j, yi_pred_j) in zip(lwi, yi_pred) if yi_pred_j ≤ yi)
))
return clamp(exp(LogExpFunctions.logsumexp(sel_iter)), 0, 1)
end
return pitvals
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 10816 | const DEFAULT_STACKING_OPTIMIZER = Optim.LBFGS()
"""
$(TYPEDEF)
An abstract type representing methods for computing model weights.
Subtypes implement [`model_weights`](@ref)`(method, elpd_results)`.
"""
abstract type AbstractModelWeightsMethod end
"""
model_weights(elpd_results; method=Stacking())
model_weights(method::AbstractModelWeightsMethod, elpd_results)
Compute weights for each model in `elpd_results` using `method`.
`elpd_results` is a `Tuple`, `NamedTuple`, or `AbstractVector` with
[`AbstractELPDResult`](@ref) entries. The weights are returned in the same type of
collection.
[`Stacking`](@ref) is the recommended approach, as it performs well even when the true data
generating process is not included among the candidate models. See [^YaoVehtari2018] for
details.
See also: [`AbstractModelWeightsMethod`](@ref), [`compare`](@ref)
[^YaoVehtari2018]: Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman.
Using Stacking to Average Bayesian Predictive Distributions.
2018. Bayesian Analysis. 13, 3, 917–1007.
doi: [10.1214/17-BA1091](https://doi.org/10.1214/17-BA1091)
arXiv: [1704.02030](https://arxiv.org/abs/1704.02030)
# Examples
Compute [`Stacking`](@ref) weights for two models:
```jldoctest model_weights; filter = [r"└.*"]
julia> using ArviZExampleData
julia> models = (
centered=load_example_data("centered_eight"),
non_centered=load_example_data("non_centered_eight"),
);
julia> elpd_results = map(models) do idata
log_like = PermutedDimsArray(idata.log_likelihood.obs, (2, 3, 1))
return loo(log_like)
end;
┌ Warning: 1 parameters had Pareto shape values 0.7 < k ≤ 1. Resulting importance sampling estimates are likely to be unstable.
└ @ PSIS ~/.julia/packages/PSIS/...
julia> model_weights(elpd_results; method=Stacking()) |> pairs
pairs(::NamedTuple) with 2 entries:
:centered => 5.34175e-19
:non_centered => 1.0
```
Now we compute [`BootstrappedPseudoBMA`](@ref) weights for the same models:
```jldoctest model_weights; setup = :(using Random; Random.seed!(94))
julia> model_weights(elpd_results; method=BootstrappedPseudoBMA()) |> pairs
pairs(::NamedTuple) with 2 entries:
:centered => 0.483723
:non_centered => 0.516277
```
"""
function model_weights(elpd_results; method::AbstractModelWeightsMethod=Stacking())
return model_weights(method, elpd_results)
end
# Akaike-type weights are defined as exp(-AIC/2), normalized to 1, which on the log-score
# IC scale is equivalent to softmax
akaike_weights!(w, elpds) = LogExpFunctions.softmax!(w, elpds)
_akaike_weights(elpds) = _softmax(elpds)
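# For example (illustrative): ELPD estimates [-10.0, -11.0] yield weights
# softmax([-10, -11]) ≈ [0.731, 0.269].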
"""
$(TYPEDEF)
Model weighting method using pseudo Bayesian Model Averaging (pseudo-BMA) and Akaike-type
weighting.
PseudoBMA(; regularize=false)
PseudoBMA(regularize)
Construct the method with optional regularization of the weights using the standard error of
the ELPD estimate.
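In symbols, model ``k`` receives weight
```math
w_k = \frac{\exp(\widehat{\mathrm{elpd}}_k)}{\sum_j \exp(\widehat{\mathrm{elpd}}_j)},
```
where ``\widehat{\mathrm{elpd}}_k`` is replaced by ``\widehat{\mathrm{elpd}}_k - \mathrm{se}_k / 2``
when `regularize=true`.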
!!! note
This approach is not recommended, as it produces unstable weight estimates. It is
recommended to instead use [`BootstrappedPseudoBMA`](@ref) to stabilize the weights
or [`Stacking`](@ref). For details, see [^YaoVehtari2018].
[^YaoVehtari2018]: Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman.
Using Stacking to Average Bayesian Predictive Distributions.
2018. Bayesian Analysis. 13, 3, 917–1007.
doi: [10.1214/17-BA1091](https://doi.org/10.1214/17-BA1091)
arXiv: [1704.02030](https://arxiv.org/abs/1704.02030)
See also: [`Stacking`](@ref)
"""
struct PseudoBMA <: AbstractModelWeightsMethod
regularize::Bool
end
PseudoBMA(; regularize::Bool=false) = PseudoBMA(regularize)
function model_weights(method::PseudoBMA, elpd_results)
elpds = map(elpd_results) do result
est = elpd_estimates(result)
method.regularize || return est.elpd
return est.elpd - est.elpd_mcse / 2
end
return _akaike_weights(elpds)
end
"""
$(TYPEDEF)
Model weighting method using pseudo Bayesian Model Averaging using Akaike-type weighting
with the Bayesian bootstrap (pseudo-BMA+)[^YaoVehtari2018].
The Bayesian bootstrap stabilizes the model weights.
BootstrappedPseudoBMA(; rng=Random.default_rng(), samples=1_000, alpha=1)
BootstrappedPseudoBMA(rng, samples, alpha)
Construct the method.
$(TYPEDFIELDS)
See also: [`Stacking`](@ref)
[^YaoVehtari2018]: Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman.
Using Stacking to Average Bayesian Predictive Distributions.
2018. Bayesian Analysis. 13, 3, 917–1007.
doi: [10.1214/17-BA1091](https://doi.org/10.1214/17-BA1091)
arXiv: [1704.02030](https://arxiv.org/abs/1704.02030)
"""
struct BootstrappedPseudoBMA{R<:Random.AbstractRNG,T<:Real} <: AbstractModelWeightsMethod
"The random number generator to use for the Bayesian bootstrap"
rng::R
"The number of samples to draw for bootstrapping"
samples::Int
"""The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap.
The default (1) corresponds to a uniform distribution on the simplex."""
alpha::T
end
function BootstrappedPseudoBMA(;
rng::Random.AbstractRNG=Random.default_rng(), samples::Int=1_000, alpha::Real=1
)
return BootstrappedPseudoBMA(rng, samples, alpha)
end
function model_weights(method::BootstrappedPseudoBMA, elpd_results)
_elpd = vec(elpd_estimates(first(values(elpd_results)); pointwise=true).elpd)
α = similar(_elpd)
n = length(α)
rng = method.rng
α_dist = Distributions.Dirichlet(n, method.alpha)
ic_mat = _elpd_matrix(elpd_results)
elpd_mean = similar(ic_mat, axes(ic_mat, 2))
weights_mean = zero(elpd_mean)
w = similar(weights_mean)
for _ in 1:(method.samples)
_model_weights_bootstrap!(w, elpd_mean, α, rng, α_dist, ic_mat)
weights_mean .+= w
end
weights_mean ./= method.samples
return _assimilar(elpd_results, weights_mean)
end
function _model_weights_bootstrap!(w, elpd_mean, α, rng, α_dist, ic_mat)
Random.rand!(rng, α_dist, α)
mul!(elpd_mean, ic_mat', α)
elpd_mean .*= length(α)
akaike_weights!(w, elpd_mean)
return w
end
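# Each bootstrap replicate draws simplex weights α ~ Dirichlet(n, alpha), forms the
# α-weighted total ELPD per model (the weighted mean scaled by n), and maps it to
# Akaike-type weights; `model_weights` then averages the replicates.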
"""
$(TYPEDEF)
Model weighting using stacking of predictive distributions[^YaoVehtari2018].
    Stacking(; optimizer=Optim.LBFGS(), options=Optim.Options())
Stacking(optimizer[, options])
Construct the method, optionally customizing the optimization.
$(TYPEDFIELDS)
See also: [`BootstrappedPseudoBMA`](@ref)
[^YaoVehtari2018]: Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman.
Using Stacking to Average Bayesian Predictive Distributions.
2018. Bayesian Analysis. 13, 3, 917–1007.
doi: [10.1214/17-BA1091](https://doi.org/10.1214/17-BA1091)
arXiv: [1704.02030](https://arxiv.org/abs/1704.02030)
"""
struct Stacking{O<:Optim.AbstractOptimizer} <: AbstractModelWeightsMethod
"""The optimizer to use for the optimization of the weights. The optimizer must support
projected gradient optimization via a `manifold` field."""
optimizer::O
"""The Optim options to use for the optimization of the weights."""
options::Optim.Options
function Stacking(
optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options()
)
hasfield(typeof(optimizer), :manifold) ||
throw(ArgumentError("The optimizer must have a `manifold` field."))
_optimizer = Setfield.@set optimizer.manifold = Optim.Sphere()
return new{typeof(_optimizer)}(_optimizer, options)
end
end
function Stacking(;
optimizer::Optim.AbstractOptimizer=DEFAULT_STACKING_OPTIMIZER,
options::Optim.Options=Optim.Options(),
)
return Stacking(optimizer, options)
end
function model_weights(method::Stacking, elpd_pairs)
ic_mat = _elpd_matrix(elpd_pairs)
exp_ic_mat = exp.(ic_mat)
_, weights = _model_weights_stacking(exp_ic_mat, method.optimizer, method.options)
return _assimilar(elpd_pairs, weights)
end
function _model_weights_stacking(exp_ic_mat, optimizer, options)
# set up optimization objective
objective = InplaceStackingOptimObjective(exp_ic_mat)
# set up initial point on optimization manifold
w0 = similar(exp_ic_mat, axes(exp_ic_mat, 2))
fill!(w0, 1//length(w0))
x0 = _initial_point(objective, w0)
# optimize
sol = Optim.optimize(Optim.only_fg!(objective), x0, optimizer, options)
# check convergence
Optim.converged(sol) ||
@warn "Optimization of stacking weights failed to converge after $(Optim.iterations(sol)) iterations."
# return solution and weights
w = _final_point(objective, sol.minimizer)
return sol, w
end
function _elpd_matrix(elpd_results)
elpd_values = map(elpd_results) do result
return vec(elpd_estimates(result; pointwise=true).elpd)
end
return reduce(hcat, collect(elpd_values))
end
# Optimize on the probability simplex by converting the problem to optimization on the unit
# sphere, optimizing with projected gradients, and mapping the solution back to the simplex.
# When the objective function on the simplex is convex, each global minimizer on the sphere
# maps to the global minimizer on the simplex; meanwhile, the sphere is a simpler manifold
# for optimization, with no inequality constraints.
# Q Li, D McKenzie, W Yin. "From the simplex to the sphere: faster constrained optimization
# using the Hadamard parametrization." Inf. Inference. 12.3 (2023): iaad017.
# doi: 10.1093/imaiai/iaad017. arXiv: 2112.05273
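# Chain rule used by `_∇sphere_to_simplex!`: with wᵢ = xᵢ², a gradient g = ∂f/∂w on the
# simplex pulls back to the sphere as ∂f/∂xᵢ = 2xᵢgᵢ.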
struct InplaceStackingOptimObjective{E,C}
exp_ic_mat::E
cache::C
end
function InplaceStackingOptimObjective(exp_ic_mat)
cache = (
similar(exp_ic_mat, axes(exp_ic_mat, 1)), similar(exp_ic_mat, axes(exp_ic_mat, 2))
)
return InplaceStackingOptimObjective(exp_ic_mat, cache)
end
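# Negative log score objective for stacking: f(w) = -∑ᵢ log(∑ₖ wₖ exp_ic_mat[i, k]), with
# gradient ∂f/∂wₖ = -∑ᵢ exp_ic_mat[i, k] / ∑ₗ wₗ exp_ic_mat[i, l], both computed in-place.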
function (obj::InplaceStackingOptimObjective)(F, G, x)
exp_ic_mat = obj.exp_ic_mat
cache, w = obj.cache
_sphere_to_simplex!(w, x)
mul!(cache, exp_ic_mat, w)
cache .= inv.(cache)
if G !== nothing
mul!(G, exp_ic_mat', cache)
G .*= -1
_∇sphere_to_simplex!(G, x)
end
if F !== nothing
return sum(log, cache)
end
return nothing
end
_initial_point(::InplaceStackingOptimObjective, w0) = _simplex_to_sphere(w0)
_final_point(::InplaceStackingOptimObjective, x) = _sphere_to_simplex(x)
# if ∑xᵢ² = 1 and wᵢ = xᵢ², then w is on the probability simplex
_sphere_to_simplex(x) = x .^ 2
function _sphere_to_simplex!(w, x)
w .= x .^ 2
return w
end
_simplex_to_sphere(x) = sqrt.(x)
function _∇sphere_to_simplex!(∂x, x)
∂x .*= 2 .* x
return ∂x
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 2110 | """
r2_score(y_true::AbstractVector, y_pred::AbstractArray) -> (; r2, r2_std)
``R²`` for linear Bayesian regression models.[^GelmanGoodrich2019]
# Arguments
- `y_true`: Observed data of length `noutputs`
- `y_pred`: Predicted data with size `(ndraws[, nchains], noutputs)`
[^GelmanGoodrich2019]: Andrew Gelman, Ben Goodrich, Jonah Gabry & Aki Vehtari (2019)
R-squared for Bayesian Regression Models, The American Statistician,
    73:3, 307-309,
DOI: [10.1080/00031305.2018.1549100](https://doi.org/10.1080/00031305.2018.1549100).
# Examples
```jldoctest
julia> using ArviZExampleData
julia> idata = load_example_data("regression1d");
julia> y_true = idata.observed_data.y;
julia> y_pred = PermutedDimsArray(idata.posterior_predictive.y, (:draw, :chain, :y_dim_0));
julia> r2_score(y_true, y_pred) |> pairs
pairs(::NamedTuple) with 2 entries:
:r2 => 0.683197
:r2_std => 0.0368838
```
"""
function r2_score(y_true, y_pred)
r_squared = r2_samples(y_true, y_pred)
return NamedTuple{(:r2, :r2_std)}(StatsBase.mean_and_std(r_squared; corrected=false))
end
"""
r2_samples(y_true::AbstractVector, y_pred::AbstractArray) -> AbstractVector
``R²`` samples for Bayesian regression models. Only valid for linear models.
See also [`r2_score`](@ref).
# Arguments
- `y_true`: Observed data of length `noutputs`
- `y_pred`: Predicted data with size `(ndraws[, nchains], noutputs)`
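# Examples
A sketch with simulated draws (illustrative; not a doctest):
```julia
y_true = randn(20)
# shape (ndraws, nchains, noutputs); the offsets make the "predictions" informative
y_pred = randn(1000, 4, 20) .+ reshape(y_true, 1, 1, :)
r2_draws = r2_samples(y_true, y_pred)  # one R² value per posterior draw and chain
```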
"""
function r2_samples(y_true::AbstractVector, y_pred::AbstractArray)
@assert ndims(y_pred) ∈ (2, 3)
corrected = false
dims = ndims(y_pred)
var_y_est = dropdims(Statistics.var(y_pred; corrected, dims); dims)
y_true_reshape = reshape(y_true, ntuple(one, ndims(y_pred) - 1)..., :)
var_residual = dropdims(Statistics.var(y_pred .- y_true_reshape; corrected, dims); dims)
# allocate storage for type-stability
T = typeof(first(var_y_est) / first(var_residual))
sample_axes = ntuple(Base.Fix1(axes, y_pred), ndims(y_pred) - 1)
r_squared = similar(y_pred, T, sample_axes)
r_squared .= var_y_est ./ (var_y_est .+ var_residual)
return r_squared
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 15758 | """
$(TYPEDEF)
A container for a column table of values computed by [`summarize`](@ref).
This object implements the Tables and TableTraits column table interfaces. It has a custom
`show` method.
`SummaryStats` behaves like an `OrderedDict` of columns, where the columns can be accessed
using either `Symbol`s or a 1-based integer index.
$(TYPEDFIELDS)
SummaryStats([name::String,] data[, parameter_names])
SummaryStats(data[, parameter_names]; name::String="SummaryStats")
Construct a `SummaryStats` from tabular `data` with optional stats `name` and `parameter_names`.
`data` must not contain a column `:parameter`, as this is reserved for the parameter names,
which are always in the first column.
"""
struct SummaryStats{D,V<:AbstractVector}
"The name of the collection of summary statistics, used as the table title in display."
name::String
"""The summary statistics for each parameter. It must implement the Tables interface."""
data::D
"Names of the parameters"
parameter_names::V
function SummaryStats(name::String, data, parameter_names::V) where {V}
coltable = Tables.columns(data)
:parameter ∈ Tables.columnnames(coltable) &&
throw(ArgumentError("Column `:parameter` is reserved for parameter names."))
length(parameter_names) == Tables.rowcount(data) || throw(
DimensionMismatch(
"length $(length(parameter_names)) of `parameter_names` does not match number of rows $(Tables.rowcount(data)) in `data`.",
),
)
return new{typeof(coltable),V}(name, coltable, parameter_names)
end
end
function SummaryStats(
data,
parameter_names::AbstractVector=Base.OneTo(Tables.rowcount(data));
name::String="SummaryStats",
)
return SummaryStats(name, data, parameter_names)
end
function SummaryStats(name::String, data)
return SummaryStats(name, data, Base.OneTo(Tables.rowcount(data)))
end
function _ordereddict(stats::SummaryStats)
return OrderedCollections.OrderedDict(
k => Tables.getcolumn(stats, k) for k in Tables.columnnames(stats)
)
end
# forward key interfaces from its parent
Base.parent(stats::SummaryStats) = getfield(stats, :data)
Base.keys(stats::SummaryStats) = map(Symbol, Tables.columnnames(stats))
Base.haskey(stats::SummaryStats, nm::Symbol) = nm ∈ keys(stats)
Base.length(stats::SummaryStats) = length(parent(stats)) + 1
Base.getindex(stats::SummaryStats, i::Union{Int,Symbol}) = Tables.getcolumn(stats, i)
function Base.iterate(stats::SummaryStats)
ncols = length(stats)
return stats.parameter_names, (2, ncols)
end
function Base.iterate(stats::SummaryStats, (i, ncols)::NTuple{2,Int})
i > ncols && return nothing
return Tables.getcolumn(stats, i), (i + 1, ncols)
end
function Base.merge(
stats::SummaryStats{<:NamedTuple}, other_stats::SummaryStats{<:NamedTuple}...
)
isempty(other_stats) && return stats
stats_all = (stats, other_stats...)
stats_last = last(stats_all)
return SummaryStats(
stats_last.name, merge(map(parent, stats_all)...), stats_last.parameter_names
)
end
function Base.merge(stats::SummaryStats, other_stats::SummaryStats...)
isempty(other_stats) && return stats
stats_all = (stats, other_stats...)
data_merged = merge(map(_ordereddict, stats_all)...)
parameter_names = pop!(data_merged, :parameter)
return SummaryStats(last(stats_all).name, data_merged, parameter_names)
end
for f in (:(==), :isequal)
@eval begin
function Base.$(f)(stats::SummaryStats, other_stats::SummaryStats)
colnames1 = Tables.columnnames(stats)
colnames2 = Tables.columnnames(other_stats)
vals1 = (Tables.getcolumn(stats, k) for k in colnames1)
vals2 = (Tables.getcolumn(other_stats, k) for k in colnames2)
return all(Base.splat($f), zip(colnames1, colnames2)) &&
all(Base.splat($f), zip(vals1, vals2))
end
end
end
#### custom tabular show methods
function Base.show(io::IO, mime::MIME"text/plain", stats::SummaryStats; kwargs...)
return _show(io, mime, stats; kwargs...)
end
function Base.show(io::IO, mime::MIME"text/html", stats::SummaryStats; kwargs...)
return _show(io, mime, stats; kwargs...)
end
function _show(io::IO, mime::MIME, stats::SummaryStats; kwargs...)
data = parent(stats)
rhat_formatter = _prettytables_rhat_formatter(data)
extra_formatters = rhat_formatter === nothing ? () : (rhat_formatter,)
return _show_prettytable(
io,
mime,
data;
title=stats.name,
row_labels=Tables.getcolumn(stats, :parameter),
extra_formatters,
kwargs...,
)
end
#### Tables interface as column table
Tables.istable(::Type{<:SummaryStats}) = true
Tables.columnaccess(::Type{<:SummaryStats}) = true
Tables.columns(s::SummaryStats) = s
function Tables.columnnames(s::SummaryStats)
data_cols = Tables.columnnames(parent(s))
data_cols isa Tuple && return (:parameter, data_cols...)
return collect(Iterators.flatten(((:parameter,), data_cols)))
end
function Tables.getcolumn(stats::SummaryStats, i::Int)
i == 1 && return stats.parameter_names
return Tables.getcolumn(parent(stats), i - 1)
end
function Tables.getcolumn(stats::SummaryStats, nm::Symbol)
nm === :parameter && return stats.parameter_names
return Tables.getcolumn(parent(stats), nm)
end
function Tables.schema(s::SummaryStats)
data_schema = Tables.schema(parent(s))
data_schema === nothing && return nothing
T = eltype(s.parameter_names)
if data_schema isa Tables.Schema{Nothing,Nothing}
return Tables.Schema([:parameter; data_schema.names], [T; data_schema.types])
else
return Tables.Schema((:parameter, data_schema.names...), (T, data_schema.types...))
end
end
IteratorInterfaceExtensions.isiterable(::SummaryStats) = true
function IteratorInterfaceExtensions.getiterator(s::SummaryStats)
return Tables.datavaluerows(Tables.columntable(s))
end
TableTraits.isiterabletable(::SummaryStats) = true
"""
summarize(data, stats_funs...; name="SummaryStats", [var_names]) -> SummaryStats
Compute the summary statistics in `stats_funs` on each param in `data`.
`stats_funs` is a collection of functions that reduces a matrix with shape `(draws, chains)`
to a scalar or a collection of scalars. Alternatively, an item in `stats_funs` may be a
`Pair` of the form `name => fun` specifying the name to be used for the statistic or of the
form `(name1, ...) => fun` when the function returns a collection. When the function returns
a collection, the names in this latter format must be provided.
If no stats functions are provided, then those specified in [`default_summary_stats`](@ref)
are computed.
`var_names` specifies the names of the parameters in `data`. If not provided, the names are
inferred from `data`.
To support computing summary statistics from a custom object, overload this method
specifying the type of `data`.
See also [`SummaryStats`](@ref), [`default_summary_stats`](@ref), [`default_stats`](@ref),
[`default_diagnostics`](@ref).
# Examples
Compute `mean`, `std` and the Monte Carlo standard error (MCSE) of the mean estimate:
```jldoctest summarize; setup = (using Random; Random.seed!(84))
julia> using Statistics, StatsBase
julia> x = randn(1000, 4, 3) .+ reshape(0:10:20, 1, 1, :);
julia> summarize(x, mean, std, :mcse_mean => sem; name="Mean/Std")
Mean/Std
mean std mcse_mean
1 0.0003 0.990 0.016
2 10.02 0.988 0.016
3 19.98 0.988 0.016
```
Avoid recomputing the mean by using `mean_and_std`, and provide parameter names:
```jldoctest summarize
julia> summarize(x, (:mean, :std) => mean_and_std, mad; var_names=[:a, :b, :c])
SummaryStats
mean std mad
a 0.000305 0.990 0.978
b 10.0 0.988 0.995
c 20.0 0.988 0.979
```
Note that when an estimator and its MCSE are both computed, the MCSE is used to determine
the number of significant digits that will be displayed.
```jldoctest summarize
julia> summarize(x; var_names=[:a, :b, :c])
SummaryStats
mean std hdi_3% hdi_97% mcse_mean mcse_std ess_tail ess_bulk r ⋯
a 0.0003 0.99 -1.92 1.78 0.016 0.012 3567 3663 1 ⋯
b 10.02 0.99 8.17 11.9 0.016 0.011 3841 3906 1 ⋯
c 19.98 0.99 18.1 21.9 0.016 0.012 3892 3749 1 ⋯
1 column omitted
```
Compute just the statistics with an 89% HDI on all parameters, and provide the parameter
names:
```jldoctest summarize
julia> summarize(x, default_stats(; prob_interval=0.89)...; var_names=[:a, :b, :c])
SummaryStats
mean std hdi_5.5% hdi_94.5%
a 0.000305 0.990 -1.63 1.52
b 10.0 0.988 8.53 11.6
c 20.0 0.988 18.5 21.6
```
Compute the summary stats focusing on `Statistics.median`:
```jldoctest summarize
julia> summarize(x, default_summary_stats(median)...; var_names=[:a, :b, :c])
SummaryStats
median mad eti_3% eti_97% mcse_median ess_tail ess_median rhat
a 0.004 0.978 -1.83 1.89 0.020 3567 3336 1.00
b 10.02 0.995 8.17 11.9 0.023 3841 3787 1.00
c 19.99 0.979 18.1 21.9 0.020 3892 3829 1.00
```
"""
function summarize end
"""
summarize(data::AbstractArray, stats_funs...; kwargs...) -> SummaryStats
Compute the summary statistics in `stats_funs` on each param in `data`, with size
`(draws, chains, params)`.
"""
@constprop :aggressive function summarize(
data::AbstractArray{<:Union{Real,Missing},3},
stats_funs_and_names...;
name::String="SummaryStats",
var_names=axes(data, 3),
)
if isempty(stats_funs_and_names)
return summarize(data, default_summary_stats()...; name, var_names)
end
length(var_names) == size(data, 3) || throw(
DimensionMismatch(
"length $(length(var_names)) of `var_names` does not match number of parameters $(size(data, 3)) in `data`.",
),
)
names_and_funs = map(_fun_and_name, stats_funs_and_names)
fnames = map(first, names_and_funs)
_check_function_names(fnames)
funs = map(last, names_and_funs)
return SummaryStats(name, _summarize(data, funs, fnames), var_names)
end
function _check_function_names(fnames)
for name in fnames
        name === nothing && continue
        if name isa Symbol || name isa Tuple{Symbol,Vararg{Symbol}}
            continue
        end
throw(ArgumentError("Function name must be a symbol or a tuple of symbols."))
end
end
"""
default_summary_stats(focus=Statistics.mean; kwargs...)
Combination of [`default_stats`](@ref) and [`default_diagnostics`](@ref) to be used with
[`summarize`](@ref).
"""
function default_summary_stats(focus=Statistics.mean; kwargs...)
return (default_stats(focus; kwargs...)..., default_diagnostics(focus; kwargs...)...)
end
"""
default_stats(focus=Statistics.mean; prob_interval=$(DEFAULT_INTERVAL_PROB), kwargs...)
Default statistics to be computed with [`summarize`](@ref).
The value of `focus` determines the statistics to be returned:
- `Statistics.mean`: `mean`, `std`, `hdi_3%`, `hdi_97%`
- `Statistics.median`: `median`, `mad`, `eti_3%`, `eti_97%`
If `prob_interval` is set to a different value than the default, then different HDI and ETI
statistics are computed accordingly. [`hdi`](@ref) refers to the highest-density interval,
while `eti` refers to the equal-tailed interval (i.e. the credible interval computed from
symmetric quantiles).
See also: [`hdi`](@ref)
"""
function default_stats end
default_stats(; kwargs...) = default_stats(Statistics.mean; kwargs...)
function default_stats(
::typeof(Statistics.mean); prob_interval::Real=DEFAULT_INTERVAL_PROB, kwargs...
)
hdi_names = map(Symbol, _prob_interval_to_strings("hdi", prob_interval))
return (
(:mean, :std) => StatsBase.mean_and_std ∘ _skipmissing,
hdi_names => x -> hdi(_cskipmissing(x); prob=prob_interval),
)
end
function default_stats(
::typeof(Statistics.median); prob_interval::Real=DEFAULT_INTERVAL_PROB, kwargs...
)
eti_names = map(Symbol, _prob_interval_to_strings("eti", prob_interval))
prob_tail = (1 - prob_interval) / 2
p = (prob_tail, 1 - prob_tail)
return (
:median => Statistics.median ∘ _skipmissing,
:mad => StatsBase.mad ∘ _skipmissing,
eti_names => Base.Fix2(Statistics.quantile, p) ∘ _skipmissing ∘ vec,
)
end
"""
default_diagnostics(focus=Statistics.mean; kwargs...)
Default diagnostics to be computed with [`summarize`](@ref).
The value of `focus` determines the diagnostics to be returned:
- `Statistics.mean`: `mcse_mean`, `mcse_std`, `ess_tail`, `ess_bulk`, `rhat`
- `Statistics.median`: `mcse_median`, `ess_tail`, `ess_median`, `rhat`
"""
default_diagnostics(; kwargs...) = default_diagnostics(Statistics.mean; kwargs...)
function default_diagnostics(::typeof(Statistics.mean); kwargs...)
return (
:mcse_mean => MCMCDiagnosticTools.mcse,
:mcse_std => _mcse_std,
:ess_tail => _ess_tail,
(:ess_bulk, :rhat) => MCMCDiagnosticTools.ess_rhat,
)
end
function default_diagnostics(::typeof(Statistics.median); kwargs...)
return (
:mcse_median => _mcse_median,
:ess_tail => _ess_tail,
:ess_median => _ess_median,
MCMCDiagnosticTools.rhat,
)
end
function _prob_interval_to_strings(interval_type, prob; digits=2)
α = (1 - prob) / 2
perc_lower = string(round(100 * α; digits))
perc_upper = string(round(100 * (1 - α); digits))
return map((perc_lower, perc_upper)) do s
s = replace(s, r"\.0+$" => "")
return "$(interval_type)_$s%"
end
end
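# e.g. _prob_interval_to_strings("hdi", 0.94) == ("hdi_3%", "hdi_97%")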
# aggressive constprop allows summarize to be type-inferrable when called by
# another function
@constprop :aggressive function _summarize(data::AbstractArray{<:Any,3}, funs, fun_names)
return merge(map(fun_names, funs) do fname, f
return _map_over_params(fname, f, data)
end...)
end
@constprop :aggressive function _map_over_params(fname, f, data)
vals = _map_paramslices(f, data)
return _namedtuple_of_vals(f, fname, vals)
end
_namedtuple_of_vals(f, fname::Symbol, val) = (; fname => val)
_namedtuple_of_vals(f, ::Nothing, val) = (; _fname(f) => val)
function _namedtuple_of_vals(f, fname::NTuple{N,Symbol}, val::AbstractVector) where {N}
return NamedTuple{fname}(ntuple(i -> getindex.(val, i), Val(N)))
end
function _namedtuple_of_vals(f, fname::NTuple{N,Symbol}, val::NamedTuple) where {N}
return NamedTuple{fname}(values(val))
end
function _namedtuple_of_vals(f, ::Nothing, val::AbstractVector{<:NamedTuple{K}}) where {K}
return NamedTuple{K}(ntuple(i -> getindex.(val, i), length(K)))
end
_fun_and_name(p::Pair) = p
_fun_and_name(f) = nothing => f
_fname(f) = nameof(f)
# curried functions
_mcse_std(x) = MCMCDiagnosticTools.mcse(x; kind=Statistics.std)
_mcse_median(x) = MCMCDiagnosticTools.mcse(x; kind=Statistics.median)
_ess_median(x) = MCMCDiagnosticTools.ess(x; kind=Statistics.median)
_ess_tail(x) = MCMCDiagnosticTools.ess(x; kind=:tail)
# functions that have a 3D array method
_map_paramslices(f, x) = map(f, eachslice(x; dims=3))
_map_paramslices(f::typeof(_ess_median), x) = f(x)
_map_paramslices(f::typeof(_ess_tail), x) = f(x)
_map_paramslices(f::typeof(MCMCDiagnosticTools.ess_rhat), x) = f(x)
_map_paramslices(f::typeof(MCMCDiagnosticTools.rhat), x) = f(x)
_map_paramslices(f::typeof(MCMCDiagnosticTools.mcse), x) = f(x)
_map_paramslices(f::typeof(_mcse_std), x) = f(x)
_map_paramslices(f::typeof(_mcse_median), x) = f(x)
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 12465 | function _check_log_likelihood(x)
if any(!isfinite, x)
@warn "All log likelihood values must be finite, but some are not."
end
return nothing
end
"""
smooth_data(y; dims=:, interp_method=CubicSpline, offset_frac=0.01)
Smooth `y` along `dims` using `interp_method`.
`interp_method` is a 2-argument callable that takes the arguments `y` and `x` and returns
a DataInterpolations.jl interpolation method, defaulting to a cubic spline interpolator.
`offset_frac` is the fraction of the length of `y` to use as an offset when interpolating.
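# Examples
A minimal usage sketch (illustrative; not a doctest):
```julia
y = rand(0:1, 100)                 # discrete data, e.g. before computing LOO-PIT values
y_smooth = smooth_data(y)          # smooth over all elements
x = randn(10, 100)
x_smooth = smooth_data(x; dims=2)  # smooth each row along its columns
```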
"""
function smooth_data(
y;
dims::Union{Int,Tuple{Int,Vararg{Int}},Colon}=Colon(),
interp_method=DataInterpolations.CubicSpline,
offset_frac=1//100,
)
T = float(eltype(y))
y_interp = similar(y, T)
n = dims isa Colon ? length(y) : prod(Base.Fix1(size, y), dims)
x = range(0, 1; length=n)
x_interp = range(0 + offset_frac, 1 - offset_frac; length=n)
_smooth_data!(y_interp, interp_method, y, x, x_interp, dims)
return y_interp
end
function _smooth_data!(y_interp, interp_method, y, x, x_interp, ::Colon)
interp = interp_method(vec(y), x)
interp(vec(y_interp), x_interp)
return y_interp
end
function _smooth_data!(y_interp, interp_method, y, x, x_interp, dims)
for (y_interp_i, y_i) in zip(
_eachslice(y_interp; dims=_otherdims(y_interp, dims)),
_eachslice(y; dims=_otherdims(y, dims)),
)
interp = interp_method(vec(y_i), x)
interp(vec(y_interp_i), x_interp)
end
return y_interp
end
Base.@pure _typename(::T) where {T} = T.name.name
_astuple(x) = (x,)
_astuple(x::Tuple) = x
function _assimilar(x::AbstractArray, y)
z = similar(x, eltype(y))
copyto!(z, y)
return z
end
_assimilar(x::AbstractArray, y::NamedTuple) = _assimilar(x, values(y))
function _assimilar(x::Tuple, y)
z = NTuple{length(x),eltype(y)}(y)
return z
end
function _assimilar(x::NamedTuple, y)
z = NamedTuple{fieldnames(typeof(x))}(_assimilar(values(x), y))
return z
end
function _skipmissing(x::AbstractArray)
Missing <: eltype(x) && return skipmissing(x)
return x
end
function _cskipmissing(x::AbstractArray)
Missing <: eltype(x) && return collect(skipmissing(x))
return x
end
_sortperm(x; kwargs...) = sortperm(collect(x); kwargs...)
_permute(x::AbstractVector, p::AbstractVector) = x[p]
_permute(x::Tuple, p::AbstractVector) = x[p]
function _permute(x::NamedTuple, p::AbstractVector)
return NamedTuple{_permute(keys(x), p)}(_permute(values(x), p))
end
# TODO: try to find a way to do this that works for arrays with named indices
_indices(x) = keys(x)
# eachslice-like iterator that accepts multiple dimensions and has a `size` even for older
# Julia versions
@static if VERSION ≥ v"1.9-"
_eachslice(x; dims) = eachslice(x; dims)
else
function _eachslice(x; dims)
_dims = _astuple(dims)
alldims_perm = (_otherdims(x, _dims)..., _dims...)
dims_axes = map(Base.Fix1(axes, x), _dims)
other_dims = ntuple(_ -> Colon(), ndims(x) - length(_dims))
xperm = PermutedDimsArray(x, alldims_perm)
return Base.Iterators.map(CartesianIndices(dims_axes)) do i
return view(xperm, other_dims..., i)
end
end
end
_alldims(x) = ntuple(identity, ndims(x))
_otherdims(x, dims) = filter(∉(dims), _alldims(x))
_param_dims(x::AbstractArray) = ntuple(i -> i + 2, max(0, ndims(x) - 2))
_param_axes(x::AbstractArray) = map(Base.Fix1(axes, x), _param_dims(x))
function _params_array(x::AbstractArray, param_dim::Int=3)
param_dim > 0 || throw(ArgumentError("param_dim must be positive"))
sample_sizes = ntuple(Base.Fix1(size, x), param_dim - 1)
return reshape(x, sample_sizes..., :)
end
function _eachparam(x::AbstractArray, param_dim::Int=3)
return eachslice(_params_array(x, param_dim); dims=param_dim)
end
_maybe_scalar(x) = x
_maybe_scalar(x::AbstractArray{<:Any,0}) = x[]
_logabssubexp(x, y) = LogExpFunctions.logsubexp(reverse(minmax(x, y))...)
# softmax with support for other mappable iterators
_softmax(x::AbstractArray) = LogExpFunctions.softmax(x)
function _softmax(x)
nrm = LogExpFunctions.logsumexp(x)
return map(x) do xi
return exp(xi - nrm)
end
end
# compute sum and estimate of standard error of sum
function _sum_and_se(x; dims=:)
s = sum(x; dims)
n = dims isa Colon ? length(x) : prod(Base.Fix1(size, x), dims)
se = Statistics.std(x; dims) * sqrt(oftype(one(eltype(s)), n))
return s, se
end
_sum_and_se(x::Number; kwargs...) = (x, oftype(float(x), NaN))
function _log_mean(logx, log_weights; dims=:)
log_expectand = logx .+ log_weights
return LogExpFunctions.logsumexp(log_expectand; dims)
end
function _se_log_mean(
logx, log_weights; dims=:, log_mean=_log_mean(logx, log_weights; dims)
)
# variance of mean estimated using self-normalized importance weighting
# Art B. Owen. (2013) Monte Carlo theory, methods and examples. eq. 9.9
log_expectand = @. 2 * (log_weights + _logabssubexp(logx, log_mean))
log_var_mean = LogExpFunctions.logsumexp(log_expectand; dims)
# use delta method to asymptotically map variance of mean to variance of logarithm of mean
se_log_mean = @. exp(log_var_mean / 2 - log_mean)
return se_log_mean
end
"""
sigdigits_matching_se(x, se; sigdigits_max=7, scale=2) -> Int
Get number of significant digits of `x` so that the last digit of `x` is the first digit of
`se*scale`.
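For example (worked by hand, using the default `scale=2`): with `x = 0.02345` and
`se = 0.0015`, `se * scale = 0.003`, so the last kept digit of `x` is in the
thousandths place:
```julia
sigdigits_matching_se(0.02345, 0.0015)  # == 2, so 0.02345 would display as "0.023"
```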
"""
function sigdigits_matching_se(x::Real, se::Real; sigdigits_max::Int=7, scale::Real=2)
(iszero(x) || !isfinite(x) || !isfinite(se) || !isfinite(scale)) && return 0
sigdigits_max ≥ 0 || throw(ArgumentError("`sigdigits_max` must be non-negative"))
se ≥ 0 || throw(ArgumentError("`se` must be non-negative"))
iszero(se) && return sigdigits_max
scale > 0 || throw(ArgumentError("`scale` must be positive"))
first_digit_x = floor(Int, log10(abs(x)))
last_digit_x = floor(Int, log10(se * scale))
sigdigits_x = first_digit_x - last_digit_x + 1
return clamp(sigdigits_x, 0, sigdigits_max)
end
# format a number with the given number of significant digits
# - chooses scientific or decimal notation by whichever is most appropriate
# - shows trailing zeros if significant
# - removes trailing decimal point if no significant digits after decimal point
function _printf_with_sigdigits(v::Real, sigdigits)
s = sprint(Printf.format, Printf.Format("%#.$(sigdigits)g"), v)
return replace(s, r"\.$" => "")
end
#
# PrettyTables formatter utility functions
#
"""
ft_printf_sigdigits(sigdigits[, columns])
Use Printf to format the elements in the `columns` to the number of `sigdigits`.
If `sigdigits` is a `Real`, and `columns` is not specified (or is empty), then the
formatting will be applied to the entire table.
Otherwise, if `sigdigits` is a `Real` and `columns` is a vector, then the elements in the
columns will be formatted to the number of `sigdigits`.
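For example (illustrative):
```julia
fmt = ft_printf_sigdigits(3)
fmt(0.012345, 1, 1)  # returns "0.0123"
fmt("text", 1, 1)    # non-real values are passed through unchanged
```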
"""
function ft_printf_sigdigits(sigdigits::Int, columns::AbstractVector{Int}=Int[])
if isempty(columns)
return (v, _, _) -> begin
v isa Real || return v
return _printf_with_sigdigits(v, sigdigits)
end
else
return (v, _, j) -> begin
v isa Real || return v
for col in columns
col == j && return _printf_with_sigdigits(v, sigdigits)
end
return v
end
end
end
"""
ft_printf_sigdigits_matching_se(se_vals[, columns]; kwargs...)
Use Printf to format the elements in the `columns` to sigdigits based on the standard error
column in `se_vals`.
All values are formatted with Printf to the number of significant digits determined by
[`sigdigits_matching_se`](@ref). `kwargs` are forwarded to that function.
`se_vals` must be the same length as any of the columns in the table.
If `columns` is a non-empty vector, then the formatting is only applied to those columns.
Otherwise, the formatting is applied to the entire table.
"""
function ft_printf_sigdigits_matching_se(
se_vals::AbstractVector, columns::AbstractVector{Int}=Int[]; kwargs...
)
if isempty(columns)
return (v, i, _) -> begin
(v isa Real && se_vals[i] isa Real) || return v
sigdigits = sigdigits_matching_se(v, se_vals[i]; kwargs...)
return _printf_with_sigdigits(v, sigdigits)
end
else
return (v, i, j) -> begin
(v isa Real && se_vals[i] isa Real) || return v
for col in columns
if col == j
sigdigits = sigdigits_matching_se(v, se_vals[i]; kwargs...)
return _printf_with_sigdigits(v, sigdigits)
end
end
return v
end
end
end
function _prettytables_rhat_formatter(data)
cols = findall(x -> x === :rhat, Tables.columnnames(data))
isempty(cols) && return nothing
return PrettyTables.ft_printf("%.2f", cols)
end
function _prettytables_integer_formatter(data)
sch = Tables.schema(data)
sch === nothing && return nothing
cols = findall(t -> t <: Integer, sch.types)
isempty(cols) && return nothing
return PrettyTables.ft_printf("%d", cols)
end
# formatting functions for special columns
# see https://ronisbr.github.io/PrettyTables.jl/stable/man/formatters/
function _default_prettytables_formatters(data; sigdigits_se=2, sigdigits_default=3)
formatters = []
col_names = Tables.columnnames(data)
for (i, k) in enumerate(col_names)
for mcse_key in (Symbol("mcse_$k"), Symbol("$(k)_mcse"))
if haskey(data, mcse_key)
push!(
formatters,
ft_printf_sigdigits_matching_se(Tables.getcolumn(data, mcse_key), [i]),
)
continue
end
end
end
mcse_cols = findall(col_names) do k
s = string(k)
return startswith(s, "mcse_") || endswith(s, "_mcse")
end
isempty(mcse_cols) || push!(formatters, ft_printf_sigdigits(sigdigits_se, mcse_cols))
ess_cols = findall(_is_ess_label, col_names)
isempty(ess_cols) || push!(formatters, PrettyTables.ft_printf("%d", ess_cols))
ft_integer = _prettytables_integer_formatter(data)
ft_integer === nothing || push!(formatters, ft_integer)
push!(formatters, ft_printf_sigdigits(sigdigits_default))
return formatters
end
function _show_prettytable(
io::IO, data; sigdigits_se=2, sigdigits_default=3, extra_formatters=(), kwargs...
)
formatters = (
extra_formatters...,
_default_prettytables_formatters(data; sigdigits_se, sigdigits_default)...,
)
col_names = Tables.columnnames(data)
alignment = [
eltype(Tables.getcolumn(data, col_name)) <: Real ? :r : :l for col_name in col_names
]
kwargs_new = merge(
(
show_subheader=false,
vcrop_mode=:middle,
show_omitted_cell_summary=true,
row_label_alignment=:l,
formatters,
alignment,
),
kwargs,
)
PrettyTables.pretty_table(io, data; kwargs_new...)
return nothing
end
function _show_prettytable(
io::IO,
::MIME"text/plain",
data;
title_crayon=PrettyTables.Crayon(),
hlines=:none,
vlines=:none,
newline_at_end=false,
kwargs...,
)
alignment_anchor_regex = Dict{Int,Vector{Regex}}()
for (i, k) in enumerate(Tables.columnnames(data))
v = Tables.getcolumn(data, k)
if eltype(v) <: Real && !(eltype(v) <: Integer) && !_is_ess_label(k)
alignment_anchor_regex[i] = [r"\.", r"e", r"^NaN$", r"Inf$"]
end
end
alignment_anchor_fallback = :r
alignment_anchor_fallback_override = Dict(
i => :r for (i, k) in enumerate(Tables.columnnames(data)) if _is_ess_label(k)
)
return _show_prettytable(
io,
data;
backend=Val(:text),
title_crayon,
hlines,
vlines,
newline_at_end,
alignment_anchor_regex,
alignment_anchor_fallback,
alignment_anchor_fallback_override,
kwargs...,
)
end
function _show_prettytable(
io::IO, ::MIME"text/html", data; minify=true, max_num_of_rows=25, kwargs...
)
return _show_prettytable(
io, data; backend=Val(:html), minify, max_num_of_rows, kwargs...
)
end
_is_ess_label(k::Symbol) = ((k === :ess) || startswith(string(k), "ess_"))
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 2492 | """
$(SIGNATURES)
Results of computing the widely applicable information criterion (WAIC).
See also: [`waic`](@ref), [`AbstractELPDResult`](@ref)
$(FIELDS)
"""
struct WAICResult{E,P} <: AbstractELPDResult
"Estimates of the expected log pointwise predictive density (ELPD) and effective number of parameters (p)"
estimates::E
"Pointwise estimates"
pointwise::P
end
function elpd_estimates(r::WAICResult; pointwise::Bool=false)
return pointwise ? r.pointwise : r.estimates
end
function Base.show(io::IO, mime::MIME"text/plain", result::WAICResult; kwargs...)
_show_elpd_estimates(io, mime, result; title="WAICResult with estimates", kwargs...)
return nothing
end
"""
waic(log_likelihood::AbstractArray) -> WAICResult{<:NamedTuple,<:NamedTuple}
Compute the widely applicable information criterion (WAIC).[^Watanabe2010][^Vehtari2017][^LOOFAQ]
`log_likelihood` must be an array of log-likelihood values with shape
`(draws, chains[, params...])`.
See also: [`WAICResult`](@ref), [`loo`](@ref)
[^Watanabe2010]: Watanabe, S. Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory. 11(116):3571−3594, 2010. https://jmlr.csail.mit.edu/papers/v11/watanabe10a.html
[^Vehtari2017]: Vehtari, A., Gelman, A. & Gabry, J.
Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC.
Stat Comput 27, 1413–1432 (2017).
doi: [10.1007/s11222-016-9696-4](https://doi.org/10.1007/s11222-016-9696-4)
arXiv: [1507.04544](https://arxiv.org/abs/1507.04544)
[^LOOFAQ]: Aki Vehtari. Cross-validation FAQ. https://mc-stan.org/loo/articles/online-only/faq.html
# Examples
Calculate WAIC of a model:
```jldoctest
julia> using ArviZExampleData
julia> idata = load_example_data("centered_eight");
julia> log_like = PermutedDimsArray(idata.log_likelihood.obs, (:draw, :chain, :school));
julia> waic(log_like)
WAICResult with estimates
elpd elpd_mcse p p_mcse
-31 1.4 0.9 0.33
```
"""
waic(ll::AbstractArray) = _waic(ll)
function _waic(log_like, dims=(1, 2))
_check_log_likelihood(log_like)
# compute pointwise estimates
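    # lpd_i = log mean_s exp(ℓ_is) (log pointwise predictive density) and
    # p_i = Var_s(ℓ_is) (effective number of parameters), so elpd_i = lpd_i - p_i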
lpd_i = _lpd_pointwise(log_like, dims)
p_i = _maybe_scalar(dropdims(Statistics.var(log_like; corrected=true, dims); dims))
elpd_i = lpd_i - p_i
pointwise = (elpd=elpd_i, p=p_i)
# combine estimates
estimates = _elpd_estimates_from_pointwise(pointwise)
return WAICResult(estimates, pointwise)
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 5344 | using IteratorInterfaceExtensions
using PosteriorStats
using Tables
using TableTraits
using Test
function _isequal(x::ModelComparisonResult, y::ModelComparisonResult)
return Tables.columntable(x) == Tables.columntable(y)
end
@testset "compare" begin
data = eight_schools_data()
eight_schools_loo_results = map(loo ∘ log_likelihood_eight_schools, data)
mc1 = @inferred ModelComparisonResult compare(eight_schools_loo_results)
@testset "basic checks" begin
@test mc1.name == (:non_centered, :centered)
@test mc1.rank == (non_centered=1, centered=2)
@test _isapprox(
mc1.elpd_diff,
(
non_centered=0.0,
centered=(
eight_schools_loo_results.non_centered.estimates.elpd -
eight_schools_loo_results.centered.estimates.elpd
),
),
)
@test mc1.elpd_diff.non_centered == 0.0
@test mc1.elpd_diff.centered > 0
@test mc1.weight == NamedTuple{(:non_centered, :centered)}(
model_weights(eight_schools_loo_results)
)
@test mc1.elpd_result ==
NamedTuple{(:non_centered, :centered)}(eight_schools_loo_results)
mc2 = compare(data; elpd_method=loo ∘ log_likelihood_eight_schools)
@test _isequal(mc2, mc1)
@test_throws ArgumentError compare(eight_schools_loo_results; model_names=[:foo])
end
@testset "keywords are forwarded" begin
mc2 = compare(eight_schools_loo_results; weights_method=PseudoBMA())
@test !_isequal(mc2, compare(eight_schools_loo_results))
@test mc2.weights_method === PseudoBMA()
mc3 = compare(eight_schools_loo_results; sort=false)
for k in filter(!=(:weights_method), propertynames(mc1))
if k === :name
@test getproperty(mc3, k) == reverse(getproperty(mc1, k))
else
@test getproperty(mc3, k) ==
NamedTuple{(:centered, :non_centered)}(getproperty(mc1, k))
end
end
mc3 = compare(eight_schools_loo_results; model_names=[:a, :b])
@test mc3.name == [:b, :a]
mc4 = compare(eight_schools_loo_results; elpd_method=waic)
@test !_isequal(mc4, mc2)
end
@testset "ModelComparisonResult" begin
@testset "Tables interface" begin
@test Tables.istable(typeof(mc1))
@test Tables.columnaccess(typeof(mc1))
@test Tables.columns(mc1) == mc1
@test Tables.columnnames(mc1) == (
:name,
:rank,
:elpd,
:elpd_mcse,
:elpd_diff,
:elpd_diff_mcse,
:weight,
:p,
:p_mcse,
)
table = Tables.columntable(mc1)
for k in (:name, :rank, :elpd_diff, :elpd_diff_mcse, :weight)
@test getproperty(table, k) == collect(getproperty(mc1, k))
end
for k in (:elpd, :elpd_mcse, :p, :p_mcse)
@test getproperty(table, k) ==
collect(map(x -> getproperty(x.estimates, k), mc1.elpd_result))
end
for (i, k) in enumerate(Tables.columnnames(mc1))
@test Tables.getcolumn(mc1, i) == Tables.getcolumn(mc1, k)
end
@test_throws ArgumentError Tables.getcolumn(mc1, :foo)
@test Tables.rowaccess(typeof(mc1))
@test map(NamedTuple, Tables.rows(mc1)) ==
map(NamedTuple, Tables.rows(Tables.columntable(mc1)))
end
@testset "TableTraits interface" begin
@test IteratorInterfaceExtensions.isiterable(mc1)
@test TableTraits.isiterabletable(mc1)
nt = collect(Iterators.take(IteratorInterfaceExtensions.getiterator(mc1), 1))[1]
@test isequal(
nt,
(; (k => Tables.getcolumn(mc1, k)[1] for k in Tables.columnnames(mc1))...),
)
nt = collect(Iterators.take(IteratorInterfaceExtensions.getiterator(mc1), 2))[2]
@test isequal(
nt,
(; (k => Tables.getcolumn(mc1, k)[2] for k in Tables.columnnames(mc1))...),
)
end
@testset "show" begin
mc5 = compare(eight_schools_loo_results; weights_method=PseudoBMA())
@test sprint(show, "text/plain", mc1) == """
ModelComparisonResult with Stacking weights
rank elpd elpd_mcse elpd_diff elpd_diff_mcse weight p p_mcse
non_centered 1 -31 1.4 0 0.0 1.0 0.9 0.32
centered 2 -31 1.4 0.06 0.067 0.0 0.9 0.34"""
@test sprint(show, "text/plain", mc5) == """
ModelComparisonResult with PseudoBMA weights
rank elpd elpd_mcse elpd_diff elpd_diff_mcse weight p p_mcse
non_centered 1 -31 1.4 0 0.0 0.52 0.9 0.32
centered 2 -31 1.4 0.06 0.067 0.48 0.9 0.34"""
@test startswith(sprint(show, "text/html", mc1), "<table")
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 3281 | using OffsetArrays
using PosteriorStats
using Statistics
using Test
@testset "hdi/hdi!" begin
@testset "AbstractVector" begin
@testset for n in (10, 100, 1_000),
prob in (1 / n, 0.5, 0.73, 0.96, (n - 1 + 0.1) / n),
T in (Float32, Float64, Int64)
x = T <: Integer ? rand(T(1):T(30), n) : randn(T, n)
r = @inferred hdi(x; prob)
@test r isa NamedTuple{(:lower, :upper),NTuple{2,T}}
l, u = r
interval_length = floor(Int, prob * n) + 1
if T <: Integer
@test sum(x -> l ≤ x ≤ u, x) ≥ interval_length
else
@test sum(x -> l ≤ x ≤ u, x) == interval_length
end
xsort = sort(x)
lind = 1:(n - interval_length + 1)
uind = interval_length:n
@assert all(collect(uind) .- collect(lind) .+ 1 .== interval_length)
@test minimum(xsort[uind] - xsort[lind]) ≈ u - l
@test hdi!(copy(x); prob) == r
end
end
@testset "edge cases and errors" begin
@testset "NaNs returned if contains NaNs" begin
x = randn(1000)
x[3] = NaN
@test isequal(hdi(x), (lower=NaN, upper=NaN))
end
@testset "errors for empty array" begin
x = Float64[]
@test_throws ArgumentError hdi(x)
end
@testset "errors for 0-dimensional array" begin
x = fill(1.0)
@test_throws ArgumentError hdi(x)
end
@testset "test errors when prob is not in (0, 1)" begin
x = randn(1_000)
@testset for prob in (0, 1, -0.1, 1.1, NaN)
@test_throws DomainError hdi(x; prob)
end
end
end
@testset "AbstractArray consistent with AbstractVector" begin
@testset for sz in ((100, 2), (100, 2, 3), (100, 2, 3, 4)),
prob in (0.72, 0.81),
T in (Float32, Float64, Int64)
x = T <: Integer ? rand(T(1):T(30), sz) : randn(T, sz)
r = @inferred hdi(x; prob)
if ndims(x) == 2
@test r isa NamedTuple{(:lower, :upper),NTuple{2,T}}
@test r == hdi(vec(x); prob)
else
@test r isa NamedTuple{(:lower, :upper),NTuple{2,Array{T,length(sz) - 2}}}
r_slices = dropdims(
mapslices(x -> hdi(x; prob), x; dims=(1, 2)); dims=(1, 2)
)
@test r.lower == first.(r_slices)
@test r.upper == last.(r_slices)
end
@test hdi!(copy(x); prob) == r
end
end
@testset "OffsetArray" begin
@testset for n in (100, 1_000), prob in (0.732, 0.864), T in (Float32, Float64)
x = randn(T, (n, 2, 3, 4))
xoff = OffsetArray(x, (-1, 2, -3, 4))
r = hdi(x; prob)
roff = @inferred hdi(xoff; prob)
@test roff isa NamedTuple{(:lower, :upper),<:NTuple{2,OffsetMatrix{T}}}
@test axes(roff.lower) == (axes(xoff, 3), axes(xoff, 4))
@test axes(roff.upper) == (axes(xoff, 3), axes(xoff, 4))
@test collect(roff.lower) == r.lower
@test collect(roff.upper) == r.upper
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 1807 | using ArviZExampleData
using RCall
r_loo_installed() = !isempty(rcopy(R"system.file(package='loo')"))
# R loo with our API
function loo_r(log_likelihood; reff=nothing)
R"require('loo')"
if reff === nothing
reff = rcopy(R"loo::relative_eff(exp($(log_likelihood)))")
end
result = R"loo::loo($log_likelihood, r_eff=$reff)"
estimates = rcopy(R"$(result)$estimates")
estimates = (
elpd=estimates[1, 1],
elpd_mcse=estimates[1, 2],
p=estimates[2, 1],
p_mcse=estimates[2, 2],
)
pointwise = rcopy(R"$(result)$pointwise")
pointwise = (
elpd=pointwise[:, 1],
elpd_mcse=pointwise[:, 2],
p=pointwise[:, 3],
reff=reff,
pareto_shape=pointwise[:, 5],
)
return (; estimates, pointwise)
end
# R loo with our API
function waic_r(log_likelihood)
R"require('loo')"
result = R"loo::waic($log_likelihood)"
estimates = rcopy(R"$(result)$estimates")
estimates = (
elpd=estimates[1, 1],
elpd_mcse=estimates[1, 2],
p=estimates[2, 1],
p_mcse=estimates[2, 2],
)
pointwise = rcopy(R"$(result)$pointwise")
pointwise = (elpd=pointwise[:, 1], p=pointwise[:, 2])
return (; estimates, pointwise)
end
function log_likelihood_eight_schools(idata)
# convert to Array to keep compile times low
return PermutedDimsArray(collect(idata.log_likelihood.obs), (2, 3, 1))
end
function eight_schools_data()
return (
centered=load_example_data("centered_eight"),
non_centered=load_example_data("non_centered_eight"),
)
end
function _isapprox(x::AbstractArray, y::AbstractArray; kwargs...)
return isapprox(collect(x), collect(y); kwargs...)
end
_isapprox(x, y; kwargs...) = all(map((x, y) -> isapprox(x, y; kwargs...), x, y))
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 6021 | using Logging: SimpleLogger, with_logger
using OffsetArrays
using PosteriorStats
using Test
@testset "loo" begin
@testset "core functionality" begin
@testset for sz in ((1000, 4), (1000, 4, 2), (100, 4, 2, 3)),
T in (Float32, Float64),
TA in (Array, OffsetArray)
atol_perm = cbrt(eps(T))
log_likelihood = randn(T, sz)
if TA === OffsetArray
log_likelihood = OffsetArray(log_likelihood, (0, -1, 10, 30)[1:length(sz)])
end
loo_result =
TA === OffsetArray ? loo(log_likelihood) : @inferred(loo(log_likelihood))
@test loo_result isa PosteriorStats.PSISLOOResult
estimates = elpd_estimates(loo_result)
pointwise = elpd_estimates(loo_result; pointwise=true)
@testset "return types and values as expected" begin
@test estimates isa NamedTuple{(:elpd, :elpd_mcse, :p, :p_mcse),NTuple{4,T}}
@test pointwise isa
NamedTuple{(:elpd, :elpd_mcse, :p, :reff, :pareto_shape)}
if length(sz) == 2
@test eltype(pointwise) === T
else
@test eltype(pointwise) <: TA{T,length(sz) - 2}
end
@test loo_result.psis_result isa PSIS.PSISResult
@test loo_result.psis_result.reff == pointwise.reff
@test loo_result.psis_result.pareto_shape == pointwise.pareto_shape
end
@testset "information criterion" begin
@test information_criterion(loo_result, :log) == estimates.elpd
@test information_criterion(loo_result, :negative_log) == -estimates.elpd
@test information_criterion(loo_result, :deviance) == -2 * estimates.elpd
@test information_criterion(loo_result, :log; pointwise=true) ==
pointwise.elpd
@test information_criterion(loo_result, :negative_log; pointwise=true) ==
-pointwise.elpd
@test information_criterion(loo_result, :deviance; pointwise=true) ==
-2 * pointwise.elpd
end
end
end
# @testset "keywords forwarded" begin
# log_likelihood = convert_to_dataset((x=randn(1000, 4, 2, 3), y=randn(1000, 4, 3)))
# @test loo(log_likelihood; var_name=:x).estimates == loo(log_likelihood.x).estimates
# @test loo(log_likelihood; var_name=:y).estimates == loo(log_likelihood.y).estimates
# @test loo(log_likelihood; var_name=:x, reff=0.5).pointwise.reff == fill(0.5, 2, 3)
# end
# @testset "errors" begin
# log_likelihood = convert_to_dataset((x=randn(1000, 4, 2, 3), y=randn(1000, 4, 3)))
# @test_throws ArgumentError loo(log_likelihood)
# @test_throws ArgumentError loo(log_likelihood; var_name=:z)
# @test_throws DimensionMismatch loo(log_likelihood; var_name=:x, reff=rand(2))
# end
@testset "warnings" begin
io = IOBuffer()
log_likelihood = randn(100, 4)
@testset for bad_val in (NaN, -Inf, Inf)
log_likelihood[1] = bad_val
result = with_logger(SimpleLogger(io)) do
loo(log_likelihood)
end
msg = String(take!(io))
@test occursin("Warning:", msg)
end
io = IOBuffer()
log_likelihood = randn(100, 4)
@testset for bad_reff in (NaN, 0, Inf)
result = with_logger(SimpleLogger(io)) do
loo(log_likelihood; reff=bad_reff)
end
msg = String(take!(io))
@test occursin("Warning:", msg)
end
io = IOBuffer()
log_likelihood = randn(5, 1)
result = with_logger(SimpleLogger(io)) do
loo(log_likelihood)
end
msg = String(take!(io))
@test occursin("Warning:", msg)
end
@testset "show" begin
loglike = log_likelihood_eight_schools(eight_schools_data().centered)
# regression test
@test sprint(show, "text/plain", loo(loglike)) == """
PSISLOOResult with estimates
elpd elpd_mcse p p_mcse
-31 1.4 0.9 0.34
and PSISResult with 500 draws, 4 chains, and 8 parameters
Pareto shape (k) diagnostic values:
Count Min. ESS
(-Inf, 0.5] good 6 (75.0%) 135
(0.5, 0.7] okay 2 (25.0%) 421"""
end
@testset "agrees with R loo" begin
if r_loo_installed()
models = eight_schools_data()
@testset for name in keys(models)
log_likelihood = log_likelihood_eight_schools(models[name])
reff_rand = rand(size(log_likelihood, 3))
@testset for reff in (nothing, reff_rand)
result_r = loo_r(log_likelihood; reff)
result = loo(log_likelihood; reff)
@test result.estimates.elpd ≈ result_r.estimates.elpd
@test result.estimates.elpd_mcse ≈ result_r.estimates.elpd_mcse
@test result.estimates.p ≈ result_r.estimates.p
@test result.estimates.p_mcse ≈ result_r.estimates.p_mcse
@test result.pointwise.elpd ≈ result_r.pointwise.elpd
# increased tolerance for elpd_mcse, since we use a different approach
@test result.pointwise.elpd_mcse ≈ result_r.pointwise.elpd_mcse rtol =
0.01
@test result.pointwise.p ≈ result_r.pointwise.p
@test result.pointwise.reff ≈ result_r.pointwise.reff
@test result.pointwise.pareto_shape ≈ result_r.pointwise.pareto_shape
end
end
else
@warn "Skipping consistency tests against R loo::loo, since loo is not installed."
@test_broken false
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 3314 | using Distributions
using OffsetArrays
using PosteriorStats
using StatsBase
using Test
@testset "loo_pit" begin
@testset "scalar data" begin
ndraws = 100
nchains = 3
y = randn()
y_pred = randn(ndraws, nchains)
weights = rand(ndraws, nchains)
log_weights = log.(weights) .- log(sum(weights))
pitvals = @inferred loo_pit(y, y_pred, log_weights)
@test pitvals isa typeof(y)
@test 0 <= pitvals <= 1
@test pitvals ≈ mean(y_pred .≤ y, StatsBase.weights(weights))
end
@testset "array data" begin
ndraws = 100
nchains = 3
@testset for sz in ((100,), (5, 4)), T in (Float32, Float64)
y = randn(T, sz...)
y_pred = randn(T, ndraws, nchains, sz...)
weights = rand(T, ndraws, nchains, sz...)
weights ./= sum(weights; dims=(1, 2))
log_weights = log.(weights)
pitvals = @inferred loo_pit(y, y_pred, log_weights)
@test pitvals isa typeof(y)
@test size(pitvals) == sz
@test all(p -> 0 ≤ p ≤ 1, pitvals)
pitvals_exp = dropdims(
sum((y_pred .≤ reshape(y, 1, 1, sz...)) .* weights; dims=(1, 2));
dims=(1, 2),
)
@test pitvals ≈ pitvals_exp
end
end
@testset "discrete data" begin
ndraws = 1_000
nchains = 3
dists = Binomial.(10:10:100, 0.25)
d = product_distribution(dists)
y = rand(d)
y_sample = rand(d, ndraws * nchains)
y_pred = reshape(transpose(y_sample), ndraws, nchains, length(y))
loglike = mapslices(yi -> logpdf.(dists, yi), y_pred; dims=3)
log_weights = psis(loglike).log_weights
pit_vals = loo_pit(
PosteriorStats.smooth_data(y; dims=1),
PosteriorStats.smooth_data(y_pred; dims=3),
log_weights,
)
ϵ = sqrt(eps())
@test loo_pit(y, y_pred, log_weights) == pit_vals
@test loo_pit(y, y_pred, log_weights; is_discrete=true) == pit_vals
@test loo_pit(y, y_pred, log_weights; is_discrete=false) != pit_vals
@test !(loo_pit(y .+ ϵ, y_pred, log_weights) ≈ pit_vals)
@test loo_pit(y .+ ϵ, y_pred, log_weights; is_discrete=true) ≈ pit_vals
@test !(loo_pit(y, y_pred .+ ϵ, log_weights) ≈ pit_vals)
@test loo_pit(y, y_pred .+ ϵ, log_weights; is_discrete=true) ≈ pit_vals
end
# @testset "OffsetArrays data" begin
# draw_dim = Dim{:draw}(1:100)
# chain_dim = Dim{:chain}(0:2)
# sample_dims = (draw_dim, chain_dim)
# param_dims = (Dim{:param1}(1:2), Dim{:param2}([:a, :b, :c]))
# all_dims = (sample_dims..., param_dims...)
# y = DimArray(randn(size(param_dims)...), param_dims)
# y_pred = DimArray(randn(size(all_dims)...), all_dims)
# weights = DimArray(rand(size(all_dims)...), all_dims)
# weights ./= sum(weights; dims=(:draw, :chain))
# log_weights = log.(weights)
# pitvals = @inferred loo_pit(y, y_pred, log_weights)
# @test pitvals isa typeof(y)
# @test all(p -> 0 ≤ p ≤ 1, pitvals)
# @test DimensionalData.data(pitvals) ==
# loo_pit(map(DimensionalData.data, (y, y_pred, log_weights))...)
# end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
|
[
"MIT"
] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 5575 | using FiniteDifferences
using LinearAlgebra
using OffsetArrays
using Optim
using PosteriorStats
using Random
using Test
struct DummyOptimizer <: Optim.AbstractOptimizer end
@testset "model_weights" begin
function test_model_weights(weights_method)
@testset "weights are same collection as arguments" begin
elpd_results_tuple = map(loo, (randn(1000, 4, 2, 3), randn(1000, 4, 2, 3)))
weights_tuple = @inferred model_weights(weights_method(), elpd_results_tuple)
@test weights_tuple isa NTuple{2,Float64}
@test sum(weights_tuple) ≈ 1
elpd_results_nt = NamedTuple{(:x, :y)}(elpd_results_tuple)
weights_nt = @inferred model_weights(weights_method(), elpd_results_nt)
@test weights_nt isa NamedTuple{(:x, :y),NTuple{2,Float64}}
@test _isapprox(values(weights_nt), weights_tuple)
elpd_results_da = OffsetVector(collect(elpd_results_tuple), 0:1)
weights_da = @inferred model_weights(weights_method(), elpd_results_da)
@test weights_da isa OffsetVector
@test axes(weights_da) == axes(elpd_results_da)
@test collect(weights_da) ≈ collect(weights_tuple)
end
@testset "weights invariant to order" begin
elpd_results = map(
waic, (randn(1000, 4, 10), randn(1000, 4, 10), randn(1000, 4, 10))
)
weights1 = model_weights(weights_method(), elpd_results)
weights2 = model_weights(weights_method(), reverse(elpd_results))
T = eltype(weights1)
@test _isapprox(weights1, reverse(weights2); atol=sqrt(eps(T)))
end
@testset "identical models get the same weights" begin
ll = randn(1000, 4, 10)
result = waic(ll)
elpd_results = fill(result, 3)
weights = model_weights(weights_method(), elpd_results)
@test sum(weights) ≈ 1
@test weights ≈ fill(weights[1], length(weights))
end
@testset "better model gets higher weight" begin
data = eight_schools_data()
elpd_results = map(loo ∘ log_likelihood_eight_schools, data)
weights = model_weights(weights_method(), elpd_results)
@test sum(weights) ≈ 1
@test weights.non_centered > weights.centered
end
end
@testset "PseudoBMA" begin
@test !PseudoBMA().regularize
@test PseudoBMA(true) === PseudoBMA(; regularize=true)
test_model_weights(PseudoBMA)
@testset "regularization is respected" begin
elpd_results = map(waic, [randn(1000, 4, 2, 3) for _ in 1:2])
weights_reg = model_weights(PseudoBMA(true), elpd_results)
weights_nonreg = model_weights(PseudoBMA(false), elpd_results)
@test !(weights_reg ≈ weights_nonreg)
end
end
@testset "BootstrappedPseudoBMA" begin
test_model_weights() do
# use the same seed for every run
rng = MersenneTwister(37)
BootstrappedPseudoBMA(; rng)
end
@testset "number of samples can be configured" begin
elpd_results = map(waic, [randn(1000, 4, 2, 3) for _ in 1:2])
rng = MersenneTwister(64)
weights1 = model_weights(BootstrappedPseudoBMA(; rng, samples=10), elpd_results)
rng = MersenneTwister(64)
weights2 = model_weights(
BootstrappedPseudoBMA(; rng, samples=100), elpd_results
)
@test !(weights1 ≈ weights2)
end
end
@testset "Stacking" begin
@testset "InplaceStackingOptimObjective" begin
E = rand(20, 10)
obj = PosteriorStats.InplaceStackingOptimObjective(E)
@test obj.cache isa NTuple{2,Vector{Float64}}
@test length(obj.cache[1]) == size(E, 1)
@test length(obj.cache[2]) == size(E, 2)
x = normalize(randn(size(E, 2)))
# test derivatives with finite differences
grad_exp = FiniteDifferences.grad(
central_fdm(5, 1), x -> obj(true, nothing, x), x
)[1]
grad = similar(x)
obj(true, grad, x)
@test grad ≈ grad_exp
grad = similar(x)
obj(nothing, grad, x)
@test grad ≈ grad_exp
@test @allocated(obj(true, nothing, x)) ≤ 32
@test @allocated(obj(true, grad, x)) == @allocated(obj(true, nothing, x))
end
@testset "constructor errors if invalid optimizer provided" begin
@test_throws ArgumentError Stacking(; optimizer=DummyOptimizer())
end
@testset "stacking is default" begin
elpd_results = map(waic, [randn(1000, 4, 2, 3) for _ in 1:2])
@test model_weights(elpd_results) == model_weights(Stacking(), elpd_results)
end
test_model_weights(Stacking)
@testset "alternate optimizer options are used" begin
elpd_results = map(waic, [randn(1000, 4, 2, 3) for _ in 1:10])
weights1 = model_weights(Stacking(), elpd_results)
weights2 = model_weights(Stacking(), elpd_results)
optimizer = GradientDescent()
weights3 = model_weights(Stacking(; optimizer), elpd_results)
options = Optim.Options(; iterations=2)
weights4 = model_weights(Stacking(; options), elpd_results)
@test weights3 != weights1 == weights2 != weights4
@test weights3 ≈ weights1
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 1055 |
using GLM
using PosteriorStats
using Statistics
using Test
@testset "r2_score/r2_sample" begin
@testset "basic" begin
n = 100
@testset for T in (Float32, Float64),
sz in (300, (100, 3)),
σ in T.((2, 1, 0.5, 0.1))
x = range(T(0), T(1); length=n)
slope = T(2)
intercept = T(3)
y = @. slope * x + intercept + randn(T) * σ
x_reshape = length(sz) == 1 ? x' : reshape(x, 1, 1, :)
y_pred = slope .* x_reshape .+ intercept .+ randn(T, sz..., n) .* σ
r2_val = @inferred r2_score(y, y_pred)
@test r2_val isa NamedTuple{(:r2, :r2_std),NTuple{2,T}}
r2_draws = @inferred PosteriorStats.r2_samples(y, y_pred)
@test r2_val.r2 ≈ mean(r2_draws)
@test r2_val.r2_std ≈ std(r2_draws; corrected=false)
# check rough consistency with GLM
res = lm(@formula(y ~ 1 + x), (; x=Float64.(x), y=Float64.(y)))
@test r2_val.r2 ≈ r2(res) rtol = 1
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 356 |
using PosteriorStats
using Random
using Test
Random.seed!(97)
@testset "PosteriorStats" begin
include("helpers.jl")
include("utils.jl")
include("hdi.jl")
include("loo.jl")
include("loo_pit.jl")
include("waic.jl")
include("model_weights.jl")
include("compare.jl")
include("r2_score.jl")
include("summarize.jl")
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 14268 |
using IteratorInterfaceExtensions
using MCMCDiagnosticTools
using OrderedCollections
using PosteriorStats
using Statistics
using StatsBase
using Tables
using TableTraits
using Test
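# Minimal wrapper demonstrating the extension point: a custom sample type
# overloads `summarize` and forwards to the array method with its own names.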
struct SampleWrapper{T,N}
draws::T
var_names::N
end
function PosteriorStats.summarize(
spl::SampleWrapper, stats_funs...; var_names=spl.var_names, kwargs...
)
x = spl.draws
return summarize(x, stats_funs...; var_names, kwargs...)
end
_mean_and_std(x) = (mean=mean(x), std=std(x))
@testset "summary statistics" begin
@testset "SummaryStats" begin
parameter_names = ["a", "bb", "ccc", "d", "e"]
data = (est=randn(5), mcse_est=rand(5), rhat=rand(5), ess=rand(5))
@inferred SummaryStats(data; name="Stats")
stats = @inferred SummaryStats(data, parameter_names; name="Stats")
@testset "basic interfaces" begin
@test parent(stats) === data
@test stats.name == "Stats"
@test SummaryStats("MoreStats", data).name == "MoreStats"
@test SummaryStats(data; name="MoreStats").name == "MoreStats"
@test keys(stats) == (:parameter, keys(data)...)
for k in keys(stats)
@test haskey(stats, k)
if k === :parameter
@test getindex(stats, k) == parameter_names
else
@test getindex(stats, k) == getindex(data, k)
end
end
@test !haskey(stats, :foo)
@test length(stats) == length(data) + 1
@test getindex(stats, 1) == parameter_names
for i in 1:length(data)
@test stats[i + 1] == data[i]
end
@test Base.iterate(stats) == (parameter_names, (2, length(stats)))
@test Base.iterate(stats, (2, length(stats))) == (stats[2], (3, length(stats)))
data_copy1 = deepcopy(data)
stats2 = SummaryStats(data_copy1, parameter_names)
@test stats2 == stats
@test isequal(stats2, stats)
data_copy2 = deepcopy(data)
parameter_names2 = copy(parameter_names)
parameter_names2[1] = "foo"
stats3 = SummaryStats(data_copy2, parameter_names2; name="Stats")
@test stats3 != stats2
@test !isequal(stats3, stats2)
stats3 = SummaryStats(data_copy2, parameter_names; name="Stats")
stats3[:est][2] = NaN
@test stats3 != stats2
@test !isequal(stats3, stats2)
stats2[:est][2] = NaN
@test stats3 != stats2
@test isequal(stats3, stats2)
end
@testset "merge" begin
stats_dict = SummaryStats(
OrderedDict(pairs(data)), parameter_names; name="Stats"
)
@test merge(stats) === stats
@test merge(stats, stats) == stats
@test merge(stats_dict) === stats_dict
@test merge(stats_dict, stats_dict) == stats_dict
@test merge(stats, stats_dict) == stats
@test merge(stats_dict, stats) == stats_dict
data2 = (ess=randn(5), rhat=rand(5), mcse_est=rand(5), est2=rand(5))
stats2 = SummaryStats(data2, 1:5; name="Stats2")
stats2_dict = SummaryStats(OrderedDict(pairs(data2)), 1:5; name="Stats2")
for stats_a in (stats, stats_dict), stats_b in (stats2, stats2_dict)
@test merge(stats_a, stats_b) ==
SummaryStats(merge(data, data2), stats_b.parameter_names)
@test merge(stats_a, stats_b).name == stats_b.name
@test merge(stats_b, stats_a) ==
SummaryStats(merge(data2, data), stats_a.parameter_names)
@test merge(stats_b, stats_a).name == stats_a.name
end
end
@testset "Tables interface" begin
@test Tables.istable(typeof(stats))
@test Tables.columnaccess(typeof(stats))
@test Tables.columns(stats) === stats
@test Tables.columnnames(stats) == keys(stats)
table = Tables.columntable(stats)
@test table == (; parameter=parameter_names, data...)
for (i, k) in enumerate(Tables.columnnames(stats))
@test Tables.getcolumn(stats, i) == Tables.getcolumn(stats, k)
end
@test_throws ErrorException Tables.getcolumn(stats, :foo)
@test !Tables.rowaccess(typeof(stats))
@test Tables.schema(stats) == Tables.schema(Tables.columntable(stats))
end
@testset "TableTraits interface" begin
@test IteratorInterfaceExtensions.isiterable(stats)
@test TableTraits.isiterabletable(stats)
nt = collect(Iterators.take(IteratorInterfaceExtensions.getiterator(stats), 1))[1]
@test isequal(
nt,
(;
(
k => Tables.getcolumn(stats, k)[1] for
k in Tables.columnnames(stats)
)...
),
)
nt = collect(Iterators.take(IteratorInterfaceExtensions.getiterator(stats), 2))[2]
@test isequal(
nt,
(;
(
k => Tables.getcolumn(stats, k)[2] for
k in Tables.columnnames(stats)
)...
),
)
end
@testset "show" begin
parameter_names = ["a", "bb", "ccc", "d", "e"]
data = (
est=[111.11, 1.2345e-6, 5.4321e8, Inf, NaN],
mcse_est=[0.0012345, 5.432e-5, 2.1234e5, Inf, NaN],
rhat=vcat(1.009, 1.011, 0.99, Inf, NaN),
ess=vcat(312.45, 23.32, 1011.98, Inf, NaN),
ess_bulk=vcat(9.2345, 876.321, 999.99, Inf, NaN),
)
stats = SummaryStats(data, parameter_names)
@test sprint(show, "text/plain", stats) == """
SummaryStats
est mcse_est rhat ess ess_bulk
a 111.110 0.0012 1.01 312 9
bb 1.e-06 5.4e-05 1.01 23 876
ccc 5.432e+08 2.1e+05 0.99 1012 1000
d Inf Inf Inf Inf Inf
e NaN NaN NaN NaN NaN"""
@test startswith(sprint(show, "text/html", stats), "<table")
end
end
@testset "summarize" begin
@testset "base cases" begin
x = randn(1_000, 4, 3)
if VERSION ≥ v"1.9.0-"
stats1 = @inferred summarize(x, mean, std, median)
else
stats1 = summarize(x, mean, std, median)
end
@test stats1 isa SummaryStats
@test getfield(stats1, :name) == "SummaryStats"
@test stats1 == SummaryStats(
(
mean=map(mean, eachslice(x; dims=3)),
std=map(std, eachslice(x; dims=3)),
median=map(median, eachslice(x; dims=3)),
),
axes(x, 3),
)
function _compute_stats(x)
return summarize(x, (:mean, :std) => mean_and_std, :median => median)
end
if VERSION ≥ v"1.10.0-"
stats2 = @inferred _compute_stats(x)
else
stats2 = _compute_stats(x)
end
@test stats2 == stats1
stats3 = summarize(x, mean, std; var_names=["a", "b", "c"], name="Stats")
@test getfield(stats3, :name) == "Stats"
@test stats3 == SummaryStats(
(mean=map(mean, eachslice(x; dims=3)), std=map(std, eachslice(x; dims=3))),
["a", "b", "c"],
)
stats4 = summarize(x; var_names=["a", "b", "c"], name="Stats")
@test getfield(stats4, :name) == "Stats"
stats5 = summarize(
x, default_summary_stats()...; var_names=["a", "b", "c"], name="Stats"
)
@test stats4 == stats5
stats6 = summarize(x, _mean_and_std, mad)
@test haskey(stats6, :mean)
@test haskey(stats6, :std)
@test haskey(stats6, :mad)
@test_throws DimensionMismatch summarize(x, mean; var_names=["a", "b"])
@test_throws ArgumentError summarize(x, "std" => std)
@test_throws ArgumentError summarize(x, ("mean", "std") => mean_and_std)
end
@testset "default stats function sets" begin
@testset "array inputs" begin
x = randn(1_000, 4, 3)
# not completely type-inferrable due to HDI
stats1 = summarize(x, default_summary_stats()...)
@test all(
map(
_isapprox,
stats1,
summarize(
x,
mean,
std,
(Symbol("hdi_3%"), Symbol("hdi_97%")) => hdi,
:mcse_mean => mcse,
:mcse_std => (x -> mcse(x; kind=std)),
:ess_tail => (x -> ess(x; kind=:tail)),
:ess_bulk => (x -> ess(x; kind=:bulk)),
rhat,
),
),
)
stats2 = summarize(x, default_summary_stats(median; prob_interval=0.9)...)
@test all(
map(
_isapprox,
stats2,
summarize(
x,
median,
mad,
(Symbol("eti_5%"), Symbol("eti_95%")) =>
(x -> quantile(vec(x), (0.05, 0.95))),
:mcse_median => (x -> mcse(x; kind=median)),
:ess_tail => (x -> ess(x; kind=:tail)),
:ess_median => (x -> ess(x; kind=median)),
rhat,
),
),
)
_compute_diagnostics(x) = summarize(x, default_diagnostics()...)
if VERSION ≥ v"1.10.0-"
stats3 = @inferred _compute_diagnostics(x)
else
stats3 = _compute_diagnostics(x)
end
@test all(
map(
_isapprox,
stats3,
summarize(
x,
:mcse_mean => mcse,
:mcse_std => (x -> mcse(x; kind=std)),
:ess_tail => (x -> ess(x; kind=:tail)),
:ess_bulk => (x -> ess(x; kind=:bulk)),
rhat,
),
),
)
@test all(
map(
_isapprox,
summarize(x, default_stats()...),
summarize(
x, mean, std, (Symbol("hdi_3%"), Symbol("hdi_97%")) => hdi
),
),
)
x2 = convert(Array{Union{Float64,Missing}}, x)
x2[1, 1, 1] = missing
stats4 = summarize(x2, default_summary_stats()...)
@test stats4[:mean] ≈ [mean(skipmissing(x2[:, :, 1])); stats1[:mean][2:end]]
@test stats4[:std] ≈ [std(skipmissing(x2[:, :, 1])); stats1[:std][2:end]]
@test stats4[Symbol("hdi_3%")] ≈ [
hdi(collect(skipmissing(x2[:, :, 1]))).lower
stats1[Symbol("hdi_3%")][2:end]
]
@test stats4[Symbol("hdi_97%")] ≈ [
hdi(collect(skipmissing(x2[:, :, 1]))).upper
stats1[Symbol("hdi_97%")][2:end]
]
for k in (:mcse_mean, :mcse_std, :ess_tail, :ess_bulk, :rhat)
@test stats4[k][1] === missing
@test stats4[k][2:end] ≈ stats1[k][2:end]
end
stats5 = summarize(x2, default_summary_stats(median; prob_interval=0.9)...)
@test stats5[:median] ≈
[median(skipmissing(x2[:, :, 1])); stats2[:median][2:end]]
@test stats5[:mad] ≈ [mad(skipmissing(x2[:, :, 1])); stats2[:mad][2:end]]
@test stats5[Symbol("eti_5%")] ≈ [
quantile(skipmissing(x2[:, :, 1]), 0.05)
stats2[Symbol("eti_5%")][2:end]
]
@test stats5[Symbol("eti_95%")] ≈ [
quantile(skipmissing(x2[:, :, 1]), 0.95)
stats2[Symbol("eti_95%")][2:end]
]
for k in (:mcse_median, :ess_tail, :ess_median, :rhat)
@test stats5[k][1] === missing
@test stats5[k][2:end] ≈ stats2[k][2:end]
end
end
@testset "custom inputs" begin
x = randn(1_000, 4, 3)
sample = SampleWrapper(x, ["a", "b", "c"])
function _compute_diagnostics(x)
return summarize(x, default_diagnostics()...; name="foo")
end
if VERSION ≥ v"1.10.0-"
stats1 = @inferred _compute_diagnostics(sample)
else
stats1 = _compute_diagnostics(sample)
end
@test stats1 isa SummaryStats
@test stats1.name == "foo"
@test stats1 == summarize(
x, default_diagnostics()...; name="foo", var_names=["a", "b", "c"]
)
stats2 = summarize(sample)
@test stats2.name == "SummaryStats"
@test stats2 == summarize(x; var_names=["a", "b", "c"])
end
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 8835 |
using OffsetArrays
using PosteriorStats
using Random
using StatsBase
using Statistics
using Test
@testset "utils" begin
@testset "_assimilar" begin
@testset for x in ([8, 2, 5], (8, 2, 5), (; a=8, b=2, c=5))
@test @inferred(PosteriorStats._assimilar((x=1.0, y=2.0, z=3.0), x)) ==
(x=8, y=2, z=5)
@test @inferred(PosteriorStats._assimilar((randn(3)...,), x)) == (8, 2, 5)
y = OffsetVector(randn(3), -1)
@test @inferred(PosteriorStats._assimilar(y, x)) == OffsetVector([8, 2, 5], -1)
end
end
@testset "_sortperm/_permute" begin
@testset for (x, y) in (
[3, 1, 4, 2] => [1, 2, 3, 4],
(3, 1, 4, 2) => (1, 2, 3, 4),
(x=3, y=1, z=4, w=2) => (y=1, w=2, x=3, z=4),
)
perm = PosteriorStats._sortperm(x)
@test perm == [2, 4, 1, 3]
@test PosteriorStats._permute(x, perm) == y
end
end
@testset "_eachslice" begin
x = randn(2, 3, 4)
slices = PosteriorStats._eachslice(x; dims=(3, 1))
@test size(slices) == (size(x, 3), size(x, 1))
slices = collect(slices)
for i in axes(x, 3), j in axes(x, 1)
@test slices[i, j] == x[j, :, i]
end
@test PosteriorStats._eachslice(x; dims=2) ==
PosteriorStats._eachslice(x; dims=(2,))
if VERSION ≥ v"1.9-"
for dims in ((3, 1), (2, 3), 3)
@test PosteriorStats._eachslice(x; dims) === eachslice(x; dims)
end
end
end
@testset "_logabssubexp" begin
x, y = rand(2)
@test @inferred(PosteriorStats._logabssubexp(log(x), log(y))) ≈ log(abs(x - y))
@test PosteriorStats._logabssubexp(log(y), log(x)) ≈ log(abs(y - x))
end
@testset "_sum_and_se" begin
@testset for n in (100, 1_000), scale in (1, 5)
x = randn(n) * scale
s, se = @inferred PosteriorStats._sum_and_se(x)
@test s ≈ sum(x)
@test se ≈ StatsBase.sem(x) * n
x = randn(n, 10) * scale
s, se = @inferred PosteriorStats._sum_and_se(x; dims=1)
@test s ≈ sum(x; dims=1)
@test se ≈ mapslices(StatsBase.sem, x; dims=1) * n
x = randn(10, n) * scale
s, se = @inferred PosteriorStats._sum_and_se(x; dims=2)
@test s ≈ sum(x; dims=2)
@test se ≈ mapslices(StatsBase.sem, x; dims=2) * n
end
@testset "::Number" begin
@test isequal(PosteriorStats._sum_and_se(2), (2, NaN))
@test isequal(PosteriorStats._sum_and_se(3.5f0; dims=()), (3.5f0, NaN32))
end
end
@testset "_log_mean" begin
x = rand(1000)
logx = log.(x)
w = rand(1000)
w ./= sum(w)
logw = log.(w)
@test PosteriorStats._log_mean(logx, logw) ≈ log(mean(x, StatsBase.fweights(w)))
x = rand(1000, 4)
logx = log.(x)
@test PosteriorStats._log_mean(logx, logw; dims=1) ≈
log.(mean(x, StatsBase.fweights(w); dims=1))
end
@testset "_se_log_mean" begin
ndraws = 1_000
@testset for n in (1_000, 10_000), scale in (1, 5)
x = rand(n) * scale
w = rand(n)
w = StatsBase.weights(w ./ sum(w))
logx = log.(x)
logw = log.(w)
se = @inferred PosteriorStats._se_log_mean(logx, logw)
se_exp = std(log(mean(rand(n) * scale, w)) for _ in 1:ndraws)
@test se ≈ se_exp rtol = 1e-1
end
end
@testset "sigdigits_matching_se" begin
@test PosteriorStats.sigdigits_matching_se(123.456, 0.01) == 5
@test PosteriorStats.sigdigits_matching_se(123.456, 1) == 3
@test PosteriorStats.sigdigits_matching_se(123.456, 0.0001) == 7
@test PosteriorStats.sigdigits_matching_se(1e5, 0.1) == 7
@test PosteriorStats.sigdigits_matching_se(1e5, 0.2; scale=5) == 6
@test PosteriorStats.sigdigits_matching_se(1e4, 0.5) == 5
@test PosteriorStats.sigdigits_matching_se(1e4, 0.5; scale=1) == 6
@test PosteriorStats.sigdigits_matching_se(1e5, 0.1; sigdigits_max=2) == 2
# errors
@test_throws ArgumentError PosteriorStats.sigdigits_matching_se(123.456, -1)
@test_throws ArgumentError PosteriorStats.sigdigits_matching_se(
123.456, 1; sigdigits_max=-1
)
@test_throws ArgumentError PosteriorStats.sigdigits_matching_se(
123.456, 1; scale=-1
)
# edge cases
@test PosteriorStats.sigdigits_matching_se(0.0, 1) == 0
@test PosteriorStats.sigdigits_matching_se(NaN, 1) == 0
@test PosteriorStats.sigdigits_matching_se(Inf, 1) == 0
@test PosteriorStats.sigdigits_matching_se(100, 1; scale=Inf) == 0
@test PosteriorStats.sigdigits_matching_se(100, Inf) == 0
@test PosteriorStats.sigdigits_matching_se(100, 0) == 7
@test PosteriorStats.sigdigits_matching_se(100, 0; sigdigits_max=2) == 2
end
@testset "_printf_with_sigdigits" begin
@test PosteriorStats._printf_with_sigdigits(123.456, 1) == "1.e+02"
@test PosteriorStats._printf_with_sigdigits(-123.456, 1) == "-1.e+02"
@test PosteriorStats._printf_with_sigdigits(123.456, 2) == "1.2e+02"
@test PosteriorStats._printf_with_sigdigits(-123.456, 2) == "-1.2e+02"
@test PosteriorStats._printf_with_sigdigits(123.456, 3) == "123"
@test PosteriorStats._printf_with_sigdigits(-123.456, 3) == "-123"
@test PosteriorStats._printf_with_sigdigits(123.456, 4) == "123.5"
@test PosteriorStats._printf_with_sigdigits(-123.456, 4) == "-123.5"
@test PosteriorStats._printf_with_sigdigits(123.456, 5) == "123.46"
@test PosteriorStats._printf_with_sigdigits(-123.456, 5) == "-123.46"
@test PosteriorStats._printf_with_sigdigits(123.456, 6) == "123.456"
@test PosteriorStats._printf_with_sigdigits(-123.456, 6) == "-123.456"
@test PosteriorStats._printf_with_sigdigits(123.456, 7) == "123.4560"
@test PosteriorStats._printf_with_sigdigits(-123.456, 7) == "-123.4560"
@test PosteriorStats._printf_with_sigdigits(123.456, 8) == "123.45600"
@test PosteriorStats._printf_with_sigdigits(0.00000123456, 1) == "1.e-06"
@test PosteriorStats._printf_with_sigdigits(0.00000123456, 2) == "1.2e-06"
end
@testset "ft_printf_sigdigits" begin
@testset "all columns" begin
@testset for sigdigits in 1:5
ft1 = PosteriorStats.ft_printf_sigdigits(sigdigits)
for i in 1:10, j in 1:5
v = randn()
@test ft1(v, i, j) ==
PosteriorStats._printf_with_sigdigits(v, sigdigits)
@test ft1("foo", i, j) == "foo"
end
end
end
@testset "subset of columns" begin
@testset for sigdigits in 1:5
ft = PosteriorStats.ft_printf_sigdigits(sigdigits, [2, 3])
for i in 1:10, j in 1:5
v = randn()
if j ∈ [2, 3]
@test ft(v, i, j) ==
PosteriorStats._printf_with_sigdigits(v, sigdigits)
else
@test ft(v, i, j) === v
end
@test ft("foo", i, j) == "foo"
end
end
end
end
@testset "ft_printf_sigdigits_matching_se" begin
@testset "all columns" begin
@testset for scale in 1:3
se = rand(5)
ft = PosteriorStats.ft_printf_sigdigits_matching_se(se; scale)
for i in eachindex(se), j in 1:5
v = randn()
sigdigits = PosteriorStats.sigdigits_matching_se(v, se[i]; scale)
@test ft(v, i, j) == PosteriorStats._printf_with_sigdigits(v, sigdigits)
@test ft("foo", i, j) == "foo"
end
end
end
@testset "subset of columns" begin
@testset for scale in 1:3
se = rand(5)
ft = PosteriorStats.ft_printf_sigdigits_matching_se(se, [2, 3]; scale)
for i in eachindex(se), j in 1:5
v = randn()
if j ∈ [2, 3]
sigdigits = PosteriorStats.sigdigits_matching_se(v, se[i]; scale)
@test ft(v, i, j) ==
PosteriorStats._printf_with_sigdigits(v, sigdigits)
@test ft("foo", i, j) == "foo"
else
@test ft(v, i, j) === v
end
end
end
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | code | 3676 |
using Logging: SimpleLogger, with_logger
using OffsetArrays
using PosteriorStats
using Test
@testset "waic" begin
@testset "core functionality" begin
@testset for sz in ((1000, 4), (1000, 4, 2), (100, 4, 2, 3)),
T in (Float32, Float64),
TA in (Array, OffsetArray)
atol_perm = cbrt(eps(T))
log_likelihood = randn(T, sz)
if TA === OffsetArray
log_likelihood = OffsetArray(log_likelihood, (0, -1, 10, 30)[1:length(sz)])
end
waic_result =
TA === OffsetArray ? waic(log_likelihood) : @inferred(waic(log_likelihood))
@test waic_result isa PosteriorStats.WAICResult
estimates = elpd_estimates(waic_result)
pointwise = elpd_estimates(waic_result; pointwise=true)
@testset "return types and values as expected" begin
@test estimates isa NamedTuple{(:elpd, :elpd_mcse, :p, :p_mcse),NTuple{4,T}}
@test pointwise isa NamedTuple{(:elpd, :p)}
if length(sz) == 2
@test eltype(pointwise) === T
else
@test eltype(pointwise) <: TA{T,length(sz) - 2}
end
end
@testset "information criterion" begin
@test information_criterion(waic_result, :log) == estimates.elpd
@test information_criterion(waic_result, :negative_log) == -estimates.elpd
@test information_criterion(waic_result, :deviance) == -2 * estimates.elpd
@test information_criterion(waic_result, :log; pointwise=true) ==
pointwise.elpd
@test information_criterion(waic_result, :negative_log; pointwise=true) ==
-pointwise.elpd
@test information_criterion(waic_result, :deviance; pointwise=true) ==
-2 * pointwise.elpd
end
end
end
@testset "warnings" begin
io = IOBuffer()
log_likelihood = randn(100, 4)
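# Non-finite log-likelihood values should trigger a warning.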
@testset for bad_val in (NaN, -Inf, Inf)
log_likelihood[1] = bad_val
result = with_logger(SimpleLogger(io)) do
waic(log_likelihood)
end
msg = String(take!(io))
@test occursin("Warning:", msg)
end
end
@testset "show" begin
loglike = log_likelihood_eight_schools(eight_schools_data().centered)
# regression test
@test sprint(show, "text/plain", waic(loglike)) == """
WAICResult with estimates
elpd elpd_mcse p p_mcse
-31 1.4 0.9 0.33"""
end
@testset "agrees with R waic" begin
if r_loo_installed()
models = eight_schools_data()
@testset for name in keys(models)
log_likelihood = log_likelihood_eight_schools(models[name])
result_r = waic_r(log_likelihood)
result = waic(log_likelihood)
@test result.estimates.elpd ≈ result_r.estimates.elpd
@test result.estimates.elpd_mcse ≈ result_r.estimates.elpd_mcse
@test result.estimates.p ≈ result_r.estimates.p
@test result.estimates.p_mcse ≈ result_r.estimates.p_mcse
@test result.pointwise.elpd ≈ result_r.pointwise.elpd
@test result.pointwise.p ≈ result_r.pointwise.p
end
else
@warn "Skipping consistency tests against R loo::waic, since loo is not installed."
@test_broken false
end
end
end
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | docs | 978 |
# PosteriorStats
[](https://julia.arviz.org/PosteriorStats)
[](https://github.com/arviz-devs/PosteriorStats.jl/actions)
[](https://codecov.io/gh/arviz-devs/PosteriorStats.jl)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/SciML/ColPrac)
[](https://numfocus.org)
PosteriorStats implements widely-used and well-characterized statistical analyses for the Bayesian workflow.
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | docs | 647 |
# API
```@index
Pages = ["stats.md"]
```
## Summary statistics
```@docs
SummaryStats
default_diagnostics
default_stats
default_summary_stats
summarize
```
## General statistics
```@docs
hdi
hdi!
```
## LOO and WAIC
```@docs
AbstractELPDResult
PSISLOOResult
WAICResult
elpd_estimates
information_criterion
loo
waic
```
## Model comparison
```@docs
ModelComparisonResult
compare
model_weights
```
The following model weighting methods are available
```@docs
AbstractModelWeightsMethod
BootstrappedPseudoBMA
PseudoBMA
Stacking
```
## Predictive checks
```@docs
loo_pit
r2_score
```
### Utilities
```@docs
PosteriorStats.smooth_data
```
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |
["MIT"] | 0.2.5 | 472553eb890cbc11fde5c300852b98515b1d52cf | docs | 656 |
```@meta
CurrentModule = PosteriorStats
```
# PosteriorStats
PosteriorStats implements widely-used and well-characterized statistical analyses for the Bayesian workflow.
These functions generally estimate properties of posterior and/or posterior predictive distributions.
The default implementations defined here operate on Monte Carlo samples.
See the [API](@ref) for details.
## Extending this package
The methods defined here are intended to be extended by two types of packages.
- packages that implement data types for storing Monte Carlo samples
- packages that implement other representations for posterior distributions than Monte Carlo draws
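For example, a package in the first category can overload `summarize` for its own sample container and forward to the array method. A minimal sketch, assuming a hypothetical `MyDraws` type that is not part of this package:

```julia
using PosteriorStats

# Hypothetical container: `draws` is a (draws, chains, params) array, and
# `var_names` holds one name per parameter.
struct MyDraws{T<:AbstractArray,N}
    draws::T
    var_names::N
end

# Forward to the array method, supplying the stored parameter names by default.
function PosteriorStats.summarize(
    d::MyDraws, stats_funs...; var_names=d.var_names, kwargs...
)
    return summarize(d.draws, stats_funs...; var_names, kwargs...)
end
```

With this overload, `summarize(MyDraws(x, ["a", "b", "c"]))` behaves like `summarize(x; var_names=["a", "b", "c"])`.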
| PosteriorStats | https://github.com/arviz-devs/PosteriorStats.jl.git |