licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 12161 | const K_CART_TYPE{T} = Quantity{T,Unitful.𝐋^-1,
Unitful.FreeUnits{(Ang^-1,),Unitful.𝐋^-1,nothing}}
phases(kpoints::Vector{<:Vec3}, R::Vec3) = exp.(-2im * π .* dot.(kpoints, (R,)))
function uniform_shifted_kgrid(::Type{T}, nkx::Integer, nky::Integer,
nkz::Integer, gamma_center = false) where {T}
t = [Vec3{T}(kx, ky, kz) for kx in 0:nkx-1, ky in 0:nky-1, kz in 0:nkz-1]
s = Vec3(nkx, nky, nkz)
t = map(t) do x
(x .+ 0.5) ./ s .- 0.5
end
if gamma_center
shift = 0.5 .* ((s .+ 1) .% 2) ./ s
t = map(t) do x
x .+ shift
end
end
return reshape(t, nkx * nky * nkz)
end
function uniform_shifted_kgrid(nkx::Integer, nky::Integer, nkz::Integer, gamma_center=false)
return uniform_shifted_kgrid(Float64, nkx, nky, nkz, gamma_center)
end
function uniform_kgrid(nkx::Integer, nky::Integer, nkz::Integer)
return reshape([Vec3{Float64}(kx, ky, kz)
for kz in range(0, (1 - 1 / nkz); length = nkz),
ky in range(0, (1 - 1 / nky); length = nky),
kx in range(0, (1 - 1 / nkx); length = nkx)], nkx * nky * nkz)
end
abstract type AbstractKGrid{T} end
core_kgrid(x::AbstractKGrid) = x.core
k_cryst(x::AbstractKGrid) = core_kgrid(x).k_cryst
k_cryst(x::Vec3) = x
# k_cart(x::AbstractKGrid) = core_kgrid(x).k_cart
# phase(x::AbstractKGrid) = core_kgrid(x).phase
Base.length(kgrid::AbstractKGrid) = length(core_kgrid(kgrid))
struct CoreKGrid{T} <: AbstractKGrid{T}
k_cryst::Vector{Vec3{T}}
# k_cart ::Vector{Vec3{T}} #not needed for now
# phase ::Vector{Complex{T}}
end
core_kgrid(x::CoreKGrid) = x
Base.length(x::CoreKGrid) = length(x.k_cryst)
abstract type KPoint{T<:AbstractFloat} end
struct KBond{T<:AbstractFloat} # All matrices/operators are in wannier gauge, i.e. block-like gauge
k_id1 :: Int #TODO: Optimize, can we drop these?
k_id2 :: Int
vr :: Vec3{K_CART_TYPE{T}}
end
Base.@kwdef mutable struct AbInitioKPoint{T} <: KPoint{T}
k_cryst :: Vec3{T}
k_cart :: Vec3{K_CART_TYPE{T}}
eigvals :: Vector{T} #original eigenvalues, in hamiltonian gauge
H :: Matrix{Complex{T}} # Hk in wannier gauge
neighbors::Vector{KBond{T}} = KBond{T}[]
overlaps::Vector{Matrix{Complex{T}}} = Matrix{Complex{T}}[] #already in wannier gauge
hamis::Vector{Matrix{Complex{T}}} = Matrix{Complex{T}}[] #Hamiltonian element between the block-like states in wannier gauge
uHu::Matrix{Matrix{Complex{T}}} = Matrix{Matrix{Complex{T}}}(undef, 0, 0)
end
k_cryst(k::AbInitioKPoint) = k.k_cryst
struct AbInitioKGrid{T,SA} <: AbstractKGrid{T}
kpoints::SA
neighbor_weights::Vector{T} #ordered in the same way as neighbors in kpoints
end
num_states(chk, ik) = chk.ndimwin[ik]
function disentanglement_range(chk, ik)
first_band_id = findfirst(view(chk.lwindow, :, ik))
return first_band_id:first_band_id+num_states(chk, ik)-1
end
function AbInitioKGrid(::Type{T},
eig_filename::AbstractString,
chk_filename::AbstractString,
nnkp_filename::AbstractString,
mmn_filename::AbstractString,
uHu_filename::AbstractString) where {T}
eigenvalues = read_eig(eig_filename)
wannier_chk_params = read_chk(chk_filename)
v_mat = wannier_chk_params.V_matrix
nwann = wannier_chk_params.n_wann
kpoints = map(enumerate(wannier_chk_params.kpoints)) do (ik, k)
@views v = v_mat[1:num_states(wannier_chk_params, ik), 1:nwann, ik]
eigvals = eigenvalues[:, ik]
return AbInitioKPoint{T}(; k_cryst = k,
k_cart = wannier_chk_params.recip_cell * k,
eigvals = eigenvalues[:, ik],
H = v' *
diagm(eigvals[disentanglement_range(wannier_chk_params,
ik)]) * v)
end
fill_k_neighbors!(kpoints, nnkp_filename, wannier_chk_params.recip_cell)
fill_overlaps!(kpoints, mmn_filename, uHu_filename, wannier_chk_params)
return AbInitioKGrid(StructArray(kpoints), wannier_chk_params.neighbor_weights)
end
function AbInitioKGrid(eig_filename::AbstractString, chk_filename::AbstractString,
nnkp_filename::AbstractString, mmn_filename::AbstractString,
uHu_filename::AbstractString)
return AbInitioKGrid(Float64, eig_filename, chk_filename, nnkp_filename, mmn_filename,
uHu_filename)
end
function AbInitioKGrid(job::Job)
wancalc = getfirst(x -> x isa Calculation{Wannier90}, job.calculations)
if wancalc === nothing
error("Coulnd't find a wannier calculation in job $job.")
end
wname = name(wancalc)
wan_file = ext -> begin
files = DFC.searchdir(job, "$wname.$ext")
if isempty(files)
error("Couldn't find $wname.$ext in job directory $(job.local_dir).")
end
return files[1]
end
return AbInitioKGrid(wan_file("eig"), wan_file("chk"), wan_file("nnkp"),
wan_file("mmn"), wan_file("uHu"))
end
n_wannier_functions(grid::AbInitioKGrid) = size(grid.kpoints.overlaps[1], 1)
n_nearest_neighbors(grid::AbInitioKGrid) = length(grid.kpoints.neighbors[1])
Base.length(grid::AbInitioKGrid) = length(grid.kpoints)
struct HamiltonianKGrid{T,MT<:AbstractMatrix{Complex{T}},VT<:AbstractVector{T}} <:
AbstractKGrid{T}
core::CoreKGrid{T}
Hk::Vector{MT}
eigvals::Vector{VT}
eigvecs::Vector{MT}
end
function HamiltonianKGrid(kpoints::Vector{<:Vec3}, args...)
return HamiltonianKGrid(CoreKGrid(kpoints), args...)
end
@doc raw"""
HamiltonianKGrid(hami::TBHamiltonian{T}, nk, Hk_function::Function = x -> nothing) where T
HamiltonianKGrid(hami::TBHamiltonian{T}, k_grid, Hk_function::Function = x -> nothing) where T
Takes a k-grid, calculates Hk for each k-point and diagonalizes it. Only the eigenvalues and eigenvectors of each Hk are stored;
the `Hk_function` is called on each intermediate Hk.
"""
function HamiltonianKGrid(hami::TBHamiltonian{T}, kpoints::Vector{<:Vec3},
Hk_function::Function = x -> nothing) where {T}
# kpoints = [KPoint(k, blocksize(hami), R, zeros_block(hami)) for k in k_grid]
n_eigvals = max(blocksize(hami)...)
eigvals = hami[1].block isa AbstractMagneticMatrix ?
[MagneticVector(zeros(T, n_eigvals)) for k in kpoints] :
[zeros(T, n_eigvals) for k in kpoints]
kgrid = HamiltonianKGrid(kpoints, [zeros_block(hami) for k in kpoints], eigvals,
[zeros_block(hami) for k in kpoints])
nk = length(kpoints)
calc_caches = [HermitianEigenWs(block(hami[1])) for i in 1:nthreads()]
p = Progress(nk, 1, "Calculating H(k)...")
@inbounds @threads for i in 1:nk
Hk!(kgrid.eigvecs[i], hami, k_cryst(kgrid)[i])
copy!(kgrid.Hk[i], kgrid.eigvecs[i])
Hk_function(kgrid.Hk[i])
eigen!(kgrid.eigvals[i], kgrid.eigvecs[i], calc_caches[threadid()])
next!(p)
end
return kgrid
end
function Hk!(out::AbstractMatrix, tbhami::TBHamiltonian, kpoint::Vec3)
fill!(out, zero(eltype(out)))
fourier_transform(tbhami, kpoint) do i, iR, R_cart, b, fac
@inbounds out[i] += fac * b.block[i]
end
end
"""
Hk(hamiltonian::TBHamiltonian, kpoint::Vec3)
Hk!(hk::AbstractMatrix, hamiltonian::TBHamiltonian, kpoint::Vec3)
Constructs the reciprocal Hamiltonian at a given _k_-point.
"""
function Hk(tbhami::TBHamiltonian, kpoint::Vec3)
out = similar(tbhami[1].block)
Hk!(out, tbhami, kpoint)
return out
end
Hk(g::HamiltonianKGrid) = g.Hk
eigvecs(g::HamiltonianKGrid) = g.eigvecs
eigvals(g::HamiltonianKGrid) = g.eigvals
"Fourier transforms the tight binding hamiltonian and calls the R_function with the current index and the phase."
function fourier_transform(R_function::Function, tb_hami::TBHamiltonian{T}, kpoint::Vec3) where {T}
for (iR, b) in enumerate(tb_hami)
fac = ℯ^(2im * π * (b.R_cryst ⋅ kpoint))
for i in eachindex(block(b))
R_function(i, iR, b.R_cart, b, fac)
end
end
end
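# Usage sketch (hypothetical): `Hk!` above is equivalent to accumulating the blocks
# through this callback, which receives the linear index into the block, the R index,
# R in cartesian coordinates, the `TBBlock` itself, and the phase factor:
#     fourier_transform(hami, kpoint) do i, iR, R_cart, b, fac
#         out[i] += fac * block(b)[i]
#     end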
"""
WannierBand
Represents a Wannier interpolated band. See also [`wannierbands`](@ref).
"""
mutable struct WannierBand{T<:AbstractFloat,VT<:AbstractVector} <: DFC.AbstractBand
kpoints_cryst::Vector{Vec3{T}}
eigvals ::Vector{T}
eigvec ::Vector{VT}
end
DFControl.eigvals(b::WannierBand) = b.eigvals
function Base.show(io::IO, band::WannierBand)
summary(io, band)
string = """
$(length(band.kpoints_cryst)) k_points: $(band.kpoints_cryst[1]) -> $(band.kpoints_cryst[end])
mean energy: $(sum(band.eigvals)/length(band.eigvals)) eV
"""
println(io, string)
return
end
"""
wannierbands(hamiltonian::TBHamiltonian, kpoints::Vector{Vec3})
wannierbands(hamiltonian::TBHamiltonian, bands::Vector{DFControl.AbstractBand})
Constructs the whole bandstructure for a given set of _k_-points and [`TBHamiltonian`](@ref TBOperator).
"""
function wannierbands(tbhamis::TBHamiltonian{T}, kpoints::Vector{<:Vec3}) where {T}
matdim = blocksize(tbhamis, 2)
kgrid = HamiltonianKGrid(tbhamis, kpoints)
nbnd = size(tbhamis[1].block, 2)
evals = [zeros(length(kpoints)) for i in 1:nbnd]
evecs = [[similar(kgrid.eigvecs[1][:, 1]) for i in 1:length(kpoints)] for i in 1:nbnd]
@threads for i in 1:length(kpoints)
eigvals, eigvecs = kgrid.eigvals[i], kgrid.eigvecs[i]
for e in 1:length(eigvals)
evals[e][i] = eigvals[e]
evecs[e][i] = eigvecs[:, e]
end
end
return [WannierBand{T,eltype(evecs[1])}(kpoints, evals[i],
evecs[i]) for i in 1:nbnd]
end
function wannierbands(tbhamis, dfbands::Vector{<:DFC.AbstractBand})
return wannierbands(tbhamis, dfbands[1].k_points_cryst)
end
function wannierbands(tbhamis, dfbands::Union{NamedTuple,Tuple})
return wannierbands(tbhamis, dfbands[1][1].k_points_cryst)
end
function energy_bins(binfunc::Function, wbands::Vector{<:WannierBand}, E_range,
normalize_bins = false)
nbins = length(E_range) - 1
bins = zeros(typeof(binfunc(wbands[1].eigvec[1])), nbins)
# sum = zero(typeof(binfunc(bands[1].eigvec[1])))
nperbin = zeros(Int, nbins)
for b in wbands
for (e, v) in zip(b.eigvals, b.eigvec)
ie = findfirst(i -> E_range[i] <= e <= E_range[i+1], 1:nbins)
if ie === nothing
continue
end
bins[ie] += binfunc(v)
nperbin[ie] += 1
end
end
if normalize_bins #like taking the mean
for i in 1:nbins
if nperbin[i] > 0
bins[i] /= nperbin[i]
end
end
end
return bins
end
function character_contribution(wband::WannierBand, atoms::Vector{Atom})
contributions = zeros(length(wband.kpoints_cryst))
for (i, v) in enumerate(wband.eigvec)
for a in atoms
contributions[i] += norm(v[a])^2
end
end
return contributions
end
function DFControl.FileIO.pdos(wbands::Vector{<:WannierBand}, atoms::Vector{Atom},
dE = 0.02)
Emin = minimum(wbands[1].eigvals)
Emax = maximum(wbands[end].eigvals)
E_range = range(Emin, Emax; step = dE)
bins = energy_bins(wbands, E_range, false) do v
tot = 0.0
for a in atoms
tot += norm(v[a])^2 / dE
end
return tot
end
return (E = E_range, pdos = bins ./ length(wbands[1].kpoints_cryst))
end
function kpdos(bands::Vector{<:WannierBand}, atoms::Vector{Atom})
return map(x -> character_contribution(x, atoms), bands)
end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 4266 | """
TBBlock
Building block for [`TBOperator`](@ref). It holds the matrix elements of the operator between
the central and a shifted unit cell. Upon construction, the Wigner-Seitz shifts are taken into
account to create the correct matrix elements between the Wannier functions, stored in
`tb_block`. The `block` field is essentially `tb_block` but with each element divided by
the number of Wigner-Seitz degeneracies and shifts, which speeds up later _k_-point interpolation.
"""
struct TBBlock{T<:AbstractFloat,LT,M<:AbstractMatrix{Complex{T}}}
R_cryst :: Vec3{Int}
R_cart :: Vec3{LT}
block :: M
tb_block :: M
end
"""
TBOperator
Alias for a `Vector` of [`TBBlocks`](@ref TBBlock). Indexing with `NTuple{3,Int}` or `Vec3`
is supported which allows for easily retrieving the [`TBBlock`](@ref) that corresponds
to the shifted unit cell.
Aliases: `TBHamiltonian`, `TBSpin`
"""
const TBOperator{T,LT,M} = Vector{TBBlock{T,LT,M}}
const TBHamiltonian = TBOperator
const TBSpin = TBOperator
block(x::TBBlock) = x.block
"""
generate_TBBlocks(chk::NamedTuple, O_R::Vector)
Generates the `Vector` of [`TBBlocks`](@ref TBBlock) from the Wannier90 checkpoint info in `chk`
and the real-space operator `O_R`. This pre-applies the Wigner-Seitz shifts and degeneracies to
speed up _k_-point interpolation.
"""
function generate_TBBlocks(chk::NamedTuple, O_R::Vector)
c = chk.cell'
R_cryst, degens = chk.ws_R_cryst, chk.ws_degens
ws_shifts, ws_nshifts = chk.ws_shifts_cryst, chk.ws_nshifts
out = [TBBlock(R, c * R, zeros(ComplexF64, chk.n_wann, chk.n_wann),
zeros(ComplexF64, chk.n_wann, chk.n_wann)) for R in R_cryst]
for (h, R, shifts, nshifts, d) in zip(O_R, R_cryst, ws_shifts, ws_nshifts, degens)
for i in eachindex(h)
ns = nshifts[i]
frac = 1 / (ns * d)
for is in 1:ns
rcryst = R + shifts[i][is]
h1 = out[rcryst]
if h1 === nothing
h1 = TBBlock(rcryst, c * rcryst,
zeros(ComplexF64, chk.n_wann, chk.n_wann),
zeros(ComplexF64, chk.n_wann, chk.n_wann))
push!(out, h1)
end
h1.block[i] += h[i] * frac
h1.tb_block[i] = h[i]
end
end
end
# this doesn't do much but it should make things more hermitian
for o in out
other = out[-o.R_cryst]
other.block .= (o.block' .+ other.block)./2
o.block .= (other.block' .+ o.block)./2
other.tb_block .= (o.tb_block' .+ other.tb_block)./2
o.tb_block .= (other.tb_block' .+ o.tb_block)./2
end
return out
end
for f in (:getindex, :size, :similar)
@eval Base.$f(h::TBBlock, args...) = $f(block(h), args...)
end
LinearAlgebra.eigen(h::TBBlock) = eigen(block(h))
Base.getindex(h::TBHamiltonian, R::Vec3{Int}) = getfirst(x -> x.R_cryst == R, h)
#some small type piracy?
Base.zeros(m::AbstractArray{T}) where {T} = fill!(similar(m), zero(T))
zeros_block(h::TBHamiltonian) = zeros(block(h[1]))
similar_block(h::TBHamiltonian) = similar(block(h[1]))
blocksize(h::TBHamiltonian, args...) = size(block(h[1]), args...)
for op in (:+, :-, :*, :/)
@eval function Base.$op(t::TBBlock{T}, v::T) where {T}
return TBBlock(t.R_cryst, t.R_cart, $op(block(t), v), $op(t.tb_block, v))
end
@eval function Base.$op(v::T, t::TBBlock{T}) where {T}
return TBBlock(t.R_cryst, t.R_cart, $op(v, block(t)), $op(v, t.tb_block))
end
@eval function Base.$op(t::TBBlock{T,LT,M}, v::M) where {T,LT,M}
return TBBlock(t.R_cryst, t.R_cart, $op(block(t), v), $op(t.tb_block, v))
end
@eval function Base.$op(v::M, t::TBBlock{T,LT,M}) where {T,LT,M}
return TBBlock(t.R_cryst, t.R_cart, $op(v, block(t)), $op(v, t.tb_block))
end
@eval function Base.$op(t::TBBlock{T,LT,M}, v::TBBlock{T,LT,M}) where {T,LT,M}
return TBBlock(t.R_cryst, t.R_cart, $op(block(t), block(v)),
$op(t.tb_block, v.tb_block))
end
end
struct RmnBlock{T<:AbstractFloat}
R_cart ::Vec3{T}
R_cryst ::Vec3{Int}
block ::Matrix{Point3{T}}
end
const TBRmn{T} = Vector{RmnBlock{T}}
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 31 | div1(x, y) = div(x - 1, y) + 1
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 16269 | struct WannierFunction{N, T<:AbstractFloat} <: AbstractArray{SVector{N, Complex{T}}, 3}
points::Array{Point{3, T}, 3}
values::Array{SVector{N, Complex{T}}, 3}
end
function WannierFunction(filename::AbstractString, points::Array{Point3{T}, 3}=read_points_from_xsf(filename)) where {T <: AbstractFloat}
re = read_values_from_xsf(T, filename)
values = [SVector(complex(a)) for a in re]
return normalize(WannierFunction(points, values))
end
function WannierFunction(filename_re::String, filename_im::String, points::Array{Point3{T}, 3} = read_points_from_xsf(filename_re)) where {T <: AbstractFloat}
re, im = read_values_from_xsf.(T, (filename_re, filename_im))
values = [SVector(Complex(a, b)) for (a, b) in zip(re, im)]
return normalize(WannierFunction(points, values))
end
function WannierFunction(filename_up_re::String, filename_up_im::String, filename_down_re::String, filename_down_im::String, points::Array{Point3{T}, 3} = read_points_from_xsf(filename_up_re)) where {T <: AbstractFloat}
up_re, up_im, down_re, down_im =
read_values_from_xsf.(T, (filename_up_re, filename_up_im, filename_down_re, filename_down_im))
values = [SVector(Complex(a, b), Complex(c, d)) for (a, b, c, d) in zip(up_re, up_im, down_re, down_im)]
return normalize(WannierFunction(points, values))
end
WannierFunction(point_func::Function, points::Array) =
normalize(WannierFunction(points, point_func.(points)))
for f in (:size, :getindex, :setindex!)
@eval @inline @propagate_inbounds Base.$f(x::WannierFunction, i...) =
Base.$f(x.values, i...)
end
for f in (:length, :stride, :ndims, :axes, :strides)
@eval @inline Base.$f(w::WannierFunction) = Base.$f(w.values)
end
Base.similar(x::WannierFunction,::Type{S}) where S =
WannierFunction(x.points, similar(x.values, S))
Base.unsafe_convert(T::Type{<:Ptr}, x::WannierFunction) =
unsafe_convert(T, x.values)
Base.Broadcast.broadcastable(w::WannierFunction) =
w.values
#### LinearAlgebra overloads
function LinearAlgebra.adjoint(w::WannierFunction)
out = WannierFunction(w.points, similar(w.values))
adjoint!(out, w)
end
LinearAlgebra.adjoint!(w1::WannierFunction, w2::WannierFunction) =
w1 .= adjoint.(w2)
function LinearAlgebra.dot(w1::WannierFunction{N,T}, w2::WannierFunction{N,T}) where {N,T}
s = zero(Complex{T})
for (v1, v2) in zip(w1.values, w2.values)
s += v1' * v2
end
return real(s)
end
function LinearAlgebra.dot(v::Vector, wfs::Vector{<:WannierFunction})
res = WannierFunction(wfs[1].points, zeros(eltype(wfs[1].values), size(wfs[1].values)))
for ic in 1:length(v)
res .+= v[ic] .* wfs[ic]
end
return res
end
LinearAlgebra.dot(wfs::Vector{<:WannierFunction}, v::Vector) =
dot(v, wfs)
LinearAlgebra.norm(wfc::WannierFunction) =
dot(wfc, wfc)
LinearAlgebra.normalize!(wfc::WannierFunction) =
wfc ./= sqrt(norm(wfc))
same_grid(w1::WannierFunction, w2::WannierFunction) =
w1.points === w2.points
function wan_op(op::Function, w1::W, w2::W) where {W <: WannierFunction}
@assert same_grid(w1, w2) "Wannier functions are not defined on the same grid"
return WannierFunction(w1.points, op(w1.values, w2.values))
end
Base.:(+)(w1::WannierFunction, w2::WannierFunction) = wan_op(+, w1, w2)
Base.:(*)(w1::WannierFunction, w2::WannierFunction) = wan_op(*, w1, w2)
Base.:(-)(w1::WannierFunction, w2::WannierFunction) = wan_op(-, w1, w2)
Base.:(*)(w1::WannierFunction, n::Number) = WannierFunction(w1.points, w1.values .* n)
Base.:(*)(n::Number, w1::WannierFunction) = WannierFunction(w1.points, n .* w1.values)
Base.:(/)(w1::WannierFunction, n::Number) = WannierFunction(w1.points, w1.values ./ n)
Base.:(/)(n::Number, w1::WannierFunction) = WannierFunction(w1.points, n ./ w1.values)
LinearAlgebra.dot(w1::WannierFunction, n::Number) = w1 * n
LinearAlgebra.dot(n::Number, w1::WannierFunction) = n * w1
function generate_wannierfunctions(k_filenames::Vector{String}, chk_info, wannier_plot_supercell::NTuple{3,Int}, wan_plot_list=1:chk_info.n_wann)
num_kpts = length(chk_info.kpoints)
U = permutedims(chk_info.U_matrix, (2, 1, 3))
U_opt = permutedims(chk_info.U_matrix_opt,(2,1,3))
tu = read_unk(k_filenames[1])
nrx, nry, nrz = size(tu,1), size(tu,2), size(tu,3)
supercell_xrange = -div(wannier_plot_supercell[1],2)*nrx : div(wannier_plot_supercell[1] + 1, 2)*nrx - 1
supercell_yrange = -div(wannier_plot_supercell[2],2)*nry : div(wannier_plot_supercell[2] + 1, 2)*nry - 1
supercell_zrange = -div(wannier_plot_supercell[3],2)*nrz : div(wannier_plot_supercell[3] + 1, 2)*nrz - 1
nx, ny, nz = length.((supercell_xrange, supercell_yrange, supercell_zrange))
nwfun = length(wan_plot_list)
wfuncs_all = zeros(eltype(tu), nwfun, (wannier_plot_supercell .* (nrx, nry, nrz))...,size(tu, 5))
n_wann = chk_info.n_wann
r_wan = zeros(eltype(tu), chk_info.n_wann, nrx, nry, nrz)
p = Progress(length(chk_info.kpoints))
@inbounds for ik = 1:length(chk_info.kpoints)
k = chk_info.kpoints[ik]
unk_all = read_unk(k_filenames[ik])
for is = 1:size(tu, 5)
u = U[wan_plot_list, :, ik]
u_opt = U_opt[:, :, ik]
inc_ids = findall(!iszero, chk_info.lwindow[:, ik])
fill!(r_wan, 0.0)
for ib in 1:chk_info.ndimwin[ik]
iib = inc_ids[ib]
Threads.@threads for nz in 1:nrz
for ny in 1:nry
for nx in 1:nrx
@simd for iw in 1:n_wann
r_wan[iw, nx, ny, nz] += u_opt[iw, ib] * unk_all[nx,ny,nz,iib,is]
end
end
end
end
end
Threads.@threads for iisz in 1:nz
isz = supercell_zrange[iisz]
iz = mod1(isz, nrz)
for iisy in 1:ny
isy = supercell_yrange[iisy]
iy = mod1(isy, nry)
for iisx in 1:nx
isx = supercell_xrange[iisx]
ix = mod1(isx, nrx)
scalfac = exp(2im*π*dot(k, Vec3((isx-1)/nrx, (isy-1)/nry, (isz-1)/nrz)))
for ib in 1:n_wann
rt = r_wan[ib, ix, iy, iz] * scalfac
for iw in 1:nwfun
wfuncs_all[iw, iisx, iisy, iisz, is] += u[iw, ib] * rt
end
end
end
end
end
end
next!(p)
end
wfuncs_all ./= num_kpts
if size(tu, 5) == 1
Threads.@threads for iw = 1:size(wfuncs_all, 1)
tmaxx = 0.0
cmod = 1.0+0.0im
for iisx in 1:nx
for iisy in 1:ny
for iisz in 1:nz
w = wfuncs_all[iw, iisx, iisy, iisz, 1]
t = abs2(w)
if t > tmaxx
tmaxx = t
cmod = w
end
end
end
end
cmod /= abs(cmod)
@views wfuncs_all[iw, :, :, :, 1] ./= cmod
end
end
str_cell = ustrip.(chk_info.cell)
points = [str_cell * Point3((x-1)/nrx, (y-1)/nry, (z-1)/nrz) for x in supercell_xrange, y in supercell_yrange, z in supercell_zrange]
if size(tu,5) == 1
wfuncs_out = Vector{WannierFunction{1, eltype(wfuncs_all).parameters[1]}}(undef, size(wfuncs_all, 1))
Threads.@threads for i=1:size(wfuncs_all, 1)
wfuncs_out[i] = WannierFunction{1, eltype(wfuncs_all).parameters[1]}(points, map(x -> SVector(x), view(wfuncs_all,i, :, :, :, 1)))
end
return normalize!.(wfuncs_out)
else
wfuncs_out = Vector{WannierFunction{2, eltype(wfuncs_all).parameters[1]}}(undef, size(wfuncs_all, 1))
Threads.@threads for i=1:size(wfuncs_all, 1)
wfuncs_out[i] = WannierFunction{2, eltype(wfuncs_all).parameters[1]}(points, map(x -> SVector(x), zip(view(wfuncs_all, i, :, :, :, 1), view(wfuncs_all, i, :, :, :, 2))))
end
return wfuncs_out
end
end
function generate_wannierfunctions(job::Job, supercell::NTuple{3,Int}, args...)
tdir = job.dir
unk_files = reverse(searchdir(job, "UNK"))
chk_files = reverse(searchdir(job, ".chk"))
if !DFC.Jobs.runslocal(job)
tdir = mkpath(tempname())
for f in [unk_files; chk_files]
fname = splitpath(f)[end]
DFC.Servers.pull(job, fname, joinpath(tdir, fname))
end
unk_files = reverse(searchdir(tdir, "UNK"))
chk_files = reverse(searchdir(tdir, ".chk"))
end
if ismagnetic(job.structure) && Structures.iscolin(job.structure) && !any(Calculations.issoc, job.calculations)
wfuncs = Vector{WannierFunction}[]
for (is, s) in enumerate(("up", "down"))
wan_calc = getfirst(x -> eltype(x) == Wannier90 && x[:spin] == s, job.calculations)
chk_info = read_chk(joinpath(tdir, "$(wan_calc.name).chk"))
unk_files = filter(x->occursin(".$is", x), searchdir(tdir, "UNK"))
push!(wfuncs, generate_wannierfunctions(unk_files, chk_info, supercell, args...))
end
wfuncs = (up=wfuncs[1], down=wfuncs[2])
else
wan_calc = getfirst(x -> eltype(x)==Wannier90, job.calculations)
chk_info = read_chk(joinpath(tdir, "$(wan_calc.name).chk"))
unk_files = searchdir(tdir, "UNK")
wfuncs = generate_wannierfunctions(unk_files, chk_info, supercell, args...)
end
if !DFC.Jobs.runslocal(job)
rm(tdir, recursive=true)
end
return wfuncs
end
function bloch_sum(wfunc, kpoint; i_pos_offset = (0,0,0), i_neg_offset=(0,0,0))
cell_boundaries = div.(size(wfunc.points), 3) .+ 1
x = wfunc.points[cell_boundaries[1]+1, 1, 1] .- wfunc.points[1]
y = wfunc.points[1, cell_boundaries[2]+1, 1] .- wfunc.points[1]
z = wfunc.points[1, 1, cell_boundaries[3]+1] .- wfunc.points[1]
bloch = WannierFunction(wfunc.points, zeros(eltype(wfunc.values), size(wfunc.values)))
dims = size(wfunc.values)
for i1 in -3:1:3, i2 in -3:1:3, i3 in -3:1:3
R_cryst = Vec3(i1, i2, i3)
o1, o2, o3 = cell_boundaries .* R_cryst
shiftvec = x * R_cryst[1] .+ y * R_cryst[2] .+ z*R_cryst[3]
phase = ℯ^(2im*π*(R_cryst ⋅ kpoint))
if i1 + i2 + i3 == 0
continue
end
if i1 < 0
o1 += i_neg_offset[1]
elseif i1 > 0
o1 += i_pos_offset[1]
end
if i2 < 0
o2 += i_neg_offset[2]
elseif i2 > 0
o2 += i_pos_offset[2]
end
if i3 < 0
o3 += i_neg_offset[3]
elseif i3 > 0
o3 += i_pos_offset[3]
end
for j3 in 1:dims[3]
oid3 = mod1(j3 - o3, dims[3])
for j2 in 1:dims[2]
oid2 = mod1(j2 - o2, dims[2])
for j1 in 1:dims[1]
oid1 = mod1(j1 - o1, dims[1])
bloch.values[j1, j2, j3] += phase * wfunc.values[oid1, oid2, oid3]
end
end
end
end
return bloch
end
"Calculates the angular momentum between two wavefunctions and around the center."
function calc_angmom(wfc1::WannierFunction{N, T}, wfc2::WannierFunction{N, T}, center::Point3{T}, cutoff=Inf) where {N, T <: AbstractFloat}
points = wfc1.points
origin = points[1, 1, 1]
da = points[2, 1, 1] - origin
db = points[1, 2, 1] - origin
dc = points[1, 1, 2] - origin
V = SMatrix{3,3}(inv([convert(Array, da) convert(Array, db) convert(Array, dc)])')
L = zero(Point3{Complex{T}})
c2 = cutoff^2
@inbounds for i2 in 2:size(wfc1, 3)
for i1 in 2:size(wfc1, 2)
for i in 2:size(wfc1, 1)
r = points[i, i1, i2] - center
if dot(r, r) < c2
dw_cryst = Point3(wfc2.values[i, i1, i2] - wfc2.values[i-1, i1, i2],
wfc2.values[i, i1, i2] - wfc2.values[i, i1-1, i2],
wfc2.values[i, i1, i2] - wfc2.values[i, i1, i2-1])
dw_cart = V * dw_cryst
L += (wfc1.values[i, i1, i2]',) .* cross(r, dw_cart)
end
end
end
end
return -1im * L
end
#this doesn't work I think
# function calc_angmom_squared(wfc1::WannierFunction{N, T}, wfc2::WannierFunction{N, T}, center::Point3{T}) where {N, T <: AbstractFloat}
# points = wfc1.points
# origin = points[1, 1, 1]
# da = points[2, 1, 1] - origin
# db = points[1, 2, 1] - origin
# dc = points[1, 1, 2] - origin
# V = SMatrix{3,3}(inv([convert(Array, da) convert(Array, db) convert(Array, dc)])')
# Lsq = zero(Complex{T})
# @inbounds for i2 = 2:size(wfc1)[3]
# for i1 = 2:size(wfc1)[2]
# for i = 2:size(wfc1)[1]
# dw_cryst = Point3(wfc2.values[i, i1, i2] - wfc2.values[i-1, i1, i2],
# wfc2.values[i, i1, i2] - wfc2.values[i, i1-1, i2],
# wfc2.values[i, i1, i2] - wfc2.values[i, i1, i2-1])
# dw_cryst_sq = map(x->x .^2,dw_cryst)
# r = points[i, i1, i2] - center
# dw_cart = V * dw_cryst
# Lsq += wfc1.values[i, i1, i2] ⋅ (r[2]^2 * (dw_cryst_sq[1] + dw_cryst_sq[3]) +
# r[1]^2 * (dw_cryst_sq[2] + dw_cryst_sq[3]) +
# r[3]^2 * (dw_cryst_sq[1] + dw_cryst_sq[2]) -
# 2 * (r ⋅ dw_cryst +
# r[2] * r[3] * dw_cryst[2] .* dw_cryst[3] +
# r[1] * r[3] * dw_cryst[1] .* dw_cryst[3] +
# r[1] * r[2] * dw_cryst[1] .* dw_cryst[2]))
# end
# end
# end
# return Lsq
# end
function calc_spin(wfc1::WannierFunction{2, T}, wfc2::WannierFunction{2, T}) where T <: AbstractFloat
# Spin-1/2 operator S = σ/2 (note: SMatrix constructors fill column-major)
S = Point3(SMatrix{2, 2}(0, 1, 1, 0)/2,
SMatrix{2, 2}(0, 1im, -1im, 0)/2,
SMatrix{2, 2}(1, 0, 0, -1)/2)
outS = zero(Point3{Complex{T}})
for (w1, w2) in zip(wfc1.values, wfc2.values)
outS += (w1',) .* S .* (w2,)
end
return outS
end
"Calculates the dipole term between two wavefunctions. Make sure the wavefunctions are normalized!"
function calc_dip(wfc1::WannierFunction{N, T}, wfc2::WannierFunction{N, T}) where {N, T <: AbstractFloat}
out = zero(Point3{Complex{T}})
for (w1, w2, p) in zip(wfc1, wfc2, wfc1.points)
out += w1' * w2 * p
end
return real(out)
end
# Is this code actually correct?
# "Calculates the dipoles from the supplied wannier dipole output."
# function calc_k_dips(dip_raw::Array{Tuple{Int, Int, Int, Int, Int, Point3{T}}}, kpoints::AbstractArray) where T<:AbstractFloat
# dim = 0
# for i=1:length(dip_raw)
# d = dip_raw[i][4]
# if d > dim
# dim = d
# else
# break
# end
# end
# out = zeros(Point3{T}, dim, dim)
# tmp = [[zeros(Complex{T}, 3)] for i=1:dim, i1=1:dim]
# for i=1:size(dip_raw)[1]
# d = dip_raw[i]
# complex_part = 2π*(kpoints[1]*d[1]+kpoints[2]*d[2]+kpoints[3]*d[3])
# factor = exp(-1im * complex_part)
# tmp[d[4],d[5]][1] += d[6][1] * factor
# tmp[d[4],d[5]][2] += d[6][2] * factor
# tmp[d[4],d[5]][3] += d[6][3] * factor
# end
# for i in eachindex(out)
# out[i] = Point3(real(tmp[i][1]),real(tmp[i][2]),real(tmp[i][3]))
# end
# return Mat{2*dim, 2*dim, Point3{T}}([out zeros(out);zeros(out) out])
# end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 347 | using Base.Sys
using DFControl.Utils: searchdir
cd(assetfile("Fe/"))
big_files = searchdir(".", "bz2")
if islinux()
for f in big_files
run(`bunzip2 $f`)
end
end
job = Job(".")
abgrid = DFW.AbInitioKGrid(job)
big_files = map(x -> splitext(x)[1], big_files)
if islinux()
for f in big_files
run(`bzip2 $f`)
end
end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 1440 | nk = (5,5,5)
R = DFW.Vec3(2, 0, 0)
fermi = 11.4394
n_ωh = 300
n_ωv = 50
ωh = -30.0
ωv = 0.5
hami = DFW.read_colin_hami(DFW.read_chk(assetfile("wanup.chk")),
DFW.read_chk(assetfile("wandn.chk")),
DFW.read_eig(assetfile("wanup.eig")),
DFW.read_eig(assetfile("wandn.eig")))
str = DFC.FileIO.wan_parse_calculation(assetfile("wanup.win")).structure
ω_grid = DFW.setup_ω_grid(ωh, ωv, n_ωh, n_ωv)
kpoints = DFW.ExchangeKGrid(hami, DFW.uniform_kgrid(nk...), R)
@test isapprox(sum(sum.(kpoints.hamiltonian_kgrid.eigvals)), 28310.156014291606)
@test isapprox(sum(kpoints.D), 0.07393119748780791 - 1.0312022655727505e-15im)
g_caches = [fill!(similar(kpoints.hamiltonian_kgrid.eigvecs[1]), zero(ComplexF64)) for i=1:3]
G = fill!(similar(kpoints.hamiltonian_kgrid.eigvecs[1]), zero(ComplexF64))
fill!(G, zero(ComplexF64))
DFW.integrate_Gk!(G, ω_grid[1], fermi, kpoints, g_caches);
exch = calc_exchanges(hami, str.atoms, fermi; R=R, site_diagonal=false, nk=nk, n_ωh = n_ωh, n_ωv = n_ωv, ωh = ωh, ωv = ωv )
maxJ = maximum([tr(e.J) for e in exch])
@test isapprox(maxJ, 26.044428709929104)
exch1 = calc_exchanges(hami, str.atoms, fermi; R=R, site_diagonal=true, nk=nk, n_ωh = n_ωh, n_ωv = n_ωv, ωh = ωh, ωv = ωv )
maxJ1 = maximum([sum(e.J) for e in exch1])
@test isapprox(maxJ, maxJ1)
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 6544 | using DFWannier
T= Float32
x = WannierModel{T}("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/","/home/ponet/Documents/PhD/GeTe/SOC/GeTe_bands.out",[[Atom(T[0.0,0.0,-0.0239129,-2*0.155854]...) for i=1:4]...,[Atom(T[0.0,0.0,5.5540692,2*0.318205]...) for i=1:4]...],true);
x2 = WannierModel{T}("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/","/home/ponet/Documents/PhD/GeTe/SOC/GeTe_bands.out",[[Atom(T[0.0,0.0,-0.0239129,-0.155854]...) for i=1:4]...,[Atom(T[0.0,0.0,5.5540692,0.318205]...) for i=1:4]...]);
using Plots
@time test = calculate_eig_angmom_soc_bloch_gpu(x,90:0.1:110.);
@time test1 = calculate_eig_angmom_soc_bloch_gpu(x,90:0.1:110.);
@time test1 = calculate_eig_angmom_soc_bloch(x2,90:0.1:110.);
plot(plot(test1[8],:angmom2_x),plot(test[8],:angmom2_x))
@time benchmark = construct_bloch_sum(x.wfcs[1],x.kpoints[1]);
begin
test1 = construct_bloch_sum(x.wfcs[1],x.kpoints[1])
assert(Array(test1.values)==Array(benchmark.values))
end
test2 = calculate_eig_angmom_soc_bloch(x2,90:110.);
using Plots
test = construct_bloch_sum(x.wfcs[1],x.kpoints[1])
Array(test.values)
test = construct_bloch_sum(x2.wfcs[1],x2.kpoints[1])
T=Float64
test_wfc1 = DFWannier.host2gpu(read_xsf_file("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/wan_00003.xsf",Atom{T}(0.0,0.0,0.1,0.1),T))
test_wfc2= DFWannier.host2gpu(read_xsf_file("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/wan_00004.xsf",Atom{T}(0.0,0.0,0.1,0.1),T))
dims = size(test_wfc1.values)
cu_dims = CuArray(UInt32[dims...])
Lx = CuArray(zeros(Complex{T},dims))
Ly = CuArray(zeros(Complex{T},dims))
Lz = CuArray(zeros(Complex{T},dims))
n1 = CuArray(zeros(Complex{T},dims))
n2 = CuArray(zeros(Complex{T},dims))
grid = Array(test_wfc1.grid)
origin = grid[1,1,1]
a = grid[2,1,1] .- origin
b = grid[1,2,1] .- origin
c = grid[1,1,2] .- origin
V = CuArray(inv([[a...] [b...] [c...]]))
begin
# indices = Array{Tuple{Tuple{Int32,Int32,Int32},Tuple{Int32,Int32,Int32}},1}()
# coeff = Array{Complex{T},1}()
indices = Array{Tuple{CuArray{Int32,1},CuArray{Int32,1}},1}()
coeff = Array{CuArray{Complex{T},1},1}()
k=T[0.2,0.2,0.3]
for R1=-1:0,R2=-1:0,R3=-1:0
if R1+R2+R3 == 0
continue
end
R= R1*test_wfc1.cell[1]+R2*test_wfc1.cell[2]+R3*test_wfc1.cell[3]
ind1,ind2 = DFWannier.find_start(test_wfc1,R,27)
# push!(indices,((Int32(ind1[1]),Int32(ind1[2]),Int32(ind1[3])),(Int32(ind1[1]-ind2[1]),Int32(ind1[2]-ind2[2]),Int32(ind1[3]-ind2[3]))))
push!(indices,(CuArray{Int32}([ind1...]),CuArray{Int32}([ind2...])))
# push!(coeff,Complex{T}(exp(dot(-2*pi*k,[R1,R2,R3])*1im)))
push!(coeff,CuArray(Complex{T}(exp(dot(-2*pi*k,[R1,R2,R3])*1im))))
end
dfprintln(length(indices),length(coeff))
k_wfcs = Array{Wfc3D_gpu{T},1}()
#optimize so we dont redo the zero index ...
for wfc in [test_wfc1,test_wfc2]
push!(k_wfcs,Wfc3D_gpu(wfc.grid,CuArray(zeros(Complex{T},size(wfc.values))),wfc.cell,wfc.atom))
end
end
@time for i =1:2000
construct_bloch_sums([test_wfc1,test_wfc2],k_wfcs,k,CuArray(indices),CuArray(coeff))
end
calculate_angmom(test_wfc1,test_wfc2,V,CuArray([test_wfc1.atom.center.x,test_wfc1.atom.center.y,test_wfc1.atom.center.z]),dims,Lx,Ly,Lz,n2,n2)
using CUDAnative, CUDAdrv
function haversine_cpu(lat1::Float32, lon1::Float32, lat2::Float32, lon2::Float32, radius::Float32)
c1 = cospi(lat1 / 180.0f0)
c2 = cospi(lat2 / 180.0f0)
dlat = lat2 - lat1
dlon = lon2 - lon1
d1 = sinpi(dlat / 360.0f0)
d2 = sinpi(dlon / 360.0f0)
t = d2 * d2 * c1 * c2
a = d1 * d1 + t
c = 2.0f0 * asin(min(1.0f0, sqrt(a)))
return radius * c
end
function pairwise_dist_cpu(lat::Vector{Float32}, lon::Vector{Float32})
# allocate
n = length(lat)
rowresult = Array{Float32}(n, n)
# brute force fill in each cell
for i in 1:n, j in 1:n
@inbounds rowresult[i, j] = haversine_cpu(lat[i], lon[i], lat[j], lon[j] , 6372.8f0)
end
return rowresult
end
# from https://devblogs.nvidia.com/parallelforall/fast-great-circle-distance-calculation-cuda-c/
function haversine_gpu(lat1::Float32, lon1::Float32, lat2::Float32, lon2::Float32, radius::Float32)
# XXX: need to prefix math intrinsics with CUDAnative
c1 = CUDAnative.cospi(lat1 / 180.0f0)
c2 = CUDAnative.cospi(lat2 / 180.0f0)
dlat = lat2 - lat1
dlon = lon2 - lon1
d1 = CUDAnative.sinpi(dlat / 360.0f0)
d2 = CUDAnative.sinpi(dlon / 360.0f0)
t = d2 * d2 * c1 * c2
a = d1 * d1 + t
c = 2.0f0 * CUDAnative.asin(CUDAnative.min(1.0f0, CUDAnative.sqrt(a)))
return radius * c
end
# pairwise distance calculation kernel
function pairwise_dist_kernel(lat::CuDeviceVector{Float32}, lon::CuDeviceVector{Float32},
rowresult::CuDeviceMatrix{Float32}, n)
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
j = (blockIdx().y-1) * blockDim().y + threadIdx().y
if i <= n && j <= n
# store to shared memory
shmem = @cuDynamicSharedMem(Float32, 2*blockDim().x + 2*blockDim().y)
if threadIdx().y == 1
shmem[threadIdx().x] = lat[i]
shmem[blockDim().x + threadIdx().x] = lon[i]
end
if threadIdx().x == 1
shmem[2*blockDim().x + threadIdx().y] = lat[j]
shmem[2*blockDim().x + blockDim().y + threadIdx().y] = lon[j]
end
sync_threads()
# load from shared memory
lat_i = shmem[threadIdx().x]
lon_i = shmem[blockDim().x + threadIdx().x]
lat_j = shmem[2*blockDim().x + threadIdx().y]
lon_j = shmem[2*blockDim().x + blockDim().y + threadIdx().y]
@inbounds rowresult[i, j] = haversine_gpu(lat_i, lon_i, lat_j, lon_j, 6372.8f0)
end
end
function pairwise_dist_gpu(lat::Vector{Float32}, lon::Vector{Float32})
# upload
lat_gpu = CuArray(lat)
lon_gpu = CuArray(lon)
# allocate
n = length(lat)
rowresult_gpu = CuArray{Float32}(n, n)
# calculate launch configuration
# NOTE: we want our launch configuration to be as square as possible,
# because that minimizes shared memory usage
ctx = CuCurrentContext()
dev = device(ctx)
total_threads = min(n, attribute(dev, CUDAdrv.MAX_THREADS_PER_BLOCK))
threads_x = floor(Int, sqrt(total_threads))
threads_y = total_threads ÷ threads_x
threads = (threads_x, threads_y)
blocks = ceil.(Int, n ./ threads)
# calculate size of dynamic shared memory
shmem = 2 * sum(threads) * sizeof(Float32)
dfprintln(shmem)
@cuda (blocks, threads, shmem) pairwise_dist_kernel(lat_gpu, lon_gpu, rowresult_gpu, n)
return Array(rowresult_gpu)
end
# generate reasonable data
const n = 10000
const lat = rand(Float32, n) .* 45
const lon = rand(Float32, n) .* -120
@test pairwise_dist_cpu(lat, lon) ≈ pairwise_dist_gpu(lat, lon)
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 2221 | #!/opt/julia0.6/julia
using DFWannier
using CUDAnative
using CuArrays
CUDAnative.@profile begin
T=Float64
test_wfc1 = DFWannier.host2gpu(read_xsf_file("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/wan_00003.xsf",Atom{T}(0.0,0.0,0.1,0.1),T))
test_wfc2= DFWannier.host2gpu(read_xsf_file("/home/ponet/Documents/PhD/GeTe/NSOC/paperxsf/wan_00004.xsf",Atom{T}(0.0,0.0,0.1,0.1),T))
dims = size(test_wfc1.values)
cu_dims = CuArray(UInt32[dims...])
Lx = CuArray(zeros(Complex{T},dims))
Ly = CuArray(zeros(Complex{T},dims))
Lz = CuArray(zeros(Complex{T},dims))
n1 = CuArray(zeros(Complex{T},dims))
n2 = CuArray(zeros(Complex{T},dims))
grid = Array(test_wfc1.grid)
origin = grid[1,1,1]
a = grid[2,1,1] .- origin
b = grid[1,2,1] .- origin
c = grid[1,1,2] .- origin
V = CuArray(inv([[a...] [b...] [c...]]))
indices = Array{Tuple{Tuple{Int32,Int32,Int32},Tuple{Int32,Int32,Int32}},1}()
t_coeff = Array{Complex{T}}(27)
t=1
k=T[0.2,0.2,0.3]
for R1=-1:1,R2=-1:1,R3=-1:1
R= R1*test_wfc1.cell[1]+R2*test_wfc1.cell[2]+R3*test_wfc1.cell[3]
ind1,ind2 = DFWannier.find_start(test_wfc1,R,27)
push!(indices,((Int32(ind1[1]),Int32(ind1[2]),Int32(ind1[3])),(Int32(ind1[1]-ind2[1]),Int32(ind1[2]-ind2[2]),Int32(ind1[3]-ind2[3]))))
t_coeff[t] = Complex{T}(exp(dot(-2*pi*k,[R1,R2,R3])*1im))
t+=1
end
coefficients = CuArray(t_coeff)
cu_indices = CuArray(indices)
k_wfcs = Array{Wfc3D_gpu{T},1}(2)
for (n,wfc) in enumerate([test_wfc1,test_wfc2])
k_wfcs[n] = Wfc3D_gpu(wfc.grid,CuArray(zeros(Complex{T},size(wfc.values))),wfc.cell,wfc.atom)
end
# DFWannier.construct_bloch_sums([test_wfc1,test_wfc2],k_wfcs,k,cu_indices,coefficients)
begin
correct = calculate_angmom(test_wfc1,test_wfc2,V,CuArray([test_wfc1.atom.center.x,test_wfc1.atom.center.y,test_wfc1.atom.center.z]),dims,Lx,Ly,Lz,n2,n2)
test = calculate_angmom(test_wfc1,test_wfc2,V,CuArray([test_wfc1.atom.center.x,test_wfc1.atom.center.y,test_wfc1.atom.center.z]),dims,Lx,Ly,Lz,n2,n2)
@time for i =1:2000
test = calculate_angmom(test_wfc1,test_wfc2,V,CuArray([test_wfc1.atom.center.x,test_wfc1.atom.center.y,test_wfc1.atom.center.z]),dims,Lx,Ly,Lz,n2,n2)
end
assert(test==correct)
end
end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 276 | nk = (2,2,2)
R = DFW.Vec3(2, 0, 0)
fermi = 13.129
n_ωh = 300
n_ωv = 50
ωh = -30.0
ωv = 0.5
hami = read_hamiltonian(assetfile("wanup.chk"), assetfile("wanup.eig"))
wbands = wannierbands(hami, uniform_kgrid(nk...))
@test isapprox(sum(wbands[3].eigvals), 26.24675680998617)
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 2370 | for F in (Float32, Float64)
n = 3
tup = rand(Complex{F}, n, n)
tdown = rand(Complex{F}, n, n)
orig_up = Hermitian((tup + tup')/2)
orig_dn = Hermitian((tdown + tdown')/2)
colin = DFW.ColinMatrix(orig_up, orig_dn)
normal_eig1, normal_eig2 = eigen(orig_up), eigen(orig_dn)
cache = DFW.HermitianEigenWs(colin)
cached_eig = eigen(colin, cache)
@test sum(normal_eig1.values) + sum(normal_eig2.values) ≈ sum(cached_eig.values)
@test Array(normal_eig1) ≈ Array(cached_eig)[1:n, 1:n] ≈ DFW.up(colin)
@test Array(normal_eig2) ≈ Array(cached_eig)[1:n, n+1:end] ≈ DFW.down(colin)
end
for F in (Float32, Float64)
t = rand(Complex{F}, 50, 50)
orig = (t + t')/2
normal_eig = eigen(orig)
cache = DFW.HermitianEigenWs(orig)
cached_eig = eigen(orig, cache)
@test sum(normal_eig.values) ≈ sum(cached_eig.values)
@test Array(normal_eig) ≈ Array(cached_eig) ≈ orig
end
mat = rand(20, 20)
ats = [DFControl.Atom(name=:Fe, element=DFControl.element(:Fe), position_cart=DFControl.Point3(0.0, 0.0,0.0).*DFControl.angstrom, position_cryst=DFControl.Point3(0.0,0.0,0.0), projections=[DFControl.Projection(Structures.orbital("d"), 1, 10)]), DFControl.Atom(name=:Fe, element=DFControl.element(:Fe), position_cart=DFControl.Point3(1.0, 0.0,0.0).*DFControl.angstrom, position_cryst=DFControl.Point3(1.0,0.0,0.0), projections=[DFControl.Projection(Structures.orbital("d"), 11, 20)])]
noncolinmat = convert(DFW.NonColinMatrix, mat)
@test mat[1] == noncolinmat[1]
@test mat[2, 2] == noncolinmat[11, 11]
@test mat[6, 6] == noncolinmat[13, 13]
@test mat[11, 11] == noncolinmat[6, 6]
@test mat[1, 2] == noncolinmat[1, 11]
@test mat[2, 1] == noncolinmat[11, 1]
@test mat[1, 6] == noncolinmat[1, 13]
@test mat[11, 1] == noncolinmat[6, 1]
@test noncolinmat[ats[1]] == [noncolinmat[1:5,1:5] noncolinmat[1:5, 11:15];noncolinmat[11:15, 1:5] noncolinmat[11:15, 11:15]]
@test noncolinmat[ats[1], ats[2]] == [noncolinmat[1:5,6:10] noncolinmat[1:5, 16:20];noncolinmat[11:15, 6:10] noncolinmat[11:15, 16:20]]
@test noncolinmat[ats[1], ats[2], DFW.Up()] == noncolinmat[1:5,6:10]
@test noncolinmat[ats[1], ats[2], DFW.Down()] == noncolinmat[11:15, 16:20]
@test noncolinmat[ats[1], ats[2], DFW.Up(), DFW.Down()] == noncolinmat[1:5,16:20]
@test noncolinmat[ats[1], ats[2], DFW.Down(), DFW.Up()] == noncolinmat[11:15, 6:10]
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 1560 | using DFWannier
using ProfileView
T = Float32
x = WannierModel{T}("/Users/ponet/Documents/Fysica/PhD/GeTe/colin/paperxsf/test1","/Users/ponet/Documents/Fysica/PhD/GeTe/fullrel/GeTe_bands.out",[[Atom(T[0.0,0.0,-0.0239129,-0.155854]...) for i=1:4]...,[Atom(T[0.0,0.0,5.5540692,0.318205]...) for i=1:4]...]);
@time tbbandssoc = calculate_eig_cm_angmom_soc(x,90:0.2:110);
Profile.clear()
using BenchmarkTools
@benchmark tbbandssoc = calculate_eig_cm_angmom_soc(x,90:0.2:110)
@profile tbbandssoc1 = calculate_eig_cm_angmom_soc(x,50:0.2:110);
@time calculate_angmom(x.wfcs[1],x.wfcs[3])
Profile.clear()
Profile.init(1000000000, 0.00100000)
ProfileView.view()
@code_warntype read_xsf_file("/Users/ponet/Documents/Fysica/PhD/GeTe/colin/paperxsf/test1")
struct perfTest
val::Float64
function perfTest()
new(rand(Float64))
end
end
struct perfTest2
val::Float64
t::Bool
function perfTest2()
new(rand(Float64),rand(Bool))
end
end
const test_array = [perfTest() for i=1:10000000]
const test_array2 = [perfTest2() for i=1:10000000]
function bench_test(t)
out = 0.0
@inbounds for i=1:10000000
out += t[i].val
end
out
end
# using StaticArrays
function bench_test2(t)
out = 0.0
v = Array{perfTest2,1}(12)
@inbounds for i=1:10:10000000
unsafe_copy!(v,1,t,i,12)
@inbounds for j = 1:12
out += v[j].val
end
end
out
end
function bench_test2(t)
out = 0.0
@inbounds for i=1:10000000
out += t[i].val
end
out
end
using BenchmarkTools
@code_native bench_test(test_array)
@code_native bench_test2(test_array2)
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 4670 | # EXCLUDE FROM TESTING
# this file doesn't have an entry point, see `verify.jl` instead
# Fast parallel reduction for Kepler hardware
# - uses shuffle and shared memory to reduce efficiently
# - support for large arrays
#
# Based on devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
using CUDAdrv, CUDAnative
#
# Main implementation
#
# Reduce a value across a warp
@inline function reduce_warp(op::Function, val::T)::T where {T}
offset = CUDAnative.warpsize() ÷ UInt32(2)
# TODO: this can be unrolled if warpsize is known...
while offset > 0
val = op(val, shfl_down(val, offset))
offset ÷= UInt32(2)
end
return val
end
# Reduce a value across a block, using shared memory for communication
@inline function reduce_block(op::Function, val_x::T,val_y::T,val_z::T)::T where {T}
# shared mem for 32 partial sums
shared = @cuStaticSharedMem(T, 32)
# TODO: use fldmod1 (JuliaGPU/CUDAnative.jl#28)
wid_x = div(threadIdx().x-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
wid_y = div(threadIdx().y-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
wid_z = div(threadIdx().z-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
lane_x = rem(threadIdx().x-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
lane_y = rem(threadIdx().y-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
lane_z = rem(threadIdx().z-UInt32(1), CUDAnative.warpsize()) + UInt32(1)
# each warp performs partial reduction
val_x = reduce_warp(op, val_x)
val_y = reduce_warp(op, val_y)
val_z = reduce_warp(op, val_z)
# write reduced value to shared memory
if lane_x == 1
@inbounds shared[wid_x] = val_x
end
if lane_y == 1
@inbounds shared[wid_y] = val_y
end
if lane_z == 1
@inbounds shared[wid_z] = val_z
end
# wait for all partial reductions
sync_threads()
# read from shared memory only if that warp existed
@inbounds val_x = (threadIdx().x <= fld(blockDim().x, CUDAnative.warpsize())) ? shared[lane_x] : zero(T)
@inbounds val_y = (threadIdx().y <= fld(blockDim().y, CUDAnative.warpsize())) ? shared[lane_y] : zero(T)
@inbounds val_z = (threadIdx().z <= fld(blockDim().z, CUDAnative.warpsize())) ? shared[lane_z] : zero(T)
# final reduce within first warp
if wid_x == 1
val_x = reduce_warp(op, val_x)
end
if wid_y == 1
val_y = reduce_warp(op, val_y)
end
if wid_z == 1
val_z = reduce_warp(op, val_z)
end
return val_x + val_y + val_z
end
# Reduce an array across a complete grid
function reduce_grid(op::Function, input::CuDeviceVector{T}, output::CuDeviceVector{T},
len::CuDeviceVector{Integer}) where {T}
# TODO: neutral element depends on the operator (see Base's 2 and 3 argument `reduce`)
val = zero(T)
# reduce multiple elements per thread (grid-stride loop)
# TODO: step range (see JuliaGPU/CUDAnative.jl#12)
i_x = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
i_y = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y
i_z = (blockIdx().z-UInt32(1)) * blockDim().z + threadIdx().z
step_x = blockDim().x * gridDim().x
step_y = blockDim().y * gridDim().y
step_z = blockDim().z * gridDim().z
while i_x <= len[1]
while i_y <= len[2]
while i_z <= len[3]
@inbounds val = op(val, input[i])
i_z+=step_z
end
i_y += step_y
end
i_x += step_x
end
val = reduce_block(op, val)
if threadIdx().x == UInt32(1)
@inbounds output[blockIdx().x] = val
end
return
end
"""
Reduce a large array.
Kepler-specific implementation, ie. you need sm_30 or higher to run this code.
"""
function gpu_reduce(op::Function, input::CuVector{T}, output::CuVector{T}) where {T}
len = length(input)
# TODO: these values are hardware-dependent, with recent GPUs supporting more threads
threads = 512
blocks = min((len + threads - 1) ÷ threads, 1024)
# the output array must have a size equal to or larger than the number of thread blocks
# in the grid because each block writes to a unique location within the array.
if length(output) < blocks
throw(ArgumentError("output array too small, should be at least $blocks elements"))
end
@cuda (blocks,threads) reduce_grid(op, input, output, Int32(len))
@cuda (1,1024) reduce_grid(op, output, output, Int32(blocks))
return
end
ctx = CuCurrentContext()
dev = device(ctx)
if capability(dev) < v"3.0"
warn("this example requires a newer GPU")
exit(0)
end
len = 10^7
input = ones(Int32, len)
# CPU
cpu_val = reduce(+, input)
# CUDAnative
let
gpu_input = CuArray(input)
gpu_output = similar(gpu_input)
gpu_reduce(+, gpu_input, gpu_output)
gpu_val = Array(gpu_output)[1]
@assert cpu_val == gpu_val
end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 391 | using DFWannier
using Test
using LinearAlgebra
assetfile(f) = joinpath(@__DIR__, "assets", f)
@time @testset "linalg" begin include("linalg.jl") end
@time @testset "hami_calcs" begin include("hami_calcs.jl") end
# @time @testset "berry" begin include("berry.jl") end
@time @testset "exchanges" begin include("exchanges.jl") end
@time @testset "wan_calcs" begin include("wan_calcs.jl") end
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | code | 1677 | import DFWannier: Point3, SVector
import DFWannier: WannierFunction
x_range = -1:0.01:1
y_range = -1:0.01:1
z_range = -1:0.01:1
wfc_grid = [Point3(x, y, z) for x in x_range, y in y_range, z in z_range]
px_orb = normalize(WannierFunction(wfc_grid, [SVector(((p[1] + 0im) * ℯ^(-norm(p)^2)),) for p in wfc_grid]))
px_orb2 = px_orb'
LinearAlgebra.adjoint!(px_orb2, px_orb2)
@test values(px_orb2) == values(px_orb)
@test norm(px_orb) ≈ norm(px_orb2) ≈ 1.0
py_orb = normalize(WannierFunction(wfc_grid, [SVector(((p[2] + 0im) * ℯ^(-norm(p)^2)),) for p in wfc_grid]))
pz_orb = normalize(WannierFunction(wfc_grid, [SVector(((p[3] + 0im) * ℯ^(-norm(p)^2)),) for p in wfc_grid]))
@test dot(px_orb, py_orb) <= 1.0e-15
@test dot(px_orb, px_orb) ≈ 1.0
Lx = zeros(ComplexF64, 3, 3)
Ly = zeros(ComplexF64, 3, 3)
Lz = zeros(ComplexF64, 3, 3)
for (i1, p1) in enumerate((px_orb, py_orb, pz_orb)), (i2, p2) in enumerate((px_orb, py_orb, pz_orb))
Lx[i1, i2], Ly[i1, i2], Lz[i1, i2] = DFW.calc_angmom(p1, p2, zero(Point3))
end
@test norm(sum(Lx .- [0 0 0; 0 0 -im; 0 im 0])) < 1e-4
@test norm(sum(Ly .- [0 0 im; 0 0 0; -im 0 0])) < 1e-4
@test norm(sum(Lz .- [0 -im 0; im 0 0; 0 0 0])) < 1e-4
px_orb_up = normalize(WannierFunction(wfc_grid, [SVector((p[1] + 0im, zero(ComplexF64)) .* ℯ^(-norm(p)^2)) for p in wfc_grid]))
px_orb_dn = normalize(WannierFunction(wfc_grid, [SVector((zero(ComplexF64), p[1] + 0im) .* ℯ^(-norm(p)^2)) for p in wfc_grid]))
@test dot(px_orb_dn, px_orb_up) ≈ 0.0
@test DFW.calc_spin(px_orb_up, px_orb_up) ≈ DFW.Point3(0.0 + 0im, 0.0+0.0im, 0.5 + 0.0im)
@test norm(DFW.calc_dip(px_orb, py_orb)) < 1e-17
@test norm(DFW.calc_dip(px_orb_up, px_orb_dn)) < 1e-17
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 1130 | # DFWannier
| **Documentation** | **Build Status** |
|:----------------------------------------------------------------------------------- |:----------------------------------------------- |
| [![][docs-img]][docs-url] [![][ddocs-img]][ddocs-url] | [![][ci-img]][ci-url] [![][ccov-img]][ccov-url] |
[ddocs-img]: https://img.shields.io/badge/docs-dev-blue.svg
[ddocs-url]: https://louisponet.github.io/DFWannier.jl/dev
[docs-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-url]: https://louisponet.github.io/DFWannier.jl/stable
[ccov-img]: https://codecov.io/gh/louisponet/DFWannier.jl/branch/master/graph/badge.svg?token=OJumiSp7H1
[ccov-url]: https://codecov.io/gh/louisponet/DFWannier.jl
[ci-img]: https://github.com/louisponet/DFWannier.jl/workflows/CI/badge.svg?branch=master&event=push
[ci-url]: https://github.com/louisponet/DFWannier.jl/actions
[](https://pkgs.genieframework.com?packages=DFWannier)
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 2356 | ```@meta
EditURL = "<unknown>/src/calculate_exchanges.jl"
```
````@example calculate_exchanges
using LinearAlgebra#hide
BLAS.set_num_threads(1)#hide
using DFWannier
assets_dir = joinpath(splitdir(pathof(DFWannier))[1], "../test/assets")
````
We first read the colinear Hamiltonian from the outputs of wannier90.
````@example calculate_exchanges
hami = read_hamiltonian(joinpath(assets_dir, "wanup.chk"),
joinpath(assets_dir, "wandn.chk"),
joinpath(assets_dir, "wanup.eig"),
joinpath(assets_dir, "wandn.eig"))
````
We can then generate the band structure by first defining a k-path and then performing the
interpolation.
````@example calculate_exchanges
structure = read_w90_input(joinpath(assets_dir, "wanup.win")).structure
````
First we create some high-symmetry k-points,
then we explicitly interpolate between them to form
`band_kpoints`.
````@example calculate_exchanges
kpoints = [Vec3(0.0, 0.0, 0.5),
Vec3(0.0, 0.5, 0.5),
Vec3(0.5, 0.5, 0.5),
Vec3(0.5, 0.5, 0.0),
Vec3(0.5, 0.0, 0.0),
Vec3(0.0, 0.0, 0.0)]
band_kpoints = eltype(kpoints)[]
for i = 1:length(kpoints)-1
for α in range(0, 1, 20)
push!(band_kpoints, Vec3((1-α) .* kpoints[i] .+ α .* kpoints[i+1]))
end
end
````
In order to calculate the magnetic exchanges we need to specify the Fermi level (found, e.g., in an nscf output file),
and the atoms we want to calculate the exchanges between.
We also set the number of k-points used for the k-point interpolation, and the number of frequency points used to evaluate the
contour integral (`n_ωh`, `n_ωv`).
````@example calculate_exchanges
exch = calc_exchanges(hami, structure[element(:Ni)], 12.0; nk=(5,5,5), n_ωh = 300, n_ωv = 30)
````
This leads to a list of exchanges where each holds the J matrix, whose trace is the actual exchange between the sites specified
by `atom1` and `atom2`.
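For instance, the scalar exchange coupling of each pair is given by the trace of its `J` matrix (a minimal sketch; `tr` comes from `LinearAlgebra`):

````@example calculate_exchanges
using LinearAlgebra: tr
J_scalars = [tr(e.J) for e in exch]
````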
To calculate the exchange between the atoms in the central unit cell and those in a shifted one, we can use the `R` keyword argument.
In this specific case we are calculating the exchanges towards the unit cell shifted twice along the `b` cell vector.
````@example calculate_exchanges
exch = calc_exchanges(hami, structure[element(:Ni)], 12.0, R=(0,2,0); nk=(5,5,5), n_ωh = 300, n_ωv = 30)
````
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 800 | # Exchanges
```@meta
CurrentModule = DFWannier
```
Using the [`TBHamiltonian`](@ref TBOperator) and [`calc_exchanges`](@ref), it is possible to calculate the magnetic exchange parameters $J_{ij}$ between atoms $i$ and $j$ for the isotropic Heisenberg model:

$E = \sum_{i,j} J_{ij} \overrightarrow{S}_i \cdot \overrightarrow{S}_j$

This involves calculating the Green's functions $G$ and on-site magnetic field matrices $\Delta$, which then determine $J$ as

$J_{ij} = \frac{1}{2\pi} \int_{-\infty}^{E_f} d\varepsilon \, \Delta_i G_{ij}^{\downarrow}(\varepsilon) \Delta_j G_{ji}^{\uparrow}(\varepsilon).$
See [Weak ferromagnetism in antiferromagnets: Fe2O3 and La2CuO4](https://elar.urfu.ru/bitstream/10995/111495/1/2-s2.0-33644554849.pdf).
```@docs
Exchange2ndOrder
Exchange4thOrder
calc_exchanges
```
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 1775 | ```@meta
EditURL = "<unknown>/src/generate_bandstructure.jl"
```
````@example generate_bandstructure
using DFWannier
using Plots
assets_dir = joinpath(splitdir(pathof(DFWannier))[1], "../test/assets")
````
We can use a .chk and .eig file to construct a tight binding Hamiltonian
in the Wannier basis.
````@example generate_bandstructure
hami = read_hamiltonian(joinpath(assets_dir, "Fe/Fe.chk"), joinpath(assets_dir, "Fe/Fe.eig"))
````
We can then generate the band structure by first defining a k-path and then performing the
interpolation.
````@example generate_bandstructure
structure = read_w90_input(joinpath(assets_dir, "Fe/Fe.win")).structure
````
First we create some high-symmetry k-points,
then we explicitly interpolate between them to form
`band_kpoints`.
````@example generate_bandstructure
kpoints = [Vec3(0.0, 0.0, 0.5),
Vec3(0.0, 0.5, 0.5),
Vec3(0.5, 0.5, 0.5),
Vec3(0.5, 0.5, 0.0),
Vec3(0.5, 0.0, 0.0),
Vec3(0.0, 0.0, 0.0)]
band_kpoints = eltype(kpoints)[]
for i = 1:length(kpoints)-1
for α in range(0, 1, 20)
push!(band_kpoints, Vec3((1-α) .* kpoints[i] .+ α .* kpoints[i+1]))
end
end
bands = wannierbands(hami, band_kpoints)
plot(bands)
````
We can also construct a colinear Tight Binding Hamiltonian by
reading the outputs of up and down Wannierizations
````@example generate_bandstructure
hami = read_hamiltonian(joinpath(assets_dir, "wanup.chk"),
joinpath(assets_dir, "wandn.chk"),
joinpath(assets_dir, "wanup.eig"),
joinpath(assets_dir, "wandn.eig"))
structure = read_w90_input(joinpath(assets_dir, "wanup.win")).structure
bands = wannierbands(hami, band_kpoints)
plot(bands)
````
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 528 | # DFWannier Documentation
This package is meant to be used as a post-processing tool for Wannier90. It relies heavily on
[`DFControl`](https://louisponet.github.io/DFControl.jl/stable).
The main capabilities are
- Generating the Tight-Binding Hamiltonian, Spin and dipole operators
- Interpolating bands and other properties in _k_-space using said operators
- Generating the real space Wannier functions
- Calculate various Berry and geometric properties
- Calculate the magnetic exchange parameters for the Heisenberg model
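A minimal sketch of the first two capabilities (assuming a completed Wannier90 run; the file names `wan.chk` and `wan.eig` are placeholders):

```julia
using DFWannier

# Construct the tight-binding Hamiltonian in the Wannier basis
hami = read_hamiltonian("wan.chk", "wan.eig")

# Interpolate the eigenvalues on a uniform 5x5x5 k-grid
bands = wannierbands(hami, uniform_kgrid(5, 5, 5))
```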
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 0.2.3 | c8b8a89084dc4841c256be79813592574e76dcf5 | docs | 447 | # Tight Binding
Various tight binding operators can be generated using the outputs of Wannier90.
```@meta
CurrentModule = DFWannier
```
```@docs
TBBlock
TBOperator
```
The following functions can be used to generate specific operators.
```@docs
read_hamiltonian
read_spin
read_r
```
## Reciprocal
The tight binding operators can be used to interpolate properties in reciprocal space.
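For example, a minimal sketch (assuming `using DFWannier` and a `hami` obtained via [`read_hamiltonian`](@ref)):

```julia
kpt = Vec3(0.0, 0.0, 0.5)

# Reciprocal-space Hamiltonian at a single k-point
H = Hk(hami, kpt)

# Interpolated bands along a list of k-points
bands = wannierbands(hami, [kpt])
```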
```@docs
Hk
HamiltonianKGrid
WannierBand
wannierbands
```
| DFWannier | https://github.com/louisponet/DFWannier.jl.git |
|
[
"MIT"
] | 1.2.0 | 412b250c1eb756212d79e00ed5019b2f096cb629 | code | 8000 | #=
InterpolatedRejectionSampling
Copyright © 2019 Mark Wells <[email protected]>
Distributed under terms of the AGPL-3.0 license.
=#
module InterpolatedRejectionSampling
using Base.Iterators
using Interpolations
using Interpolations: Extrapolation
using StatsBase: sample, Weights
export irsample, irsample!
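# Algorithm overview:
# 1. The interpolation knots divide the domain into cells; `Cells` stores each
#    cell's probability mass, estimated from the density at the cell midpoint.
# 2. A cell is drawn with probability proportional to that mass (`sample`).
# 3. Inside the chosen cell, rejection sampling against a constant envelope
#    (the maximum of the density over the cell corners) produces the sample.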
@inline function midpoints(x::AbstractVector{Float64})
length(x) == 1 && return x
retval = Vector{Float64}(undef, length(x)-1)
@fastmath @inbounds @simd for i in eachindex(retval)
retval[i] = (x[i] + x[i+1])/2
end
return retval
end
@inline function midpoints(x::AbstractRange{Float64})
length(x) == 1 && return x
Δx = 0.5*step(x)
return range(first(x)+Δx, stop=last(x)-Δx, length=length(x)-1)
end
@inline get_interp(interp::AbstractExtrapolation{Float64,N,ITPT,IT}, val::NTuple{N,Float64}) where {N,ITPT,IT} = interp(val...)
#@inline get_knots(interp::Extrapolation{Float64,1,ITPT,BSpline{Linear},ET}) where {ITPT,ET} = first(interp.itp.ranges)
#@inline get_knots(interp::Extrapolation{Float64,1,ITPT,Gridded{Linear},ET}) where {ITPT,ET} = first(interp.itp.knots)
@inline get_knots(interp::Extrapolation{Float64,N,ITPT,BSpline{Linear},ET}) where {N,ITPT,ET} = interp.itp.ranges
@inline get_knots(interp::Extrapolation{Float64,N,ITPT,Gridded{Linear},ET}) where {N,ITPT,ET} = interp.itp.knots
@inline get_coefs(interp::Extrapolation{Float64,N,ITPT,BSpline{Linear},ET}) where {N,ITPT,ET} = interp.itp.itp.coefs
@inline get_coefs(interp::Extrapolation{Float64,N,ITPT,Gridded{Linear},ET}) where {N,ITPT,ET} = interp.itp.coefs
@inline get_Δ(x::AbstractVector) = length(x) > 1 ? diff(x) : one(eltype(x)) # unit weight for singleton dimensions, kept Float64 for downstream dispatch
@inline integrate(interp::AbstractExtrapolation{Float64,N,ITPT,IT}, (x, Δx)::NTuple{2,NTuple{N,Float64}}
) where {N,ITPT,IT} = prod(Δx)*get_interp(interp, x)
function integrate(knots::NTuple{N,AbstractVector{Float64}},
interp::AbstractExtrapolation{Float64,N,ITPT,IT}
) where {N,ITPT,IT}
midknots = map(midpoints, knots)
Δknots = map(get_Δ, knots)
return sum(x -> integrate(interp, x), zip(product(midknots...), product(Δknots...)))
end
@inline integrate(interp::AbstractExtrapolation{Float64,N,ITPT,IT}
) where {N,ITPT,IT} = integrate(get_knots(interp), interp)
@inline is_normalized(interp::AbstractExtrapolation{Float64,N,ITPT,IT}
) where {N,ITPT,IT} = isapprox(integrate(interp), one(Float64))
function normalize_interp(interp::AbstractExtrapolation{Float64,N,ITPT,IT}) where {N,ITPT,IT}
knots = get_knots(interp)
coefs = get_coefs(interp)
A = integrate(interp)
coefs ./= A
return LinearInterpolation(knots, coefs)
end
@inline sliced_knots(k::AbstractVector{Float64}, s::T
) where T<:Union{Missing,Float64} = ismissing(s) ? k : [s]
@inline sliced_knots(knots::NTuple{N,AbstractVector}, slice::AbstractVector{Union{Missing,Float64}}
) where N = ntuple(i -> sliced_knots(knots[i], slice[i]), Val(N))
struct Cells{Float64,N,ITPT,IT,ET}
knots::NTuple{N,Vector}
pmass::Array{Float64,N}
cinds::CartesianIndices{N,NTuple{N,Base.OneTo{Int}}}
interp::Extrapolation{Float64,N,ITPT,IT,ET}
function Cells(interp::Extrapolation{Float64,N,ITPT,IT,ET},
knots::NTuple{N,AbstractVector} = get_knots(interp)
) where {N,ITPT,IT,ET}
ksz = map(length, knots)
midpnt = map(midpoints, knots)
msz = map(length, midpnt)
pmass = Array{Float64,N}(undef,msz)
for (i,k) in enumerate(product(midpnt...))
pmass[i] = get_interp(interp, k)
end
pmass ./= sum(pmass)
new{Float64,N,ITPT,IT,ET}(knots, pmass, CartesianIndices(msz), interp)
end
function Cells(interp::Extrapolation{Float64,N,ITPT,IT,ET},
slice::AbstractVector{Union{Missing,Float64}}
) where {N,ITPT,IT,ET}
knots = get_knots(interp)
sknots = sliced_knots(knots, slice)
return Cells(interp, sknots)
end
end
@inline get_interp(C::Cells{Float64,N,ITPT,IT,ET}, val::NTuple{N,Float64}
) where {N,ITPT,IT,ET} = get_interp(C.interp, val)
import StatsBase.sample
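# Draw a cell (as a CartesianIndex) with probability proportional to its probability mass.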
@inline sample(C::Cells) = sample(CartesianIndices(C.cinds), Weights(vec(C.pmass)))
@inline _get_xmin(x::AbstractVector{Float64}, i::Int)::Float64 = x[i]
@inline _get_span(x::AbstractVector{Float64}, i::Int)::Float64 = length(x) == 1 ? zero(Float64) : x[i+1] - x[i]
struct Support{Float64,N}
xmin::NTuple{N,Float64}
span::NTuple{N,Float64}
function Support(C::Cells{Float64,N}, cind::CartesianIndex{N}) where {N}
xmin = ntuple(i -> _get_xmin(C.knots[i], cind.I[i])::Float64, Val(N))
span = ntuple(i -> _get_span(C.knots[i], cind.I[i])::Float64, Val(N))
new{Float64,N}(xmin,span)
end
end
@inline Base.getindex(S::Support{Float64,N}, i::Int) where N = (S.xmin[i], S.span[i])
Base.iterate(S::Support{Float64,N}, state::Int=1) where N = state > N ? nothing : (S[state], state+1)
@inline propose_sample(S::Support{Float64,N}
) where N = ntuple(i -> iszero(last(S[i])) ?
first(S[i]) :
first(S[i]) + rand()*last(S[i]),
Val(N)
)
@inline get_extrema(S::Support{Float64,N}
) where N = product(ntuple(i -> (first(S[i]), first(S[i]) + last(S[i])),
Val(N)
)...
)
@inline maxmapreduce(f,a) = mapreduce(f, max, a)
struct Envelope{Float64,N,ITPT,IT,ET}
support::Support{Float64,N}
maxvalue::Float64
interp::Extrapolation{Float64,N,ITPT,IT,ET}
function Envelope(C::Cells{Float64,N,ITPT,IT,ET},
cind::CartesianIndex{N}
) where {N,ITPT,IT,ET}
support = Support(C, cind)
spnts = get_extrema(support)
maxvalue = maxmapreduce(x -> get_interp(C,x), spnts)
new{Float64,N,ITPT,IT,ET}(support, maxvalue, C.interp)
end
end
@inline get_interp(E::Envelope{Float64,N,ITPT,IT,ET},
val::NTuple{N,Float64}
) where {N,ITPT,IT,ET} = get_interp(E.interp, val)
@inline propose_sample(E::Envelope) = propose_sample(E.support)
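# Rejection sampling within one cell: propose uniformly on the cell's support and accept with probability interp(samp)/maxvalue.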
function rsample(E::Envelope{Float64,N,ITPT,IT,ET}) where {N,ITPT,IT,ET}
while true
samp = propose_sample(E)
if rand()*E.maxvalue ≤ get_interp(E, samp)
return samp
end
end
error("unable to draw a sample after $maxruns runs")
end
function rsample(C::Cells)
cind = sample(C)
E = Envelope(C,cind)
return rsample(E)
end
function rsample(interp::Extrapolation)
C = Cells(interp)
return rsample(C)
end
function irsample(knots::NTuple{N,AbstractVector{Float64}},
probs::AbstractArray{Float64,N}
) where N
interp = LinearInterpolation(knots, probs)
return rsample(interp)
end
function irsample(knots::NTuple{N,AbstractVector{Float64}},
probs::AbstractArray{Float64,N},
n::Int
) where N
interp = LinearInterpolation(knots, probs)
retval = Matrix{Float64}(undef, N, n)
for s in eachcol(retval)
s .= rsample(interp)
end
return retval
end
function rsample(interp::Extrapolation, slice::AbstractVector{Union{Missing,Float64}})
C = Cells(interp, slice)
return rsample(C)
end
function irsample!(slices::AbstractMatrix{Union{Missing,Float64}},
knots::NTuple{N,AbstractVector{Float64}},
probs::AbstractArray{Float64,N}
) where N
interp = LinearInterpolation(knots, probs)
for s in eachcol(slices)
s .= rsample(interp, s)
end
end
end
| InterpolatedRejectionSampling | https://github.com/m-wells/InterpolatedRejectionSampling.jl.git |
|
[
"MIT"
] | 1.2.0 | 412b250c1eb756212d79e00ed5019b2f096cb629 | code | 3838 | using InterpolatedRejectionSampling
using InterpolatedRejectionSampling: get_knots, integrate, normalize_interp,
is_normalized, get_interp, maxmapreduce, Cells, sample, Support, iterate,
propose_sample, get_extrema, Envelope, rsample, irsample, irsample!
using Test
using Interpolations
using Random: seed!
seed!(1234)
slice = [missing,missing,0.3]
knots = (π.*[0.0, sort(rand(20))..., 1.0],
π.*[0.0, sort(rand(19))..., 1.0],
π.*[0.0, sort(rand(18))..., 1.0])
coefs = [sin(x)*sin(y)*sin(z) for x=knots[1],y=knots[2],z=knots[3]]
interp = LinearInterpolation(knots,coefs)
ranges = (range(0, stop=π, length=20),
range(0, stop=π, length=19),
range(0, stop=π, length=18))
coefs_ranges = [sin(x)*sin(y)*sin(z) for x=ranges[1],y=ranges[2],z=ranges[3]]
interp_ranges = LinearInterpolation(ranges,coefs_ranges)
@testset "Utilities" begin
@test get_knots(interp) === knots
@test isapprox(integrate(interp), 8.0; atol = 0.5)
@test get_knots(interp_ranges) === ranges
@test isapprox(integrate(interp_ranges), 8.0; atol = 0.5)
ninterp = normalize_interp(interp)
@test isa(ninterp, AbstractExtrapolation)
@test isapprox(integrate(ninterp), one(Float64))
@test is_normalized(ninterp)
ninterp = normalize_interp(interp_ranges)
@test isa(ninterp, AbstractExtrapolation)
@test isapprox(integrate(ninterp), one(Float64))
@test is_normalized(ninterp)
pnt = (0.2,1.0,0.9)
@test get_interp(interp, pnt) === interp(pnt...)
@test get_interp(interp_ranges, pnt) === interp_ranges(pnt...)
@test maxmapreduce(x -> x^2, [0.0, 0.5, -2.0, 1.5]) == 4.0
end
@testset "Cells" begin
cells = Cells(interp)
@test isa(cells, Cells)
s = sample(cells)
@test isa(s, CartesianIndex{3})
cells = Cells(interp,slice)
@test isa(cells, Cells)
s = sample(cells)
@test isa(s, CartesianIndex{3})
cells = Cells(interp_ranges)
@test isa(cells, Cells)
end
@testset "Support" begin
cells = Cells(interp)
s = sample(cells)
supp = Support(cells, s)
@test isa(supp, Support{Float64,3})
@test isa(iterate(supp), Tuple{NTuple{2,Float64}, Int})
@test isa(iterate(supp,2), Tuple{NTuple{2,Float64}, Int})
@test isa(iterate(supp,3), Tuple{NTuple{2,Float64}, Int})
@test isa(iterate(supp,4), Nothing)
samp = propose_sample(supp)
@test isa(samp, NTuple{3,Float64})
spnts = get_extrema(supp)
@test length(spnts) == 8
@test eltype(spnts) == NTuple{3,Float64}
end
@testset "Envelope/rsample" begin
cells = Cells(interp)
s = sample(cells)
envelope = Envelope(cells, s)
@test isa(envelope, Envelope)
@test isa(rsample(envelope), NTuple{3,Float64})
@test isa(rsample(interp), NTuple{3,Float64})
@test isa(rsample(interp, slice), NTuple{3,Float64})
end
@testset "irsample/irsample!" begin
@test isa(irsample(knots, coefs), NTuple{3,Float64})
samp = irsample(knots, coefs, 2)
@test isa(samp, Matrix{Float64})
@test size(samp) == (3,2)
slices = Matrix{Union{Missing,Float64}}(missing, 3, 3)
slices[1,1], slices[1,2], slices[2,2] = 0.2, 0.3, 0.4
@test !iszero(count(ismissing, slices))
irsample!(slices, knots, coefs)
@test iszero(count(ismissing, slices))
@test isa(convert(Matrix{Float64}, slices), Matrix{Float64})
@test isa(irsample(ranges, coefs_ranges), NTuple{3,Float64})
samp = irsample(ranges, coefs_ranges, 2)
@test isa(samp, Matrix{Float64})
@test size(samp) == (3,2)
slices = Matrix{Union{Missing,Float64}}(missing, 3, 3)
slices[1,1], slices[1,2], slices[2,2] = 0.2, 0.3, 0.4
@test !iszero(count(ismissing, slices))
irsample!(slices, ranges, coefs_ranges)
@test iszero(count(ismissing, slices))
@test isa(convert(Matrix{Float64}, slices), Matrix{Float64})
end
| InterpolatedRejectionSampling | https://github.com/m-wells/InterpolatedRejectionSampling.jl.git |
|
[
"MIT"
] | 1.2.0 | 412b250c1eb756212d79e00ed5019b2f096cb629 | docs | 2492 | # InterpolatedRejectionSampling.jl
[](https://travis-ci.com/m-wells/InterpolatedRejectionSampling.jl)
[](https://codecov.io/gh/m-wells/InterpolatedRejectionSampling.jl)
[](https://coveralls.io/github/m-wells/InterpolatedPDFs.jl?branch=master)
## Draw samples from discrete multivariate distributions
Given a discrete (n-dimensional) grid of values and the vectors that describe the span of the underlying space, we can draw samples.
The interpolation of the space is handled by [`Interpolations.jl`](https://github.com/JuliaMath/Interpolations.jl)
# A simple example
First we need to set up a discrete distribution:
```
julia> X = range(0, π, length=10)
julia> Y = range(0, π/4, length=9)
julia> knots = (X,Y)
julia> prob = [sin(x)+tan(y) for x in X, y in Y]
10×9 Array{Float64,2}:
0.0 0.0984914 0.198912 0.303347 … 0.668179 0.820679 1.0
0.34202 0.440512 0.540933 0.645367 1.0102 1.1627 1.34202
0.642788 0.741279 0.8417 0.946134 1.31097 1.46347 1.64279
0.866025 0.964517 1.06494 1.16937 1.5342 1.6867 1.86603
0.984808 1.0833 1.18372 1.28815 1.65299 1.80549 1.98481
0.984808 1.0833 1.18372 1.28815 … 1.65299 1.80549 1.98481
0.866025 0.964517 1.06494 1.16937 1.5342 1.6867 1.86603
0.642788 0.741279 0.8417 0.946134 1.31097 1.46347 1.64279
0.34202 0.440512 0.540933 0.645367 1.0102 1.1627 1.34202
1.22465e-16 0.0984914 0.198912 0.303347 0.668179 0.820679 1.0
```
We can visualize the probability density matrix like so:
```
julia> using PyPlot
julia> imshow(transpose(prob);
extent = (knots[1][1], knots[1][end], knots[2][1], knots[2][end]),
aspect = "auto",
origin = "lower")
julia> ax = gca()
julia> ax.set_xlabel("x-axis [sin(x)]")
julia> ax.set_ylabel("y-axis [tan(y)]")
```
To perform the sampling:
```
julia> using InterpolatedRejectionSampling
julia> n = 100_000
julia> xy = irsample(knots,prob,n)
julia> hist2D(xy[1,:],xy[2,:])
julia> ax = gca()
julia> ax.set_xlabel("x-axis [sin(x)]")
julia> ax.set_ylabel("y-axis [tan(y)]")
```
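`irsample!` draws conditional samples in place: each column of a
`Matrix{Union{Missing,Float64}}` is one sample, pre-filled entries stay fixed,
and `missing` entries are drawn from the corresponding conditional slice of the
distribution. A short sketch for the 2-D example above:
```
julia> slices = Matrix{Union{Missing,Float64}}(missing, 2, 3)
julia> slices[1,1] = 0.3 # fix the x-coordinate of the first sample
julia> irsample!(slices, knots, prob)
julia> slices            # the missing entries have been filled in place
```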
| InterpolatedRejectionSampling | https://github.com/m-wells/InterpolatedRejectionSampling.jl.git |
|
[
"MIT"
] | 0.1.0 | 0b5b2f6500e085defd85ca008cce05a3a44a00ca | code | 624 | using GBPirate
using Documenter
DocMeta.setdocmeta!(GBPirate, :DocTestSetup, :(using GBPirate); recursive=true)
makedocs(;
modules=[GBPirate],
authors="Wimmerer <[email protected]> and contributors",
repo="https://github.com/Wimmerer/GBPirate.jl/blob/{commit}{path}#{line}",
sitename="GBPirate.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://Wimmerer.github.io/GBPirate.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/Wimmerer/GBPirate.jl",
devbranch="main",
)
| GBPirate | https://github.com/Wimmerer/GBPirate.jl.git |
|
[
"MIT"
] | 0.1.0 | 0b5b2f6500e085defd85ca008cce05a3a44a00ca | code | 1093 | module GBPirate
import SparseArrays
using SuiteSparseGraphBLAS
import LinearAlgebra: mul!
export gbmul!
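# gbmul!(C, A, B, α, β) follows the 5-arg LinearAlgebra.mul! contract
# (C = α*A*B + β*C) but performs the product with SuiteSparse:GraphBLAS by
# wrapping A and B as shallow (non-owning, zero-copy) GBMatrices.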
function gbmul!(C::StridedVecOrMat{T}, A::SparseArrays.SparseMatrixCSC{T, Int64}, B::StridedVecOrMat{T}, α::Number, β::Number) where {T}
Agb = GBMatrix{T}(size(A))
SuiteSparseGraphBLAS._packcscmatrix!(Agb, A.colptr, A.rowval, A.nzval)
SuiteSparseGraphBLAS._makeshallow!(Agb)
Bgb = GBMatrix{T}(size(B))
SuiteSparseGraphBLAS._packdensematrix!(Bgb, B)
SuiteSparseGraphBLAS._makeshallow!(Bgb)
Cgb = GBMatrix(size(C), convert(T, α))
gbset(Cgb, :format, :bycol)
gbset(Cgb, :sparsity_control, :full)
mul!(Cgb, Agb, Bgb; accum=*)
# Done with Agb and Bgb now.
# They do not need to be unpacked since they're shallow.
# But packing modified the index vectors of A (shifted to 0-based), so restore them.
A.colptr .+= 1
A.rowval .+= 1
# Unpack the GraphBLAS result (= α*A*B) into a Julia array and fold in β*C.
Ctemp = SuiteSparseGraphBLAS._unpackdensematrix!(Cgb)
if iszero(β)
copyto!(C, Ctemp)
else
C .= Ctemp .+ β .* C
end
ccall(:jl_free, Cvoid, (Ptr{T},), pointer(Ctemp))
return C
end
end
| GBPirate | https://github.com/Wimmerer/GBPirate.jl.git |
|
[
"MIT"
] | 0.1.0 | 0b5b2f6500e085defd85ca008cce05a3a44a00ca | code | 89 | using GBPirate
using Test
@testset "GBPirate.jl" begin
# Write your tests here.
end
| GBPirate | https://github.com/Wimmerer/GBPirate.jl.git |
|
[
"MIT"
] | 0.1.0 | 0b5b2f6500e085defd85ca008cce05a3a44a00ca | docs | 415 | # GBPirate
[](https://Wimmerer.github.io/GBPirate.jl/stable)
[](https://Wimmerer.github.io/GBPirate.jl/dev)
[](https://github.com/Wimmerer/GBPirate.jl/actions/workflows/CI.yml?query=branch%3Amain)
| GBPirate | https://github.com/Wimmerer/GBPirate.jl.git |
|
[
"MIT"
] | 0.1.0 | 0b5b2f6500e085defd85ca008cce05a3a44a00ca | docs | 176 | ```@meta
CurrentModule = GBPirate
```
# GBPirate
Documentation for [GBPirate](https://github.com/Wimmerer/GBPirate.jl).
```@index
```
```@autodocs
Modules = [GBPirate]
```
| GBPirate | https://github.com/Wimmerer/GBPirate.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 1395 | push!(LOAD_PATH,"../src/")
using DecomposingPolynomialSystems
using Documenter
DocMeta.setdocmeta!(DecomposingPolynomialSystems, :DocTestSetup, :(using DecomposingPolynomialSystems); recursive=true)
makedocs(;
modules=[DecomposingPolynomialSystems],
authors="Viktor Korotynskiy <[email protected]> and contributors",
repo="https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl/blob/{commit}{path}#{line}",
sitename="DecomposingPolynomialSystems.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://multivariatepolynomialsystems.github.io/DecomposingPolynomialSystems.jl",
edit_link="main",
assets=["assets/custom.css"],
collapselevel=2
),
pages = [
"Introduction" => "index.md",
"Sampling Polynomial Systems" => "sampling.md",
"Symmetries" => [
"Scaling symmetries" => "symmetries/scalings.md",
"Deck transformations" => "symmetries/deck.md",
],
"Invariants" => [
"Invariants" => "invariants/invariants.md",
],
"Decomposing Polynomial Systems" => [
"Decompose" => "decomposition/decompose.md",
]
],
)
deploydocs(;
repo="github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git",
devbranch="main",
)
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 686 | module DecomposingPolynomialSystems
import HomotopyContinuation
const HC = HomotopyContinuation
using HomotopyContinuation.ModelKit
export @var, Variable, Expression, System
using SparseArrays: SparseVector, SparseMatrixCSC, spzeros, AbstractSparseVector, findnz, sparse
using Combinatorics: partitions, multiset_permutations, combinations
using LinearAlgebra: nullspace
using UnPack: @unpack
include("utils.jl")
include("sampled_system.jl")
include("monomials.jl")
# include("expression_map.jl")
include("scalings.jl")
include("interpolation.jl")
include("deck_transformations.jl")
# include("invariants.jl")
# include("implicitization.jl")
# include("decompose.jl")
end # module
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 29453 | export Tolerances,
DeckTransformation,
DeckTransformationGroup,
symmetries_fixing_parameters_dense!,
symmetries_fixing_parameters_graded!,
symmetries_fixing_parameters!,
symmetries_fixing_parameters,
_deck_action, _deck_commutes_with_scaling,
_scalings_commuting_with_deck,
_sample_for_deck_computation!,
_deck_vandermonde_matrix
@kwdef struct Tolerances
nullspace_atol::Float64=0
nullspace_rtol::Float64=0
rref_tol::Float64=1e-5
sparsify_tol::Float64=1e-5
end
MiExpression = Union{Missing, Expression}
struct DeckTransformation
exprs::Vector{MiExpression}
unknowns::Vector{Variable}
parameters::Vector{Variable}
function DeckTransformation(exprs, unknowns, parameters)
# TODO: verify args
return new(exprs, unknowns, parameters)
end
end
Base.getindex(dt::DeckTransformation, inds...) = getindex(dt.exprs, inds...)
function Base.show(io::IO, dt::DeckTransformation)
println(
io,
"DeckTransformation: acts on $(phrase(length(dt.unknowns), "unknown")),",
" fixes $(phrase(length(dt.parameters), "parameter"))",
)
println(io, " action:")
for i in 1:length(dt.exprs)
print(io, " ", dt.unknowns[i], " ↦ ", dt.exprs[i])
i < length(dt.exprs) && print(io, "\n")
end
end
"""
DeckTransformationGroup
A `DeckTransformationGroup` is the result of a deck transformation computation.
"""
struct DeckTransformationGroup
maps::Vector{DeckTransformation}
group::GapObj
F::SampledSystem
end
function DeckTransformationGroup(F::SampledSystem)
symmetries = _init_symmetries(length(F.deck_permutations), unknowns(F))
return DeckTransformationGroup(symmetries, F)
end
function DeckTransformationGroup(
symmetries::Vector{Vector{MiExpression}},
F::SampledSystem
)
action = [DeckTransformation(symmetry, unknowns(F), parameters(F)) for symmetry in symmetries]
return DeckTransformationGroup(action, to_group(deck_permutations(F)), F)
end
function Base.show(io::IO, deck::DeckTransformationGroup)
println(io, "DeckTransformationGroup of order $(length(deck.maps))")
println(io, " structure: ", order(deck.group) == 1 ? "trivial" : group_structure(deck.group))
print(io, " action:")
for i in eachindex(deck.maps)
println(io, "\n ", to_ordinal(i), " map:")
for (j, var) in enumerate(unknowns(deck.F)) # action on parameters is trivial, don't show it
print(io, " ", var, " ↦ ", deck.maps[i][j])
j < length(unknowns(deck.F)) && print(io, "\n")
end
end
end
Base.getindex(deck::DeckTransformationGroup, inds...) = getindex(deck.maps, inds...)
function _denom_deg(
num_deg::SparseVector{Tv,Ti},
grading::Grading{Tv,Ti},
var_id::Int
) where {Tv<:Integer,Ti<:Integer}
denom_deg = spzeros(Tv, Ti, length(num_deg))
U₀ = grading.free_part
if !isnothing(U₀)
denom_deg[1:size(U₀,1)] = num_deg[1:size(U₀,1)] - U₀[:, var_id]
end
k = nfree(grading)
for (sᵢ, Uᵢ) in grading.mod_part
n_scalings = size(Uᵢ,1)
denom_deg[(k+1):(k+n_scalings)] = mod.(num_deg[(k+1):(k+n_scalings)] - Uᵢ[:, var_id], sᵢ)
k += n_scalings
end
return denom_deg
end
# function to_nconstraints_ninstances(
# path_ids::Dict{Vector{Int}, Vector{Int}},
# samples::Dict{Vector{Int}, Samples}
# )
# c_i_dict = Dict{Vector{Int}, NTuple{2, Int}}()
# for key in keys(path_ids)
# c_i_dict[key] = (length(path_ids[key]), ninstances(samples[key]))
# end
# return c_i_dict
# end
# function total_nconst_ninst(
# c_i_dict::Dict{Vector{Int}, NTuple{2, Int}}
# )
# cₜ, iₜ = 0, 0
# for (c, i) in values(c_i_dict)
# cₜ += c*i
# iₜ += i
# end
# return cₜ, iₜ
# end
# function pick_nconstraints_ninstances(
# c_i_dict::Dict{Vector{Int}, NTuple{2, Int}},
# cₘᵢₙ::Int,
# iₘᵢₙ::Int
# )
# picked_c_i = Dict{Vector{Int}, NTuple{2, Int}}()
# cₜ, iₜ = total_nconst_ninst(c_i_dict)
# for (key, (cₖ, iₖ)) in c_i_dict
# cₜ, iₜ = cₜ - cₖ*iₖ, iₜ - iₖ
# Δc, Δi = cₘᵢₙ - cₜ, iₘᵢₙ - iₜ
# if Δc ≤ 0 && Δi ≤ 0
# picked_c_i[key] = (0, 0)
# continue
# end
# i = max(Δi, div(Δc, cₖ, RoundUp))
# i > iₖ && error("Not enough instances or constraints")
# c = max(div(Δc, i, RoundUp), 1)
# c > cₖ && error("Not enough instances or constraints")
# picked_c_i[key] = (c, i)
# cₘᵢₙ, iₘᵢₙ = cₘᵢₙ - c*i, iₘᵢₙ - i
# end
# return picked_c_i
# end
# function _deck_vandermonde_matrix(
# deck_permutation::Vector{Int},
# function_id::Int,
# samples::Dict{Vector{Int}, Samples},
# path_ids_for_deck::Dict{Vector{Int}, Vector{Int}},
# eval_num_mons::Dict{Samples, EvaluatedMonomials},
# eval_denom_mons::Dict{Samples, EvaluatedMonomials},
# min_nconstraints::Int,
# min_ninstances::Int
# )
# n_num_mons = nmonomials(first(values(eval_num_mons)))
# n_denom_mons = nmonomials(first(values(eval_denom_mons)))
# n_mons = n_num_mons + n_denom_mons
# c_i_dict = to_nconstraints_ninstances(path_ids_for_deck, samples)
# picked_c_i = pick_nconstraints_ninstances(c_i_dict, min_nconstraints, min_ninstances)
# ntotal_constraints, _ = total_nconst_ninst(picked_c_i)
# A = zeros(ComplexF64, ntotal_constraints, n_mons)
# @assert size(A, 1) >= size(A, 2)
# row_offset = 0
# for (sampled_path_ids, samplesₖ) in samples
# n_constraints, n_instances = picked_c_i[sampled_path_ids]
# n_constraints == 0 && continue
# sols = samplesₖ.solutions
# ids_rel = Dict(zip(sampled_path_ids, 1:length(sampled_path_ids)))
# deck_path_ids = path_ids_for_deck[sampled_path_ids]
# eval_num = eval_num_mons[samplesₖ]
# eval_denom = eval_denom_mons[samplesₖ]
# for i in 1:n_constraints
# path_id = deck_path_ids[i] # length(deck_path_ids) ≤ n_constraints always
# deck_values = sols[function_id, ids_rel[deck_permutation[path_id]], 1:n_instances]
# rows = (row_offset+1):(row_offset+n_instances)
# A[rows, 1:nunkn_dep(eval_num)] = transpose(eval_num.unkn_dep[:, ids_rel[path_id], 1:n_instances])
# A[rows, (nunkn_dep(eval_num)+1):n_num_mons] = transpose(eval_num.param_only[:, 1:n_instances])
# A[rows, (n_num_mons+1):(n_num_mons+nunkn_dep(eval_denom))] = -transpose(eval_denom.unkn_dep[:, ids_rel[path_id], 1:n_instances]).*deck_values
# A[rows, (n_num_mons+nunkn_dep(eval_denom)+1):end] = -transpose(eval_denom.param_only[:, 1:n_instances]).*deck_values
# row_offset += n_instances
# end
# end
# return A
# end
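# Builds the interpolation (Vandermonde) matrix for one coordinate function of a
# deck transformation ψ: each row evaluates, at one (solution, parameter) sample,
# the numerator monomials followed by the denominator monomials multiplied by
# -ψ(x)ᵢ (the i-th coordinate of the permuted solution). A nullspace vector of
# this matrix therefore gives the coefficients of a rational expression
# ψ(x)ᵢ = num(x, p)/denom(x, p).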
function _deck_vandermonde_matrix(
deck_permutation::Vector{Int},
function_id::Int,
samples::Dict{Vector{Int}, Samples},
eval_num_mons::Dict{Samples, EvaluatedMonomials},
eval_denom_mons::Dict{Samples, EvaluatedMonomials},
)
n_num_mons = nmonomials(first(values(eval_num_mons)))
n_denom_mons = nmonomials(first(values(eval_denom_mons)))
n_mons = n_num_mons + n_denom_mons
A = zeros(ComplexF64, n_mons, n_mons)
@assert size(A, 1) >= size(A, 2)
row_offset = 0
for (sampled_path_ids, samplesₖ) in samples
n_instances = min(n_mons-row_offset, ninstances(samplesₖ))
sols = samplesₖ.solutions
ids_rel = Dict(zip(sampled_path_ids, 1:length(sampled_path_ids)))
eval_num = eval_num_mons[samplesₖ]
eval_denom = eval_denom_mons[samplesₖ]
deck_values = sols[function_id, ids_rel[deck_permutation[1]], 1:n_instances]
rows = (row_offset+1):(row_offset+n_instances)
A[rows, 1:nunkn_dep(eval_num)] = transpose(eval_num.unkn_dep[:, ids_rel[1], 1:n_instances])
A[rows, (nunkn_dep(eval_num)+1):n_num_mons] = transpose(eval_num.param_only[:, 1:n_instances])
A[rows, (n_num_mons+1):(n_num_mons+nunkn_dep(eval_denom))] = -transpose(eval_denom.unkn_dep[:, ids_rel[1], 1:n_instances]).*deck_values
A[rows, (n_num_mons+nunkn_dep(eval_denom)+1):end] = -transpose(eval_denom.param_only[:, 1:n_instances]).*deck_values
row_offset += n_instances
row_offset == n_mons && return A
end
error("not enough sampled instances to fill the deck Vandermonde matrix")
end
function _all_interpolated(symmetries::Vector{Vector{MiExpression}})
for symmetry in symmetries
for expr in symmetry
ismissing(expr) && return false
end
end
return true
end
function _init_symmetries(n_symmetries::Int, unknowns::Vector{Variable})
symmetries = [[missing for j in eachindex(unknowns)] for i in 1:n_symmetries]
symmetries = Vector{Vector{MiExpression}}(symmetries)
symmetries[1] = Expression.(unknowns) # set the first to the identity
return symmetries
end
function _interpolate_deck_function(
deck_permutation::Vector{Int},
function_id::Int,
samples::Dict{Vector{Int}, Samples},
# path_ids_for_deck::Dict{Vector{Int}, Vector{Int}},
eval_num_mons::Dict{Samples, EvaluatedMonomials},
eval_denom_mons::Dict{Samples, EvaluatedMonomials},
num_mons::AbstractMonomialVector,
denom_mons::AbstractMonomialVector,
tols::Tolerances;
logging::Bool=false
)
# min_nconstraints = length(num_mons) + length(denom_mons) + 5 # TODO: x^2 + ax + b example requires (+1)
# min_ninstances = nparam_only(num_mons) + nparam_only(denom_mons) # TODO: understand
A = _deck_vandermonde_matrix(
deck_permutation,
function_id,
samples,
# path_ids_for_deck,
eval_num_mons,
eval_denom_mons
# min_nconstraints,
# min_ninstances
)
logging && println(
"Created vandermonde matrix of size ",
size(A)
)
logging && println("Computing nullspace...")
if tols.nullspace_rtol == 0
N = nullspace(A, atol=tols.nullspace_atol)
else
N = nullspace(A, atol=tols.nullspace_atol, rtol=tols.nullspace_rtol)
end
coeffs = transpose(N)
logging && println("Size of the transposed nullspace: ", size(coeffs))
if size(coeffs, 1) == 0 return missing end
logging && println("Computing the reduced row echelon form of the transposed nullspace...\n")
coeffs = rref(coeffs, tols.rref_tol)
sparsify!(coeffs, tols.sparsify_tol; digits=1)
coeffs = remove_zero_nums_and_denoms(coeffs, num_mons, denom_mons)
if size(coeffs, 1) == 0 return missing end
coeffs = good_representative(coeffs)
return rational_function(coeffs, num_mons, denom_mons; tol=tols.sparsify_tol)
end
function _interpolate_deck_function(
deck_permutation::Vector{Int},
function_id::Int,
samples::Dict{Vector{Int}, Samples},
# path_ids_for_deck::Dict{Vector{Int}, Vector{Int}},
eval_mons::Dict{Samples, EvaluatedMonomials},
mons::AbstractMonomialVector,
tols::Tolerances;
logging::Bool=false
)
return _interpolate_deck_function(
deck_permutation,
function_id,
samples,
# path_ids_for_deck,
eval_mons,
eval_mons,
mons,
mons,
tols;
logging=logging
)
end
function orbit(deck_permutations::Vector{Vector{Int}}, el)
return unique(vcat([perm[el] for perm in deck_permutations]...))
end
# function path_ids_for_deck_computation(F::SampledSystem)
# path_ids_all_deck = [Dict{Vector{Int}, Vector{Int}}() for _ in deck_permutations(F)]
# for (i, deck) in enumerate(deck_permutations(F))
# path_ids_deck = path_ids_all_deck[i]
# for path_ids_samples in keys(samples(F))
# if length(path_ids_samples) == nsolutions(F)
# path_ids_deck[path_ids_samples] = path_ids_samples
# else
# path_ids_deck[path_ids_samples] = []
# for path_id in path_ids_samples
# if deck[path_id] in path_ids_samples
# push!(path_ids_deck[path_ids_samples], path_id)
# end
# end
# end
# end
# end
# return path_ids_all_deck
# end
# function min_nconstraints_among_deck(
# path_ids_all_deck::Vector{Dict{Vector{Int}, Vector{Int}}},
# samples::Dict{Vector{Int}, Samples}
# )
# return min([sum(length(path_ids_deck)*ninstances(samples[path_ids_samples]) for (path_ids_samples, path_ids_deck) in path_ids_all_deck[i]) for i in 1:length(path_ids_all_deck)]...)
# end
# function _sample_for_deck_computation!(
# F::SampledSystem;
# min_nconstraints::Int,
# min_ninstances::Int
# )
# path_ids_all_deck = path_ids_for_deck_computation(F)
# min_nconst = min_nconstraints_among_deck(path_ids_all_deck, samples(F))
# Δ_ninstances = min_ninstances - ninstances(F)
# Δ_min_nconst = min_nconstraints - min_nconst
# n_sols = nsolutions(F)
# if Δ_ninstances > 0
# if Δ_min_nconst > 0
# sample!(F; n_instances=div(Δ_min_nconst, n_sols))
# Δ_ninstances -= div(Δ_min_nconst, n_sols)
# if Δ_ninstances > 0
# path_ids = orbit(deck_permutations(F), 1:max(div(mod(Δ_min_nconst, n_sols), Δ_ninstances, RoundUp), 1))
# sample!(F, path_ids=path_ids, n_instances=Δ_ninstances)
# else
# sample!(F; path_ids=orbit(deck_permutations(F), 1:mod(Δ_min_nconst, n_sols)), n_instances=1)
# end
# else
# sample!(F; path_ids=orbit(deck_permutations(F), 1), n_instances=Δ_ninstances)
# end
# else
# if Δ_min_nconst > 0
# sample!(F; n_instances=div(Δ_min_nconst, n_sols))
# sample!(F; path_ids=orbit(deck_permutations(F), 1:mod(Δ_min_nconst, n_sols)), n_instances=1)
# end
# end
# return path_ids_for_deck_computation(F)
# end
function _sample_for_deck_computation(
F::SampledSystem;
n_instances::Int
)
Δ_ninstances = n_instances - ninstances(F)
if Δ_ninstances > 0
sample!(F; path_ids=orbit(deck_permutations(F), 1), n_instances=Δ_ninstances)
end
end
function symmetries_fixing_parameters_graded!(
F::SampledSystem,
grading::Grading;
degree_bound::Integer=1,
param_dep::Bool=true,
tols::Tolerances=Tolerances(),
logging::Bool=false
)
C = deck_permutations(F)
symmetries = _init_symmetries(length(C), unknowns(F))
mons = DenseMonomialVector{Int8, Int16}(unknowns=unknowns(F), parameters=parameters(F))
for d in 1:degree_bound
extend!(mons; degree=d, extend_params=param_dep)
mon_classes = to_classes(mons, grading)
mon_classes_vect = collect(values(mon_classes))
max_n_mons = max(length.(mon_classes_vect)...)
# max_nparam_only = max(nparam_only.(mon_classes_vect)...)
# path_ids_for_deck = _sample_for_deck_computation!(
# F;
# min_nconstraints = 2*max_n_mons, # TODO: understand this
# min_ninstances = 2*max_n_mons # TODO: understand this
# )
_sample_for_deck_computation(F; n_instances=2*max_n_mons)
for (num_deg, num_mons) in mon_classes
eval_num_mons = nothing
for i in 1:nunknowns(F)
denom_deg = _denom_deg(num_deg, grading, i) # i-th variable
denom_mons = get(mon_classes, denom_deg, nothing)
if !isnothing(denom_mons)
num_denom_mons = vcat(num_mons, denom_mons)
if iszero(gcd(num_denom_mons)) && !is_param_only(num_denom_mons)
if isnothing(eval_num_mons)
eval_num_mons = evaluate(num_mons, samples(F))
end
eval_denom_mons = evaluate(denom_mons, samples(F))
for (j, symmetry) in enumerate(symmetries)
if ismissing(symmetry[i])
symmetry[i] = _interpolate_deck_function(
C[j],
i,
samples(F),
# path_ids_for_deck[j],
eval_num_mons,
eval_denom_mons,
num_mons,
denom_mons,
tols;
logging=logging
)
if logging && !ismissing(symmetry[i])
printstyled(
"Good representative for the ",
to_ordinal(j),
" symmetry, variable ",
unknowns(F)[i],
":\n",
color=:red
)
println(symmetry[i])
end
end
end
end
end
end
if _all_interpolated(symmetries)
logging && printstyled("--- All symmetries are interpolated ---\n", color=:blue)
return DeckTransformationGroup(symmetries, F)
end
end
end
return DeckTransformationGroup(symmetries, F)
end
function symmetries_fixing_parameters_dense!(
F::SampledSystem;
degree_bound::Integer=1,
param_dep::Bool=true,
tols::Tolerances=Tolerances(),
logging::Bool=false
)
C = deck_permutations(F)
symmetries = _init_symmetries(length(C), unknowns(F))
mons = DenseMonomialVector{Int8, Int16}(unknowns=unknowns(F), parameters=parameters(F))
for d in 1:degree_bound
logging && printstyled("Started interpolation for degree = ", d, "...\n"; color=:green)
extend!(mons; degree=d, extend_params=param_dep)
# path_ids_for_deck = _sample_for_deck_computation!(
# F;
# min_nconstraints = 2*length(mons)+1, # TODO: understand this
# min_ninstances = 2*nparam_only(mons) # TODO: understand this
# )
_sample_for_deck_computation(F; n_instances=2*length(mons))
logging && println("Evaluating monomials...\n")
# TODO: pick samples for evaluation
evaluated_mons = evaluate(mons, samples(F))
for (i, symmetry) in enumerate(symmetries)
logging && printstyled("Interpolating the ", i, "-th deck transformation...\n"; color=:blue)
for j in 1:nunknowns(F)
if ismissing(symmetry[j])
symmetry[j] = _interpolate_deck_function(
C[i],
j,
samples(F),
# path_ids_for_deck[i],
evaluated_mons,
mons,
tols;
logging=logging
)
if logging && !ismissing(symmetry[j])
printstyled(
"Good representative for the ",
i,
"-th deck transformation, variable ",
unknowns(F)[j],
":\n";
color=:red
)
println(symmetry[j])
end
end
end
end
if _all_interpolated(symmetries)
logging && printstyled("--- All deck transformations are interpolated ---\n"; color=:blue)
return DeckTransformationGroup(symmetries, F)
end
end
return DeckTransformationGroup(symmetries, F)
end
to_CC(scaling::Tuple{Tv, SparseVector{Tv,Ti}}) where {Tv<:Integer,Ti<:Integer} = [cis(2*pi*k/scaling[1]) for k in scaling[2]]
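# Numerically applies a deck transformation to (x₀, p₀): track x₀ to a sampled
# parameter instance p₁, find which stored solution it matches, apply the deck
# permutation to that solution, and track the permuted solution back to p₀ along
# the same path in parameter space.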
function _deck_action(
deck_permutation::Vector{Int},
(x₀, p₀)::NTuple{2, AbstractVector{<:Number}},
F::SampledSystem;
tol::Real=1e-5
)
sols, params = all_solutions_samples(F).solutions, all_solutions_samples(F).parameters
instance_id = rand(1:size(sols, 3))
p₁ = params[:, instance_id]
p_inter = randn(ComplexF64, nparameters(F))
x₁ = track_parameter_homotopy(F.system, (x₀, p₀), p₁, p_inter) # along path γ in the parameter space
x₁_id = findfirst(x₁, sols[:,:,instance_id]; tol=tol)
isnothing(x₁_id) && return nothing
Ψ_x₁ = sols[:,deck_permutation[x₁_id], instance_id]
Ψ_x₀ = track_parameter_homotopy(F.system, (Ψ_x₁, p₁), p₀, p_inter) # should be along the same path γ
return Ψ_x₀
end
# supposes scaling is a symmetry of F
function _deck_commutes_with_scaling(
deck_permutation::Vector{Int},
scaling::Tuple{Tv, SparseVector{Tv,Ti}},
F::SampledSystem;
tol::Real=1e-5
) where {Tv<:Integer,Ti<:Integer}
sols, params = all_solutions_samples(F).solutions, all_solutions_samples(F).parameters
inst_id = rand(1:size(sols, 3))
p₀ = params[:, inst_id]
Φ_p₀ = to_CC(scaling)[end-nparameters(F)+1:end].*p₀
sol_id = rand(1:size(sols, 2))
x₀ = sols[:, sol_id, inst_id]
Ψ_x₀ = sols[:, deck_permutation[sol_id], inst_id]
Φ_x₀ = to_CC(scaling)[1:nunknowns(F)].*x₀
ΦΨ_x₀ = to_CC(scaling)[1:nunknowns(F)].*Ψ_x₀
ΨΦ_x₀ = _deck_action(deck_permutation, (Φ_x₀, Φ_p₀), F; tol=tol)
isnothing(ΨΦ_x₀) && return false
return norm(ΦΨ_x₀-ΨΦ_x₀)<tol
end
function _all_deck_commute(
F::SampledSystem,
scaling::Tuple{Tv, SparseVector{Tv, Ti}};
tol::Real=1e-5
) where {Tv<:Integer,Ti<:Integer}
for deck_permutation in deck_permutations(F)
if !_deck_commutes_with_scaling(deck_permutation, scaling, F; tol=tol)
return false
end
end
return true
end
function _scalings_commuting_with_deck(F::SampledSystem, scalings::ScalingGroup)
grading = scalings.grading
final_grading = Grading{Int8, Int16}(nfree(grading), grading.free_part, [])
for (sᵢ, Uᵢ) in grading.mod_part
Vᵢ = Array{Int}(undef, 0, size(Uᵢ, 2))
# TODO: Uᵢ ↦ all linear combinations of rows of Uᵢ (might not commute with 2 gens, but commutes with their combination)
for j in axes(Uᵢ, 1)
if _all_deck_commute(F, (sᵢ, Uᵢ[j, :]))
Vᵢ = [Vᵢ; hcat(Uᵢ[j, :]...)]
end
end
if size(Vᵢ, 1) > 0
push!(final_grading.mod_part, (sᵢ, Vᵢ))
end
end
return ScalingGroup(reduce(final_grading), scalings.vars)
end
function symmetries_fixing_parameters!(
F::SampledSystem;
degree_bound::Integer=1,
param_dep::Bool=true,
tols::Tolerances=Tolerances(),
logging::Bool=false
)
if length(deck_permutations(F)) == 1 # trivial group of symmetries
return DeckTransformationGroup(F)
end
scalings = _scalings_commuting_with_deck(F, scaling_symmetries(F))
scalings = param_dep ? scalings : restrict_scalings(scalings, unknowns(F)) # TODO: justify!
if isempty(grading(scalings))
logging && printstyled("Running dense version...\n", color=:green)
return symmetries_fixing_parameters_dense!(
F;
degree_bound=degree_bound,
param_dep=param_dep,
tols=tols,
logging=logging
)
else
logging && printstyled("Running graded version...\n", color=:green)
return symmetries_fixing_parameters_graded!(
F,
grading(scalings);
degree_bound=degree_bound,
tols=tols,
logging=logging
)
end
end
"""
symmetries_fixing_parameters(F::System; degree_bound=1, param_dep=true, kwargs...)
Given a polynomial system `F`, returns the group of symmetries
of `F` that fix the parameters. The keyword
argument `degree_bound` sets the upper bound for the
degrees of the numerator and denominator polynomials in the expressions
for the symmetries. The `param_dep` keyword argument specifies
whether the functions defining the symmetries may depend
on the parameters of `F`.
```julia-repl
julia> @var x[1:2] p[1:2];
julia> F = System([x[1]^2 - x[2]^2 - p[1], 2*x[1]*x[2] - p[2]]; variables=x, parameters=p);
julia> symmetries_fixing_parameters(F; degree_bound=1, param_dep=false)
DeckTransformationGroup of order 4
structure: C2 x C2
action:
1st map:
x₁ ↦ x₁
x₂ ↦ x₂
2nd map:
x₁ ↦ -x₁
x₂ ↦ -x₂
3rd map:
x₁ ↦ im*x₂
x₂ ↦ -im*x₁
4th map:
x₁ ↦ -im*x₂
x₂ ↦ im*x₁
```
"""
function symmetries_fixing_parameters( # TODO: extend to take an expression map
F::System;
xp₀::Union{Nothing, NTuple{2, AbstractVector{<:Number}}}=nothing,
degree_bound::Integer=1,
param_dep::Bool=true,
tols::Tolerances=Tolerances(),
monodromy_options::Tuple=(),
logging::Bool=false
)
F = run_monodromy(F, xp₀; monodromy_options...)
return symmetries_fixing_parameters!(
F;
degree_bound=degree_bound,
param_dep=param_dep,
tols=tols,
logging=logging
)
end
# ----------------------- DEBUGGING TOOLS -----------------------
export deck_vandermonde_dense,
deck_vandermonde_graded,
to_multiexponent,
reduced_nullspace,
to_coefficients
function reduced_nullspace(A::AbstractMatrix{<:Number}; tols::Tolerances=Tolerances())
if tols.nullspace_rtol == 0
N = nullspace(A, atol=tols.nullspace_atol)
else
N = nullspace(A, atol=tols.nullspace_atol, rtol=tols.nullspace_rtol)
end
N = transpose(N)
size(N, 1) == 0 && return N
N = rref(N, tols.rref_tol)
sparsify!(N, tols.sparsify_tol; digits=1)
return N
end
function deck_vandermonde_dense(
F::SampledSystem;
deck_id::Int,
var::Variable,
degree::Integer=1,
param_dep::Bool=true
)
mons = DenseMonomialVector{Int8, Int16}(unknowns=unknowns(F), parameters=parameters(F))
extend!(mons; degree=degree, extend_params=param_dep)
# mirrors the sampling done in symmetries_fixing_parameters_dense!
_sample_for_deck_computation(F; n_instances=2*length(mons))
eval_mons = evaluate(mons, samples(F))
function_id = findfirst(x->x==var, unknowns(F))
return _deck_vandermonde_matrix(
deck_permutations(F)[deck_id],
function_id,
samples(F),
eval_mons,
eval_mons
), mons
end
function to_multiexponent(mon::Expression, vars::Vector{Variable})
es, cs = exponents_coefficients(mon, vars)
if length(cs) > 1 || !isone(cs[1])
throw(ArgumentError("Input expression is not a monomial"))
else
return SparseVector{Int8,Int16}(sparse(es[:,1]))
end
end
function deck_vandermonde_graded(
F::SampledSystem,
grading::Grading;
deck_id::Int,
var::Variable,
deg::Integer=1,
param_dep::Bool=true,
num_mon::Expression
)
mons = DenseMonomialVector{Int8, Int16}(unknowns=unknowns(F), parameters=parameters(F))
extend!(mons; degree=deg, extend_params=param_dep)
mon_classes = to_classes(mons, grading)
var_id = findfirst(x->x==var, variables(F))
mexp = to_multiexponent(num_mon, variables(F))
num_deg = degree(mexp, grading)
num_mons = mon_classes[num_deg]
denom_deg = _denom_deg(num_deg, grading, var_id)
denom_mons = get(mon_classes, denom_deg, nothing)
isnothing(denom_mons) && error("no monomials of the required denominator degree exist")
# mirrors the sampling done in symmetries_fixing_parameters_graded!
_sample_for_deck_computation(F; n_instances=2*(length(num_mons)+length(denom_mons)))
eval_num_mons = evaluate(num_mons, samples(F))
eval_denom_mons = evaluate(denom_mons, samples(F))
return _deck_vandermonde_matrix(
deck_permutations(F)[deck_id],
var_id,
samples(F),
eval_num_mons,
eval_denom_mons
), (num_mons, denom_mons)
end
function to_coefficients(
expr::Expression,
mons::SparseMonomialVector{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
es, cs = exponents_coefficients(expr, variables(mons))
mexps = [SparseVector{Tv,Ti}(sparse(esᵢ)) for esᵢ in eachcol(es)]
ids = [findfirst(m, mons) for m in mexps]
v = zeros(ComplexF64, length(mons))
v[ids] = cs
return v
end
function to_coefficients(
num_expr::Expression,
denom_expr::Expression,
num_mons::SparseMonomialVector{Tv,Ti},
denom_mons::SparseMonomialVector{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
return vcat(to_coefficients(num_expr, num_mons), to_coefficients(denom_expr, denom_mons))
end
function to_coefficients(
num_expr::Expression,
denom_expr::Expression,
mons::SparseMonomialVector{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
return to_coefficients(num_expr, denom_expr, mons, mons)
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 2925 | export DecomposablePolynomialSystem,
decompose
struct DecomposablePolynomialSystem
system::SampledSystem
is_decomposable::Bool
deck_transformations::Vector{Vector{Expression}}
factorizing_maps::Vector{FactorizingMap}
intermediate_varieties::Vector{SampledSystem}
end
DecomposablePolynomialSystem(system::SampledSystem, is_decomposable::Bool) = DecomposablePolynomialSystem(system, is_decomposable, [], [], [])
function DecomposablePolynomialSystem(
system::SampledSystem,
deck_transformations::Vector{Vector{Expression}},
factorizing_maps::Vector{FactorizingMap}
)
return DecomposablePolynomialSystem(
system,
true,
deck_transformations,
factorizing_maps,
[SampledSystem() for _ in eachindex(factorizing_maps)]
)
end
function decompose(
F::System,
xp0::Tuple{Vector{CC}, Vector{CC}};
degDeck::Int64=1,
degFact::Int64=1,
degImpl::Int64=1,
tol::Float64=1e-5,
paramDepDeck::Bool=false,
paramDepFact::Bool=false
)::DecomposablePolynomialSystem
F = run_monodromy(F, xp0)
if length(F.block_partitions) == 0
return DecomposablePolynomialSystem(F, false)
end
n_unknowns = length(xp0[1])
n_params = length(xp0[2])
n_sols = size(F.solutions, 2)
n_vars = paramDepDeck ? n_unknowns + n_params : n_unknowns
n_instances_deck = Int(ceil(2/n_sols*binomial(n_vars + degDeck, n_vars)))
n_vars = paramDepFact ? n_unknowns + n_params : n_unknowns
n_constraints = min([num_constraints(F.block_partitions[i]) for i in 1:length(F.block_partitions)]...)
n_instances_fact = Int(ceil(1/n_constraints*binomial(n_vars + degFact, degFact)))
F = sample_system(F, max(n_instances_deck, n_instances_fact))
deck_transformations = compute_deck_transformations(F, degree=degDeck, tol=tol, param_dep=paramDepDeck)
factorizing_maps = compute_factorizing_maps(F, degree=degFact, tol=tol, param_dep=paramDepFact)
n_new_unknowns = [length(factorizing_maps[i].map) + n_params for i in eachindex(factorizing_maps)]
n_new_mons = [binomial(n_new_unknowns[i] + degImpl, degImpl) for i in eachindex(factorizing_maps)]
n_instances_impl = max([Int(ceil(1/length(F.block_partitions[i])*n_new_mons[i])) for i in eachindex(factorizing_maps)]...)
n_instances_impl = max(n_instances_impl, binomial(n_params + degImpl, degImpl))
F = sample_system(F, n_instances_impl)
DPS = DecomposablePolynomialSystem(F, deck_transformations, factorizing_maps)
for i in eachindex(factorizing_maps)
factorizing_map = factorizing_maps[i]
@var y[i, 1:length(factorizing_map.map)]
DPS.intermediate_varieties[i] = implicitize(F, factorizing_map, F.block_partitions[i], new_vars=y, degree=degImpl)
DPS.factorizing_maps[i].domain = Ref(DPS.system)
DPS.factorizing_maps[i].image = Ref(DPS.intermediate_varieties[i])
DPS.factorizing_maps[i].monodromy_group = action_on_block(F.monodromy_group, F.block_partitions[i])
end
return DPS
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 1538 | export ExpressionMap
struct ExpressionMap
domain_vars::Vector{Variable}
image_vars::Vector{Variable}
exprs::Vector{Expression}
function ExpressionMap(domain_vars, image_vars, funcs)
# TODO: exclude empty vectors, repetitions in vars
return new(domain_vars, image_vars, funcs)
end
end
# TODO: what if vars not in funcs? What if funcs has variables not present in vars?
function ExpressionMap(vars::Vector{Variable}, exprs::Vector{Expression})
@assert length(vars) == length(exprs) "#vars ≠ #exprs, specify image variables"
return ExpressionMap(vars, vars, exprs)
end
Base.getindex(f::ExpressionMap, i::Int) = (f.image_vars[i], f.exprs[i])
function Base.getindex(f::ExpressionMap, var::Variable)
id = findfirst(x->x==var, f.image_vars)
if isnothing(id)
error("The variable $(var) isn't present in the image variables")
end
return f.exprs[id]
end
# TODO
function (f::ExpressionMap)(x)
end
# TODO
function Base.:(∘)(f::ExpressionMap, g::ExpressionMap)
end
# TODO
function is_dense(f::ExpressionMap)
end
function Base.show(io::IO, map::ExpressionMap)
println(io, "ExpressionMap: ℂ$(superscriptnumber(length(map.domain_vars))) ⊃ X - - > ℂ$(superscriptnumber(length(map.exprs)))")
println(io, " action:")
if map.domain_vars == map.image_vars
for (i, var) in enumerate(map.domain_vars)
print(io, " ", var, " ↦ ", map.exprs[i])
i < length(map.domain_vars) && print(io, "\n")
end
else
# TODO
end
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 2267 | export implicitize
function evaluate_map_at_samples(factorizing_map::FactorizingMap, block_partition::Vector{Vector{Int}}, F::SampledSystem)::Array{ComplexF64, 3}
n_unknowns = length(factorizing_map.map)
n_blocks = length(block_partition)
n_instances = size(F.parameters, 2)
new_solutions = zeros(ComplexF64, n_unknowns, n_blocks, n_instances)
for i in 1:n_instances
params = F.parameters[:, i]
for j in 1:n_blocks
sol_idx = block_partition[j][1]
sol = F.solutions[:, sol_idx, i]
new_solutions[:, j, i] = ComplexF64.(expand.(subs(factorizing_map.map, vcat(variables(F.equations), parameters(F.equations)) => vcat(sol, params))))
end
end
return new_solutions
end
function implicitize(
F::SampledSystem,
factorizing_map::FactorizingMap,
block_partition::Vector{Vector{Int}};
new_vars::Vector{Variable}=Vector{Variable}([]),
mons::Vector{Expression}=Vector{Expression}([]),
degree::Int64=0,
tol::Float64=1e-5
)::SampledSystem
if isempty(factorizing_map.map)
return SampledSystem()
end
new_solutions = evaluate_map_at_samples(factorizing_map, block_partition, F)
if new_vars == []
@var y[1:length(factorizing_map.map)]
new_vars = y
end
if mons == []
# the monomial basis depends on new_vars, so new_vars must be set first
mons = get_monomials(vcat(new_vars, parameters(F.equations)), degree)
end
evaluated_mons = evaluate_monomials_at_samples(mons, new_solutions, F.parameters, vcat(new_vars, parameters(F.equations)))
m, n, k = size(evaluated_mons)
A = Matrix{ComplexF64}(transpose(reshape(evaluated_mons, m, n*k)))
# A = A[1:m, 1:m]
@assert size(A, 1) >= size(A, 2)
coeffs = Matrix{ComplexF64}(transpose(nullspace(A)))
if size(coeffs, 1) != 0
println("Computing the reduced row echelon form of the transposed nullspace...")
coeffs = sparsify(rref(coeffs, tol), tol, digits=1)
end
G = System([dot(coeffs[i,:], mons) for i in 1:size(coeffs, 1)], variables=new_vars, parameters=parameters(F.equations))
mon = action_on_blocks(F.monodromy_group, block_partition)
block_partitions = all_block_partitions(mon)
deck = centralizer(mon)
return SampledSystem(G, new_solutions, F.parameters, mon, block_partitions, deck)
end
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 9994 | export rational_function,
polynomial_function,
remove_zero_nums_and_denoms
using LinearAlgebra: norm, dot
function rational_function(
coeffs::AbstractVector{<:Number},
num_mons::AbstractMonomialVector,
denom_mons::AbstractMonomialVector;
logging::Bool=false,
tol::Float64=1e-5
)
n_num_mons, n_denom_mons = length(num_mons), length(denom_mons)
@assert length(coeffs) == n_num_mons + n_denom_mons
num, denom = coeffs[1:n_num_mons], coeffs[n_num_mons+1:end]
if norm(num) < tol
@warn "Numerator close to zero"
end
@assert norm(denom) > tol
nonzero_ids = findall(!iszero, denom)
nonzero_denom = denom[nonzero_ids]
if all(x->x==nonzero_denom[1], nonzero_denom)
num /= nonzero_denom[1]
sparsify!(num, tol; digits=1)
denom[nonzero_ids] = ones(length(nonzero_ids))
end
num, denom = simplify_numbers(num), simplify_numbers(denom)
if logging
println("numerator = ", sum(to_expressions(num_mons).*num))
println("denominator = ", sum(to_expressions(denom_mons).*denom))
end
p = sum(to_expressions(num_mons).*num)
q = sum(to_expressions(denom_mons).*denom)
logging && println("rational function = ", p/q)
return p/q
end
function polynomial_function(
coeffs::AbstractVector{<:Number},
mons::AbstractMonomialVector;
logging::Bool=false
)
@assert length(coeffs) == length(mons)
p = sum(to_expressions(mons).*coeffs)
logging && println("polynomial = ", p)
return p
end
function check_func_type(func_type::String)
if (func_type != "polynomial" && func_type != "rational")
error("func_type argument must be either \"polynomial\" or \"rational\"")
end
end
function reconstruct_function(
coeffs::AbstractVector{<:Number},
mons::AbstractMonomialVector;
func_type::String,
logging::Bool=true,
tol::Float64=1e-5
)
check_func_type(func_type)
if func_type == "rational"
return rational_function(coeffs, mons, mons, logging=logging, tol=tol)
else
return polynomial_function(coeffs, mons, logging=logging)
end
end
function remove_zero_nums_and_denoms(
coeffs::AbstractMatrix{<:Number},
num_mons::AbstractMonomialVector,
denom_mons::AbstractMonomialVector;
logging::Bool=false
)
reasonable_rows = []
n_num_mons, n_denom_mons = length(num_mons), length(denom_mons)
@assert size(coeffs, 2) == n_num_mons + n_denom_mons
for i in axes(coeffs, 1)
if (!iszero(coeffs[i, 1:n_num_mons]) && !iszero(coeffs[i, n_num_mons+1:end]))
push!(reasonable_rows, i)
elseif logging
if iszero(coeffs[i, 1:n_num_mons])
println(
"Denominator removed: ",
polynomial_function(coeffs[i, n_num_mons+1:end], denom_mons)
)
else
println(
"Numerator removed: ",
polynomial_function(coeffs[i, 1:n_num_mons], num_mons)
)
end
end
end
return coeffs[reasonable_rows, :]
end
function remove_zero_nums_and_denoms(
coeffs::AbstractMatrix{<:Number},
mons::AbstractMonomialVector;
logging::Bool=false
)
return remove_zero_nums_and_denoms(coeffs, mons, mons; logging=logging)
end
function rational_functions(
coeffs::AbstractMatrix{<:Number},
num_mons::AbstractMonomialVector,
denom_mons::AbstractMonomialVector;
logging::Bool=true,
tol::Float64=1e-5
)
for k in eachindex(axes(coeffs, 1))
rational_function(coeffs[k,:], num_mons, denom_mons, logging=logging, tol=tol)
println()
end
end
function reconstruct_functions(
coeffs::AbstractMatrix{<:Number},
mons::AbstractMonomialVector;
func_type::String,
logging::Bool=true,
tol::Float64=1e-5
)
end
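# Picks the row of coeffs with the fewest nonzero entries (the sparsest representative).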
function good_representative(coeffs::AbstractMatrix{<:Number})
return coeffs[findmin(vec(sum(coeffs .!= 0, dims=2)))[2], :]
end
# NOT READY YET...
function best_representative(rational_functions::AbstractMatrix{<:Number}, tol::Float64)
n_mons = Int(size(rational_functions, 2)/2)
A = rational_functions[:,n_mons+1:end]
A = [-1 zeros(1, n_mons-1); A]
R = Matrix{CC}(transpose(nullspace(transpose(A))))
R = sparsify(rref(R, tol), tol, digits=1) # 1*R[1,:] + r2*R[2,:] + ... are the solutions
a = R[:,2:end]*rational_functions[:,1:n_mons] # 1*a[1,:] + r2*a[2,:] + ... are possible numerators
return nothing
end
function vandermonde_matrix(
values::AbstractVector{<:Number},
eval_mons::AbstractMatrix{<:Number},
func_type::String
)
check_func_type(func_type)
if func_type == "polynomial"
return [transpose(eval_mons) -values]
elseif func_type == "rational"
return [transpose(eval_mons) -transpose(eval_mons).*values]
end
end
# supposes each md in mds is a multidegree in both unknowns and parameters
# TODO: The implementation below (with _) is more efficient (approx 2x),
# TODO: since it exploits the sparsity of multidegrees. REMOVE THIS METHOD?
# function HomotopyContinuation.evaluate(mons::MonomialVector, samples::Samples)
# solutions = samples.solutions
# parameters = samples.parameters
# n_unknowns, n_sols, n_instances = size(solutions)
# mds = mons.mds
# n_mds = length(mds)
# evaluated_mons = zeros(CC, n_mds, n_sols, n_instances)
# for i in 1:n_instances
# params = view(parameters, :, i)
# params_eval = [prod(params.^md[n_unknowns+1:end]) for md in mds]
# sols = view(solutions, :, :, i)
# for j in 1:n_mds
# evaluated_mons[j, :, i] = vec(prod(sols.^mds[j][1:n_unknowns], dims=1)).*params_eval[j]
# end
# end
# return evaluated_mons
# end
function interpolate(
A::AbstractMatrix{CC},
mons::AbstractMonomialVector;
func_type::String,
tol::Float64=1e-5
)
check_func_type(func_type)
@assert size(A, 1) >= size(A, 2)
if func_type == "polynomial"
@assert size(A, 2) == length(mons)
else
@assert size(A, 2) == 2*length(mons)
end
C = Matrix{CC}(transpose(nullspace(A)))
C = sparsify(rref(C, tol), tol, digits=1)
if size(C, 1) == 0
return nothing
elseif size(C, 1) == 1
return reconstruct_function(C[1, :], mons, func_type=func_type)
else
# choose good/best representative
return reconstruct_function(C[1, :], mons, func_type=func_type)
end
end
# If samples are generated from a proper subvariety X ⊂ Cⁿ, then the method returns just one representative f
# of the equivalence class of functions f+I(X) with the given values. Obviously, if X = Cⁿ, then there is only
# one such function.
function interpolate(
values::AbstractVector{CC},
mons::AbstractMonomialVector,
eval_mons::AbstractMatrix{CC};
func_type::String,
tol::Float64=1e-5
)
check_func_type(func_type)
A = vandermonde_matrix(values, eval_mons, func_type)
return interpolate(A, mons, func_type=func_type, tol=tol)
end
function interpolate_vanishing_polynomials(
samples::Samples,
vars::Vector{Variable},
mons::AbstractMonomialVector;
tol::Float64=1e-5
)
A = evaluate_monomials_at_samples(mons, samples, vars)
A = reshape(A, size(A, 1), size(A, 2)*size(A, 3))
N = Matrix{CC}(transpose(nullspace(transpose(A))))
N = sparsify(rref(N, tol), tol, digits=1)
return N*mons
end
function interpolate_vanishing_polynomials(
samples::AbstractMatrix{CC},
vars::Vector{Variable},
mons::AbstractMonomialVector;
tol::Float64=1e-5
)
@assert size(samples, 1) == length(vars)
@assert size(samples, 2) >= length(mons)
A = evaluate_monomials_at_samples(mons, samples, vars)
N = Matrix{CC}(transpose(nullspace(transpose(A))))
N = sparsify(rref(N, tol), tol, digits=1)
return N*mons
end
function interpolate_dense(
samples::AbstractMatrix{CC},
values::AbstractVector{CC};
vars::Vector{Variable},
degree::Int,
func_type::String,
tol::Float64=1e-5
)
end
function interpolate_sparse(
samples::AbstractMatrix{CC},
values::AbstractVector{CC};
vars::Vector{Variable},
degree::Int,
func_type::String,
tol::Float64=1e-5
)
end
function interpolate(
samples::AbstractMatrix{CC},
values::AbstractVector{CC};
vars::Vector{Variable},
degree::Int,
func_type::String,
method::String,
tol::Float64=1e-5
)
if method == "dense"
return interpolate_dense(samples, values; vars=vars, degree=degree, func_type=func_type, tol=tol)
elseif method == "sparse"
return interpolate_sparse(samples, values; vars=vars, degree=degree, func_type=func_type, tol=tol)
else
error("Not supported method '", method, "' for interpolation")
end
end
#----------- SPARSE INTERPOLATION TESTING -------------
function inv_chinese(d::Int, p::Vector{Int})
n = length(p)
m = prod(p)
D = zeros(Int, n)
for i in 1:n
D[i] = mod(d*invmod(Int(m/p[i]), p[i]), p[i])
end
return D
end
function companion_matrix(λ::Vector{CC})::Matrix{CC}
d = length(λ)
return [[zeros(1,d-1); eye(d-1)] -λ]
end
function f(x, y)
return x^20*y + 2*x^2 + 3*x + 4*y^11 + 5
end
function interpolate_sparse()
end
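# The commented example below sketches Ben-Or/Tiwari-style sparse interpolation:
# evaluate f at powers of roots of unity, recover the term exponents from a
# Hankel system via the eigenvalues of a companion matrix and inverse CRT, then
# solve a transposed Vandermonde system for the coefficients.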
# t = 5 # number of terms in f
# p₁, p₂ = 139, 193 # pᵢ > deg(fₓᵢ)
# m = p₁*p₂
# ω₁, ω₂ = cispi(2/p₁), cispi(2/p₂) # pᵢ-th roots of unity
# ω = cispi(2/m)
# α = [f(ω₁^s, ω₂^s) for s in 0:2*t-1]
# H₀ = VV2M([[α[j] for j in i:i+t-1] for i in 1:t])
# h = -[α[i] for i in t+1:2*t]
# Hh = [H₀ -h]
# N = nullspace(Hh)
# λ = M2V(p2a(N))
# M = companion_matrix(λ)
# b = eigvals(M)
# d = [mod(Int(round(log(ω, b[i]))), m) for i in 1:length(b)]
# e = [inv_chinese(d[i], [p₁, p₂]) for i in 1:length(d)]
# B = [transpose(VV2M([b.^i for i in 0:t-1])) -α[1:t]]
# N = nullspace(B)
# c = round.(M2V(p2a(N)))
# @var x y
# f_approx = dot(c, [prod([x,y].^e[i]) for i in 1:length(e)])
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 9137 | export invariants
function num_constraints(block_partition::Vector{Vector{Int}})::Int
return length(block_partition)*(length(block_partition[1])-1)
end
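# Sum of the integers m+1, m+2, ..., n (assumes 0 ≤ m ≤ n).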
function sumFromTo(m, n)
return Int(n*(n+1)/2 - m*(m+1)/2)
end
function num_instances(n_unknowns::Int, n_params::Int, degree::Int, block_partition::Vector{Vector{Int}})::Int
n_constraints = num_constraints(block_partition)
n_all_mons = binomial(n_unknowns + n_params + degree, degree)
n_param_mons = binomial(n_params + degree, degree)
n_minors = sumFromTo(n_param_mons - 1, n_all_mons - 1)
return Int(ceil(n_minors/n_constraints))
end
function create_vandermonde_matrix(block_partition::Vector{Vector{Int}}, eval_mons::Array{ComplexF64, 3}, eval_param_mons::Array{ComplexF64, 2})::Matrix{ComplexF64}
n_instances = size(eval_mons, 3)
n_blocks = length(block_partition)
block_size = length(block_partition[1])
n_constraints = num_constraints(block_partition)
n_unknown_mons = size(eval_mons, 1)
n_param_mons = size(eval_param_mons, 1)
n_minors = sumFromTo(n_param_mons - 1, n_unknown_mons + n_param_mons - 1)
A = zeros(ComplexF64, n_instances*n_constraints, n_minors)
for i in 1:n_instances
for j in 1:n_blocks
block = block_partition[j]
for k in 1:block_size-1
row_idx = (i - 1)*n_constraints + (j-1)*(block_size-1) + k
col_idx = 1
for m in 1:n_unknown_mons
for n in (m+1):n_unknown_mons
M = eval_mons[[m,n], [block[k],block[k+1]], i]
A[row_idx, col_idx] = det(M)
col_idx += 1
end
for n in 1:n_param_mons
M = eval_mons[m, [block[k],block[k+1]], i]
A[row_idx, col_idx] = (M[1]-M[2])*eval_param_mons[n, i]
col_idx += 1
end
end
end
end
end
return A
end
function interpolate_invariants(block_partition::Vector{Vector{Int}}, mons::Vector{Expression}, eval_mons::Array{ComplexF64, 3}, eval_param_mons::Array{ComplexF64, 2}, tol::Float64)::Matrix{ComplexF64}
n_instances = size(eval_mons, 3)
n_constraints = num_constraints(block_partition)
n_unknown_mons = size(eval_mons, 1)
n_param_mons = size(eval_param_mons, 1)
n_minors = sumFromTo(n_param_mons - 1, n_unknown_mons + n_param_mons - 1)
println("n_minors = ", n_minors)
println("n_param_mons = ", n_param_mons)
println("n_unknown_mons = ", n_unknown_mons)
println("Creating vandermonde matrix of size ", (n_instances*n_constraints, n_minors), " from solutions...\n")
A = create_vandermonde_matrix(block_partition, eval_mons, eval_param_mons)
@assert size(A, 1) >= size(A, 2)
println("Computing nullspace...")
inv_minors = Matrix{ComplexF64}(transpose(nullspace(A)))
println("Size of the transposed nullspace: ", size(inv_minors), "\n")
if size(inv_minors, 1) != 0
println("Computing reduced row echelon form of the transposed nullspace...")
inv_minors = sparsify(rref(inv_minors, tol), tol, digits=1)
end
return inv_minors
end
function invariants(F::SampledSystem; degree::Int, tol::Float64=1e-5, param_dep::Bool=true)::Vector{Matrix{ComplexF64}}
params = parameters(F.equations)
    vars = param_dep ? vcat(variables(F.equations), params) : variables(F.equations)
    n_params = param_dep ? length(params) : 0
mons = get_monomials_factorization(vars, degree, n_params)
println(mons)
println("Evaluating monomials...\n\n")
eval_mons = evaluate_monomials_at_samples(mons, F.solutions, F.parameters, vars)
println("eval_mons size = ", size(eval_mons))
param_mons = get_monomials(params, degree)
eval_param_mons = evaluate_monomials_at_samples(param_mons, F.parameters, params)
invs_minors = Vector{Matrix{ComplexF64}}([])
for i in 1:length(F.block_partitions)
printstyled("Interpolating invariants for the ", i, "-th block partition...\n", color=:blue)
push!(invs_minors, interpolate_invariants(F.block_partitions[i], mons, eval_mons, eval_param_mons, tol))
println()
end
return invs_minors
end
function invariants(F::System, xp0::Tuple{Vector{ComplexF64}, Vector{ComplexF64}}; degree::Int=0, tol::Float64=1e-5, expected_n_sols::Int=0, param_dep::Bool=true)::Tuple{SampledSystem, Vector{FactorizingMap}}
F = run_monodromy(F, xp0, expected_n_sols=expected_n_sols)
println("\nNumber of nontrivial block partitions: ", length(F.block_partitions), "\n")
if isempty(F.block_partitions)
return (F, [])
end
n_unknowns = length(xp0[1])
n_params = length(xp0[2])
n_constraints = min([num_constraints(F.block_partitions[i]) for i in 1:length(F.block_partitions)]...)
    n_vars = param_dep ? n_unknowns + n_params : n_unknowns
n_instances = max(Int(ceil(1/n_constraints*binomial(n_vars + degree, degree))), binomial(n_params + degree - 1, degree - 1))
F = sample_system(F, n_instances)
    return (F, invariants(F, degree=degree, tol=tol, param_dep=param_dep))
end
function multiplication_matrix(F::SampledSystem, f::Expression, B::Vector{Expression}, instance_id::Int)
sols = F.samples.solutions[:, :, instance_id]
n_sols = size(sols, 2)
vars = variables(F.system)
A = zeros(CC, n_sols, n_sols)
for i in 1:n_sols
A[:, i] = express_in_basis(F, f*B[i], B, instance_id=instance_id)
end
return sparsify(A, 1e-5)
end
function multiplication_matrix(F::SampledSystem, f::Expression, B::Vector{Expression}; degree::Int)::Matrix{Union{Nothing, Expression}}
n_sols = size(F.samples.solutions, 2)
    M = Matrix{Union{Nothing, Expression}}(nothing, n_sols, n_sols)
for i in 1:n_sols
M[:, i] = express_in_basis(F, f*B[i], B, degree)
end
return M
end
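# Greedily collects monomials of increasing degree whose evaluations at the
# solutions of the first sampled instance are linearly independent, stopping
# once n_sols of them are found.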
function monomial_basis(F::SampledSystem)::Vector{Expression}
n_sols = size(F.samples.solutions, 2)
sols = F.samples.solutions[:,:,1] # solutions for the 1st instance
unknowns = variables(F.system)
A = zeros(CC, n_sols, n_sols)
n_indep = 1
indep_mons = Vector{Expression}([])
for i in 0:n_sols-1
mons = get_monomials_fixed_degree(unknowns, i)
for j in eachindex(mons)
A[:, n_indep] = [subs(mons[j], unknowns=>sols[:,k]) for k in 1:n_sols]
if rank(A, atol=1e-8) == n_indep
push!(indep_mons, mons[j])
n_indep += 1
if n_indep > n_sols
return indep_mons
end
end
end
end
@warn "Returned monomials don't form a basis!"
return indep_mons
end
function eval_at_sols(F::SampledSystem, G::Vector{Expression})::Matrix{CC}
sols = F.samples.solutions[:, :, 1]
params = F.samples.parameters[:, 1]
n_sols = size(sols, 2)
n_elems = length(G)
vars = vcat(variables(F.system), parameters(F.system))
A = zeros(CC, n_sols, n_elems)
for i in 1:n_sols
A[i, :] = subs(G, vars => vcat(sols[:,i], params))
end
return A
end
function is_basis(F::SampledSystem, B::Vector{Expression})::Bool
sols = F.samples.solutions[:, :, 1]
n_sols = size(sols, 2)
    if length(B) != n_sols
        return false
    end
A = eval_at_sols(F, B)
N = nullspace(A)
println("dim null = ", size(N, 2))
return size(N, 2) == 0
end
function are_LI(F::SampledSystem, G::Vector{Expression})::Bool
A = eval_at_sols(F, G)
N = nullspace(A)
println("dim null = ", size(N, 2))
return size(N, 2) == 0
end
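# Expresses f as a linear combination of the basis B over the fiber of the given
# instance: evaluates B and f at every solution and extracts the coefficient
# vector from the (one-dimensional) nullspace of the resulting linear system.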
function express_in_basis(F::SampledSystem, f::Expression, B::Vector{Expression}; instance_id::Int)::Vector{CC}
sols = F.samples.solutions[:, :, instance_id]
params = F.samples.parameters[:, instance_id]
n_sols = size(sols, 2)
vars = vcat(variables(F.system), parameters(F.system))
A = zeros(CC, n_sols, n_sols+1)
for i in 1:n_sols
A[i, 1:n_sols] = subs(B, vars => vcat(sols[:, i], params))
A[i, n_sols+1] = -subs(f, vars => vcat(sols[:, i], params))
end
c = nullspace(A)
@assert size(c, 2) == 1
@assert abs(c[n_sols+1, 1]) > 1e-10
return sparsify(vec(p2a(c)), 1e-5)
end
function express_in_basis(F::SampledSystem, f::Expression, B::Vector{Expression}, degree::Int)::Vector{Union{Nothing,Expression}}
_, n_sols, n_instances = size(F.samples.solutions)
evals = zeros(CC, n_sols, n_instances)
for i in 1:n_instances
evals[:, i] = express_in_basis(F, f, B; instance_id=i)
end
params = parameters(F.system)
mons = get_monomials(params, degree)
println("n_mons = ", length(mons))
evaluated_mons = evaluate_monomials_at_samples(mons, F.samples.parameters, params)
coeffs = Vector{Union{Nothing, Expression}}([nothing for i in 1:n_sols])
for i in 1:n_sols
coeffs[i] = interpolate_dense(view(evals, i, :), mons, evaluated_mons, func_type="rational", tol=1e-5)
end
return coeffs
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 16346 | export multiexponents,
AbstractMonomialVector,
DenseMonomialVector,
SparseMonomialVector,
iterate,
extend!,
evaluate,
to_expressions,
to_classes,
nparam_only,
nunkn_dep
function multiexponents(; degree::Tv, nvars::Ti) where {Tv<:Integer,Ti<:Integer}
mexps = [spzeros(Tv, Ti, nvars) for _ in 1:num_mons(nvars, degree)]
k = 1
for n in 1:nvars
for part::Vector{Tv} in partitions(degree, n)
for vals in multiset_permutations(part, n)
for inds in combinations(Ti.(1:nvars), n)
mexps[k][inds] = vals
k += 1
end
end
end
end
return mexps
end
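# Example (illustrative; the exact ordering follows the partitions/permutations/
# combinations iteration above):
#   multiexponents(degree=Int8(2), nvars=Int16(2))
#   # -> sparse vectors encoding the exponents [2, 0], [0, 2], [1, 1]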
abstract type AbstractMonomialVector end
# Structure representing a vector of monomials
# Advantage: monomial evaluation can be made more efficient
mutable struct DenseMonomialVector{Tv<:Integer,Ti<:Integer} <: AbstractMonomialVector
degree::Tv
n::Int # length
unknowns_mexps::Dict{Tv, Vector{SparseVector{Tv,Ti}}} # keys are degrees
parameters_mexps::Dict{Tv, Vector{SparseVector{Tv,Ti}}} # keys are degrees
unknowns::Vector{Variable}
parameters::Vector{Variable}
end
function DenseMonomialVector{Tv,Ti}(;
unknowns::Vector{Variable},
parameters::Vector{Variable}
) where {Tv<:Integer,Ti<:Integer}
degree = zero(Tv)
unknowns_mexps = Dict(0 => [spzeros(Tv, Ti, length(unknowns))])
parameters_mexps = Dict(0 => [spzeros(Tv, Ti, length(parameters))])
n = 1
return DenseMonomialVector{Tv,Ti}(
degree,
n,
unknowns_mexps,
parameters_mexps,
unknowns,
parameters
)
end
unknowns(mons::DenseMonomialVector) = mons.unknowns
HC.parameters(mons::DenseMonomialVector) = mons.parameters
variables(mons::DenseMonomialVector) = vcat(unknowns(mons), parameters(mons))
nunknowns(mons::DenseMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer} = length(mons.unknowns)
HC.nparameters(mons::DenseMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer} = length(mons.parameters)
nvariables(mons::DenseMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer} = nunknowns(mons)+nparameters(mons)
unknowns_deg(mons::DenseMonomialVector) = max(keys(mons.unknowns_mexps)...)
parameters_deg(mons::DenseMonomialVector) = max(keys(mons.parameters_mexps)...)
Base.length(mons::DenseMonomialVector) = mons.n
nparam_only(mons::DenseMonomialVector) = num_mons_upto(nparameters(mons), parameters_deg(mons))
nunkn_dep(mons::DenseMonomialVector) = length(mons) - nparam_only(mons)
to_expression(
unknowns::Vector{Variable},
unkn_mexp::AbstractSparseVector,
parameters::Vector{Variable},
param_mexp::AbstractSparseVector
) = prodpow(unknowns, unkn_mexp)*prodpow(parameters, param_mexp)
function to_expressions(mons::DenseMonomialVector)
return [to_expression(
mons.unknowns,
unkn_mexp,
mons.parameters,
param_mexp
) for (unkn_mexp, param_mexp) in mons]
end
function Base.show(io::IO, mons::DenseMonomialVector)
println(io, "$(length(mons))-element $(typeof(mons))")
print(io, "[", join(to_expressions(mons), ", "), "]")
end
function extend!(
mons::DenseMonomialVector{Tv,Ti};
degree::Integer,
extend_params::Bool=true
) where {Tv<:Integer,Ti<:Integer}
degree = convert(Tv, degree)
uexps, pexps = mons.unknowns_mexps, mons.parameters_mexps
for d::Tv in mons.degree+1:degree
uexps[d] = multiexponents(degree=d, nvars=Ti(nunknowns(mons)))
end
if extend_params
for d::Tv in min(keys(pexps)...)+1:degree
pexps[d] = multiexponents(degree=d, nvars=Ti(nparameters(mons)))
end
end
mons.degree = degree
mons.n = 0
for udeg in 0:unknowns_deg(mons)
for pdeg in 0:parameters_deg(mons)
if udeg+pdeg ≤ mons.degree
mons.n += num_mons(nunknowns(mons), udeg)*num_mons(nparameters(mons), pdeg)
else
break
end
end
end
return mons
end
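# Usage sketch (assumes `@var x[1:2] p[1:2]` from HomotopyContinuation):
#   mons = DenseMonomialVector{Int8,Int16}(unknowns=x, parameters=p)
#   extend!(mons; degree=2)  # `mons` now holds all monomials in x, p up to degree 2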
struct DenseMonVecState
udeg::Int
uid::Int
pdeg::Int
pid::Int
end
function pick_at_state(
mons::DenseMonomialVector,
state::DenseMonVecState
)
@unpack udeg, uid, pdeg, pid = state
uexps, pexps = mons.unknowns_mexps, mons.parameters_mexps
return uexps[udeg][uid], pexps[pdeg][pid]
end
function next_state(
mons::DenseMonomialVector,
state::DenseMonVecState
)
@unpack udeg, uid, pdeg, pid = state
uexps, pexps = mons.unknowns_mexps, mons.parameters_mexps
if uid < length(uexps[udeg])
return DenseMonVecState(udeg, uid+1, pdeg, pid)
elseif pid < length(pexps[pdeg])
return DenseMonVecState(udeg, 1, pdeg, pid+1)
elseif pdeg > 0
return DenseMonVecState(udeg, 1, pdeg-1, 1)
elseif udeg > 0
return DenseMonVecState(udeg-1, 1, min(mons.degree-udeg+1, parameters_deg(mons)), 1)
else
return nothing
end
end
function Base.iterate(
mons::DenseMonomialVector,
state::DenseMonVecState
)
new_state = next_state(mons, state)
isnothing(new_state) && return nothing
return pick_at_state(mons, new_state), new_state
end
function Base.iterate(mons::DenseMonomialVector)
state = DenseMonVecState(mons.degree, 1, 0, 1)
return pick_at_state(mons, state), state
end
# Structure representing a sparse monomial vector
# Advantages:
# 1) monomial evaluation can be made more efficient
# 2) we know how many param_only monomials there are (needed for picking samples for evaluation)
struct SparseMonomialVector{Tv<:Integer,Ti<:Integer} <: AbstractMonomialVector
unkn_dep_mexps::NTuple{2, Vector{SparseVector{Tv,Ti}}}
param_only_mexps::Vector{SparseVector{Tv,Ti}}
unknowns::Vector{Variable}
parameters::Vector{Variable}
end
SparseMonomialVector{Tv,Ti}(
unknowns::Vector{Variable},
parameters::Vector{Variable}
) where {Tv<:Integer,Ti<:Integer} = SparseMonomialVector{Tv,Ti}(([], []), [], unknowns, parameters)
struct SparseMonVecState
unkn_dep::Bool
id::Int
end
function pick_at_state(mons::SparseMonomialVector{Tv,Ti}, state::SparseMonVecState) where {Tv<:Integer,Ti<:Integer}
state.unkn_dep && return vcat(mons.unkn_dep_mexps[1][state.id], mons.unkn_dep_mexps[2][state.id])
return vcat(spzeros(Tv,Ti,nunknowns(mons)), mons.param_only_mexps[state.id])
end
function Base.iterate(mons::SparseMonomialVector)
length(mons) == 0 && return nothing
unkn_dep = mons.unkn_dep_mexps
if length(unkn_dep[1]) != 0
state = SparseMonVecState(true, 1)
return pick_at_state(mons, state), state
else
state = SparseMonVecState(false, 1)
return pick_at_state(mons, state), state
end
end
function next_state(
mons::SparseMonomialVector,
state::SparseMonVecState
)
@unpack unkn_dep, id = state
if unkn_dep
if id < length(mons.unkn_dep_mexps[1])
return SparseMonVecState(true, id+1)
else
if length(mons.param_only_mexps) != 0
return SparseMonVecState(false, 1)
else
return nothing
end
end
else
if id < length(mons.param_only_mexps)
            return SparseMonVecState(false, id+1)
else
return nothing
end
end
end
function Base.iterate(
mons::SparseMonomialVector,
state::SparseMonVecState
)
new_state = next_state(mons, state)
isnothing(new_state) && return nothing
return pick_at_state(mons, new_state), new_state
end
function Base.findfirst(mexp::SparseVector{Tv,Ti}, mons::SparseMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer}
for (i, mon) in enumerate(mons)
mexp == mon && return i
end
return nothing
end
unknowns(mons::SparseMonomialVector) = mons.unknowns
HC.parameters(mons::SparseMonomialVector) = mons.parameters
variables(mons::SparseMonomialVector) = vcat(unknowns(mons), parameters(mons))
nunknowns(mons::SparseMonomialVector) = length(mons.unknowns)
HC.nparameters(mons::SparseMonomialVector) = length(mons.parameters)
Base.length(mons::SparseMonomialVector) = length(mons.unkn_dep_mexps[1]) + length(mons.param_only_mexps)
is_param_only(mons::SparseMonomialVector) = length(mons.unkn_dep_mexps[1]) == 0
nunkn_dep(mons::SparseMonomialVector) = length(mons.unkn_dep_mexps[1])
nparam_only(mons::SparseMonomialVector) = length(mons.param_only_mexps)
function Base.vcat(monVs::SparseMonomialVector{Tv,Ti}...) where {Tv<:Integer,Ti<:Integer}
return SparseMonomialVector{Tv,Ti}(
(vcat([mons.unkn_dep_mexps[1] for mons in monVs]...), vcat([mons.unkn_dep_mexps[2] for mons in monVs]...)),
vcat([mons.param_only_mexps for mons in monVs]...),
monVs[1].unknowns,
monVs[1].parameters
)
end
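# Pushes a monomial given as an (unknowns_exponent, parameters_exponent) pair;
# purely parameter-dependent monomials (zero unknowns part) are stored separately
# in `param_only_mexps`.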
function Base.push!(
mons::SparseMonomialVector{Tv,Ti},
(uexp, pexp)::NTuple{2, SparseVector{Tv,Ti}}
) where {Tv<:Integer,Ti<:Integer}
if iszero(uexp)
push!(mons.param_only_mexps, pexp)
else
push!(mons.unkn_dep_mexps[1], uexp)
push!(mons.unkn_dep_mexps[2], pexp)
end
end
function to_expressions(mons::SparseMonomialVector)
return vcat(
[to_expression(
mons.unknowns,
unkn_mexp,
mons.parameters,
param_mexp
) for (unkn_mexp, param_mexp) in zip(mons.unkn_dep_mexps...)],
[prodpow(mons.parameters, mexp) for mexp in mons.param_only_mexps]
)
end
# TODO
function Base.show(io::IO, mons::SparseMonomialVector)
# println(io, "$(length(mons.mds))-element $(typeof(mons))")
print(io, "[", join(to_expressions(mons), ", "), "]")
end
function Base.gcd(mons::SparseMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer}
if nparam_only(mons) == 0
return vcat(
min.(mons.unkn_dep_mexps[1]...),
min.(mons.unkn_dep_mexps[2]...)
)
end
return vcat(
spzeros(Tv, Ti, nunknowns(mons)),
min.(mons.unkn_dep_mexps[2]..., mons.param_only_mexps...)
)
end
struct SparseMatrixMonomialVector{Tv<:Integer,Ti<:Integer} <: AbstractMonomialVector
unkn_dep_mexps::NTuple{2, SparseMatrixCSC{Tv,Ti}}
param_only_mexps::SparseMatrixCSC{Tv,Ti}
unknowns::Vector{Variable}
parameters::Vector{Variable}
end
function SparseMatrixMonomialVector{Tv,Ti}(
mons::SparseMonomialVector{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
unkn_dep_1 = length(mons.unkn_dep_mexps[1]) == 0 ? spzeros(nunknowns(mons), 0) : Base.reduce(hcat, mons.unkn_dep_mexps[1])
unkn_dep_2 = length(mons.unkn_dep_mexps[2]) == 0 ? spzeros(nparameters(mons), 0) : Base.reduce(hcat, mons.unkn_dep_mexps[2])
param_only = length(mons.param_only_mexps) == 0 ? spzeros(nparameters(mons), 0) : Base.reduce(hcat, mons.param_only_mexps)
return SparseMatrixMonomialVector{Tv,Ti}(
(unkn_dep_1, unkn_dep_2),
param_only,
mons.unknowns,
mons.parameters
)
end
nunknowns(mons::SparseMatrixMonomialVector) = length(mons.unknowns)
Base.length(mons::SparseMatrixMonomialVector) = size(mons.unkn_dep_mexps[1], 2) + size(mons.param_only_mexps, 2)
is_param_only(mons::SparseMatrixMonomialVector) = size(mons.unkn_dep_mexps[1], 2) == 0
nunkn_dep(mons::SparseMatrixMonomialVector) = size(mons.unkn_dep_mexps[1], 2)
nparam_only(mons::SparseMatrixMonomialVector) = size(mons.param_only_mexps, 2)
function Base.vcat(monVs::SparseMatrixMonomialVector{Tv,Ti}...) where {Tv<:Integer,Ti<:Integer}
return SparseMatrixMonomialVector{Tv,Ti}(
(Base.reduce(hcat, [mons.unkn_dep_mexps[1] for mons in monVs]), Base.reduce(hcat, [mons.unkn_dep_mexps[2] for mons in monVs])),
Base.reduce(hcat, [mons.param_only_mexps for mons in monVs]),
monVs[1].unknowns,
monVs[1].parameters
)
end
function to_expressions(mons::SparseMatrixMonomialVector)
return vcat(
[to_expression(
mons.unknowns,
mons.unkn_dep_mexps[1][:, i],
mons.parameters,
mons.unkn_dep_mexps[2][:, i]
) for i in axes(mons.unkn_dep_mexps[1], 2)],
[prodpow(mons.parameters, mons.param_only_mexps[:, i]) for i in axes(mons.param_only_mexps, 2)]
)
end
function Base.gcd(mons::SparseMatrixMonomialVector{Tv,Ti}) where {Tv<:Integer,Ti<:Integer}
if nparam_only(mons) == 0
return vcat(
vec(minimum(mons.unkn_dep_mexps[1]; dims=2)),
vec(minimum(mons.unkn_dep_mexps[2]; dims=2))
)
elseif nunkn_dep(mons) == 0
return vcat(
spzeros(Tv, Ti, nunknowns(mons)),
vec(minimum(mons.param_only_mexps; dims=2))
)
else
return vcat(
spzeros(Tv, Ti, nunknowns(mons)),
vec(minimum(hcat(minimum(mons.unkn_dep_mexps[2]; dims=2), minimum(mons.param_only_mexps; dims=2)); dims=2))
)
end
end
# Structure for evaluated monomials
# Advantage: requires less memory (no need to duplicate values for param_only monomials)
struct EvaluatedMonomials
unkn_dep::Array{ComplexF64, 3}
param_only::Array{ComplexF64, 2}
end
nunkn_dep(eval_mons::EvaluatedMonomials) = size(eval_mons.unkn_dep, 1)
nparam_only(eval_mons::EvaluatedMonomials) = size(eval_mons.param_only, 1)
nmonomials(eval_mons::EvaluatedMonomials) = nunkn_dep(eval_mons) + nparam_only(eval_mons)
# TODO: test timings w/ and w/o selectdim
prodpow(
v::AbstractArray,
e::AbstractSparseVector;
) = dropdims(prod(selectdim(v,1,e.nzind).^e.nzval; dims=1); dims=1)
function HC.evaluate(
mexps_dict::Dict{Tv, Vector{SparseVector{Tv,Ti}}},
samples::Array{P,N}
) where {Tv<:Integer, Ti<:Integer, P, N}
evals = Dict{Tv, Array{P,N}}()
for (d, mexps) in mexps_dict
eval = zeros(P, length(mexps), size(samples)[2:end]...)
for (i, mexp) in enumerate(mexps)
eval[i, repeat([:], N-1)...] = prodpow(samples, mexp)
end
evals[d] = eval
end
return evals
end
function HC.evaluate(
mons::DenseMonomialVector,
samples::Samples
)
unkn_evals = evaluate(mons.unknowns_mexps, samples.solutions)
param_evals = evaluate(mons.parameters_mexps, samples.parameters)
unkn_dep = zeros(ComplexF64, nunkn_dep(mons), nsolutions(samples), ninstances(samples))
k = 0
for udeg in unknowns_deg(mons):-1:1
unkn_eval = unkn_evals[udeg]
for pdeg in min(mons.degree-udeg, parameters_deg(mons)):-1:0
param_eval = param_evals[pdeg]
for pid in axes(param_eval, 1)
unkn_dep[(k+1):(k+size(unkn_eval, 1)), :, :] = unkn_eval.*reshape(param_eval[pid, :], 1, 1, :)
k += size(unkn_eval, 1)
end
end
end
param_only = vcat([param_evals[pdeg] for pdeg in parameters_deg(mons):-1:0]...) # TODO: replace vcat?
return EvaluatedMonomials(unkn_dep, param_only)
end
function HC.evaluate(
mons::SparseMonomialVector,
samples::Samples
)
param_only = zeros(ComplexF64, nparam_only(mons), ninstances(samples))
for (i, mexp) in enumerate(mons.param_only_mexps)
param_only[i, :] = prodpow(samples.parameters, mexp)
end
unkn_dep = zeros(ComplexF64, nunkn_dep(mons), nsolutions(samples), ninstances(samples))
for (i, (uexp, pexp)) in enumerate(zip(mons.unkn_dep_mexps...))
unkn_dep[i, :, :] = prodpow(samples.solutions, uexp).*transpose(prodpow(samples.parameters, pexp))
end
return EvaluatedMonomials(unkn_dep, param_only)
end
function HC.evaluate(
mons::SparseMatrixMonomialVector,
samples::Samples
)
param_only = zeros(ComplexF64, nparam_only(mons), ninstances(samples))
for i in axes(mons.param_only_mexps, 2)
param_only[i, :] = prodpow(samples.parameters, mons.param_only_mexps[:, i])
end
unkn_dep = zeros(ComplexF64, nunkn_dep(mons), nsolutions(samples), ninstances(samples))
for i in axes(mons.unkn_dep_mexps[1], 2)
unkn_dep[i, :, :] = prodpow(samples.solutions, mons.unkn_dep_mexps[1][:, i]).*transpose(prodpow(samples.parameters, mons.unkn_dep_mexps[2][:, i]))
end
return EvaluatedMonomials(unkn_dep, param_only)
end
HC.evaluate(
mons::AbstractMonomialVector,
samples::Dict{T, Samples}
) where {T} = Dict(zip(values(samples), [evaluate(mons, s) for s in values(samples)]))
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 11803 | export SampledSystem,
MonodromyInfo,
Samples,
run_monodromy,
sample!,
unknowns,
parameters,
variables,
nunknowns,
nparameters,
nvariables,
nsolutions,
nsamples,
ninstances,
samples,
monodromy_permutations,
block_partitions,
deck_permutations
using HomotopyContinuation: Result, MonodromyResult, nsolutions, ntracked, is_success, solution
using HomotopyContinuation: ParameterHomotopy, Tracker, track
const MONODROMY_SOLVE_REF = "https://www.juliahomotopycontinuation.org/HomotopyContinuation.jl/stable/monodromy/"
const SOLVE_REF = "https://www.juliahomotopycontinuation.org/HomotopyContinuation.jl/stable/solve/"
struct MonodromyInfo
n_solutions::Int
monodromy_permutations::Vector{Vector{Int}}
block_partitions::Vector{Vector{Vector{Int}}}
deck_permutations::Vector{Vector{Int}}
end
MonodromyInfo() = MonodromyInfo(1, [], [], [])
struct Samples
solutions::Array{ComplexF64, 3} # n_unknowns x n_sols x n_instances
parameters::Array{ComplexF64, 2} # n_params x n_instances
end
Samples(
sols::Matrix{ComplexF64},
params::Vector{ComplexF64}
) = Samples(reshape(sols, size(sols)..., 1), reshape(params, :, 1))
HC.nsolutions(samples::Samples) = size(samples.solutions, 2)
ninstances(samples::Samples) = size(samples.parameters, 2)
nsamples(samples::Samples) = nsolutions(samples)*ninstances(samples)
mutable struct SampledSystem
system::System
mon_info::MonodromyInfo
samples::Dict{Vector{Int}, Samples} # key: ids of solution paths
end
unknowns(F::System) = HC.variables(F)
"""
unknowns(F::SampledSystem) -> Vector{Variable}
Returns the vector of unknowns of `F`.
"""
unknowns(F::SampledSystem) = unknowns(F.system)
"""
parameters(F::SampledSystem) -> Vector{Variable}
Returns the vector of parameters of `F`.
"""
HC.parameters(F::SampledSystem) = parameters(F.system)
variables(F::System) = vcat(unknowns(F), parameters(F)) # does a different thing than HC.variables
"""
variables(F::SampledSystem) -> Vector{Variable}
Returns the concatenated vector of unknowns and parameters of `F`.
"""
variables(F::SampledSystem) = variables(F.system)
"""
nunknowns(F::SampledSystem) -> Int
Returns the number of unknowns of `F`.
"""
nunknowns(F::SampledSystem) = length(unknowns(F)) # equivalent to HC.nvariables
"""
nparameters(F::SampledSystem) -> Int
Returns the number of parameters of `F`.
"""
HC.nparameters(F::SampledSystem) = length(parameters(F))
"""
nvariables(F::SampledSystem) -> Int
Returns the number of variables of `F`.
"""
nvariables(F::SampledSystem) = length(variables(F)) # doesn't extend HC.nvariables, does a different thing
"""
nsolutions(F::SampledSystem) -> Int
Returns the number of solutions of `F` obtained by [`run_monodromy`](@ref) method.
"""
HC.nsolutions(F::SampledSystem) = F.mon_info.n_solutions
"""
samples(F::SampledSystem) -> Dict{Vector{Int}, Samples}
Returns the dictionary of samples of a polynomial system `F`.
"""
samples(F::SampledSystem) = F.samples
"""
ninstances(F::SampledSystem) -> Int
Returns the number of sampled instances of `F`.
"""
ninstances(F::SampledSystem) = sum([ninstances(s) for s in values(samples(F))])
"""
nsamples(F::SampledSystem) -> Int
Returns the number of samples of `F`. Notice that `ninstances(F)*nsolutions(F)` doesn't
have to be equal to `nsamples(F)`.
"""
function nsamples(F::SampledSystem)
return sum([nsamples(s) for s in values(samples(F))])
end
"""
monodromy_permutations(F::SampledSystem) -> Vector{Vector{Int}}
Returns the vector of monodromy permutations of `F` obtained by [`run_monodromy`](@ref).
"""
monodromy_permutations(F::SampledSystem) = F.mon_info.monodromy_permutations
"""
block_partitions(F::SampledSystem) -> Vector{Vector{Vector{Int}}}
Returns the vector of all block partitions of the solutions of `F`.
"""
block_partitions(F::SampledSystem) = F.mon_info.block_partitions
"""
deck_permutations(F::SampledSystem) -> Vector{Vector{Int}}
Returns the vector of deck permutations of the solutions (actions of deck transformations
on the solutions) of `F`.
"""
deck_permutations(F::SampledSystem) = F.mon_info.deck_permutations
(F::SampledSystem)(
x₀::AbstractVector{<:Number},
p₀::AbstractVector{<:Number}
) = F.system(x₀, p₀)
function _filter_permutations(perms::Matrix{Int})::Vector{Vector{Int}}
    nsols = size(perms, 1)
return filter(
x->!(0 in x) && (length(unique(x)) == nsols),
eachcol(perms)
)
end
function SampledSystem(F::System, MR::MonodromyResult)
sols, params = hcat(HC.solutions(MR)...), MR.parameters
n_sols = size(sols, 2)
if n_sols == 1
@warn "Monodromy result has only 1 solution, no monodromy group available"
return SampledSystem(
F,
MonodromyInfo(),
Dict([1] => Samples(sols, params))
)
end
monodromy_permutations = _filter_permutations(HC.permutations(MR))
block_partitions = all_block_partitions(to_group(monodromy_permutations))
deck_permutations = to_permutations(centralizer(monodromy_permutations))
return SampledSystem(
F,
MonodromyInfo(
n_sols,
monodromy_permutations,
block_partitions,
deck_permutations
),
Dict(Vector(1:n_sols) => Samples(sols, params))
)
end
function Base.show(io::IO, F::SampledSystem)
println(io, "SampledSystem with $(phrase(nsamples(F), "sample"))")
print(io, " $(phrase(nunknowns(F), "unknown")): ", join(unknowns(F), ", "))
if !isempty(parameters(F))
print(io, "\n $(phrase(nparameters(F), "parameter")): ", join(parameters(F), ", "))
end
print(io, "\n\n")
println(io, " number of solutions: $(nsolutions(F))")
print(io, " sampled instances: $(ninstances(F))")
# print(io, " deck permutations: $(length(deck_permutations(F)))")
end
function random_samples(samples::Samples)
instance_id = rand(1:ninstances(samples))
return M2VV(samples.solutions[:, :, instance_id]), samples.parameters[:, instance_id]
end
all_solutions_samples(F::SampledSystem) = samples(F)[Vector(1:nsolutions(F))]
function random_samples(
F::SampledSystem;
path_ids::Vector{Int}
)
samples = all_solutions_samples(F)
instance_id = rand(1:ninstances(samples))
return M2VV(samples.solutions[:, path_ids, instance_id]), samples.parameters[:, instance_id]
end
"""
run_monodromy(F::Union{System, AbstractSystem}, xp₀=nothing; options...) -> SampledSystem
Runs [`monodromy_solve`]($(MONODROMY_SOLVE_REF)) on a given polynomial system `F` with starting
solutions `xp₀[1]` and parameters `xp₀[2]` (if given).
```julia-repl
julia> @var x a b;
julia> F = System([x^3+a*x+b]; variables=[x], parameters=[a,b]);
julia> F = run_monodromy(F, ([[1]], [1,-2]); max_loops_no_progress = 10)
SampledSystem with 3 samples
1 unknown: x
2 parameters: a, b
number of solutions: 3
sampled instances: 1
```
"""
function run_monodromy(
F::Union{System, AbstractSystem},
xp₀::Union{Nothing, Tuple{AbstractVector{<:AbstractVector{<:Number}}, AbstractVector{<:Number}}}=nothing;
options...
)
if isnothing(xp₀)
MR = HC.monodromy_solve(F; permutations=true, options...)
else
sols, p₀ = xp₀
MR = HC.monodromy_solve(F, sols, ComplexF64.(p₀); permutations=true, options...)
end
if length(HC.solutions(MR)) == 1
error("Only 1 solution found, no monodromy group available. Try running again...")
end
return SampledSystem(F, MR)
end
"""
run_monodromy(F::SampledSystem, xp₀=nothing; options...) -> SampledSystem
Reruns [`monodromy_solve`]($(MONODROMY_SOLVE_REF)) on a given sampled polynomial system `F`.
"""
function run_monodromy(
F::SampledSystem,
xp₀::Union{Nothing, Tuple{AbstractVector{<:AbstractVector{<:Number}}, AbstractVector{<:Number}}}=nothing;
options...
)
if isnothing(xp₀)
sols, p₀ = random_samples(F; path_ids=Vector(1:nsolutions(F)))
else
sols, p₀ = xp₀ # TODO: do we need this?
end
MR = HC.monodromy_solve(F.system, sols, ComplexF64.(p₀); permutations=true, options...)
if length(HC.solutions(MR)) == 1
error("Only 1 solution found, no monodromy group available. Try running again...")
end
return SampledSystem(F.system, MR)
end
function extract_samples(
results::Vector{Tuple{Result, Vector{ComplexF64}}},
F::SampledSystem;
resample::Bool=false
)
n_tracked = ntracked(results[1][1])
n_instances = length(results)
all_sols = zeros(ComplexF64, nunknowns(F), n_tracked, n_instances)
all_params = zeros(ComplexF64, nparameters(F), n_instances)
k = 1
for (res, p) in results
sols = HC.solutions(res)
if length(sols) == n_tracked
all_sols[:, :, k] = hcat(sols...)
all_params[:, k] = p
k += 1
elseif !resample
error("Number of solutions in the $(k)-th result is $(length(sols)), expected $(n_tracked)")
end
end
for i in k:n_instances
while true
instance_id = rand(1:i-1) # TODO: what if i == 1?
p₀ = all_params[:, instance_id]
sols₀ = M2VV(all_sols[:, :, instance_id])
p₁ = randn(ComplexF64, nparameters(F))
res = HC.solve(
F.system,
sols₀,
start_parameters = p₀,
target_parameters = p₁
)
sols = HC.solutions(res)
if length(sols) == n_tracked
all_sols[:, :, i] = hcat(sols...)
all_params[:, i] = p₁
break
end
end
end
return all_sols, all_params
end
"""
sample!(F::SampledSystem; path_ids=Vector(1:nsolutions(F)), n_instances=1) -> SampledSystem
Uses the [`solve`]($(SOLVE_REF)) method to track the solutions of a polynomial system `F` with ids
defined by `path_ids` to `n_instances` random parameter instances.
"""
function sample!(
F::SampledSystem;
path_ids::AbstractVector{Int}=1:nsolutions(F),
n_instances::Int=1
)
(length(path_ids) == 0 || n_instances ≤ 0) && return F
p₁s = [randn(ComplexF64, nparameters(F)) for _ in 1:n_instances]
path_ids = sort(Vector{Int}(path_ids))
samples = get(F.samples, path_ids, nothing)
if isnothing(samples)
sols₀, p₀ = random_samples(F; path_ids=path_ids)
res = HC.solve(
F.system,
sols₀,
start_parameters = p₀,
target_parameters = p₁s
)
sols, params = extract_samples(res, F; resample=true)
F.samples[path_ids] = Samples(sols, params)
else
sols₀, p₀ = random_samples(samples)
res = HC.solve(
F.system,
sols₀,
start_parameters = p₀,
target_parameters = p₁s
)
sols, params = extract_samples(res, F; resample=true)
sols = cat(samples.solutions, sols; dims=3)
params = cat(samples.parameters, params; dims=2)
F.samples[path_ids] = Samples(sols, params)
end
return F
end
function track_parameter_homotopy(
F::System,
(x₀, p₀)::NTuple{2, AbstractVector{<:Number}},
p₁::AbstractVector{<:Number},
p_inter::AbstractVector{<:Number} # intermediate parameter
)
H₁ = ParameterHomotopy(F; start_parameters=p₀, target_parameters=p_inter)
res = track(Tracker(H₁), x₀)
if !is_success(res)
@warn "Tracking was not successful: stopped at t = $(res.t)"
end
H₂ = ParameterHomotopy(F; start_parameters=p_inter, target_parameters=p₁)
res = track(Tracker(H₂), solution(res))
if !is_success(res)
@warn "Tracking was not successful: stopped at t = $(res.t)"
end
return solution(res)
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 10166 | export Grading,
ScalingGroup,
grading,
scaling_symmetries
using AbstractAlgebra: ZZ, matrix, GF, lift, hnf, snf_with_transform
using LinearAlgebra: diag
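# Grading of the variables by the scaling group: `free_part` stores the exponent
# matrix of the free (ℤ) scalings, `mod_part` stores pairs (sᵢ, Uᵢ) of an order sᵢ
# and the exponent matrix Uᵢ of the corresponding modular (ℤ_{sᵢ}) scalings.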
mutable struct Grading{Tv<:Integer,Ti<:Integer}
nscalings::Int
free_part::Union{Nothing, SparseMatrixCSC{Tv,Ti}}
mod_part::Vector{Tuple{Tv, SparseMatrixCSC{Tv,Ti}}}
end
Grading{Tv,Ti}() where {Tv<:Integer,Ti<:Integer} = Grading{Tv,Ti}(0, nothing, [])
function Grading{Tv,Ti}(
s::Vector{Int},
U::Vector{Matrix{Int}}
) where {Tv<:Integer,Ti<:Integer}
grading = Grading{Tv,Ti}()
for (sᵢ, Uᵢ) in zip(s, U)
if sᵢ == 0
grading.free_part = Uᵢ
else
push!(grading.mod_part, (sᵢ, Uᵢ))
end
grading.nscalings += size(Uᵢ, 1)
end
return grading
end
nfree(grading::Grading) = isnothing(grading.free_part) ? 0 : size(grading.free_part, 1)
nscalings(grading::Grading) = grading.nscalings
Base.isempty(grading::Grading) = isnothing(grading.free_part) && isempty(grading.mod_part)
Base.copy(grading::Grading) = Grading(grading.nscalings, grading.free_part, copy(grading.mod_part))
function _structure(grading::Grading)
str = ""
U₀ = grading.free_part
if !isnothing(U₀)
n_free = size(U₀, 1)
free_str = n_free == 1 ? "ℤ" : "ℤ$(superscript(n_free))"
str = str * free_str
end
for (i, (sᵢ, Uᵢ)) in enumerate(grading.mod_part)
prod_str = isnothing(U₀) && i == 1 ? "" : " × "
n_sᵢ = size(Uᵢ, 1)
sᵢ_str = n_sᵢ == 1 ? "ℤ$(subscript(sᵢ))" : "ℤ$(subscript(sᵢ))$(superscript(n_sᵢ))"
str = str * prod_str * sᵢ_str
end
return str
end
function Base.show(io::IO, grading::Grading)
print(io, "Grading with $(phrase(nscalings(grading), "scaling"))")
end
SparseAction = Vector{Tuple{Variable, Expression}}
"""
ScalingGroup
A `ScalingGroup` is the result of the [`scaling_symmetries`](@ref) computation.
"""
struct ScalingGroup{Tv<:Integer,Ti<:Integer}
grading::Grading{Tv,Ti}
structure::String
vars::Vector{Variable}
action::Tuple{Vector{SparseAction}, Vector{Tuple{Tv, Vector{SparseAction}}}}
end
ScalingGroup{Tv,Ti}(
vars::Vector{Variable}
) where {Tv<:Integer,Ti<:Integer} = ScalingGroup(Grading{Tv,Ti}(), "N/A", vars, ([], []))
function create_action(vars::Vector{Variable}, base::Union{Number, Vector{Variable}}, U::SparseMatrixCSC)
action = Vector{SparseAction}([[] for _ in axes(U, 1)])
for j in axes(U, 1)
nzind, nzval = findnz(U[j, :])
        b = base isa Number ? base : base[j]
exprs = (b.^nzval).*vars[nzind]
action[j] = collect(zip(vars[nzind], exprs))
end
return action
end
function ScalingGroup(
grading::Grading{Tv,Ti},
vars::Vector{Variable}
) where {Tv<:Integer,Ti<:Integer}
free_action = Vector{SparseAction}([])
U₀ = grading.free_part
if !isnothing(U₀)
if size(U₀, 1) == 1
@var λ
λ = [λ]
else
@var λ[1:size(U₀, 1)]
end
free_action = create_action(vars, λ, U₀)
end
mod_action = Vector{Tuple{Tv, Vector{SparseAction}}}([])
for (sᵢ, Uᵢ) in grading.mod_part
if sᵢ == 2
sᵢ_action = create_action(vars, -1, Uᵢ)
elseif sᵢ == 4
sᵢ_action = create_action(vars, im, Uᵢ)
else
@var ω[Int(sᵢ)]
sᵢ_action = create_action(vars, ω[1], Uᵢ)
end
push!(mod_action, (sᵢ, sᵢ_action))
end
action = (free_action, mod_action)
return ScalingGroup(grading, _structure(grading), vars, action)
end
grading(scalings::ScalingGroup) = scalings.grading
Base.copy(s::ScalingGroup) = ScalingGroup(s.grading, s.structure, s.vars, s.action)
function Base.show(io::IO, scalings::ScalingGroup)
action = scalings.action
n_free, n_mod = length(action[1]), length(action[2])
if n_free + n_mod == 0
print(io, "ScalingGroup with 0 scalings")
return
end
println(io, "ScalingGroup isomorphic to $(scalings.structure)")
if n_free != 0
print(io, " $(phrase(n_free, "free scaling")):")
for free_action in scalings.action[1]
print(io, "\n ")
for (j, (var, expr)) in enumerate(free_action)
print(io, var, " ↦ ", expr)
j < length(free_action) && print(io, ", ")
end
end
end
if n_mod != 0
n_free != 0 && println(io, "\n")
println(io, " modular scalings:")
for (i, (sᵢ, sᵢ_actions)) in enumerate(scalings.action[2])
print(io, " $(length(sᵢ_actions)) of order $(sᵢ):")
for mod_action in sᵢ_actions
print(io, "\n ")
for (j, (var, expr)) in enumerate(mod_action)
print(io, var, " ↦ ", expr)
j < length(mod_action) && print(io, ", ")
end
end
i < length(scalings.action[2]) && print(io, "\n")
end
end
end
function _snf_scaling_symmetries(F::System)::Tuple{Vector{Int}, Vector{Matrix{Int}}}
vars = vcat(F.variables, F.parameters)
Es = [exponents_coefficients(f, vars)[1] for f in F.expressions]
K = hcat([column_diffs(E) for E in Es]...)
if size(K, 1) > size(K, 2)
K = [K zeros(eltype(K), size(K, 1), size(K,1)-size(K,2))]
end
K = matrix(ZZ, K)
S, U, _ = snf_with_transform(K)
U, S = Matrix(U), Int.(diag(Matrix(S)))
s = reverse(filter(el->el!=1, unique(S)))
if length(s) == 0
return [], []
end
idxs = [findall(x->x==el, S) for el in s]
Us = [U[idxs[i], :] for i in eachindex(idxs)]
return s, Us
end
function _hnf_reduce!(
s::Vector{Int},
U::Vector{Matrix{Int}}
)
for (i, (sᵢ, Uᵢ)) in enumerate(zip(s, U))
if sᵢ == 0
U[i] = Matrix(hnf(matrix(ZZ, Uᵢ)))
else
try
U[i] = Int.(lift.(Matrix(hnf(matrix(GF(sᵢ), Uᵢ)))))
catch
U[i] = mod.(Uᵢ, sᵢ)
end
end
end
return s, U
end
function _hnf_reduce(grading::Grading{Tv,Ti}) where {Tv<:Integer,Ti<:Integer}
U₀ = grading.free_part
red_grading = Grading{Tv,Ti}()
if !isnothing(U₀)
red_grading.free_part = Matrix(hnf(matrix(ZZ, U₀)))
red_grading.nscalings = size(U₀, 1)
end
for (sᵢ, Uᵢ) in grading.mod_part
try
Uᵢ = lift.(Matrix(hnf(matrix(GF(sᵢ), Uᵢ))))
catch
Uᵢ = mod.(Uᵢ, sᵢ)
end
push!(red_grading.mod_part, (sᵢ, Uᵢ))
red_grading.nscalings += size(Uᵢ, 1)
end
return red_grading
end
"""
scaling_symmetries(F::System)
Given a polynomial system `F` returns the group of scaling symmetries
of `F`. The scalings that change the parameters are considered as well.
```julia-repl
julia> @var x y a b c;
julia> F = System([x^4+a^2+1, y^2+b+c]; variables=[x, y], parameters=[a,b,c]);
julia> scaling_symmetries(F)
ScalingGroup isomorphic to ℤ × ℤ₄ × ℤ₂
1 free scaling:
y ↦ y*λ, b ↦ b*λ^2, c ↦ c*λ^2
modular scalings:
1 of order 4:
x ↦ -im*x, y ↦ im*y, b ↦ -b, c ↦ -c
1 of order 2:
x ↦ -x, y ↦ -y, a ↦ -a
```
"""
function scaling_symmetries(F::System)
vars = vcat(F.variables, F.parameters)
s, U = _snf_scaling_symmetries(F)
length(s) == 0 && return ScalingGroup{Int8, Int16}(vars)
_hnf_reduce!(s, U)
return ScalingGroup(Grading{Int8, Int16}(s, U), vars)
end
scaling_symmetries(F::SampledSystem) = scaling_symmetries(F.system)
# TODO: extend to remove rows dependent on other blocks
function reduce(grading::Grading{Tv,Ti}) where {Tv<:Integer,Ti<:Integer}
hnf_grading = _hnf_reduce(grading)
red_grading = Grading{Tv,Ti}()
    if !isnothing(hnf_grading.free_part)
        U₀ = take_rows(!iszero, hnf_grading.free_part)
        if size(U₀, 1) != 0
            red_grading.free_part = U₀
            red_grading.nscalings = size(U₀, 1)
        end
    end
for (sᵢ, Uᵢ) in hnf_grading.mod_part
Uᵢ = take_rows(!iszero, Uᵢ)
if size(Uᵢ, 1) != 0
push!(red_grading.mod_part, (sᵢ, Uᵢ))
red_grading.nscalings += size(Uᵢ, 1)
end
end
return red_grading
end
function restrict_scalings(scalings::ScalingGroup, var_ids::Vector{Int})
restr_grading = copy(scalings.grading)
U₀ = restr_grading.free_part
restr_grading.free_part = isnothing(U₀) ? nothing : U₀[:, var_ids]
for (i, (sᵢ, Uᵢ)) in enumerate(restr_grading.mod_part)
restr_grading.mod_part[i] = (sᵢ, Uᵢ[:, var_ids])
end
return ScalingGroup(reduce(restr_grading), scalings.vars[var_ids])
end
function restrict_scalings(scalings::ScalingGroup, vars::Vector{Variable})
var_ids = [findfirst(v->v==var, scalings.vars) for var in vars]
if nothing in var_ids
throw(ArgumentError("vars contains variables not present in scalings.vars"))
end
return restrict_scalings(scalings, var_ids)
end
scaling_symmetries(
F::System,
vars::Vector{Variable}
) = restrict_scalings(scaling_symmetries(F), vars)
scaling_symmetries(
F::SampledSystem,
vars::Vector{Variable}
) = scaling_symmetries(F.system, vars)
function HC.degree(
mexp::SparseVector{Tv,Ti},
grading::Grading{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
deg = spzeros(Tv, Ti, nscalings(grading))
U₀ = grading.free_part
if !isnothing(U₀)
deg[1:size(U₀,1)] = U₀*mexp
end
k = nfree(grading)
for (sᵢ, Uᵢ) in grading.mod_part
deg[(k+1):(k+size(Uᵢ,1))] = mod.(Uᵢ*mexp, sᵢ)
k += size(Uᵢ, 1)
end
return deg
end
function to_classes(
mons::DenseMonomialVector{Tv,Ti},
grading::Grading{Tv,Ti}
) where {Tv<:Integer,Ti<:Integer}
classes = Dict{SparseVector{Tv,Ti}, SparseMonomialVector{Tv,Ti}}()
for (uexp, pexp) in mons
deg = HC.degree(vcat(uexp, pexp), grading)
        if !haskey(classes, deg)
classes[deg] = SparseMonomialVector{Tv,Ti}(mons.unknowns, mons.parameters)
end
push!(classes[deg], (uexp, pexp))
end
new_classes = Dict{SparseVector{Tv,Ti}, SparseMatrixMonomialVector{Tv,Ti}}()
    for (deg, class_mons) in classes
        new_classes[deg] = SparseMatrixMonomialVector{Tv,Ti}(class_mons)
    end
return new_classes
end
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 153 | include("utils/basic.jl")
include("utils/aliases.jl")
include("utils/groups.jl")
include("utils/Gauss-Jordan.jl")
include("utils/algebraic_geometry.jl")
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 1327 | export rref
function rref!(A::Matrix{T}, ɛ=T <: Union{Rational,Integer} ? 0 : eps(norm(A,Inf))) where T
nr, nc = size(A)
i = j = 1
while i <= nr && j <= nc
(m, mi) = findmax(abs.(A[i:nr,j]))
mi = mi+i - 1
if m <= ɛ
if ɛ > 0
A[i:nr,j] .= zero(T)
end
j += 1
else
for k=j:nc # swap i-th and mi-th rows
A[i, k], A[mi, k] = A[mi, k], A[i, k]
end
d = A[i,j] # == m
for k = j:nc
A[i,k] /= d # make pivot = 1
end
for k = 1:nr # subtract multiple of pivot row from every row except i-th
if k != i
d = A[k,j]
for l = j:nc
A[k,l] -= d*A[i,l]
end
end
end
i += 1
j += 1
end
end
A
end
rrefconv(::Type{T}, A::Matrix) where {T} = rref!(copyto!(similar(A, T), A))
rref(A::Matrix{T}) where {T} = rref!(copy(A))
rref(A::Matrix{T}) where {T <: Complex} = rrefconv(CC, A)
rref(A::Matrix{<:Number}, tol::Real) = rref!(copy(A), tol)
rref(A::Matrix{T}) where {T <: Union{Integer, Float16, Float32}} = rrefconv(Float64, A)
rref(A::AbstractMatrix, tol::Real) = rref(Matrix(A), tol)
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 462 | using HomotopyContinuation: differentiate
export jac
function jac(F::System)
return differentiate(F.expressions, F.variables)
end
function jac(F::System, x0::Vector{CC})
return Matrix{CC}(subs(differentiate(F.expressions, F.variables), F.variables => x0))
end
function jac(F::System, xp0::Tuple{Vector{CC}, Vector{CC}})
return Matrix{CC}(subs(differentiate(F.expressions, F.variables), vcat(F.variables, F.parameters) => vcat(xp0[1], xp0[2])))
end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 26 | export CC
const CC = ComplexF64
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 7628 | export sparsify!,
simplify_numbers,
eye, a2p, p2a,
num_mons, num_mons_upto
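# Affine <-> projective coordinates (columnwise): `a2p` appends a row of ones,
# `p2a` dehomogenizes by the last row and drops it.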
a2p(M::AbstractMatrix{<:Number}) = [M; ones(eltype(M), 1, size(M, 2))]
p2a(M::AbstractMatrix{<:Number}) = (M./M[end:end,:])[1:end-1,:]
M2VV(M::AbstractMatrix) = [M[:,i] for i in axes(M, 2)]
M2VM(M::AbstractMatrix) = [reshape(M[:,i], size(M,1), 1) for i in axes(M, 2)]
function Base.copyto!(M::AbstractMatrix{T}, v::AbstractVector{AbstractVector{T}}; dim::Integer) where {T}
for i in eachindex(v)
copyto!(selectdim(M, dim, i), v[i])
end
end
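# `xx(v)` is the 3×3 skew-symmetric cross-product matrix of v (xx(v)*w == v × w);
# `xx2v` recovers v from such a matrix.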
xx(v) = [0 -v[3] v[2]; v[3] 0 -v[1]; -v[2] v[1] 0]
xx2v(xx) = [-xx[2,3], xx[1,3], -xx[1,2]]
eye(T, n::Integer) = Matrix{T}(I(n))
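# prodpow(v, e) = ∏ᵢ v[i]^e[i], iterating only over the nonzero entries of the
# sparse exponent vector e.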
prodpow(v::AbstractVector, e::AbstractSparseVector) = prod(v[e.nzind].^e.nzval)
num_mons(n::Integer, d::Integer) = n > 0 ? binomial(Int(n - 1 + d), Int(d)) : 0
num_mons_upto(n::Integer, d::Integer) = n > 0 ? binomial(Int(n + d), Int(d)) : 0
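# E.g. num_mons(2, 2) == 3 (x², xy, y²) and num_mons_upto(2, 2) == 6
# (1, x, y, x², xy, y²).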
# TODO: test this
function sparsify!(v::AbstractVector{<:Number}, tol::Real; digits::Integer=0)
for j in eachindex(v)
if abs(imag(v[j])) < tol
v[j] = real(v[j])
elseif abs(round(imag(v[j]); digits=digits) - imag(v[j])) < tol
v[j] = real(v[j]) + round(imag(v[j]); digits=digits)*im
end
if abs(real(v[j])) < tol
v[j] = imag(v[j])*im
elseif abs(round(real(v[j]); digits=digits) - real(v[j])) < tol
v[j] = round(real(v[j]); digits=digits) + imag(v[j])*im
end
end
end
function sparsify!(M::AbstractMatrix{<:Number}, tol::Real; digits::Integer=0)
for r in eachrow(M)
sparsify!(r, tol; digits=digits)
end
end
function simplify_numbers(v::Vector{<:Number})
v = Vector{Number}(v)
for (i, vᵢ) in enumerate(v)
try
v[i] = Integer(vᵢ)
catch
try
v[i] = Real(vᵢ)
catch
try
v[i] = Complex{Integer}(vᵢ)
catch
end
end
end
end
return v
end
function to_ordinal(n::Integer)::String
if mod(n, 10) == 1
mod(n, 100) == 11 && return "$(n)th"
return "$(n)st"
end
if mod(n, 10) == 2
mod(n, 100) == 12 && return "$(n)th"
return "$(n)nd"
end
if mod(n, 10) == 3
mod(n, 100) == 13 && return "$(n)th"
return "$(n)rd"
end
return "$(n)th"
end
function subscript(n::Integer)::String
c = n < 0 ? [Char(0x208B)] : []
for d in reverse(digits(abs(n)))
push!(c, Char(0x2080+d))
end
return join(c)
end
function superscript(n::Integer)::String
c = n < 0 ? [Char(0x207B)] : []
for d in reverse(digits(abs(n)))
if d == 0 push!(c, Char(0x2070)) end
if d == 1 push!(c, Char(0x00B9)) end
if d == 2 push!(c, Char(0x00B2)) end
if d == 3 push!(c, Char(0x00B3)) end
if d > 3 push!(c, Char(0x2070+d)) end
end
return join(c)
end
# TODO: eachcol?
Base.findfirst(
v::AbstractVector{<:Number},
M::AbstractMatrix{<:Number};
tol::Real=1e-5
) = findfirst(i->norm(M[:,i]-v)<tol, axes(M,2))
take_rows(
f::Function,
M::AbstractMatrix{T}
) where {T} = M[[f(r) for r in eachrow(M)], :]
function column_diffs(M::AbstractMatrix{T}) where {T<:Number}
M = M - M[:,1]*ones(T, 1, size(M,2))
return M[:,2:end]
end
phrase(i::Integer, word::String) = i == 1 ? "$(i) $(word)" : "$(i) $(word)s"
# function exprDet(M; expnd=true)
# n = size(M, 1)
# @assert(n == size(M, 2))
# if n == 1 return M[1,1]
# elseif n == 2
# if expnd return expand(M[1,1]*M[2,2]-M[1,2]*M[2,1])
# else return M[1,1]*M[2,2]-M[1,2]*M[2,1]
# end
# else
# if expnd return expand(sum([(-1)^(j+1)*M[1,j]*exprDet(M[2:n,filter(k->k!=j,convert(Vector,1:n))], expnd=expnd) for j=1:n]))
# else return sum([(-1)^(j+1)*M[j,1]*exprDet(M[filter(k->k!=j,convert(Vector,1:n)), 2:n], expnd=expnd) for j=1:n])
# end
# end
# end
# function exprAdj(M::Matrix{Expression}; expnd=true)
# m, n = size(M)
# @assert m == n
# A = Matrix{Expression}(zeros(n, n))
# for i=1:n for j=1:n A[i,j] = (-1)^(i+j)*exprDet(M[1:end .!= i, 1:end .!= j], expnd=expnd) end end
# return Matrix{Expression}(transpose(A))
# end
# function exprInv(M::Matrix{Expression})
# m, n = size(M)
# @assert m == n
# return 1/exprDet(M)*exprAdj(M)
# end
# function get_specific_monomials(x::Vector{Variable}, d::Int64)::Vector{Expression}
# mons = Vector{Expression}([1])
# n_vars = length(x)
# append!(mons, x)
# n_appended = n_vars
# for i = 1:d-1
# M = mons[end-n_appended+1:end] * transpose(x)
# for j = i+1:n_vars
# append!(mons, M[1:j-i, j])
# end
# n_appended = Int((n_vars-i)*(n_vars-i+1)/2)
# end
# return mons
# end
# function get_monomials(x::Vector{Variable}, d::Int64)::Vector{Expression}
# return [prod(x.^exp) for exp in collect(multiexponents(length(x), d))]
# end
#
# function get_monomials(x::Vector{Variable}, degrees::Vector{Int64})::Vector{Expression}
# mons = []
# n = length(x)
# for d in degrees
# append!(mons, get_monomials(x, d))
# end
# return mons
# end
# # Returns list of monomials up to degree d
# function get_monomials(x::Vector{Variable}, d::Int)::Vector{Expression}
# mons = Vector{Expression}([1])
# n_vars = length(x)
# append!(mons, x)
# k = 1
# for i = 1:d-1
# M = mons[(k + 1):(k + num_mons(n_vars, i))] * transpose(x)
# for j = 1:n_vars
# append!(mons, M[1:num_mons(j, i), j])
# end
# k += num_mons(n_vars, i)
# end
# return mons
# end
# function next_deg_mons(x::Vector{Variable}, mons::Vector{Expression}, d::Int)::Vector{Expression}
# new_mons = Vector{Expression}([])
# n_vars = length(x)
# M = mons[(end-num_mons(n_vars, d-1)+1):end] * transpose(x)
# for j = 1:n_vars
# append!(new_mons, M[1:num_mons(j, d-1), j])
# end
# return new_mons
# end
# function get_monomials_fixed_degree(x::Vector{Variable}, d::Int)::Vector{Expression}
# if d == 0 return [1] end
# if d == 1 return x end
# mons = Vector{Expression}([1])
# n_vars = length(x)
# append!(mons, x)
# k = 1
# for i = 1:d-1
# M = mons[(k + 1):(k + num_mons(n_vars, i))] * transpose(x)
# for j = 1:n_vars
# append!(mons, M[1:num_mons(j, i), j])
# end
# k += num_mons(n_vars, i)
# if i == d-1
# return mons[(k+1):(k + num_mons(n_vars, d))]
# end
# end
# end
# # Returns list of monomials up to degree d for factorizing map
# function get_monomials_factorization(x::Vector{Variable}, d::Int, n_params::Int)::Vector{Expression}
# mons = Vector{Expression}([1])
# n_vars = length(x)
# n_unknowns = n_vars - n_params
# append!(mons, x[1:n_unknowns])
# k = 1
# for i = 1:d-1
# n_old_mons = num_mons(n_vars, i) - num_mons(n_params, i)
# M = mons[(k + 1):(k + n_old_mons)] * transpose(x)
# for j = 1:n_vars
# n_new_mons = min(num_mons(j, i) - num_mons(j-n_unknowns, i), size(M, 1))
# append!(mons, M[1:n_new_mons, j])
# end
# k += n_old_mons
# end
# popfirst!(mons)
# return mons
# end
# # rewrite
# # TODO: what is this?
# function v2SymM(v)
# n = Int((-1+sqrt(1+8*length(v)))/2)
# M = zeros(n, n)
# k = 1
# for i=1:n
# for j=i:n
# M[i,j] = M[j,i] = v[k]
# k += 1
# end
# end
# return M
# end | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 3515 | export group_structure, to_group, to_permutations
using GAP: GAP, GapObj, julia_to_gap, gap_to_julia, Globals
const Gl = Globals
const to_gap = julia_to_gap
const to_julia = gap_to_julia
group_structure(G::GapObj) = String(Gl.StructureDescription(G))
order(G::GapObj) = Gl.Order(G)
function group_structure(perms::Vector{Vector{Int}})
return to_julia(Gl.StructureDescription(to_group(perms)))
end
function to_group(perms::Vector{Vector{Int}})
if length(perms) == 0
error("Cannot create a group: the list of permutations is empty")
end
Sym = Gl.SymmetricGroup(length(perms[1]))
gap_gens = [Gl.PermList(to_gap(perm)) for perm in perms]
Gal = to_gap(gap_gens)
return Gl.Subgroup(Sym,Gal)
end
function perm_to_list(perm::GapObj, n::Int)
return [Int(Gl.OnPoints(i, perm)) for i in 1:n]
end
function to_permutations(G::GapObj)
elems_gap = Gl.Elements(G)
n = Gl.LargestMovedPoint(Gl.Parent(G))
return [perm_to_list(elem, n) for elem in elems_gap]
end
centralizer(G::GapObj) = Gl.Centralizer(Gl.Parent(G), G)
function centralizer(perms::Vector{Vector{Int}})
if length(perms) == 0
return GAP.evalstr( "Group(())" )
end
Sym = Gl.SymmetricGroup(length(perms[1]))
cents = [Gl.Centralizer(Sym, Gl.PermList(to_gap(perm))) for perm in perms]
if length(cents) == 1
return cents[1]
end
return Gl.Intersection(cents...)
end
function block_partition(G::GapObj)
n = Gl.LargestMovedPoint(G)
return Vector{Vector{Int}}(to_julia(Gl.Blocks(G, to_gap(Vector(1:n)))))
end
function all_block_partitions(G::GapObj)
all_blocks = to_julia(Vector{Vector{Int}}, Gl.AllBlocks(G))
n = Gl.LargestMovedPoint(G)
block_partitions = Vector{Vector{Vector{Int}}}([])
for block in all_blocks
block_partition = Gl.Blocks(G, to_gap(Vector{Int}(1:n)), to_gap(block))
push!(block_partitions, to_julia(Vector{Vector{Int}}, block_partition))
end
return block_partitions
end
function action_on_blocks(G::GapObj, block_partition::Vector{Vector{Int}})
blocks_gap = to_gap([to_gap(block) for block in block_partition])
return Gl.Action(G, blocks_gap, Gl.OnSets)
end
function action_on_block(
G::GapObj,
block_partition::Vector{Vector{Int}},
block_id::Int
)
n_blocks = length(block_partition)
B = block_partition[block_id]
block_size = length(B)
elems = Set(vcat(block_partition...))
remaining = collect(setdiff(elems, Set(B)))
new_blocks = to_gap([to_gap(sort(vcat([B[j]], remaining))) for j in 1:block_size])
S = Gl.Stabilizer(G, to_gap(B), Gl.OnSets)
return Gl.Action(S, new_blocks, Gl.OnSets)
end
function action_on_given_blocks(
G::GapObj,
block_partition::Vector{Vector{Int}},
block_ids::Vector{Int}
)
Bs = sort(vcat(block_partition[block_ids]...))
set_size = length(Bs)
elems = Set(vcat(block_partition...))
remaining = collect(setdiff(elems, Set(Bs)))
new_blocks = to_gap([to_gap(sort(vcat([Bs[j]], remaining))) for j in 1:set_size])
S = Gl.Stabilizer(G, to_gap(Bs), Gl.OnSets)
return Gl.Action(S, new_blocks, Gl.OnSets)
end
# Intersection of all block stabilizers
function kernel_of_action_on_blocks(G::GapObj, block_partition::Vector{Vector{Int}})
B1 = block_partition[1]
K = Gl.Stabilizer(G, to_gap(B1), Gl.OnSets)
n_blocks = length(block_partition)
for i = 2:n_blocks
Bi = block_partition[i]
Si = Gl.Stabilizer(G, to_gap(Bi), Gl.OnSets)
K = Gl.Intersection(K, Si)
end
return K
end
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | code | 130 | using DecomposingPolynomialSystems
using Test
@testset "DecomposingPolynomialSystems.jl" begin
# Write your tests here.
end
| DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | docs | 1428 | # DecomposingPolynomialSystems.jl
[](https://multivariatepolynomialsystems.github.io/DecomposingPolynomialSystems.jl/dev)
DecomposingPolynomialSystems.jl is a Julia package that computes the symmetries that fix the parameters (specifically, the group of deck transformations) of a parametric polynomial system with finitely many solutions, with a view towards decomposing the given polynomial system.
## Installation
Enter the Pkg REPL by pressing `]` from the Julia REPL and then type
```julia
add https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git
```
To get back to the Julia REPL, press backspace.
## Usage
### Computing symmetries
```julia
using DecomposingPolynomialSystems
@var x[1:2] p[1:2]
F = System([x[1]^2 - x[2]^2 - p[1], 2*x[1]*x[2] - p[2]]; variables=x, parameters=p)
symmetries_fixing_parameters(F; degree_bound=1, param_dep=false)
```
The last command returns an object of type `DeckTransformationGroup` that contains 4 deck transformations acting on the unknowns `x₁`, `x₂` of the polynomial system `F`:
```
DeckTransformationGroup of order 4
structure: C2 x C2
action:
1st map:
x₁ ↦ x₁
x₂ ↦ x₂
2nd map:
x₁ ↦ -x₁
x₂ ↦ -x₂
3rd map:
x₁ ↦ im*x₂
x₂ ↦ -im*x₁
4th map:
x₁ ↦ -im*x₂
x₂ ↦ im*x₁
```
where `im` is the imaginary unit.
### Computing invariants
TBW
### Decomposition
TBW | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | docs | 444 | # Introduction
DecomposingPolynomialSystems.jl is a Julia package for decomposing systems of polynomial equations, i.e. representing a possibly complicated polynomial system as an equivalent sequence of simpler polynomial systems.
## Quick start
TBW
## Contents
```@contents
Pages = [
"sampling.md",
]
Depth = 2
```
**Computing symmetries**
```@contents
Pages = [
"symmetries/scalings.md",
"symmetries/deck.md",
]
Depth = 2
``` | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | docs | 754 | # Sampling Polynomial Systems
In this Julia package we deal with parametric polynomial systems with finitely many solutions for generic parameters. We use [`HomotopyContinuation.jl`](https://www.juliahomotopycontinuation.org/) to sample such polynomial systems.
## Run monodromy
```@docs
run_monodromy
```
## SampledSystem
`SampledSystem` is a struct type that initially contains a polynomial system, the result of monodromy computations, and the solution–parameter samples obtained with [`run_monodromy`](@ref) or [`sample!`](@ref).
```@docs
unknowns
parameters
variables
nunknowns
nparameters
nvariables
nsolutions
samples
ninstances
nsamples
monodromy_permutations
block_partitions
deck_permutations
```
## Sample system
```@docs
sample!
``` | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | docs | 295 | # Deck transformations
Deck transformations of a parametric polynomial system are the birational maps that fix the parameters of the polynomial system.
## Computing deck transformations
```@docs
symmetries_fixing_parameters
```
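For example, reusing the system from the package README:
```julia
using DecomposingPolynomialSystems
@var x[1:2] p[1:2]
F = System([x[1]^2 - x[2]^2 - p[1], 2*x[1]*x[2] - p[2]]; variables=x, parameters=p)
symmetries_fixing_parameters(F; degree_bound=1, param_dep=false)
```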
## DeckTransformationGroup
```@docs
DeckTransformationGroup
``` | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 1.0.0 | 0e592b369bff4c1d9ac484c2ee8603e6353f9d5d | docs | 260 | # Scaling symmetries
Scaling symmetries of a (parametric) polynomial system are the maps that act by scaling individual variables of the system.
## Computing scaling symmetries
```@docs
scaling_symmetries
```
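For example, on the system from the package README (a sketch; calling `scaling_symmetries(F)` with default options is assumed here, see the docstring above):
```julia
using DecomposingPolynomialSystems
@var x[1:2] p[1:2]
F = System([x[1]^2 - x[2]^2 - p[1], 2*x[1]*x[2] - p[2]]; variables=x, parameters=p)
scaling_symmetries(F)
```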
## ScalingGroup
```@docs
ScalingGroup
``` | DecomposingPolynomialSystems | https://github.com/MultivariatePolynomialSystems/DecomposingPolynomialSystems.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 444 | using Documenter
using Literate
using TaskMaster
Literate.markdown(joinpath(@__DIR__, "../examples/introduction.jl"), joinpath(@__DIR__,"src/"); credit = false, name = "index")
Literate.markdown(joinpath(@__DIR__, "../examples/adaptive.jl"), joinpath(@__DIR__,"src/"); credit = false, name = "adaptive")
makedocs(sitename="TaskMaster.jl",pages = ["index.md","adaptive.md"])
deploydocs(
repo = "github.com/akels/TaskMaster.jl.git",
)
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 1326 | # # Adaptive
# Here I show how one can use the [Adaptive.jl](https://github.com/akels/Adaptive.jl) wrapper for the [Python adaptive package](https://github.com/python-adaptive/adaptive) to make adaptively sampled figures. Before we start, let's set up our environment by loading packages:
using Distributed
addprocs(2)
using TaskMaster
using Adaptive
using PyPlot
# where Adaptive.jl at the moment needs to be added directly from the GitHub repository.
# ## AdaptiveLearner1D
@everywhere f(x) = exp(-x^2)
fig = figure()
x = collect(range(-2,stop=2,length=200))
plot(x,f.(x),label=L"e^{-x^2}")
xx = collect(range(-2,stop=2,length=20))
plot(xx,f.(xx),".-",label="even sampling")
master = WorkMaster(f)
learner1d = AdaptiveLearner1D((-2,+2))
loop = Loop(master,learner1d)
evaluate!(loop,1:20)
plot(learner1d.x,learner1d.y,".-",label="AdaptiveLearner1D")
legend()
savefig("learner1d.svg")
# 
# ## AdaptiveLearner2D
@everywhere f(p) = exp(-p[1]^2 - p[2]^2)
master = WorkMaster(f)
learner2d = AdaptiveLearner2D([(-3,+3),(-3,+3)])
loop = Loop(master,learner2d)
evaluate!(loop,learner->learner.loss()<0.05)
fig = figure()
p,tri,v = learner2d.points, learner2d.vertices, learner2d.values
tricontourf(p[:,1],p[:,2],tri.-1,v)
triplot(p[:,1],p[:,2],tri.-1,"k.-")
savefig("learner2d.svg")
# 
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 6522 | # # Introduction
# A very daunting thing to program is a feedback loop with parallelism. Parallelism introduces stochasticity, and thus debugging a feedback loop, or the part which learns in such a system, is painful. On the other hand, we have so many different kinds of systems which implement parallelism - processes, threads, GPUs, job schedulers, GRID, etc. And so one ends up writing non-reusable code case by case.
# A TaskMaster is an abstraction for all of those issues through two critical concepts - Master and Learner. Master is a process which takes input values from a channel `Master.tasks`, evaluates them in an arbitrary fashion/order, and puts the results in the `Master.results` channel as tuples (input, output). One makes a concrete implementation which uses processes, threads, GPUs, TPUs, job schedulers, etc. for evaluation. Or one can treat Master as some process which comes from a piece of experimental equipment, for example, from a multi-head scanning tunnelling microscope. Or one could try to find optimal parameters for plant growth, where parallelism is very natural.
# The other concept is Learner, which tries to learn from the Master by asking questions and receiving answers. The nature of the setup is that the Master receives multiple questions and answers them in arbitrary order. Thus the Learner needs to be smart to optimize the objective. Again, the Learner could be a computer program, an animal (if you can teach them parallelism), or a human pressing buttons.
# Particularly in the case of a computer program, there is quite a variety. There is a class of learners where the programmer has programmed all the cases of how the system should behave. My Python colleagues provide a great example in the [adaptive package](https://github.com/python-adaptive/adaptive), which allows adaptive function evaluation to reduce the computational needs of making a beautiful figure (see Adaptive.jl for a wrapper). Other types of learners have been brainstormed in [this reddit thread](https://www.reddit.com/r/dataisbeautiful/comments/b8vv2p/i_wrote_a_python_package_to_do_adaptive_sampling/). Another class which might gain traction is a machine-learned Learner, for example, a plant state recognition algorithm with some ML to optimize the growth.
# # Interface
# For what follows we need to load and execute:
using Distributed
addprocs(2)
# before loading TaskMaster. That would give us two workers to proceed.
# To load the package, install it from the Julia central registry and execute:
using TaskMaster
# further on, we assume that it is loaded.
# ## Learner
# To see how to implement Learner, let's consider IgnorantLearner from the package. To initiate it, we do:
learner = IgnorantLearner(1:4)
# The first part of the interface is the asking part. To `ask!` a point, we execute:
x1 = ask!(learner,4)
x2 = ask!(learner,2)
# x1 and x2 now give us the points which the Learner thinks are most beneficial for reaching the objective. The numbers 4 and 2 represent the input, for example a random number, thereby keeping the Learner itself free of randomness. That is particularly useful when one wants to debug the Learner from output values alone (see the debugging section).
# The second part of the interface is telling the Learner about the points the Master has evaluated. Let's say that x2=3 and thus y2=9 represent the results of the evaluation. Then to tell the Learner about them we do:
tell!(learner,(3,9))
# This affects the state of the Learner, giving us better predictions of where exciting things will happen, even while x1 is still being evaluated (that is the reason why ask is written with an exclamation mark, ask!). IgnorantLearner, as the name implies, ignores this knowledge and proceeds with evaluating the points given by its iterator.
# ## Master
# To see how Master works, let's consider WorkMaster from the package. To initiate it, we need to define a function on all workers and then start Master:
@everywhere f(x) = x^2
master = WorkMaster(f)
# Now we can evaluate the function with specific values as simply as:
put!(master.tasks,3)
put!(master.tasks,5)
# And take out the results:
take!(master.results)
# As one can see, it is pretty straightforward to make a ThreadMaster and other different kinds of Master implementations.
# ## Loop
# The third and final concept is the Loop, which represents the execution of the Learner on the Master. That can be initiated as follows:
master = WorkMaster(f)
learner = IgnorantLearner(1:10)
loop = Loop(master,learner)
# Also, if one wishes to follow the learning process, it is possible to pass a function `iterhook(::Loop)` which is executed in every iteration, using the constructor `Loop(::Master,::Learner,iterhook(::Loop))`.
# Now the central part is the execution. If one knows beforehand the collection on which one wants to execute the Loop (so it is finite), one can do:
output1 = evaluate!(loop,1:4)
# Often, however, one wants to learn until some convergence criteria are met. That one can also do by passing a stopping condition which is evaluated every time before a new point is asked:
output2 = evaluate!(loop,learner->learner.state==7,5:9)
# which will terminate when the Learner's state is 7. Note that evaluate! continues executing from the previous state of the Loop (thus the exclamation mark !).
# ## Debugging Learner
# Let's imagine a situation where one had spent hours evaluating the function with a Learner. Looking at the output, the Learner, for some particular reason, seems to have misbehaved. The question then is how one could debug that?
# The package overcomes such a pitfall by ensuring that the `evaluate!` function, which communicates with the Master, is deterministic. This is why it is crucial that `ask!` has no stochasticity inside but, if needed, takes it from the input. That allows us to replay the history and explore the Learner's state as it evolved.
# In TaskMaster that is implemented with the `HistoryMaster` type. To see how to use it, let's apply it on the previous execution:
output = [output1...,output2...]
master = HistoryMaster(output,2)
# where 2 is the number of unresolved points which were allowed during the original Master's run. Now to repeat the history, we do:
learner = IgnorantLearner(1:10)
loop = Loop(master,learner,loop->@show loop.learner.state)
evaluate!(loop,learner->learner.state==7,1:9)
# As you can see, the `iterhook(::Loop)` makes a lot of sense for debugging. Also, HistoryMaster could be useful for writing tests for a Learner, so that when the code changes, one would immediately see the effects of that.
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 468 | module TaskMaster
abstract type Learner end
ask!(learner::Learner) = error("For the runner, the Learner needs to implement the ask! and tell! methods")
tell!(learner::Learner,message) = error("For the runner, the Learner needs to implement the ask! and tell! methods")
abstract type Master end
include("abstractions.jl")
include("workmaster.jl")
include("evaluate.jl")
export captureslave!, releaseslave!
export HistoryMaster, WorkMaster, Loop, IgnorantLearner, ask!, tell!, evaluate!
end
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 1490 | struct WrappedLearner <: Learner
learner
stop
askhook
tellhook
end
WrappedLearner(learner,stop) = WrappedLearner(learner, stop, (x,y)->nothing, (x,y)->nothing)
function ask!(learner::WrappedLearner,input)
if learner.stop(learner.learner)
val = nothing
else
val = ask!(learner.learner,input)
end
learner.askhook(learner.learner,val)
return val
end
function tell!(learner::WrappedLearner,message)
tell!(learner.learner,message)
learner.tellhook(learner.learner,message)
end
### The interface probably needs to be a little different
mutable struct IgnorantLearner <: Learner
iter
state
end
IgnorantLearner(iter) = IgnorantLearner(iter,nothing)
function ask!(learner::IgnorantLearner,input)
if input === nothing
return nothing
end
temp = learner.state === nothing ? iterate(learner.iter) : iterate(learner.iter, learner.state)
if temp === nothing
return nothing
else
val,learner.state = temp
return val
end
end
tell!(learner::IgnorantLearner,m) = nothing
### HistoryMaster replays previously recorded output, which helps to reproduce the state of the Learner deterministically.
mutable struct HistoryMaster <: Master
tasks
results
np
end
nparallel(master::HistoryMaster) = master.np
function HistoryMaster(output,np)
N = length(output)
tasks = Channel{Any}(N)
results = Channel{Tuple{Any,Any}}(N)
for oi in output
put!(results,oi)
end
HistoryMaster(tasks,results,np)
end
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 2108 | struct Loop
master::Master
learner::Learner
iterhook::Function
end
Loop(master::Master,learner::Learner) = Loop(master,learner,x->nothing)
function evaluate!(loop::Loop,inch,outch)
np = nparallel(loop.master)
unresolved = 0
x = 0 ### initial sentinel; needs to be some non-nothing value of ask!'s output type
while true
if x !== nothing && unresolved < np && isopen(inch)
i = take!(inch)
x = ask!(loop.learner,i)
if x !== nothing
put!(loop.master.tasks,x)
unresolved+=1
else
close(inch)
end
else
res = take!(loop.master.results)
tell!(loop.learner,res)
put!(outch,res)
unresolved -= 1
if unresolved==0
break
end
end
loop.iterhook(loop)
end
close(outch)
end
Looptr(master,learner) = inch -> Channel() do outch
loop = Loop(master,learner)
evaluate!(loop, inch, outch)
end
"""
Evaluates until the Learner stops the loop by returning `nothing` from `ask!`, which closes the input channel.
"""
function evaluate!(loop::Loop)
inch = Channel(1)
@async while true
put!(inch,true)
end
inch |> Looptr(loop.master,loop.learner) |> collect
end
"""
Easy way to evaluate the Master with a Learner and a stopping condition.
"""
function evaluate!(loop::Loop,stop::Function)
wl = WrappedLearner(loop.learner, stop, (x,y)->nothing, (x,y)->nothing)
loop = Loop(loop.master,wl,loop.iterhook)
evaluate!(loop)
end
### Channel(Map(identity)) is the only place where I use transducers.
"""
Evaluates until the Learner closes the input channel or the iterator ends. The iterator can also be used to pass random numbers to the Learner.
"""
function evaluate!(loop::Loop,stop::Function,iter)
wl = WrappedLearner(loop.learner, stop, (x,y)->nothing, (x,y)->nothing)
loop = Loop(loop.master,wl,loop.iterhook)
inch = Channel(1)
@async begin
for i in iter
put!(inch,i)
end
close(inch)
end
inch |> Looptr(loop.master,loop.learner) |> collect
end
evaluate!(loop::Loop,iter) = evaluate!(loop,learner->false,iter)
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 1930 | using Distributed
mutable struct WorkMaster <: Master
tasks
results
slaves
end
nparallel(master::WorkMaster) = length(master.slaves)
"""
Releases one worker (the next one to take a new task) from the duty of calculating function values given to it by the Master. Returns the worker's pid.
"""
function releaseslave!(master::WorkMaster)
@assert length(master.slaves)>0
put!(master.tasks,nothing)
while true
for i in 1:length(master.slaves)
### find the first worker which is free
if isready(master.slaves[i])
pid = master.slaves[i].where
deleteat!(master.slaves,i)
return pid
end
end
end
end
"""
Releases all workers from the Master's duties.
"""
function releaseall!(master::WorkMaster)
for s in master.slaves
put!(master.tasks,nothing)
end
for s in master.slaves
wait(s)
end
pids = [s.where for s in master.slaves]
master.slaves = []
return pids
end
"""
Gives the slave the duty of following the orders of its new Master.
"""
function captureslave!(pid,f::Function,master::WorkMaster)
tasks, results = master.tasks, master.results
wp = @spawnat pid begin
while true
x = take!(tasks)
if x === nothing
break
else
y = f(x)
put!(results,(x,y))
end
end
end
push!(master.slaves,wp)
end
WorkMaster(tasks,results) = WorkMaster(tasks,results,[])
function WorkMaster(f::Function,wpool::AbstractWorkerPool)
tasks = RemoteChannel(()->Channel{Any}(10))
results = RemoteChannel(()->Channel{Tuple{Any,Any}}(10))
master = WorkMaster(tasks,results)
for p in wpool.workers
captureslave!(p,f,master)
end
return master
end
WorkMaster(f::Function) = WorkMaster(f,WorkerPool(nprocs()==1 ? [1] : workers()))
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | code | 1066 | using Distributed
addprocs(2)
using TaskMaster
@everywhere f(x) = x^2
@info "Testing evaluate"
master = WorkMaster(f)
learner = IgnorantLearner(1:10)
loop = Loop(master,learner)
evaluate!(loop)
@info "Testing evaluate with source"
master = WorkMaster(f)
learner = IgnorantLearner(1:10)
loop = Loop(master,learner)
output1 = evaluate!(loop,1:6) ### In a way looks like a transducer
@info "Testing the debugger for Learner"
master = HistoryMaster(output1,length(master.slaves))
learner = IgnorantLearner(1:10)
loop = Loop(master,learner,loop->println("Learner state $(loop.learner.state)"))
output2 = evaluate!(loop,1:6)
@info "Testing evaluate with stopping condition"
master = WorkMaster(f)
learner = IgnorantLearner(1:10)
loop = Loop(master,learner)
evaluate!(loop,l->l.state==4)
@info "Testing capturing and releasing of the slave"
@everywhere f(x) = x^2
learner = IgnorantLearner(1:10)
master = WorkMaster(f,WorkerPool())
captureslave!(2,f,master)
captureslave!(3,f,master)
@show releaseslave!(master)
@show releaseslave!(master)
@info "Success!!!"
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 0.1.0 | a67577e309dd5fb21c0065aa0f7d573bdca68762 | docs | 2558 | # TaskMaster
[](https://akels.github.io/TaskMaster.jl/stable)
[](https://akels.github.io/TaskMaster.jl/dev)
[](https://travis-ci.org/akels/TaskMaster.jl)
A very daunting thing to program is a feedback loop with parallelism. Parallelism introduces stochasticity, and thus debugging a feedback loop, or the part which learns in such a system, is painful. On the other hand, we have so many different kinds of systems which implement parallelism - processes, threads, GPUs, job schedulers, GRID, etc. And so one ends up writing non-reusable code case by case.
A TaskMaster is an abstraction for all of those issues through two critical concepts - Master and Learner. Master is a process which takes input values from a channel `Master.tasks`, evaluates them in an arbitrary fashion/order, and puts the results in the `Master.results` channel as tuples (input, output). One makes a concrete implementation which uses processes, threads, GPUs, TPUs, job schedulers, etc. for evaluation. Or one can treat Master as some process which comes from a piece of experimental equipment, for example, from a multi-head scanning tunnelling microscope. Or one could try to find optimal parameters for plant growth, where parallelism is very natural. A rough sketch of this channel convention is shown below.
The other concept is Learner, which tries to learn from the Master by asking questions and receiving answers. The nature of the setup is that the Master receives multiple questions and answers them in arbitrary order. Thus the Learner needs to be smart to optimize the objective. Again, the Learner could be a computer program, an animal (if you can teach them parallelism), or a human pressing buttons.
Particularly in the case of a computer program, there is quite a variety. There is a class of learners where the programmer has programmed all the cases of how the system should behave. My Python colleagues provide a great example in the [adaptive package](https://github.com/python-adaptive/adaptive), which allows adaptive function evaluation to reduce the computational needs of making a beautiful figure (see Adaptive.jl for a wrapper). Other types of learners have been brainstormed in [this reddit thread](https://www.reddit.com/r/dataisbeautiful/comments/b8vv2p/i_wrote_a_python_package_to_do_adaptive_sampling/). Another class which might gain traction is a machine-learned Learner, for example, a plant state recognition algorithm with some ML to optimize the growth.
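As a rough sketch of the channel convention described above (this `ToyMaster` is illustrative only and not part of the package API), a custom Master could look like this:
```julia
using TaskMaster
# A toy Master that evaluates f(x) in a background task: it reads inputs
# from `tasks` and writes `(input, output)` tuples to `results`.
struct ToyMaster <: TaskMaster.Master
    tasks::Channel{Any}
    results::Channel{Tuple{Any,Any}}
end
function ToyMaster(f)
    tasks = Channel{Any}(10)
    results = Channel{Tuple{Any,Any}}(10)
    @async for x in tasks
        put!(results, (x, f(x)))
    end
    return ToyMaster(tasks, results)
end
master = ToyMaster(x -> x^2)
put!(master.tasks, 3)
take!(master.results)  # (3, 9)
```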
| TaskMaster | https://github.com/akels/TaskMaster.jl.git |
|
[
"MIT"
] | 1.4.0 | ea3e54c2bdde39062abf5a9758a23735558705e1 | code | 3022 | module TruncatedStacktraces
using InteractiveUtils, MacroTools, Preferences
const DISABLE = @load_preference("disable", true) || VERSION ≥ v"1.10"
const VERBOSE = Ref(parse(Bool, get(ENV, "CI", string(DISABLE))))
VERBOSE_MSG = """
Some of the types have been truncated in the stacktrace for improved reading. To emit complete information
in the stack trace, evaluate `TruncatedStacktraces.VERBOSE[] = true` and re-run the code."""
function __init__()
@static if !DISABLE
for type in InteractiveUtils.subtypes(Exception)
if type == MethodError
Base.Experimental.register_error_hint(type) do io, e, args, kwargs
!VERBOSE[] && println(io, VERBOSE_MSG)
end
else
Base.Experimental.register_error_hint(type) do io, e
!VERBOSE[] && println(io, VERBOSE_MSG)
end
end
end
end
end
"""
@truncate_stacktrace MyCustomType short_display_ordering...
Convenience Macro to generate `Base.show` for `::Type{MyCustomType{...}}`. For example, lets
say you have the following struct.
```julia
struct MyCustomType{A, B, C}
a::A
b::B
c::C
end
```
Invoking `@truncate_stacktrace MyCustomType 3 1` generates the following code block
automatically:
```julia
function Base.show(io::IO, t::Type{<:(MyCustomType){var"##301", var"##302", var"##303"}}; ) where {var"##301", var"##302", var"##303"}
if TruncatedStacktraces.VERBOSE[]
invoke(show, Tuple{IO, Type}, io, t)
else
print(io, string(MyCustomType) * "{" * join([var"##303", var"##301"], ", ") * ", " * "…}")
end
end
```
"""
macro truncate_stacktrace(l::Symbol, short_display...)
@static if !DISABLE
l = getproperty(__module__, l)
pcount = __get_parameter_count(l)
@assert __maximum(short_display, pcount) <= pcount &&
__minimum(short_display, 1) >= 1
name = :(Base.show)
whereparams = ntuple(_ -> gensym(), pcount)
args = Any[:(io::IO), :(t::Type{<:$l{$(whereparams...)}})]
kwargs = []
body = quote
wparams = [$(whereparams[[short_display...]]...)]
any_not_defined = any(!@isdefined(w) for w in wparams)
if TruncatedStacktraces.VERBOSE[] || any_not_defined
invoke(show, Tuple{IO, Type}, io, t)
else
print(io,
string($l) * "{" * join(wparams, ",") *
$(length(short_display) == 0 ? "" : ",") * "…}")
end
end
fdef = Dict(:name => name, :args => args, :kwargs => kwargs, :body => body,
:whereparams => whereparams)
return MacroTools.combinedef(fdef)
end
end
__maximum(x, ::Int) = maximum(x)
__maximum(::Tuple{}, t::Int) = t
__minimum(x, ::Int) = minimum(x)
__minimum(::Tuple{}, ::Int) = 1
function __get_parameter_count(T::Union{DataType, UnionAll})
length(Base.unwrap_unionall(T).parameters)
end
end
| TruncatedStacktraces | https://github.com/SciML/TruncatedStacktraces.jl.git |
|
[
"MIT"
] | 1.4.0 | ea3e54c2bdde39062abf5a9758a23735558705e1 | code | 1916 |
# default is disable = true, so explicitly enable first
using Preferences, UUIDs
Preferences.set_preferences!(UUID("781d530d-4396-4725-bb49-402e4bee1e77"), "disable" => false)
using Test, TruncatedStacktraces
@testset "Test that VERBOSE can remove the notice message" begin
TruncatedStacktraces.VERBOSE[] = false
error_msg = Ref{String}()
try
x
catch e
io = IOBuffer()
showerror(io, e)
error_msg[] = String(take!(io))
end
@static if VERSION >= v"1.9.0-rc1"
actual_error_msg = "UndefVarError: `x` not defined" *
"\n\nSome of the types have been truncated in the" *
" stacktrace for improved reading. To emit complete " *
"information\nin the stack trace, evaluate " *
"`TruncatedStacktraces.VERBOSE[] = true` and re-run the code.\n"
else
actual_error_msg = "UndefVarError: x not defined" *
"\n\nSome of the types have been truncated in the" *
" stacktrace for improved reading. To emit complete " *
"information\nin the stack trace, evaluate " *
"`TruncatedStacktraces.VERBOSE[] = true` and re-run the code.\n"
end
# Printing the hint message is broken in Julia 1.6
@static if v"1.6" <= VERSION < v"1.7"
@test_broken error_msg[] == actual_error_msg
else
@test error_msg[] == actual_error_msg
end
TruncatedStacktraces.VERBOSE[] = true
try
x
catch e
io = IOBuffer()
showerror(io, e)
error_msg[] = String(take!(io))
end
@static if VERSION >= v"1.9.0-rc1"
actual_error_msg = "UndefVarError: `x` not defined"
else
actual_error_msg = "UndefVarError: x not defined"
end
@test error_msg[] == actual_error_msg
end
| TruncatedStacktraces | https://github.com/SciML/TruncatedStacktraces.jl.git |
|
[
"MIT"
] | 1.4.0 | ea3e54c2bdde39062abf5a9758a23735558705e1 | docs | 10339 | # TruncatedStacktraces.jl: Truncated and Simpler Stacktraces for the Julia Programming Language
Don't you wish Julia stacktraces were simpler? Introducing TruncatedStacktraces.jl! The purpose of this
package is to give package authors a single uniform system for implementing truncation of type printing
in stack traces.
> **Note**
> Starting with Julia v1.10, a similar feature is built into the language, so from that version on this package does nothing!
## Enabling TruncatedStacktraces.jl
TruncatedStacktraces.jl is currently disabled by default, as it causes invalidations which will slow down package loading.
It can be enabled using Preferences.jl. To enable it, create a `LocalPreferences.toml` with the following entry:
```toml
[TruncatedStacktraces]
disable = false
```
Alternatively, you can generate the `LocalPreferences.toml` using:
```julia
using Preferences, UUIDs
using TruncatedStacktraces
Preferences.set_preferences!(TruncatedStacktraces, "disable" => false)
# OR if you don't want to load TruncatedStacktraces.jl
Preferences.set_preferences!(UUID("781d530d-4396-4725-bb49-402e4bee1e77"), "disable" => false)
```
In either case, you need to reload your packages (depending on TruncatedStacktraces) for the
change to take effect.
**TruncatedStacktraces is known to create invalidations; to remove these, simply set the preference to disable it!**
## Users: How to Interact with TruncatedStacktraces.jl
If a package you are using is making use of TruncatedStacktraces.jl, you will see shorter stack traces. Everything
is easier to read by default! This looks like:
```julia
[14] initialize!(integrator::ODEIntegrator{true, Tsit5{Static.False, …}, Vector{Float64}, Float64, …}, cache::Tsit5Cache{Vector{Float64}, …})
@ OrdinaryDiffEq C:\Users\accou\.julia\packages\OrdinaryDiffEq\0Pm1I\src\perform_step\low_order_rk_perform_step.jl:766
```
But if you want to see the type in full glory, say to share with developers on Discourse, then you can opt to show
the entire stacktrace by simply running:
```julia
TruncatedStacktraces.VERBOSE[] = true
```
then if you run the code to error again, it will print out exactly what everyone wants to read:
```julia
[14] initialize!(integrator::OrdinaryDiffEq.ODEIntegrator{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, true, Vector{Float64}, Nothing, Float64, SciMLBase.NullParameters, Float64, Float64, Float64, Float64, Vector{Vector{Float64}}, ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, SciMLBase.NullParameters, ODEFunction{true, SciMLBase.AutoSpecialize, FunctionWrappersWrappers.FunctionWrappersWrapper{Tuple{FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{Float64}, Vector{Float64}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{Float64}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}}, false}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.InterpolationData{ODEFunction{true, SciMLBase.AutoSpecialize, FunctionWrappersWrappers.FunctionWrappersWrapper{Tuple{FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{Float64}, Vector{Float64}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{Float64}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}}, false}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.Tsit5Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}, 
typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, DiffEqBase.DEStats, Nothing}, ODEFunction{true, SciMLBase.AutoSpecialize, FunctionWrappersWrappers.FunctionWrappersWrapper{Tuple{FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{Float64}, Vector{Float64}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, Float64}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{Float64}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}, FunctionWrappers.FunctionWrapper{Nothing, Tuple{Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}, SciMLBase.NullParameters, ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}}}}, false}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, OrdinaryDiffEq.Tsit5Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}, typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.DEOptions{Float64, Float64, Float64, Float64, PIController{Rational{Int64}}, typeof(DiffEqBase.ODE_DEFAULT_NORM), typeof(LinearAlgebra.opnorm), Nothing, CallbackSet{Tuple{}, Tuple{}}, typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), DataStructures.BinaryHeap{Float64, DataStructures.FasterForward}, DataStructures.BinaryHeap{Float64, DataStructures.FasterForward}, Nothing, Nothing, Int64, Tuple{}, Tuple{}, Tuple{}}, Vector{Float64}, Float64, Nothing, OrdinaryDiffEq.DefaultInit}, cache::OrdinaryDiffEq.Tsit5Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}, typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False})
@ OrdinaryDiffEq C:\Users\accou\.julia\packages\OrdinaryDiffEq\0Pm1I\src\perform_step\low_order_rk_perform_step.jl:766
```
Beautiful. You can turn it back into the not beautiful short stacktrace with the command:
```julia
TruncatedStacktraces.VERBOSE[] = false
```
## How to Opt A Package Into TruncatedStacktraces.jl
Opting into TruncatedStacktraces.jl is easy: for every type whose parameters you want truncated when printed,
use the macro `TruncatedStacktraces.@truncate_stacktrace` like:
```julia
TruncatedStacktraces.@truncate_stacktrace ODEProblem 3 1 2
```
where `3 1 2` gives the order of the types to print, with indices corresponding to the original type. For example,
on a type `MyType{T1,T2,T3,T4}`, this will change the stacktrace printing to default to `MyType{T3,T1,T2,…}`.
For any new error exception you add to your package, make sure to include the note from TruncatedStacktraces.jl on
how to affect the type printing. This is done by adding `println(io, VERBOSE_MSG)` to the bottom of any error message.
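For instance, a minimal sketch for a hypothetical `MyError` type (the exception and its message are made up for illustration):
```julia
using TruncatedStacktraces
struct MyError <: Exception
    msg::String
end
function Base.showerror(io::IO, err::MyError)
    print(io, "MyError: ", err.msg)
    # append the note about truncated stacktraces, as recommended above
    println(io, TruncatedStacktraces.VERBOSE_MSG)
end
```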
## Default values
* `TruncatedStacktraces.VERBOSE[]` defaults to `false` for non-CI workflows and to `true` for CI jobs.
* `TruncatedStacktraces.DISABLE` defaults to `true`.
## How It's Implemented
This is done by writing an overload on `Base.show` on the DataType which is conditional on `TruncatedStacktraces.VERBOSE[]`.
For example, the following does this for the `SciMLBase.ODEProblem`:
```julia
@static if !TruncatedStacktraces.DISABLE
function Base.show(io::IO,
t::Type{<:ODEProblem{uType, tType, isinplace}}) where {uType, tType, isinplace}
if TruncatedStacktraces.VERBOSE[]
invoke(show, Tuple{IO, Type}, io, t)
else
print(io, "ODEProblem{$isinplace,$uType,$tType,…}")
end
end
end
```
## FAQ: Why is this not in Base Julia?
There are attempts like https://github.com/JuliaLang/julia/pull/48444, but no one agrees on what exactly to do and how to
make it perfect. So until people agree, we can use this solution as a nice hack that gets the job done 90%.
## Related Projects
Check out https://github.com/BioTurboNick/AbbreviatedStackTraces.jl which doesn't change type printing but instead the
number of calls which are shown.
| TruncatedStacktraces | https://github.com/SciML/TruncatedStacktraces.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 853 | # Full model sampling, follows prescription from jax-ns paper
using BenchmarkTools
using CSV
using NestedSamplers
using PythonCall
using Statistics
using StatsBase
rows = []
dims = [2, 4, 8, 16, 32]
for D in dims
model, true_lnZ = Models.CorrelatedGaussian(D)
splr = Nested(D, 50D; proposal=Proposals.Slice(), bounds=Bounds.Ellipsoid)
# run once to extract values from state, also precompile
ch, state = sample(model, splr; dlogz=0.01, chain_type=Array)
lnZ = state.logz
lnZstd = state.logzerr
tt = @belapsed sample($model, $splr; dlogz=0.01, chain_type=Array)
dlnZ = abs(true_lnZ - lnZ)
# @belapsed already returns a single (minimum) elapsed time, so no median is needed
row = (; library="NestedSamplers.jl", D, t=tt, lnZ, lnZstd, dlnZ)
@info "$row"
push!(rows, row)
end
path = joinpath(@__DIR__, "sampling_results.csv")
CSV.write(path, rows)
@info "output saved to $path"
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1008 | using Documenter
using NestedSamplers
DocMeta.setdocmeta!(
NestedSamplers,
:DocTestSetup,
:(using NestedSamplers);
recursive=true
)
makedocs(
sitename = "NestedSamplers.jl",
pages = [
"Home" => "index.md",
"Examples" => [
"Gaussian Shells" => "examples/shells.md",
"Correlated Gaussian" => "examples/correlated.md",
"Eggbox" => "examples/eggbox.md",
],
"Benchmarks" => "benchmarks.md",
"API/Reference" => "api.md"
],
format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
modules = [NestedSamplers],
# https://github.com/JuliaLang/julia/pull/37085#issuecomment-683356098
doctestfilters = [
r"{([a-zA-Z0-9]+,\s?)+[a-zA-Z0-9]+}",
r"(Array{[a-zA-Z0-9]+,\s?1}|Vector{[a-zA-Z0-9]+})",
r"(Array{[a-zA-Z0-9]+,\s?2}|Matrix{[a-zA-Z0-9]+})",
]
)
deploydocs(repo = "github.com/TuringLang/NestedSamplers.jl.git", push_preview=true, devbranch="main")
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1014 | module NestedSamplers
using LinearAlgebra
using Random
using Random: AbstractRNG, GLOBAL_RNG
using AbstractMCMC
using AbstractMCMC: AbstractSampler,
AbstractModel,
samples,
save!!
import AbstractMCMC: step,
bundle_samples,
mcmcsample
using Distributions: quantile, UnivariateDistribution
using LogExpFunctions: logaddexp, log1mexp
using MCMCChains: Chains
using ProgressLogging
import StatsBase
export Bounds,
Proposals,
Models,
NestedModel,
Nested
include("model.jl") # The default model for nested sampling
# load submodules
include("bounds/Bounds.jl")
using .Bounds
include("proposals/Proposals.jl")
using .Proposals
include("staticsampler.jl") # The static nested sampler
include("step.jl") # The stepping mechanics (extends AbstractMCMC)
include("sample.jl") # Custom sampling (extends AbstractMCMC)
include("models/Models.jl")
using .Models
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 2523 | struct PriorTransformAndLogLikelihood{T,L}
prior_transform::T
loglikelihood::L
end
function (f::PriorTransformAndLogLikelihood)(u)
v = f.prior_transform(u)
return (v, f.loglikelihood(v))
end
prior_transform(f::PriorTransformAndLogLikelihood, u) = f.prior_transform(u)
function loglikelihood_from_uniform(f::PriorTransformAndLogLikelihood, u)
return last(prior_transform_and_loglikelihood(f, u))
end
prior_transform_and_loglikelihood(f::PriorTransformAndLogLikelihood, u) = f(u)
"""
NestedModel(loglike, prior_transform)
NestedModel(loglike, priors::AbstractVector{<:UnivariateDistribution})
`loglike` must be callable with a signature `loglike(::AbstractVector)` where the length of the vector must match the number of parameters in your model.
`prior_transform` must be a callable with a signature `prior_transform(::AbstractVector)` that returns the transformation from the unit-cube to parameter space. This is effectively the quantile or ppf of a statistical distribution. For convenience, if a vector of `Distribution` is provided (as a set of priors), a transformation function will automatically be constructed using `Distributions.quantile`.
**Note:**
`loglike` is the only function used for likelihood calculations. This means if you want your priors to be used for the likelihood calculations they must be manually included in the `loglike` function.
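## Example
A minimal sketch (the Gaussian likelihood and prior here are purely illustrative):
```julia
using Distributions
loglike(v) = logpdf(Normal(v[1], 1), 1.5) # likelihood of one hypothetical datum
priors = [Uniform(-5, 5)]
model = NestedModel(loglike, priors)
```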
"""
struct NestedModel{F} <: AbstractModel
prior_transform_and_loglikelihood::F
end
function NestedModel(loglike, prior_transform)
return NestedModel(PriorTransformAndLogLikelihood(prior_transform, loglike))
end
function NestedModel(loglike, priors::AbstractVector{<:UnivariateDistribution})
prior_transform(X) = quantile.(priors, X)
return NestedModel(loglike, prior_transform)
end
function prior_transform(model, args...)
return first(prior_transform_and_loglikelihood(model, args...))
end
function prior_transform(model::NestedModel{<:PriorTransformAndLogLikelihood}, args...)
return prior_transform(model.prior_transform_and_loglikelihood, args...)
end
function loglikelihood_from_uniform(model, args...)
return last(prior_transform_and_loglikelihood(model, args...))
end
function loglikelihood_from_uniform(model::NestedModel{<:PriorTransformAndLogLikelihood}, args...)
return loglikelihood_from_uniform(model.prior_transform_and_loglikelihood, args...)
end
function prior_transform_and_loglikelihood(model::NestedModel, args...)
return model.prior_transform_and_loglikelihood(args...)
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1322 | ###
### Interface Implementations for running full sampling loops
###
using Printf
StatsBase.sample(rng::AbstractRNG, model::AbstractModel, sampler::Nested; kwargs...) =
mcmcsample(rng, model, sampler, nested_isdone; progressname="Nested Sampling", chain_type=Chains, kwargs...)
StatsBase.sample(model::AbstractModel, sampler::Nested; kwargs...) =
StatsBase.sample(GLOBAL_RNG, model, sampler; kwargs...)
function nested_isdone(rng, model, sampler, samples, state, i; progress=true, maxiter=Inf, maxcall=Inf, dlogz=0.5, maxlogl=Inf, kwargs...)
# 1) iterations exceeds maxiter
done_sampling = state.it ≥ maxiter
# 2) number of loglike calls has been exceeded
done_sampling |= state.ncall ≥ maxcall
# 3) remaining fractional log-evidence below threshold
logz_remain = maximum(state.logl) - state.it / sampler.nactive
delta_logz = logaddexp(state.logz, logz_remain) - state.logz
done_sampling |= delta_logz ≤ dlogz
# 4) last dead point loglikelihood exceeds threshold
done_sampling |= state.logl_dead ≥ maxlogl
# 5) number of effective samples
# TODO
if progress
str = @sprintf "iter=%d\tncall=%d\tΔlogz=%.2g\tlogl=%.2g\tlogz=%.2g" i state.ncall delta_logz state.logl_dead state.logz
print("\r\33[2K", str)
end
return done_sampling
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 3797 | # Sampler and model implementations
struct Nested{B, P <: AbstractProposal} <: AbstractSampler
ndims::Int
nactive::Int
bounds::B
enlarge::Float64
update_interval::Int
min_ncall::Int
min_eff::Float64
proposal::P
dlv::Float64
end
"""
Nested(ndims, nactive;
bounds=Bounds.MultiEllipsoid,
proposal=:auto,
enlarge=1.25,
update_interval=default_update_interval(proposal, ndims),
min_ncall=2nactive,
min_eff=0.10)
Static nested sampler with `nactive` active points and `ndims` parameters.
`ndims` is equivalent to the number of parameters to fit, which defines the dimensionality of the prior volume used in evidence sampling. `nactive` is the number of live or active points in the prior volume. This is a static sampler, so the number of live points will be constant for all of the sampling.
## Bounds and Proposals
`bounds` declares the Type of [`Bounds.AbstractBoundingSpace`](@ref) to use in the prior volume. The available bounds are described by [`Bounds`](@ref). `proposal` declares the algorithm used for proposing new points. The available proposals are described in [`Proposals`](@ref). If `proposal` is `:auto`, will choose the proposal based on `ndims`
* `ndims < 10` - [`Proposals.Rejection`](@ref)
* `10 ≤ ndims ≤ 20` - [`Proposals.RWalk`](@ref)
* `ndims > 20` - [`Proposals.Slice`](@ref)
The original nested sampling algorithm is roughly equivalent to using `Bounds.Ellipsoid` with `Proposals.Rejection`. The MultiNest algorithm is roughly equivalent to `Bounds.MultiEllipsoid` with `Proposals.Rejection`. The PolyChord algorithm is roughly equivalent to using `Proposals.RSlice`.
## Other Parameters
* `enlarge` - When fitting the bounds to live points, they will be enlarged (in terms of volume) by this linear factor.
* `update_interval` - How often to refit the live points with the bounds as a fraction of `nactive`. By default this will be determined using `default_update_interval` for the given proposal
* `Proposals.Rejection` - `1.5`
* `Proposals.RWalk` and `Proposals.RStagger` - `0.15 * walks`
* `Proposals.Slice` - `0.9 * ndims * slices`
* `Proposals.RSlice` - `2 * slices`
* `min_ncall`: The minimum number of iterations before fitting the first bound; used to
avoid shrinking the bounds before burn-in is completed. By default 2*`nactive`.
* `min_eff`: Minimum efficiency `(samples accepted / samples generated)` before fitting the
first bound; used to avoid shrinking the bounds before burn-in is completed. By default 0.1.
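## Example
For instance, mirroring the configuration used in this repository's benchmark script (a single bounding ellipsoid with slice sampling):
```julia
sampler = Nested(4, 200; bounds=Bounds.Ellipsoid, proposal=Proposals.Slice())
```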
"""
function Nested(ndims,
nactive;
bounds = Bounds.MultiEllipsoid,
proposal = :auto,
enlarge = 1.25,
min_ncall=2nactive,
min_eff=0.10,
kwargs...)
nactive < 2ndims && @warn "Using fewer than 2*ndims ($(2ndims)) active points is discouraged"
# get proposal
if proposal === :auto
proposal = if ndims < 10
Proposals.Rejection()
elseif 10 ≤ ndims ≤ 20
Proposals.RWalk()
else
Proposals.Slice()
end
end
dlv = log(nactive + 1) - log(nactive)
update_interval_frac = get(kwargs, :update_interval, default_update_interval(proposal, ndims))
update_interval = round(Int, update_interval_frac * nactive)
return Nested(ndims,
nactive,
bounds,
enlarge,
update_interval,
min_ncall,
min_eff,
proposal,
dlv)
end
default_update_interval(p::Proposals.Rejection, ndims) = 1.5
default_update_interval(p::Proposals.RWalk, ndims) = 0.15 * p.walks
default_update_interval(p::Proposals.RStagger, ndims) = 0.15 * p.walks
default_update_interval(p::Proposals.Slice, ndims) = 0.9 * ndims * p.slices
default_update_interval(p::Proposals.RSlice, ndims) = 2.0 * p.slices
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 8179 |
function step(rng, model, sampler::Nested; kwargs...)
# Initialize particles
# us are in unit space, vs are in prior space
us, vs, logl = init_particles(rng, model, sampler)
# Find least likely point
logl_dead, idx_dead = findmin(logl)
u_dead = us[:, idx_dead]
v_dead = vs[:, idx_dead]
# update weight using trapezoidal rule
logvol = log1mexp(-1 / sampler.nactive)
logwt = logl_dead + logvol
# sample a new live point without bounds
point = rand(rng, eltype(us), sampler.ndims)
bound = Bounds.fit(Bounds.NoBounds, us)
proposal = Proposals.Rejection()
u, v, ll, nc = proposal(rng, point, logl_dead, bound, model)
us[:, idx_dead] .= u
vs[:, idx_dead] .= v
logl[idx_dead] = ll
ncall = since_update = nc
# update evidence and information
logz = logwt
h = logl_dead - logz
logzerr = sqrt(h / sampler.nactive)
logvol -= 1 / sampler.nactive
sample = (u = u_dead, v = v_dead, logwt = logwt, logl = logl_dead)
state = (it = 1, ncall = ncall, us = us, vs = vs, logl = logl, logl_dead = logl_dead,
logz = logz, logzerr = logzerr, h = h, logvol = logvol,
since_update = since_update, has_bounds = false, active_bound = nothing)
return sample, state
end
function step(rng, model, sampler, state; kwargs...)
## Update bounds
pointvol = exp(state.logvol) / sampler.nactive
# check if ready for first update
if !state.has_bounds && state.ncall > sampler.min_ncall && state.it / state.ncall < sampler.min_eff
@debug "First update: it=$(state.it), ncall=$(state.ncall), eff=$(state.it / state.ncall)"
active_bound = Bounds.scale!(Bounds.fit(sampler.bounds, state.us, pointvol=pointvol), sampler.enlarge)
since_update = 0
has_bounds = true
# if accepted first update, is it time to update again?
elseif iszero(state.since_update % sampler.update_interval)
@debug "Updating bounds: it=$(state.it), ncall=$(state.ncall), eff=$(state.it / state.ncall)"
active_bound = Bounds.scale!(Bounds.fit(sampler.bounds, state.us, pointvol=pointvol), sampler.enlarge)
since_update = 0
has_bounds = true
else
active_bound = state.active_bound
since_update = state.since_update + 1
has_bounds = state.has_bounds
end
## Replace least-likely active point
# Find least likely point
logl_dead, idx_dead = findmin(state.logl)
u_dead = state.us[:, idx_dead]
v_dead = state.vs[:, idx_dead]
# sample a new live point using bounds and proposal
if has_bounds
point, bound = rand_live(rng, active_bound, state.us)
if isnothing(bound)
# live point not inside active bounds: refit them
active_bound = Bounds.scale!(Bounds.fit(sampler.bounds, state.us, pointvol=pointvol), sampler.enlarge)
since_update = 0
point, bound = rand_live(rng, active_bound, point[:, :])
end
u, v, logl, nc = sampler.proposal(rng, point, logl_dead, bound, model)
else
point = rand(rng, eltype(state.us), sampler.ndims)
bound = Bounds.fit(Bounds.NoBounds, state.us)
proposal = Proposals.Rejection()
u, v, logl, nc = proposal(rng, point, logl_dead, bound, model)
end
state.us[:, idx_dead] .= u
state.vs[:, idx_dead] .= v
state.logl[idx_dead] = logl
it = state.it + 1
ncall = state.ncall + nc
since_update += nc
# update weight
logwt = state.logvol + logl_dead
# update evidence and information
logz = logaddexp(state.logz, logwt)
h = (exp(logwt - logz) * logl_dead +
exp(state.logz - logz) * (state.h + state.logz) - logz)
logzerr = h ≥ 0 ? sqrt(h / sampler.nactive) : NaN
logvol = state.logvol - 1 / sampler.nactive
## prepare returns
sample = (u = u_dead, v = v_dead, logwt = logwt, logl = logl_dead)
state = (it = it, ncall = ncall, us = state.us, vs = state.vs, logl = state.logl, logl_dead = logl_dead,
logz = logz, logzerr = logzerr, h = h, logvol = logvol,
since_update = since_update, has_bounds = has_bounds, active_bound = active_bound)
return sample, state
end
function bundle_samples(samples,
model::AbstractModel,
sampler::Nested,
state,
::Type{Chains};
add_live=true,
param_names=missing,
check_wsum=true,
kwargs...)
if add_live
samples, state = add_live_points(samples, model, sampler, state)
end
vals = mapreduce(t -> hcat(t.v..., exp(t.logwt - state.logz)), vcat, samples)
if check_wsum
wsum = sum(vals[:, end, 1])
err = !iszero(state.logzerr) ? 3 * state.logzerr : 1e-3
isapprox(wsum, 1, atol=err) || @warn "Weights sum to $wsum instead of 1; possible bug"
end
# Parameter names
if param_names === missing
param_names = ["Parameter $i" for i in 1:length(vals[1, :]) - 1]
end
push!(param_names, "weights")
return Chains(vals, param_names, Dict(:internals => ["weights"]), evidence=state.logz), state
end
function bundle_samples(samples,
model::AbstractModel,
sampler::Nested,
state,
::Type{Array};
add_live=true,
check_wsum=true,
kwargs...)
if add_live
samples, state = add_live_points(samples, model, sampler, state)
end
vals = mapreduce(t -> hcat(t.v..., exp(t.logwt - state.logz)), vcat, samples)
if check_wsum
wsum = sum(vals[:, end])
err = !iszero(state.logzerr) ? 3 * state.logzerr : 1e-3
isapprox(wsum, 1, atol=err) || @warn "Weights sum to $wsum instead of 1; possible bug"
end
return vals, state
end
## Helpers
init_particles(rng, ndims, nactive, model) =
init_particles(rng, Float64, ndims, nactive, model)
init_particles(rng, model, sampler) =
init_particles(rng, sampler.ndims, sampler.nactive, model)
# loop and fill arrays, checking validity of points
# will retry 100 times before erroring
function init_particles(rng, T, ndims, nactive, model)
us = rand(rng, T, ndims, nactive)
vs_and_logl = mapslices(
Base.Fix1(prior_transform_and_loglikelihood, model), us;
dims=1
)
vs = mapreduce(first, hcat, vs_and_logl)
logl = dropdims(map(last, vs_and_logl), dims=1)
ntries = 1
while true
any(isfinite, logl) && break
rand!(rng, us)
vs_and_logl .= mapslices(
Base.Fix1(prior_transform_and_loglikelihood, model), us;
dims=1
)
vs .= mapreduce(first, hcat, vs_and_logl)
map!(last, logl, vs_and_logl)
ntries += 1
ntries > 100 && error("After 100 attempts, could not initialize any live points with finite loglikelihood. Please check your prior transform and loglikelihood methods.")
end
# force -Inf to be a finite but small number to keep estimators from breaking
@. logl[logl == -Inf] = -1e300
return us, vs, logl
end
# add remaining live points to `samples`
function add_live_points(samples, model, sampler, state)
logvol = -state.it / sampler.nactive - log(sampler.nactive)
prev_logz = state.logz
prev_h = state.h
local logl, logz, h, logzerr
N = length(samples)
@inbounds for (i, idx) in enumerate(eachindex(state.logl))
# get new point
u = state.us[:, idx]
v = state.vs[:, idx]
logl = state.logl[idx]
# update sampler
logwt = logvol + logl
logz = logaddexp(prev_logz, logwt)
h = (exp(logwt - logz) * logl +
exp(prev_logz - logz) * (prev_h + prev_logz) - logz)
logzerr = sqrt(h / sampler.nactive)
prev_logz = logz
prev_h = h
sample = (u = u, v = v, logwt = logwt, logl = logl)
save!!(samples, sample, N + i, model, sampler)
end
state = (it = state.it + sampler.nactive, us = state.us, vs = state.vs, logl = logl,
logz = logz, logzerr = logzerr, logvol = logvol,
since_update = state.since_update, has_bounds = state.has_bounds, active_bound = state.active_bound)
return samples, state
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 4075 | """
NestedSamplers.Bounds
This module contains the different algorithms for bounding the prior volume.
The available implementations are
* [`Bounds.NoBounds`](@ref) - no bounds on the prior volume (equivalent to a unit cube)
* [`Bounds.Ellipsoid`](@ref) - bound using a single ellipsoid
* [`Bounds.MultiEllipsoid`](@ref) - bound using multiple ellipsoids in an optimal cluster
"""
module Bounds
using LinearAlgebra
using Random: GLOBAL_RNG, AbstractRNG
using Clustering
using Distributions: Categorical, Uniform
using StatsBase: mean_and_cov
export AbstractBoundingSpace, rand_live, randoffset
"""
Bounds.AbstractBoundingSpace{T<:Number}
Abstract type for describing the bounding algorithms. For information about the interface, see the extended help (`??Bounds.AbstractBoundingSpace`)
# Extended Help
## Interface
The following functionality defines the interface for `AbstractBoundingSpace` for an example type `::MyBounds`
| Function | Required | Description |
|---------:|:--------:|:------------|
| `Base.rand(::AbstractRNG, ::MyBounds)` | x | Sample a single point from the prior volume |
| `Bounds.randoffset(::AbstractRNG, ::MyBounds)` | | Get a random offset from the center of the bounds. Required for random walk schemes, although a fallback is provided. |
| `Base.in(point, ::MyBounds)` | x | Checks if the point is contained by the bounding space |
| `Bounds.scale!(::MyBounds, factor)` | x | Scale the volume by the linear `factor`|
| `Bounds.volume(::MyBounds)` | | Retrieve the current prior volume occupied by the bounds. |
| `Bounds.fit(::Type{<:MyBounds}, points, pointvol=0)` | x | update the bounds given the new `points` each with minimum volume `pointvol`|
| `Bounds.axes(::MyBounds)` | | Used for transforming points from the unit cube to the encompassing bound. Worth storing as a property.
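For illustration, here is a hypothetical axis-aligned cube bound sketching the required methods (`pointvol` handling and `randoffset` are omitted for brevity):
```julia
mutable struct CubeBound{T} <: Bounds.AbstractBoundingSpace{T}
    center::Vector{T}
    halfwidth::T
end
Base.ndims(b::CubeBound) = length(b.center)
Base.rand(rng::AbstractRNG, b::CubeBound{T}) where {T} =
    b.center .+ b.halfwidth .* (2 .* rand(rng, T, ndims(b)) .- 1)
Base.in(pt, b::CubeBound) = all(abs.(pt .- b.center) .≤ b.halfwidth)
function Bounds.scale!(b::CubeBound, factor)
    b.halfwidth *= factor^(1 / ndims(b)) # linear factor, so the volume scales by `factor`
    return b
end
Bounds.volume(b::CubeBound) = (2 * b.halfwidth)^ndims(b)
function Bounds.fit(::Type{<:CubeBound}, points::AbstractMatrix{T}; pointvol = 0) where {T}
    lo = vec(minimum(points; dims = 2))
    hi = vec(maximum(points; dims = 2))
    return CubeBound((lo .+ hi) ./ 2, maximum(hi .- lo) / 2)
end
```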
"""
abstract type AbstractBoundingSpace{T <: Number} end
Base.eltype(::AbstractBoundingSpace{T}) where {T} = T
# convenience
Base.rand(B::AbstractBoundingSpace) = rand(GLOBAL_RNG, B)
Base.rand(B::AbstractBoundingSpace, N::Integer) = rand(GLOBAL_RNG, B, N)
randoffset(B::AbstractBoundingSpace) = randoffset(GLOBAL_RNG, B)
# fallback method
Base.rand(rng::AbstractRNG, B::AbstractBoundingSpace, N::Integer) = reduce(hcat, rand(rng, B) for _ in 1:N)
"""
rand_live([rng], ::AbstractBoundingSpace, us) -> (u, bound)
Returns a random live point and the bounds associated with it.
"""
function rand_live(rng::AbstractRNG, B::AbstractBoundingSpace, us)
idx = rand(rng, Base.axes(us, 2))
u = us[:, idx]
return u, u ∈ B ? B : nothing
end
rand_live(B::AbstractBoundingSpace, us) = rand_live(GLOBAL_RNG, B, us)
function Base.show(io::IO, bound::B) where {T,B <: AbstractBoundingSpace{T}}
base = nameof(B) |> string
print(io, "$base{$T}(ndims=$(ndims(bound)))")
return nothing
end
# ---------------------------------------------------
"""
Bounds.NoBounds([T=Float64], N)
Unbounded prior volume; equivalent to the unit cube in `N` dimensions. This matches the original nested sampling derivation in Skilling (2004).[^1]
[^1]: John Skilling, 2004, AIP 735, 395 ["Nested Sampling"](https://aip.scitation.org/doi/abs/10.1063/1.1835238)
"""
struct NoBounds{T} <: AbstractBoundingSpace{T}
ndims::Int
end
NoBounds(D::Integer) = NoBounds{Float64}(D)
NoBounds(T::Type, D::Integer) = NoBounds{T}(D)
Base.ndims(B::NoBounds) = B.ndims
randoffset(rng::AbstractRNG, b::NoBounds{T}) where {T} = rand(rng, Uniform(0, 1), ndims(b)) .- 0.5 .|> T
Base.rand(rng::AbstractRNG, b::NoBounds{T}) where {T} = rand(rng, Uniform(0, 1), ndims(b)) .|> T
Base.rand(rng::AbstractRNG, b::NoBounds{T}, N::Integer) where {T} = rand(rng, Uniform(0, 1), ndims(b), N) .|> T
Base.in(pt, ::NoBounds) = all(p -> 0 < p < 1, pt)
fit(::Type{<:NoBounds}, points::AbstractMatrix{T}; kwargs...) where T =
NoBounds(T, size(points, 1))
scale!(b::NoBounds, factor) = b
volume(::NoBounds{T}) where {T} = one(T)
axes(b::NoBounds{T}) where {T} = Diagonal(ones(T, b.ndims))
include("ellipsoid.jl")
include("multiellipsoid.jl")
end # module Bounds
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 5396 | """
Bounds.Ellipsoid([T=Float64], N)
Bounds.Ellipsoid(center::AbstractVector, A::AbstractMatrix)
An `N`-dimensional ellipsoid defined by
```math
(x - center)^T A (x - center) = 1
```
where `size(center) == (N,)` and `size(A) == (N,N)`.
This implementation follows the algorithm presented in Mukherjee et al. (2006).[^2]
[^2]: Pia Mukherjee, et al., 2006, ApJ 638 L51 ["A Nested Sampling Algorithm for Cosmological Model Selection"](https://iopscience.iop.org/article/10.1086/501068)
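For example, the unit disk as an ellipsoid:
```julia
ell = Bounds.Ellipsoid(zeros(2), [1.0 0.0; 0.0 1.0])
Bounds.volume(ell) ≈ π  # true
rand(ell) in ell        # true
```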
"""
mutable struct Ellipsoid{T} <: AbstractBoundingSpace{T}
center::Vector{T}
A::Matrix{T}
axes::Matrix{T}
axlens::Vector{T}
volume::T
end
function Ellipsoid(center::AbstractVector, A::AbstractMatrix)
axes, axlens = decompose(A)
Ellipsoid(center, A, axes, axlens, _volume(A))
end
Ellipsoid(ndim::Integer) = Ellipsoid(Float64, ndim)
Ellipsoid(T::Type, ndim::Integer) = Ellipsoid(zeros(T, ndim), diagm(0 => ones(T, ndim)))
Ellipsoid{T}(center::AbstractVector, A::AbstractMatrix) where {T} = Ellipsoid(T.(center), T.(A))
Base.broadcastable(e::Ellipsoid) = (e,)
Base.ndims(ell::Ellipsoid) = length(ell.center)
# Returns the volume of an ellipsoid given its axes matrix
_volume(A::AbstractMatrix{T}) where {T} = T(volume_prefactor(size(A, 1))) / sqrt(det(A))
volume(ell::Ellipsoid) = ell.volume
# Returns the principal axes
axes(ell::Ellipsoid) = ell.axes
decompose(A::AbstractMatrix) = decompose(Symmetric(A)) # ensure that eigen() always returns real values
function decompose(A::Symmetric)
E = eigen(A)
axlens = @. 1 / sqrt(E.values)
axes = E.vectors * Diagonal(axlens)
return axes, axlens
end
# axes and axlens
decompose(ell::Ellipsoid) = ell.axes, ell.axlens
# Scale to new volume
function scale!(ell::Ellipsoid, factor)
# linear factor
f = factor^(1 / ndims(ell))
ell.A ./= f^2
ell.axes .*= f
ell.axlens .*= f
ell.volume *= factor
return ell
end
function endpoints(ell::Ellipsoid)
axes, axlens = decompose(ell)
# find major axis
major_axis = axes[:, argmax(axlens)]
return ell.center .- major_axis, ell.center .+ major_axis
end
function Base.in(x::AbstractVector, ell::Ellipsoid)
d = x .- ell.center
return dot(d, ell.A * d) ≤ 1.0
end
randoffset(rng::AbstractRNG, ell::Ellipsoid{T}) where {T} = axes(ell) * randball(rng, T, ndims(ell))
Base.rand(rng::AbstractRNG, ell::Ellipsoid) = ell.center .+ randoffset(rng, ell)
fit(E::Type{<:Ellipsoid}, x::AbstractMatrix{S}; pointvol = 0) where {S} = fit(E{float(S)}, x; pointvol = pointvol)
function fit(E::Type{<:Ellipsoid{R}}, x::AbstractMatrix{S}; pointvol = 0) where {R,S}
T = float(promote_type(R, S))
x = T.(x)
ndim, npoints = size(x)
# single element is an n-sphere with pointvol volume
if npoints == 1
pointvol > 0 || error("Cannot compute bounding ellipsoid with one point without a valid pointvol (got $pointvol)")
d = log(pointvol) - log(volume_prefactor(ndim))
r = exp(d / ndim)
A = diagm(0 => fill(1 / r^2, ndim))
return Ellipsoid(vec(x), A)
end
# get estimators
center, cov = mean_and_cov(x, 2)
delta = x .- center
# Covariance is smaller than r^2 by a factor of 1/(n+2)
cov .*= ndim + 2
# Ensure cov is nonsingular
targetprod = (npoints * pointvol / volume_prefactor(ndim))^2
make_eigvals_positive!(cov, targetprod)
# get transformation matrix. Note: use pinv to avoid error when cov is all zeros
A = pinv(cov)
# calculate the expansion factor necessary to bound each point
f = diag(delta' * (A * delta))
fmax = maximum(f)
# try to avoid round-off errors s.t. furthest point obeys
# x^T A x < 1 - √eps
flex = 1 - sqrt(eps(T))
if fmax > flex
A .*= flex / fmax
end
ell = E(vec(center), A)
if pointvol > 0
minvol = npoints * pointvol
vol = volume(ell)
vol < minvol && scale!(ell, minvol / vol)
end
return ell
end
# ---------------------------------------------
# Helper functions
"""
volume_prefactor(::Integer)
Volume constant for an n-dimensional sphere:
for n even: (2pi)^(n/2) / (2 * 4 * ... * n)
for n odd : 2 * (2pi)^((n-1)/2) / (1 * 3 * ... * n)
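For example, `volume_prefactor(2) ≈ π` (the area of the unit disk) and `volume_prefactor(3) ≈ 4π/3` (the volume of the unit ball).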
"""
function volume_prefactor(n::Integer)
f, range = iseven(n) ? (1.0, 2:2:n) : (2.0, 3:2:n)
for i in range
f *= 2π / i
end
return f
end
# sample N samples from unit D-dimensional ball
randball(T::Type, D::Integer, N::Integer) = randball(GLOBAL_RNG, T, D, N)
function randball(rng::AbstractRNG, T::Type, D::Integer, N::Integer)
z = randn(rng, T, D, N)
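# normalize each Gaussian draw onto the unit sphere, then scale its radius by
# U^(1/D) so the points are uniformly distributed inside the unit D-ball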
z .*= rand(rng, T, 1, N).^(1 ./ D) ./ sqrt.(sum(p->p^2, z, dims = 1))
return z
end
# sample from unit D-dimensional ball
randball(T::Type, D::Integer) = randball(GLOBAL_RNG, T, D)
function randball(rng::AbstractRNG, T::Type, D::Integer)
z = randn(rng, T, D)
z .*= rand(rng)^(1 / D) / sqrt(sum(p->p^2, z))
return z
end
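# Replace (near-)zero eigenvalues so the product of all eigenvalues equals
# `targetprod`, keeping the reconstructed covariance matrix non-singular.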
function make_eigvals_positive!(cov::AbstractMatrix, targetprod)
E = eigen(cov)
mask = E.values .< 1e-10
if any(mask)
nzprod = prod(E.values[.!mask])
nzeros = count(mask)
E.values[mask] .= (targetprod / nzprod)^(1 / nzeros)
cov .= E.vectors * Diagonal(E.values) / E.vectors
end
return cov
end
make_eigvals_positive(cov::AbstractMatrix, targetprod) = make_eigvals_positive!(copy(cov), targetprod)
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 5142 |
"""
Bounds.MultiEllipsoid([T=Float64], ndims)
Bounds.MultiEllipsoid(::AbstractVector{Ellipsoid})
Use multiple [`Ellipsoid`](@ref)s in an optimal clustering to bound prior space. This implementation follows the MultiNest algorithm outlined in Feroz et al. (2008,2009).[^3][^4] For more details about the bounding algorithm, see the extended help (`??Bounds.MultiEllipsoid`)
[^3]: Feroz and Hobson, 2008, MNRAS 384, 2 ["Multimodal nested sampling: an efficient and robust alternative to Markov Chain Monte Carlo methods for astronomical data analyses"](https://academic.oup.com/mnras/article/384/2/449/1023701)
[^4]: Feroz et al., 2009, MNRAS 398, 4 ["MultiNest: an efficient and robust Bayesian inference tool for cosmology and particle physics"](https://academic.oup.com/mnras/article/398/4/1601/981502)
## Extended help
The multiple-ellipsoidal implementation is defined as follows:
1. Fit a [`Bounds.Ellipsoid`](@ref) to the sample.
2. Perform K-means clustering (here using [Clustering.jl](https://github.com/JuliaStats/Clustering.jl)) centered at the endpoints of the bounding ellipsoid. This defines two clusters within the sample.
3. If either cluster has fewer than two points, consider it ill-defined and end any recursion.
4. Fit [`Bounds.Ellipsoid`](@ref) to each of the clusters assigned in (2).
5. If the volume of the parent ellipsoid is more than twice the volume of the two child ellipsoids, recurse (1-5) to each child.
To sample from this bound, an ellipsoid is selected with probability proportional to its volume and a point is drawn uniformly from it; the draw is accepted with probability `1/n`, where `n` is the number of ellipsoids containing the point, which corrects for oversampling in overlapping regions. When selecting an existing live point (see `rand_live`), all of the ellipsoids enclosing the point are found and one of them is chosen at random as the enclosing bound.
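As a sketch of typical usage (points are stored as matrix columns):
```julia
points = randn(2, 500)
bound = Bounds.fit(Bounds.MultiEllipsoid, points)
all(points[:, i] in bound for i in axes(points, 2))  # true
```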
"""
struct MultiEllipsoid{T} <: AbstractBoundingSpace{T}
ellipsoids::Vector{Ellipsoid{T}}
end
MultiEllipsoid(ndims::Integer) = MultiEllipsoid(Float64, ndims)
MultiEllipsoid(T::Type, ndims::Integer) = MultiEllipsoid([Ellipsoid(T, ndims)])
Base.eltype(::MultiEllipsoid{T}) where {T} = T
Base.broadcastable(me::MultiEllipsoid) = (me,)
Base.length(me::MultiEllipsoid) = length(me.ellipsoids)
Base.ndims(me::MultiEllipsoid) = ndims(me.ellipsoids[1])
volume(me::MultiEllipsoid) = sum(volume, me.ellipsoids)
function scale!(me::MultiEllipsoid, factor)
scale!.(me.ellipsoids, factor)
return me
end
function fit(::Type{<:MultiEllipsoid}, x::AbstractMatrix; pointvol = 0)
parent = fit(Ellipsoid, x, pointvol = pointvol)
ells = fit(MultiEllipsoid, x, parent, pointvol = pointvol)
return MultiEllipsoid(ells)
end
function fit(::Type{<:MultiEllipsoid}, x::AbstractMatrix, parent::Ellipsoid; pointvol = 0)
ndim, npoints = size(x)
# Clustering will fail with fewer than k=2 points
npoints ≤ 2 && return [parent]
p1, p2 = endpoints(parent)
starting_points = hcat(p1, p2)
R = kmeans!(x, starting_points; maxiter = 10)
labels = assignments(R)
x1 = x[:, labels .== 1]
x2 = x[:, labels .== 2]
# if either cluster has fewer than 2*ndim points, it is ill-defined
if size(x1, 2) < 2ndim || size(x2, 2) < 2ndim
return [parent]
end
# Getting bounding ellipsoid for each cluster
ell1, ell2 = fit.(Ellipsoid, (x1, x2), pointvol = pointvol)
# If total volume decreased by over half, recurse
if volume(ell1) + volume(ell2) < 0.5volume(parent)
return vcat(fit(MultiEllipsoid, x1, ell1, pointvol = pointvol),
fit(MultiEllipsoid, x2, ell2, pointvol = pointvol))
end
# Otherwise see if total volume is much larger than expected
# and split into more than 2 clusters
if volume(parent) > 2npoints * pointvol
out = vcat(fit(MultiEllipsoid, x1, ell1, pointvol = pointvol),
fit(MultiEllipsoid, x2, ell2, pointvol = pointvol))
sum(volume, out) < 0.5volume(parent) && return out
end
# Otherwise, return single bounding ellipse
return [parent]
end
Base.in(x::AbstractVector, me::MultiEllipsoid) = any(ell->x ∈ ell, me.ellipsoids)
function Base.rand(rng::AbstractRNG, me::MultiEllipsoid)
length(me) == 1 && return rand(rng, me.ellipsoids[1])
vols = volume.(me.ellipsoids)
weights = vols ./ sum(vols)
local x
while true
# Select random ellipsoid
idx = rand(rng, Categorical(weights))
ell = me.ellipsoids[idx]
# Select point
x = rand(rng, ell)
# How many ellipsoids is the sample in
n = count(ell -> x ∈ ell, me.ellipsoids)
# Only accept with probability 1/n
(n == 1 || rand(rng) < 1 / n) && break
end
return x
end
"""
Return a random live point and one of the ellipsoids enclosing it, since a `MultiEllipsoid`
as a whole does not have a single valid set of transformation axes
"""
function rand_live(rng::AbstractRNG, me::MultiEllipsoid, us)
idx = rand(rng, Base.axes(us, 2))
u = us[:, idx]
# find which Ellipsoid/s it overlaps with
idxs = findall(ell -> u ∈ ell, me.ellipsoids)
# TODO if point isn't bounded, update bounds
if isempty(idxs)
return u, nothing
end
# pick random encompassing ellipsoid
idx = rand(rng, idxs)
return u, me.ellipsoids[idx]
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 406 | """
This module contains various statistical models in the form of [`NestedModel`](@ref)s. These models can be used for examples and for testing.
* [`Models.GaussianShells`](@ref)
* [`Models.CorrelatedGaussian`](@ref)
* [`Models.Eggbox`](@ref)
"""
module Models
using ..NestedSamplers
using Distributions
using LinearAlgebra
using LogExpFunctions
include("shells.jl")
include("correlated.jl")
include("eggbox.jl")
end # module
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 982 |
@doc raw"""
Models.CorrelatedGaussian(ndims)
Creates a highly-correlated Gaussian with the given dimensionality.
```math
\mathbf\theta \sim \mathcal{N}\left(2\mathbf{1}, \mathbf{I}\right)
```
```math
\Sigma_{ij} = \begin{cases} 1 &\quad i=j \\ 0.95 &\quad i\neq j \end{cases}
```
```math
\mathcal{L}(\mathbf\theta) = \mathcal{N}\left(\mathbf\theta | \mathbf{0}, \mathbf\Sigma \right)
```
the analytical evidence of the model is
```math
Z = \mathcal{N}\left(2\mathbf{1} | \mathbf{0}, \mathbf\Sigma + \mathbf{I} \right)
```
## Examples
```jldoctest
julia> model, lnZ = Models.CorrelatedGaussian(10);
julia> lnZ
-12.482738597926607
```
"""
function CorrelatedGaussian(ndims)
priors = fill(Normal(2, 1), ndims)
Σ = fill(0.95, ndims, ndims)
Σ[diagind(Σ)] .= 1
cent_dist = MvNormal(Σ)
loglike(X) = logpdf(cent_dist, X)
model = NestedModel(loglike, priors)
true_lnZ = logpdf(MvNormal(fill(2, ndims), Σ + I), zeros(ndims))
return model, true_lnZ
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 571 | @doc raw"""
Models.Eggbox()
Eggbox/egg carton log-likelihood function
```math
\log z(x, y) = \left[2 + \cos\frac{x}{2} \cdot \cos\frac{y}{2}\right]^5
```
where the unit-cube inputs are mapped to ``x, y \in [-5\pi, 5\pi]``.
# Examples
```jldoctest
julia> model, lnZ = Models.Eggbox();
julia> lnZ
235.88
```
"""
function Eggbox()
tmax = 5π
# uniform prior on [0, 1] (identity transform)
prior(X) = X
function loglike(X)
a = cos(tmax * (2 * first(X) - 1) / 2)
b = cos(tmax * (2 * last(X) - 1) / 2)
return (2 + a * b)^5
end
lnZ = 235.88 # reference value, computed numerically for this likelihood
return NestedModel(loglike, prior), lnZ
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 622 | """
Models.GaussianShells()
2-D Gaussian shells centered at `[-3.5, 0]` and `[3.5, 0]` with a radius of 2 and a shell width of 0.1
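Each shell's log-likelihood (implemented in the internal `logshell` helper) is `-log(sqrt(2π * w^2)) - (d - r)^2 / (2 * w^2)`, where `d` is the distance from the shell's center, `r = 2` is the radius, and `w = 0.1` is the width; the two shells are combined with `logaddexp`.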
# Examples
```jldoctest
julia> model, lnZ = Models.GaussianShells();
julia> lnZ
-1.75
```
"""
function GaussianShells()
μ1 = [-3.5, 0]
μ2 = [3.5, 0]
prior(X) = 12 .* X .- 6
loglike(X) = logaddexp(logshell(X, μ1), logshell(X, μ2))
lnZ = -1.75
return NestedModel(loglike, prior), lnZ
end
function logshell(X, μ, radius=2, width=0.1)
d = LinearAlgebra.norm(X - μ)
lognorm = -log(sqrt(2 * π * width^2))
return lognorm - (d - radius)^2 / (2 * width^2)
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 15806 | """
NestedSamplers.Proposals
This module contains the different algorithms for proposing new points within a bounding volume in unit space.
The available implementations are
* [`Proposals.Rejection`](@ref) - samples uniformly within the bounding volume
* [`Proposals.RWalk`](@ref) - random walks to a new point given an existing one
* [`Proposals.RStagger`](@ref) - random staggering away to a new point given an existing one
* [`Proposals.Slice`](@ref) - slicing away to a new point given an existing one
* [`Proposals.RSlice`](@ref) - random slicing away to a new point given an existing one
"""
module Proposals
using ..NestedSamplers: prior_transform_and_loglikelihood
using ..Bounds
using Random
using LinearAlgebra
using Parameters
export AbstractProposal
"""
NestedSamplers.AbstractProposal
The abstract type for live point proposal algorithms.
# Interface
Each `AbstractProposal` must have this function,
```julia
(::AbstractProposal)(::AbstractRNG, point, loglstar, bounds, model)
```
which, given the input `point` with log-likelihood `loglstar` inside `bounds`, returns a new point in unit space, the corresponding point in prior space, its log-likelihood, and the number of log-likelihood evaluations. Here `model` bundles the prior transform and the log-likelihood (e.g. a `NestedModel` or `NestedSamplers.PriorTransformAndLogLikelihood`).
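For instance, a minimal sketch of calling a proposal directly; the model wrapper below mirrors the package's test suite, and the values are purely illustrative:
```julia
using Random
using NestedSamplers

rng = Random.default_rng()
# prior transform maps the unit cube to [-1, 1]; log-likelihood is a simple quadratic
model = NestedSamplers.PriorTransformAndLogLikelihood(u -> 2u .- 1, x -> -sum(abs2, x))
bound = Bounds.NoBounds(2)
point = rand(rng, 2)
prop = Proposals.Rejection()
# loglstar = -Inf accepts the first proposed point
u, v, logl, ncall = prop(rng, point, -Inf, bound, model)
```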
"""
abstract type AbstractProposal end
# ----------------------------------------
# Helper for checking unit-space bounds
unitcheck(us) = all(u -> 0 < u < 1, us)
"""
Proposals.Rejection(;maxiter=100_000)
Propose a new live point by uniformly sampling within the bounding volume and rejecting samples that do not meet the likelihood constraints. This follows the original nested sampling algorithm proposed in Skilling (2004)[^1]
[^1]: John Skilling, 2004, AIP 735, 395 ["Nested Sampling"](https://aip.scitation.org/doi/abs/10.1063/1.1835238)
## Parameters
- `maxiter` is the maximum number of samples that can be rejected before giving up and throwing an error.
"""
Base.@kwdef struct Rejection <: AbstractProposal
maxiter::Int = 100_000
end
@deprecate Uniform() Rejection()
function (prop::Rejection)(
rng::AbstractRNG,
point::AbstractVector,
logl_star,
bounds::AbstractBoundingSpace,
model
)
ncall = 0
for _ in 1:prop.maxiter
u = rand(rng, bounds)
unitcheck(u) || continue
v, logl = prior_transform_and_loglikelihood(model, u)
ncall += 1
logl ≥ logl_star && return u, v, logl, ncall
end
throw(ErrorException("Couldn't generate a proper point after $(prop.maxiter) attempts including $ncall likelihood calls. Bounds=$bounds, logl_star=$logl_star."))
end
Base.show(io::IO, p::Rejection) = print(io, "NestedSamplers.Proposals.Rejection")
"""
Proposals.RWalk(;ratio=0.5, walks=25, scale=1)
Propose a new live point by random walking away from an existing live point. This follows the algorithm outlined in Skilling (2006).[^5]
[^5]: Skilling, 2006, Bayesian Anal. 1(4), ["Nested sampling for general Bayesian computation"](https://projecteuclid.org/journals/bayesian-analysis/volume-1/issue-4/Nested-sampling-for-general-Bayesian-computation/10.1214/06-BA127.full)
## Parameters
- `ratio` is the target acceptance ratio
- `walks` is the minimum number of steps to take
- `scale` is the proposal distribution scale, which will update _between_ proposals.
"""
@with_kw mutable struct RWalk <: AbstractProposal
ratio = 0.5
walks = 25
scale = 1.0
@assert 1 / walks ≤ ratio ≤ 1 "Target acceptance ratio must be between 1/`walks` and 1"
@assert walks > 1 "Number of steps must be greater than 1"
@assert scale ≥ 0 "Proposal scale must be non-negative"
end
function (prop::RWalk)(
rng::AbstractRNG,
point::AbstractVector,
logl_star,
bounds::AbstractBoundingSpace,
model;
kwargs...
)
# setup
n = length(point)
scale_init = prop.scale
accept = reject = fail = nfail = nc = ncall = 0
local du, u_prop, logl_prop, u, v, logl
while nc < prop.walks || iszero(accept)
# get proposed point
while true
# check scale factor to avoid over-shrinking
prop.scale < 1e-5 * scale_init && error("Random walk sampling appears to be stuck.")
# transform to proposal distribution
du = randoffset(rng, bounds)
u_prop = @. point + prop.scale * du
# inside unit-cube
unitcheck(u_prop) && break
fail += 1
nfail += 1
# check if stuck generating bad numbers
if fail > 100 * prop.walks
@warn "Random number generation appears extremely inefficient. Adjusting the scale-factor accordingly"
fail = 0
prop.scale *= exp(-1/n)
end
end
# check proposed point
v_prop, logl_prop = prior_transform_and_loglikelihood(model, u_prop)
if logl_prop ≥ logl_star
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
else
reject += 1
end
nc += 1
ncall += 1
# check if stuck generating bad points
if nc > 50 * prop.walks
@warn "Random walk proposals appear to be extremely inefficient. Adjusting the scale-factor accordingly"
prop.scale *= exp(-1/n)
nc = accept = reject = 0
end
end
# update proposal scale using acceptance ratio
update_scale!(prop, accept, reject, n)
return u, v, logl, ncall
end
# update proposal scale using target acceptance ratio
function update_scale!(prop, accept, reject, n)
ratio = accept / (accept + reject)
norm = max(prop.ratio, 1 - prop.ratio) * n
scale = prop.scale * exp((ratio - prop.ratio) / norm)
prop.scale = min(scale, sqrt(n))
return prop
end
"""
Proposals.RStagger(;ratio=0.5, walks=25, scale=1)
Propose a new live point by random staggering away from an existing live point.
This differs from the random walk proposal in that the step size here is exponentially adjusted
to reach a target acceptance rate _during_ each proposal, in addition to _between_
proposals. This follows the algorithm outlined in Skilling (2006).[^5]
[^5]: Skilling, 2006, Bayesian Anal. 1(4), ["Nested sampling for general Bayesian computation"](https://projecteuclid.org/journals/bayesian-analysis/volume-1/issue-4/Nested-sampling-for-general-Bayesian-computation/10.1214/06-BA127.full)
## Parameters
- `ratio` is the target acceptance ratio
- `walks` is the minimum number of steps to take
- `scale` is the proposal distribution scale, which will update _between_ proposals.
"""
@with_kw mutable struct RStagger <: AbstractProposal
ratio = 0.5
walks = 25
scale = 1.0
@assert 1 / walks ≤ ratio ≤ 1 "Target acceptance ratio must be between 1/`walks` and 1"
@assert walks > 1 "Number of steps must be greater than 1"
@assert scale ≥ 0 "Proposal scale must be non-negative"
end
function (prop::RStagger)(
rng::AbstractRNG,
point::AbstractVector,
logl_star,
bounds::AbstractBoundingSpace,
model;
kwargs...
)
# setup
n = length(point)
scale_init = prop.scale
accept = reject = fail = nfail = nc = ncall = 0
stagger = 1
local du, u_prop, logl_prop, u, v, logl
while nc < prop.walks || iszero(accept)
# get proposed point
while true
# check scale factor to avoid over-shrinking
prop.scale < 1e-5 * scale_init && error("Random walk sampling appears to be stuck.")
# transform to proposal distribution
du = randoffset(rng, bounds)
u_prop = @. point + prop.scale * stagger * du
# inside unit-cube
unitcheck(u_prop) && break
fail += 1
nfail += 1
# check if stuck generating bad numbers
if fail > 100 * prop.walks
@warn "Random number generation appears extremely inefficient. Adjusting the scale-factor accordingly"
fail = 0
prop.scale *= exp(-1/n)
end
end
# check proposed point
v_prop, logl_prop = prior_transform_and_loglikelihood(model, u_prop)
if logl_prop ≥ logl_star
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
else
reject += 1
end
nc += 1
ncall += 1
# adjust _stagger_ to target an acceptance ratio of `prop.ratio`
ratio = accept / (accept + reject)
if ratio > prop.ratio
stagger *= exp(1 / accept)
elseif ratio < prop.ratio
stagger /= exp(1 / reject)
end
# check if stuck generating bad points
if nc > 50 * prop.walks
@warn "Random walk proposals appear to be extremely inefficient. Adjusting the scale-factor accordingly"
prop.scale *= exp(-1 / n)
nc = accept = reject = 0
end
end
# update proposal scale using acceptance ratio
update_scale!(prop, accept, reject, n)
return u, v, logl, ncall
end
"""
Proposals.Slice(;slices=5, scale=1)
Propose a new live point by a series of random slices away from an existing live point.
This is a standard _Gibbs-like_ implementation where a single multivariate slice is a combination of `slices` univariate slices through each axis. This follows the algorithm outlined in Neal (2003).[^6]
[^6]: Neal, 2003, Ann. Statist. 31(3), ["Slice Sampling"](https://projecteuclid.org/journals/annals-of-statistics/volume-31/issue-3/Slice-sampling/10.1214/aos/1056562461.full)
## Parameters
- `slices` is the minimum number of slices
- `scale` is the proposal distribution scale, which will update _between_ proposals.
"""
@with_kw mutable struct Slice <: AbstractProposal
slices = 5
scale = 1.0
@assert slices ≥ 1 "Number of slices must be greater than or equal to 1"
@assert scale ≥ 0 "Proposal scale must be non-negative"
end
function (prop::Slice)(
rng::AbstractRNG,
point::AbstractVector,
logl_star,
bounds::AbstractBoundingSpace,
model;
kwargs...
)
# setup
n = length(point)
nc = nexpand = ncontract = 0
local u, v, logl
# scale the bounding volume's principal axes by the proposal scale
axes = Bounds.axes(bounds)
axes = prop.scale .* axes'
# slice sampling loop
for it in 1:prop.slices
# shuffle axis update order
idxs = shuffle!(rng, collect(Base.axes(axes, 1)))
# slice sample along a random direction
for idx in idxs
# select axis
axis = axes[idx, :]
u, v, logl, nc, nexpand, ncontract = sample_slice(
rng, axis, point, logl_star,
model,
nc, nexpand, ncontract
)
end # end of slice sample along a random direction
end # end of slice sampling loop
# update slice proposal scale based on the relative size of the slices compared to the initial guess
prop.scale = prop.scale * nexpand / (2.0 * ncontract)
return u, v, logl, nc
end # end of function Slice
"""
Proposals.RSlice(;slices=5, scale=1)
Propose a new live point by a series of random slices away from an existing live point. This is a standard _random_ implementation where each slice is along a random direction based on the provided axes. This more closely matches the PolyChord implementation outlined in Handley et al. (2015a,b).[^7][^8]
[^7]: Handley, et al., 2015a, MNRAS 450(1), ["polychord: nested sampling for cosmology"](https://academic.oup.com/mnrasl/article/450/1/L61/986122)
[^8]: Handley, et al., 2015b, MNRAS 453(4), ["polychord: next-generation nested sampling"](https://academic.oup.com/mnras/article/453/4/4384/2593718)
## Parameters
- `slices` is the minimum number of slices
- `scale` is the proposal distribution scale, which will update _between_ proposals.
"""
@with_kw mutable struct RSlice <: AbstractProposal
slices = 5
scale = 1.0
@assert slices ≥ 1 "Number of slices must be greater than or equal to 1"
@assert scale ≥ 0 "Proposal scale must be non-negative"
end
function (prop::RSlice)(
rng::AbstractRNG,
point::AbstractVector,
logl_star,
bounds::AbstractBoundingSpace,
model;
kwargs...
)
# setup
n = length(point)
nc = nexpand = ncontract = 0
local u, v, logl
# random slice sampling loop
for it in 1:prop.slices
# propose a direction on the unit n-sphere
drhat = randn(rng, n)
drhat /= norm(drhat)
# transform and scale into parameter space
axis = prop.scale .* (Bounds.axes(bounds) * drhat)
u, v, logl, nc, nexpand, ncontract = sample_slice(
rng, axis, point, logl_star,
model,
nc, nexpand, ncontract
)
end # end of random slice sampling loop
# update random slice proposal scale based on the relative size of the slices compared to the initial guess
prop.scale = prop.scale * nexpand / (2.0 * ncontract)
return u, v, logl, nc
end # end of function RSlice
# Method for slice sampling
function sample_slice(rng, axis, u, logl_star, model, nc, nexpand, ncontract)
# define starting window
r = rand(rng) # initial scale/offset
u_l = @. u - r * axis # left bound
if unitcheck(u_l)
v_l, logl_l = prior_transform_and_loglikelihood(model, u_l)
else
logl_l = -Inf
end
nc += 1
nexpand += 1
u_r = u_l .+ axis # right bound
if unitcheck(u_r)
v_r, logl_r = prior_transform_and_loglikelihood(model, u_r)
else
logl_r = -Inf
end
nc += 1
nexpand += 1
# stepping out left and right bounds
while logl_l ≥ logl_star
u_l .-= axis
if unitcheck(u_l)
v_l, logl_l = prior_transform_and_loglikelihood(model, u_l)
else
logl_l = -Inf
end
nc += 1
nexpand += 1
end
while logl_r ≥ logl_star
u_r .+= axis
if unitcheck(u_r)
v_r, logl_r = prior_transform_and_loglikelihood(model, u_r)
else
logl_r = -Inf
end
nc += 1
nexpand += 1
end
# sample within limits. If the sample is not valid, shrink the limits until the `logl_star` bound is hit
window_init = norm(u_r - u_l) # initial window size
while true
# define slice and window
u_hat = u_r - u_l
window = norm(u_hat)
# check if the slice has shrunk to be ridiculously small
window < 1e-5 * window_init && error("Slice sampling appears to be stuck.")
# propose a new position
r = rand(rng)
u_prop = @. u_l + r * u_hat
if unitcheck(u_prop)
v_prop, logl_prop = prior_transform_and_loglikelihood(model, u_prop)
else
logl_prop = -Inf
end
nc += 1
ncontract += 1
# if success, then move to the new position
if logl_prop ≥ logl_star
return u_prop, v_prop, logl_prop, nc, nexpand, ncontract
# if fail, then check if the new point is to the left/right of the original point along the proposal axis and update the bounds accordingly
else
s = dot(u_prop - u, u_hat) # check sign (+/-)
if s < 0 # left
u_l = u_prop
elseif s > 0 # right
u_r = u_prop
else # if `s = 0` something has gone wrong
error("Slice sampler has failed to find a valid point.")
end
end
end # end of sample within limits while
end
end # module Proposals
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 163 |
@testset "Proposals.Uniform -> Proposals.Rejection deprecation" begin
prop = @test_deprecated Proposals.Uniform()
@test prop === Proposals.Rejection()
end | NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 3490 | const test_bounds = [
Bounds.Ellipsoid,
Bounds.MultiEllipsoid
]
const test_props = [
Proposals.Rejection(maxiter=Int(1e6)),
Proposals.RWalk(ratio=0.5, walks=50),
Proposals.RStagger(ratio=0.5, walks=50),
Proposals.Slice(slices=10),
Proposals.RSlice(slices=10)
]
const MAXZSCORES = Dict(zip(
Iterators.product(test_bounds, test_props),
[4, 3, 9, 8, 6, 3, 5, 7, 4, 4] # rwalk is bad...
))
function test_logz(measured, actual, error, bound, proposal)
diff = measured - actual
zscore = abs(diff) / error
@test measured ≈ actual atol=MAXZSCORES[(bound, proposal)] * error
end
@testset "$(nameof(bound)), $(nameof(typeof(proposal)))" for bound in test_bounds, proposal in test_props
@testset "Correlated Gaussian Conjugate Prior - ndims=$D" for D in [2, 4]
model, logz = Models.CorrelatedGaussian(D)
# match JAXNS paper setup, generally
sampler = Nested(D, 50D; bounds=bound, proposal=proposal)
chain, state = sample(rng, model, sampler; dlogz=0.01)
chain_res = sample(rng, chain, Weights(vec(chain[:weights])), length(chain))
# test posteriors
vals = Array(chain_res)
means = mean(vals, dims=1)
tols = 2std(vals, mean=means, dims=1) # 2-sigma
μ = fill(2.0, D)
Σ = fill(0.95, D, D)
Σ[diagind(Σ)] .= 1
expected = Σ * ((Σ + I) \ μ)
@test all(@.(abs(means - expected) < tols))
# logz
test_logz(state.logz, logz, state.logzerr, bound, proposal)
end
@testset "Gaussian Shells" begin
model, logz = Models.GaussianShells()
sampler = Nested(2, 1000; bounds=bound, proposal=proposal)
chain, state = sample(rng, model, sampler; dlogz=0.01)
# logz
test_logz(state.logz, logz, state.logzerr, bound, proposal)
end
@testset "Gaussian Mixture Model" begin
σ = 0.1
μ1 = ones(2)
μ2 = -ones(2)
inv_σ = diagm(0 => fill(1 / σ^2, 2))
function logl(x)
dx1 = x .- μ1
dx2 = x .- μ2
f1 = -dx1' * (inv_σ * dx1) / 2
f2 = -dx2' * (inv_σ * dx2) / 2
return logaddexp(f1, f2)
end
prior(X) = muladd.(10, X, -5)
model = NestedModel(logl, prior)
analytic_logz = log(4π * σ^2 / 100)
spl = Nested(2, 1000, bounds=bound, proposal=proposal)
chain, state = sample(rng, model, spl; dlogz=0.01)
chain_res = sample(rng, chain, Weights(vec(chain[:weights])), length(chain))
test_logz(state.logz, analytic_logz, state.logzerr, bound, proposal)
xmodes = sort!(findpeaks(chain_res[:, 1, 1])[1:2])
@test xmodes[1] ≈ -1 atol = σ
@test xmodes[2] ≈ 1 atol = σ
ymodes = sort!(findpeaks(chain_res[:, 2, 1])[1:2])
@test ymodes[1] ≈ -1 atol = σ
@test ymodes[2] ≈ 1 atol = σ
end
@testset "Eggbox" begin
model, logz = Models.Eggbox()
sampler = Nested(2, 1000; bounds=bound, proposal=proposal)
chain, state = sample(rng, model, sampler; dlogz=0.1)
test_logz(state.logz, logz, state.logzerr, bound, proposal)
chain_res = sample(rng, chain, Weights(vec(chain[:weights])), length(chain))
xmodes = sort!(findpeaks(chain_res[:, 1, 1])[1:5])
@test all(isapprox.(xmodes, 0.1:0.2:0.9, atol=0.2))
ymodes = sort!(findpeaks(chain_res[:, 2, 1])[1:5])
@test all(isapprox.(ymodes, 0.1:0.2:0.9, atol=0.2))
end
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 605 | using AbstractMCMC
using Distributions
using LinearAlgebra
using LogExpFunctions
using MCMCChains
using NestedSamplers
using StableRNGs
using StatsBase
using Test
rng = StableRNG(1234)
AbstractMCMC.setprogress!(get(ENV, "CI", "false") == "false")
include("utils.jl")
@testset "Deprecations" begin include("deprecations.jl") end
@testset "Bounds" begin include("bounds/bounds.jl") end
@testset "Proposals" begin include("proposals/proposals.jl") end
@testset "Sampler" begin include("sampler.jl") end
@testset "Sampling" begin include("sampling.jl") end
@testset "Models" begin include("models.jl") end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1128 | using NestedSamplers: default_update_interval
@testset "default helpers" begin
@test default_update_interval(Proposals.Rejection(), 3) == 1.5
@test default_update_interval(Proposals.RWalk(), 10) == 3.75
@test default_update_interval(Proposals.RWalk(walks=10), 10) == 1.5
@test default_update_interval(Proposals.RStagger(), 10) == 3.75
@test default_update_interval(Proposals.RStagger(walks=10), 10) == 1.5
@test default_update_interval(Proposals.Slice(), 30) == 135
@test default_update_interval(Proposals.Slice(slices=10), 25) == 225
@test default_update_interval(Proposals.RSlice(), 30) == 10
@test default_update_interval(Proposals.RSlice(slices=10), 25) == 20
end
spl = Nested(3, 100)
@test spl.proposal isa Proposals.Rejection
@test spl.bounds == Bounds.MultiEllipsoid
@test spl.update_interval == 150
@test spl.enlarge == 1.25
@test spl.min_ncall == 200
@test spl.dlv ≈ log(101/100)
spl = Nested(10, 1000)
@test spl.proposal isa Proposals.RWalk
@test spl.update_interval == 3750
spl = Nested(30, 1500)
@test spl.proposal isa Proposals.Slice
@test spl.update_interval == 202500
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 2023 |
@testset "Bundles" begin
logl(x::AbstractVector) = exp(-x[1]^2 / 2) / √(2π)
priors = [Uniform(-1, 1)]
model = NestedModel(logl, priors)
spl = Nested(1, 500)
chains, _ = sample(rng, model, spl; dlogz=0.2, param_names=["x"], chain_type=Chains)
val_arr, _ = sample(rng, model, spl; dlogz=0.2, chain_type=Array)
@test size(chains, 2) == size(val_arr, 2)
# test with add_live = false
chains2, _ = sample(rng, model, spl; add_live=false, dlogz=0.2, param_names=["x"], chain_type=Chains)
val_arr2, _ = sample(rng, model, spl; add_live=false, dlogz=0.2, chain_type=Array)
@test size(chains2, 2) == size(val_arr2, 2)
@test size(chains2, 1) < size(chains, 1) && size(val_arr2, 1) < size(val_arr, 1)
# test check_wsum kwarg
chains3, _ = sample(rng, model, spl; dlogz=0.2, param_names=["x"], chain_type=Chains)
val_arr3, _ = sample(rng, model, spl; dlogz=0.2, chain_type=Array)
@test size(chains3, 2) == size(val_arr3, 2)
end
@testset "Zero likelihood" begin
logl(x::AbstractVector) = x[1] > 0 ? exp(-x[1]^2 / 2) / √(2π) : -Inf
priors = [Uniform(-1, 1)]
model = NestedModel(logl, priors)
spl = Nested(1, 500)
chains, _ = sample(rng, model, spl; param_names=["x"])
@test all(>(0), chains[:x][chains[:weights] .> 1e-10])
end
@testset "Stopping criterion" begin
logl(x::AbstractVector) = exp(-x[1]^2 / 2) / √(2π)
priors = [Uniform(-1, 1)]
model = NestedModel(logl, priors)
spl = Nested(1, 500)
chains, state = sample(rng, model, spl; add_live=false, dlogz=1.0)
logz_remain = maximum(state.logl) + state.logvol
delta_logz = logaddexp(state.logz, logz_remain) - state.logz
@test delta_logz ≤ 1.0
chains, state = sample(rng, model, spl; add_live=false, maxiter=3)
@test state.it == 3
chains, state = sample(rng, model, spl; add_live=false, maxcall=10)
@test state.ncall == 10
chains, state = sample(rng, model, spl; add_live=false, maxlogl=0.2)
@test state.logl[1] ≥ 0.2
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1342 | using IterTools
using KernelDensity
function integrate_on_grid(f, ranges, density)
rs = []
for r in ranges
step = (r[2] - r[1]) / density
rmin = r[1] + step / 2
rmax = r[2] - step / 2
push!(rs, range(rmin, rmax, length=density))
end
logsum = -1e300
for v in Iterators.product(rs...)
logsum = log(exp(logsum) + f(v))
end
logsum -= length(ranges) * log(density)
return logsum
end
function integrate_on_grid(f, ranges)
density = 100
logsum_old = -Inf
while true
logsum = integrate_on_grid(f, ranges, density)
if abs(logsum - logsum_old) < 0.001
return logsum
end
logsum_old = logsum
density *= 2
end
end
## Contrib from Firefly.jl
function findpeaks(samples::AbstractVector)
k = kde(samples)
# the sign of the difference tells us whether the density is increasing or decreasing
# rle gives the points at which the sign switches (local extrema)
runs = rle(sign.(diff(k.density)))
# if we start going up, first extreme will be maximum, else minimum
start = runs[1][1] == 1 ? 1 : 2
# find the peak indices at the local maxima
peak_idx = cumsum(runs[2])[start:2:end]
sorted_idx = sortperm(k.density[peak_idx], rev=true)
return k.x[peak_idx[sorted_idx]]
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 3659 | # Helper that returns a random N-dimensional ellipsoid
function random_ellipsoid(N::Integer)
cov = rand(rng, N, N)
while abs(det(cov)) < 1e-10
cov = rand(rng, N, N)
end
return Ellipsoid(zeros(N), pinv(cov * cov'))
end
const BOUNDST = [
Bounds.Ellipsoid,
Bounds.MultiEllipsoid
]
@testset "pathological cases" begin
A_almost_symmetric = [2.6081830533175096e8 -5.4107420917559285e6 -1.9314298704966028e9 -2.360066561768968e9; -5.410742091755895e6 379882.440454782 6.715028007245775e7 2.0195280814040575e7; -1.931429870496611e9 6.715028007245693e7 9.811342987452753e10 -4.6579127705367036e7; -2.3600665617689605e9 2.0195280814042665e7 -4.6579127705418006e7 9.80946804720486e10]
# shouldn't fail:
ell = Bounds.Ellipsoid(zeros(4), A_almost_symmetric)
Bounds.volume(ell)
end
@testset "interface - $B, $T, D=$D" for B in BOUNDST, T in [Float32, Float64], D in 1:20
# creation, inspection
bound = B(T, D)
@test eltype(bound) == T
@test ndims(bound) == D
# sampling
sample = rand(rng, bound)
@test eltype(sample) == T
@test size(sample) == (D,)
@test sample ∈ bound
nsamples = 1000
samples = rand(rng, bound, nsamples)
@test eltype(samples) == T
@test size(samples) == (D, nsamples)
@test all(samples[:, i] ∈ bound for i in axes(samples, 2))
# fitting
bound = Bounds.fit(B, samples)
@test eltype(bound) == T
@test all(samples[:, i] ∈ bound for i in axes(samples, 2))
# robust fitting
pv = Bounds.volume(bound) / size(samples, 2)
bound2 = Bounds.fit(B, samples; pointvol = pv)
@test Bounds.volume(bound2) ≈ Bounds.volume(bound) rtol = 1e-3
# volume and scaling
volfrac = 0.5
bound_scaled = Bounds.scale!(deepcopy(bound), volfrac)
@test Bounds.volume(bound) ≈ Bounds.volume(bound_scaled) / volfrac rtol = 1e-3
# expected number of points that will fall within inner bound
npoints = 5000
expect = volfrac * npoints
σ = sqrt((1 - volfrac) * expect)
ninner = count(rand(rng, bound) ∈ bound_scaled for _ in 1:npoints)
@test ninner ≈ expect atol = 3σ
# printing
@test sprint(show, bound) == "$(string(nameof(B))){$T}(ndims=$D)"
# rand_live
x = rand(rng, bound, 10)
point, _bound = Bounds.rand_live(rng, bound, x)
@test count(point == x[:, i] for i in axes(x, 2)) == 1
Btarget = B ∈ [Bounds.MultiEllipsoid] ? Bounds.Ellipsoid : B
@test _bound isa Btarget
@test point ∈ _bound && point ∈ bound
end
@testset "interface - NoBounds, $T, D=$D" for T in [Float32, Float64], D in 1:20
# creation, inspection
bound = Bounds.NoBounds(T, D)
@test bound == Bounds.NoBounds{T}(D)
@test eltype(bound) == T
@test ndims(bound) == D
# sampling
sample = rand(rng, bound)
@test eltype(sample) == T
@test length(sample) == D
@test sample ∈ bound
samples = rand(rng, bound, 3)
@test eltype(samples) == T
@test size(samples) == (D, 3)
@test all(samples[:, i] ∈ bound for i in axes(samples, 2))
# fitting
samples = randn(rng, T, D, 100)
@test Bounds.fit(Bounds.NoBounds, samples) == bound
@test eltype(bound) == T # matches eltype
# robust fitting
pv = 1 / size(samples, 2)
bound_fit = Bounds.fit(Bounds.NoBounds, samples; pointvol = pv)
@test bound_fit == bound
@test Bounds.volume(bound_fit) == 1
# volume and scaling
bound_scaled = Bounds.scale!(deepcopy(bound), 0.5)
@test bound_scaled == bound
@test Bounds.volume(bound_scaled) == 1
@test Bounds.axes(bound) == Diagonal(ones(D))
end
include("helpers.jl")
include("ellipsoids.jl")
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 1484 | using NestedSamplers.Bounds: Ellipsoid, MultiEllipsoid, fit, scale!, decompose, volume, volume_prefactor
const NMAX = 20
@testset "ndims=$N" for N in 1:NMAX
@testset "Spheres" begin
scale = 5
center = 2scale .* ones(N)
A = Matrix(I, N, N) ./ scale^2
ell = Ellipsoid(center, A)
@test volume(ell) ≈ volume_prefactor(N) * scale^N
axs, axlens = decompose(ell)
@test axlens ≈ fill(scale, N)
@test axs ≈ Bounds.axes(ell)
@test norm.(eachcol(axs)) == fill(scale, N)
end
@testset "Scaling" begin
scale = 1.5
center = zeros(N)
A = diagm(0 => rand(rng, N))
ell = Ellipsoid(center, A)
ell2 = Ellipsoid(center, A ./ scale^2)
scale!(ell, scale^N)
@test volume(ell) ≈ volume(ell2)
@test ell.A ≈ ell2.A
@test all(decompose(ell) .≈ decompose(ell2))
end
@testset "Contains" begin
E = 1e-7
ell = Ellipsoid(N)
# point just outside the unit n-sphere
pt = (1 / √N + E) .* ones(N)
@test pt ∉ ell
# point just inside
pt = (1 / √N - E) .* ones(N)
@test pt ∈ ell
A = diagm(0 => rand(rng, N))
ell = Ellipsoid(zeros(N), A)
for i in 1:N
axlen = 1 / sqrt(A[i, i])
pt = zeros(N)
pt[i] = axlen + E
@test pt ∉ ell
pt[i] = axlen - E
@test pt ∈ ell
end
end
end # testset
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 575 | using NestedSamplers.Bounds: randball, volume_prefactor
@testset "Rand Sphere" begin
for _ in 1:100, k in 1:10
x = randball(rng, Float64, k)
@test sum(t->t^2, x) < 1
end
end
@testset "Volume Prefactor" begin
@test volume_prefactor(1) ≈ 2
@test volume_prefactor(2) ≈ π
@test volume_prefactor(3) ≈ 4 / 3 * π
@test volume_prefactor(4) ≈ 1 / 2 * π^2
@test volume_prefactor(5) ≈ 8 / 15 * π^2
@test volume_prefactor(6) ≈ 1 / 6 * π^3
@test volume_prefactor(7) ≈ 16 / 105 * π^3
@test volume_prefactor(9) ≈ 32 / 945 * π^4
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | code | 2565 | const PROPOSALS = [
Proposals.Rejection(),
Proposals.RWalk(),
Proposals.RStagger(),
Proposals.Slice(),
Proposals.RSlice()
]
const BOUNDS = [
Bounds.NoBounds(2),
Bounds.Ellipsoid(2),
Bounds.MultiEllipsoid(2)
]
@testset "interface - $(typeof(prop))" for prop in PROPOSALS, bound in BOUNDS
logl(X) = -sum(x->x^2, X)
prior(u) = 2u .- 1 # uniform from -1 to 1
us = 0.7 .* rand(rng, 2, 10) # live points should be within the ellipsoid
point, _bound = Bounds.rand_live(rng, bound, us)
loglstar = logl(prior(point))
u, v, logL = prop(rng, point, loglstar, _bound, NestedSamplers.PriorTransformAndLogLikelihood(prior, logl))
# simple bounds checks
@test all(x -> 0 < x < 1, u)
@test all(x -> -1 < x < 1, v)
# check new point actually has better likelihood
@test logl(v) == logL ≥ loglstar
end
@testset "Rejection" begin
# printing
@test sprint(show, Proposals.Rejection()) == "NestedSamplers.Proposals.Rejection"
end
@testset "RWalk" begin
prop = Proposals.RWalk()
@test prop.scale == 1
@test prop.ratio == 0.5
@test prop.walks == 25
@test_throws AssertionError Proposals.RWalk(ratio=-0.2)
@test_throws AssertionError Proposals.RWalk(ratio=1.2)
@test_throws AssertionError Proposals.RWalk(walks=0)
@test_throws AssertionError Proposals.RWalk(walks=2, ratio=0.2)
@test_throws AssertionError Proposals.RWalk(scale=-4)
end
@testset "RStagger" begin
prop = Proposals.RStagger()
@test prop.scale == 1
@test prop.ratio == 0.5
@test prop.walks == 25
@test_throws AssertionError Proposals.RStagger(ratio=-0.2)
@test_throws AssertionError Proposals.RStagger(ratio=1.2)
@test_throws AssertionError Proposals.RStagger(walks=0)
@test_throws AssertionError Proposals.RStagger(walks=2, ratio=0.2)
@test_throws AssertionError Proposals.RStagger(scale=-4)
end
@testset "unitcheck" begin
@test Proposals.unitcheck(rand(rng, 1000))
@test !Proposals.unitcheck(randn(rng, 1000))
# works with tuples, too
@test Proposals.unitcheck((0.3, 0.6, 0.8))
end
@testset "Slice" begin
prop = Proposals.Slice()
@test prop.slices == 5
@test prop.scale == 1
@test_throws AssertionError Proposals.Slice(slices=-2)
@test_throws AssertionError Proposals.Slice(scale=-3)
end
@testset "RSlice" begin
prop = Proposals.RSlice()
@test prop.slices == 5
@test prop.scale == 1
@test_throws AssertionError Proposals.RSlice(slices=-2)
@test_throws AssertionError Proposals.RSlice(scale=-3)
end
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 4506 |
# NestedSamplers.jl
[](https://github.com/TuringLang/NestedSamplers.jl/actions)
[](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
[](https://codecov.io/gh/TuringLang/NestedSamplers.jl)
[](LICENSE)
[](https://TuringLang.github.io/NestedSamplers.jl/stable)
[](https://TuringLang.github.io/NestedSamplers.jl/dev)
Implementations of single- and multi-ellipsoidal nested sampling algorithms in pure Julia. We implement the [AbstractMCMC.jl](https://github.com/TuringLang/AbstractMCMC.jl) interface, allowing straightforward sampling from a variety of statistical models.
This package was heavily influenced by [nestle](https://github.com/kbarbary/nestle), [dynesty](https://github.com/joshspeagle/dynesty), and [NestedSampling.jl](https://github.com/kbarbary/NestedSampling.jl).
## Citing
[](https://doi.org/10.5281/zenodo.3950594)
If you use this library, or a derivative of it, in your work, please consider citing it. This code is built off a multitude of academic works, which have been noted in the docstrings where appropriate. These references, along with references for the more general calculations, can all be found in [CITATION.bib](CITATION.bib)
## Installation
To use the nested samplers first install this library
```julia
julia> ]add NestedSamplers
```
## Usage
For in-depth usage, see the [online documentation](https://TuringLang.github.io/NestedSamplers.jl/dev/). In general, you'll need to write a log-likelihood function and a prior transform function. These are supplied to a `NestedModel`, defining the statistical model
```julia
using NestedSamplers
using Distributions
using LinearAlgebra
logl(X) = logpdf(MvNormal([1, -1], I), X)
prior(X) = 4 .* (X .- 0.5)
# or equivalently
priors = [Uniform(-2, 2), Uniform(-2, 2)]
model = NestedModel(logl, priors)
```
after defining the model, set up the nested sampler. This will involve choosing the bounding space and proposal scheme, or you can rely on the defaults. In addition, we need to define the dimensionality of the problem and the number of live points. Using more live points results in a more precise evidence estimate, at the cost of runtime. For more information, see the docs.
```julia
bounds = Bounds.MultiEllipsoid
prop = Proposals.Slice(slices=10)
# 1000 live points
sampler = Nested(2, 1000; bounds=bounds, proposal=prop)
```
once the sampler is set up, we can leverage all of the [AbstractMCMC.jl](https://github.com/TuringLang/AbstractMCMC.jl) interface, including the step iterator, transducer, and a convenience `sample` method. The `sample` method takes keyword arguments for the convergence criteria.
**Note:** both the samples *and* the sampler state will be returned by `sample`
```julia
using StatsBase
chain, state = sample(model, sampler; dlogz=0.2)
```
you can resample taking into account the statistical weights, again using StatsBase
```julia
chain_resampled = sample(chain, Weights(vec(chain["weights"])), length(chain))
```
These are chains from [MCMCChains.jl](https://github.com/TuringLang/MCMCChains.jl), which offer a lot of flexibility in exploring posteriors, combining data, and offering lots of convenient conversions (like to `DataFrame`s).
Finally, we can see the estimate of the Bayesian evidence
```julia
using Measurements
state.logz ± state.logzerr
```
## Contributions and Support
[](https://github.com/SciML/ColPrac)
**Primary Author:** Miles Lucas ([@mileslucas](https://github.com/mileslucas))
Contributions are always welcome! In general, contributions should follow [ColPrac](https://github.com/SciML/ColPrac). Take a look at the [issues](https://github.com/TuringLang/NestedSamplers.jl/issues) for ideas of open problems! To discuss ideas or plan contributions, open a [discussion](https://github.com/TuringLang/NestedSamplers.jl/discussions).
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 934 | # API/Reference
```@index
```
## Samplers
```@docs
NestedModel
Nested
```
### Convergence
There are a few convergence criteria available; by default, the `dlogz` criterion will be used. These criteria can be combined, as shown in the sketch after this list.
* `dlogz=0.5` sample until the *fraction of the remaining evidence* is below the given value ([more info](https://dynesty.readthedocs.io/en/latest/overview.html#stopping-criteria)).
* `maxiter=Inf` stop after the given number of iterations
* `maxcall=Inf` stop after the given number of log-likelihood function calls
* `maxlogl=Inf` stop after reaching the target log-likelihood
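The criteria are passed as keyword arguments to `sample`; a minimal illustrative sketch (the model here is a throwaway example):
```julia
using Distributions
using NestedSamplers
using StatsBase: sample

model = NestedModel(x -> -sum(abs2, x), [Uniform(-1, 1)])
sampler = Nested(1, 100)
# stop once the remaining-evidence estimate drops below dlogz,
# or after 10_000 iterations, whichever comes first
chain, state = sample(model, sampler; dlogz=0.5, maxiter=10_000)
```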
## Bounds
```@docs
Bounds
Bounds.AbstractBoundingSpace
Bounds.NoBounds
Bounds.Ellipsoid
Bounds.MultiEllipsoid
```
## Proposals
```@docs
Proposals
Proposals.AbstractProposal
Proposals.Rejection
Proposals.RWalk
Proposals.RStagger
Proposals.Slice
Proposals.RSlice
```
## Models
```@docs
Models
Models.GaussianShells
Models.CorrelatedGaussian
Models.Eggbox
```
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 2788 | # Benchmarks
The following benchmarks show the performance of NestedSamplers.jl. As with any statistical inference package, the likelihood function will often dominate the runtime. This is important to consider when comparing packages across different languages: in general, a custom Julia likelihood function may be faster than the same code written in Python/numpy. As an example, compare the relative timings of these two simple Gaussian likelihoods
```julia
using BenchmarkTools
using PyCall
# julia version
gauss_loglike(X) = sum(x -> exp(-0.5 * x^2) / sqrt(2π), X)
# python version
py"""
import numpy as np
def gauss_loglike(X):
return np.sum(np.exp(-0.5 * X ** 2) / np.sqrt(2 * np.pi))
"""
gauss_loglike_py = py"gauss_loglike"
xs = randn(100)
```
```julia
@btime gauss_loglike($xs)
```
```
611.971 ns (0 allocations: 0 bytes)
26.813747896467206
```
```julia
@btime gauss_loglike_py($xs)
```
```
13.129 μs (6 allocations: 240 bytes)
26.81374789646721
```
In certain cases, you can use language interop tools (like [PyCall.jl](https://github.com/JuliaPy/PyCall.jl)) to use Julia likelihoods with Python libraries.
## Setup and system information
The benchmark code can be found in the [`bench`](https://github.com/TuringLang/NestedSamplers.jl/blob/main/bench/) folder. The system information at the time these benchmarks were ran is
```julia
julia> versioninfo()
Julia Version 1.7.1
Commit ac5cc99908* (2021-12-22 19:35 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin20.5.0)
CPU: Intel(R) Core(TM) i5-8259U CPU @ 2.30GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-12.0.1 (ORCJIT, skylake)
Environment:
JULIA_NUM_THREADS = 1
```
## Highly-correlated multivariate Gaussian
This benchmark uses [`Models.CorrelatedGaussian`](@ref) and simply measures the time it takes to fully sample down to `dlogz=0.01`. This benchmark is exactly the same as the first benchmark detailed in the [JAXNS paper](https://ui.adsabs.harvard.edu/abs/2020arXiv201215286A/abstract).
### Timing
```@example sample-benchmark
using CSV, DataFrames, NestedSamplers, Plots # hide
benchdir = joinpath(dirname(pathof(NestedSamplers)), "..", "bench") # hide
results = DataFrame(CSV.File(joinpath(benchdir, "sampling_results.csv"))) # hide
plot(results.D, results.t, label="NestedSamplers.jl", marker=:o, yscale=:log10, # hide
ylabel="runtime (s)", xlabel="prior dimension", leg=:topleft, ylims=(1e-2, 1e4)) # hide
```
### Accuracy
The following shows the Bayesian evidence estimate as compared to the true value
```@example sample-benchmark
plot(results.D, results.dlnZ, yerr=results.lnZstd, label="NestedSamplers.jl", # hide
marker=:o, ylabel="ΔlnZ", xlabel="prior dimension", leg=:topleft) # hide
hline!([0.0], c=:black, ls=:dash, alpha=0.7, label="") # hide
```
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 5228 | ```@meta
CurrentModule = NestedSamplers
```
# NestedSamplers.jl
[](https://github.com/TuringLang/NestedSamplers.jl)
[](https://github.com/TuringLang/NestedSamplers.jl/actions)
[](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
[](https://codecov.io/gh/TuringLang/NestedSamplers.jl)
[](https://github.com/TuringLang/NestedSamplers.jl/blob/main/LICENSE)
Implementations of single- and multi-ellipsoidal nested sampling algorithms in pure Julia. We implement the [AbstractMCMC.jl](https://github.com/TuringLang/AbstractMCMC.jl) interface, allowing straightforward sampling from a variety of statistical models.
This package was heavily influenced by [nestle](https://github.com/kbarbary/nestle), [dynesty](https://github.com/joshspeagle/dynesty), and [NestedSampling.jl](https://github.com/kbarbary/NestedSampling.jl).
## Citing
[](https://doi.org/10.5281/zenodo.3950594)
If you use this library, or a derivative of it, in your work, please consider citing it. This code is built off a multitude of academic works, which have been noted in the docstrings where appropriate. These references, along with references for the more general calculations, can all be found in [CITATION.bib](https://github.com/TuringLang/NestedSamplers.jl/blob/main/CITATION.bib)
## Installation
To use the nested samplers first install this library
```julia
julia> ]add NestedSamplers
```
## Background
For statistical background and a more in-depth introduction to nested sampling, I recommend the [dynesty documentation](https://dynesty.readthedocs.io/en/latest/overview.html). In short, nested sampling is a technique for simultaneously estimating the Bayesian evidence and the posterior distribution (according to [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem)) from nested iso-likelihood shells. These shells allow a quadrature estimate of the integral for the Bayesian evidence, which we can use for model selection, as well as the statistical weights for the underlying "live" points, which is where we get our posterior samples from!
## Usage
The samplers are built using the [AbstractMCMC.jl](https://github.com/TuringLang/AbstractMCMC.jl) interface. To use it, we need to create a [`NestedModel`](@ref).
```@example usage
using Random
using AbstractMCMC
AbstractMCMC.setprogress!(false)
Random.seed!(8452);
nothing # hide
```
```@example usage
using Distributions
using LinearAlgebra
using NestedSamplers
using LogExpFunctions: logaddexp
# Gaussian mixture model
σ = 0.1
μ1 = ones(2)
μ2 = -ones(2)
inv_σ = diagm(0 => fill(1 / σ^2, 2))
function logl(x)
dx1 = x .- μ1
dx2 = x .- μ2
f1 = -dx1' * (inv_σ * dx1) / 2
f2 = -dx2' * (inv_σ * dx2) / 2
return logaddexp(f1, f2)
end
priors = [
Uniform(-5, 5),
Uniform(-5, 5)
]
# or equivalently
prior_transform(X) = 10 .* X .- 5
# create the model
# or model = NestedModel(logl, prior_transform)
model = NestedModel(logl, priors);
nothing # hide
```
now, we set up our sampling using [StatsBase](https://github.com/JuliaStats/StatsBase.jl).
**Important: the state of the sampler is returned in addition to the chain by `sample`.**
```@example usage
using StatsBase: sample, Weights
# create our sampler
# 2 parameters, 1000 active points, multi-ellipsoid. See docstring
spl = Nested(2, 1000)
# by default, uses dlogz for convergence. Set the keyword args here
# currently Chains and Array are supported chain_types
chain, state = sample(model, spl; dlogz=0.2, param_names=["x", "y"])
# optionally resample the chain using the weights
chain_res = sample(chain, Weights(vec(chain["weights"])), length(chain));
```
let's take a look at the resampled posteriors
```@example usage
using StatsPlots
density(chain_res)
# analytical posterior maxima
vline!([-1, 1], c=:black, ls=:dash, subplot=1)
vline!([-1, 1], c=:black, ls=:dash, subplot=2)
```
and compare our estimate of the Bayesian (log-)evidence to the analytical value
```@example usage
analytic_logz = log(4π * σ^2 / 100)
# within 2-sigma
@assert isapprox(analytic_logz, state.logz, atol=2state.logzerr)
```
## Contributions and Support
[](https://github.com/SciML/ColPrac)
**Primary Author:** Miles Lucas ([@mileslucas](https://github.com/mileslucas))
Contributions are always welcome! In general, contributions should follow [ColPrac](https://github.com/SciML/ColPrac). Take a look at the [issues](https://github.com/TuringLang/NestedSamplers.jl/issues) for ideas of open problems! To discuss ideas or plan contributions, open a [discussion](https://github.com/TuringLang/NestedSamplers.jl/discussions). | NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
|
[
"MIT"
] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 2337 | # Introduction
Nested sampling is a statistical technique first described in Skilling (2004)[^1] as a method for estimating the Bayesian evidence. Conveniently, it also produces samples with importance weighting proportional to the posterior distribution. To understand what this means, we need to comprehend [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem).
## Bayes' theorem
Bayes' theorem, in our nomenclature, is described as the relationship between the *prior*, the *likelihood*, the *evidence*, and the *posterior*. In its entirety-
```math
p(\theta | x) = \frac{p(x | \theta)p(\theta)}{p(x)}
```
### Posterior
``p(\theta | x)`` - the probability of the model parameters (``\theta``) conditioned on the data (``x``)
### Likelihood
``p(x | \theta)`` - the probability of the data (``x``) conditioned on the model parameters (``\theta``)
### Prior
``p(\theta)`` - the probability of the model parameters
### Evidence
``p(x)`` - the probability of the data
If you are familiar with Bayesian statistics and Markov Chain Monte Carlo (MCMC) techniques, you should be somewhat familiar with the relationships between the posterior, the likelihood, and the prior. The evidence, though, is somewhat hard to describe; what does "the probability of the data" mean? Well, another way of writing the evidence is as this integral
```math
p(x) \equiv Z = \int_\Omega{p(x | \theta) \mathrm{d}\theta}
```
which is like saying "the likelihood of the data [``p(x | \theta)``] integrated over *all of parameter space* [``\Omega``]". We have to write the probability this way, because the data are statistically dependent on the model parameters. This integral is intractable for all but the simplest combinations of distributions ([conjugate distributions](https://en.wikipedia.org/wiki/Conjugate_prior)), and therefore it must be estimated or approximated in some way.
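To make this concrete, here is a throwaway sketch (the toy model and all names are hypothetical, independent of any package discussed here) that brute-forces the evidence of a one-dimensional model with a Riemann sum. This works in one dimension, but the cost of such a grid grows exponentially with the number of parameters, which is one reason dedicated estimators like nested sampling exist.

```julia
using Distributions

x_obs = 0.3                                  # one observed datum (made up)
prior = Uniform(-5, 5)                       # p(θ)
likelihood(θ) = pdf(Normal(θ, 1.0), x_obs)   # p(x | θ)

# Riemann-sum approximation of Z = ∫ p(x|θ) p(θ) dθ
θgrid = range(-5, 5; length = 10_001)
Z = sum(likelihood(θ) * pdf(prior, θ) for θ in θgrid) * step(θgrid)
```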
## What can we do with the evidence?
Before we get into approximating the Bayesian evidence, let's talk about why it's important. After all, for most MCMC applications it is simply a normalization factor to be ignored (how convenient!).
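The most common answer is model comparison: the ratio of the evidences of two competing models ``M_1`` and ``M_2``, called the Bayes factor, measures how strongly the data favor one model over the other,

```math
B_{12} = \frac{Z_1}{Z_2} = \frac{p(x \,|\, M_1)}{p(x \,|\, M_2)},
```

with ``B_{12} \gg 1`` favoring ``M_1`` and ``B_{12} \ll 1`` favoring ``M_2``.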
## Further reading
For further reading, I recommend reading the cited sources in the footnotes, as well as the references below
* [dynesty documentation](https://dynesty.readthedocs.io)
[^1]: Skilling, J. (2004). "Nested Sampling". *AIP Conference Proceedings* 735, 395–405.
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
| [ "MIT" ] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 1906 |
# Correlated Gaussian
This example will explore a highly-correlated Gaussian using [`Models.CorrelatedGaussian`](@ref). This model uses a conjugate Gaussian prior; see the docstring for the mathematical definition.
## Setup
For this example, you'll need to add the following packages
```julia
julia>]add Distributions MCMCChains Measurements NestedSamplers StatsBase StatsPlots
```
```@setup correlated
using AbstractMCMC
using Random
AbstractMCMC.setprogress!(false)
Random.seed!(8452)
```
## Define model
```@example correlated
using NestedSamplers
# set up a 4-dimensional Gaussian
D = 4
model, logz = Models.CorrelatedGaussian(D)
nothing; # hide
```
let's take a look at a couple of parameters to see what the likelihood surface looks like
```@example correlated
using StatsPlots
θ1 = range(-1, 1, length=1000)
θ2 = range(-1, 1, length=1000)
loglike = model.prior_transform_and_loglikelihood.loglikelihood
logf = [loglike([t1, t2, 0, 0]) for t2 in θ2, t1 in θ1]
heatmap(
θ1, θ2, exp.(logf),
aspect_ratio=1,
xlims=extrema(θ1),
ylims=extrema(θ2),
xlabel="θ1",
ylabel="θ2"
)
```
## Sample
```@example correlated
using MCMCChains
using StatsBase
# using single Ellipsoid for bounds
# using Gibbs-style slicing for proposing new points
sampler = Nested(D, 50D;
bounds=Bounds.Ellipsoid,
proposal=Proposals.Slice()
)
names = ["θ_$i" for i in 1:D]
chain, state = sample(model, sampler; dlogz=0.01, param_names=names)
# resample chain using statistical weights
chain_resampled = sample(chain, Weights(vec(chain[:weights])), length(chain));
nothing # hide
```
## Results
```@example correlated
chain_resampled
```
```@example correlated
corner(chain_resampled)
```
```@example correlated
using Measurements
logz_est = state.logz ± state.logzerr
diff = logz_est - logz
println("logz: $logz")
println("estimate: $logz_est")
println("diff: $diff")
nothing # hide
```
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
| [ "MIT" ] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 1829 |
# Eggbox
This example will explore the classic eggbox function using [`Models.Eggbox`](@ref). The eggbox is a standard stress test for samplers: its log-likelihood surface has a regular grid of identical modes (5 along each axis in this two-dimensional version, as the heatmap and density plots below show), all of which must be found and weighted correctly to estimate the evidence.
## Setup
For this example, you'll need to add the following packages
```julia
julia>]add Distributions MCMCChains Measurements NestedSamplers StatsBase StatsPlots
```
```@setup eggbox
using AbstractMCMC
using Random
AbstractMCMC.setprogress!(false)
Random.seed!(8452)
```
## Define model
```@example eggbox
using NestedSamplers
model, logz = Models.Eggbox()
nothing; # hide
```
let's take a look at a couple of parameters to see what the log-likelihood surface looks like
```@example eggbox
using StatsPlots
x = range(0, 1, length=1000)
y = range(0, 1, length=1000)
loglike = model.prior_transform_and_loglikelihood.loglikelihood
logf = [loglike([xi, yi]) for yi in y, xi in x]
heatmap(
x, y, logf,
xlims=extrema(x),
ylims=extrema(y),
xlabel="x",
ylabel="y",
)
```
## Sample
```@example eggbox
using MCMCChains
using StatsBase
# using multi-ellipsoid for bounds
# using default rejection sampler for proposals
sampler = Nested(2, 500)
chain, state = sample(model, sampler; dlogz=0.01, param_names=["x", "y"])
# resample chain using statistical weights
chain_resampled = sample(chain, Weights(vec(chain[:weights])), length(chain));
nothing # hide
```
## Results
```@example eggbox
chain_resampled
```
```@example eggbox
marginalkde(chain[:x], chain[:y])
plot!(xlims=(0, 1), ylims=(0, 1), sp=2)
plot!(xlims=(0, 1), sp=1)
plot!(ylims=(0, 1), sp=3)
```
```@example eggbox
density(chain_resampled, xlims=(0, 1))
vline!(0.1:0.2:0.9, c=:black, ls=:dash, sp=1)
vline!(0.1:0.2:0.9, c=:black, ls=:dash, sp=2)
```
```@example eggbox
using Measurements
logz_est = state.logz ± state.logzerr
diff = logz_est - logz
println("logz: $logz")
println("estimate: $logz_est")
println("diff: $diff")
nothing # hide
```
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
| [ "MIT" ] | 0.8.3 | 3f462c3d9acbe06b0cda45e55bd06e033bab48a7 | docs | 1874 |
# Gaussian Shells
This example will explore the classic Gaussian shells model using [`Models.GaussianShells`](@ref).
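In its commonly used form (e.g. in the dynesty documentation; check the [`Models.GaussianShells`](@ref) docstring for the exact parameters used in this package), the likelihood is a sum of two ring-shaped components,

```math
L(\theta) = \mathrm{circ}(\theta; c_1) + \mathrm{circ}(\theta; c_2), \qquad
\mathrm{circ}(\theta; c) = \frac{1}{\sqrt{2\pi w^2}} \exp\left[ -\frac{\left(|\theta - c| - r\right)^2}{2w^2} \right],
```

with centers ``c_{1,2} = (\mp 3.5, 0)``, radius ``r = 2``, and width ``w = 0.1``, consistent with the two rings visible in the heatmap below.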
## Setup
For this example, you'll need to add the following packages
```julia
julia>]add Distributions MCMCChains Measurements NestedSamplers StatsBase StatsPlots
```
```@setup shells
using AbstractMCMC
using Random
AbstractMCMC.setprogress!(false)
Random.seed!(8452)
```
## Define model
```@example shells
using NestedSamplers
model, logz = Models.GaussianShells()
nothing; # hide
```
let's take a look at a couple of parameters to see what the likelihood surface looks like
```@example shells
using StatsPlots
x = range(-6, 6, length=1000)
y = range(-2.5, 2.5, length=1000)
loglike = model.prior_transform_and_loglikelihood.loglikelihood
logf = [loglike([xi, yi]) for yi in y, xi in x]
heatmap(
x, y, exp.(logf),
xlims=extrema(x),
ylims=extrema(y),
xlabel="x",
ylabel="y",
)
```
## Sample
```@example shells
using MCMCChains
using StatsBase
# using multi-ellipsoid for bounds
# using default rejection sampler for proposals
sampler = Nested(2, 1000)
chain, state = sample(model, sampler; dlogz=0.05, param_names=["x", "y"])
# resample chain using statistical weights
chain_resampled = sample(chain, Weights(vec(chain[:weights])), length(chain));
nothing # hide
```
## Results
```@example shells
chain_resampled
```
```@example shells
marginalkde(chain[:x], chain[:y])
plot!(xlims=(-6, 6), ylims=(-2.5, 2.5), sp=2)
plot!(xlims=(-6, 6), sp=1)
plot!(ylims=(-2.5, 2.5), sp=3)
```
```@example shells
density(chain_resampled)
vline!([-5.5, -1.5, 1.5, 5.5], c=:black, ls=:dash, sp=1)
vline!([-2, 2], c=:black, ls=:dash, sp=2)
```
```@example shells
using Measurements
logz_est = state.logz ± state.logzerr
diff = logz_est - logz
println("logz: $logz")
println("estimate: $logz_est")
println("diff: $diff")
nothing # hide
```
| NestedSamplers | https://github.com/TuringLang/NestedSamplers.jl.git |
| [ "MIT" ] | 0.1.3 | e1032040d1be0b6a9bcfba77379c6384e301b24b | code | 821 |
using BioSequenceMappings
using Documenter
DocMeta.setdocmeta!(BioSequenceMappings, :DocTestSetup, :(using BioSequenceMappings); recursive=true)
makedocs(;
modules=[BioSequenceMappings],
authors="Pierre Barrat-Charlaix",
sitename="BioSequenceMappings.jl",
format=Documenter.HTML(;
canonical="https://pierrebarrat.github.io/BioSequenceMappings.jl",
edit_link="master",
assets=String[],
),
pages = [
"Quickstart" => "index.md",
"Manual" => [
"Alphabets" => "alphabets.md",
"Alignments" => "alignments.md",
"Utilities" => "utilities.md"
],
"Reference" => "reference.md"
],
checkdocs=:exports,
)
deploydocs(;
repo="github.com/PierreBarrat/BioSequenceMappings.jl.git",
devbranch="master",
)
| BioSequenceMappings | https://github.com/PierreBarrat/BioSequenceMappings.jl.git |
| [ "MIT" ] | 0.1.3 | e1032040d1be0b6a9bcfba77379c6384e301b24b | code | 864 |
module BioSequenceMappings
using FASTX
using OneHotArrays
using Random
import Base: length, size
import Base: in, ==, hash, convert, copy
import Base: getindex, firstindex, lastindex, eachindex, view, keys
import Base: iterate, eltype
import Base: unique
import Base: write
include("alphabet.jl")
export Alphabet
export default_alphabet, symbols, translate
export aa_alphabet, nt_alphabet
include("alignment.jl")
export Alignment, AbstractAlignment
export eachsequence, sequence_length, sequence_number, subsample, subsample_random
export find_sequence, match_sequences, named_sequences
include("weights.jl")
export compute_weights, compute_weights!
include("IO.jl")
export read_fasta
include("misc.jl")
export hamming, pairwise_hamming
include("statistics.jl")
export site_specific_frequencies, consensus, pairwise_frequencies, pairwise_correlations
end
| BioSequenceMappings | https://github.com/PierreBarrat/BioSequenceMappings.jl.git |
| [ "MIT" ] | 0.1.3 | e1032040d1be0b6a9bcfba77379c6384e301b24b | code | 2534 |
"""
    read_fasta(fastafile::AbstractString; alphabet = :auto, kwargs...)
    read_fasta(
        fastafile::AbstractString, alphabet;
        weights = false, theta = 0.2, verbose = false,
    )

Read the fasta file `fastafile` and return an `Alignment`.
The alphabet is detected automatically by default (`alphabet = :auto`), using the first
sequences of the file; it can also be given explicitly, as an `Alphabet` or as a symbol
(*e.g.* `:aa`, `:dna`).
If `weights` is `true`, phylogenetic weights are computed with threshold `theta`.
"""
function read_fasta(fastafile::AbstractString; alphabet = :auto, kwargs...)
return read_fasta(fastafile, alphabet; kwargs...)
end
function read_fasta(fastafile::AbstractString, alphabet::Symbol; kwargs...)
alphabet = if alphabet == :auto
alphabet = auto_alphabet_from_fasta(fastafile)
else
Alphabet(alphabet)
end
return read_fasta(fastafile, alphabet; kwargs...)
end
function read_fasta(
fastafile::AbstractString, alphabet::Alphabet;
weights = false, theta = 0.2, verbose = false,
)
verbose && @info "Reading sequences from $fastafile using Alphabet $alphabet"
data = map(FASTAReader(open(fastafile))) do rec
description(rec), sequence(rec)
end
@assert allequal(Iterators.map(x -> length(x[2]), data)) """
All sequences must have the same length
"""
aln = Alignment(
mapreduce(x -> alphabet(x[2]), hcat, data), alphabet;
names = map(first, data)
)
L, M = size(aln)
verbose && @info "Found $M sequence of length $M"
if weights
compute_weights!(aln, theta)
end
return aln
end
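# Usage sketch (the file name is hypothetical):
#   A = read_fasta("sequences.fasta")                    # alphabet detected automatically
#   A = read_fasta("sequences.fasta", :aa; weights=true) # force amino-acid alphabet, compute weights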
function auto_alphabet_from_fasta(fastafile::AbstractString; n = 5)
# first n sequences (at most)
sequences = map(sequence, first(FASTAReader(open(fastafile)), n))
return auto_alphabet_from_sequences(sequences)
end
function auto_alphabet_from_sequences(sequences::AbstractVector{<:AbstractString})
characters = sort(unique(prod(sequences)))
return if all(in(_DEFAULT_NT_ALPHABET_STRING), characters)
Alphabet(:dna)
elseif all(in(_DEFAULT_AA_ALPHABET_STRING), characters)
Alphabet(:aa)
elseif all(in(_DEFAULT_BINARY_ALPHABET_STRING), characters)
Alphabet(:binary)
else
alphabet = Alphabet(prod(characters))
@warn "Could not find a default alphabet for characters $characters\
\n Using $alphabet"
alphabet
end
end
function Base.write(file::AbstractString, X::Alignment)
return open(file, "w") do io
write(io, X)
end
end
function Base.write(io::IO, X::Alignment)
return FASTAWriter(io) do fw
for (i, seq) in enumerate(X)
header = isempty(X.names[i]) ? "$i" : X.names[i]
rec = FASTARecord(header, to_string(seq, X.alphabet))
write(fw, rec)
end
end
end
| BioSequenceMappings | https://github.com/PierreBarrat/BioSequenceMappings.jl.git |
| [ "MIT" ] | 0.1.3 | e1032040d1be0b6a9bcfba77379c6384e301b24b | code | 14674 |
abstract type AbstractAlignment end
###################################################################################
#################################### Alignment ####################################
###################################################################################
"""
mutable struct Alignment{A,T} where {A, T<:Integer}
```
data::Matrix{T}
alphabet::Union{Nothing, Alphabet{A,T}}
weights::Vector{Float64} = ones(size(data, 2))/size(data, 2) # phylogenetic weights of sequences
names::Vector{String} = fill("", size(data, 2))
```
Biological sequences as vectors of type `T<:Integer`.
`data` stores sequences in *columns*: `size(data)` returns a tuple `(L, M)` with `L` the
length and `M` the number of sequences.
When displayed, shows `data` as an `MxL` matrix to match with traditional alignments.
`alphabet{A,T}` represents the mapping between integers in `data` and biological symbols of type `A` (nucleotides, amino acids...).
If `nothing`, the alignment cannot be mapped to biological sequences.
`weights` represent phylogenetic weights, and are initialized to `1/M`. They must sum to 1.
`names` are the label of sequences, and are expected to be in the same order as the columns
of `data`. They do not have to be unique, and can be ignored.
**Important**: When built from a matrix, assumes that the sequences are stored in *columns*.
## Methods
- `getindex(X::Alignment, i)` returns a matrix/vector `X.data[:, i]`.
- `for s in X::Alignment` iterates over sequences.
- `eachsequence(X::Alignment)` returns an iterator over sequences (`Vector{Int}`).
- `eachsequence_weighted(X::Alignment)` returns an iterator over sequences and weights as tuples.
- `subsample(X::Alignment, idx)` constructs the subalignment defined by indices `idx`.
"""
@kwdef mutable struct Alignment{A, T<:Integer} <: AbstractAlignment
data::Matrix{T}
alphabet::Union{Nothing, Alphabet{A,T}}
weights::Vector{Float64} = ones(size(data, 2))/size(data, 2) # phylogenetic weights of sequences
names::Vector{String} = fill("", size(data, 2))
function Alignment{A,T}(data, alphabet, weights, ::Nothing) where {A, T}
return Alignment{A,T}(data, alphabet, weights, fill("", size(data, 2)))
end
function Alignment{A,T}(data, alphabet, weights, names) where {A,T}
@assert length(names) == length(weights) == size(data, 2) """\
Inconsistent sizes between `data`, `weight` and `names` \
- got $(size(data,2)), $(length(weights)), $(length(names))
"""
# Check data and alphabet are consistent
@assert isnothing(alphabet) || all(i -> in(i, alphabet), data) """\
Some elements of `data` are not in `alphabet`
Alphabet: $alphabet
Problematic data: $(data[findall(x -> !in(x, alphabet), data)])
"""
# Check weights
@assert all(>(0), weights) "Weights must be positive"
@assert isapprox(sum(weights), 1; rtol = 1e-8) """
Weights must sum to 1 - got $(sum(weights))
"""
alphabet_copy = isnothing(alphabet) ? nothing : copy(alphabet)
return new{A,T}(Matrix(data), alphabet_copy, copy(weights), string.(names))
end
end
################################################
################# Constructors #################
################################################
#=
- from data matrix alone - kwargs to determine alphabet
- from data + alphabet
- from data + any alphabet constructor input (should be easy)
=#
function autofind_alphabet(data::AbstractMatrix{T}; verbose=true) where T <: Integer
verbose && @info "Finding alphabet automatically from data ..."
q = maximum(data)
A = default_alphabet(q, T)
verbose && @info "Found $A"
return A
end
#=
Different options for alphabet
1. an `Alphabet` of the right type --> main constructor
2. `nothing` --> special case
3. an `Alphabet` of the wrong type --> convert the data matrix if possible and fall back to 1
4. an input `X` to the `Alphabet` constructor: string or symbol --> call `Alphabet(X, T)` and fall back to 1
=#
"""
Alignment(data::AbstractMatrix, alphabet; kwargs...)
`data` is a matrix of integers, with sequences stored in columns.
`alphabet` can be either
- an `Alphabet`
- `nothing`: no conversion from integers to biological symbols.
- something to build an alphabet from (*e.g.* a symbol like `:aa`, a string, ...).
The constructor `Alphabet` will be called like so: `Alphabet(alphabet)`.
If the types of `alphabet` and `data` mismatch, `data` is converted.
`data` can also have the following shape:
- vector of integer vectors, *e.g.* [[1,2], [3,4]]: each element is considered as a sequence
- vector of integers: single sequence alignment
"""
function Alignment(data::AbstractMatrix{T}, alphabet::Alphabet{A,T}; kwargs...) where {A,T}
return Alignment{A,T}(;data, alphabet, kwargs...)
end
function Alignment(data::AbstractMatrix{T}, ::Nothing; kwargs...) where T
return Alignment{Nothing,T}(; data, alphabet=nothing, kwargs...)
end
function Alignment(D::AbstractMatrix{T}, alphabet::Alphabet{A,U}; kwargs...) where {A,T,U}
data = convert(Matrix{U}, D)
return Alignment(data, alphabet; kwargs...) # go to the first constructor
end
function Alignment(data::AbstractMatrix{T}, alphabet; kwargs...) where T
return Alignment(data, Alphabet(alphabet, T); kwargs...) # go to first constructor
end
function Alignment(data::AbstractVector{<:AbstractVector{T}}, alphabet; kwargs...) where T
# each element of `data` is one sequence
return Alignment(reduce(hcat, data), alphabet; kwargs...)
end
function Alignment(data::AbstractVector{T}, alphabet; kwargs...) where T<:Integer
return Alignment(reshape(data, length(data), 1), alphabet; kwargs...)
end
"""
Alignment(data::AbstractMatrix{T}; alphabet = :auto, kwargs...)
Keyword argument `alphabet` can be `:auto`, `:none`/`nothing`, or an input to the
constructor `Alphabet`.
Other keyword arguments are passed to the default constructor of `Alignment`.
"""
function Alignment(
data::AbstractMatrix{T}; alphabet = :auto, verbose = true, kwargs...
) where T <: Integer
A = if alphabet == :auto
autofind_alphabet(data; verbose)
elseif isnothing(alphabet) || in(alphabet, (:none, :no))
nothing
else
Alphabet(alphabet, T)
end
return Alignment(data, A; kwargs...)
end
function Alignment(data::AbstractVector{<:AbstractVector}; kwargs...)
return Alignment(reduce(hcat, data); kwargs...)
end
function Alignment(data::AbstractVector{<:Integer}; kwargs...)
return Alignment(reshape(data, length(data), 1); kwargs...)
end
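# Usage sketch (illustration only; `data` is `L x M` with sequences as columns,
# and `Alphabet(::String)` is the constructor used in `auto_alphabet_from_sequences`):
#   data = [1 2; 2 1; 1 1]              # L = 3 positions, M = 2 sequences
#   A = Alignment(data, Alphabet("AB")) # explicit alphabet
#   B = Alignment(data)                 # alphabet found automatically from `data`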
################################################
##################### Misc #####################
################################################
function Base.show(io::IO, X::Alignment)
L, M = size(X)
print(io, "Alignment of M=$M sequences of length L=$L")
end
function Base.show(io::IO, x::MIME"text/plain", X::Alignment)
L, M = size(X)
println(io, "Alignment of M=$M sequences of length L=$L - Shown as `MxL` matrix")
show(io, x, X.data')
end
function Base.copy(X::Alignment{A,T}) where {A,T}
return Alignment{A,T}(;
data = copy(X.data),
alphabet = copy(X.alphabet),
weights = copy(X.weights),
names = copy(X.names),
)
end
Base.convert(::Type{T}, X::Alignment{A,T}) where {A,T} = X
Base.convert(::Type{Alignment{A,T}}, X::Alignment{A,T}) where {A,T} = X
function Base.convert(::Type{I}, X::Alignment{A,J}) where {I<:Integer,A,J}
return Alignment{A,I}(;
data = convert(Matrix{I}, X.data),
alphabet = convert(I, X.alphabet),
weights = copy(X.weights),
names = copy(X.names),
)
end
function Base.convert(::Type{Alignment{A,T}}, X::Alignment) where {A,T<:Integer}
return convert(T, X)
end
###################################################################################
################################# OneHotAlignment #################################
###################################################################################
#=
to implement...
=#
@kwdef mutable struct OneHotAlignment{A,T} <: AbstractAlignment
data::OneHotArray{UInt32, 2, 3, Matrix{UInt32}}
alphabet::Union{Nothing, Alphabet{A,T}}
weights::Vector{Float64} = ones(size(data, 2))/size(data, 2) # phylogenetic weights of sequences
names::Vector{String} = fill("", size(data, 2))
function OneHotAlignment{A,T}(data, alphabet, weights, names) where {A,T}
@assert length(names) == length(weights) == size(data, 3) """\
Inconsistent sizes between `data`, `weight` and `names` \
- got $(size(data,3)), $(length(weights)), $(length(names))
"""
# Check data and alphabet are consistent
@assert isnothing(alphabet) || all(i -> in(i, alphabet), 1:size(data,1)) """\
Some elements of `data` are not in `alphabet`
"""
# Check weights
@assert all(>(0), weights) "Weights must be positive"
@assert isapprox(sum(weights), 1; rtol = 1e-8) """
Weights must sum to 1 - got $(sum(weights))
"""
return new{A,T}(data, alphabet, weights, names)
end
end
function onehot(X::Alignment{A,T}) where {A,T}
return OneHotAlignment{A,T}(;
data = onehotbatch(X.data, 1:length(Alphabet(X))),
alphabet = Alphabet(X),
weights = X.weights,
names = X.names,
)
end
###################################################################################
################################# AbstractAlignment ###############################
###################################################################################
################################################
############# Iterating / Indexing #############
################################################
"""
size(A::AbstractAlignment)
Return a tuple with (in order) the length and the number of sequences.
"""
Base.size(aln::AbstractAlignment) = size(aln.data)
Base.size(aln::AbstractAlignment, dim) = size(aln.data, dim)
"""
length(A::AbstractAlignment)
Return the number of sequences in `A`.
"""
Base.length(aln::AbstractAlignment) = size(aln.data, ndims(aln.data))
Base.iterate(X::AbstractAlignment) = iterate(eachslice(X.data, dims=ndims(X.data)))
function Base.iterate(X::AbstractAlignment, state)
return iterate(eachslice(X.data, dims=ndims(X.data)), state)
end
"""
eachsequence(X::AbstractAlignment[, indices]; skip)
Return an iterator over the sequences in `X`.
If `indices` is specified, consider only sequences at the corresponding indices.
Use the integer argument `skip` to return only one sequence every `skip` (~ `1:skip:end`).
"""
function eachsequence(X::AbstractAlignment, indices)
return Iterators.map(i -> selectdim(X.data, ndims(X.data), i), indices)
end
function eachsequence(X::AbstractAlignment; skip::Integer = 1)
@assert skip > 0 "`skip` kwarg must be positive - instead $skip"
return if skip == 1
eachslice(X.data, dims=ndims(X.data))
else
eachsequence(X, 1:skip:size(X.data)[end])
end
end
"""
named_sequences(X::AbstractAlignment; skip)
Return an iterator of the form `(name, sequence)` over `X`.
"""
function named_sequences(X::AbstractAlignment, indices)
return zip(X.names[indices], eachsequence(X, indices))
end
function named_sequences(X::AbstractAlignment; skip::Integer = 1)
@assert skip > 0 "`skip` kwarg must be positive - instead $skip"
return if skip == 1
zip(X.names, eachslice(X.data, dims=ndims(X.data)))
else
indices = 1:skip:size(X.data)[end]
zip(X.names[indices], eachsequence(X, indices))
end
end
# Different for OneHot and normal alignment
Base.eltype(X::Alignment{A,T}) where {A,T} = AbstractVector{T}
function Base.iterate(rX::Iterators.Reverse{<:AbstractAlignment})
iterate(Iterators.Reverse(eachslice(rX.itr.data, dims = ndims(rX.itr.data))))
end
function Base.iterate(rX::Iterators.Reverse{<:AbstractAlignment}, state)
iterate(Iterators.Reverse(eachslice(rX.itr.data, dims = ndims(rX.itr.data))), state)
end
Base.getindex(X::AbstractAlignment, i) = selectdim(X.data, ndims(X.data), i) # returns a view!!!
Base.firstindex(X::AbstractAlignment) = 1
Base.lastindex(X::AbstractAlignment) = length(X)
Base.view(X::AbstractAlignment, i) = getindex(X, i)
Base.keys(X::AbstractAlignment) = LinearIndices(1:length(X))
"""
subsample(X::AbstractAlignment, indices)
Return an `Alignment` containing only the sequences of `X` at `indices`.
"""
subsample(X::AbstractAlignment, i::Int) = subsample(X, i:i)
function subsample(X::AbstractAlignment, indices)
data_copy = copy(X[indices])
Y = Alignment(data_copy, copy(X.alphabet))
Y.weights = X.weights[indices] / sum(X.weights[indices])
Y.names = X.names[indices]
return Y
end
"""
subsample_random(X::AbstractAlignment, m::Int)
Return an `Alignment` with `m` sequences taken randomly from `X`.
Sampling is done without replacement, meaning the `m` sequences are all at different
positions in `X`.
"""
function subsample_random(X::AbstractAlignment, m::Int)
M = length(X)
@assert m < M "Cannot take $m different sequences from alignment of size $M"
return subsample(X, randperm(M)[1:m])
end
"""
find_sequence(label::AbstractString, aln::AbstractAlignment)
Find sequence with name `label` in `aln`, and return `(index, sequence)`.
Scales as the number of sequences.
**Note**: this returns a *view* of the sequence.
"""
function find_sequence(label::AbstractString, aln::AbstractAlignment)
i = findfirst(==(label), aln.names)
return (i, isnothing(i) ? nothing : aln[i])
end
"""
match_sequences(pattern, aln::AbstractAlignment)
Find sequences whose name matches `pattern` in `aln`, and return `(indices, sequences)`.
Sequences are returned as an iterator over the matching columns of the data matrix.
**Note**: this returns *views* of the sequences.
"""
function match_sequences(pattern, aln::AbstractAlignment)
idx = findall(x -> occursin(pattern, x), aln.names)
return idx, eachsequence(aln, idx)
end
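# Usage sketch (labels are hypothetical):
#   i, s = find_sequence("seq42", A)        # i === nothing if no sequence has this name
#   idx, seqs = match_sequences(r"^seq", A) # indices and an iterator over matching sequences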
################################################
##################### Misc #####################
################################################
Alphabet(A::AbstractAlignment) = A.alphabet
Base.unique(X::AbstractAlignment) = subsample(X, unique(i -> X[i], eachindex(X)))
sequence_length(X::Alignment) = size(X, 1)
sequence_length(X::OneHotAlignment) = size(X, 2)
sequence_number(X::AbstractAlignment) = last(size(X))
function Random.rand(rng::AbstractRNG, X::Random.SamplerTrivial{<:AbstractAlignment})
M = sequence_number(X[])
return X[][rand(rng, 1:M)]
end
| BioSequenceMappings | https://github.com/PierreBarrat/BioSequenceMappings.jl.git |