licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 2323 | using LinearAlgebra
import AtomicLevels: angular_integral
# Tests for the jj→ℓsj (relativistic → nonrelativistic spin-orbital) transform.
@testset "jj2ℓsj" begin
@testset "Transform matrix" begin
# Build the full transform over a small set of relativistic orbitals.
R = jj2ℓsj(ros"1[s] 2[s-p] k[s-d]"...)
# Since it is a rotation matrix, its inverse is simply the
# transpose.
@test norm(inv(Matrix(R)) - R') < 10eps()
end
@testset "Transform individual spin-orbitals" begin
for o in [o"5s", o"5p", o"5d"]
for so in spin_orbitals(o)
ℓ = so.orb.ℓ
mℓ,ms = so.m
mj = mℓ + ms
# A "stretched" state (|mj| = ℓ + 1/2) maps to a single jj orbital;
# all other mj values are a two-term linear combination.
pure = abs(mj) == ℓ + half(1)
linear_combination = jj2ℓsj(so)
@test length(linear_combination) == (pure ? 1 : 2)
# Expansion coefficients must be normalized.
@test norm(last.(linear_combination)) ≈ 1
# mj is conserved by the transformation.
@test all(isequal(mj), map(o -> first(o.m), first.(linear_combination)))
end
end
end
@testset "Angular integrals" begin
# Overlaps between spin-orbitals of equal/different angular symmetry,
# including mixed nonrelativistic/relativistic pairs.
@test isone(angular_integral(SpinOrbital(o"ks", (0,half(1))), SpinOrbital(o"1s", (0,half(1)))))
@test iszero(angular_integral(SpinOrbital(o"ks", (0,half(1))), SpinOrbital(o"2p", (0,half(1)))))
@test isone(angular_integral(SpinOrbital(ro"ks", half(1)), SpinOrbital(ro"1s", half(1))))
@test iszero(angular_integral(SpinOrbital(ro"ks", half(1)), SpinOrbital(ro"2p-", half(1))))
@test isone(angular_integral(SpinOrbital(o"ks", (0,half(1))), SpinOrbital(ro"1s", half(1))))
@test isone(angular_integral(SpinOrbital(ro"1s", half(1)), SpinOrbital(o"ks", (0,half(1)))))
# Mixed-basis overlap equals the corresponding Clebsch–Gordan coefficient.
@test angular_integral(SpinOrbital(o"2p", (0,half(1))), SpinOrbital(ro"2p-", half(1))) == clebschgordan(1, 0, half(1), half(1), half(1), half(1))
end
@testset "#orbitals = $(length(ros))" for (ros,nb) in ((rsos"l[p]", 4), (rsos"k[s-d] l[d]", 18), (filter(o -> o.m[1]==1//2, rsos"l[d]"), 1))
os, bs = jj2ℓsj(ros)
@test length(os) == length(ros)
for (ro,o) in zip(ros, os)
# Each transformed orbital preserves mj, n and ℓ of its source orbital.
@test sum(o.m) == ro.m[1]
@test o.orb.n == ro.orb.n
@test o.orb.ℓ == ro.orb.ℓ
end
# nb is the expected number of transform sub-blocks.
@test length(bs) == nb
for (i,j,b) in bs
if size(b,1) == 1
@test i == j
else
@test i ≠ j
end
# Each sub-block is symmetric and involutory (an orthogonal reflection).
@test b == b'
@test b^2 ≈ I
end
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 3306 | @testset "JJ terms" begin
@testset "_terms_jw" begin
# Independent reference implementation of the possible total-J values of a
# j^w subshell in jj coupling, obtained by the classic M_J-counting method.
# Returns the sorted, de-duplicated list of J values.
function terms_reference(j::HalfInteger, w::Integer)
# FIX: the original message interpolated `orb`, a variable that does not
# exist in this scope — reaching this branch raised UndefVarError instead
# of the intended ArgumentError. Report the in-scope quantities instead.
w <= 2j+1 || throw(ArgumentError("w=$w too large for j=$j subshell"))
# Particle–hole symmetry: w electrons and (2j+1)-w holes share the same terms.
2w ≥ 2j+1 && (w = convert(Int, 2j) + 1 - w)
w == 0 && return [zero(HalfInt)]
w == 1 && return [j]
# Forms full Cartesian product of all mⱼ, not necessarily the most
# performant method.
mⱼs = filter(allunique, collect(AtomicLevels.allchoices([-j:j for i = 1:w])))
MJs = map(x -> reduce(+, x), mⱼs) # TODO: make map(sum, mⱼs) work
Js = HalfInt[]
while !isempty(MJs)
# Identify the maximum MJ and associate it with J.
MJmax = maximum(MJs)
N = count(isequal(MJmax), MJs)
append!(Js, repeat([MJmax], N))
# Remove the -MJ:MJ series, N times.
for MJ = -MJmax:MJmax
deleteat!(MJs, findall(isequal(MJ), MJs)[1:N])
end
end
# Do we really want unique here?
sort(unique(Js))
end
# Compare the library implementation with the reference for all j ≤ 5
# and every physically allowed occupancy w.
for twoj = 0:10, w = 1:twoj+1
j = HalfInteger(twoj//2)
ts = AtomicLevels._terms_jw(j, w)
@test issorted(ts, rev=true) # make sure that the array is sorted in descending order
@test terms_reference(j, w) == sort(unique(ts))
# Check particle-hole symmetry
if w != twoj + 1
@test AtomicLevels._terms_jw(j, twoj+1-w) == ts
end
end
end
@testset "jj coupling of equivalent electrons" begin
# Degenerate cases: empty, singly-occupied and full subshells.
@test terms(ro"1s", 0) == [0]
@test terms(ro"1s", 1) == [1//2]
@test terms(ro"1s", 2) == [0]
@test terms(ro"3d-", 0) == [0]
@test terms(ro"3d-", 1) == [3//2]
@test terms(ro"3d-", 4) == [0]
@test terms(ro"Xd", 0) == [0]
@test terms(ro"Xd", 1) == [5//2]
@test terms(ro"Xd", 6) == [0]
# Table 4.5, Cowan 1981
# Each entry maps (orbital pair) => [occupancies => expected J values].
# CONSISTENCY FIX: `11/2` (a Float64 literal, which promoted the whole
# vector to Float64) is written as the rational `11//2`, matching every
# other half-integer in these tables; the resulting terms are unchanged.
foreach([
(ro"1s",ro"2p-") => [(0,2) => 0, 1 => 1//2],
(ro"2p",ro"3d-") => [(0,4) => 0, (1,3) => 3//2, 2 => [0,2]],
(ro"3d",ro"4f-") => [(0,6) => 0, (1,5) => 5//2, (2,4) => [0,2,4],
3 => [3//2,5//2,9//2]],
(ro"4f",ro"5g-") => [(0,8) => 0, (1,7) => 7//2, (2,6) => [0,2,4,6],
(3,5) => [3//2,5//2,7//2,9//2,11//2,15//2],
4 => [0,2,2,4,4,5,6,8]],
(ro"5g",ro"6h-") => [(0,10) => 0, (1,9) => 9//2, (2,8) => [0,2,4,6,8],
(3,7) => [3//2,5//2,7//2,9//2,9//2,11//2,13//2,15//2,17//2,21//2],
(4,6) => [0,0,2,2,3,4,4,4,5,6,6,6,7,8,8,9,10,12],
5 => [1//2,3//2,5//2,5//2,7//2,7//2,9//2,9//2,9//2,11//2,11//2,13//2,13//2,15//2,15//2,17//2,17//2,19//2,21//2,25//2]]
]) do (orbs,wsj)
foreach(orbs) do orb
foreach(wsj) do (ws,j)
# Normalize scalar/vector expected values to a vector of HalfIntegers.
js = map(HalfInteger, isa(j, Number) ? [j] : j)
foreach(ws) do w
@test terms(orb,w) == js
end
end
end
end
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 2019 | @testset "Levels & States" begin
@testset "J values" begin
# J_range(term) should give the allowed total J interval |L-S|:(L+S);
# for a bare (half-)integer J, the range is just that single value.
@test J_range(T"¹S") == 0:0
@test J_range(T"²S") == 1/2:1/2
@test J_range(T"³P") == 0:2
@test J_range(T"²D") == 3/2:5/2
@test J_range(half(1)) == 1/2:1/2
@test J_range(1) == 1:1
end
@testset "Construction" begin
# Levels/States from the CSFs of a 3s 3p configuration; invalid J or M_J
# values must be rejected with ArgumentError.
csfs_3s_3p = csfs(c"3s 3p")
csf_1 = first(csfs_3s_3p)
@test_throws ArgumentError Level(csf_1, HalfInteger(2))
@test_throws ArgumentError Level(csf_1, 2)
@test_throws ArgumentError State(Level(csf_1, HalfInteger(1)), HalfInteger(2))
# Pretty-printing of levels/states is pinned exactly (ket notation).
@test string(Level(csf_1, HalfInteger(1))) == "|3s(₁²S|²S) 3p(₁²Pᵒ|¹Pᵒ)-, J = 1⟩"
@test string(Level(csf_1, 1)) == "|3s(₁²S|²S) 3p(₁²Pᵒ|¹Pᵒ)-, J = 1⟩"
@test string(State(Level(csf_1, 1), HalfInteger(-1))) == "|3s(₁²S|²S) 3p(₁²Pᵒ|¹Pᵒ)-, J = 1, M_J = -1⟩"
@test string(State(Level(csf_1, 1), -1)) == "|3s(₁²S|²S) 3p(₁²Pᵒ|¹Pᵒ)-, J = 1, M_J = -1⟩"
# States sort by ascending M_J.
@test sort([State(Level(csf_1, HalfInteger(1)), M_J) for M_J ∈ reverse(HalfInteger(-1):HalfInteger(1))]) ==
[State(Level(csf_1, HalfInteger(1)), M_J) for M_J ∈ HalfInteger(-1):HalfInteger(1)]
# states() expands each CSF into all its levels and magnetic substates.
@test states.(csfs_3s_3p) == [[[State(Level(csf_1, HalfInteger(1)), M_J) for M_J ∈ HalfInteger(-1):HalfInteger(1)]],
[[State(Level(csfs_3s_3p[2], J), M_J) for M_J ∈ -J:J] for J ∈ HalfInteger(0):HalfInteger(2) ]]
# Same for the relativistic CSFs of 3s ⊗ 3p.
rcsfs_3s_3p = csfs(rc"3s" ⊗ rcs"3p")
@test states.(rcsfs_3s_3p)== [[[State(Level(rcsfs_3s_3p[1], HalfInteger(0)), HalfInteger(0))]],
[[State(Level(rcsfs_3s_3p[2], HalfInteger(1)), M_J) for M_J ∈ HalfInteger(-1):HalfInteger(1)]],
[[State(Level(rcsfs_3s_3p[3], HalfInteger(1)), M_J) for M_J ∈ HalfInteger(-1):HalfInteger(1)]],
[[State(Level(rcsfs_3s_3p[4], HalfInteger(2)), M_J) for M_J ∈ HalfInteger(-2):HalfInteger(2)]]]
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 15397 | using Random
@testset "Orbitals" begin
@testset "kappa" begin
# Validation of (ℓ, j) pairs: j must equal ℓ ± 1/2 (and be positive).
import AtomicLevels: assert_orbital_ℓj
@test assert_orbital_ℓj(0, HalfInteger(1//2)) === nothing
@test assert_orbital_ℓj(1, HalfInteger(1//2)) === nothing
@test assert_orbital_ℓj(1, 3//2) === nothing
@test assert_orbital_ℓj(2, 2.5) === nothing
@test_throws ArgumentError assert_orbital_ℓj(0, 0)
@test_throws ArgumentError assert_orbital_ℓj(0, 1)
@test_throws ArgumentError assert_orbital_ℓj(0, 3//2)
@test_throws ArgumentError assert_orbital_ℓj(5, 1//2)
@test_throws MethodError assert_orbital_ℓj(HalfInteger(1), HalfInteger(1//2))
# κ → ℓ: κ = 0 is invalid; κ < 0 ⇒ ℓ = -κ-1, κ > 0 ⇒ ℓ = κ.
import AtomicLevels: κ2ℓ
@test_throws ArgumentError κ2ℓ(0)
@test κ2ℓ(-1) == 0
@test κ2ℓ( 1) == 1
@test κ2ℓ(-2) == 1
@test κ2ℓ( 2) == 2
@test κ2ℓ(-3) == 2
@test κ2ℓ( 3) == 3
# κ → j: j = |κ| - 1/2.
import AtomicLevels: κ2j
@test_throws ArgumentError κ2j(0)
@test κ2j(-1) === half(1)
@test κ2j( 1) === half(1)
@test κ2j(-2) === half(3)
@test κ2j( 2) === half(3)
@test κ2j(-3) === half(5)
@test κ2j( 3) === half(5)
# (ℓ, j) → κ, the str2κ parser and the κ"..." string macro must agree.
import AtomicLevels: ℓj2κ
@test ℓj2κ(0, half(1)) == -1
@test str2κ("s") == -1
@test κ"s" == -1
@test ℓj2κ(1, half(1)) == 1
@test str2κ("p-") == 1
@test κ"p-" == 1
@test ℓj2κ(1, 3//2) == -2
@test str2κ("p") == -2
@test κ"p" == -2
@test ℓj2κ(2, 3//2) == 2
@test str2κ("d-") == 2
@test κ"d-" == 2
@test ℓj2κ(2, 5//2) == -3
@test str2κ("d") == -3
@test κ"d" == -3
@test ℓj2κ(3, 5//2) == 3
@test str2κ("f-") == 3
@test κ"f-" == 3
@test ℓj2κ(3, 7//2) == -4
@test str2κ("f") == -4
@test κ"f" == -4
# Invalid (ℓ, j) combinations must be rejected.
@test_throws ArgumentError ℓj2κ(0, half(3))
@test_throws ArgumentError ℓj2κ(0, 0)
@test_throws ArgumentError ℓj2κ(6, 1//2)
end
@testset "Construction" begin
# Orbital string macros, parse() and explicit constructors must agree;
# symbolic principal quantum numbers (k, ϵ, …) denote continuum orbitals.
@test o"1s" == Orbital(1, 0)
@test o"2p" == Orbital(2, 1)
@test o"2[1]" == Orbital(2, 1)
@test parse(Orbital, "1s") == Orbital(1, 0)
@test parse(Orbital{Int}, "1s") == Orbital(1, 0)
@test parse(Orbital{Symbol}, "ks") == Orbital(:k, 0)
@test ro"1s" == RelativisticOrbital(1, -1) # κ=-1 => s orbital
@test ro"2p-" == RelativisticOrbital(2, 1, half(1))
@test ro"2p-" == RelativisticOrbital(2, 1, 1//2)
@test ro"2p" == RelativisticOrbital(2, 1, 3//2)
@test ro"2[1]" == RelativisticOrbital(2, 1, 3//2)
@test o"kp" == Orbital(:k,1)
@test o"ϵd" == Orbital(:ϵ,2)
@test ro"kp" == RelativisticOrbital(:k, 1, 3//2)
@test ro"ϵd-" == RelativisticOrbital(:ϵ, 2, 3//2)
# Invalid labels and quantum-number combinations must throw.
@test_throws ArgumentError parse(Orbital, "2p-")
@test_throws ArgumentError parse(Orbital, "sdkfl")
@test_throws ArgumentError Orbital(0, 0)
@test_throws ArgumentError Orbital(1, 1)
@test_throws ArgumentError RelativisticOrbital(0, 0)
@test_throws ArgumentError RelativisticOrbital(1, 1)
@test_throws ArgumentError RelativisticOrbital(1, 0, 3//2)
# Dropping the relativistic fine structure yields the parent orbital.
@test nonrelorbital(o"2p") == o"2p"
@test nonrelorbital(ro"2p") == o"2p"
@test nonrelorbital(ro"2p-") == o"2p"
end
@testset "Properties" begin
# Field access via property names: n, ℓ for Orbital; n, κ, j, ℓ for
# RelativisticOrbital (j and ℓ are derived from κ).
let o = o"3d"
@test o.n == 3
@test o.ℓ == 2
end
let o = o"ks"
@test o.n == :k
@test o.ℓ == 0
end
@test propertynames(o"1s") == (:n , :ℓ)
@test propertynames(ro"1s") == (:n, :κ, :j, :ℓ)
let o = ro"3d"
@test o.n == 3
@test o.κ == -3
@test o.j == 5//2
@test o.ℓ == 2
end
let o = ro"ks"
@test o.n == :k
@test o.κ == -1
@test o.j == 1//2
@test o.ℓ == 0
end
let o = ro"2p-"
@test o.n == 2
@test o.κ == 1
@test o.j == 1//2
@test o.ℓ == 1
end
end
@testset "Order" begin
# Orbitals sort in energy-like order; symbolic (continuum) n sorts after
# all numeric n, and symbolic labels sort among themselves.
@test sort(shuffle([o"1s", o"2s", o"2p", o"3s", o"3p"])) ==
[o"1s", o"2s", o"2p", o"3s", o"3p"]
@test sort([o"ls", o"kp", o"2p", o"1s"]) ==
[o"1s", o"2p", o"kp", o"ls"]
@test sort(shuffle([ro"1s", ro"2s", ro"2p-", ro"2p", ro"3s", ro"3p-", ro"3p"])) ==
[ro"1s", ro"2s", ro"2p-", ro"2p", ro"3s", ro"3p-", ro"3p"]
@test sort([ro"ls", ro"kp", ro"2p", ro"1s"]) ==
[ro"1s", ro"2p", ro"kp", ro"ls"]
end
@testset "Range of orbitals" begin
# The os"..."/ros"..." macros expand n[ℓrange] specs into sorted orbital
# lists; relativistic expansion includes both j = ℓ ∓ 1/2 members.
@test os"6[s-d] 5[d]" == [o"5d", o"6s", o"6p", o"6d"]
@test os"k[s-g] l[s-g]" == [o"ks", o"kp", o"kd", o"kf", o"kg",
o"ls", o"lp", o"ld", o"lf", o"lg"]
@test ros"6[s-d] 5[d]" == [ro"5d-", ro"5d", ro"6s", ro"6p-", ro"6p", ro"6d-", ro"6d"]
@test ros"k[s-g] l[s-g]" == [ro"ks", ro"kp-", ro"kp", ro"kd-", ro"kd", ro"kf-", ro"kf", ro"kg-", ro"kg",
ro"ls", ro"lp-", ro"lp", ro"ld-", ro"ld", ro"lf-", ro"lf", ro"lg-", ro"lg"]
end
@testset "Flip j" begin
# flip_j swaps j = ℓ+1/2 ↔ j = ℓ-1/2; s orbitals are their own image.
@test AtomicLevels.flip_j(ro"1s") == ro"1s"
@test AtomicLevels.flip_j(ro"2p-") == ro"2p"
@test AtomicLevels.flip_j(ro"2p") == ro"2p-"
@test AtomicLevels.flip_j(ro"3d-") == ro"3d"
@test AtomicLevels.flip_j(ro"3d") == ro"3d-"
@test AtomicLevels.flip_j(ro"kd-") == ro"kd"
@test AtomicLevels.flip_j(ro"kd") == ro"kd-"
end
@testset "Degeneracy" begin
# Nonrelativistic: 2(2ℓ+1); relativistic: 2j+1.
@test degeneracy(o"1s") == 2
@test degeneracy(o"2p") == 6
@test degeneracy(o"3d") == 10
@test degeneracy(o"kp") == 6
@test degeneracy(ro"1s") == 2
@test degeneracy(ro"2p-") == 2
@test degeneracy(ro"2p") == 4
@test degeneracy(ro"3d-") == 4
@test degeneracy(ro"3d") == 6
@test degeneracy(ro"kp-") == 2
@test degeneracy(ro"kp") == 4
end
@testset "Parity" begin
# Parity is (-1)^ℓ for both orbital kinds.
@test iseven(parity(o"1s"))
@test isodd(parity(o"2p"))
@test iseven(parity(o"3s"))
@test isodd(parity(o"3p"))
@test iseven(parity(o"3d"))
@test isodd(parity(o"kp"))
@test iseven(parity(ro"1s"))
@test isodd(parity(ro"2p"))
@test iseven(parity(ro"3s"))
@test isodd(parity(ro"3p"))
@test iseven(parity(ro"3d"))
@test isodd(parity(ro"kp"))
end
@testset "Symmetry" begin
# Symmetry identifies orbitals of the same angular character regardless
# of n; 2p and 2p- differ (different κ).
@test symmetry(o"1s") == 0
@test symmetry(o"2s") == symmetry(o"1s")
@test symmetry(o"2s") != symmetry(o"2p")
@test symmetry(ro"1s") == symmetry(ro"2s")
@test symmetry(ro"2p") == symmetry(ro"3p")
@test symmetry(ro"2p") != symmetry(ro"2p-")
end
@testset "Bound" begin
# Numeric n ⇒ bound orbital; symbolic n ⇒ continuum.
@test isbound(o"1s")
@test isbound(o"3d")
@test isbound(ro"1s")
@test isbound(ro"3d")
@test !isbound(o"ks")
@test !isbound(o"ϵd")
@test !isbound(ro"ks")
@test !isbound(ro"ϵd")
end
@testset "Angular momenta" begin
# Nonrelativistic orbitals carry (ℓ, s); relativistic carry the single j.
@test angular_momenta(o"2s") == (0,half(1))
@test angular_momenta(o"2p") == (1,half(1))
@test angular_momenta(o"4f") == (3,half(1))
@test angular_momentum_ranges(o"4f") == (-3:3,-half(1):half(1))
@test angular_momenta(ro"1s") == (half(1),)
@test angular_momenta(ro"2p-") == (half(1),)
@test angular_momenta(ro"2p") == (half(3),)
@test angular_momenta(ro"3d-") == (half(3),)
@test angular_momenta(ro"3d") == (half(5),)
@test angular_momentum_ranges(ro"3d") == (-half(5):half(5),)
end
@testset "Spin orbitals" begin
# Spin-orbitals pin down the magnetic quantum numbers (mℓ, ms) or mj.
up, down = half(1),-half(1)
# Out-of-range magnetic quantum numbers must be rejected.
@test_throws ArgumentError SpinOrbital(o"1s", 1, up)
@test_throws ArgumentError SpinOrbital(o"ks", 1, up)
@test_throws ArgumentError SpinOrbital(o"2p", -3, up)
@test_throws ArgumentError SpinOrbital(o"2p", 1, half(3))
@test_throws ArgumentError SpinOrbital(ro"2p-", half(3))
soα = SpinOrbital(o"1s", 0, up)
soβ = SpinOrbital(o"1s", 0, down)
po₋α = SpinOrbital(o"2p", -1, up)
po₀α = SpinOrbital(o"2p", 0, up)
po₊α = SpinOrbital(o"2p", 1, up)
po₋β = SpinOrbital(o"2p", -1, down)
po₀β = SpinOrbital(o"2p", 0, down)
po₊β = SpinOrbital(o"2p", 1, down)
# A fully-specified spin-orbital is non-degenerate.
@test degeneracy(soα) == 1
@test soα < soβ
# Parity is inherited from the parent orbital.
@test parity(soα) == parity(soβ) == p"even"
@test parity(po₋α) == p"odd"
@test parity(po₀α) == p"odd"
@test parity(po₊α) == p"odd"
@test parity(po₋β) == p"odd"
@test parity(po₀β) == p"odd"
@test parity(po₊β) == p"odd"
# Symmetry distinguishes every (mℓ, ms) combination, but not n.
@test symmetry(soα) != symmetry(soβ)
@test symmetry(po₋α) != symmetry(po₀α)
@test symmetry(po₋α) != symmetry(po₊α)
@test symmetry(po₊α) != symmetry(po₀α)
@test symmetry(po₋β) != symmetry(po₀β)
@test symmetry(po₋β) != symmetry(po₊β)
@test symmetry(po₊β) != symmetry(po₀β)
@test symmetry(po₋α) != symmetry(po₋β)
@test symmetry(po₀α) != symmetry(po₀β)
@test symmetry(po₊α) != symmetry(po₊β)
@test symmetry(po₋α) == symmetry(SpinOrbital(o"3p", -1, up))
@test isbound(soα)
@test !isbound(SpinOrbital(o"ks", 0, up))
# spin_orbitals enumerates all magnetic substates, in canonical order,
# and their number equals the orbital degeneracy.
@test spin_orbitals(o"1s") == [soα, soβ]
@test spin_orbitals(o"2p") == [po₋α, po₋β, po₀α, po₀β, po₊α, po₊β]
for orb in [o"1s", o"2p", o"3d", o"4f", o"5g"]
@test length(spin_orbitals(orb)) == degeneracy(orb)
end
@test sos"1[s] 2[p]" == [soα, soβ, po₋α, po₋β, po₀α, po₀β, po₊α, po₊β]
# String form: subscripted mℓ plus α/β spin label.
@test "$(soα)" == "1s₀α"
@test "$(po₊β)" == "2p₁β"
@testset "Construction" begin
# so"..."/rso"..." macros accept α/β labels, rationals and decimals.
@test so"1s(0,α)" == SpinOrbital(o"1s", (0,1/2))
@test so"1s(0,β)" == SpinOrbital(o"1s", (0,-1/2))
@test so"2p(1,β)" == SpinOrbital(o"2p", (1,-1/2))
@test so"1s(0,-1/2)" == SpinOrbital(o"1s", (0,-1/2))
@test so"1s(0,-0.5)" == SpinOrbital(o"1s", (0,-1/2))
@test so"1s(0,1/2)" == SpinOrbital(o"1s", (0,1/2))
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "1s(0,1/2)")
@test rso"1s(1/2)" == SpinOrbital(ro"1s", (1/2))
@test rso"1s(α)" == SpinOrbital(ro"1s", (1/2)) # This should not work, but hey...
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "1s(3/2)")
@test rso"2p(3/2)" == SpinOrbital(ro"2p", (3/2))
@test rso"3p(-3/2)" == SpinOrbital(ro"3p", (-3/2))
@test rso"3p(-1/2)" == SpinOrbital(ro"3p", (-1/2))
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "3p(5/2)")
@test rso"3p(1/2)" == SpinOrbital(ro"3p", (1/2))
@test rso"3p-(1/2)" == SpinOrbital(ro"3p-", (1/2))
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "3p-(3/2)")
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "3p-(-3/2)")
@test rso"3p-(-1/2)" == SpinOrbital(ro"3p-", (-1/2))
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "3d-(-5/2)")
@test_throws ArgumentError parse(SpinOrbital{RelativisticOrbital}, "3d-(..)")
@test rso"3d-(-3/2)" == SpinOrbital(ro"3d-", (-3/2))
@test rso"3d(5/2)" == SpinOrbital(ro"3d", (5/2))
@test parse(SpinOrbital{Orbital}, "1s(0,α)") == SpinOrbital(Orbital(1, 0), (0, up))
@test parse(SpinOrbital{Orbital{Int}}, "1s(0,α)") == SpinOrbital(Orbital(1, 0), (0, up))
@test parse(SpinOrbital{Orbital{Symbol}}, "ks(0,α)") == SpinOrbital(Orbital(:k, 0), (0, up))
# Subscripted short forms parse identically to the parenthesized forms.
@test so"1s₀β" == so"1s(0,β)"
@test so"2p₋₁α" == so"2p(-1,α)"
@test so"k[31]₋₁₃α" == so"k[31](-13,α)"
# Round trip: string(o) must parse back to o for both orbital kinds.
for o in [SpinOrbital(o"1s", (0,-1/2)),
SpinOrbital(o"1s", (0,1/2)),
SpinOrbital(o"2p", (1,-1/2)),
SpinOrbital(ro"1s", (1/2)),
SpinOrbital(ro"2p", (3/2)),
SpinOrbital(ro"3d", (5/2)),
SpinOrbital(ro"3d-", (-3/2)),
SpinOrbital(ro"3p", (-1/2)),
SpinOrbital(ro"3p", (-3/2)),
SpinOrbital(ro"3p", (1/2)),
SpinOrbital(ro"3p-", (-1/2)),
SpinOrbital(ro"3p-", (1/2))]
O = typeof(o.orb)
@test parse(SpinOrbital{O}, string(o)) == o
end
end
end
@testset "Internal methods" begin
# mqtype is the type of the main (principal) quantum number.
@test AtomicLevels.mqtype(o"2s") == Int
@test AtomicLevels.mqtype(o"ks") == Symbol
@test AtomicLevels.mqtype(ro"2s") == Int
@test AtomicLevels.mqtype(ro"ks") == Symbol
end
@testset "String representation" begin
@test string(o"1s") == "1s"
@test string(ro"2p-") == "2p-"
@test ascii(ro"2p-") == "2p-"
end
@testset "Hashing" begin
# Equal orbitals must hash equally (Dict/Set correctness).
@test hash(o"3s") == hash(o"3s")
@test hash(ro"3p-") == hash(ro"3p-")
end
@testset "Serialization" begin
# write/read round trips through an IOBuffer, including symbolic and
# exotic (generated-symbol) principal quantum numbers.
@testset "Orbitals" begin
o = o"1s"
p = o"kg"
q = Orbital(:k̃, 14)
oo = SpinOrbital(o, (0, 1/2))
r = Orbital(Symbol("[$(oo)]"), 14)
no,np,nq,nr = let io = IOBuffer()
foreach(Base.Fix1(write, io), (o,p,q,r))
seekstart(io)
[read(io, Orbital)
for i = 1:4]
end
@test no == o
@test np == p
@test nq == q
@test nr == r
end
@testset "Relativistic orbitals" begin
o = ro"1s"
p = ro"kg"
q = RelativisticOrbital(:k̃, 14)
oo = SpinOrbital(o, (1/2))
r = RelativisticOrbital(Symbol("[$(oo)]"), 14)
no,np,nq,nr = let io = IOBuffer()
foreach(Base.Fix1(write, io), (o,p,q,r))
seekstart(io)
[read(io, RelativisticOrbital)
for i = 1:4]
end
@test no == o
@test np == p
@test nq == q
@test nr == r
end
@testset "Spin-orbitals" begin
a = so"1s(0,α)"
b = rso"2p-(1/2)"
c = rso"kd-(-1.5)"
d = so"3d(-2,-0.5)"
e = SpinOrbital(Orbital(Symbol("[$(d)]"), 14), (-13, -0.5))
na,nb,nc,nd,ne = let io = IOBuffer()
foreach(Base.Fix1(write, io), (a,b,c,d,e))
seekstart(io)
[read(io, SpinOrbital)
for i = 1:5]
end
@test na == a
@test nb == b
@test nc == c
@test nd == d
@test ne == e
end
end
@testset "Broadcasting" begin
# Orbitals behave as scalars under broadcasting.
@test ([o"1s", o"2p"] .== o"1s") == [true, false]
@test ([ro"1s", ro"2p-"] .== ro"1s") == [true, false]
@test ([so"1s(0,α)", so"2p(0,α)"] .== so"1s(0,α)") == [true, false]
@test ([rso"1s(1/2)", rso"2p-(1/2)"] .== rso"1s(1/2)") == [true, false]
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 1335 | using UnicodeFun
# Tests for the Parity type: construction, predicates, group arithmetic
# (multiplication/powers form the two-element group) and printing.
@testset "Parity" begin
@testset "Construction" begin
@test p"even".p
@test !p"odd".p
# Conversion from the conventional ±1 integer representation.
@test convert(Parity, 1) == p"even"
@test convert(Parity, -1) == p"odd"
@test_throws ArgumentError convert(Parity, 5)
end
@testset "Boolean properties" begin
@test iseven(p"even")
@test !isodd(p"even")
@test !iseven(p"odd")
@test isodd(p"odd")
# Ordering: odd sorts below even.
@test p"odd" < p"even"
@test p"even" ≥ p"odd"
end
@testset "Arithmetic" begin
# Multiplication follows the sign rule; powers alternate for odd parity.
@test iseven(p"even"*p"even")
@test isodd(p"even"*p"odd")
@test isodd(p"odd"*p"even")
@test iseven(p"odd"*p"odd")
@test iseven(p"even"^0)
@test iseven(p"even"^1)
@test iseven(p"even"^2)
@test iseven(p"even"^3)
@test iseven(p"odd"^0)
@test isodd(p"odd"^1)
@test iseven(p"odd"^2)
@test isodd(p"odd"^3)
# Unary minus flips parity.
@test isodd(-p"even")
@test iseven(-p"odd")
end
@testset "Conversion" begin
@test convert(Int, p"even") == 1
@test convert(Int, p"odd") == -1
end
@testset "Pretty-printing" begin
# Odd parity prints as the superscript "o" in term symbols.
@test "$(p"even")" == "even"
@test "$(p"odd")" == "odd"
@test to_superscript(p"even") == ""
@test to_superscript(p"odd") == "ᵒ"
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 538 | using AtomicLevels
using WignerSymbols
using HalfIntegers
using Test
# Round-trip of Unicode sub-/superscript digits and signs to ASCII.
@testset "Unicode super-/subscripts" begin
@test AtomicLevels.from_subscript("₋₊₁₂₃₄₅₆₇₈₉₀") == "-+1234567890"
@test AtomicLevels.from_superscript("⁻⁺¹²³⁴⁵⁶⁷⁸⁹⁰") == "-+1234567890"
end
# Test-suite driver: each include runs one topical test file.
include("parity.jl")
include("orbitals.jl")
include("configurations.jl")
include("excited_configurations.jl")
include("terms.jl")
include("jj_terms.jl")
include("intermediate_terms.jl")
include("couple_terms.jl")
include("csfs.jl")
include("levels.jl")
include("jj2lsj.jl")
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 10841 | using AtomicLevels
using UnicodeFun
using Combinatorics: combinations
using WignerSymbols
using Test
@testset "Terms" begin
@testset "Construction" begin
# Term symbols: multiplicity 2S+1, L letter (or bracketed number /
# rational), optional parity suffix (e/o, default even).
@test T"1S" == Term(0, 0, p"even")
@test T"1S" == Term(0, 0, 1)
@test T"1Se" == Term(0, 0, p"even")
@test T"1So" == Term(0, 0, p"odd")
@test T"1So" == Term(0, 0, -1)
@test T"2So" == Term(0, 1//2, p"odd")
@test T"4P" == Term(1, 3//2, p"even")
@test T"3D" == Term(2, 1, p"even")
@test T"3Do" == Term(2, 1, p"odd")
@test T"1[54]" == Term(54, 0, p"even")
@test T"1[3/2]" == Term(3//2, 0, p"even")
@test T"2[3/2]o" == Term(3//2, 1//2, p"odd")
@test T"2Z" == Term(20, 1//2, p"even")
# string() must round trip through parse().
for T in [T"1S", T"1Se", T"1So", T"2So", T"4P", T"3D", T"3Do",
T"1[54]", T"1[3/2]", T"2[3/2]o", T"2Z"]
@test parse(Term, string(T)) == T
end
# Negative L or S, and malformed strings, are rejected.
@test_throws DomainError Term(HalfInteger(-1//2), HalfInteger(1//2), p"even")
@test_throws DomainError Term(3//2, -1//2, p"odd")
@test_throws DomainError Term(-2, 1//2, 1)
@test_throws ArgumentError parse(Term, "1[4/3]")
@test_throws ArgumentError parse(Term, "1[43/]")
@test_throws ArgumentError parse(Term, "1[/43/]")
@test_throws ArgumentError parse(Term, "1[/43]")
@test_throws ArgumentError parse(Term, "P")
@test_throws ArgumentError parse(Term, "asdf")
end
@testset "Properties" begin
# multiplicity = 2S+1; weight = (2S+1)(2L+1); ordering is energy-like.
@test multiplicity(T"1S") == 1
@test multiplicity(T"1So") == 1
@test multiplicity(T"2S") == 2
@test multiplicity(T"2So") == 2
@test multiplicity(T"3S") == 3
@test multiplicity(T"3So") == 3
@test weight(T"1S") == 1
@test weight(T"1P") == 3
@test weight(T"2S") == 2
@test weight(T"2P") == 6
@test T"1S" > T"1So"
@test T"1So" < T"1S"
@test T"1S" < T"2S"
@test T"1S" < T"1P"
@test T"1S" < T"3S"
@test T"1P" < T"3S"
end
@testset "Pretty printing" begin
# Display uses Unicode superscripts for multiplicity/parity.
map([T"1S" => "¹S",
T"2So" => "²Sᵒ",
T"4[3/2]" => "⁴[3/2]"]) do (t,s)
@test "$(t)" == s
end
end
@testset "Orbital terms" begin
# Compare terms(orb, occ) against the expected multiset `ts`, honouring
# multiplicities: terms that occur more than once must occur the same
# number of times on both sides. On failure the remaining/missing terms
# are printed to aid debugging.
function test_single_orbital_terms(orb::Orbital, occ::Int, ts::Vector{Term})
cts = sort(terms(orb, occ))
ts = sort(ts)
ccts = copy(cts)
for t in cts
if t in ts
@test count(e -> e == t, ccts) == count(e -> e == t, ts)
ccts = filter!(e -> e != t, ccts)
ts = filter!(e -> e != t, ts)
end
end
if length(ts) != 0
println("fail, terms: ", join(string.(cts), ", "))
println("missing: ", join(string.(ts), ", "))
println("should not be there: ", join(string.(ccts), ", "))
println("==========================================================")
end
@test length(ts) == 0
end
# Particle–hole pair of occupancies: both must give the same terms.
function test_single_orbital_terms(orb::Orbital, occs::Tuple{Int,Int}, ts::Vector{Term})
map(occs) do occ
test_single_orbital_terms(orb, occ, ts)
end
end
# Parse a spectroscopic label like "d3" into an (orbital, occupancy) pair.
# The leading letter fixes ℓ; the principal quantum number is chosen as the
# smallest valid one, n = ℓ+1. Unless the subshell is full or half-full,
# the occupancy is returned together with its particle–hole conjugate so
# that both fillings are exercised by the caller.
function get_orbital(label::AbstractString)
pqn = something(findfirst(isequal(label[1]), AtomicLevels.spectroscopic), 0)
angular = pqn - 1
orbital = Orbital(pqn, angular)
degen = 2(2angular + 1)
w = length(label) > 1 ? parse(Int, label[2:end]) : 1
# Full or half-full shells are kept as a single occupancy; everything
# else is paired with its hole-conjugate occupancy.
if w == degen || w == degen/2
return orbital, w
end
return orbital, (w, degen - w)
end
# Single-term case: "d3" => "2D" — parse the multiplicity/L-letter pair and
# compare against terms() for the subshell (and its hole conjugate).
function test_orbital_terms(o_ts::Pair{<:ST,<:ST}) where {ST<:AbstractString}
orb,occ = get_orbital(o_ts[1])
m = match(r"([0-9]+)([A-Z])", o_ts[2])
L = something(findfirst(isequal(lowercase(m[2])[1]), AtomicLevels.spectroscopic), 0)-1
S = (parse(Int, m[1])-1)//2
test_single_orbital_terms(orb, occ, [Term(L, S, parity(orb)^occ[1])])
end
# Multi-term case: expand a compact Cowan-style term list such as
# "2(PD2FGH)" — multiplicity followed by a parenthesized list of L labels,
# each optionally repeated n times — into explicit Term objects and compare
# them against terms() for the given subshell.
function test_orbital_terms(o_ts::Pair{<:ST,<:Vector{ST}}) where {ST<:AbstractString}
orb,occ = get_orbital(o_ts[1])
p = parity(orb)^occ[1]
ts = o_ts[2]
# "M(XnYm…)": multiplicity M, then L labels (letter or bracketed number),
# each with an optional repetition count.
p1 = r"([0-9]+)\(((?:(?:[A-Z]|\[[0-9]+\])[0-9]*)+)\)"
ts = map(ts) do t
t = replace(t, " " => "")
m = match(p1, t)
if m !== nothing # identity comparison; `m != nothing` is non-idiomatic
map(eachmatch(r"([A-Z]|\[[0-9]+\])([0-9]*)", m[2])) do mm
term = parse(Term, "$(m[1])$(mm[1])")
nterms = isempty(mm[2]) ? 1 : parse(Int, mm[2])
[Term(term.L, term.S, p) for j in 1:nterms]
end
else
# Simple "ML" form: parse directly. (The original bound an unused
# second regex match and an unused S variable here; both removed.)
term = parse(Term, t)
Term(term.L, term.S, p)
end
end
test_single_orbital_terms(orb, occ, vcat(vcat(ts...)...))
end
# Table taken from Cowan, p. 110
# Numbers following term symbols indicate the amount of times
# different terms with the same (L,S) occur.
test_orbital_terms("s" => "2S")
# Closed shells always couple to 1S.
for o in ["s2", "p6", "d10", "f14"]
test_orbital_terms(o => "1S")
end
test_orbital_terms("p" => "2P")
test_orbital_terms("p2" => ["1(SD)", "3P"])
test_orbital_terms("p3" => ["2(PD)", "4S"])
test_orbital_terms("d" => "2D")
test_orbital_terms("d2" => ["1(SDG)", "3(PF)"])
test_orbital_terms("d3" => ["2(PD2FGH)", "4(PF)"])
test_orbital_terms("d4" => ["1(S2D2FG2I)", "3(P2DF2GH)", "5D"])
test_orbital_terms("d5" => ["2(SPD3F2G2HI)", "4(PDFG)", "6S"])
test_orbital_terms("f" => "2F")
test_orbital_terms("f2" => ["1(SDGI)", "3(PFH)"])
test_orbital_terms("f3" => ["2(PD2F2G2H2IKL)", "4(SDFGI)"])
test_orbital_terms("f4" => ["1(S2D4FG4H2I3KL2N)", "3(P3D2F4G3H4I2K2LM)", "5(SDFGI)"])
test_orbital_terms("f5" => ["2(P4D5F7G6H7I5K5L3M2NO)", "4(SP2D3F4G4H3I3K2LM)", "6(PFH)"])
test_orbital_terms("f6" => ["1(S4PD6F4G8H4I7K3L4M2N2Q)", "3(P6D5F9G7H9I6K6L3M3NO)", "5(SPD3F2G3H2I2KL)", "7F"])
test_orbital_terms("f7" => ["2(S2P5D7F10G10H9I9K7L5M4N2OQ)", "4(S2P2D6F5G7H5I5K3L3MN)", "6(PDFGHI)", "8S"])
# # Data below are from Xu2006
test_orbital_terms("g9" => [
"2(S8 P19 D35 F40 G52 H54 I56 K53 L53 M44 N40 O32 Q26 R19 T15 U9 V7 W4 X2 YZ)",
"4(S6 P16 D24 F34 G38 H40 I42 K39 L35 M32 N26 O20 Q16 R11 T7 U5 V3 WX)",
"6(S3P3D9F8G12H10I12K9L9M6N6O3Q3RT)",
"8(PDFGHIKL)", "10(S)"
])
test_orbital_terms("h11" => [
"2(S36 P107 D173 F233 G283 H325 I353 K370 L376 M371 N357 O335 Q307 R275 T241 U207 V173 W142 X114 Y88 Z68 [21]50 [22]36 [23]25 [24]17 [25]11 [26]7 [27]4 [28]2 [29] [30])",
"4(S37 P89 D157 F199 G253 H277 I309 K313 L323 M308 N300 O271 Q251 R216 T190 U155 V131 W101 X81 Y59 Z45 [21]30 [22]22 [23]13 [24]9 [25]5 [26]3 [27] [28])",
"6(S12 P35 D55 F76 G90 H101 I109 K111 L109 M105 N97 O87 Q77 R65 T53 U43 V33 W24 X18 Y12 Z8 [21]5 [22]3 [23] [24])",
"8(S4 P4 D12 F11 G17 H15 I19 K16 L18 M14 N14 O10 Q10 R6 T6 U3 V3WX)",
"10(PDFGHIKLMN)", "12S"
])
# Occupancies beyond the shell degeneracy must be rejected.
@test_throws DomainError terms(o"2p", 7)
@testset "Reference implementation" begin
# This is an independent implementation that calculates all the (L, S) terms
# of a given ℓ^w orbital in LS coupling. It works by considering the distribution
# of the M_L and M_S quantum numbers of the many-particle basis states of the ℓ^w
# orbital. It is much less performant than the implementation of terms() in AtomicLevels.
function ls_terms_reference(ℓ::Integer, w::Integer, parity::Parity)
# FIX: DomainError takes the offending value first and the message
# second; the originals passed the message string as the value, which
# produced an unhelpfully rendered exception.
ℓ >= 0 || throw(DomainError(ℓ, "ℓ must be non-negative"))
w >= 1 || throw(DomainError(w, "w must be positive"))
# First, let's build a histogram of (M_L, M_S) values of all the product
# basis states.
lsbasis = [(ml, ms) for ms = HalfInteger(-1//2):HalfInteger(1//2), ml = -ℓ:ℓ]
@assert length(lsbasis) == 2*(2ℓ+1)
Lmax, Smax = w * ℓ, w * HalfInteger(1//2)
NL, NS = convert(Int, 2*Lmax + 1), convert(Int, 2*Smax + 1)
hist = zeros(Int, NL, NS)
for c in combinations(1:length(lsbasis), w)
ML, MS = 0, 0
for i in c
ML += lsbasis[i][1]
MS += lsbasis[i][2]
end
i = convert(Int, ML + Lmax) + 1
j = convert(Int, MS + Smax) + 1
hist[i, j] += 1
end
# Find the valid (L, S) terms by removing maximal rectangular bases from the
# 2D histogram. The width and breadth of the rectangles determines the (L,S)
# term that generates the corresponding (M_L, M_S) values.
terms = Term[]
Lmid, Smid = div(NL, 2) + (isodd(NL) ? 1 : 0), div(NS, 2) + (isodd(NS) ? 1 : 0)
for i = 1:Lmid
while !all(hist[i,:] .== 0)
is = i:(NL-i+1)
for j = 1:Smid
js = j:(NS-j+1)
any(hist[is, js] .== 0) && continue
L = convert(HalfInteger, Lmax - i + 1)
S = convert(HalfInteger, Smax - j + 1)
push!(terms, Term(L, S, parity))
hist[is, js] .-= 1
break
end
end
@assert all(hist[NL-i+1, :] .== 0) # make sure everything is symmetric
end
return terms
end
# We can't go too high in ℓ, since ls_terms_reference becomes quite slow.
for ℓ = 0:5, w = 1:(4ℓ+2)
o = Orbital(:X, ℓ)
p = parity(o)^w
reference_terms = ls_terms_reference(ℓ, w, p)
@test sort(terms(o, w)) == sort(reference_terms)
end
end
end
@testset "Count terms" begin
# Multiplicity of a given term within ℓ^w, e.g. d³ contains ²D twice.
@test count_terms(o"1s", 1, T"2S") == 1
@test count_terms(o"1s", 2, T"1S") == 1
@test count_terms(o"3d", 1, T"2D") == 1
@test count_terms(o"3d", 3, T"2D") == 2
@test count_terms(o"3d", 5, T"2D") == 3
end
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 1347 | using AtomicLevels: CSF
# Parity suffix used in term-symbol labels: "o" for odd parity, "" for even.
function p_suffix(p::Parity)
return isodd(p) ? "o" : ""
end
# Parity suffix of an entire configuration, derived from its total parity.
function p_suffix(cfg::Configuration)
return p_suffix(parity(cfg))
end
"""
    parse_csf(filename)

Parse a (GRASP-style) nonrelativistic CSF file into a vector of
`NonRelativisticCSF`s. The file is assumed to consist of a header line,
a core-configuration line, and then repeated pairs of peel-configuration
and term lines, terminated by a line starting with `*`.
"""
function parse_csf(filename)
# It is assumed that all term symbols are on the form ML (and MLS
# for intermediate terms) where M is multiplicity, and S is
# seniority number.
ref_csfs = NonRelativisticCSF[]
open(filename) do file
# Skip the header line.
readline(file)
# Core configuration: filled and closed (close/fill are AtomicLevels
# operations on Configuration, not Base I/O functions).
core_cfg = close(fill(parse(Configuration{Orbital}, join(split(readline(file)), " "))))
while true
peel_cfg = readline(file)
# A line starting with '*' terminates the CSF list.
peel_cfg[1] == '*' && break
# Strip the occupation parentheses before parsing the configuration.
peel_cfg = parse(Configuration{Orbital},
join(split(replace(replace(replace(peel_cfg, "( "=>""), "("=>""), ")"=>"")), " "))
cfg = core_cfg + peel_cfg
ts = split(readline(file))
np = length(peel_cfg)
# First np tokens: intermediate terms (with seniority digit at the end).
its = map(enumerate(ts[1:np])) do (i,t)
ip = p_suffix(parity(Configuration(peel_cfg[i]...)))
IntermediateTerm(parse(Term, "$(t[1:2])$(ip)"), Seniority(parse(Int, t[end])))
end
# Remaining tokens: the successively coupled terms.
coupled_terms = map(enumerate(ts[vcat(1,np+1:end)])) do (i,t)
parse(Term, "$(t[1:2])$(p_suffix(peel_cfg[1:i]))")
end
push!(ref_csfs, CSF(cfg, its, coupled_terms))
end
end
ref_csfs
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | code | 7789 | using AtomicLevels: CSF
using HalfIntegers
# Total angular momentum j of a relativistic orbital, derived from κ.
angularmomentum(o::RelativisticOrbital) = AtomicLevels.κ2j(o.κ)
# Total angular momentum of a CSF: the last (outermost) coupled term.
angularmomentum(csf::CSF{O,<:HalfInteger}) where O = last(csf.terms)
# Relativistic CSLs and parsing of GRASP CSL files
"""
    parse_rcsf(filename)

Parse a GRASP relativistic CSL file into a vector of relativistic `CSF`s.
Handles the header (core/peel subshell lists), CSF blocks separated by
`*` lines, and reconstructs orbital/CSF couplings that GRASP omits when
they are unambiguous. Intermediate terms are recovered by matching the
couplings against those generated by `intermediate_terms`.
"""
function parse_rcsf(filename)
open(filename, "r") do io
# First line should be "Core subshells:"
line = readline(io); @assert strip(line) == "Core subshells:"
line_cores = strip(readline(io), ['\n', '\r'])
line = readline(io); @assert strip(line) == "Peel subshells:"
line_peels = readline(io)
line = readline(io); @assert strip(line) == "CSF(s):"
core_orbitals = parse_cores(line_cores)
# Core subshells are closed: zero coupling, full occupation.
core_couplings = zeros(HalfInt, length(core_orbitals))
core_occupations = map(degeneracy, core_orbitals)
blockid, csfid = 1, 1
csfs = CSF{RelativisticOrbital{Int},HalfInt,Seniority}[]
while ! eof(io)
line1 = readline(io)
# A " *" line separates blocks of CSFs.
if startswith(line1, " *")
blockid += 1
csfid = 1
line1 = readline(io)
end
line1 = strip(line1, ['\n', '\r'])
line2 = strip(readline(io), ['\n', '\r'])
line3 = strip(readline(io), ['\n', '\r'])
orbitals, noccupations, orbcouplings, csfcouplings = parse_csflines(line1, line2, line3)
@assert !any(isequal(0), noccupations) # we should never get 0-electron orbitals
# Fix orbcouplings that are not explicitly written in the CSL file (represented
# with nothings in the array). It is assumed that this is the case only if the
# orbital is fully occupied.
#
# NOTE: we could, though, also omit values if there is only 1 electron or
# maxelectrons(orb) - 1 electrons, this which case the angular momentum can
# only have only one value too.
for i = 1:length(orbitals)
orbital, nelec = orbitals[i], noccupations[i]
if orbcouplings[i] === nothing
nelec == degeneracy(orbital) || error("""
Unable to fix missing orbital coupling.
Orbital $i, orbital=$(repr(orbital)), nelec=$nelec
1: $(line1)
2: $(line2)
3: $(line3)
""")
orbcouplings[i] = 0
elseif (nelec == 1 || nelec == degeneracy(orbital) - 1) && orbcouplings[i] != angularmomentum(orbital)
@warn "Bad orbital coupling" orbcouplings[i] nelec angularmomentum(orbital)
elseif orbcouplings[i] > nelec * angularmomentum(orbital)
# If the orbital coupling is larger than (# particles) * (l of orbital),
# then that has to be an error.
@warn "Bad orbital coupling" orbcouplings[i] nelec angularmomentum(orbital)
end
end
# Fix csfcouplings which are not explicitly written to the CSL file. This appears
# to be the case for "trivial" zero couplings, if the previous CSF layer and
# current orbital are both zeros.
for i = 1:length(orbitals)
oj = orbcouplings[i]
cj = (i > 1) ? csfcouplings[i-1] : zero(HalfInt)
# Triangle inequality bounds for coupling oj with cj.
Δupper, Δlower = oj+cj, abs(oj-cj)
if csfcouplings[i] === nothing
Δupper == Δlower || error("""
Unable to fix missing CSF coupling.
Orbital $i, orbital=$(repr(orbitals[i])), oj=$(repr(oj)), cj=$(repr(cj))
1: $(line1)
2: $(line2)
3: $(line3)
""")
csfcouplings[i] = Δlower
elseif !(Δlower <= csfcouplings[i] <= Δupper)
@warn "Invalid csfcoupling value?" csfcouplings[i] Δupper Δlower
end
end
config = Configuration(vcat(core_orbitals, orbitals), vcat(core_occupations, noccupations), sorted=true)
# I have no idea how to generate correct seniority numbers, even in the case where
# it does not matter (no degeneracy). So instead, I'll try to match the couplings
# to the intermediate terms generated by AtomicLevels.
subshell_terms = let its = intermediate_terms.(vcat(core_orbitals, orbitals), vcat(core_occupations, noccupations))
map(its, vcat(core_couplings, Vector{HalfInt}(orbcouplings))) do its, grasp_term
idxs = findall(it -> it.term == grasp_term, its)
@assert length(idxs) == 1 # make sure there is only one intermediate term with the coupling we're interested in
its[first(idxs)]
end
end
terms = map(x -> convert(Rational{Int}, x),
vcat(core_couplings, Vector{HalfInt}(csfcouplings)))
csf = CSF(config, subshell_terms, terms)
push!(csfs, csf)
end
return csfs
end
end
"""
    parse_csflines(line1, line2, line3)

Parse the three lines that encode a single CSF in a GRASP CSL file.

- `line1` lists the subshells and their occupations as consecutive
  9-character `NNNLL(EE)` blocks,
- `line2` lists the corresponding intra-subshell couplings (2J values),
- `line3` lists the cumulative inter-subshell couplings, ending with the
  total J and parity of the CSF.

Returns a tuple `(orbs, orbs_nelectrons, orbs_orbcouplings, orbs_csfcouplings)`.
Couplings that are not present in the file are represented by `nothing`.
"""
function parse_csflines(line1, line2, line3)
    # Assuming that the CSF line consists of NNNLL(EE) blocks, each 9 characters long.
    @assert length(line1) % 9 == 0
    orbs = RelativisticOrbital{Int}[]
    orbs_nelectrons = Int[]
    orbs_orbcouplings = Union{HalfInt,Nothing}[]
    orbs_csfcouplings = Union{HalfInt,Nothing}[]
    norbitals = div(length(line1), 9) # number of orbitals in this group
    for i = 1:norbitals
        # The i-th 9-character NNNLL(EE) block of line1.
        orb = line1[9*(i-1)+1:9*i]
        @assert orb[6]=='(' && orb[9]==')'
        orbital = parse(RelativisticOrbital, strip(orb[1:5]))
        # n = parse(Int, orb[1:3])
        # kappa = parse_j(orb[4:5])
        nelec = parse(Int, orb[7:8])
        # pick out coupled angular momentum from the second line:
        angmom = if length(line2) < 9*i
            # line2 may be shorter than line1; a missing entry means "not specified".
            nothing
        else
            parse_angmom_string(line2[9*(i-1)+1:9*i])
        end
        # Pick the J-coupling from between the orbitals (line3).
        # The items in that line are shifted by three characters to the right for
        # some reason.. except for the last one, which defines the J^P values of
        # the CSF. That one is shifted by 1 character.. and then one after that
        # is the parity +/- symbol.
        c2J_idx_first, c2J_idx_last = if i < norbitals
            # So, for the non-last ones we assume a 9 character block that has been
            # shifted by 3 characters to the right.
            # +1 to go from 0-based to 1-based
            9*(i-1)+3+1, 9*i+3
        else # i == norbitals -- the last non-regular coupling
            9*(i-1)+3+1, 9*i+1
        end
        c2J_string = line3[c2J_idx_first:c2J_idx_last]
        coupled_angmom = try
            parse_angmom_string(c2J_string)
        catch e
            # Re-throw with context; the last line points at the offending range.
            error("""
            Error parsing 2J value on line 3 (i=$i)
            range $(c2J_idx_first):$(c2J_idx_last) -> '$(c2J_string)'
            1: $(line1)
            2: $(line2)
            3: $(line3)
            $(' '^(c2J_idx_first+2))^$('-'^(c2J_idx_last-c2J_idx_first-1))^
            """)
        end
        push!(orbs_csfcouplings, coupled_angmom)
        push!(orbs, orbital)
        push!(orbs_nelectrons, nelec)
        push!(orbs_orbcouplings, angmom)
    end
    @assert length(orbs_csfcouplings) == norbitals
    return orbs, orbs_nelectrons, orbs_orbcouplings, orbs_csfcouplings
end
# Parse a (possibly blank) angular momentum field into a `HalfInt`.
# A field containing only whitespace yields `nothing`.
function parse_angmom_string(str)
    stripped = strip(str)
    return isempty(stripped) ? nothing : parse(HalfInt, str)
end
# Parse a whitespace-separated list of core subshell labels (e.g. the
# "Core subshells:" line of a CSL file) into relativistic orbitals.
function parse_cores(line)
    return RelativisticOrbital{Int}[parse(RelativisticOrbital, strip(token))
                                    for token in split(line)]
end
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 3214 | # AtomicLevels
[![Documentation][docs-stable-img]][docs-stable-url]
[![Documentation (dev)][docs-dev-img]][docs-dev-url]
[![GitHub Actions CI][ci-gha-img]][ci-gha-url]
[![CodeCov][codecov-img]][codecov-url]
AtomicLevels provides a collection of types and methods to facilitate working with atomic states (or, more generally, states with spherical symmetry), both in the relativistic (eigenstates of `J = L + S`) and non-relativistic (eigenstates of `L` and `S` separately) frameworks.
* Orbitals and orbital subshells
* Configurations
* Configuration state functions (CSFs)
* Term symbols
The aim is to make sure that the types used to label and store information about atomic states are both efficient and user-friendly at the same time.
In addition, it also provides various utility methods, such as generation of a list of CSFs corresponding to a given configuration, serialization of orbitals and configurations, methods for introspecting physical quantities, etc.
## Example
To install and load the package, you can just type in the Julia REPL
```julia-repl
julia> using Pkg; Pkg.add("AtomicLevels")
julia> using AtomicLevels
```
As a simple usage example, constructing a configuration for an S-like state with an open `3p` shell looks like
```julia-repl
julia> configuration = c"[Ne]* 3s2 3p4"
1s² 2s² 2p⁶ 3s² 3p⁴
```
which is of type `Configuration`. To access information about subshells, you can index into
the configuration which returns a tuple. The tuple contains an `Orbital` object, so you
can, for example, ask for the `ℓ` and `s` angular momentum quantum numbers of the subshell
```
julia> shell = configuration[end]
(3p, 4, :open)
julia> angular_momenta(shell[1])
(1, 1/2)
```
Also, you can convert the configurations to the corresponding relativistic configurations
and CSFs by simply doing
```
julia> rconfigurations = relconfigurations(configuration)
3-element Array{Configuration{RelativisticOrbital{Int64}},1}:
1s² 2s² 2p-² 2p⁴ 3s² 3p-² 3p²
1s² 2s² 2p-² 2p⁴ 3s² 3p- 3p³
1s² 2s² 2p-² 2p⁴ 3s² 3p⁴
julia> csfs(rconfigurations)
5-element Array{CSF{RelativisticOrbital{Int64},HalfIntegers.Half{Int64},Seniority},1}:
1s²(₀0|0) 2s²(₀0|0) 2p-²(₀0|0) 2p⁴(₀0|0) 3s²(₀0|0) 3p-²(₀0|0) 3p²(₀0|0)+
1s²(₀0|0) 2s²(₀0|0) 2p-²(₀0|0) 2p⁴(₀0|0) 3s²(₀0|0) 3p-²(₀0|0) 3p²(₂2|2)+
1s²(₀0|0) 2s²(₀0|0) 2p-²(₀0|0) 2p⁴(₀0|0) 3s²(₀0|0) 3p-(₁1/2|1/2) 3p³(₁3/2|1)+
1s²(₀0|0) 2s²(₀0|0) 2p-²(₀0|0) 2p⁴(₀0|0) 3s²(₀0|0) 3p-(₁1/2|1/2) 3p³(₁3/2|2)+
1s²(₀0|0) 2s²(₀0|0) 2p-²(₀0|0) 2p⁴(₀0|0) 3s²(₀0|0) 3p⁴(₀0|0)+
```
For more examples and information about how to work with the various types, please see the [documentation][docs-stable-url].
[ci-gha-url]: https://github.com/JuliaAtoms/AtomicLevels.jl/actions
[ci-gha-img]: https://github.com/JuliaAtoms/AtomicLevels.jl/workflows/CI/badge.svg
[codecov-url]: https://codecov.io/gh/JuliaAtoms/AtomicLevels.jl
[codecov-img]: https://codecov.io/gh/JuliaAtoms/AtomicLevels.jl/branch/master/graph/badge.svg
[docs-stable-url]: https://juliaatoms.org/AtomicLevels.jl/stable/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-dev-url]: https://juliaatoms.org/AtomicLevels.jl/dev/
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 4982 | # [Atomic configurations](@id man-configurations)
```@meta
DocTestSetup = quote
using AtomicLevels
end
```
We define a configuration to be a set of orbitals with their
associated occupation (i.e. the number of electron on that
orbital). We can represent a particular configuration with an instance
of the [`Configuration`](@ref) type. The orbitals of a configuration
can be unsorted (default) or sorted according to the canonical
ordering (first by ``n``, then by ``\ell``, &c). It is important to
allow for arbitrary order, since permutation of the orbitals in a
configuration, in general incurs a phase shift of matrix elements, &c.
```@docs
Configuration
```
The [`@c_str`](@ref) and [`@rc_str`](@ref) string macros can be used to conveniently
construct configurations:
```@docs
@c_str
@rc_str
```
## Interface
For example, it is possible to index into a configuration, including with a range of
indices, returning a sub-configuration consisting of only those orbitals. With an integer
index, an `(orbital, occupancy, state)` tuple is returned.
```jldoctest confexamples
julia> config = c"1s2c 2si 2p3"
[He]ᶜ 2sⁱ 2p³
julia> config[2]
(2s, 1, :inactive)
julia> config[1:2]
[He]ᶜ 2sⁱ
julia> config[[3,1]]
[He]ᶜ 2p³
```
The configuration can also be iterated over. Each item is a `(orbital, occupancy, state)`
tuple.
```jldoctest confexamples
julia> for (o, nelec, s) in config
@show o, nelec, s
end
(o, nelec, s) = (1s, 2, :closed)
(o, nelec, s) = (2s, 1, :inactive)
(o, nelec, s) = (2p, 3, :open)
```
Various other methods exist to manipulate or transform configurations or to query them for
information.
```@docs
issimilar
Base.:(==)(a::Configuration{<:O}, b::Configuration{<:O}) where {O<:AbstractOrbital}
num_electrons(::Configuration)
num_electrons(::Configuration, ::AtomicLevels.AbstractOrbital)
orbitals(::Configuration)
Base.delete!
Base.:(+)
Base.:(-)
Base.close
close!
Base.fill
Base.fill!
Base.in
Base.filter
Base.replace
core
peel
active
inactive
bound
continuum
parity(::Configuration)
nonrelconfiguration
relconfigurations
multiplicity(::Configuration)
```
## Generating configuration lists
The [`⊗`](@ref) operator can be used to easily generate lists of configurations from existing
pieces. E.g. to create all the valence configurations on top of an closed core, you only
need to write
```jldoctest
julia> c"[Ne]" ⊗ [c"3s2", c"3s 3p", c"3p2"]
3-element Vector{Configuration{Orbital{Int64}}}:
[Ne]ᶜ 3s²
[Ne]ᶜ 3s 3p
[Ne]ᶜ 3p²
```
That can be combined with the [`@rcs_str`](@ref) string macro to easily generate all possible
relativistic configurations from a non-relativistic definition:
```jldoctest
julia> rc"[Ne] 3s2" ⊗ rcs"3p2"
3-element Vector{Configuration{RelativisticOrbital{Int64}}}:
[Ne]ᶜ 3s² 3p-²
[Ne]ᶜ 3s² 3p- 3p
[Ne]ᶜ 3s² 3p²
```
```@docs
⊗
@rcs_str
```
## Spin configurations
```@docs
SpinConfiguration
spin_configurations
substitutions
@sc_str
@rsc_str
@scs_str
@rscs_str
```
## Excited configurations
AtomicLevels.jl provides an easy interface for generating lists of
configurations which are the result of exciting one or more orbitals
of a reference set to a set of substitution orbitals. This is done
with [`excited_configurations`](@ref), which provides various
parameters for controlling which excitations are generated. A very
simple example could be
```jldoctest
julia> excited_configurations(c"1s2", os"2[s-p]"...)
4-element Vector{Configuration{Orbital{Int64}}}:
1s²
1s 2s
2s²
2p²
```
which as we see contains all configurations generated by at most
exciting two orbitals `1s²` and keeping the overall parity. By lifting
these restrictions, more configurations can be generated:
```jldoctest
julia> excited_configurations(c"1s2 2s", os"3[s-p]"...,
keep_parity=false, max_excitations=2)
14-element Vector{Configuration{Orbital{Int64}}}:
1s² 2s
1s 2s²
1s 2s 3s
1s 2s 3p
1s² 3s
1s² 3p
2s² 3s
2s² 3p
2s 3s²
2s 3s 3p
1s 3s²
1s 3s 3p
2s 3p²
1s 3p²
julia> excited_configurations(c"1s2 2s", os"3[s-p]"...,
keep_parity=false, max_excitations=3)
17-element Vector{Configuration{Orbital{Int64}}}:
1s² 2s
1s 2s²
1s 2s 3s
1s 2s 3p
1s² 3s
1s² 3p
2s² 3s
2s² 3p
2s 3s²
2s 3s 3p
1s 3s²
1s 3s 3p
2s 3p²
1s 3p²
3s² 3p
3s 3p²
3p³
```
Since configurations by default are unsorted, when exciting from
[`SpinConfiguration`](@ref)s, the substitutions are performed
in-place:
```jldoctest
julia> excited_configurations(first(scs"1s2"), sos"2[s-p]"...)
21-element Vector{SpinConfiguration{SpinOrbital{Orbital{Int64}, Tuple{Int64, HalfIntegers.Half{Int64}}}}}:
1s₀α 1s₀β
2s₀α 1s₀β
2s₀β 1s₀β
1s₀α 2s₀α
1s₀α 2s₀β
2s₀α 2s₀β
2p₋₁α 2p₋₁β
2p₋₁α 2p₀α
2p₋₁α 2p₀β
2p₋₁α 2p₁α
⋮
2p₋₁β 2p₀β
2p₋₁β 2p₁α
2p₋₁β 2p₁β
2p₀α 2p₀β
2p₀α 2p₁α
2p₀α 2p₁β
2p₀β 2p₁α
2p₀β 2p₁β
2p₁α 2p₁β
```
```@docs
excited_configurations
```
## Index
```@index
Pages = ["configurations.md"]
```
```@meta
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 8994 | ```@meta
DocTestSetup = :(using AtomicLevels)
```
# [Atomic configuration state functions (CSFs)](@id man-csfs)
AtomicLevels also provides types to represent symmetry-adapted atomic states, commonly referred to as [configuration state functions (CSFs)](https://en.wikipedia.org/wiki/Configuration_state_function). These are linear combinations of Slater determinants arising from a particular _configuration_, that are also eigenstates of angular momentum operators.
!!! info "Angular momentum operators"
With relativistic orbitals and ``jj``-coupling, _angular momentum operators_ refer to the ``J^2`` and ``J_z`` operators.
However, when working with non-relativistic orbitals and in ``LS``-coupling, they refer to both the orbital angular momentum operators (``L^2``, ``L_z``) and the spin angular momentum operators (``S^2``, ``S_z``). In this case, the orbitals and CSFs are simultaneous eigenstates of both the orbital and spin angular momentum.
In the [Background](@ref) section, we use just the total angular momentum operators (``J^2``, ``J_z``) when illustrating the theoretical background.
## Background
When constructing a basis for many-particle atomic states, you start from a set of single-particle states (orbitals) and form anti-symmetric many-particle product states (Slater determinants) of the desired number of particles. Superpositions of said determinants can then represent full many-electron wavefunctions. In principle, if the set of one-particle orbitals is complete, the set of all Slater determinants would form a complete many-particle basis.
However, even if your orbitals are eigenstates of the angular momentum operators ``J^2`` and ``J_z``, the Slater determinants, formed from these orbitals, in general, are not. As angular momentum symmetry is a useful symmetry to adhere to when working with atomic states, this is where CSFs come in: they form a _symmetry adapted_ many body basis, representing the same many-particle space, but each state is now also an eigenstate of angular momentum. They are related to the underlying Slater determinats via a basis transformation.
!!! note "Other symmetries"
You can also imagine CSFs that are adapted to other symmetries. However, at this time, AtomicLevels only supports CSFs adapted to angular momentum.
### Forming a CSF
The philosophy behind CSFs is similar to how you can use the [Clebsch–Gordan coefficients](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients) ``C_{j_1m_1j_2m_2}^{J M}`` to couple product states of angular momentum eigenstates ``\ket{j_1 m_1} \ket{j_2 m_2}``, which themselves in general are not eigenstates of total angular momentum, into angular momentum eigenstates ``\ket{j_1, j_2; J M}`` by creating superpositions with appropriate coefficients:
```math
\ket{j_1, j_2; J M} = \sum_{m_1,m_2,M}
C_{j_1m_1j_2m_2}^{J M}
\ket{j_1 m_1} \ket{j_2 m_2}
```
where the valid ``J`` values are ``|j_1 - j_2| \leq J \leq j_1 + j_2``.
In the multi-electron case, the states that you multiply together are the atomic orbitals. However, it is complicated by two facts:
1. There are usually more than two electrons.
In a multi-electron case, it is perfectly valid to apply the Clebsch–Gordan relation recursively until all electrons have been coupled, but in general you can end up with the same total angular momentum for different states (corresponding to different coupling sequences). So, the angular momentum eigenvalues are no longer sufficient to always uniquely identify a state.
2. Electrons are fermionic particles adhering to the [Pauli principle](https://en.wikipedia.org/wiki/Pauli_exclusion_principle).
This means that not all direct products of single-particle states are valid (the same single-particle state can not be repeated) or unique (anti-symmetry means that the order in the product does not matter). This, in turn, means that not all the coupled eigenstates predicted by the Clebsch-Gordan relation actually exist and you can not use the Clebsch–Gordan relation directly to determine their coefficients.
To work within those constraints, AtomicLevels specifies a _coupling scheme_. That is, the CSFs contain additional data that allows the states to be identified uniquely.
!!! note "Orbital vs subshell"
In the following the word "subshell" is used to refer to the orbitals in a configuration (i.e. a set of states with e.g. the same ``n`` and ``\ell`` quantum numbers). This is because the word "orbital" can be ambiguous (referring to either to a subshell or an specific state).
**`CSF` type.** In AtomicLevels, CSFs are represented with the [`CSF`](@ref) type. An instance of a [`CSF`](@ref) only specifies CSFs up to the total angular momentum (i.e. it actually represent a set of states corresponding to the different possible ``J_z`` quantum numbers).
Forming a CSF is a multi-step process:
1. The starting point for a CSF is a configuration ([`Configuration`](@ref)), i.e. a list of subshells (the orbitals in the configuration) and their occupations (how many electrons on the subshell). Each configuration corresponds to a set of Slater determinants, generated by considering all the possible combinations of `m` quantum numbers of the orbitals.
2. The next step is to couple _each subshell_ into an angular momentum eigenstate (e.g. to form a single angular momentum eigenstate out of the 3 electrons on a ``3d_{5/2}`` orbital/subshell). As the single particle spaces for the subshells are disjoint, the space of many-particle determinants can be thought of as a product space of subshells determinant spaces.
Due to the fermionic nature of the electrons, even determining the valid ``J`` values for the subshells is non-trivial. Also, if you go to high enough angular momenta of the orbital and high enough particle number, the angular momentum eigenvalues are no longer sufficient to uniquely identify the subshell terms. So the [`CSF`](@ref) type stores them as instances of [`IntermediateTerm`](@ref), instead of simple numbers (see [Term symbols](@ref man-terms) for more information).
3. Once the electrons on individual subshells are coupled, we can couple the subshells themselves together. As the orbitals in a subshell are distinct from the ones in other subshells, this can easily be done with just Clebsch–Gordan coefficients.
In AtomicLevels, we assume that the coupling is done by starting from the leftmost orbital pair, coupling those subshells together. Then the coupled two subshells are taken and coupled to the next subshells, and so on. In the end, we get a _coupling tree_ that looks something like this:

On the illustration, ``J_i, q_i`` pairs refer to the subshell couplings (``q_i`` disambiguating the state if ``J_i`` is not sufficient for uniqueness), and ``J_{1:i}`` refers to the total angular momentum of the first ``i`` coupled subshells. The total angular momenta of the last coupling (``J_{1:k}``) determines the angular momentum of the whole CSF.
So, all in all, a CSF is a configuration, together with the numbers ``J_i``, ``q_i`` and ``J_{1:i}`` uniquely determining the coupling.
## Coupling schemes
Various coupling schemes exist and AtomicLevels is currently opinionated about it, using the scheme described above. The only generality is that it allows for different coupling schemes depending on whether one works with relativistic or non-relativistic orbitals.
### ``LS``-coupling
In ``LS``-coupling, each orbital must be a [`Orbital`](@ref), specified by its ``n`` and ``\ell`` quantum numbers. Implicitly, each orbital also has a spin of ``s = 1/2``.
When coupling is performed, the ``L`` and ``S`` spaces are coupled separately, which is possible because the real and spin spaces are orthogonal. Each subshell then gets an ``L`` and ``S`` values eigenvalue (together with an additional quantum number to resolve any ambiguity). Similarly, couplings between subshells are also defined by the ``L`` and ``S`` values separately.
The CSF will then be a simultaneous eigenstate of ``L`` and ``S``, but does not define a ``J`` value. In other words, AtomicLevels currently does not perform ``LSJ``-coupling.
The convenience type [`NonRelativisticCSF`](@ref) is provided to construct CSFs with non-relativistic orbitals in ``LS``-coupling.
### ``jj``-coupling
``jj``-coupling is used for [`RelativisticOrbital`](@ref)s, where each orbital only has the total angular momentum value ``J``. In this coupling scheme, only the ``J`` values coupled. Intermediate terms are the ``J`` values (together with disambiguating quantum numbers, like seniority), and the intra-shell couplings are also defined by their ``J`` value.
The convenience type [`RelativisticCSF`](@ref) is provided to construct CSFs with relativistic orbitals in ``jj``-coupling.
## Reference
```@docs
CSF
NonRelativisticCSF
RelativisticCSF
csfs
orbitals(::CSF)
```
## Index
```@index
Pages = ["csfs.md"]
```
```@meta
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 4446 | # AtomicLevels.jl
AtomicLevels provides a collection of types and methods to facilitate working with atomic states (or, more generally, states with spherical symmetry), both in the relativistic (eigenstates of ``J = L + S``) and non-relativistic (eigenstates of ``L`` and ``S`` separately) frameworks.
* [Orbitals and orbital subshells](@ref man-orbitals)
* [Configurations](@ref man-configurations)
* [Configuration state functions (CSFs)](@ref man-csfs)
* [Term symbols](@ref man-terms)
The aim is to make sure that the types used to label and store information about atomic states are both efficient and user-friendly at the same time.
In addition, it also provides various utility methods, such as generation of a list of CSFs corresponding to a given configuration, serialization of orbitals and configurations, methods for introspecting physical quantities, etc.
## Usage examples
### Orbitals
```@setup orbitals
using AtomicLevels
```
A single orbital can be constructed using string macros
```@repl orbitals
orbitals = o"2s", ro"5f-"
```
Various methods are provided to look up the properties of the orbitals
```@repl orbitals
for o in orbitals
@info "Orbital: $o :: $(typeof(o))" parity(o) degeneracy(o) angular_momenta(o)
end
```
You can also create a range of orbitals quickly using the [`@os_str`](@ref) (or [`@ros_str`](@ref)) string macros
```@repl orbitals
os"5[d] 6[s-p] k[7-10]"
```
### Configurations
```@setup configurations
using AtomicLevels
```
The ground state of hydrogen and helium.
```@repl configurations
c"1s",(c"1s2",c"[He]")
```
The ground state configuration of xenon, in relativistic notation.
```@repl configurations
Xe = rc"[Kr] 5s2 5p6"
```
As we see above, by default, the krypton core is declared “closed”. This is useful for calculations when the core should be frozen. We can “open” it by affixing `*`.
```@repl configurations
Xe = c"[Kr]* 5s2 5p6"
```
Note that the `5p` shell was broken up into 2 `5p-` electrons and 4 `5p` electrons. If we are not filling the shell, occupancy of the spin-up and spin-down electrons has to be given separately.
```@repl configurations
Xe⁺ = rc"[Kr] 5s2 5p-2 5p3"
```
It is also possible to work with “continuum orbitals”, where the principal quantum number is replaced by a `Symbol`.
```@repl configurations
Xe⁺e = rc"[Kr] 5s2 5p-2 5p3 ks"
```
### Excitations
```@setup excitations
using AtomicLevels
```
We can easily generate all possible excitations from a reference configuration. If no extra orbitals are specified, only those that are “open” within the reference set will be considered.
```@repl excitations
excited_configurations(rc"[Kr] 5s2 5p-2 5p3")
```
By appending virtual orbitals, we can generate excitations to configurations beyond those spanned by the reference set.
```@repl excitations
excited_configurations(rc"[Kr] 5s2 5p-2 5p3", ros"5[d] 6[s-p]"...)
```
Again, using the “continuum orbitals”, it is possible to generate the state space accessible via one-photon transitions from the ground state.
```@repl excitations
Xe⁺e = excited_configurations(rc"[Kr] 5s2 5p6", ros"k[s-d]"...,
max_excitations=:singles,
keep_parity=false)
```
We can then query for the bound and continuum orbitals thus.
```@repl excitations
map(Xe⁺e) do c
b = bound(c)
num_electrons(b) => b
end
map(Xe⁺e) do c
b = continuum(c)
num_electrons(b) => b
end
```
### Term symbol calculation
```@setup termsymbols
using AtomicLevels
```
[Overview of angular momentum coupling on Wikipedia.](https://en.wikipedia.org/wiki/Angular_momentum_coupling)
**``LS``-coupling.**
This is done purely non-relativistic, i.e. `2p-` is considered equivalent to `2p`.
```@repl termsymbols
terms(c"1s")
terms(c"[Kr] 5s2 5p5")
terms(c"[Kr] 5s2 5p4 6s 7g")
```
**``jj``-coupling**.
``jj``-coupling is implemented slightly differently, it calculates the possible ``J`` values resulting from coupling `n` equivalent electrons in all combinations allowed by the Pauli principle.
```@repl termsymbols
intermediate_terms(ro"1s", 1)
intermediate_terms(ro"5p", 2)
intermediate_terms(ro"7g", 3)
```
### Configuration state functions
```@setup csfs
using AtomicLevels
```
CSFs are formed from electronic configurations and their possible term couplings (along with intermediate terms, resulting from unfilled subshells).
```@repl csfs
sort(vcat(csfs(rc"3s 3p2")..., csfs(rc"3s 3p- 3p")...))
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 1218 | # Internals
!!! note
The functions, methods and types documented here are _not_ part of the public API.
```@meta
CurrentModule = AtomicLevels
DocTestSetup = quote
using AtomicLevels
end
```
```@autodocs
Modules = [AtomicLevels]
Public = false
Filter = fn -> !in(fn, [Base.parse, Base.fill, Base.fill!]) &&
fn != AtomicLevels.xu_terms
```
## Internal implementation of term multiplicity calculation
AtomicLevels.jl uses the algorithm presented in
- _Alternative mathematical technique to determine LS spectral terms_
by Xu Renjun and Dai Zhenwen, published in JPhysB, 2006.
[doi:10.1088/0953-4075/39/16/007](https://dx.doi.org/10.1088/0953-4075/39/16/007)
to compute the multiplicity of individual subshells in ``LS``-coupling, beyond the
trivial cases of a single electron or a filled subshell. These
routines need not be used directly, instead use [`terms`](@ref) and
[`count_terms`](@ref).
In the following, ``S'=2S\in\mathbb{Z}`` and
``M_S'=2M_S\in\mathbb{Z}``, as in the original article.
```@docs
AtomicLevels.Xu.X
AtomicLevels.Xu.A
AtomicLevels.Xu.f
AtomicLevels.xu_terms
```
## Index
```@index
Pages = ["internals.md"]
```
```@meta
CurrentModule = nothing
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 1653 | # [Atomic orbitals](@id man-orbitals)
```@meta
DocTestSetup = quote
using AtomicLevels
end
```
Atomic orbitals, i.e. single particle states with well-defined orbital or total angular
momentum, are usually the basic building blocks of atomic states. AtomicLevels provides
various types and methods to conveniently label the orbitals.
## Orbital types
AtomicLevels provides two basic types for labelling atomic orbitals: [`Orbital`](@ref) and
[`RelativisticOrbital`](@ref). Stricly speaking, these types do not label orbitals, but
groups of orbitals with the same angular symmetry and radial behaviour (i.e. a
[subshell](https://en.wikipedia.org/wiki/Electron_shell#Subshells)).
All orbitals are subtypes of [`AbstractOrbital`](@ref). Types and methods that work on
generic orbitals can dispatch on that.
```@docs
Orbital
RelativisticOrbital
AbstractOrbital
```
The [`SpinOrbital`](@ref) type can be used to fully qualify all the quantum numbers (that
is, also ``m_\ell`` and ``m_s``) of an [`Orbital`](@ref). It represents a single, distinct
orbital.
```@docs
SpinOrbital
```
The string macros [`@o_str`](@ref) and [`@ro_str`](@ref) can be used
to conveniently construct orbitals, while [`@os_str`](@ref),
[`@sos_str`](@ref), [`@ros_str`](@ref), and [`@rsos_str`](@ref) can be
used to construct whole lists of them very easily.
```@docs
@o_str
@so_str
@ro_str
@rso_str
@os_str
@sos_str
@ros_str
@rsos_str
```
## Methods
```@docs
isless
degeneracy
parity(::Orbital)
symmetry
isbound
angular_momenta
angular_momentum_ranges
spin_orbitals
nonrelorbital
```
## Index
```@index
Pages = ["orbitals.md"]
```
```@meta
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 9962 | # [Term symbols](@id man-terms)
```@meta
DocTestSetup = :(using AtomicLevels)
```
AtomicLevels provides types and methods to work and determine term symbols. The ["Term
symbol"](https://en.wikipedia.org/wiki/Term_symbol) and ["Angular momentum
coupling"](https://en.wikipedia.org/wiki/Angular_momentum_coupling) Wikipedia articles give
a good basic overview of the terminology.
For term symbols in LS coupling, AtomicLevels provides the [`Term`](@ref) type.
```@docs
Term
```
The [`Term`](@ref) objects can also be constructed with the [`@T_str`](@ref) string macro.
```@docs
@T_str
Base.parse(::Type{Term}, ::AbstractString)
```
The [`terms`](@ref) function can be used to generate all possible term symbols. In the case
of relativistic orbitals, the term symbols are simply the valid ``J`` values, represented
using the [`HalfInteger`](https://github.com/sostock/HalfIntegers.jl) type.
```@docs
terms
count_terms
multiplicity(t::Term)
weight(t::Term)
```
## Term multiplicity and intermediate terms
For subshells starting with `d³`, a term symbol can occur multiple times, each occurrence
corresponding to a different physical state (multiplicity higher than one). This happens
when there are distinct ways of coupling the electrons, but they yield the same total
angular momentum. E.g. a `d³` subshell can be coupled in 8 different ways, two of which are
both described by the `²D` term symbol:
```jldoctest
julia> terms(o"3d", 3)
8-element Vector{Term}:
²P
²D
²D
²F
²G
²H
⁴P
⁴F
julia> count_terms(o"3d", 3, T"2D")
2
```
The multiplicity can be even higher if more electrons and higher angular momenta are
involved:
```jldoctest
julia> count_terms(o"4f", 5, T"2Do")
5
```
To distinguish these subshells, extra quantum numbers must be specified. In AtomicLevels,
that can be done with the [`IntermediateTerm`](@ref) type. This is primarily used when
specifying the subshell couplings in [CSFs](@ref man-csfs).
```@docs
IntermediateTerm
intermediate_terms
```
### Disambiguating quantum numbers
The [`IntermediateTerm`](@ref) type does not specify how to interpret the disambiguating
quantum number(s) ``ν``, or even what the type of it should be. In AtomicLevels, we use two
different types, depending on the situation:
* **A simple `Integer`.** In this case, the quantum number ``\nu`` must be in the range
``1 \leq \nu \leq N_{\rm{terms}}``, where ``N_{\rm{terms}}`` is the multiplicity of the
term symbol (i.e. the number of times this term symbol appears for this subshell
``\ell^w`` or ``\ell_j^w``).
AtomicLevels does not prescribe any further interpretation for the quantum number.
It can be used as a simple counter to distinguish the different terms, or the user can
define their own mapping from the set of integers to physical states.
* **`Seniority`.** In this case the number is interpreted to be _Racah's seniority
number_. This gives the intermediate term a specific physical interpretation, but only
works for certain subshells. See the [`Seniority`](@ref) type for more information.
```@docs
Seniority
```
## Term couplings
The angular momentum coupling method is based on the [vector
model](https://en.wikipedia.org/wiki/Vector_model_of_the_atom),
where two angular momenta can be combined via vector addition to form
a total angular momentum:
```math
\vec{J} = \vec{L} + \vec{S},
```
where the length of the resultant momentum ``\vec{J}`` obeys
```math
|L-S| \leq J \leq L+S.
```
Relations such as these are used to couple the term symbols in both
``LS`` and ``jj`` coupling; however, not all values of ``J`` predicted
by the vector model are valid physical states, see
[`couple_terms`](@ref).
To generate the possible [`terms`](@ref) of a configuration, all the
possible terms of the individual subshells, have to be coupled
together to form the final terms; this is done from
left-to-right. When generating all possible [`CSFs`](@ref man-csfs) from a
configuration, it is also necessary to find the intermediate couplings
of the individual subshells. As an example, if we want to find the
possible terms of `3p² 4s 5p²`, we first find the possible terms of the
individual subshells:
```jldoctest intermediate_term_examples
julia> its = intermediate_terms(c"3p2 4s 5p2")
3-element Vector{Vector{IntermediateTerm{Term, Seniority}}}:
[₀¹S, ₂¹D, ₂³P]
[₁²S]
[₀¹S, ₂¹D, ₂³P]
```
where the seniority numbers are indicated as preceding subscripts. We
then need to couple each intermediate term of the first subshell with
each of the second subshell, and couple each of the resulting terms
with each of the third subshell, and so on. E.g. coupling the `₂³P`
intermediate term with `₁²S` produces two terms:
```jldoctest
julia> couple_terms(T"3P", T"2S")
2-element Vector{Term}:
²P
⁴P
```
each of which need to be coupled with e.g. `₂¹D`:
```jldoctest
julia> couple_terms(T"2P", T"1D")
3-element Vector{Term}:
²P
²D
²F
julia> couple_terms(T"4P", T"1D")
3-element Vector{Term}:
⁴P
⁴D
⁴F
```
[`terms`](@ref) uses [`couple_terms`](@ref) (through
[`AtomicLevels.final_terms`](@ref)) to produce all possible terms
coupling trees, folding from left-to-right:
```jldoctest
julia> a = couple_terms([T"1S", T"1D", T"3P"], [T"2S"])
4-element Vector{Term}:
²S
²P
²D
⁴P
julia> couple_terms(a, [T"1S", T"1D", T"3P"])
12-element Vector{Term}:
²S
²P
²D
²F
²G
⁴S
⁴P
⁴D
⁴F
⁶S
⁶P
⁶D
```
which gives the same result as
```jldoctest
julia> terms(c"3p2 4s 5p2")
12-element Vector{Term}:
²S
²P
²D
²F
²G
⁴S
⁴P
⁴D
⁴F
⁶S
⁶P
⁶D
```
Note that for the generation of final terms, the intermediate terms
need not be kept (and their seniority is not important). However, for
the generation of [`CSFs`](@ref man-csfs), we need to form all possible
combinations of intermediate terms for each subshell, and couple them,
again left-to-right, to form all possible coupling chains (each one
corresponding to a unique physical state). E.g. for the last term of
each subshell of `3p² 4s 5p²`
```jldoctest intermediate_term_examples
julia> last.(its)
3-element Vector{IntermediateTerm{Term, Seniority}}:
₂³P
₁²S
₂³P
```
we find the following chains:
```jldoctest intermediate_term_examples
julia> intermediate_couplings(last.(its))
15-element Vector{Vector{Term}}:
[¹S, ³P, ²P, ²S]
[¹S, ³P, ²P, ²P]
[¹S, ³P, ²P, ²D]
[¹S, ³P, ²P, ⁴S]
[¹S, ³P, ²P, ⁴P]
[¹S, ³P, ²P, ⁴D]
[¹S, ³P, ⁴P, ²S]
[¹S, ³P, ⁴P, ²P]
[¹S, ³P, ⁴P, ²D]
[¹S, ³P, ⁴P, ⁴S]
[¹S, ³P, ⁴P, ⁴P]
[¹S, ³P, ⁴P, ⁴D]
[¹S, ³P, ⁴P, ⁶S]
[¹S, ³P, ⁴P, ⁶P]
[¹S, ³P, ⁴P, ⁶D]
```
```@docs
couple_terms
AtomicLevels.final_terms
intermediate_couplings
```
## Levels & States
Coupling ``L`` and ``S`` to a total ``J``, as discussed under [Term
couplings](@ref) above, yields a [`Level`](@ref); in ``jj`` coupling,
the final term of the [`CSF`](@ref) already has its final ``J`` given. In
both coupling schemes, the same values of final ``J`` will result, but
via different intermediate couplings. As an example, we will consider
the configuration ``1s\;2p``, which in the ``LS`` and ``jj`` coupling
schemes has the following [`CSF`](@ref)s:
```jldoctest levels_and_states
julia> csls = csfs(c"1s 2p")
2-element Vector{NonRelativisticCSF{Orbital{Int64}, Seniority}}:
1s(₁²S|²S) 2p(₁²Pᵒ|¹Pᵒ)-
1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-
julia> csjj = vcat(csfs(rc"1s 2p"), csfs(rc"1s 2p-"))
4-element Vector{RelativisticCSF{RelativisticOrbital{Int64}, Seniority}}:
1s(₁1/2|1/2) 2p(₁3/2|1)-
1s(₁1/2|1/2) 2p(₁3/2|2)-
1s(₁1/2|1/2) 2p-(₁1/2|0)-
1s(₁1/2|1/2) 2p-(₁1/2|1)-
```
If we now generate the permissible [`Level`](@ref)s, we find the valid
values of ``J``, i.e. ``0``, ``2\times 1``, and ``2``:
```jldoctest levels_and_states
julia> levels.(csls)
2-element Vector{Vector{Level{Orbital{Int64}, Term, Seniority}}}:
[|1s(₁²S|²S) 2p(₁²Pᵒ|¹Pᵒ)-, J = 1⟩]
[|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 0⟩, |1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 1⟩, |1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2⟩]
julia> levels.(csjj)
4-element Vector{Vector{Level{RelativisticOrbital{Int64}, HalfIntegers.Half{Int64}, Seniority}}}:
[|1s(₁1/2|1/2) 2p(₁3/2|1)-, J = 1⟩]
[|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2⟩]
[|1s(₁1/2|1/2) 2p-(₁1/2|0)-, J = 0⟩]
[|1s(₁1/2|1/2) 2p-(₁1/2|1)-, J = 1⟩]
```
```@docs
Level
weight(l::Level)
J_range
levels
```
Similarly, by additionally specifying the projection quantum number
``M_J``, we get a fully quantified [`State`](@ref). In the same way,
the permissible values of ``M_J`` must agree between the coupling
schemes, sorting by ``M_J`` for clarity:
```jldoctest levels_and_states
julia> sort(reduce(vcat, reduce(vcat, states.(csls))), by=s->s.M_J)
12-element Vector{State{Orbital{Int64}, Term, Seniority}}:
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2, M_J = -2⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|¹Pᵒ)-, J = 1, M_J = -1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 1, M_J = -1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2, M_J = -1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|¹Pᵒ)-, J = 1, M_J = 0⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 0, M_J = 0⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 1, M_J = 0⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2, M_J = 0⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|¹Pᵒ)-, J = 1, M_J = 1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 1, M_J = 1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2, M_J = 1⟩
|1s(₁²S|²S) 2p(₁²Pᵒ|³Pᵒ)-, J = 2, M_J = 2⟩
julia> sort(reduce(vcat, reduce(vcat, states.(csjj))), by=s->s.M_J)
12-element Vector{State{RelativisticOrbital{Int64}, HalfIntegers.Half{Int64}, Seniority}}:
|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2, M_J = -2⟩
|1s(₁1/2|1/2) 2p(₁3/2|1)-, J = 1, M_J = -1⟩
|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2, M_J = -1⟩
|1s(₁1/2|1/2) 2p-(₁1/2|1)-, J = 1, M_J = -1⟩
|1s(₁1/2|1/2) 2p(₁3/2|1)-, J = 1, M_J = 0⟩
|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2, M_J = 0⟩
|1s(₁1/2|1/2) 2p-(₁1/2|0)-, J = 0, M_J = 0⟩
|1s(₁1/2|1/2) 2p-(₁1/2|1)-, J = 1, M_J = 0⟩
|1s(₁1/2|1/2) 2p(₁3/2|1)-, J = 1, M_J = 1⟩
|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2, M_J = 1⟩
|1s(₁1/2|1/2) 2p-(₁1/2|1)-, J = 1, M_J = 1⟩
|1s(₁1/2|1/2) 2p(₁3/2|2)-, J = 2, M_J = 2⟩
```
```@docs
State
states
```
## Index
```@index
Pages = ["terms.md"]
```
```@meta
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.1.11 | b1d6aa60cb01562ae7dca8079b76ec83da36388e | docs | 770 | # Other utilities
```@meta
DocTestSetup = :(using AtomicLevels)
```
## Parity
AtomicLevels defines the [`Parity`](@ref) type, which is used to represent the parity of
atomic states etc.
```@docs
Parity
@p_str
```
The parity values also define an algebra and an ordering:
```jldoctest
julia> p"odd" < p"even"
true
julia> p"even" * p"odd"
odd
julia> (p"odd")^3
odd
julia> -p"odd"
even
```
The exported [`parity`](@ref) function is overloaded for many of the types in AtomicLevels,
defining a uniform API to determine the parity of an object.
```@docs
parity
```
## JJ to LSJ
```@docs
jj2ℓsj
```
## Angular momentum quantum numbers
```@docs
str2κ
@κ_str
κ2ℓ
κ2j
ℓj2κ
```
## Index
```@index
Pages = ["utilities.md"]
```
```@meta
DocTestSetup = nothing
```
| AtomicLevels | https://github.com/JuliaAtoms/AtomicLevels.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 2966 | using OrdinaryDiffEq, ForwardDiff, Statistics, LinearAlgebra
# which md curve to plot: index of the Hessian eigenvector (computed below)
# that seeds the initial curve direction
which_dir = 1
## define dynamics of differential equation
# Lotka-Volterra (predator-prey) vector field, written in-place for the ODE solver.
# u = [prey, predator]; p = [prey growth, predation, predator death, predator growth].
function f(du, u, p, t)
    prey, pred = u[1], u[2]
    du[1] = p[1] * prey - p[2] * prey * pred  # prey
    du[2] = p[4] * prey * pred - p[3] * pred  # predator
end
u0 = [1.0;1.0] # initial populations [prey; predator]
tspan = (0.0, 10.0) # time span to simulate over
t = collect(range(0, stop=10., length=200)) # time points to measure
p = [1.5,1.0,3.0,1.0] # nominal parameter values
nom_prob = ODEProblem(f, u0, tspan, p) # package as an ODE problem
nom_sol = solve(nom_prob, Tsit5()) # nominal (unperturbed) solution
## Model features of interest are mean prey population, and max predator population (over time)
# features(p): simulate the model at parameters p and summarise the trajectory by
# two scalars: time-averaged prey population and peak predator population.
# Uses the globals `nom_prob` (problem template) and `t` (measurement times).
function features(p)
    prob = remake(nom_prob; p=p)
    sol = solve(prob, Vern9(); saveat=t)
    return [mean(sol[1,:]), maximum(sol[2,:])]
end
nom_features = features(p)
## loss function, we can take as l2 difference of features vs nominal features
# loss(p): squared l2 distance between the model features at `p` and the nominal
# features. Note: `features` already remakes the ODE problem internally, so the
# previously-present (and unused) `remake` call here has been removed.
function loss(p)
    p_features = features(p)
    return sum(abs2, p_features - nom_features)
end
## gradient of loss function
# lossgrad(p, g): write ∇loss(p) into g in place, and return loss(p).
function lossgrad(p, g)
    g[:] = ForwardDiff.gradient(loss, p)
    return loss(p)
end
## package the loss and gradient into a DiffCost structure
cost = DiffCost(loss, lossgrad)
"""
We evaluate the Hessian once only, at the nominal parameters p.
Why? To find locally insensitive directions of parameter perturbation.
Eigenvectors of the Hessian with small eigenvalues are one easy way of defining
these directions.
"""
hess0 = ForwardDiff.hessian(loss, p)
# ev(i): sign-flipped i-th Hessian eigenvector (presumably eigenvalues sorted
# ascending, so small i picks a flat direction — confirm for non-symmetric input)
ev(i) = -eigen(hess0).vectors[:,i]
# initial direction, conserved "momentum" budget, and (two-sided) curve span
init_dir = ev(which_dir); momentum = 1. ; span = (-15., 15.)
curve_prob = MDCProblem(cost, p, init_dir, momentum, span)
# rr = map(1:2) do i
# curve_prob_orig = curveProblem(cost, p, init_dir, momentum, span)
# first callback set: progress printouts plus box bounds on parameters 1 and 3
cb = [
    Verbose([CurveDistance(0.1:1:10), HamiltonianResidual(2.3:4:10)]),
    ParameterBounds([1,3], [-10.,-10.], [10.,10.])
]
# NOTE(review): this reassignment discards the ParameterBounds callback above,
# so only the Verbose callbacks are active — confirm this is intentional
cb = [
    Verbose([CurveDistance(0.1:1:10), HamiltonianResidual(2.3:4:10)])
]
# don't make the user do (curve_prob...) for Verbose
# make MomentumReadjustment
# evolve the minimally disruptive curve, integrating with Tsit5
@time mdc = evolve(curve_prob, Tsit5; mdc_callback=cb);
# return cost_trajectory(mdc, mdc.sol.t) |> mean, cost_trajectory(mdc2, mdc2.sol.t) |> mean
# end
# function sol_at_p(p)
# prob = remake(nom_prob; p=p)
# sol = solve(prob, Tsit5())
# end
# p1 = plot(mdc; pnames=[L"p_1" L"p_2" L"p_3" L"p_4"])
# cost_vec = [mdc.cost(el) for el in eachcol(trajectory(mdc))]
# p2 = plot(distances(mdc), log.(cost_vec), ylabel="log(cost)", xlabel="distance", title="cost over MD curve");
# mdc_plot = plot(p1, p2, layout=(2, 1), size=(800, 800))
# nominal_trajectory = plot(sol_at_p(mdc(0.)[:states]), label=["prey" "predator"])
# perturbed_trajectory = plot(sol_at_p(mdc(-15.)[:states]), label=["prey" "predator"])
# traj_comparison = plot(nominal_trajectory, perturbed_trajectory, layout=(2, 1), xlabel="time", ylabel="population")
# Lessons
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 11450 | """
MDCProblem(cost, p0, dp0, momentum, tspan)
Creates an MDCProblem, that can then generate a minimally disruptive curve using evolve(c::MDCProblem, ...; ...)
"""
struct MDCProblem{A,B,C,D,E} <: CurveProblem
cost::A
p0::B
dp0::C
momentum::D
tspan::E
## reverse initial direction and signflip curve span if the latter is nonpositive
function MDCProblem(a::A, b::B, c::C, d::D, e::E) where A where B where C where D where E
if max(e...) <= 0.
e = map(x -> -x |> abs, e) |> reverse
c = -c
end
new{A,B,C,D,E}(a, b, c, d, e)
end
end
# trait: an MDCProblem starts exactly at the nominal parameters (no jump)
isjumped(c::MDCProblem) = ZeroStart()
# trait: an MDCProblem evolves under the standard MD-curve dynamics
whatdynamics(c::MDCProblem) = MDCDynamics()
# number of model parameters
num_params(c::CurveProblem) = length(c.p0)
# writable buffer with the same shape/type as the parameter vector
param_template(c::CurveProblem) = deepcopy(c.p0)
# nominal parameter vector at which the curve starts
initial_params(c::CurveProblem) = c.p0
"""
DEPRECATED. use MDCProblem
"""
function curveProblem(a, b, c, d, e)
    @warn("curveProblem and specify_curve are DEPRECATED. please use MDCProblem (with the same arguments) instead")
    return MDCProblem(a, b, c, d, e)
end
"""
DEPRECATED. use MDCProblem
"""
specify_curve(cost, p0, dp0, momentum, tspan) = curveProblem(cost, p0, dp0, momentum, tspan)
specify_curve(;cost=nothing, p0=nothing, dp0=nothing,momentum=nothing,tspan=nothing) = curveProblem(cost, p0, dp0, momentum, tspan)
"""
Callback to readjust momentum in the case that the numerical residual from the identity dHdu = 0 crosses a user-specified threshold
"""
(m::MomentumReadjustment)(c::CurveProblem) = readjustment(c, ResidualCondition(), CostateAffect(), m.tol, m.verbose)
"""
Callback to readjust state in the case that the numerical residual from the identity dHdu = 0 crosses a user-specified threshold. EXPERIMENTAL AND WILL PROBABLY BREAK
"""
(m::StateReadjustment)(c::CurveProblem) = readjustment(c, ResidualCondition(), StateAffect(), m.tol, m.verbose)
"""
(c::MDCProblem)()
returns a tuple of ODEProblems specificed by the MDCProblem. Usually a single ODEProblem. Two are provided if the curve crosses zero, so that one can run two curves in parallel going backwards/forwards from zero
"""
function (c::MDCProblem)()
spans = make_spans(c, c.tspan)
cs = map(spans) do span
mult = sign(span[end])
return MDCProblem(c.cost, c.p0, mult * c.dp0, c.momentum, abs.(span))
end
spans = map(x -> abs.(x), spans)
u0s = initial_conditions.(cs)
u0 = map(span -> initial_conditions(c), spans)
fs = dynamics.(cs)
return ODEProblem.(fs, u0s, spans)
# return map(sp -> ODEProblem(f, u0, sp), spans) # make two problems for 2-sided tspan
end
"""
make_spans(c::MDCProblem, span)
- makes sure span of curve is increasing.
- if the span crosses zero, then returns two separate spans. evolve then runs two curves in parallel, going backwards/forwards from zero.
"""
function make_spans(c::CurveProblem, span, ::ZeroStart)
(span[1] > span[2]) && error("make your curve span monotone increasing")
if (span[2] > 0) && (span[1] < 0)
spans = ((0., span[1]), (0., span[2]))
else
spans = (span,)
end
return spans
end
"""
initial_costate(c::MDCProblem)
solves for the initial costate required to evolve a MD curve.
"""
function initial_costate(c::MDCProblem)
μ₂ = (-c.momentum + c.cost(c.p0)) / 2.
λ₀ = -2. * μ₂ * c.dp0
return λ₀
end
"""
Generate initial conditions of an MDCProblem
"""
function initial_conditions(c::MDCProblem)
λ₀ = initial_costate(c)
return cat(c.p0, λ₀, dims=1)
end
"""
Generate vector field for MD curve, as specified by MDCProblem
"""
function dynamics(c::CurveProblem, ::MDCDynamics)
cost = c.cost
∇C = param_template(c)
N = num_params(c)
H = c.momentum
θ₀ = initial_params(c)
function upd(du, u, p, t)
θ = u[1:N] # current parameter vector
λ = u[N + 1:end] # current costate vector
dist = sum((θ - θ₀).^2) # which should = t so investigate cost/benefits of using t instead of dist
C = cost(θ, ∇C) # also updates ∇C as a mutable
μ2 = (C - H) / 2
μ1 = dist > 1e-3 ? (λ' * λ - 4 * μ2^2 ) / (λ' * (θ - θ₀)) : 0.
# if mu1 < -1e-4 warn of numerical issue
# if mu1 > 1e-3 and dist > 1e-3 then set mu1 = 0
du[1:N] = @. (-λ + μ1 * (θ - θ₀)) / (2 * μ2) # ie dθ
du[1:N] /= (sqrt(sum((du[1:N]).^2)))
damping_constant = (λ' * du[1:N]) / (H - C) # theoretically = 1 but not numerically
du[N + 1:end] = @. (μ1 * du[1:N] - ∇C) * damping_constant # ie dλ
res = λ + 2 * μ2 * du[1:N]
return nothing
end
return upd
end
"""
Callback to stop MD Curve evolving if cost > momentum
"""
function (t::TerminalCond)(c::CurveProblem)
cost = c.cost
H = c.momentum
N = num_params(c)
function condition(u, t, integrator)
return (cost(u[1:N]) > H)
end
return DiscreteCallback(condition, terminate!)
end
# Assemble a readjustment DiscreteCallback from a trigger condition and an affect.
# A NaN tolerance is the sentinel for "readjustment disabled".
function readjustment(c::CurveProblem, cnd::ConditionType, aff::AffectType, momentum_tol, verbose::Bool)
    isnan(momentum_tol) && return nothing
    return DiscreteCallback(build_cond(c, cnd, momentum_tol), build_affect(c, aff))
end
# Trigger when |dHdu| (which is identically zero along an exact MD curve)
# exceeds `tol`, signalling accumulated integration error.
function build_cond(c::CurveProblem, ::ResidualCondition, tol, ::MDCDynamics)
    function rescond(u, t, integ)
        absres = dHdu_residual(c, u, t, integ)
        absres > tol ? begin
            # @info "applying readjustment at t=$t, |res| = $absres"
            return true
        end : return false
    end
    return rescond
end
# Trigger when the cost at the current parameters exceeds `tol`.
function build_cond(c::CurveProblem, ::CostCondition, tol)
    N = num_params(c)
    costcond(u, t, integ) = c.cost(u[1:N]) > tol
    return costcond
end
"""
For dHdu_residual and build_affect(::MDCProblem, ::CostateAffect): there is an unnecessary allocation in the line `dθ = ...`. I initially used `dθ[:] = ....`, but this produced unreliable output (the MDCurve changed on each solution). I found that this was because temporary arrays like this are not safe in callbacks, for some reason. The solution is to use SciMLBase.get_tmp_cache. Don't have time to figure out how to do this right now. Do at some point.
"""
"""
dHdu_residual(c::MDCProblem, u, t, dθ)
Checks dHdu residual (u deriv of Hamiltonian). Returns true if abs(residual) is greater than some tolerance (it should be zero)
"""
function dHdu_residual(c::CurveProblem, u, t, integ, ::MDCDynamics)
N = num_params(c)
H = c.momentum
θ₀ = initial_params(c)
θ = u[1:N]
λ = u[N + 1:end]
μ2 = (c.cost(θ) - H) / 2.
μ1 = t > 1e-3 ? (λ' * λ - 4 * μ2^2 ) / (λ' * (θ - θ₀)) : 0.
# dθ = SciMLBase.get_tmp_cache(integ)[1][1:N]
dθ = (-λ + μ1 * (θ - θ₀)) / (2 * μ2)
dθ /= (sqrt(sum((dθ).^2)))
return sum(abs.(λ + 2 * μ2 * dθ))
end
"""
build_affect(c::MDCProblem, ::CostateAffect)
Resets costate to undo effect of cumulative numerical error. Specifically, finds costate so that dHdu = 0, where H is the Hamiltonian.
*I wanted to put dθ[:] = ... here instead of dθ = ... . Somehow the output of the MDC changes each time if I do that, there is a dirty state being transmitted. But I don't at all see how from the code. Figure out.*
"""
function build_affect(c::CurveProblem, ::CostateAffect, ::MDCDynamics)
N = num_params(c)
H = c.momentum
θ₀ = initial_params(c)
dp = param_template(c)
function reset_costate!(integ, dθ)
θ = integ.u[1:N] # current parameter vector
λ = integ.u[N + 1:end] # current costate vector
μ2 = (c.cost(θ) - H) / 2
μ1 = integ.t > 1e-3 ? (λ' * λ - 4 * μ2^2 ) / (λ' * (θ - θ₀)) : 0.
# dθ = SciMLBase.get_tmp_cache(integ)[1][1:N]
dθ = (-λ + μ1 * (θ - θ₀)) / (2 * μ2)
dθ /= (sqrt(sum((dθ).^2)))
integ.u[N + 1:end] = -2 * μ2 * dθ
return integ
end
return integ -> reset_costate!(integ, dp)
end
"""
build_affect(c::MDCProblem, ::StateAffect)
resets state so that residual is zero. also resets costate necessarily. NOT YET FULLY IMPLEMENTED
min C(θ) such that norm(θ - θ₀)^2 = K where K is current distance
we will do this with unconstrained optimisation and lagrange multipliers
ideally would have an inequality constraint >=K. But Optim.jl doesn't support this
"""
function build_affect(c::CurveProblem, ::StateAffect, ::MDCDynamics)
N = num_params(c)
H = c.momentum
cost = c.cost
θ₀ = initial_params(c)
dp = param_template(c)
_reset_costate! = build_affect(c, CostateAffect())
function reset_state!(integ, dθ)
K = sum((integ.u[1:N] - θ₀).^2)
println(K)
function constr(x) # constraint func: g = 0
return K - sum((x - θ₀).^2)
end
function L(x)
θ, λ = x[1:end - 1], x[end]
return cost(θ) + λ * constr(θ)
end
gc = deepcopy(θ₀)
function L(x, g)
θ, λ = x[1:end - 1], x[end]
C = cost(θ, dθ) # dθ is just an arbitrary pre-allocation
cstr = constr(θ)
g[1:end - 1] = gc + 2 * λ * (θ - θ₀)
g[end] = cstr
return C + λ * cstr
end
g = zeros(N + 1)
opt = optimize(L, cat(integ.u[1:N], 0., dims=1), LBFGS())
C0 = cost(θ₀)
@info "cost after readjustment is $(opt.minimum). cost before readjustment was $C0"
(opt.ls_success == true) && (integ.u[1:N] = opt.minimizer[1:N])
integ = _reset_costate!(integ, dθ)
return integ
end
return integ -> reset_state!(integ, dp)
end
# Attach a SavingCallback that stores the parameter half of the state (u[1:N])
# every 0.1 units of curve length, so partial progress survives interruption.
function saving_callback(prob::ODEProblem, saved_values::SavedValues)
    # save states in case simulation is interrupted
    saving_cb = SavingCallback((u, t, integrator) -> u[1:length(u)÷2], saved_values, saveat=0.0:0.1:prob.tspan[end])
    return remake(prob, callback=saving_cb)
end
# Already-constructed DE callbacks are wrapped in a CallbackSet unchanged.
function build_callbacks(c::CurveProblem, callbacks::SciMLBase.DECallback)
    # DECallback supertype includes CallbackSet
    return CallbackSet(callbacks)
end
# no user callbacks supplied
build_callbacks(c::CurveProblem, n::Nothing) = nothing
# Instantiate user-supplied CallbackCallables against the curve problem,
# appending a default MomentumReadjustment (if none supplied) and the terminal
# condition. Works on a widened copy so that (a) the caller's vector is not
# mutated and (b) callback types different from T can be appended even when
# `mdc_callbacks` is concretely typed (e.g. Vector{Verbose}).
function build_callbacks(c::CurveProblem, mdc_callbacks::Vector{T}, mtol::Number) where T <: CallbackCallable
    cbs = Vector{CallbackCallable}(mdc_callbacks)
    if !any(x -> x isa MomentumReadjustment, cbs)
        push!(cbs, MomentumReadjustment(mtol))
    end
    push!(cbs, TerminalCond())
    # each CallbackCallable may produce one or several DE callbacks: flatten
    return reduce(vcat, [cb(c) for cb in cbs])
end
# One-line coloured summary of an MDCProblem (type, parameter eltype, span type).
function Base.summary(io::IO, prob::MDCProblem)
    type_color, no_color = SciMLBase.get_colorizers(io)
    print(io,
        type_color, nameof(typeof(prob)),
        no_color, " with uType ",
        type_color, typeof(prob.p0),
        no_color, " and tType ",
        type_color,
        prob.tspan isa Function ?
        "Unknown" : (prob.tspan === nothing ?
        "Nothing" : typeof(prob.tspan[1])),
        no_color,
        " holding cost function of type ", type_color, nameof(typeof(prob.cost)), no_color
    )
end
# Multi-line REPL display of an MDCProblem: summary line plus the main fields.
# (Removed an unused `type_color, no_color = SciMLBase.get_colorizers(io)`
# binding — the colours were never used in this method.)
function Base.show(io::IO, mime::MIME"text/plain", A::MDCProblem)
    summary(io, A)
    println(io)
    print(io, "timespan: ", A.tspan, "\n")
    print(io, "momentum: ", A.momentum, "\n" )
    print(io, "Initial parameters p0: ", A.p0, "\n")
    print(io, "Initial parameter direction dp0: ", A.dp0, "\n")
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 4840 | """
MDCProblemJumpStart(cost, p0 dp0, momentum, tspan, jumpsize, reinitialise_dp0)
Do I want to add an option: (reinitialise_dp0 == true) && give option to recalculate hessian
"""
struct MDCProblemJumpStart{A,B,C,D,E,F} <: CurveProblem
cost::A
p0::B
dp0::C
momentum::D
tspan::E
jumpsize::F
reinitialise_dp0::Bool
## reverse initial direction and signflip curve span if the latter is nonpositive
function MDCProblemJumpStart(a::A, b::B, c::C, d::D, e::E, f::F, g::Bool) where A where B where C where D where E where F
if max(e...) <= 0.
e = map(x -> -x |> abs, e) |> reverse
c = -c
end
new{A,B,C,D,E,F}(a, b, c, d, e, f, g)
end
end
# trait: this problem starts with a jump of size `jumpsize` along dp0
isjumped(c::MDCProblemJumpStart) = JumpStart(c.jumpsize)
# trait: dynamics are the standard MD-curve dynamics
whatdynamics(c::MDCProblemJumpStart) = MDCDynamics()
# Convert a plain MDCProblem into a jump-started one.
function jumpstart(m::MDCProblem, jumpsize, reinitialise_dp0=false)
    return MDCProblemJumpStart(m.cost, m.p0, m.dp0, m.momentum, m.tspan, jumpsize, reinitialise_dp0)
end
# Applying a JumpStart trait object to an MDCProblem likewise jump-starts it.
function (j::JumpStart)(m::MDCProblem; reinitialise_dp0=false)
    return MDCProblemJumpStart(m.cost, m.p0, m.dp0, m.momentum, m.tspan, j.jumpsize, reinitialise_dp0)
end
# Build the ODEProblem(s) for a jump-started curve; mirrors (c::MDCProblem)()
# but propagates jumpsize/reinitialise_dp0 to the per-span sub-problems.
# NOTE(review): spans are built with the ZeroStart trait here, not the JumpStart
# make_spans method below — confirm which is intended.
function (c::MDCProblemJumpStart)()
    spans = make_spans(c, c.tspan, ZeroStart())
    cs = map(spans) do span
        mult = sign(span[end])
        return MDCProblemJumpStart(c.cost, c.p0, mult * c.dp0, c.momentum, abs.(span), c.jumpsize, c.reinitialise_dp0)
    end
    spans = map(x -> abs.(x), spans)
    u0s = initial_conditions.(cs)
    fs = dynamics.(cs)
    return ODEProblem.(fs, u0s, spans)
end
# Shift each span's start in the direction of integration, since a jump-started
# curve begins away from the origin.
# NOTE(review): the shift is a hard-coded 0.1 rather than the problem's
# `jumpsize` (cf. get_jump) — confirm whether these are meant to agree.
function make_spans(c::CurveProblem, span, j::JumpStart)
    spans = make_spans(c, span, ZeroStart())
    spans = map(spans) do span
        (span[1] + 0.1 * sign(span[2]), span[2])
    end
    return spans
end
"""
First need to move p0 in the jumpstart direction. BUT NOT change c.p0.
Then need to make ODEProblem tspan start at the jumpstart time instead of time. ie modify the spans.
Then need to make the costate: first find an initial u, potentially with reinitialise_dp0==true. Then its easy
Then dynamics are the same
"""
function initial_conditions(c::MDCProblemJumpStart)
θ₀ = c.p0 + get_jump(c)
λ₀ = initial_costate(c)
return cat(θ₀, λ₀, dims=1)
end
# Offset applied to p0 before the curve starts, dispatched on the jump trait.
get_jump(c::CurveProblem) = get_jump(c, isjumped(c))
# no jump: a zero vector of the same shape as the parameters
get_jump(c, ::ZeroStart) = zero.(param_template(c))
# jump: a step of length `jumpsize` along the normalised initial direction
function get_jump(c, j::JumpStart)
    direction = c.dp0 / norm(c.dp0)
    return j.jumpsize * direction
end
# Initial costate at the jumped-to point; the curve direction may first be
# re-initialised by get_initial_velocity.
# NOTE(review): the cost is evaluated at c.p0, not at the jumped-to point
# p0 + get_jump(c) — confirm which is intended.
function initial_costate(c::MDCProblemJumpStart)
    u = get_initial_velocity(c::MDCProblemJumpStart)
    μ₂ = (-c.momentum + c.cost(c.p0)) / 2.
    λ₀ = -2. * μ₂ * u # + μ₁*get_jump(c), but μ₁ = 0 by complementary slackness at this point
    return λ₀
end
"""
Algorithm:
Let y = θ - θ₀;
f(x) = ∇Cᵀx; at θ
g₁(x) = xᵀx - 1;
g₂(x) = - xᵀy
Then optimisation problem is:
minₓ f(x) subject to
gᵢ(x) ≤ 0.
In other words, find a direction that maximally anticorrelates with the gradient, but has norm ≤ 1 and is pointing away from the curve origin. Norm = 1 would be ideal, but deconvexifies.
KKT conditions give:
∇C + 2μ₁x - μ₂y = 0;
μᵢ ⩾ 0 + complementary slackness
Analytic solution:
Case 1: ∇Cᵀy ≤ 0. Then μ₂ = 0 from complementary slackness and x ∝ - ∇C
Case 2: ∇Cᵀ ≥ 0. then
2μ₁x = μ₂y - ∇C
⇒ μ₂ = ∇Cᵀy / yᵀy
⇒ ∇Cᵀx ≥ 0 → x = 0
∇Cᵀx ≤ 0 → μ₁ s.t. norm(x) = 1
Issues:
- ∇C might be quite noisy close to the minimum. Might want the option of a second order condition involving hessian recalculation for cheaper problems. IE
- What do we do if x = 0? For now, keep the old dp0. Because at least that is in a shallow direction of the old Hessian. In future, could provide option to recalculate Hessian OR reuse Hessian at p0, which would be a good estimate of the new Hessian. If we have the Hessian, the optimisation problem would change:
f(x) = ∇Cᵀx + 0.5 xᵀ∇²Cx ; at θ
KKT:
∇C + ∇²Cx + 2μ₁x - μ₂y = 0
(∇²C + 2μ₁𝕀)x = μ₂y - ∇C
And then go through the μᵢ = or ≂̸ 0 cases.
"""
function get_initial_velocity(c::MDCProblemJumpStart)
(c.reinitialise_dp0 == false) && (return c.dp0)
∇C = param_template(c)
y = get_jump(c)
new_p0 = c.p0 + y
c.cost(new_p0, ∇C)
d = dot(∇C, y)
if d ≤ 0
new_dp0 = -∇C/(norm(∇C))
else
μ₂ = dot(∇C, y) / sum(abs2,y)
x = μ₂*y - ∇C; nx = norm(x)
new_dp0 = x/nx
end
if c.cost(new_p0 + 0.001new_dp0) > c.cost(new_p0 + 0.001c.dp0)
@info("couldn't cheaply find a better initial curve direction dp0 for the jumpstarted problem. will not reinitialise dp0. Provide your own if you want")
return c.dp0
else
@info("cheaply found a better initial curve direction dp0 for the jumpstarted problem. . re-initialising dp0")
return x / nx
end
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 2193 | abstract type AbstractCurveSolution end
"""
mutable struct MDCSolution{S, F} <: AbstractCurve
holds solution for minimally disruptive curve.
fields: sol, N, cost
sol is a conventional ODESolution structure
"""
mutable struct MDCSolution{S,F} <: AbstractCurveSolution
sol::S
N::Int64
cost::F
end
"""
function MDCSolution(sol, costf=nothing)
returns an mdc::MDCSolution out of a sol::ODESolution
"""
function MDCSolution(sol, costf=nothing)
N = length(sol.prob.u0) ÷ 2
return MDCSolution(sol, N, costf)
end
"""
function (mdc::MDCSolution)(t::N) where N <: Number
returns array view of states and costates of curve at distance t
"""
function (mdc::MDCSolution)(t::N) where N <: Number
return (states = (@view mdc.sol(t)[1:mdc.N]), costates = (@view mdc.sol(t)[mdc.N + 1:end]))
end
"""
function (mdc::MDCSolution)(ts::A) where A <: Array
returns array of states and costates of curve at distances t
"""
function (mdc::MDCSolution)(ts::A) where A <: Array
states_ = Array{Float64,2}(undef, mdc.N, length(ts))
costates_ = Array{Float64,2}(undef, mdc.N, length(ts))
for (i, el) in enumerate(ts)
states_[:,i] = mdc(el)[:states]
costates_[:,i] = mdc(el)[:costates]
end
return (states = states_, costates = costates_)
end
# parameter trajectory as an N × ntimes matrix (all saved points, or at `ts`)
trajectory(mdc::MDCSolution) = Array(mdc.sol)[1:mdc.N, :]
trajectory(mdc::MDCSolution, ts) = mdc(ts)[:states]
# costate trajectory, same layout
costate_trajectory(mdc::MDCSolution) = Array(mdc.sol)[mdc.N + 1:end,:]
costate_trajectory(mdc::MDCSolution, ts) = mdc(ts)[:costates]
# arc-length distances at which the solver stored the curve
distances(mdc::MDCSolution) = mdc.sol.t
# parameter displacement relative to the curve origin (distance 0)
Δ(mdc::MDCSolution) = trajectory(mdc) .- mdc(0.)[:states]
Δ(mdc::MDCSolution, ts) = mdc(ts)[:states] .- mdc(0.)[:states]
"""
cost_trajectory(mdc::MDCSolution, ts)
calculates cost on mdc curve at each point in the array/range ts
"""
function cost_trajectory(mdc::MDCSolution, ts)
if mdc.cost === nothing
@warn "MDCSolution struct has no cost function. You need to run mdc = add_cost(mdc, cost)"
return
else
return [mdc.cost(el) for el in eachcol(mdc(ts)[:states])]
end
end
# Display an MDCSolution by delegating to the underlying ODESolution's show.
function Base.show(io::IO, m::MIME"text/plain", M::MDCSolution)
    show(io, m, M.sol)
end
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 2228 | abstract type CurveProblem end
abstract type CurveModifier end
abstract type WhatJump <: CurveModifier end
abstract type WhatDynamics <: CurveModifier end
struct JumpStart{F <: AbstractFloat} <: WhatJump
jumpsize::F
end
struct ZeroStart <: WhatJump end
function make_spans(c::CurveProblem, span)
return make_spans(c::CurveProblem, span, isjumped(c))
end
struct MDCDynamics <: WhatDynamics end
dynamics(c::CurveProblem) = dynamics(c::CurveProblem, whatdynamics(c))
build_cond(c::CurveProblem, r, tol) = build_cond(c, r, tol, whatdynamics(c))
dHdu_residual(c::CurveProblem, u, t, p) = dHdu_residual(c, u, t, p, whatdynamics(c))
build_affect(c::CurveProblem, affect) = build_affect(c, affect, whatdynamics(c))
"""
For callbacks to tune MD Curve
"""
abstract type ConditionType end
struct ResidualCondition <: ConditionType end
struct CostCondition <: ConditionType end
abstract type CallbackCallable end
abstract type AdjustmentCallback <: CallbackCallable end
"""
MomentumReadjustment(tol::AbstractFloat, verbose::Bool)
Ideally, dHdu = 0 throughout curve evolution, where H is the Hamiltonian/momentum, and u is the curve velocity in parameter space. Numerical error integrates and prevents this. This struct readjusts momentum when `abs(dHdu) > tol`, so that `dHdu = 0` is recovered.
"""
struct MomentumReadjustment{T <: AbstractFloat} <: AdjustmentCallback
tol::T
verbose::Bool
end
"""
Terminates curve evolution when the cost exceeds the momentum
"""
struct TerminalCond <: AdjustmentCallback end
MomentumReadjustment(a; verbose=false) = MomentumReadjustment(a, verbose)
"""
Experimental. See documentation for MomentumReadjustment. This acts the same, but instead of modifying the momentum, it modifies the state of the curve (i.e. current parameters) itself, by doing gradient descent to minimise the cost function, subject to the constraint that the distance from the initial parameters does not decrease.
"""
struct StateReadjustment{T <: AbstractFloat} <: AdjustmentCallback
tol::T
verbose::Bool
end
StateReadjustment(a; verbose=false) = StateReadjustment(a, verbose)
abstract type AffectType end
struct StateAffect <: AffectType end
struct CostateAffect <: AffectType end | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 1323 | module MinimallyDisruptiveCurves
using SciMLBase, DiffEqCallbacks, OrdinaryDiffEq
using FiniteDiff, LinearAlgebra, ModelingToolkit, ForwardDiff
using RecipesBase, ThreadsX
include("MDCTypes.jl")
include("MDCProblem.jl")
include("MDCProblemJumpstart.jl")
include("MDCSolution.jl")
include("plotting_utilities.jl")
include("utilities/loss_algebra.jl")
include("utilities/extra_loss_functions.jl")
include("utilities/helper_functions.jl")
include("utilities/solution_parsing.jl")
include("utilities/transform_structures.jl")
include("evolve_options.jl")
include("evolve.jl")
import Base.show
export DiffCost, make_fd_differentiable, l2_hessian
export CurveProblem, specify_curve, evolve, trajectory, costate_trajectory
export MDCProblem, MDCProblemJumpStart, JumpStart, jumpstart
export TransformationStructure, logabs_transform, bias_transform, transform_problem, transform_ODESystem, only_free_params, fix_params, transform_cost
export sum_losses, build_injection_loss, get_name_ids, soft_heaviside, biggest_movers, get_ids_names
export MomentumReadjustment, StateReadjustment, VerboseOutput, ParameterBounds, CurveInfoSnippet, CurveDistance, HamiltonianResidual, Verbose, TerminalCond, CallbackCallable
export Δ, distances, trajectory, costate_trajectory, add_cost, cost_trajectory, output_on_curve
end # module
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 1594 | """
evolve(c::CurveProblem, solmethod=Tsit5; mdc_callback=nothing, callback=nothing, saved_values=nothing, momentum_tol=1e-3,kwargs...)
Evolves a minimally disruptive curve, with curve parameters specified by curveProblem. Uses DifferentialEquations.solve() to run the ODE.
MinimallyDisruptiveCurves.jl callbacks go in the `mdc_callback` keyword argument. You can also use any DifferentialEquations.jl callbacks compatible with DifferentialEquations.solve(). They go in the `callback` keyword argument
"""
function evolve(c::CurveProblem, solmethod=Tsit5; mdc_callback=CallbackCallable[], callback=nothing, saved_values=nothing, momentum_tol=1e-3, kwargs...)
(!(eltype(mdc_callback) == CallbackCallable)) && (mdc_callback = convert(Vector{CallbackCallable}, mdc_callback))
function merge_sols(ens, p)
if length(ens) == 1
return ens[1]
elseif length(ens) == 2
t = cat(-ens[1].t[end:-1:1], ens[2].t, dims=1)
u = cat(ens[1].u[end:-1:1], ens[2].u, dims=1)
return DiffEqBase.build_solution(p, Tsit5(), t, u)
end
end
probs = c()
!isnothing(saved_values) && (probs = saving_callback.(probs, saved_values))
prob_func = (prob, i, repeat) -> probs[i]
callbacks = CallbackSet(
build_callbacks(c, mdc_callback, momentum_tol)...,
build_callbacks(c, callback)
)
e = EnsembleProblem(probs[1], prob_func=prob_func)
sim = solve(e, Tsit5(), EnsembleThreads(), trajectories=length(probs); callback=callbacks, kwargs...)
return merge_sols(sim, probs[1]) |> MDCSolution
end | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 4070 | abstract type CurveInfoSnippet end
struct EmptyInfo <: CurveInfoSnippet end
"""
CurveDistance(timepoints::AbstractRange)
CurveDistance(timepoints::Vector{<:AbstractFloat})
Print @info REPL output when curve distance reaches each of the timepoints::Vector{<:AbstractFloat}
Example
c = CurveDistance(0.1:0.1:2.1)
c can then be used as an argument to Verbose(), which then goes as a keyword argument into evolve.
"""
struct CurveDistance{V <: AbstractFloat} <: CurveInfoSnippet
timepoints::Vector{V}
end
"""
HamiltonianResidual(timepoints::AbstractRange)
HamiltonianResidual(timepoints::Vector{<:AbstractFloat})
Print @info REPL output on the numerical value of dHdu, the u-derivative of the Hamiltonian, at timepoints::Vector{<:AbstractFloat}
Example
c = HamiltonianResidual(0.1:0.1:2.1)
c can then be used as an argument to Verbose(), which then goes as a keyword argument into evolve.
"""
struct HamiltonianResidual{V <: AbstractFloat} <: CurveInfoSnippet
timepoints::Vector{V}
end
CurveDistance(a::AbstractRange) = CurveDistance(a |> collect)
HamiltonianResidual(a::AbstractRange) = HamiltonianResidual(a |> collect)
"""
Construct as `v = Verbose(a::Vector{T}) where T <: CallbackCallable`
Example:
```v = Verbose([HamiltonianResidual(0.1:2.:10), CurveDistance(0.1:0.1:5)])
evolve(c::MDCProblem, ...; mdc_callback=[v, other_mdc_callbacks])
````
In this case, you would get REPL output as the curve evolved, indicating when the curve reached each distance in `0.1:0.1:5`, and indicating the residual on dHdu, which is ideally zero, at `0.1:0.1:5`.
"""
struct Verbose{T <: CurveInfoSnippet} <: CallbackCallable
snippets::Vector{T}
end
Verbose() = Verbose([EmptyInfo()])
Verbose(snippet::EmptyInfo) = Verbose()
Verbose(snippet::T) where T <: CurveInfoSnippet = Verbose([snippet])
"""
ParameterBounds(ids, lbs, ubs)
- ids are the indices of the model parameters that you want to bound
- lbs are an array of lower bounds, with length == to indices
- ubs are...well you can guess.
"""
struct ParameterBounds{I <: Integer,T <: Number} <: AdjustmentCallback
ids::Vector{I}
lbs::Vector{T}
ubs::Vector{T}
end
function (c::CurveDistance)(cp::CurveProblem, u, t, integ)
@info "curve length is $t"
nothing
end
function (h::HamiltonianResidual)(c::CurveProblem, u, t, integ)
x = dHdu_residual(c, u, t, nothing)
@info "dHdu residual = $x at curve length $t"
end
(e::EmptyInfo)(c, u, t, integ) = nothing
function (v::Verbose)(c::CurveProblem)
to_call = map(v.snippets) do snippet
(u, t, _integ) -> snippet(c, u, t, _integ)
end
return map(to_call, v.snippets) do each, snippet
FunctionCallingCallback(each; funcat=snippet.timepoints)
end
end
"""
DEPRECATED, use Verbose() instead.
VerboseOutput(level=:low, times = 0:0.1:1.)
Callback to give online info on how the solution is going, as the MDCurve evolves. activates at curve distances specified by times
"""
function VerboseOutput(level=:low, times=0:0.1:1.)
function affect!(integ)
if level == :low
@info "curve length is $(integ.t)"
end
if level == :medium
@info "dHdu residual = "
end
if level == :high
end
return integ
end
return PresetTimeCallback(times, affect!)
end
"""
ParameterBounds(ids::Vector{Integer},lbs::Vector{Number},ubs::Vector{Number})
parameters[ids] must fall within lbs and ubs, where lbs and ubs are Arrays of the same size as ids.
Create hard bounds on the parameter space over which the minimally disruptive curve can trace. Curve evolution terminates if it hits a bound.
"""
function (p::ParameterBounds)(c::CurveProblem)
function condition(u, t, integrator)
tests = u[p.ids]
any(tests .< p.lbs) && return true
any(tests .> p.ubs) && return true
return false
end
return DiscreteCallback(condition, terminate!)
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 1721 | """
plot recipe for ::MDCSolution
kwargs: pnames are array of parameter names
idxs: are parameter indices to plot
what ∈ (:trajectory, :final_changes) determines the plot type
"""
@recipe function f(mdc::MDCSolution; pnames=nothing, idxs=nothing, what=:trajectory)
if idxs === nothing
num = min(5, mdc.N)
idxs = biggest_movers(mdc, num)
end
# if !(names === nothing)
# labels --> names[idxs]
# end
# ["hi" "lo" "lo" "hi" "lo"]
tfirst = mdc.sol.t[1]
tend = mdc.sol.t[end]
layout := (1, 1)
bottom_margin := :match
if what == :trajectory
@series begin
if !(pnames === nothing)
label --> reshape(pnames[idxs], 1, :)
end
title --> "change in parameters over minimally disruptive curve"
xguide --> "distance"
yguide --> "Δ parameters"
distances(mdc), Δ(mdc)[idxs,:]'
end
end
if what == :final_changes
@series begin
title --> "biggest changers"
seriestype := :bar
label --> "t=$tend"
xticks --> (1:5, reshape(pnames[idxs], 1, :))
xrotation --> 90
Δ(mdc, tend)[idxs]
end
if tfirst < 0.
@series begin
label --> "t=$tfirst"
seriestype := :bar
xticks --> (1:5, reshape(pnames[idxs], 1, :))
xrotation --> 90
Δ(mdc, tfirst)[idxs]
end
end
end
end
"""
output_on_curve(f, mdc, t)
Useful when building an animation of f(p) as the parameters p vary along the curve.
"""
function output_on_curve(f, mdc, t)
return f(mdc(t)[:states])
end | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 1694 | """
The two stage method in DiffEqParamEstim is great, but it employs (intelligent) estimation of the derivative of the data over time. If we actually have a nominal solution, we know exactly what the derivative is over time. So this exploits it
"""
"""
For a prob::ODEProblem, with nominal parameters p0, creates a cost function C(p)
Let pprob = remake(prob, p=p).
C(p) is the collocation cost associated with pprob. Calculated by integrating the following over the trajectory of solve(prob; saveat=tsteps):
int_u sum(pprob.f(u) - prob.f(u)).^2
with output map g(x), this turns into
int_u sum(dgdx(u)*pprob.f(u) - dgdx(u)*prob.f(u)).^2
"""
function build_injection_loss(prob::ODEProblem, solmethod::T, tpoints, output_map= x -> x) where T <: DiffEqBase.AbstractODEAlgorithm
pdim = length(prob.u0)
nom_sol = Array(solve(prob, solmethod, saveat=tpoints))
n = length(tpoints)
dgdx = x -> ForwardDiff.jacobian(output_map, x)
dgdx_template = dgdx(prob.u0)
dgdx_template2 = deepcopy(dgdx_template)
function cost(p)
pprob = remake(prob, p=p)
du_nom = similar(prob.u0, promote_type(eltype(prob.u0), eltype(p)))
du_p = similar(pprob.u0, promote_type(eltype(pprob.u0), eltype(p)))
c = 0.
@inbounds for i = 1:n
prob.f(du_nom,nom_sol[:,i], prob.p, tpoints[i])
pprob.f(du_p, nom_sol[:,i], p, tpoints[i])
dgdx_template = dgdx(nom_sol[:,i])
c += sum(abs2, dgdx_template*du_nom .- dgdx_template*du_p)
end
return c
end
function cost2(p,g)
g[:] = ForwardDiff.gradient(cost, p)
return cost(p)
end
return DiffCost(cost, cost2)
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 978 |
"""
makes a soft analogue of the heaviside step function. useful for inputs to differential equations, as it's easier on the numerics.
"""
soft_heaviside(t, nastiness, step_time) = 1 / (1 + exp(nastiness * (step_time - t)))
soft_heaviside(nastiness, step_time) = t -> soft_heaviside(t, nastiness, step_time)
get_ids_names(opArray) = repr.(opArray)
"""
l2_hessian(nom_sol)
gets hessian according to L2 loss under the assumption that loss(θ₀) = 0. nom_sol is the solution of the nominal ODEProblem.
The Hessian then only requires first derivatives: it is sum_ij dyi/dθ * dyj/dtheta
"""
function l2_hessian(nom_sol)
prob = nom_sol.prob
function pToL2(p)
pprob = remake(prob, p=p)
psol = solve(pprob, nom_sol.alg, saveat=nom_sol.t) |> Array
psol = reshape(psol, 1, :)
return psol
end
gr = ForwardDiff.jacobian(pToL2, prob.p)
u, d, v = svd(gr)
return v * diagm(d.^2) * v'
# = gr'*gr but a bit more accurate
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 1846 | """
Utilities to add cost functions together
"""
"""
A struct for holding differentiable cost functions. Let d::DiffCost.
d(p) returns cost at p
d(p,g) returns cost and **mutates** gradient at p
"""
struct DiffCost{F,F2} <: Function
cost_function::F
cost_function2::F2
end
(f::DiffCost)(x) = f.cost_function(x)
(f::DiffCost)(x,y) = f.cost_function2(x, y)
"""
given a cost function C(p), makes a new cost d::DiffCost, which has a method for returning the finite-difference gradient.
"""
function make_fd_differentiable(cost)
function cost2(p, g)
FiniteDiff.finite_difference_gradient!(g, cost, p)
return cost(p)
end
return DiffCost(cost, cost2)
end
"""
sum_losses(lArray::Array{T,1}, p0) where T <: Function
- Given array of cost functions c1...cn, makes new cost function D. Multithreads evaluation of D(p) and D(p,g) if threads are available.
- c1...cn must be differentiable: ci(p,g) returns cost and mutates gradient g
D(p) = sum_i c_i(p)
D(p,g) mutates g to give gradient of D.
"""
function sum_losses(lArray::Array{T,1}, p0) where T <: Function
(Threads.nthreads() == 1) && (@info "Note that restarting julia with multiple threads will increase performance of the generated loss function from sum_losses()")
dummy_gs = [deepcopy(p0) for el in lArray]::Vector{typeof(p0)}
cs = [0. for el in lArray]
function cost1(p)
return sum(lArray) do loss
loss(p)
end
end
n = length(lArray)
cs = Vector{Float64}(undef, n)
pure_costs = map(lArray) do el
(p, g) -> (el[i](p, g), g)
end
function cost2(p, g)
ThreadsX.foreach(enumerate(lArray)) do (i, el)
cs[i] = el(p, dummy_gs[i])
end
g[:] = sum(dummy_gs)
return sum(cs)
end
return DiffCost(cost1, cost2)
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 355 | """
Utilities to find and plot the biggest changing parameters
"""
"""
find parameter indices of the biggest changing parametesr in the curve
"""
function biggest_movers(mdc::AbstractCurveSolution, num::Integer; rev=false)
diff = trajectory(mdc)[:,end] - trajectory(mdc)[:,1]
ids = sortperm(diff, by=abs, rev=!rev)
ids = ids[1:num]
end
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 6513 |
struct TransformationStructure{T <: Function,U <: Function}
name::Union{String,Nothing}
p_transform::T
inv_p_transform::U
end
"""
returns TransformationStructure that flips the signs of negative parameters, and then does the transform p -> log(p).
"""
function logabs_transform(p0)
name = "logabs_"
is_positive = convert.(Int64, p0 .>= 0)
is_positive[is_positive .== 0] .= -1
pos(p) = p .* is_positive
p_transform(p) = log.(p .* is_positive)
inv_p_transform(logp) = exp.(logp) .* is_positive
return TransformationStructure(name, p_transform, inv_p_transform)
end
"""
returns TransformationStructure that fixes parameters[indices]
"""
function fix_params(p0, indices)
indices |> unique! |> sort!
not_indices = setdiff(collect(1:length(p0)), indices)
p0 = deepcopy(p0)
function p_transform(p)
return deleteat!(deepcopy(p), indices)
end
function inv_p_transform(p)
out = [p[1] for el in p0]
out[not_indices] .= p
out[indices] .= p0[indices]
return out
end
return TransformationStructure(nothing, p_transform, inv_p_transform)
end
"""
returns TransformationStructure. params[indices] -> biases.*params[indices]
"""
function bias_transform(p0, indices, biases)
indices |> unique! |> sort!
not_indices = setdiff(collect(1:length(p0)), indices)
all_biases = ones(size(p0))
all_biases[indices] = biases
all_inv_biases = 1. ./ all_biases
function p_transform(p)
return p .* all_biases
end
function inv_p_transform(p)
return p .* all_inv_biases
end
return TransformationStructure(nothing, p_transform, inv_p_transform)
end
"""
returns TransformationStructure that fixes **all but** parameters[indices]
"""
function only_free_params(p0, indices)
name = "only free indices are $indices"
indices |> unique! |> sort!
not_indices = setdiff(collect(1:length(p0)), indices)
return fix_params(p0, not_indices)
end
"""
transform_cost(cost, p0, tr::TransformationStructure; unames=nothing, pnames=nothing)
return DiffCost(new_cost, new_cost2), newp0
Given a cost function C(p), makes a new differentiable cost function D(q), where q = tr(p) and D(q) = C(p)
"""
function transform_cost(cost, p0, tr::TransformationStructure; unames=nothing, pnames=nothing)
newp0 = tr.p_transform(p0)
jac = ForwardDiff.jacobian
function new_cost(p)
return p |> tr.inv_p_transform |> cost
end
function new_cost2(p, g)
orig_p = tr.inv_p_transform(p)
orig_grad = deepcopy(orig_p)
val = cost(orig_p, orig_grad)
g[:] = jac(tr.inv_p_transform, p) * orig_grad
return val
end
return DiffCost(new_cost, new_cost2), newp0
end
"""
new_od = transform_ODESystem(od::ODESystem, tr::TransformationStructure)
- reparameterises the parameters of an ODE system via the transformation tr.
- within the ODE eqs, any instance of a parameter p is replaced with tr.inv_p_transform(p)
- if there are default parameter values p0, they are changed to tr.p_transform(p0)
"""
function transform_ODESystem(od::ModelingToolkit.AbstractSystem, tr::TransformationStructure)
t = independent_variable(od)
unames = states(od) .|> Num
eqs =equations(od)
ps = parameters(od) .|> Num
new_ps = transform_names(ps, tr) # modified names under transformation
of = ODEFunction(od, eval_expression=false) # to solve world age issues
rhs = similar(unames, eltype(unames))
of(rhs, unames, tr.inv_p_transform(new_ps), t) # in place modification of rhs
lhs = [el.lhs for el in eqs]
_defaults = ModelingToolkit.get_defaults(od)
# new_defaults -= map(_defaults) do (key, val)
# if k
# end
if length(_defaults) > 0
p_collect = [el => _defaults[el] for el in ps] # in correct order
if length(p_collect) > 0
new_p_vals = tr.p_transform(last.(p_collect))
new_p_dict = Dict(new_ps .=> new_p_vals)
_defaults = merge(_defaults, new_p_dict)
end
end
de = ODESystem(lhs .~ rhs, t, unames, new_ps, defaults=_defaults, checks=false, name=nameof(od))
return de # (vars .=> last.(ic)), (new_ps .=> newp0)
end
"""
Reparameterises prob::ODEProblem via the transformation tr. so newprob.p = tr(p) is an equivalent ODEProblem to prob.p = p
"""
function transform_problem(prob::ODEProblem, tr::TransformationStructure; unames=nothing, pnames=nothing)
println(pnames)
sys = modelingtoolkitize(prob)
eqs = ModelingToolkit.get_eqs(sys)
pname_tr = parameters(sys) .=> pnames
uname_tr = states(sys) .=> unames
neweqs = eqs
if !(pnames === nothing)
neweqs = [el.lhs ~ substitute(el.rhs, pname_tr) for el in neweqs]
else
pnames = ModelingToolkit.get_ps(sys)
end
if !(unames === nothing)
neweqs = [substitute(el.lhs, uname_tr) ~ substitute(el.rhs, uname_tr) for el in neweqs]
else
unames = ModelingToolkit.get_states(sys)
end
named_sys = ODESystem(neweqs, ModelingToolkit.get_iv(sys), unames, pnames, defaults=merge(Dict(unames .=> prob.u0), Dict(pnames .=> prob.p)), name=nameof(sys))
newp0 = tr.p_transform(prob.p)
t_sys = transform_ODESystem(named_sys, tr)
return t_sys, (ModelingToolkit.get_states(t_sys) .=> prob.u0),
(ModelingToolkit.get_ps(t_sys) .=> newp0)
end
"""
Transforms a set of parameter names via the name provided in tr
The output names are ModelingToolkit.Variable types
# Example
nv = [m, c, k]
tr.name = log
returns the variables: [log(m)], log(c), log(k)]
"""
function transform_names(nv, tr::TransformationStructure)
if tr.name === nothing
names = Symbol.(repr.(tr.p_transform(nv)))
else
names = Symbol.(tr.name .* repr.(nv))
end
new_vars = [Num(Variable(el)) for el in names]
return new_vars
end
"""
searches for the parameter indices corresponding to names.
ps is an array of names
"""
function get_name_ids(ps, names::Array{String,1})
# can't do a single findall as this doesn't preserve ordering
all_names = repr.(ps)
ids = [first(findall(x -> x == names[i], all_names)) for (i, el) in enumerate(names)]
return ids
end
"""
searches for the parameter indices corresponding to names.
ps is an array of pairs: names .=> vals
"""
function get_name_ids(ps::Array{Pair{T,U},1}, names::Array{String,1}) where T where U
return get_name_ids(first.(ps), names)
end | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 5043 | using ModelingToolkit, OrdinaryDiffEq, ForwardDiff, DiffEqCallbacks, LinearAlgebra, Test
function make_model(input)
@parameters t
@parameters k, c, m
D = Differential(t)
@variables pos(t) vel(t)
eqs = [D(pos) ~ vel,
D(vel) ~ (-1 / m) * (c * vel + k * pos - input(t))
]
ps = [k, c, m] .=> [2.0, 1.0, 4.0]
ics = [pos, vel] .=> [1.0, 0.0]
od = ODESystem(eqs, t, first.(ics), first.(ps), defaults=merge(Dict(first.(ics) .=> last.(ics)), Dict(first.(ps) .=> last.(ps))), name=:mass_spring
)
tspan = (0.0, 100.0)
# prob = ODEProblem(od, ics, tspan, ps)
return od, ics, tspan, ps
end
od, ics, tspan, ps = make_model(t -> 0.0)
"""
take a log transform of the od parameter space in two ways: at the level of the ODESystem and the level of the ODEProblem
"""
# to_fix = ["c2c","c2","c2a","c3c", "c1c", "a2"]
# tstrct_fix = fix_params(last.(ps), get_name_ids(ps, to_fix))
p0 = last.(ps)
tr = logabs_transform(p0)
log_od = transform_ODESystem(od, tr)
@test typeof(log_od) == ODESystem
prob1 = ODEProblem{true,SciMLBase.FullSpecialize}(od, [], tspan, [])
log_od2, log_ics2, log_ps2 = transform_problem(prob1, tr; unames=ModelingToolkit.get_states(od), pnames=ModelingToolkit.get_ps(od))
"""
check if the two manners of transforming the ODE system give the same output
"""
@test repr.(ModelingToolkit.get_ps(log_od)) == repr.(ModelingToolkit.get_ps(log_od2))
log_prob1 = ODEProblem{true,SciMLBase.FullSpecialize}(log_od, [], tspan, [])
log_prob2 = ODEProblem(log_od2, [], tspan, [])
sol1 = solve(log_prob1, Tsit5())
sol2 = solve(log_prob2, Tsit5())
@test sol1[end] == sol2[end]
"""
check if log transforming the cost function on od gives the same result as an untransformed cost function on log_od
"""
tsteps = tspan[1]:1.0:tspan[end]
nom_sol = solve(prob1, Tsit5())
function build_loss(which_sol::ODESolution)
function retf(p )
sol = solve(which_sol.prob, Tsit5(), p=p, saveat=which_sol.t, u0=convert.(eltype(p), which_sol.prob.u0))
return sum(sol.u - which_sol.u) do unow
sum(x -> x^2, unow)
end
end
return retf
end
function build_loss_gradient(which_sol::ODESolution)
straight_loss = build_loss(which_sol)
function retf(p, grad)
ForwardDiff.gradient!(grad, straight_loss, p)
return straight_loss(p)
end
return retf
end
cost1 = build_loss(nom_sol)
cost1_grad = build_loss_gradient(nom_sol)
nom_cost = DiffCost(cost1, cost1_grad)
cost2 = build_loss(sol1)
cost2_grad = build_loss_gradient(sol1)
log_cost = DiffCost(cost2, cost2_grad)
@test nom_cost(p0) == log_cost(log.(p0))
tr_cost, newp0 = transform_cost(nom_cost, p0, tr)
@test tr_cost(newp0) == log_cost(log.(p0))
grad_holder = deepcopy(p0)
g2 = deepcopy(grad_holder)
"""
test that summing losses works
"""
ll = sum_losses([nom_cost, nom_cost], p0)
@test ll(p0 .+ 1.0, grad_holder) == 2nom_cost(p0 .+ 1, g2)
@test grad_holder == 2g2
"""
test gradients of cost functions are zero at minimum as a proxy for correctness of their gradients
"""
for el in (log_cost, tr_cost)
el(newp0, grad_holder)
@test norm(grad_holder) < 1e-2 # 0 gradient at minimum
end
"""
test that mdc curve evolves, and listens to mdc_callbacks
"""
H0 = ForwardDiff.hessian(tr_cost, newp0)
mom = 1.0
span = (-2.0, 1.0);
newdp0 = (eigen(H0)).vectors[:, 1]
eprob = MDCProblem(log_cost, newp0, newdp0, mom, span);
cb = [
Verbose([CurveDistance(0.1:0.1:2.0), HamiltonianResidual(2.3:4:10)]),
ParameterBounds([1, 3], [-10.0, -10.0], [10.0, 10.0])
]
@time mdc = evolve(eprob, Tsit5; mdc_callback=cb);
"""
check saving callback saves data from interrupted computation.
Keyword saved_values holds two objects (for two directions) where states and time points are saved.
"""
span_long = (-20.0, 19.0);
eprob_long = MDCProblem(log_cost, newp0, newdp0, mom, span_long);
cb = [Verbose([CurveDistance(0.1:0.1:2.0)])]
saved_values = (SavedValues(Float64, Vector{Float64}), SavedValues(Float64, Vector{Float64}))
mdc = evolve(eprob_long, Tsit5; mdc_callback=cb, saved_values);
#@show saved_values;
"""
check mdc works with mdc_callback vector of subtype T <: CallbackCallable, strict subtype.
"""
cb = [
Verbose([CurveDistance(0.1:1:10), HamiltonianResidual(2.3:4:10)])
]
@time mdc = evolve(eprob, Tsit5; mdc_callback=cb);
"""
test MDC works and gives reasonable output
"""
@test log_cost(mdc.sol[1][1:3]) < 1e-3
@test log_cost(mdc.sol[end][1:3]) < 1e-3
"""
test injection loss works OK
"""
l2 = build_injection_loss(prob1, Tsit5(), tsteps)
@test l2(p0, grad_holder) == 0
@test norm(grad_holder) < 1e-5
"""
test fixing parameters (i.e. a transformation that changes the number of parameters) works ok
"""
to_fix = ["c"]
trf = fix_params(last.(ps), get_name_ids(ps, to_fix))
de = MinimallyDisruptiveCurves.transform_ODESystem(od, trf)
@test length(ModelingToolkit.get_ps(de)) == 2
"""
test jumpstarting works
"""
jprob = jumpstart(eprob, 1e-2, true)
mdcj = evolve(jprob, Tsit5; mdc_callback=cb);
@test log_cost(mdcj.sol[1][1:3]) < 1e-3 | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | code | 123 | using MinimallyDisruptiveCurves
using Test
@testset "mass_spring" begin
include("mass_spring.jl")
end | MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.3.3 | 2e9f403933df6501557f38da5c4cd02357f47119 | docs | 1060 | # MinimallyDisruptiveCurves
This is a toolbox implementing the algorithm introduced in [1]. **Documentation, examples, and user guide are found [here](https://dhruva2.github.io/MinimallyDisruptiveCurves.docs/).**
The package is a model analysis tool. It finds functional relationships between model parameters that best preserve model behaviour.
- You provide a differentiable cost function that maps parameters to 'how bad the model behaviour is'. You also provide a locally optimal set of parameters θ*.
- The package will generate curves in parameter space, emanating from θ*. Each point on the curve corresponds to a set of model parameters. These curves are 'minimally disruptive' with respect to the cost function (i.e. model behaviour).
- These curves can be used to better understand interdependencies between model parameters, as detailed in the documentation.
[1] Raman, Dhruva V., James Anderson, and Antonis Papachristodoulou. "Delineating parameter unidentifiabilities in complex models." Physical Review E 95.3 (2017): 032314.
| MinimallyDisruptiveCurves | https://github.com/SciML/MinimallyDisruptiveCurves.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 571 | using Documenter, RAFF
push!(LOAD_PATH, "../test/scripts/")
makedocs(
assets = ["assets/favicon.ico"],
sitename = "RAFF- Robust Algebraic Fitting Function",
pages = ["Overview" => "index.md",
"Tutorial"=> "tutorial.md",
"Examples" => "examples.md",
"API" => "api.md",
"Advanced" => "advanced.md",
"Random generation" => "randomgeneration.md"],
#html_prettyurls = false
#format = Documenter.HTML(prettyurls = false),
modules = [RAFF]
)
deploydocs(
repo = "github.com/fsobral/RAFF.jl.git"
)
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 418 | using RAFF
#
# This example is the "Basic Usage" example in the documentation
#
# Matrix with data
A=[-2.0 5.0;
-1.5 3.25;
-1.0 2.0 ;
-0.5 1.25;
0.0 1.0 ;
0.5 2.55;
1.0 2.0 ;
1.5 3.25;
2.0 5.0 ;]
# Define the model to fit data
model(x, θ) = θ[1] * x[1]^2 + θ[2]
# Number of parameters in the model
n = 2
# Run RAFF
output = raff(model, A, n)
# Print output
println(output)
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1481 | using RAFF
using DelimitedFiles
using PyPlot
#
# This example shows how solve a problem given by data in a
# file. Also, we show how to use the `model_list` utility structure to
# retrieve pre-defined models in RAFF.
#
# Retrieve the number of parameters, the model function and the model
# function string (not used) for the "Cubic" model.
n, model, modelstr = RAFF.model_list["cubic"]
# Read data from file
open("C1.txt") do fp
global data = readdlm(fp)
end
# Set starting point
initguess = [0.0, 0.0, 0.0, 0.0]
# Set the number of multistart iterations.
maxms = 10
# Call RAFF. In this case, the model is not a multivariate one. The
# last column of the file indicates which observation is the outlier.
rsol = raff(model, data[:, 1:end - 1], n; MAXMS=maxms, initguess=initguess)
println(rsol)
# Now we plot the solution of the problem
x = data[:, 1]
y = data[:, 2]
c = data[:, 3]
# Plot the obtained solution
modl1 = (x) -> model(x, rsol.solution)
t = minimum(x):0.01:maximum(x)
PyPlot.plot(t, modl1.(t), "r", label="RAFF")
# Plot the 'true' solutios, i. e., the one use to generate the
# problem.
θSol = [2.0, 0, -4.0, -10]
modl2 = (x) -> model(x, θSol)
PyPlot.plot(t, modl2.(t), "b--", label="True Solution")
PyPlot.legend(loc=4)
# Color the obtained outliers
c[rsol.outliers] .= 5.0
# Plot all the points
PyPlot.scatter(x, y, c=c, marker="o", s=50.0, linewidths=0.2,
cmap=PyPlot.cm."Paired", alpha=0.6)
PyPlot.show()
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 3839 | # This example shows how to use RAFF.jl to detect circles drawn from
# the user. This example was inspired in `GtkReactive.jl` drawing
# example.
# Packages needed
#
# add Gtk
# add GtkReactive
# add Graphics
# add Colors
using Gtk, Gtk.ShortNames, GtkReactive, Graphics, Colors
using RAFF
win = Window("Drawing") |> (bx = Box(:v))
set_gtk_property!(bx, :spacing, 10)
cb = GtkReactive.dropdown(["RAFF", "Least Squares"])
push!(bx, cb)
c = canvas(UserUnit, 200, 200) # create a canvas with user-specified coordinates
push!(bx, c)
const moddraw = Signal([]) # the model points
const newline = Signal([]) # the in-progress line (will be added to list above)
const drawing = Signal(false) # this will become true if we're actively dragging
choice = map(x->(push!(moddraw, []);value(x)), cb)
function run_raff()
n, model, = RAFF.model_list["circle"]
np, = size(value(newline))
data = Array{Float64, 2}(undef, np, 3)
l = value(newline)
for i in 1:np
data[i,1] = l[i].x
data[i,2] = l[i].y
data[i,3] = 0.0
end
r = if value(choice) == "RAFF"
raff(model, data, n, ftrusted=0.7, MAXMS=10)
else
raff(model, data, n, ftrusted=1.0, MAXMS=10)
end
println(r)
# Build points for drawing the circle
p = (α) -> [abs(r.solution[3]) * cos(α) + r.solution[1], abs(r.solution[3]) * sin(α) + r.solution[2]]
push!(moddraw, [p(i) for i in LinRange(0.0, 2 * π, 100)])
end
# If has changed the selection box, then run RAFF again.
sigc = map(choice) do c
run_raff()
end
sigstart = map(c.mouse.buttonpress) do btn
# This is the beginning of the function body, operating on the argument `btn`
if btn.button == 1 && btn.modifiers == 0 # is it the left button, and no shift/ctrl/alt keys pressed?
push!(drawing, true) # activate dragging
push!(newline, [btn.position]) # initialize the line with the current position
push!(moddraw, [])
end
end
const dummybutton = MouseButton{UserUnit}()
# See the Reactive.jl documentation for `filterwhen`
sigextend = map(filterwhen(drawing, dummybutton, c.mouse.motion)) do btn
# while dragging, extend `newline` with the most recent point
push!(newline, push!(value(newline), btn.position))
end
sigend = map(c.mouse.buttonrelease) do btn
if btn.button == 1
push!(drawing, false) # deactivate dragging
run_raff()
end
end
function drawline(ctx, l, color)
isempty(l) && return
p = first(l)
move_to(ctx, p.x, p.y)
set_source(ctx, color)
for i = 2:length(l)
p = l[i]
line_to(ctx, p.x, p.y)
end
stroke(ctx)
end
function drawcircle(ctx, l, color)
isempty(l) && return
p = first(l)
move_to(ctx, p[1], p[2])
set_source(ctx, color)
for i = 2:length(l)
p = l[i]
line_to(ctx, p[1], p[2])
end
stroke(ctx)
end
# Because `draw` isn't a one-line function, we again use do-block syntax:
redraw = draw(c, moddraw, newline) do cnvs, circ, newl # the function body takes 3 arguments
    fill!(cnvs, colorant"white")                       # set the background to white
    set_coordinates(cnvs, BoundingBox(0, 1, 0, 1))     # set coordinates to 0..1 along each axis
    ctx = getgc(cnvs)                                  # gets the "graphics context" object (see Cairo/Gtk)
    drawcircle(ctx, circ, colorant"blue")              # draw old lines in blue
    drawline(ctx, newl, colorant"red")                 # draw new line in red
end

showall(win)

# If we are not in a REPL, block until the window is destroyed; otherwise
# the script would exit immediately and tear down the GUI.
if (!isinteractive())

    # Create a condition object
    c = Condition()

    # Get the window
    # win = guidict["gui"]["window"]

    # Notify the condition object when the window closes
    signal_connect(win, :destroy) do widget
        notify(c)
    end

    # Wait for the notification before proceeding ...
    wait(c)

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 770 | using RAFF
#
# This example is the "Multivariate" example in the documentation. It
# also shows some parameters of RAFF. To check all the possible
# parameters, please refer to the documentation.
#

# Matrix with data. Now the experiments has two variables; each row is
# (x1, x2, observed value).
data = [1.0 1.0    2.0
        0.0 0.0    4.0
        7.0 1.5   -4.5
        2.0 2.0  -17.0 # outlier
        0.0 8.6   -4.6]

# Define the model to fit data
model(x, θ) = θ[1] * x[1] + θ[2] * x[2] + θ[3]

# Number of parameters in the model (dimension of θ)
n = 3

# Run RAFF. Uncomment the other piece of code, in order to run RAFF
# with different options.
# output = raff(model, data, 3; MAXMS=1, initguess=[0.5,0.5,0.5], ε=1.0e-10)
output = raff(model, data, n)

# Print output
println(output)
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 692 | using RAFF
using Distributed
#
# This example is the "Parallel running" example from the
# documentation.
#

# Add 3 worker processes
addprocs(3)

# Distribute RAFF, the model and its gradient (the gradient is not
# mandatory, just an example)
@everywhere using RAFF

# Model: parabola with a vertical shift.
@everywhere function model(x, θ)
    θ[1] * x[1]^2 + θ[2]
end

# Hand-coded gradient of the model with respect to θ.
@everywhere function gmodel!(g, x, θ)
    g[1] = x[1]^2
    g[2] = 1.0
end

# Define the data matrix: each row is (x, observed value).
A = [-2.0 5.0;
     -1.5 3.25;
     -1.0 2.0 ;
     -0.5 1.25;
      0.0 1.0 ;
      0.5 2.55;
      1.0 2.0 ;
      1.5 3.25;
      2.0 5.0 ;];

# Number of parameters of the model
n = 2

# Run Parallel RAFF
output = praff(model, gmodel!, A, n)

# Print output
println(output)
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 998 | using DelimitedFiles
using PyPlot
using RAFF
# Plot a comparison of the solutions found by RAFF, the true solution and
# the classical least-squares solution for the logistic test problem.
datafile = "/tmp/output.txt"

# First line of the file holds the problem dimension; the remaining lines
# hold the data (x, y, outlier flag).
fp = open(datafile, "r")
N = parse(Int, readline(fp))
M = readdlm(fp)
close(fp)

x = M[:, 1]
y = M[:, 2]
c = M[:, 3] # outlier flag: 1.0 marks a point generated as an outlier

# Solution found by RAFF.
sol = [779.616, 5315.36, 0.174958, 2.52718]
# True solution used to generate the data (defined BEFORE any closure that
# uses it, for clarity; the original script assigned it after `modl2`).
θSol = [1000.0, 5000.0, 0.2, 3.0]
# Solution found by traditional least squares.
θLS = [-959.07, 8151.03, 0.0927191, 0.940711]

n, model, modelstr = RAFF.model_list["logistic"]

modl1 = (x) -> model(x, sol)
modl2 = (x) -> model(x, θSol)
modl3 = (x) -> model(x, θLS)

t = minimum(x):0.01:maximum(x)

PyPlot.plot(t, modl1.(t), "r", label="RAFF")
PyPlot.plot(t, modl2.(t), "b--", label="True")
PyPlot.plot(t, modl3.(t), "g-.", label="Least Squares")
PyPlot.legend(loc=4)

# Non-outliers as circles, outliers as triangles.
PyPlot.scatter(x[c .== 0.0], y[c .== 0.0], color=PyPlot.cm."Paired"(0.1), marker="o",
               s=50.0, linewidths=0.2, alpha=1.0)
PyPlot.scatter(x[c .== 1.0], y[c .== 1.0], color=PyPlot.cm."Paired"(0.5), marker="^",
               s=50.0, linewidths=0.2, alpha=1.0)

# Save BEFORE showing: `show` blocks and may clear the current figure, so the
# original order saved an empty image. Matplotlib's keyword is lowercase
# `dpi`; the previous `DPI=100` raised an error in recent Matplotlib versions.
PyPlot.savefig("/tmp/figure.png", dpi=100)
PyPlot.show()
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 19550 | __precompile__(false)
"""
`RAFF.jl` is a Jula package.
"""
module RAFF
# Dependencies
using Distributed
using ForwardDiff
using LinearAlgebra
using Statistics
using Printf
using Random
using SharedArrays
using Logging
export lmlovo, raff, praff
# Set RAFF logger
raff_logger = ConsoleLogger(stdout, Logging.Error)
lm_logger = ConsoleLogger(stdout, Logging.Error)
# Load code
include("raffoutput.jl")
include("utils.jl")
include("dutils.jl")
include("generator.jl")
"""
lmlovo(model::Function [, θ::Vector{Float64} = zeros(n)], data::Array{Float64, 2},
n::Int, p::Int [; kwargs...])
lmlovo(model::Function, gmodel!::Function [, θ::Vector{Float64} = zeros(n)],
data::Array{Float64,2}, n::Int, p::Int [; MAXITER::Int=200,
ε::Float64=10.0^-4])
Fit the `n`-parameter model `model` to the data given by matrix
`data`. The strategy is based on the LOVO function, which means that
only `p` (0 < `p` <= rows of `data`) points are trusted. The
Levenberg-Marquardt algorithm is implemented in this version.
Matriz `data` is the data to be fit. This matrix should be in the form
x11 x12 ... x1N y1
x21 x22 ... x2N y2
:
where `N` is the dimension of the argument of the model
(i.e. dimension of `x`).
If `θ` is provided, then it is used as the starting point.
The signature of function `model` should be given by
model(x::Union{Vector{Float64}, SubArray}, θ::Vector{Float64})
where `x` are the variables and `θ` is a `n`-dimensional vector of
parameters. If the gradient of the model `gmodel!`
gmodel! = (g::SubArray, x::Union{Vector{Float64}, SubArray},
θ::Vector{Float64})
is not provided, then the function ForwardDiff.gradient! is called to
compute it. **Note** that this choice has an impact in the
computational performance of the algorithm. In addition, if
`ForwardDiff.jl` is being used, then one **MUST** remove the signature
of vector `θ` from function `model`.
The optional arguments are
- `MAXITER`: maximum number of iterations
- `ε`: tolerance for the gradient of the function
Returns a [`RAFFOutput`](@ref) object.
"""
function lmlovo(model::Function, gmodel!::Function, θ::Vector{Float64},
data::Array{Float64,2}, n::Int, p::Int;
MAXITER::Int=200, ε::Float64=10.0^-4)
@assert(n > 0, "Dimension should be positive.")
@assert(p >= 0, "Trusted points should be nonnegative.")
npun, = size(data)
with_logger(lm_logger) do
@debug("Size of data matrix ", size(data))
end
# Counters for calls to F and its Jacobian
nf = 0
nj = 0
(p == 0) && return RAFFOutput(1, θ, 0, p, 0.0, nf, nj, [1:npun;])
# Main function - the LOVO function
LovoFun = let
npun_::Int = npun
ind::Vector{Int} = Vector{Int}(undef, npun_)
F::Vector{Float64} = Vector{Float64}(undef, npun_)
p_::Int = p
# Return a ordered set index and lovo value
(θ) -> begin
nf += 1
@views for i = 1:npun_
F[i] = (model(data[i,1:(end - 1)], θ) - data[i, end])^2
end
indF, orderedF = sort_fun!(F, ind, p_)
return indF, sum(orderedF)
end
end
# Residue and Jacobian of residue
val_res::Vector{Float64} = Vector{Float64}(undef, p)
jac_res::Array{Float64, 2} = Array{Float64}(undef, p, n)
# This function returns the residue and Jacobian of residue
ResFun!(θ::Vector{Float64}, ind, r::Vector{Float64},
rJ::Array{Float64, 2}) = begin
nj += 1
for (k, i) in enumerate(ind)
x = data[i, 1:(end - 1)]
r[k] = model(x, θ) - data[i, end]
v = @view(rJ[k, :])
gmodel!(v, x, θ)
end
end
# Levenberg-Marquardt algorithm
# -----------------------------
Id = Matrix(1.0I, n, n)
# Status = 1 means success
status = 1
# Parameters
λ_up = 2.0
λ_down = 2.0
λ = 1.0
maxoutind = min(p, 5)
# Allocation
θnew = Vector{Float64}(undef, n)
d = Vector{Float64}(undef, n)
y = Vector{Float64}(undef, n)
G = Array{Float64, 2}(undef, n, n)
grad_lovo = Vector{Float64}(undef, n)
# Initial steps
ind_lovo, best_val_lovo = LovoFun(θ)
ResFun!(θ, ind_lovo, val_res, jac_res)
BLAS.gemv!('T', 1.0, jac_res, val_res, 0.0, grad_lovo)
ngrad_lovo = norm(grad_lovo, 2)
safecount = 1
# Main loop
while (ngrad_lovo >= ε) && (safecount < MAXITER)
with_logger(lm_logger) do
@info("Iteration $(safecount)")
@info(" Current value: $(best_val_lovo)")
@info(" ||grad_lovo||_2: $(ngrad_lovo)")
@info(" Current iterate: $(θ)")
@info(" Best indices (first $(maxoutind)): $(ind_lovo[1:maxoutind])")
@info(" lambda: $(λ)")
end
G .= Id
BLAS.gemm!('T', 'N', 1.0, jac_res, jac_res, λ, G)
F = qr!(G)
#F = cholesky!(G, Val(true))
ad = try
ldiv!(d, F, grad_lovo)
d .*= -1.0
catch
"error"
end
if ad == "error" #restarting if lapack fails
with_logger(lm_logger) do
@warn "Failed to solve the linear system. Will try new point."
d .= - 1.0 .* grad_lovo
θ .= rand(n)
end
else
d .= ad
end
θnew .= θ .+ d
ind_lovo, val_lovo = LovoFun(θnew)
if val_lovo <= best_val_lovo
θ .= θnew
best_val_lovo = val_lovo
λ = λ / λ_down
ResFun!(θ, ind_lovo, val_res, jac_res)
BLAS.gemv!('T', 1.0, jac_res, val_res, 0.0, grad_lovo)
ngrad_lovo = norm(grad_lovo, 2)
with_logger(lm_logger) do
@info(" Better function value found, lambda changed to $(λ).")
end
else
λ = λ * λ_up
with_logger(lm_logger) do
@info(" No improvement, lambda changed to $(λ).")
end
end
safecount += 1
end
if safecount == MAXITER
with_logger(lm_logger) do
@info("No solution was found in $(safecount) iterations.")
end
status = 0
end
# TODO: Create a test for this case
if isnan(ngrad_lovo)
with_logger(lm_logger) do
@info("Incorrect value for gradient norm $(ngrad_lovo).")
end
status = 0
end
outliers = [1:npun;]
setdiff!(outliers, ind_lovo)
with_logger(lm_logger) do
@info("""
Final iteration (STATUS=$(status))
Solution found: $(θ)
||grad_lovo||_2: $(ngrad_lovo)
Function value: $(best_val_lovo)
Number of iterations: $(safecount)
Outliers: $(outliers)
""")
end
return RAFFOutput(status, θ, safecount, p, best_val_lovo, nf, nj, outliers)
end
function lmlovo(model::Function, θ::Vector{Float64}, data::Array{Float64,2},
                n::Int, p::Int; kwargs...)

    # Build the gradient of the model with forward-mode automatic
    # differentiation. A closure over the current point `x_` replaces the
    # previous non-constant `global x`, which was type-unstable and unsafe
    # if several solves run concurrently.
    grad_model!(g, x_, θ) = ForwardDiff.gradient!(g, θ -> model(x_, θ), θ)

    return lmlovo(model, grad_model!, θ, data, n, p; kwargs...)

end
# Convenience methods: start the optimization from the origin when no
# initial point is given.
lmlovo(model::Function, gmodel!::Function, data::Array{Float64,2}, n::Int, p::Int; kwargs...) =
    lmlovo(model, gmodel!, zeros(Float64, n), data, n, p; kwargs...)

lmlovo(model::Function, data::Array{Float64,2}, n::Int, p::Int; kwargs...) =
    lmlovo(model, zeros(Float64, n), data, n, p; kwargs...)
"""
raff(model::Function, data::Array{Float64, 2}, n::Int; kwargs...)
raff(model::Function, gmodel!::Function, data::Array{Float64, 2},
n::Int; MAXMS::Int=1, SEEDMS::Int=123456789,
initguess::Vector{Float64}=zeros(Float64, n),
ε::Float64=1.0e-4, noutliers::Int=-1, ftrusted::Union{Float64,
Tuple{Float64, Float64}}=0.5)
Robust Algebric Fitting Function (RAFF) algorithm. This function uses
a voting system to automatically find the number of trusted data
points to fit the `model`.
- `model`: function to fit data. Its signature should be given by
model(x, θ)
where `x` is the multidimensional argument and `θ` is the
`n`-dimensional vector of parameters
- `gmodel!`: gradient of the model function. Its signature should be
given by
gmodel!(g, x, θ)
where `x` is the multidimensional argument, `θ` is the
`n`-dimensional vector of parameters and the gradient is written
in `g`.
- `data`: data to be fit. This matrix should be in the form
x11 x12 ... x1N y1
x21 x22 ... x2N y2
:
where `N` is the dimension of the argument of the model
(i.e. dimension of `x`).
- `n`: dimension of the parameter vector in the model function
The optional arguments are
- `MAXMS`: number of multistart points to be used
- `SEEDMS`: integer seed for random multistart points
- `initialguess`: a good guess for the starting point and for
generating random points in the multistart strategy
- `ε`: gradient stopping criteria to `lmlovo`
- `noutliers`: integer describing the maximum expected number of
outliers. The default is *half*. *Deprecated*.
- `ftrusted`: float describing the minimum expected percentage of
trusted points. The default is *half* (0.5). Can also be a
Tuple of the form `(fmin, fmax)` percentages of trusted points.
Returns a [`RAFFOutput`](@ref) object with the best parameter found.
"""
function raff(model::Function, gmodel!::Function,
data::Array{Float64, 2}, n::Int; MAXMS::Int=1,
SEEDMS::Int=123456789, initguess::Vector{Float64}=zeros(Float64,
n), ε::Float64=1.0e-4, noutliers::Int=-1,
ftrusted::Union{Float64, Tuple{Float64, Float64}}=0.5)
np, = size(data)
if noutliers != -1
Base.depwarn("Optional argument `noutliers::Int` is deprecated and will be removed from future releases. Use `ftrusted::Float` or `itrusted::Tuple{Float, Float}` instead.", :raff)
ftrusted = (noutliers >= 0) ? max(0, np - noutliers) / np : 0.5
end
# Initialize random generator
seedMS = MersenneTwister(SEEDMS)
# Define interval for trusted points
pliminf, plimsup = try
check_ftrusted(ftrusted, np)
catch e
with_logger(raff_logger) do
@error("Error in optional parameter `ftrusted`.", e)
end
return RAFFOutput()
end
lv = plimsup - pliminf + 1
sols = Vector{RAFFOutput}(undef, lv)
for i = pliminf:plimsup
vbest = RAFFOutput(initguess, i)
ind = i - pliminf + 1
for j = 1:MAXMS
with_logger(raff_logger) do
@debug("Running lmlovo for p = $(i). Repetition $(j).")
end
# Starting point
θ = randn(seedMS, Float64, n)
θ .= θ .+ initguess
# Call function and store results
sols[ind] = lmlovo(model, gmodel!, θ, data, n, i; ε=ε, MAXITER=400)
# Update the best point and functional value
(sols[ind].status == 1) && (sols[ind].f < vbest.f) && (vbest = sols[ind])
end
sols[ind] = vbest
with_logger(raff_logger) do
@debug("Best solution for p = $(i).", vbest.solution)
end
end
# Count the total number of iterations, and function and Jacobian
# evaluations.
nf = 0
nj = 0
ni = 0
for s in sols
nf += s.nf
nj += s.nj
ni += s.iter
end
# Apply the filter and the voting strategy to all the solutions
# found
votsis = with_logger(raff_logger) do
voting_strategy(model, data, sols, pliminf, plimsup)
end
mainind = findlast(x->x == maximum(votsis), votsis)
s = sols[mainind]
return RAFFOutput(s.status, s.solution, ni, s.p, s.f, nf, nj, s.outliers)
end
function raff(model::Function, data::Array{Float64, 2}, n::Int; kwargs...)

    # Gradient of the model via ForwardDiff, closing over the current
    # point `x_`. This replaces the previous non-constant `global x`,
    # which was type-unstable and unsafe under concurrent use.
    grad_model!(g, x_, θ) = ForwardDiff.gradient!(g, θ -> model(x_, θ), θ)

    return raff(model, grad_model!, data, n; kwargs...)

end
"""
praff(model::Function, data::Array{Float64, 2}, n::Int; kwargs...)
praff(model::Function, gmodel!::Function, data::Array{Float64, 2},
n::Int; MAXMS::Int=1, SEEDMS::Int=123456789, batches::Int=1,
initguess::Vector{Float64}=zeros(Float64, n),
ε::Float64=1.0e-4, noutliers::Int=-1, ftrusted::Union{Float64,
Tuple{Float64, Float64}}=0.5)
Multicore distributed version of RAFF. See the description of the
[`raff`](@ref) function for the main (non-optional) arguments. All the
communication is performed by channels.
This function uses all available **local** workers to run RAFF
algorithm. Note that this function does not use *Tasks*, so all the
parallelism is based on the [Distributed](https://docs.julialang.org/en/latest/manual/parallel-computing/#Multi-Core-or-Distributed-Processing-1) package.
The optional arguments are
- `MAXMS`: number of multistart points to be used
- `SEEDMS`: integer seed for random multistart points
- `batches`: size of batches to be send to each worker
- `initguess`: starting point to be used in the multistart procedure
- `ε`: stopping tolerance
- `noutliers`: integer describing the maximum expected number of
outliers. The default is *half*. *Deprecated*.
- `ftrusted`: float describing the minimum expected percentage of
trusted points. The default is *half* (0.5). Can also be a
Tuple of the form `(fmin, fmax)` percentages of trusted points.
Returns a [`RAFFOutput`](@ref) object containing the solution.
"""
function praff(model::Function, gmodel!::Function,
               data::Array{Float64, 2}, n::Int; MAXMS::Int=1,
               SEEDMS::Int=123456789, batches::Int=1,
               initguess::Vector{Float64}=zeros(Float64, n),
               ε::Float64=1.0e-4, noutliers::Int=-1,
               ftrusted::Union{Float64, Tuple{Float64, Float64}}=0.5)

    np, = size(data)

    # Translate the deprecated `noutliers` argument into `ftrusted`.
    if noutliers != -1

        # Fixed: the deprecation warning previously reported `:raff`
        # instead of this function's own name.
        Base.depwarn("Optional argument `noutliers::Int` is deprecated and will be removed from future releases. Use `ftrusted::Float` or `itrusted::Tuple{Float, Float}` instead.", :praff)

        ftrusted = (noutliers >= 0) ? max(0, np - noutliers) / np : 0.5

    end

    # Initialize random generator
    seedMS = MersenneTwister(SEEDMS)

    # Define interval for trusted points
    pliminf, plimsup = try

        check_ftrusted(ftrusted, np)

    catch e

        with_logger(raff_logger) do
            @error("Error in optional parameter `ftrusted`.", e)
        end

        return RAFFOutput()

    end

    lv = plimsup - pliminf + 1

    # Create a RemoteChannel to receive solutions
    bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(div(lv, 2)))

    # TODO: Check a smart way for not creating a large channel
    squeue = RemoteChannel(() -> Channel{RAFFOutput}(lv))

    # Create another channel to assign tasks
    tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

    # This command selects only nodes which are local to myid()
    curr_workers = workers()

    futures = Vector{Future}(undef, length(curr_workers))

    # Start updater Task
    # This task is not needed up to now.
    # @async with_logger(()-> update_best(bqueue, bestx), raff_logger)

    # Start workers Tasks (CPU intensive)
    with_logger(raff_logger) do
        @debug("Workers", curr_workers)
    end

    for (i, t) in enumerate(curr_workers)

        futures[i] = @spawnat(t, with_logger( ()-> try

            @debug("Creating worker $(t).")

            consume_tqueue(bqueue, tqueue, squeue,
                           model, gmodel!, data, n, pliminf,
                           plimsup, MAXMS, seedMS, initguess)
        catch e

            @error("Unable to start worker $(t).", e)

        end, raff_logger
        ))

        with_logger(raff_logger) do
            @debug("Created worker $(t).")
        end

    end

    # Check asynchronously if there is at least one live worker
    @async with_logger(
        () -> check_and_close(bqueue, tqueue, squeue, futures),
        raff_logger)

    # Populate the task queue with jobs
    for p = pliminf:batches:plimsup

        try

            put!(tqueue, p:min(plimsup, p + batches - 1))

        catch e

            with_logger(raff_logger) do
                @warn("Tasks queue prematurely closed while inserting tasks. Will exit.")
            end

            break

        end

        with_logger(raff_logger) do
            @debug("Added problem $(p) to tasks queue.")
        end

    end

    # The task queue can be closed, since all the problems have been
    # read, due to the size 0 of this channel
    close(tqueue)

    with_logger(raff_logger) do
        @debug("Waiting for workers to finish.")
    end

    # Create a vector of solutions to store the results from workers
    sols = Vector{RAFFOutput}(undef, lv)

    for i in 1:lv

        try

            rout = take!(squeue)

            sols[rout.p - pliminf + 1] = rout

            with_logger(raff_logger) do
                @debug("Stored solution for p=$(rout.p).")
            end

        catch e

            with_logger(raff_logger) do
                @error("Error when retrieving solutions.", e)
            end

        end

    end

    close(bqueue)

    close(squeue)

    # Count the total number of iterations, and function and Jacobian
    # evaluations.
    nf = 0
    nj = 0
    ni = 0

    for s in sols
        nf += s.nf
        nj += s.nj
        ni += s.iter
    end

    # Apply the filter and the voting strategy to all the solutions
    # found
    votsis = with_logger(raff_logger) do
        voting_strategy(model, data, sols, pliminf, plimsup)
    end

    # Winner: the largest p among those with the maximum number of votes.
    mainind = findlast(x->x == maximum(votsis), votsis)

    s = sols[mainind]

    return RAFFOutput(s.status, s.solution, ni, s.p, s.f, nf, nj, s.outliers)

end
function praff(model::Function, data::Array{Float64, 2}, n::Int; kwargs...)

    # Gradient of the model via ForwardDiff, closing over the current
    # point `x_`. This replaces the previous non-constant `global x`,
    # which was type-unstable and especially unsafe in the parallel
    # setting this function is designed for.
    grad_model!(g, x_, θ) = ForwardDiff.gradient!(g, θ -> model(x_, θ), θ)

    return praff(model, grad_model!, data, n; kwargs...)

end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 6391 | # This file contains utility functions to deal with distributed
# computing
"""
update_best(channel::RemoteChannel, bestx::SharedArray{Float64, 1})
Listen to a `channel` for results found by lmlovo. If there is an
improvement for the objective function, the shared array `bestx` is
updated.
**Attention**: There might be an unstable state if there is a process
reading `bestx` while this function is updating it. This should not
be a problem, since it is used as a starting point.
**Attention 2**: this function is currently out of use.
"""
function update_best(channel::RemoteChannel, bestx::SharedArray{Float64, 1})

    @debug("Running updater.")

    n = length(bestx)

    # Number of solutions folded into the running average so far.
    N::Int = 0

    while isopen(channel)

        θ = try

            take!(channel)

        catch e

            # InvalidStateException means the channel was closed: stop.
            if isa(e, InvalidStateException)
                break
            end

            @warn("Something wrong when reading from channel. Will skip.", e)

            continue

        end

        @debug("Updater has read values from channel.")

        # Running average: bestx = (N * bestx + θ) / (N + 1).
        for i = 1:n

            bestx[i] = (N * bestx[i] + θ[i]) / (N + 1)

        end

        N += 1

    end

    @debug("Update channel closed. Exiting thread.")

end
"""
function consume_tqueue(bqueue::RemoteChannel, tqueue::RemoteChannel,
squeue::RemoteChannel, model::Function, gmodel!::Function,
data::Array{Float64, 2}, n::Int, pliminf::Int,
plimsup::Int, MAXMS::Int, seedMS::MersenneTwister)
This function represents one worker, which runs lmlovo in a multistart
fashion.
It takes a job from the RemoteChannel `tqueue` and runs `lmlovo`
function to it. It might run using a multistart strategy, if
`MAXMS>1`. It sends the best results found for each value obtained in
`tqueue` to channel `squeue`, which will be consumed by the main
process. All the other arguments are the same for [`praff`](@ref)
function.
"""
function consume_tqueue(bqueue::RemoteChannel, tqueue::RemoteChannel,
                        squeue::RemoteChannel,
                        model::Function, gmodel!::Function,
                        data::Array{Float64, 2}, n::Int, pliminf::Int,
                        plimsup::Int, MAXMS::Int,
                        seedMS::MersenneTwister, initguess::Vector{Float64})

    @debug("Started worker $(myid())")

    while isopen(tqueue)

        p = try

            take!(tqueue)

        catch e

            # InvalidStateException means the task queue was closed by
            # the main process: exit the consumption loop.
            if isa(e, InvalidStateException)
                break
            end

            @warn("Something wrong when reading task. Will skip task.", e)

            continue

        end

        @debug("Received task $(p)")

        # Sanity check: the received range must lie inside [pliminf, plimsup].
        if (p.start < pliminf) || (p.stop > plimsup) ||
           (length(p) == 0)

            @warn("Invalid value for task: $(p). Will skip task.")

            continue

        end

        for k in p

            wbest = RAFFOutput(k)

            nf = 0
            nj = 0
            ni = 0

            # Multi-start strategy
            for j = 1:MAXMS

                # New random starting point
                θ = randn(seedMS, n)
                θ .= θ .+ initguess

                # Call function and store results
                rout = lmlovo(model, gmodel!, θ, data, n, k)

                nf += rout.nf
                nj += rout.nj
                ni += rout.iter

                (rout.status == 1) && (rout.f < wbest.f) && (wbest = rout)

                # This block is related to a strategy of smart
                # starting points for the multistart
                # process. Currently, it makes no sense to use it.
                # if rout.f < wbest.f
                #     # Send asynchronously the result to channel if success
                #     if rout.status == 1
                #         @async try
                #             put!(bqueue, rout.solution)
                #             @debug("Added new point to queue.", rout.solution, rout.f)
                #         catch e
                #             @warn(string("Problems when saving best point found in queue. ",
                #                          "Will skip this step"), e)
                #         end
                #     end
                # end

            end

            @debug("Finished. p = $(k) and f = $(wbest.f).")

            # Rebuild the output with the accumulated counters and send it
            # to the solution queue.
            try

                wbest = RAFFOutput(wbest.status, wbest.solution, ni, wbest.p, wbest.f, nf, nj,
                                   wbest.outliers)

                put!(squeue, wbest)

            catch e

                if isa(e, InvalidStateException)

                    @warn("Solution queue prematurely closed. Unable to save solution for p=$(k).")

                    return

                end

                @warn("Something wrong when sending the solution to queue for p=$(k).", e)

            end

        end

    end

end
"""
check_and_close(bqueue::RemoteChannel, tqueue::RemoteChannel,
squeue::RemoteChannel, futures::Vector{Future};
secs::Float64=0.1)
Check if there is at least one worker process in the vector of
`futures` that has not prematurely finished. If there is no alive
worker, close task, solution and best queues, `tqueue`, `squeue` and
`bqueue`, respectively.
"""
function check_and_close(bqueue::RemoteChannel, tqueue::RemoteChannel,
                         squeue::RemoteChannel, futures::Vector{Future};
                         secs::Float64=0.1)

    @debug("Checking worker status.")

    # Start assuming every worker is alive and discount finished ones.
    alive = length(futures)

    for (idx, fut) in enumerate(futures)

        # A ready future means the worker task has already returned
        # (successfully or not), i.e. it finished prematurely.
        if timedwait(() -> isready(fut), secs) == :ok

            @warn("Worker $(idx) seems to have finished prematurely.",
                  fetch(fut))

            alive -= 1

        end

    end

    @debug("Workers online: $(alive)")

    # Only closes all queues if there are tasks to be completed
    if alive == 0 && isopen(tqueue)

        @warn("No live worker found. Will close queues and finish.")

        close(bqueue)
        close(tqueue)
        close(squeue)

    end

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 16884 | export generate_test_problems, generate_noisy_data!,
generate_noisy_data, generate_clustered_noisy_data,
generate_clustered_noisy_data!
"""
This dictionary represents the list of models used in the generation of random tests.
Return the tuple `(n, model, model_str)`, where
- `n` is the number of parameters of the model
- `model` is the model of the form `m(x, θ)`, where `x` are the
variables and `θ` are the parameters
- `model_str` is the string representing the model, used to build random generated problems
"""
const model_list = Dict(
"linear" => (2, (x, θ) -> θ[1] * x[1] + θ[2],
"(x, θ) -> θ[1] * x[1] + θ[2]"),
"cubic" => (4, (x, θ) -> θ[1] * x[1]^3 + θ[2] * x[1]^2 + θ[3] * x[1] + θ[4],
"(x, θ) -> θ[1] * x[1]^3 + θ[2] * x[1]^2 + θ[3] * x[1] + θ[4]"),
"expon" => (3, (x, θ) -> θ[1] + θ[2] * exp(- θ[3] * x[1]),
"(x, θ) -> θ[1] + θ[2] * exp(- θ[3] * x[1])"),
"logistic" => (4, (x, θ) -> θ[1] + θ[2] / (1.0 + exp(- θ[3] * x[1] + θ[4])),
"(x, θ) -> θ[1] + θ[2] / (1.0 + exp(- θ[3] * x[1] + θ[4]))"),
"circle" => (3, (x, θ) -> (x[1] - θ[1])^2 + (x[2] - θ[2])^2 - θ[3]^2,
"(x, θ) -> (x[1] - θ[1])^2 + (x[2] - θ[2])^2 - θ[3]^2"),
"ellipse" => (6, (x, θ) -> θ[1] * x[1]^2 + θ[2] * x[1] * x[2] + θ[3] * x[2]^2 +
θ[4] * x[1] + θ[5] * x[2] + θ[6],
"(x, θ) -> θ[1] * x[1]^2 + θ[2] * x[1] * x[2] + θ[3] * x[2]^2 + θ[4] * x[1] + θ[5] * x[2] + θ[6]")
)
"""
interval_rand!(x::Vector{Float64},
intervals::Vector{Tuple{Float64, Float64}})
Fill a vector `x` with uniformly distributed random numbers generated
in the interval given by `intervals`. It is assumed that `length(x) ==
length(intervals)`.
Throws an `ErrorException` if the dimension of `x` is smaller the
dimension of `intervals` or if the intervals are invalid.
"""
# Fill `x` with uniform random numbers drawn from the per-coordinate
# intervals in `intervals`; errors if `x` is too short or an interval has
# its lower bound above its upper bound. Returns `x`.
function interval_rand!(x::Vector{Float64},
                        intervals::Vector{Tuple{Float64, Float64}})

    nint = length(intervals)

    (length(x) < nint) &&
        error("Length of vector smaller than length of intervals.")

    # Validate every interval before drawing any number.
    for bounds in intervals
        (bounds[1] > bounds[2]) && error("Bad interval $(bounds[1])>$(bounds[2]).")
    end

    for i = 1:nint
        lo, hi = intervals[i]
        x[i] = lo + rand() * (hi - lo)
    end

    return x
end
"""
generate_test_problems(datFilename::String, solFilename::String,
model::Function, modelStr::String, n::Int, np::Int, p::Int;
x_interval::Tuple{Float64, Float64}=(-10.0, 10.0),
θSol::Vector{Float64}=10.0 * randn(n), std::Float64=200.0,
out_times::Float64=7.0)
generate_test_problems(datFilename::String, solFilename::String,
model::Function, modelStr::String, n::Int, np::Int, p::Int,
cluster_interval::Tuple{Float64, Float64};
x_interval::Tuple{Float64, Float64}=(-10.0, 10.0),
θSol::Vector{Float64}=10.0 * randn(n), std::Float64=200.0,
out_times::Float64=7.0)
Generate random data files for testing fitting problems.
- `datFilename` and `solFilename` are strings with the name of the
files for storing the random data and solution, respectively.
- `model` is the model function and `modelStr` is a string
representing this model function, e.g.
model = (x, θ) -> θ[1] * x[1] + θ[2]
modelStr = "(x, θ) -> θ[1] * x[1] + θ[2]"
where vector `θ` represents the parameters (to be found) of the
model and vector `x` are the variables of the model.
- `n` is the number of parameters
- `np` is the number of points to be generated.
- `p` is the number of trusted points to be used in the LOVO
approach.
If `cluster_interval` is provided, then generates outliers only in
this interval.
Additional parameters:
- `xMin`, `xMax`: interval for generating points in one dimensional
tests *Deprecated*
- `x_interval`: interval for generating points in one dimensional
tests
- `θSol`: true solution, used for generating perturbed points
- `std`: standard deviation
- `out_times`: deviation for outliers will be `out_times * std`.
"""
function generate_test_problems(datFilename::String,
    solFilename::String, model::Function, modelStr::String, n::Int,
    np::Int, p::Int; gn_kwargs...)

    # Generate uniformly distributed random data and write both files.
    vdata, θSol, outliers = generate_noisy_data(model, n, np, p;
                                                gn_kwargs...)

    write_test_problem_files(datFilename, solFilename, vdata, outliers,
                             θSol, modelStr, n, np)

end

function generate_test_problems(datFilename::String,
    solFilename::String, model::Function, modelStr::String, n::Int,
    np::Int, p::Int, x_interval::Tuple{Float64, Float64},
    cluster_interval::Tuple{Float64, Float64}; gn_kwargs...)

    # Generate data whose outliers are clustered in `cluster_interval`
    # and write both files.
    vdata, θSol, outliers = generate_clustered_noisy_data(model,
        n, np, p, x_interval, cluster_interval; gn_kwargs...)

    write_test_problem_files(datFilename, solFilename, vdata, outliers,
                             θSol, modelStr, n, np)

end

"""
    write_test_problem_files(datFilename, solFilename, vdata, outliers,
                             θSol, modelStr, n, np)

Write the generated data matrix `vdata` (with its `outliers` flagged) to
`datFilename` and the exact solution `θSol`/`modelStr` to `solFilename`.
Shared by both `generate_test_problems` methods, which previously
duplicated this code.
"""
function write_test_problem_files(datFilename, solFilename, vdata,
                                  outliers, θSol, modelStr, n, np)

    open(datFilename, "w") do data

        # Dimension of the domain of the function to fit
        @printf(data, "%d\n", 1)

        for k = 1:np

            @printf(data, "%20.15f %20.15f %1d\n",
                    vdata[k, 1], vdata[k, 2], Int(k in outliers))

        end

    end

    # Generate solution file
    open(solFilename, "w") do sol

        println(sol, n)        # number of variables

        println(sol, θSol)     # parameters

        println(sol, modelStr) # function expression

    end

end
"""
get_unique_random_points(np::Int, npp::Int)
Choose exactly `npp` unique random points from a set containing `np`
points. This function is similar to `rand(vector)`, but does not allow
repetitions.
If `npp` < `np`, returns all the `np` points. Note that this function
is not very memory efficient, since the process of selecting unique
elements involves creating several temporary vectors.
Return a vector with the selected points.
"""
# Allocating front-end for `get_unique_random_points!`: selects
# `min(npp, np)` unique random points from `1:np` and returns them in a
# freshly allocated vector. Invalid sizes yield an empty vector.
function get_unique_random_points(np::Int, npp::Int)

    # Check invalid arguments
    ((np <= 0) || (npp <= 0)) && return Vector{Int}()

    selection = Vector{Int}(undef, min(npp, np))

    return get_unique_random_points!(selection, np, npp)
end
"""
get_unique_random_points!(v::Vector{Int}, np::Int, npp::Int)
Choose exactly `npp` unique random points from a set containing `np`
points. This function is similar to `rand(vector)`, but does not allow
repetitions.
If `npp` < `np`, returns all the `np` points. Note that this function
is not very memory efficient, since the process of selecting unique
elements involves creating several temporary vectors.
Return the vector `v` provided as argument filled with the selected
points.
"""
# Fill `v` with `min(np, npp)` unique random points chosen from `1:np`.
# Returns `v`; does nothing for nonpositive sizes and throws
# `DimensionMismatch` when `v` is too short.
function get_unique_random_points!(v::Vector{Int}, np::Int, npp::Int)

    # Nothing to select for nonpositive sizes.
    if (np <= 0) || (npp <= 0)
        return v
    end

    remaining = min(np, npp)

    (length(v) < remaining) && throw(DimensionMismatch("Incorrect size for vector."))

    # Fast path: every point is selected.
    if np == remaining

        for i = 1:np
            v[i] = i
        end

        return v

    end

    pool = [1:np;]

    # Draw batches from the shrinking pool, keeping only the unique draws,
    # and fill `v` from the back until it is complete.
    while remaining > 0

        drawn = rand(pool, remaining)

        unique!(drawn)

        for i in drawn
            v[remaining] = i
            remaining -= 1
        end

        setdiff!(pool, drawn)

    end

    return v
end
"""
generate_noisy_data(model::Function, n::Int, np::Int, p::Int;
x_interval::Tuple{Float64, Float64}=(-10.0, 10.0),
θSol::Vector{Float64}=10.0 * randn(Float64, n),
std::Float64=200.0, out_times::Float64=7.0)
generate_noisy_data(model::Function, n::Int, np::Int, p::Int,
x_interval::Tuple{Float64, Float64})
generate_noisy_data(model::Function, n::Int, np::Int, p::Int,
θSol::Vector{Float64}, x_interval::Tuple{Float64, Float64})
Random generate a fitting one-dimensional data problem.
This function receives a `model(x, θ)` function, the number of parameters
`n`, the number of points `np` to be generated and the number of
trusted points `p`.
If the `n`-dimensional vector `θSol` is provided, then the exact
solution will not be random generated. The interval `[xMin, xMax]`
(*deprecated*) or `x_interval` for generating the values to evaluate
`model` can also be provided.
It returns a tuple `(data, θSol, outliers)` where
- `data`: (`np` x `3`) array, where each row contains `x` and
`model(x, θSol)`.
- `θSol`: `n`-dimensional vector with the exact solution.
- `outliers`: the outliers of this data set
"""
# Allocating front-end: creates the (np x 3) data matrix and the vector of
# outlier indices, then delegates to the in-place version.
function generate_noisy_data(model::Function, n::Int, np::Int,
                             p::Int; gn_kwargs...)

    dmatrix = Array{Float64}(undef, np, 3)

    # `np - p` points will be turned into outliers.
    chosen = Vector{Int}(undef, np - p)

    return generate_noisy_data!(dmatrix, chosen, model, n, np, p;
                                gn_kwargs...)
end
# Deprecated
# Deprecated signatures taking the interval as two positional Floats; use
# the tuple form instead.
@deprecate(generate_noisy_data(model::Function, n, np, p,
           xMin::Float64, xMax::Float64), generate_noisy_data(model, n, np,
           p, (xMin, xMax)))

@deprecate(generate_noisy_data(model::Function, n::Int, np::Int,
           p::Int, θSol::Vector{Float64}, xMin::Float64, xMax::Float64),
           generate_noisy_data(model, n, np, p, θSol, (xMin, xMax)))

# Convenience methods forwarding the interval (and exact solution) as
# keyword arguments.
generate_noisy_data(model::Function, n::Int, np::Int, p::Int,
    x_interval::Tuple{Float64, Float64}) = generate_noisy_data(model,
    n, np, p; x_interval=x_interval)

generate_noisy_data(model::Function, n::Int, np::Int, p::Int,
    θSol::Vector{Float64}, x_interval::Tuple{Float64, Float64}) =
    generate_noisy_data(model, n, np, p; x_interval=x_interval,
    θSol=θSol)
"""
    generate_noisy_data!(data::AbstractArray{Float64, 2},
        v::Vector{Int}, model::Function, n::Int, np::Int, p::Int;
        x_interval::Tuple{Float64, Float64}=(-10.0, 10.0),
        θSol::Vector{Float64}=10.0 * randn(Float64, n),
        std::Float64=200.0, out_times::Float64=7.0)

Random generate a fitting one-dimensional data problem, storing the
data in matrix `data` and the outliers in vector `v`.

This function receives a `model(x, θ)` function, the number of parameters
`n`, the number of points `np` to be generated and the number of
trusted points `p`.

If the `n`-dimensional vector `θSol` is provided, then the exact
solution will not be random generated. The interval `[xMin, xMax]`
(*deprecated*) or `x_interval` for generating the values to evaluate
`model` can also be provided.

It returns a tuple `(data, θSol, outliers)` where

  - `data`: (`np` x `3`) array, where each row contains `x`,
    the (possibly noisy) value of `model(x, θSol)` and the outlier
    perturbation (0 for regular points);
  - `θSol`: `n`-dimensional vector with the exact solution;
  - `outliers`: the outliers of this data set.
"""
function generate_noisy_data!(data::AbstractArray{Float64, 2},
             v::Vector{Int}, model::Function, n::Int, np::Int, p::Int;
             x_interval::Tuple{Float64, Float64}=(-10.0, 10.0),
             θSol::Vector{Float64}=10.0 * randn(Float64, n),
             std::Float64=200.0, out_times::Float64=7.0)

    @assert(x_interval[1] <= x_interval[2],
            "Invalid interval for random number generation.")

    @assert(size(data) == (np, 3),
            "Invalid size of data matrix. $(size(data)) != $((np, 3)).")

    @assert(length(v) >= np - p,
            "Invalid size for vector of outliers.")

    # Generate (x_i) where x_interval[1] <= x_i <= x_interval[2] (data)
    # Fix the problem of large interval with 1 element: a single point
    # is placed at the interval midpoint. Note that indexing `x[k]`
    # works for both the scalar and the LinRange case.
    x = (np == 1) ? sum(x_interval) / 2.0 : LinRange(x_interval[1], x_interval[2], np)

    # Points selected to be outliers
    get_unique_random_points!(v, np, np - p)

    # All outliers are shifted to the same side, chosen at random once.
    sgn = sign(randn())

    # Add noise to some random points
    for k = 1:np

        y = model(x[k], θSol) + randn() * std
        noise = 0.0

        if k in v
            # Outliers carry only the large one-sided perturbation,
            # not the Gaussian noise term.
            y = model(x[k], θSol)
            noise = (1.0 + 2 * rand()) * out_times * std * sgn
        end

        data[k, 1] = x[k]
        data[k, 2] = y + noise
        data[k, 3] = noise

    end

    return data, θSol, v

end
"""
    generate_clustered_noisy_data(model::Function, n::Int, np::Int,
        p::Int, x_interval::Tuple{Float64,Float64},
        cluster_interval::Tuple{Float64, Float64}; kwargs...)
    generate_clustered_noisy_data(model::Function, n::Int,
        np::Int, p::Int, θSol::Vector{Float64},
        x_interval::Tuple{Float64,Float64},
        cluster_interval::Tuple{Float64, Float64}; kwargs...)

Generate a test set with clustered outliers.

The arguments and optional arguments are the same for
[`generate_noisy_data!`](@ref), with exception of tuple
`cluster_interval` which is the interval to generate the clustered
outliers.

It returns a tuple `(data, θSol, outliers)` where

  - `data`: (`np` x `3`) array, where each row contains `x` and
    `model(x, θSol)`;
  - `θSol`: `n`-dimensional vector with the exact solution;
  - `outliers`: the outliers of this data set.
"""
function generate_clustered_noisy_data(model::Function, n::Int,
             np::Int, p::Int, x_interval::Tuple{Float64,Float64},
             cluster_interval::Tuple{Float64, Float64}; kwargs...)

    # Allocate the result containers and delegate to the in-place version.
    storage = Matrix{Float64}(undef, np, 3)
    outlier_idx = Vector{Int}(undef, np - p)

    return generate_clustered_noisy_data!(storage, outlier_idx, model, n,
                                          np, p, x_interval,
                                          cluster_interval; kwargs...)

end
# Positional-`θSol` convenience wrapper: forwards the exact solution to
# the main clustered generator as a keyword argument. A `θSol` inside
# `kwargs` still takes precedence (it appears later in the call).
generate_clustered_noisy_data(model::Function, n::Int, np::Int,
    p::Int, θSol::Vector{Float64}, x_interval::Tuple{Float64,Float64},
    cluster_interval::Tuple{Float64, Float64}; kwargs...) =
        generate_clustered_noisy_data(model, n, np, p, x_interval,
            cluster_interval; θSol=θSol, kwargs...)
"""
    generate_clustered_noisy_data!(data::Array{Float64, 2},
        v::Vector{Int}, model::Function, n::Int, np::Int, p::Int,
        x_interval::Tuple{Float64,Float64},
        cluster_interval::Tuple{Float64, Float64}; kwargs...)

Generate a test set with clustered outliers. This version overwrites
the content of (`np` x `3`) matrix `data` and vector `v` with integer
indices to the position of outliers in `data`.

The arguments and optional arguments are the same for
[`generate_noisy_data!`](@ref), with exception of tuple
`cluster_interval` which is the interval to generate the clustered
outliers.

It returns a tuple `(data, θSol, outliers)` where

  - `data`: (`np` x `3`) array, where each row contains `x` and
    `model(x, θSol)`. The same array given as argument;
  - `θSol`: `n`-dimensional vector with the exact solution;
  - `outliers`: the outliers of this data set. The same vector given
    as argument.
"""
function generate_clustered_noisy_data!(data::Array{Float64, 2},
             v::Vector{Int}, model::Function, n::Int, np::Int,
             p::Int, x_interval::Tuple{Float64,Float64},
             cluster_interval::Tuple{Float64, Float64}; kwargs...)

    # If there are outliers to generate, the cluster interval must lie
    # (non-degenerately) inside the full interval.
    if (np - p > 0) &&
       !(x_interval[1] <= cluster_interval[1] <
         cluster_interval[2] <= x_interval[2])

        error("Bad interval for clustered data generation.")

    end

    interval_len = x_interval[2] - x_interval[1]

    # Fractions of the full interval before, inside, and after the
    # cluster. (`fr3` is currently unused; `np3` is taken as the rest.)
    fr1 = (cluster_interval[1] - x_interval[1]) / interval_len
    fr2 = (cluster_interval[2] - cluster_interval[1]) / interval_len
    fr3 = (x_interval[2] - cluster_interval[2]) / interval_len

    # Distribute data in a proportional way
    # Interval 2 will contain the clustered outliers
    np2 = max(np - p, Int(round(fr2 * np)))
    np1 = min(np - np2, Int(round(fr1 * np)))
    # Interval 3 will contain the remaining points
    np3 = max(0, np - np1 - np2)

    @debug("Clustered points: $(np1), $(np2), $(np3).")

    # Avoid repetition in the borders of the intervals
    δ_c = (cluster_interval[2] - cluster_interval[1]) / (np2 + 2)

    # Generate data
    tmpv = Vector{Int}()

    # Interval 1: no outliers (p == np1). Also fixes θSol, which is
    # reused for the remaining intervals so all points share one model.
    tmpd, θSol, tmpv = generate_noisy_data!(@view(data[1:np1, :]),
        tmpv, model, n, np1, np1; x_interval=(x_interval[1],
        cluster_interval[1]), kwargs...)

    # Interval 2: holds all np - p outliers, shrunk by δ_c at each side
    # to avoid duplicating the interval borders.
    generate_noisy_data!(@view(data[np1 + 1:np1 + np2, :]), v, model,
        n, np2, np2 - (np - p); x_interval=(cluster_interval[1] + δ_c,
        cluster_interval[2] - δ_c), θSol=θSol, kwargs...)

    # Update the outlier number with the correct number: shift the
    # sub-interval indices to their positions in the full matrix.
    map!((x) -> x + np1, v, v)

    # Interval 3: remaining points, no outliers (`tmpv` result unused).
    generate_noisy_data!(@view(data[np1 + np2 + 1:np, :]), tmpv,
        model, n, np3, np3; x_interval=(cluster_interval[2],
        x_interval[2]), θSol=θSol, kwargs...)

    return data, θSol, v

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 2484 | export RAFFOutput
"""
    RAFFOutput(status::Int, solution::Vector{Float64}, iter::Int,
               p::Int, f::Float64, nf::Int, nj::Int, outliers::Vector{Int})

This type defines the output file for the RAFF algorithm, where

  - `status`: is 1 if converged and 0 if not
  - `solution`: vector with the parameters of the model
  - `iter`: number of iterations up to convergence
  - `p`: number of trusted points
  - `f`: the residual value
  - `nf`: number of function evaluations
  - `nj`: number of Jacobian evaluations
  - `outliers`: the possible outliers detected by the method, for the
    given `p`

    RAFFOutput()

Creates a null version of output, equivalent to
`RAFFOutput(0, [], -1, 0, Inf, -1, -1, [])`.

    RAFFOutput(p::Int)
    RAFFOutput(sol::Vector{Float64}, p::Int)

Creates a null version of output for the given `p` and a null version
with the given solution, respectively.
"""
struct RAFFOutput
    status   :: Int             # 1 = converged, 0 = not converged
    solution :: Vector{Float64} # model parameters found
    iter     :: Int             # iterations up to convergence
    p        :: Int             # number of trusted points
    f        :: Float64         # residual (objective) value
    nf       :: Int             # number of function evaluations
    nj       :: Int             # number of Jacobian evaluations
    outliers :: Vector{Int}     # outliers detected for this `p`
end

# Null outputs for a given `p` (and, optionally, a given solution).
# Typed empty literals avoid converting intermediate `Vector{Any}`s.
RAFFOutput(p) = RAFFOutput(0, Float64[], -1, p, Inf, -1, -1, Int[])

RAFFOutput(sol, p) = RAFFOutput(0, sol, -1, p, Inf, -1, -1, Int[])

# Deprecated compatibility constructor from before `nf`/`nj` existed.
RAFFOutput(status, solution, iter, p, f, outliers) = begin

    Base.depwarn("The call to `RAFFOutput` without `nf` and `nj` will be deprecated in future versions. Use the full version `RAFFOutput(status, solution, iter, p, f, nf, nj, outliers)` instead.", :raff)

    RAFFOutput(status, solution, iter, p, f, -1, -1, outliers)

end

RAFFOutput() = RAFFOutput(0)
# Equality for RAFFOutput: two outputs are equal iff every field matches.
# NOTE(review): `==` is overloaded without a matching `hash`, so
# RAFFOutput values used as Dict/Set keys fall back to identity hashing.
import Base.==

==(a::RAFFOutput, b::RAFFOutput) =
    (a.status, a.solution, a.iter, a.p, a.f, a.outliers, a.nf, a.nj) ==
    (b.status, b.solution, b.iter, b.p, b.f, b.outliers, b.nf, b.nj)
import Base.show

# Human-readable, field-by-field report of a RAFFOutput.
function show(io::IO, ro::RAFFOutput)
    report = (
        "** RAFFOutput ** \n",
        "Status (.status) = $(ro.status) \n",
        "Solution (.solution) = $(ro.solution) \n",
        "Number of iterations (.iter) = $(ro.iter) \n",
        "Number of trust points (.p) = $(ro.p) \n",
        "Objective function value (.f) = $(ro.f) \n",
        "Number of function evaluations (.nf) = $(ro.nf)\n",
        "Number of Jacobian evaluations (.nj) = $(ro.nj)\n",
        "Index of outliers (.outliers) = $(ro.outliers)\n",
    )
    for line in report
        print(io, line)
    end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 6796 | export set_raff_output_level, set_lm_output_level
"""
    voting_strategy(model::Function, data::Array{Float64, 2}, sols::Vector{RAFFOutput}, pliminf::Int,
                    plimsup::Int)

Utility function to compute the matrix representing the voting system
used by RAFF.

It first applies a filtering strategy, to eliminate obvious local
minima, then it calculates a *magic threshold* and constructs the
distance matrix. The vector `sols` contains the solutions `s_p`, for
`p = pliminf, ... plimsup`.
"""
function voting_strategy(model::Function, data::Array{Float64, 2}, sols::Vector{RAFFOutput}, pliminf::Int,
                         plimsup::Int)

    # Remove possible stationary points, i.e., points with lower
    # values for 'p' and higher 'f'.
    eliminate_local_min!(model, data, sols)

    # Voting strategy
    lv = plimsup - pliminf + 1
    # Pairwise solution distances: compact vector (converged pairs
    # only) and strict lower-triangular matrix form.
    dvector = zeros(Int(lv * (lv - 1) / 2))
    dmatrix = zeros(lv, lv)
    pos = 0
    n_conv = 0

    for j = 1:lv
        # Count how many have successfully converged
        (sols[j].status == 1) && (n_conv += 1)
        for i = j + 1:lv
            # Inf marks pairs where at least one solution failed.
            dmatrix[i, j] = Inf
            if sols[i].status == 1 && sols[j].status == 1
                dmatrix[i, j] = norm(sols[i].solution - sols[j].solution, Inf)
                pos += 1
                dvector[pos] = dmatrix[i, j]
            end
        end
    end

    threshold = Inf

    if pos > 0
        # Heuristic ("magic") threshold: smallest observed distance
        # plus a plimsup-damped fraction of the mean distance.
        dvv = @view dvector[1:pos]
        threshold = minimum(dvv) + mean(dvv) / (1.0 + sqrt(plimsup))
    elseif n_conv == 0
        @warn("No convergence for any 'p'. Returning largest.")
    end

    votsis = zeros(lv)
    @debug("Threshold: $(threshold)");

    # Actual votation: each solution scores one vote for converging
    # plus one vote per solution within `threshold` of it.
    for j = 1:lv
        # Count +1 if converged
        (sols[j].status == 1) && (votsis[j] += 1)
        # Check other distances
        for i = j + 1:lv
            if dmatrix[i, j] <= threshold
                votsis[j] += 1
                votsis[i] += 1
            end
        end
    end

    @debug("Voting vector:", votsis)
    @debug("Distance matrix:", dmatrix)

    return votsis

end
"""
    check_ftrusted(ftrusted::Union{Float64, Tuple{Float64, Float64}}, np::Int)

Utility function to check the `ftrusted` parameter in [`raff`](@ref) and
[`praff`](@ref), converting the given fraction (or fraction interval) of
trusted points into an integer interval `(pliminf, plimsup)` relative to
the `np` data points. Throws an `ErrorException` if the percentage of
trusted points is incorrect.
"""
# A single fraction means "at least `ftrusted * np` trusted points";
# the upper bound defaults to all points. Dispatch replaces the
# original `typeof(ftrusted) == Float64` branching.
function check_ftrusted(ftrusted::Float64, np::Int)

    (0.0 <= ftrusted <= 1.0) ||
        error("Bad value for `ftrusted`: $(ftrusted).")

    return Int(round(ftrusted * np)), np

end

# An interval of fractions must be ordered and contained in [0, 1].
function check_ftrusted(ftrusted::Tuple{Float64, Float64}, np::Int)

    (0.0 <= ftrusted[1] <= ftrusted[2] <= 1.0) ||
        error("Bad interval for `ftrusted`: $(ftrusted[1]) > $(ftrusted[2]).")

    return Int(round(ftrusted[1] * np)), Int(round(ftrusted[2] * np))

end
"""
    eliminate_local_min!(model::Function, data::Array{Float64, 2},
                         sols::Vector{RAFFOutput})

Check if the function value of the solution found by smaller values of
`p` is not greater when compared with larger ones. This certainly
indicates that a local minimizer was found by the smaller `p`. Such
solutions are replaced in-place by null `RAFFOutput`s.
"""
function eliminate_local_min!(model::Function, data::Array{Float64, 2},
                              sols::Vector{RAFFOutput})

    lv = length(sols)

    sec_sol_ind = -1  # NOTE(review): assigned but never used below.

    # Start from the largest p: a converged solution with smaller p but
    # larger f is necessarily a local minimizer and is discarded.
    for i = lv:-1:1
        (sols[i].status != 1) && continue
        for j = i - 1:-1:1
            if (sols[j].status == 1) && (sols[j].f > sols[i].f)
                @debug(" Possible local minimizer for p = $(sols[j].p) " *
                       "with f = $(sols[j].f). Removing it.")
                sols[j] = RAFFOutput(0)
            end
        end
    end

    # Test the maximum 'p'
    nump, = size(data)

    maxp = sols[lv]

    if (lv > 1) && (maxp.status == 1)

        i = lv - 1

        # while (i > 0) && (sols[i].status != 1)
        #     i -= 1
        # end

        # Find the converged solution (other than maxp) with smallest f;
        # it becomes the challenger trying to reject maxp.
        bestf = maxp.f
        j = 0
        while (i > 0)
            if (sols[i].status == 1) && (sols[i].f < bestf)
                bestf = sols[i].f
                j = i
            end
            i -= 1
        end

        if j == 0
            @debug(" Nobody to reject p = $(maxp.p).")
        else
            sec_sol = sols[j]
            @debug(" p = $(sec_sol.p) will try to reject p = $(maxp.p).")
            # Count the data points that maxp fits better than the
            # challenger.
            nmin = 0
            for i = 1:nump
                y = data[i, end]
                x = @view data[i, 1:(end - 1)]
                y1 = model(x, maxp.solution)
                y2 = model(x, sec_sol.solution)
                (abs(y - y1) < abs(y - y2)) && (nmin += 1)
            end
            # NOTE(review): the comparison uses `lv / 2` (the number of
            # candidate solutions) although `nmin` counts data points;
            # verify whether `nump / 2` was intended.
            if nmin < lv / 2
                @debug(" nmin = $(nmin). Rejecting p = $(maxp.p).")
                sols[lv] = RAFFOutput(0)
            end
        end

    end

end
"""
    sort_fun!(V::Vector{Float64}, ind::Vector{Int}, p::Int)

Auxiliary routine: move the `p` smallest entries of `V` to its first
`p` positions (partial selection sort), recording their original
indices in `ind`. Returns views of the first `p` elements of `ind` and
`V`, respectively.
"""
function sort_fun!(V::Vector{Float64}, ind::Vector{Int}, p::Int)

    # A non-positive `p` yields empty views and does no work.
    (p <= 0) && (return @view(ind[1:p]), @view(V[1:p]))

    nel = length(ind)

    # Start from the identity permutation.
    for k in 1:nel
        ind[k] = k
    end

    # Partial selection sort: after pass `k`, V[k] holds the k-th
    # smallest value and ind[k] its original position.
    for k = 1:p
        for j = k + 1:nel
            if V[k] > V[j]
                ind[k], ind[j] = ind[j], ind[k]
                V[k], V[j] = V[j], V[k]
            end
        end
    end

    return @view(ind[1:p]), @view(V[1:p])

end
"""
    set_raff_output_level(level::LogLevel)

Set the output level of [`raff`](@ref) and [`praff`](@ref) algorithms
to the desired logging level. Options are (from highly verbose to just
errors): `Logging.Debug`, `Logging.Info`, `Logging.Warn` and
`Logging.Error`. The package
[`Logging`](https://docs.julialang.org/en/v1.0/stdlib/Logging/index.html)
needs to be loaded.

Defaults to `Logging.Error`.
"""
function set_raff_output_level(level::LogLevel)
    # Swap the module-wide logger used by the main algorithms.
    global raff_logger
    raff_logger = ConsoleLogger(stdout, level)
end
"""
    set_lm_output_level(level::LogLevel)

Set the output level of [`lmlovo`](@ref) algorithm to the desired
logging level. Options are (from highly verbose to just errors):
`Logging.Debug`, `Logging.Info`, `Logging.Warn` and
`Logging.Error`. The package
[`Logging`](https://docs.julialang.org/en/v1.0/stdlib/Logging/index.html)
needs to be loaded.

Defaults to `Logging.Error`.
"""
function set_lm_output_level(level::LogLevel)
    # Swap the module-wide logger used by `lmlovo`.
    global lm_logger
    lm_logger = ConsoleLogger(stdout, level)
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 283 | using RAFF
using Test
using DelimitedFiles
using Distributed
using Random
using SharedArrays
using Logging
include("test_raff.jl")
include("test_utils.jl")
include("test_generator.jl")
include("test_parallel.jl")
include("test_multivariate.jl")
include("test_integration.jl")
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 2417 | using RAFF
# Benchmark driver: compares the serial (`raff`) and the distributed
# (`praff`) solvers on a randomly generated exponential fitting
# problem, printing a one-line timing/solution summary.
# Usage: julia <script> s|p <np> [<nw>]
using Random
using Distributed
using Printf
using ArgParse

# Set Debug for Logging
using Logging
using Base.CoreLogging

global_logger(ConsoleLogger(stdout, Logging.Error))

# Command-line interface definition.
s = ArgParseSettings()

@add_arg_table s begin

    "type"
    help = "Sequential(s) or parallel(p)"
    arg_type = String
    required = true

    "np"
    arg_type = Int
    help = "Number of points"
    required = true

    "nw"
    arg_type = Int
    help = "Number of workers"

end

parsed_args = parse_args(ARGS, s)

n = 2

np = parsed_args["np"]

# NOTE(review): `Int(0.7 * np)` throws `InexactError` whenever
# `0.7 * np` is not integral (e.g. np = 15); `round(Int, 0.7 * np)`
# is likely intended.
p = Int(0.7 * np)

answer = [2.0, -0.5]

if parsed_args["type"] == "p"

    ###################
    # Distributed run #
    ###################

    nw = parsed_args["nw"]

    addprocs(nw)

    @everywhere using RAFF

    # Set Debug for Logging
    @everywhere using Logging
    @everywhere using Base.CoreLogging

    @everywhere global_logger(ConsoleLogger(stdout, Logging.Error))

    # Gradient (w.r.t. x) of the exponential model defined below.
    @everywhere gmodel!(x, t_, g) = begin
        g[1] = exp(t_ * x[2])
        g[2] = t_ * x[1] * exp(t_ * x[2]);
    end

    @everywhere model(x, t) = x[1] * exp(t * x[2])

    # First run (warm-up, so compilation time is excluded from timing)
    praff(model, gmodel!, zeros(1, 3), n)

    # Set the seed for generating the same data
    Random.seed!(123456789)

    # NOTE(review): `RAFF.generateNoisyData` looks like a stale
    # (pre-snake_case) name; verify against the current RAFF API
    # (`generate_noisy_data`).
    data, xSol = RAFF.generateNoisyData(model, n, np, p, answer)

    # Time the solve; on failure, fall back to sentinel values so the
    # summary line can still be printed.
    val, t, bytes, gctime, memallocs = try

        @timed praff(model, gmodel!, data, n)

    catch e

        ([1.0e+99, 1.0e+99], 1.0e+99, -1), -1.0, 0, -1.0, nothing

    end

    sol, fsol, psol = val

    @printf("%2d %10d %10.5s %15.8e %15.8e %15.8e %6d\n", nw, np,
            t, sol[1], sol[2], fsol, psol)

    rmprocs(workers())

else

    ##############
    # Serial run #
    ##############

    # Gradient (w.r.t. x) of the exponential model defined below.
    gmodel!(x, t_, g) = begin
        g[1] = exp(t_ * x[2])
        g[2] = t_ * x[1] * exp(t_ * x[2]);
    end

    model(x, t) = x[1] * exp(t * x[2])

    # First run (warm-up)
    raff(model, gmodel!, zeros(1, 3), n)

    # Set the seed for generating the same data
    Random.seed!(123456789)

    data, xSol = RAFF.generateNoisyData(model, n, np, p, answer)

    val, t, bytes, gctime, memallocs = try

        @timed raff(model, gmodel!, data, n)

    catch e

        ([1.0e+99, 1.0e+99], 1.0e+99, -1), -1.0, 0, -1.0, nothing

    end

    sol, fsol, psol = val

    @printf("%2d %10d %10.5s %15.8e %15.8e %15.8e %6d\n", 0, np,
            t, sol[1], sol[2], fsol, psol)

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 6939 | @testset "Random generator" begin
modelStr = "(x, θ) -> θ[1] * x[1] + θ[2]"
model = eval(Meta.parse(modelStr))
n = 2
np = 5
p = 1
datf = "test.dat"
solf = "sol.dat"
generate_test_problems(datf, solf, model, modelStr, n, np, p)
open(solf, "r") do fp
@test n == parse(Int, readline(fp))
@test n == length(split(readline(fp)))
@test modelStr == readline(fp)
end
nLines = 0
nNoise = 0
open(datf, "r") do fp
# Check the dimension of the problem
@test 1 == parse(Int, readline(fp))
for line in eachline(fp)
nLines += 1
(parse(Int, split(line)[3]) != 0.0) && (nNoise += 1)
end
end
@test (np - p) == nNoise
@test np == nLines
    # Corner cases of the random index-selection helpers.
    @testset "Unique" begin

        @test length(RAFF.get_unique_random_points(5, 2)) == 2

        @test length(RAFF.get_unique_random_points(0, -1)) == 0

        @test length(RAFF.get_unique_random_points(5, 0)) == 0

        @test length(RAFF.get_unique_random_points(1, 2)) == 1

        v = RAFF.get_unique_random_points(5, 5)

        @test length(v) == 5

        # Each index 1..5 must appear exactly once.
        cnt = 0
        for i = 1:length(v)
            (i in v) && (cnt += 1)
        end

        @test cnt == 5

        @test length(RAFF.get_unique_random_points(1, -1)) == 0

        @test length(RAFF.get_unique_random_points(-1, 1)) == 0

        # A destination vector shorter than the selection must throw.
        @test_throws(DimensionMismatch,
                     RAFF.get_unique_random_points!(
                         Vector{Int}(undef, 1), 3, 2))

    end
    # Shape and outlier-count invariants of the noisy data generators.
    @testset "Noisy" begin

        n, model, model_s = RAFF.model_list["linear"]
        θSol = [1.0, 1.0]
        np = 5
        p = 4

        # No outliers
        data, θSol1, v = RAFF.generate_noisy_data(model, n, np, np)

        @test length(v) == 0
        @test length(θSol1) == n
        @test size(data) == (np, 3)
        @test all(data[:, 3] .== 0.0)

        # All outliers
        data, θSol1, v = RAFF.generate_noisy_data(model, n, np, 0)

        @test length(v) == np
        @test length(θSol1) == n
        @test size(data) == (np, 3)
        @test all(data[:, 3] .!= 0.0)

        # Mixed case: exactly np - p perturbed rows.
        data, θSol1, v = RAFF.generate_noisy_data(model, n, np, p)

        @test length(v) == np - p
        @test length(θSol1) == n
        @test size(data) == (np, 3)
        @test sum(data[:, 3] .!= 0.0) == np - p

        # Test if the solution provided is maintained and also the interval
        xMin = -10.0
        xMax = 10.0

        data, θSol1, v = RAFF.generate_noisy_data(model, n, np, p, θSol, xMin, xMax)

        @test all(data[:, 1] .>= xMin)
        @test all(data[:, 1] .<= xMax)
        @test θSol == θSol1

        # Test if the solution provided is maintained and also the interval
        x_interval = (-10.0, 10.0)

        data, θSol1, v = RAFF.generate_noisy_data(model, n, np, p, θSol, x_interval)

        @test all(data[:, 1] .>= x_interval[1])
        @test all(data[:, 1] .<= x_interval[2])
        @test θSol == θSol1

        # Test the memory efficient version
        data = Array{Float64}(undef, np, 3)
        v = Vector{Int64}(undef, np)

        data1, θSol1, v1 = RAFF.generate_noisy_data!(data, v, model, n, np, p)

        @test data == data1
        @test v == v1

    end
    # Clustered-outlier generation: placement, ordering and validation.
    @testset "Cluster" begin

        n, model, model_s = RAFF.model_list["linear"]
        θSol = [1.0, 1.0]
        np = 10
        p = 7

        x_int = (-10.0, 10.0)
        c_int = (0.0, 5.0)

        data, θSol1, v = generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        @test size(data) == (np, 3)
        @test length(θSol1) == n
        @test length(v) == np - p

        # Outliers must be exactly the perturbed rows and lie inside
        # the cluster interval.
        out_ind = findall(abs.(data[:, 3]) .> 0.0)

        @test length(out_ind) == length(v)
        @test all(data[v, 1] .>= c_int[1])
        @test all(data[v, 1] .<= c_int[2])

        # This loop checks if the points are generated in order and
        # there is no repetition between groups of points
        is_ordered = true

        for i = 1:np - 1

            (data[i, 1] >= data[i + 1, 1]) && (is_ordered = false)

        end

        @test is_ordered

        # Cluster interval at the beginning
        x_int = (-10.0, 10.0)
        c_int = (-10.0, 0.0)

        data, θSol1, v = generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        @test length(v) == np - p
        @test any(data[:, 1] .> 0.0)

        # Non enclosing cluster interval
        x_int = (-10.0, 10.0)
        c_int = (-11.0, 0.0)

        @test_throws ErrorException generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        # Singleton cluster interval with np - p > 1
        np = 10
        p = 5
        x_int = (-10.0, 10.0)
        c_int = (1.0, 1.0)

        @test_throws ErrorException generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        # Singleton cluster interval with no outliers
        np = 10
        p = 10
        x_int = (-10.0, 10.0)
        c_int = (1.0, 1.0)

        data, θSol1, v = generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        @test length(v) == 0

        # Cluster with only one element
        np = 10
        p = 8
        x_int = (1.0, 30.0)
        c_int = (5.0, 10.0)

        data, θSol1, v = generate_clustered_noisy_data(model, n, np,
            p, x_int, c_int)

        @test length(findall(data[:, 1] .<= 5.0)) == 1

    end
    # Box-constrained random vector filling (`interval_rand!`).
    @testset "Rand Interv." begin

        n = 2

        x = Vector{Float64}(undef, n)

        l = [1.0, -5.0]
        u = [2.0, -1.0]

        interval = [i for i in zip(l, u)]

        RAFF.interval_rand!(x, interval)

        @test all(x .>= l)
        @test all(x .<= u)

        # Longer vector: only the first `length(interval)` entries are
        # filled; the rest must remain unchanged.
        n = 5

        x = zeros(Float64, n)

        RAFF.interval_rand!(x, interval)

        @test all(x[1:2] .>= l)
        @test all(x[1:2] .<= u)
        @test all(x[3:n] .== 0.0)

        # Vector shorter than the interval list must throw.
        n = 1

        x = zeros(Float64, n)

        @test_throws ErrorException RAFF.interval_rand!(x, interval)

        # Bad interval
        n = 2

        x = Vector{Float64}(undef, n)

        l = [ 1.0, -5.0]
        u = [-1.0, -1.0]

        interval = [i for i in zip(l, u)]

        @test_throws ErrorException RAFF.interval_rand!(x, interval)

    end
    # Each registered model must agree with its string representation.
    @testset "Model list" for (type, (n, model, model_str)) in RAFF.model_list

        # TODO: Maybe we need to get the dimension of the model?
        x = (type == "circle" || type == "ellipse") ? rand(2) : rand()

        θ = rand(n)

        model2 = eval(Meta.parse(model_str))

        @test model(x, θ) ≈ model2(x, θ)

    end

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 920 | @testset "Generated test set" begin
dir = "integration_test_files/"
# Iterate over a list of small problems and solutions
for prob in eachline(dir * "list.dat")
# Ignore blank lines
(length(strip(prob)) == 0) && continue
dname, sname = split(prob)
# Data file
open(dir * dname, "r") do fp
global N = parse(Int, readline(fp))
global data = readdlm(fp)[:, [1, 2]]
end
# Solution file
fsol = open(dir * sname, "r")
# Number of parameters
n = Meta.parse(readline(fsol))
# Solution vector
answer = eval(Meta.parse(readline(fsol)))
# Model function to fit data
model = eval(Meta.parse(readline(fsol)))
close(fsol)
# Call raff
rout = raff(model, data, n)
@test rout.solution ≈ answer atol=1.0e-2
end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 2735 | @testset "Multivariate" begin
@testset "Simple test" begin
data = [1.0 1.0 2.0
0.0 0.0 4.0
7.0 1.5 -4.5
2.0 2.0 -17.0 # outlier
0.0 8.6 -4.6]
# This is the most complete way of defining the arguments for model and gmodel!
model = (x::Union{Vector{Float64}, SubArray}, θ::Vector{Float64}) -> θ[1] *
x[1] + θ[2] * x[2] + θ[3]
gmodel! = (g::SubArray, x::Union{Vector{Float64}, SubArray},
θ::Vector{Float64}) -> begin
g[1] = x[1]
g[2] = x[2]
g[3] = 1.0
end
n = 3
p = 4
θsol = [- 1.0, - 1.0, 4.0]
rout = lmlovo(model, gmodel!, data, n, p; ε=1.0e-8)
@test rout.solution ≈ θsol atol=1.0e-4
@test rout.outliers == [4]
rout = raff(model, gmodel!, data, n)
@test rout.solution ≈ θsol atol=1.0e-3
@test rout.p == 4
@test rout.outliers == [4]
# Using automatic differentiation
# This is the most complete way of defining the arguments for
# model when automatic differentiation is being used
model = (x::Union{Vector{Float64}, SubArray}, θ) -> θ[1] *
x[1] + θ[2] * x[2] + θ[3]
rout = lmlovo(model, data, n, p; ε=1.0e-8)
@test rout.solution ≈ θsol atol=1.0e-4
@test rout.outliers == [4]
rout = raff(model, data, n)
@test rout.solution ≈ θsol atol=1.0e-3
@test rout.p == 4
@test rout.outliers == [4]
end
    # Circle fit (center (1,1), squared radius 1) with one planted
    # outlier (row 8).
    @testset "Circle" begin

        data = [2.0 1.0 0.0
                1.766044443118978 1.6427876096865393 -3.3306690738754696e-16
                1.1736481776669305 1.9848077530122081 2.220446049250313e-16
                0.5000000000000002 1.8660254037844388 0.0
                0.06030737921409168 1.3420201433256689 -1.1102230246251565e-16
                0.06030737921409157 0.6579798566743313 0.0
                0.49999999999999956 0.13397459621556151 0.0
                3.17364817766693 0.015192246987791869 0.0 # noise
                1.766044443118978 0.3572123903134604 0.0]

        θsol = [1.0, 1.0, 1.0]

        model = (x::Union{Vector{Float64}, SubArray}, θ) -> (x[1] - θ[1])^2 + (x[2] - θ[2])^2 - θ[3]

        n = 3

        p = 8

        rout = lmlovo(model, [0.0, 0.0, 2.0], data, n, p; ε=1.0e-8)

        @test rout.solution ≈ θsol atol=1.0e-4
        @test rout.outliers == [8]

        rout = raff(model, data, n; ε=1.0e-8)

        @test rout.p == 8
        @test rout.outliers == [8]
        @test rout.solution ≈ θsol atol=1.0e-4

    end

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 11295 | @testset "Parallel tests" begin
    # Shared linear model, its gradient and a noise-free data set used
    # by all the parallel testsets below.
    model(x, θ) = θ[1] + θ[2] * x[1]

    gmodel!(g, x, θ) = begin
        g[1] = 1.0
        g[2] = x[1]
    end

    n = 2

    np = 10

    p = 7

    Random.seed!(123456789)

    data, θSol, = RAFF.generate_noisy_data(model, n, np, p; std=0.0)

    # Remove outlier information
    data = data[:, 1:2]
    # `update_best` consumes candidate solutions from a channel and
    # averages them into the shared best vector.
    @testset "Updater" begin

        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(0))

        bestθ = SharedArray{Float64, 1}(n)

        bestθ .= 0.0

        fut = @async RAFF.update_best(bqueue, bestθ)

        # Should update bestθ
        newbest1 = ones(Float64, n)

        put!(bqueue, newbest1)

        sleep(0.1)

        @test bestθ == newbest1

        # Should update bestθ again
        newbest2 = rand(Float64, n)

        put!(bqueue, newbest2)

        sleep(0.1)

        @test bestθ == (newbest1 + newbest2) / 2.0

        # Should not throw an error nor die with invalid element
        # The following test if failing in Julia Nightly. They do not
        # allow adding different types to specific-typed channels,
        # which is good. See how to fix this test.

        # put!(bqueue, 10)

        # @test !isready(bqueue)

        # @test !istaskdone(fut)

        # Should finish
        close(bqueue)

        sleep(0.1)

        @test istaskdone(fut)

    end
    # `consume_tqueue` worker lifecycle: task intake, solution output,
    # and graceful shutdown when queues are closed.
    @testset "Consumer" begin

        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(0))

        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        squeue = RemoteChannel(() -> Channel{RAFFOutput}(0))

        seedMS = MersenneTwister(1234)

        initguess = zeros(Float64, n)

        MAXMS = 1

        worker1 = @async @test begin
            RAFF.consume_tqueue(bqueue, tqueue, squeue,
                model, gmodel!, data, n, p - 2, np,
                MAXMS, seedMS, initguess)
            true
        end

        @test !istaskdone(worker1)

        # Should not do anything for an invalid interval
        put!(tqueue, p - 3:p)

        @test !isready(squeue)

        # Should work, since the problem is easy
        put!(tqueue, p - 2:p - 2)

        # Should return a vector with 1 solution pair
        rout = take!(squeue)

        @test !istaskdone(worker1)
        @test rout.p == p - 2
        @test rout.status == 1

        # Another test, with different p
        put!(tqueue, p:p)

        rout = take!(squeue)

        @test rout.p == p
        @test rout.status == 1
        @test rout.f ≈ 0.0 atol=1.0e-1
        @test rout.solution ≈ θSol atol=1.0e-1

        # Test with interval
        put!(tqueue, p - 1:p)

        rout = take!(squeue)

        @test rout.status == 1
        @test rout.p == p - 1

        rout = take!(squeue)

        @test rout.status == 1
        @test rout.p == p

        @test !isready(squeue)

        # Check if worker is alive when bqueue is closed
        close(bqueue)

        put!(tqueue, p:p)

        take!(squeue)

        @test !istaskdone(worker1)

        # Check if worker finishes when tqueue is closed
        close(tqueue)

        sleep(0.1)

        @test istaskdone(worker1)

        # Worker should die if solution queue is closed
        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        seedMS = MersenneTwister(1234)

        MAXMS = 1

        worker2 = @async @test begin
            RAFF.consume_tqueue(bqueue, tqueue, squeue,
                model, gmodel!, data, n, p - 2, np,
                MAXMS, seedMS, initguess)
            true
        end

        @test !istaskdone(worker2)

        close(squeue)

        put!(tqueue, p:p)

        sleep(0.1)

        @test !isready(tqueue)
        @test istaskdone(worker2)

    end
    # Multistart should never be worse than a single start with the
    # same seeded random generator.
    @testset "Consumer Multistart" begin

        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(4))

        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        squeue = RemoteChannel(() -> Channel{RAFFOutput}(0))

        seedMS = MersenneTwister(1234)

        initguess = zeros(Float64, n)

        MAXMS = 1

        # Check if the worker dies after closing the task queue
        worker1 = @async @test begin
            RAFF.consume_tqueue(bqueue, tqueue, squeue,
                model, gmodel!, data, n, p - 2, np,
                MAXMS, seedMS, initguess)
            true
        end

        put!(tqueue, p:p)

        close(tqueue)

        rout1 = take!(squeue)

        sleep(0.1)

        @test istaskdone(worker1)

        # Save the objective function found and run multistart
        # strategy with the same initial random generator
        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        MAXMS = 3

        seedMS = MersenneTwister(1234)

        worker = @async @test begin
            RAFF.consume_tqueue(bqueue, tqueue, squeue,
                model, gmodel!, data, n, p - 2, np,
                MAXMS, seedMS, initguess)
            true
        end

        put!(tqueue, p:p)

        rout2 = take!(squeue)

        close(tqueue)

        fetch(worker)

        close(bqueue)

        # Should find a better point
        @test rout1.f >= rout2.f
        @test rout1.p == rout2.p
        @test istaskdone(worker)

    end
    # `check_and_close` must close all queues only when every worker
    # has died (or all tasks have been consumed).
    @testset "Worker checker" begin

        nworkers = 3

        # Test if all workers are dead
        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(4))

        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        squeue = RemoteChannel(() -> Channel{RAFFOutput}(0))

        futures = Vector{Future}(undef, nworkers)

        for i = 1:nworkers

            futures[i] = @spawn error()

        end

        RAFF.check_and_close(bqueue, tqueue, squeue, futures)

        @test !isopen(bqueue)
        @test !isopen(tqueue)
        @test !isopen(squeue)

        # Test if there is at least one live worker
        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(4))

        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        squeue = RemoteChannel(() -> Channel{RAFFOutput}(0))

        futures = Vector{Future}(undef, nworkers)

        for i = 1:nworkers - 1

            futures[i] = @spawn error()

        end

        futures[nworkers] = @spawn take!(tqueue)

        RAFF.check_and_close(bqueue, tqueue, squeue, futures)

        @test isopen(bqueue)
        @test isopen(tqueue)
        @test isopen(squeue)

        # After the last worker finishes, everything must be closed.
        put!(tqueue, 1:1)

        RAFF.check_and_close(bqueue, tqueue, squeue, futures)

        @test !isopen(bqueue)
        @test !isopen(tqueue)
        @test !isopen(squeue)

        # Ensure that this checker does not closes the queue if the
        # workers have finished their job very fast
        bqueue = RemoteChannel(() -> Channel{Vector{Float64}}(4))

        tqueue = RemoteChannel(() -> Channel{UnitRange{Int}}(0))

        squeue = RemoteChannel(() -> Channel{RAFFOutput}(0))

        futures = Vector{Future}(undef, nworkers)

        for i = 1:nworkers

            futures[i] = @spawn(nothing)

        end

        # Simulates the case where all the tasks have already been
        # taken
        close(tqueue)

        RAFF.check_and_close(bqueue, tqueue, squeue, futures)

        @test isopen(bqueue)
        @test isopen(squeue)

    end
    # Full `praff` run on an exponential model with 3 planted outliers.
    @testset "PRAFF" begin

        model(x, θ) = θ[1] * exp(x[1] * θ[2])

        gmodel!(g, x, θ) = begin
            g[1] = exp(x[1] * θ[2])
            g[2] = x[1] * θ[1] * exp(x[1] * θ[2])
        end

        data = [-1.0 3.2974425414002564;
                -0.75 2.9099828292364025;
                -0.5 2.568050833375483;
                -0.25 2.2662969061336526;
                0.0 2.0;
                0.25 1.764993805169191;
                0.5 1.5576015661428098;
                0.75 1.5745785575819442; #noise
                1.0 1.2130613194252668;
                1.25 1.0705228570379806;
                1.5 0.9447331054820294;
                1.75 0.8337240393570168;
                2.0 0.7357588823428847;
                2.25 0.6493049347166995;
                2.5 0.5730095937203802;
                2.75 0.5056791916094929;
                3.0 0.44626032029685964;
                3.25 0.5938233504083881; #noise
                3.5 0.3475478869008902;
                3.75 0.30670993368985694;
                4.0 0.5706705664732254; #noise
                ]

        answer = [2.0, -0.5]

        # Regular test
        rout = praff(model, data, 2)

        @test rout.solution ≈ answer atol=1.0e-5
        @test rout.p == 18
        @test rout.iter >= size(data)[1]
        @test rout.nf >= 1
        @test rout.nj >= 1

        rout = praff(model, gmodel!, data, 2)

        rgood = rout

        @test rout.solution ≈ answer atol=1.0e-5
        @test rout.p == 18
        @test rout.iter >= size(data)[1]
        @test rout.nf >= 1
        @test rout.nj >= 1

        # Multistart test: extra starts mean more work but at least as
        # good an objective value.
        rout = praff(model, data, 2; MAXMS=2)

        @test rout.solution ≈ answer atol=1.0e-5
        @test rout.p == 18
        @test rout.f <= rgood.f
        @test rout.iter >= 1.1 * rgood.iter
        @test rout.nf >= 1.1 * rgood.nf
        @test rout.nj >= 1.1 * rgood.nj

    end
# Tests for the `noutliers` and `ftrusted` keyword arguments of `praff`.
# NOTE(review): `model` and `gmodel!` are referenced here but were defined
# inside the previous testset's scope; presumably they are also defined in an
# enclosing scope not visible in this chunk — confirm.
@testset "Test parameters" begin
data = [-1.0 3.2974425414002564;
-0.75 2.9099828292364025;
-0.5 2.568050833375483;
-0.25 2.2662969061336526;
0.0 2.0;
0.25 1.764993805169191;
0.5 1.5576015661428098;
0.75 1.5745785575819442; #noise
1.0 1.2130613194252668;
1.25 1.0705228570379806;
1.5 0.9447331054820294;
1.75 0.8337240393570168;
2.0 0.7357588823428847;
2.25 0.6493049347166995;
2.5 0.5730095937203802;
2.75 0.5056791916094929;
3.0 0.44626032029685964;
3.25 0.5938233504083881; #noise
3.5 0.3475478869008902;
3.75 0.30670993368985694;
4.0 0.5706705664732254; #noise
]
answer = [2.0, -0.5]
# Forbidding outliers forces all 21 points to be trusted.
rout = praff(model, gmodel!, data, 2; noutliers=0)
@test rout.p == 21
# Allowing up to 5 outliers: the 3 real ones should be discarded.
rout = praff(model, data, 2; noutliers=5)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
# Same bound expressed as a fraction of trusted points.
rout = praff(model, data, 2; ftrusted=(21 - 5)/21)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
# Degenerate interval pinning p exactly to 18.
rout = praff(model, data, 2; ftrusted=(18/21, 18/21))
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
# Invalid fractions must produce the null RAFFOutput.
@test praff(model, data, 2; ftrusted=(0.5, 1.1)) == RAFFOutput()
@test praff(model, data, 2; ftrusted=-0.1) == RAFFOutput()
end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 7330 | @testset "Simple tests" begin
# Exponential decay model y = θ[1]*exp(x*θ[2]) used throughout these tests.
function model(x, θ)
    return θ[1] * exp(x[1] * θ[2])
end
# Analytic gradient of `model` with respect to θ, stored in-place in `g`.
function gmodel!(g, x, θ)
    ev = exp(x[1] * θ[2])
    g[1] = ev
    g[2] = x[1] * θ[1] * ev
end
# Basic tests for `lmlovo` and `raff` on the exponential data set with three
# perturbed points ("#noise"). Covers small `p`, `p == 0`, invalid arguments,
# and the variants that take the analytic gradient.
@testset "Basic usage" begin
data = [-1.0 3.2974425414002564;
-0.75 2.9099828292364025;
-0.5 2.568050833375483;
-0.25 2.2662969061336526;
0.0 2.0;
0.25 1.764993805169191;
0.5 1.5576015661428098;
0.75 1.5745785575819442; #noise
1.0 1.2130613194252668;
1.25 1.0705228570379806;
1.5 0.9447331054820294;
1.75 0.8337240393570168;
2.0 0.7357588823428847;
2.25 0.6493049347166995;
2.5 0.5730095937203802;
2.75 0.5056791916094929;
3.0 0.44626032029685964;
3.25 0.5938233504083881; #noise
3.5 0.3475478869008902;
3.75 0.30670993368985694;
4.0 0.5706705664732254; #noise
]
answer = [2.0, -0.5]
θ = [0.0, 0.0]
# lmlovo with the correct number of trusted points recovers the solution.
rout = lmlovo(model, θ, data, 2, 18)
@test rout.status == 1
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
@test rout.nf >= rout.iter
@test rout.nj >= rout.iter
θ = [0.0, 0.0]
# Test with small p
rout = lmlovo(model, θ, data, 2, 3)
@test rout.status == 1
@test rout.p == 3
θ = [0.0, 0.0]
rout = raff(model, data, 2)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
@test rout.iter >= size(data)[1]
@test rout.nf >= 1
@test rout.nj >= 1
# Invalid dimension (n = 0) and negative p are rejected up front.
@test_throws AssertionError lmlovo(model, θ, data, 0, 1)
@test_throws AssertionError lmlovo(model, θ, data, 2, -1)
# Edge case p = 0: nothing to fit, every point is an outlier.
rout = lmlovo(model, θ, data, 2, 0)
@test rout.status == 1
@test rout.iter == 0
@test rout.f == 0
@test rout.outliers == [1:size(data)[1];]
@test rout.solution == θ
@test rout.nf == 0
@test rout.nj == 0
# lmlovo with function and gradient
θ = [0.0, 0.0]
rout = lmlovo(model, gmodel!, θ, data, 2, 18)
@test rout.status == 1
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
θ = [0.0, 0.0]
rout = raff(model, gmodel!, data, 2)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
@test rout.iter >= size(data)[1]
@test rout.nf >= 1
@test rout.nj >= 1
@test_throws AssertionError lmlovo(model, gmodel!, θ, data, 0, 1)
@test_throws AssertionError lmlovo(model, gmodel!, θ, data, 2, -1)
# p = 0 edge case, gradient variant.
rout = lmlovo(model, gmodel!, θ, data, 2, 0)
@test rout.status == 1
@test rout.iter == 0
@test rout.f == 0
@test rout.outliers == [1:size(data)[1];]
@test rout.solution == θ
@test rout.nf == 0
@test rout.nj == 0
end
# Test to check Issue #1
# Regression test: presumably `lmlovo` used to fail while printing its report
# for this symmetric quadratic data set — a NullLogger suppresses the output.
@testset "Error in printing" begin
# Quadratic model θ[1]*x² + θ[2]; the 9 points lie exactly on x² + 1.
m(x, θ) = θ[1] * x[1]^2 + θ[2]
A = [ -2.0 5.00;
-1.5 3.25;
-1.0 2.00;
-0.5 1.25;
0.0 1.00;
0.5 1.25;
1.0 2.00;
1.5 3.25;
2.0 5.00 ]
θ = [0.0, 0.0]
# Changes log just for this test
rout = with_logger(Logging.NullLogger()) do
lmlovo(m, θ, A, 2, 4)
end
@test rout.status == 1
@test rout.p == 4
end
# Tests for the `noutliers` and `ftrusted` keyword arguments of `raff`
# (mirrors the corresponding `praff` tests).
@testset "Test parameters" begin
data = [-1.0 3.2974425414002564;
-0.75 2.9099828292364025;
-0.5 2.568050833375483;
-0.25 2.2662969061336526;
0.0 2.0;
0.25 1.764993805169191;
0.5 1.5576015661428098;
0.75 1.5745785575819442; #noise
1.0 1.2130613194252668;
1.25 1.0705228570379806;
1.5 0.9447331054820294;
1.75 0.8337240393570168;
2.0 0.7357588823428847;
2.25 0.6493049347166995;
2.5 0.5730095937203802;
2.75 0.5056791916094929;
3.0 0.44626032029685964;
3.25 0.5938233504083881; #noise
3.5 0.3475478869008902;
3.75 0.30670993368985694;
4.0 0.5706705664732254; #noise
]
answer = [2.0, -0.5]
# Forbidding outliers forces all 21 points to be trusted.
rout = raff(model, gmodel!, data, 2; noutliers=0)
@test rout.p == 21
# Allowing up to 5 outliers: the 3 real ones should be discarded.
rout = raff(model, data, 2; noutliers=5)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
# Same bound expressed as a fraction of trusted points.
rout = raff(model, data, 2; ftrusted=(21 - 5)/21)
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
rout = raff(model, data, 2; ftrusted=(18/21, 18/21))
@test rout.f ≈ 0.0 atol=1.0e-5
@test rout.solution ≈ answer atol=1.0e-5
@test rout.p == 18
# Invalid fractions must produce the null RAFFOutput.
@test raff(model, data, 2; ftrusted=(0.5, 1.1)) == RAFFOutput()
@test raff(model, data, 2; ftrusted=-0.1) == RAFFOutput()
end
# Tests for RAFFOutput
# Covers the default/partial constructors, the deprecated constructor that
# lacked `nf`/`nj`, and the printed representation of the struct.
@testset "RAFFOutput tests" begin
nullOut = RAFFOutput(0, [], -1, 0, Inf, -1, -1, [])
@test RAFFOutput() == nullOut
@test RAFFOutput(0) == nullOut
# Check if deprecated version is creating `nf` and `nj`
@test RAFFOutput(0, [], -1, 0, Inf, []) == nullOut
nullPOut = RAFFOutput(0, [], -1, 10, Inf, -1, -1, [])
@test nullPOut == RAFFOutput(10)
# Test output
# Each field must appear in the printed text as "(.field) = value"; regexes
# are built from the actual field values, escaping `[`/`]` where needed.
raff_output = RAFFOutput(1, ones(5), 2, 6, - 1.0, 10, 20, ones(Int, 6))
io = IOBuffer()
print(io, raff_output)
s = String(take!(io))
rx = Regex("\\(\\.status\\) = " * string(raff_output.status))
@test match(rx, s) !== nothing
svec = replace(string(raff_output.solution), r"([\[\]])"=>s"\\\1")
rx = Regex("\\(\\.solution\\) = " * svec)
@test match(rx, s) !== nothing
rx = Regex("\\(\\.iter\\) = " * string(raff_output.iter))
@test match(rx, s) !== nothing
rx = Regex("\\(\\.p\\) = " * string(raff_output.p))
@test match(rx, s) !== nothing
rx = Regex("\\(\\.f\\) = " * string(raff_output.f))
@test match(rx, s) !== nothing
rx = Regex("\\(\\.nf\\) = " * string(raff_output.nf))
@test match(rx, s) !== nothing
rx = Regex("\\(\\.nj\\) = " * string(raff_output.nj))
@test match(rx, s) !== nothing
svec = replace(string(raff_output.outliers), r"([\[\]])"=>s"\\\1")
rx = Regex("\\(\\.outliers\\) = " * svec)
@test match(rx, s) !== nothing
end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1807 | @testset "Util. tests" begin
# Tests for `RAFF.check_ftrusted`, which maps a fraction (or an interval of
# fractions) of trusted points into an integer interval of point counts.
@testset "ftrusted" begin
# Integers are not accepted as fractions.
@test_throws MethodError RAFF.check_ftrusted(1, 15)
@test RAFF.check_ftrusted(0.0, 15) == (0, 15)
@test RAFF.check_ftrusted(0.3, 15) == (4, 15)
@test RAFF.check_ftrusted(1.0, 15) == (15, 15)
# Fractions outside [0, 1] are rejected.
@test_throws ErrorException RAFF.check_ftrusted(-1.0, 15)
# An interval of fractions maps to an interval of point counts.
@test RAFF.check_ftrusted((0.1, 0.4) , 15) == (2, 6)
# Decreasing interval, upper bound > 1, and negative lower bound all fail.
@test_throws ErrorException RAFF.check_ftrusted((0.4, 0.1) , 15)
@test_throws ErrorException RAFF.check_ftrusted((0.1, 1.1) , 15)
@test_throws ErrorException RAFF.check_ftrusted((-0.4, 0.1) , 15)
end
# Tests for `RAFF.sort_fun!`, a partial sort that moves the `k` smallest
# values of `v` to the front, records their original indices in `ind`, and
# returns views of the first `k` positions of both vectors.
@testset "Sort function" begin
ind = Vector{Int}(undef, 3)
v = [3.0, 2.0, 1.0]
pi, pv = RAFF.sort_fun!(v, ind, 2)
@test(length(pi) == 2)
@test(length(pv) == 2)
@test(pi == [3, 2])
@test(pv == [1.0, 2.0])
# The returned views alias the leading parts of `ind` and `v`.
@test(pi == ind[1:2])
@test(pv == v[1:2])
ind = Vector{Int}(undef, 4)
v = [1.0, 3.0, 1.0, 1.0]
# With ties, any permutation of the tied indices is acceptable.
pi, pv = RAFF.sort_fun!(v, ind, 3)
@test(issubset(pi, [1, 3, 4]))
@test(pv == ones(3))
@test(pv == v[1:3])
v = [1.0, 3.0, 1.0, 1.0]
pi, pv = RAFF.sort_fun!(v, ind, 1)
@test(pi == [1])
@test(pv == [1.0])
# Extreme behavior
# k <= 0: nothing is sorted, empty views are returned, inputs untouched.
ind = zeros(Int, 3)
v = [3.0, 2.0, 1.0]
pi, pv = RAFF.sort_fun!(v, ind, 0)
@test(length(pi) == length(pv) == 0)
@test(v == [3.0, 2.0, 1.0])
@test(ind == zeros(3))
pi, pv = RAFF.sort_fun!(v, ind, -1)
@test(length(pi) == length(pv) == 0)
@test(v == [3.0, 2.0, 1.0])
@test(ind == zeros(3))
end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1167 | # This example shows a simple use for a channel and a feeder
using Distributed
using Logging
using Base.CoreLogging
# Set Debug for Logging
global_logger(ConsoleLogger(stdout, Logging.Debug))
# Consume `(x, i)` pairs from `channel` until it is closed, logging each pair
# at debug level. Failed reads are reported with a warning and skipped; an
# `InvalidStateException` (closed, drained channel) terminates the loop.
function read_and_print(channel::Channel{Tuple{Vector{Int}, Int}})
    @debug("Running updater.")
    closed = false
    while !closed
        item = nothing
        try
            item = take!(channel)
        catch e
            if isa(e, InvalidStateException)
                closed = true
            else
                @warn("Something wrong when reading from channel. Will skip.", e)
            end
        end
        if item !== nothing
            x, i = item
            @debug("Updater has read values from channel. x = $(x) and i = $(i).")
        end
    end
    @debug("Update channel closed. Exiting thread.")
end
# Unbuffered channel: producers block until the updater takes each value.
c = Channel{Tuple{Vector{Int}, Int}}(0)
# Task waits for the channel to be fed
task1 = @async read_and_print(c)
# Feeding the channel
task2 = @async for i = 1:10
put!(c, (ones(Int, 3), i))
end
sleep(1)
# Wrong feeding
# NOTE(review): this `put!` runs in the main task, so the conversion failure
# (Int cannot convert to the channel's Tuple type) raises here and the lines
# below are never reached — confirm this is the intended demonstration.
put!(c, 1)
# This should close `task1`
close(c)
# Just to be sure
sleep(1)
println("Task1 done = $(istaskdone(task1))")
println("Task2 done = $(istaskdone(task2))")
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 3262 | # This example shows how to get mouse clicks in Julia
# Packages needed
#
# add Gtk
# add GtkReactive
# add Graphics
# add Colors
using Gtk, Gtk.ShortNames, GtkReactive, Graphics, Colors
# GUI setup: a window holding a single drawable canvas.
win = Window("Drawing")
c = canvas(UserUnit) # create a canvas with user-specified coordinates
push!(win, c)
# Application state, held in GtkReactive signals so redraws are triggered
# automatically whenever they are pushed to.
const lines = Signal([]) # the list of lines that we'll draw
const newline = Signal([]) # the in-progress line (will be added to list above)
const drawing = Signal(false) # this will become true if we're actively dragging
# c.mouse.buttonpress is a `Reactive.Signal` that updates whenever the
# user clicks the mouse inside the canvas. The value of this signal is
# a MouseButton which contains position and other information.
# We're going to define a callback function that runs whenever the
# button is clicked. If we just wanted to print the value of the
# returned button object, we could just say
# map(println, c.mouse.buttonpress)
# However, here our function is longer than `println`, so
# we're going to use Julia's do-block syntax to define the function:
sigstart = map(c.mouse.buttonpress) do btn
# This is the beginning of the function body, operating on the argument `btn`
if btn.button == 1 && btn.modifiers == 0 # is it the left button, and no shift/ctrl/alt keys pressed?
push!(drawing, true) # activate dragging
push!(newline, [btn.position]) # initialize the line with the current position
end
end
# Default value emitted by `filterwhen` while `drawing` is false.
const dummybutton = MouseButton{UserUnit}()
# See the Reactive.jl documentation for `filterwhen`
sigextend = map(filterwhen(drawing, dummybutton, c.mouse.motion)) do btn
# while dragging, extend `newline` with the most recent point
push!(newline, push!(value(newline), btn.position))
end
sigend = map(c.mouse.buttonrelease) do btn
if btn.button == 1
push!(drawing, false) # deactivate dragging
# append our new line to the overall list
push!(lines, push!(value(lines), value(newline)))
# For the next click, make sure `newline` starts out empty
push!(newline, [])
end
end
# Stroke the polyline given by the list of points `l` on the graphics
# context `ctx` using `color`. An empty list draws nothing.
function drawline(ctx, l, color)
    isempty(l) && return
    start = first(l)
    move_to(ctx, start.x, start.y)
    set_source(ctx, color)
    for pt in Iterators.drop(l, 1)
        line_to(ctx, pt.x, pt.y)
    end
    stroke(ctx)
end
# Because `draw` isn't a one-line function, we again use do-block syntax:
# The draw callback re-renders the whole scene whenever `lines` or `newline`
# changes.
redraw = draw(c, lines, newline) do cnvs, lns, newl # the function body takes 3 arguments
fill!(cnvs, colorant"white") # set the background to white
set_coordinates(cnvs, BoundingBox(0, 1, 0, 1)) # set coordinates to 0..1 along each axis
ctx = getgc(cnvs) # gets the "graphics context" object (see Cairo/Gtk)
for l in lns
drawline(ctx, l, colorant"blue") # draw old lines in blue
end
drawline(ctx, newl, colorant"red") # draw new line in red
end
showall(win)
#If we are not in a REPL
if (!isinteractive())
# Create a condition object
# NOTE(review): this rebinds `c` (previously the canvas) to a Condition;
# harmless here since the canvas is no longer referenced by name below.
c = Condition()
# Get the window
# win = guidict["gui"]["window"]
# Notify the condition object when the window closes
signal_connect(win, :destroy) do widget
notify(c)
end
# Wait for the notification before proceeding ...
wait(c)
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 941 | # This is a minimal working example for running LMlovo on several
# workers in Julia and saving them into a shared matrix
using Random
using Revise
using Distributed
using SharedArrays
@everywhere using RAFF
@everywhere using Base.CoreLogging
# Silence log messages below Warn on every worker.
@everywhere CoreLogging._min_enabled_level[] = CoreLogging.Warn + 1
# Gradient of the model below, in-place in `g` (defined on all workers).
@everywhere gmodel!(x, t_, g) = begin
g[1] = exp(t_ * x[2])
g[2] = t_ * x[1] * exp(t_ * x[2]);
end
@everywhere model(x, t) = x[1] * exp(t * x[2])
n = 2
np = 1000
p = 700
# Set the seed for generating the same data
Random.seed!(123456789)
data, xSol = RAFF.generateNoisyData(model, n, np, p)
# Shared matrix collecting one solution column per value of p tried.
result = SharedArray{Float64, 2}(n, np)
f = @sync @distributed for i = 1:np
# Starting point
x = zeros(Float64, 2)
# Call function and store results
# NOTE(review): `LMlovo`/`generateNoisyData` use the old 0.1.x API names;
# other examples call `lmlovo`/`generate_noisy_data` — confirm the RAFF
# version this script targets.
s, x, iter, p, f = LMlovo(model, gmodel!, x, data, 2, i, MAXITER=1000)
result[:, i] .= x
println("Finished. p = $(p) and f = $(f).")
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1320 | # This example shows a simple use for a remote channel and a feeder
using Distributed
@everywhere using Logging
@everywhere using Base.CoreLogging
# Set Debug for Logging
@everywhere global_logger(ConsoleLogger(stdout, Logging.Debug))
# Consume `(x, i)` pairs from the remote `channel` until it is closed,
# logging each pair at debug level. Failed reads are reported with a warning
# and skipped; an `InvalidStateException` (closed, drained channel)
# terminates the loop.
function read_and_print(channel::RemoteChannel)
    @debug("Running updater.")
    closed = false
    while !closed
        item = nothing
        try
            item = take!(channel)
        catch e
            if isa(e, InvalidStateException)
                closed = true
            else
                @warn("Something wrong when reading from channel. Will skip.", e)
            end
        end
        if item !== nothing
            x, i = item
            @debug("Updater has read values from channel. x = $(x) and i = $(i).")
        end
    end
    @debug("Update channel closed. Exiting thread.")
end
# Unbuffered remote channel owned by the main process.
c = RemoteChannel(() -> Channel{Tuple{Vector{Int}, Int}}(0))
# Task in main process that waits for the channel to be fed
task1 = @async read_and_print(c)
# Feeding the channel
# `@sync @distributed` blocks until every worker iteration has finished.
task2 = @sync @distributed for i = 1:10
put!(c, (ones(Int, 3), i))
sleep(rand() / 2)
@debug("Process $(myid()) has fed the channel.")
end
sleep(1)
# Wrong feeding
# NOTE(review): this `put!` raises in the main script (Int does not convert
# to the channel's Tuple type), so the remaining lines may never execute —
# confirm this is the intended demonstration.
put!(c, 1)
# This should close `task1`
close(c)
# Just to be sure
sleep(1)
println("Task1 done = $(istaskdone(task1))")
println("Task2 done = $(istaskdone(task2))")
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 616 | # This is a minimal working example for running lmlovo function
using Random
using RAFF
# Set Logging.Debug for Logging
using Logging
using Base.CoreLogging
# Enable debug-level logging for this example.
global_logger(ConsoleLogger(stdout, Logging.Debug))
# Gradient of the model below, in-place in `g`.
gmodel!(x, t_, g) = begin
g[1] = exp(t_ * x[2])
g[2] = t_ * x[1] * exp(t_ * x[2]);
end
model(x, t) = x[1] * exp(t * x[2])
n = 2
np = 100
p = 70
# Set the seed for generating the same data
Random.seed!(123456789)
data, xSol = RAFF.generateNoisyData(model, n, np, p, [2.0, -0.5])
# NOTE(review): tuple destructuring of the return matches the 0.1.x API;
# later versions return a RAFFOutput struct — confirm the targeted version.
@time status, x, iter, p_, f = lmlovo(model, data, n, p)
println("True sol: $(xSol).")
println("Found: $x.")
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 686 | # This is a minimal working example for running praff on several
# workers in Julia and saving them into a shared matrix
using Random
@everywhere using RAFF
# Set Debug for Logging
@everywhere using Logging
@everywhere using Base.CoreLogging
# Only error messages on every worker.
@everywhere global_logger(ConsoleLogger(stdout, Logging.Error))
# Gradient of the model below, in-place in `g` (defined on all workers).
@everywhere gmodel!(x, t_, g) = begin
g[1] = exp(t_ * x[2])
g[2] = t_ * x[1] * exp(t_ * x[2]);
end
@everywhere model(x, t) = x[1] * exp(t * x[2])
n = 2
np = 100
p = 70
# Set the seed for generating the same data
Random.seed!(123456789)
data, xSol, = RAFF.generateNoisyData(model, n, np, p)
# Distributed RAFF with two multistart runs per worker.
praff(model, gmodel!, data, n, MAXMS=2)
println("True sol: $(xSol).")
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 4151 | using RAFF
using Printf
using Random
using Logging
using Base.CoreLogging
"""
calc_ratio( model_str::String, np::Int, p::Int,
sol::Vector{Float64}, ntests::Int=10,
initguess::Vector{Float64}=nothing, maxms::Int=1,
fp::IOStream=stdout)
Run a sequence of `ntests` tests for model `model_str` using a dataset
of `np` points and `p` trusted points (i. e. `np - p` outliers). The
points are generated by perturbations around *solution* `sol`. See
function [`generate_noisy_data`](@ref) for more details about the
generation of random points and outliers.
If provided, `initguess` is used as a starting points for
[`raff`](@ref), `maxms` is the number of random initial points used
and `fp` is the output.
"""
function calc_ratio( model_str::String, np::Int, p::Int,
sol::Vector{Float64}, ntests::Int=10,
initguess=nothing, maxms::Int=1,fp=stdout; cluster=nothing)
# Set Logging
global_logger(ConsoleLogger(stdout, Logging.Error))
n, model, = RAFF.model_list[model_str]
tmpsol = Vector{Float64}(undef, n)
large_number = 179424673
# Tests
tot_tim = 0.0
n_match = zeros(Int, np + 1)
n_exact = 0
n_out = 0
# Number of correct outliers found
n_cout = 0
# Number of false positives found
n_fp = 0
for i = 1:ntests
# Define seed for this run
Random.seed!(large_number + i)
if initguess == nothing
x = zeros(Float64, n)
else
x = copy(initguess)
end
tmpsol .= sol
data, tmpsol, tout = if cluster == nothing
generate_noisy_data(model, n, np, p, tmpsol, (1.0, 30.0))
else
generate_clustered_noisy_data(model, n, np, p, tmpsol,
(1.0, 30.0), cluster)
end
rsol, t, = @timed praff(model, data[:, 1:end - 1], n;
MAXMS=maxms, initguess=x, ftrusted=0.7)
cnt = 0
for k in rsol.outliers
(k in tout) && (cnt += 1)
end
n_out += length(rsol.outliers)
n_cout += cnt
n_fp += length(rsol.outliers) - cnt
n_match[cnt + 1] += 1
(cnt == np - p) && (length(rsol.outliers) == np - p) && (n_exact += 1)
tot_tim += t
end
@printf(fp, "%10s %5d %5d %10.8f %10.8f %10.8f %10.8f %10.2f %5d %5d %5d %8.4f\n",
model_str, np, p, n_match[np - p + 1] /
(1.0 * ntests), n_exact / (1.0 * ntests), n_cout / ntests,
n_fp / ntests, n_out / ntests, n_match[1], n_match[2],
n_match[3], tot_tim)
return n_match
end
"""
run_calc_ratio(filename="/tmp/table.txt")
Perform a sequence of tests for different models and a combination of
different number of outliers.
Saves the results in `filename`.
"""
function run_calc_ratio(filename="/tmp/table.txt")
for (model_str, sol) in [ ("linear", [-200.0, 1000.0]), ("cubic", [0.5, -20.0, 300.0, 1000.0]),
("expon", [5000.0, 4000.0, 0.2]),
("logistic", [6000.0, -5000, -0.2, -3.7]) ]
for (np, p) in [(10, 9), (10, 8), (100, 99), (100, 90)]
for maxms in [1, 10, 100, 1000]
open(filename, "a") do fp
calc_ratio(model_str, np, p, sol, 1000, nothing, maxms, fp);
end
end
end
end
end
"""
run_calc_ratio_clustered(filename="/tmp/table.txt")
Perform a sequence of tests for different models for solving problems
with clustered outliers.
Saves the results in `filename`.
"""
function run_calc_ratio_clustered(filename="/tmp/table.txt")
np = 100
p = 90
maxms = 100
for (model_str, sol) in [ ("linear", [-200.0, 1000.0]), ("cubic", [0.5, -20.0, 300.0, 1000.0]),
("expon", [5000.0, 4000.0, 0.2]),
("logistic", [6000.0, -5000, -0.2, -3.7]) ]
open(filename, "a") do fp
calc_ratio(model_str, np, p, sol, 1000, nothing, maxms, fp, cluster=(5.0, 10.0));
end
end
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 2756 | using DelimitedFiles
using PyPlot
using RAFF
"""
draw_problem(M; raff_output=nothing, model_str="logistic",
datafile="/tmp/output.txt")
draw_problem(datafile::String="/tmp/output.txt"; kwargs...)
Draw the problem data given by a (`m`x`3`) `M` matrix. By default it
is assumed that the model is given by the logistic model. If no
arguments are given it is assumed that the data file is given in file
`/tmp/output.txt`.
If a [RAFFOutput](@ref) object is provided, then it plots the model
found and also the outliers (true and false positives).
Optional arguments:
- `raff_output`: [RAFFOutput](@ref) object with the solution
obtained
- `model_str`: a string with the name of the model to be used to
plot the solution. See [model_list](@ref) for details.
"""
function draw_problem(M; raff_output=nothing, model_str="logistic",
θsol=nothing)
x = M[:, 1]
y = M[:, 2]
co = M[:, 3]
true_outliers = findall(co .!= 0.0)
PyPlot.scatter(x[co .== 0.0], y[co .== 0.0], color=PyPlot.cm."Pastel1"(2.0/9.0),
marker="o", s=50.0, linewidths=0.2)
PyPlot.scatter(x[co .!= 0.0], y[co .!= 0.0], color=PyPlot.cm."Pastel1"(2.0/9.0),
marker="^", s=50.0, linewidths=0.2, label="Outliers")
if raff_output != nothing
n, model, modelstr = RAFF.model_list[model_str]
modl1 = (x) -> model(x, raff_output.solution)
t = minimum(x):0.01:maximum(x)
PyPlot.plot(t, modl1.(t), color=PyPlot.cm."Set1"(2.0/9.0))
# Draw outliers found by RAFF
true_positives = intersect(true_outliers, raff_output.outliers)
false_positives = setdiff(raff_output.outliers, true_positives)
PyPlot.scatter(x[false_positives], y[false_positives],
color=PyPlot.cm."Pastel1"(0.0/9.0), marker="o",
linewidths=0.2, edgecolors="k", s=50.0, label="False positives")
PyPlot.scatter(x[true_positives], y[true_positives],
color=PyPlot.cm."Pastel1"(0.0/9.0), marker="^",
s=50.0, linewidths=0.2, edgecolors="k", label="Identified outliers")
end
# Plot the true solution, if available
if θsol != nothing
modl2 = (x) -> model(x, θsol)
PyPlot.plot(t, modl2.(t), color=PyPlot.cm."Set1"(1.0/9.0),
linestyle="--")
end
PyPlot.legend(loc="best")
PyPlot.show()
PyPlot.savefig("/tmp/figure.png", DPI=150)
end
# Load a problem from `datafile` (first line: dimension header; remaining
# lines: data matrix) and forward it to `draw_problem(M; ...)`.
function draw_problem(datafile::String="/tmp/output.txt"; kwargs...)
    M = open(datafile, "r") do io
        readline(io)  # skip the dimension header (unused here)
        readdlm(io)
    end
    draw_problem(M; kwargs...)
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 6568 | # This script generates and draws examples for the circle detection
using PyPlot
using Printf
using DelimitedFiles
using RAFF
"""
gen_circle(np::Int, p::Int; std::Float64=0.1,
θSol::Vector{Float64}=10.0*randn(Float64, 3),
outTimes::Float64=5.0,
interval::Vector{Float64}=rand(np)*2.0*π)
Generate perturbed points in a circle given by `θSol`. Construct a
test file for `RAFF`.
"""
function gen_circle(np::Int, p::Int; std::Float64=0.1,
θSol::Vector{Float64}=10.0*randn(Float64, 3),
outTimes::Float64=5.0, interval::Vector{Float64}=rand(np)*2.0*π)
ρ = (α, ρ) -> [ρ * cos(α) + θSol[1], ρ * sin(α) + θSol[2]]
f = (x) -> (x[1] - θSol[1])^2 + (x[2] - θSol[2])^2 - θSol[3]^2
data = Array{Float64, 2}(undef, np, 4)
# Points selected to be outliers
v = RAFF.get_unique_random_points(np, np - p)
for (i, α) in enumerate(interval)
pt = ρ(α, θSol[3] + std * randn())
data[i, 3:4] = 0.0 #f(pt)
if i in v
pt = ρ(α, θSol[3] * (1.0 + 2 * rand()) * outTimes * std * sign(randn()))
data[i, 4] = 1.0
end
data[i, 1:2] = pt
end
open("/tmp/output.txt", "w") do fp
# Dimension of the domain of the function to fit
@printf(fp, "%d\n", 2)
for k = 1:np
@printf(fp, "%20.15f %20.15f %20.15f %1d\n",
data[k, 1], data[k, 2], data[k, 3], Int(k in v))
end
end
return data, v
end
"""
    gen_ncircle(np::Int, p::Int; std::Float64=0.1,
                θSol::Vector{Float64}=10.0*randn(Float64, 3),
                outTimes::Float64=5.0,
                interval::Vector{Float64}=rand(p)*2.0*π)

Like `gen_circle`, but the `np - p` outliers are drawn uniformly from the
square of side `4 * θSol[3]` centered at the circle center, instead of
being radial perturbations. Writes `/tmp/output.txt` and returns the data
matrix and the outlier indices.
"""
function gen_ncircle(np::Int, p::Int; std::Float64=0.1,
θSol::Vector{Float64}=10.0*randn(Float64, 3),
outTimes::Float64=5.0, interval::Vector{Float64}=rand(p)*2.0*π)
# Cartesian point at angle α and radius ρ around the circle center.
ρ = (α, ρ) -> [ρ * cos(α) + θSol[1], ρ * sin(α) + θSol[2]]
f = (x) -> (x[1] - θSol[1])^2 + (x[2] - θSol[2])^2 - θSol[3]^2
data = Array{Float64, 2}(undef, np, 4)
# First the `p` good points: radius perturbed by Gaussian noise of size `std`.
for (i, α) in enumerate(interval)
pt = ρ(α, θSol[3] + std * randn())
data[i, 1:2] = pt
data[i, 3:4] .= 0.0 #f(pt)
end
# Just random noise
v = Vector{Int}(undef, np - p)
for i = p + 1:np
data[i, 1] = θSol[1] - 2.0 * θSol[3] + rand() * 4.0 * θSol[3]
data[i, 2] = θSol[2] - 2.0 * θSol[3] + rand() * 4.0 * θSol[3]
data[i, 3] = 0.0
data[i, 4] = 1.0
v[i - p] = i
end
open("/tmp/output.txt", "w") do fp
# Dimension of the domain of the function to fit
@printf(fp, "%d\n", 2)
for k = 1:np
@printf(fp, "%20.15f %20.15f %20.15f %1d\n",
data[k, 1], data[k, 2], data[k, 3], Int(k in v))
end
end
return data, v
end
"""
Draw the points generated by the previous function.
"""
function draw_circle(data, outliers)
np, = size(data)
c = zeros(np)
c[outliers] .= 1.0
PyPlot.scatter(data[:, 1], data[:, 2], c=c, marker="o", s=50.0, linewidths=0.2,
cmap=PyPlot.cm["Paired"], alpha=0.9)
PyPlot.axis("scaled")
PyPlot.xticks([])
PyPlot.yticks([])
PyPlot.savefig("/tmp/circle.png", dpi=72)
end
"""
Draw the points and the solutions obtained. Save the picture in a file.
"""
function draw_circle_sol(M; model_str="circle", raff_output=nothing, other_sols...)
x = M[:, 1]
y = M[:, 2]
co = M[:, 4]
t = 0:0.1:2.1 * π
ptx = (α, ρ, d) -> ρ * cos(α) + d[1]
pty = (α, ρ, d) -> ρ * sin(α) + d[2]
# Plot data
true_outliers = findall(co .!= 0.0)
PyPlot.scatter(x[co .== 0.0], y[co .== 0.0], color=PyPlot.cm."Pastel1"(2.0/9.0),
marker="o", s=50.0, linewidths=0.2)
PyPlot.scatter(x[co .!= 0.0], y[co .!= 0.0], color=PyPlot.cm."Pastel1"(2.0/9.0),
marker="^", s=25.0, linewidths=0.2, label="Outliers")
if raff_output != nothing
n, model, modelstr = RAFF.model_list[model_str]
fSol = raff_output.solution
modl1x = (α) -> ptx(α, fSol[3], fSol[1:2])
modl1y = (α) -> pty(α, fSol[3], fSol[1:2])
PyPlot.plot(modl1x.(t), modl1y.(t), color=PyPlot.cm."Set1"(2.0/9.0))
# Draw outliers found by RAFF
true_positives = intersect(true_outliers, raff_output.outliers)
false_positives = setdiff(raff_output.outliers, true_positives)
if length(false_positives) > 0
PyPlot.scatter(x[false_positives], y[false_positives],
color=PyPlot.cm."Pastel1"(0.0/9.0), marker="o",
linewidths=0.2, edgecolors="k", s=50.0, label="False positives")
end
if length(true_positives) > 0
PyPlot.scatter(x[true_positives], y[true_positives],
color=PyPlot.cm."Pastel1"(0.0/9.0), marker="^",
s=50.0, linewidths=0.2, edgecolors="k", label="Identified outliers")
end
end
PyPlot.legend(loc="best")
PyPlot.axis("scaled")
PyPlot.xticks([])
PyPlot.yticks([])
PyPlot.savefig("/tmp/circle.png", dpi=150, bbox_inches="tight")
end
"""
    draw_circle_sol(tSol, fSol, lsSol)

Read the problem from `/tmp/output.txt` and plot the true solution `tSol`
against the solution `fSol` found by RAFF (the least-squares solution
`lsSol` is accepted but its plotting is currently commented out). Each
solution vector is (center x, center y, radius). Saves `/tmp/circle.png`.
"""
function draw_circle_sol(tSol, fSol, lsSol)
datafile = "/tmp/output.txt"
fp = open(datafile, "r")
# First line holds the dimension header; the rest is the data matrix.
N = parse(Int, readline(fp))
M = readdlm(fp)
close(fp)
x = M[:, 1]
y = M[:, 2]
ρ = M[:, 3]
co = M[:, 4]
t = [0:0.1:2.1 * π;]
# Parametric coordinates of a circle with radius ρ and center d.
ptx = (α, ρ, d) -> ρ * cos(α) + d[1]
pty = (α, ρ, d) -> ρ * sin(α) + d[2]
# True solution
pptx = (α) -> ptx(α, tSol[3], tSol[1:2])
ppty = (α) -> pty(α, tSol[3], tSol[1:2])
PyPlot.plot(pptx.(t), ppty.(t), "b--", label="True solution")
# RAFF solution
pptx = (α) -> ptx(α, fSol[3], fSol[1:2])
ppty = (α) -> pty(α, fSol[3], fSol[1:2])
PyPlot.plot(pptx.(t), ppty.(t), "g-", label="RAFF")
# # LS solution
# pptx = (α) -> ptx(α, lsSol[3], lsSol[1:2])
# ppty = (α) -> pty(α, lsSol[3], lsSol[1:2])
# PyPlot.plot(pptx.(t), ppty.(t), "r-", label="Least squares")
PyPlot.scatter(x[co .== 0.0], y[co .== 0.0], color=PyPlot.cm."Pastel1"(2.0/9.0),
marker="o", s=50.0, linewidths=0.2)
PyPlot.scatter(x[co .!= 0.0], y[co .!= 0.0], color=PyPlot.cm."Pastel1"(1.0/9.0),
marker=".", s=25.0, linewidths=0.2, label="Outliers")
PyPlot.legend(loc=4)
PyPlot.axis("scaled")
PyPlot.xticks([])
PyPlot.yticks([])
PyPlot.savefig("/tmp/circle.png", dpi=150, bbox_inches="tight")
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1581 | using PyPlot,LinearAlgebra,Random
"""
```
gen_ellipse(F1:Vector{Float64},F2=Vector{Float64},
a::Float64,p::Int,npun::Int,ntot::Int,
tmin::Float64,tmax::Float64)
```
Generate perturbed points in a ellipse given focus F1 and F2 major axis a. It is mandatory to define the number of points `ntot` the of exacts points `p` and the number of point to compose the ellipse `npun`.
`ntot-npun` is the number of random points
`npun-p` is the number of points of the ellipse which are normally-distributed.
`tmin` and `tmax` are parameter to define limits for the figure.
## Example
```julia-repl
julia> gen_ellipse([1.0,2.0],[3.0,5.0],5.0,0,100,1000,-10,10)
```
"""
function gen_ellipse(F1,F2,a,p,npun,ntot,tmin,tmax)
c = norm(F2-F1,2)
if a<=c
error("a<=c, it isn't an elipse")
end
b = sqrt(a^2-c^2)
# let us assume β is the rotation angle from (1,0)
cosβ = (F2-F1)[1]/norm(F2-F1,2)
sinβ = sqrt(1.0-cosβ)
center = 0.5*(F1+F2)
rng = MersenneTwister(1234)
x = zeros(ntot)
y = zeros(ntot)
θ = shuffle(rng,[0:(2*π)/npun:2*π;])
for i=1:p
x[i] = center[1]*cosβ+b*cos(θ[i])*cosβ+center[2]*sinβ+a*sin(θ[i])*sinβ
y[i] = -center[1]*sinβ -b*cos(θ[i])*sinβ+center[2]*cosβ+a*sin(θ[i])*cosβ
end
for i=p+1:npun
anoise = a+0.1*randn()
bnoise = b+0.1*randn()
x[i] = center[1]*cosβ+bnoise*cos(θ[i])*cosβ+center[2]*sinβ+anoise*sin(θ[i])*sinβ
y[i] = -center[1]*sinβ -bnoise*cos(θ[i])*sinβ+center[2]*cosβ+anoise*sin(θ[i])*cosβ
end
x[npun+1:end]= tmin.+(tmax-tmin)*rand(ntot-npun)
y[npun+1:end]= tmin.+(tmax-tmin)*rand(ntot-npun)
plot(x,y,".")
return [x y]
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 2158 | using RAFF
using Printf
using ArgParse
using Random
"""
    mainf()

Command-line driver: parse the arguments in `ARGS` (number of points,
number of trusted points, model name, output file name, optional true
solution, optional cluster interval and run index) and generate a random
test problem with `generate_test_problems`, writing the problem and its
solution to files under `/tmp`.
"""
function mainf()
s = ArgParseSettings()
@add_arg_table s begin
"np"
help = "Number of points"
arg_type = Int
required = true
"p"
help = "Number of trusted points"
arg_type = Int
required = true
"--model"
help = "Model function: linear, cubic, expon, logistic"
arg_type = String
default = "linear"
"--fname"
help = "File name"
arg_type = String
default = "output"
"--sol"
help = "Solution"
nargs = '*'
arg_type = Float64
"--cint"
help = "Cluster interval"
nargs = 2
arg_type = Float64
"--i"
help = "Generate i-th run."
arg_type = Int
default = 0
end
# Main program
parsed_args = parse_args(ARGS, s)
n, model, modelStr = RAFF.model_list[parsed_args["model"]]
x_interval = (- 0.0, 30.0)
θSol = Vector{Float64}(undef, n)
# Either draw a random solution or take the user-supplied one (its length
# must match the model's number of parameters).
if length(parsed_args["sol"]) == 0
randn!(θSol)
elseif length(parsed_args["sol"]) == n
θSol .= parsed_args["sol"]
else
error("Incorrect number of elements in solution")
end
fname = parsed_args["fname"]
try
ffname = "/tmp/" * fname * ".txt"
fsname = "/tmp/" * fname * "_sol.txt"
# Reproducible data for run i (prime base seed plus the run index).
if parsed_args["i"] != 0
Random.seed!(179424673 + parsed_args["i"])
end
# With a cluster interval, the outliers are clustered inside it.
if length(parsed_args["cint"]) == 2
generate_test_problems(ffname, fsname, model, modelStr, n,
parsed_args["np"], parsed_args["p"], x_interval,
Tuple(parsed_args["cint"]); θSol=θSol, std=200.0)
else
generate_test_problems(ffname, fsname, model, modelStr, n,
parsed_args["np"], parsed_args["p"];
x_interval=x_interval, θSol=θSol, std=200.0)
end
@printf("Created problem and solution files.\n")
catch e
@printf("Problems when creating test problem.\n")
println(e)
end
end
# Run immediately when the script is executed.
mainf()
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 726 | using RAFF
using DelimitedFiles
using Printf
# Set Debug for Logging
using Logging
using Base.CoreLogging
"""
    run_lmlovo(p::Int, initguess=nothing; model_str="logistic")

Read the problem stored in `/tmp/output.txt` (first line: dimension
header; remaining lines: data matrix) and solve it with `lmlovo`
assuming `p` trusted points. `initguess` defaults to the origin and
`model_str` selects the model from `RAFF.model_list`. Prints a short
report and returns the solution object found.
"""
function run_lmlovo(p::Int, initguess=nothing; model_str="logistic")
# Show only error messages from RAFF.
global_logger(ConsoleLogger(stdout, Logging.Error))
n, model, modelstr = RAFF.model_list[model_str]
# `global` is required because the do-block body is a closure; `data` is
# also left available for interactive inspection afterwards.
open("/tmp/output.txt") do fp
global N = parse(Int, readline(fp))
global data = readdlm(fp)
end
# Use `===` for the sentinel comparison (`== nothing` is not idiomatic
# and can be hijacked by overloaded equality).
if initguess === nothing
initguess = zeros(Float64, n)
end
rsol = lmlovo(model, initguess, data[:, 1:2], n, p)
@printf("Solution found:
fbest = %f
p = %d\n", rsol.f, rsol.p)
println(rsol.solution)
return rsol
end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 1817 | # This script runs all the tests and create a table for comparison.
using RAFF
using Printf
using DelimitedFiles
fpath = "../test_problems/"
# Translate a problem-file name prefix into the corresponding model key
# of `RAFF.model_list`. Unknown prefixes raise a `KeyError`.
getType = let translation = Dict(
        "C"   => "cubic",
        "L"   => "linear",
        "LOG" => "logistic",
        "CI"  => "circle",
        "E"   => "expon"
    )
    prefix -> translation[prefix]
end
"""
    run_tests(;kwargs...)

Solve every test problem stored in `fpath` with [`raff`](@ref) and write a
Markdown table to `/tmp/table.csv` comparing the outliers detected by the
method against the true ones. Keyword arguments are forwarded to `raff`.
"""
function run_tests(;kwargs...)

    # `do`-block guarantees the table file is closed even if a solve fails.
    open("/tmp/table.csv", "w") do tfp

        @printf(tfp, """
| Name | Dim. | N Points | N Outl. | Found | Correct | Time (s) | Status | Solution |
| ---- | ---- | -------- | ------- | ----- | ------- | -------- | ------ | -------- |
""")

        for fname in readdir(fpath)

            # Skip solution files; only data files are solved.
            occursin("sol", fname) && continue

            # The leading non-digit prefix of the file name encodes the model.
            m = match(r"[^1-9]*", fname)

            # `isnothing` (identity) instead of `== nothing` (equality).
            isnothing(m) && continue

            mtype = getType(m.match)

            # First line holds the number of points; the rest is the data.
            open(fpath * fname) do fp
                global N = parse(Int, readline(fp))
                global data = readdlm(fp)
            end

            n, model, = RAFF.model_list[mtype]

            np, = size(data)

            # Retrieve the true outliers (flagged with 1.0 in the last column)
            outliers = findall(data[:, end] .== 1.0)

            # Run RAFF and time it
            rsol, t, = @timed raff(model, data[:, 1:end - 1], n; kwargs...)

            # Save problem data
            name = match(r"[^.]*", fname).match

            @printf(tfp, "| %5s | %3d | %6d | %3d | ", name, n, np, length(outliers))

            # Number of true outliers correctly identified by the method.
            nexact = count(in(outliers), rsol.outliers)

            @printf(tfp, "%3d | %3d | %10.4f | %d | ",
                    length(rsol.outliers), nexact, t, rsol.status)

            # Save best solution
            @printf(tfp, "[")

            for i = 1:n - 1
                @printf(tfp, "%10.3e, ", rsol.solution[i])
            end

            @printf(tfp, "%10.3e] |\n", rsol.solution[n])

        end

    end

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 931 | using Distributed
using DelimitedFiles
using Printf
@everywhere using RAFF
# This script runs the parallel version of RAFF
"""
run_praff()
Load and run the parallel/distributed version of RAFF. It assumes that
there is a problem file `/tmp/output.txt`.
"""
"""
    run_praff(maxms=1, initguess=nothing; model_str="logistic", foutliers=0.5)

Load and run the parallel/distributed version of RAFF. It assumes that
there is a problem file `/tmp/output.txt`.

- `maxms`: number of multistart trials (`MAXMS`) passed to [`praff`](@ref);
- `initguess`: starting point, defaults to the origin;
- `model_str`: key into `RAFF.model_list` selecting the fitting model;
- `foutliers`: fraction of the data points allowed to be outliers.
"""
function run_praff(maxms=1, initguess=nothing; model_str="logistic", foutliers=0.5)

    n, model, modelstr = RAFF.model_list[model_str]

    # First line of the file holds the number of points; the rest is the
    # data matrix. `global` keeps the script-style behavior of exposing
    # N and data for later inspection.
    open("/tmp/output.txt") do fp
        global N = parse(Int, readline(fp))
        global data = readdlm(fp)
    end

    # Identity test with `isnothing`, not equality with `== nothing`.
    if isnothing(initguess)
        initguess = zeros(Float64, n)
    end

    rsol = praff(model, data[:, 1:end - 1], n; MAXMS=maxms, initguess=initguess,
                 noutliers=Int(round(foutliers * size(data)[1])))

    @printf("Solution found:
fbest = %f
p = %d\n", rsol.f, rsol.p)

    println(rsol.solution)

    return rsol

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | code | 728 | using RAFF
using DelimitedFiles
using Printf
# Set Debug for Logging
using Logging
using Base.CoreLogging
"""
    run_raff(maxms=1, initguess=nothing; model_str="logistic", kwargs...)

Load the problem stored in `/tmp/output.txt` and solve it with
[`raff`](@ref). `maxms` sets the number of multistart trials (`MAXMS`),
`initguess` the starting point (defaults to the origin), and `model_str`
selects the model from `RAFF.model_list`. Remaining keyword arguments are
forwarded to `raff`. Prints and returns the solution.
"""
function run_raff(maxms=1, initguess=nothing;
                  model_str="logistic", kwargs...)

    n, model, modelstr = RAFF.model_list[model_str]

    # First line of the file holds the number of points; the rest is the
    # data matrix. `global` keeps the script-style behavior of exposing
    # N and data for later inspection.
    open("/tmp/output.txt") do fp
        global N = parse(Int, readline(fp))
        global data = readdlm(fp)
    end

    # Identity test with `isnothing`, not equality with `== nothing`.
    if isnothing(initguess)
        initguess = zeros(Float64, n)
    end

    rsol = raff(model, data[:, 1:end - 1], n; kwargs..., MAXMS=maxms, initguess=initguess)

    @printf("Solution found:
fbest = %f
p = %d\n", rsol.f, rsol.p)

    println(rsol.solution)

    return rsol

end
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 2404 | # Advanced usage
## Test Problems
In order to develop a robust code, test problems regarding data
fitting with outliers have been created. All the problems are
available in directory `test/test_problems`. All the problems have the
form
```math
\begin{array}{ccccc}
k & & & & \\
x_{11} & x_{12} & ... & x_{1k} & f_1\\
... & & & & \\
x_{np,1} & x_{np,2} & ... & x_{np,k} & f_{np}
\end{array}
```
where ``k`` is the number of *variables* of the model (not the number
of parameters!), ``np`` is the number of data points to be adjusted,
``x_{ij}`` are the values selected for parameter ``j`` in experiment
``i`` and ``f_i`` is the result obtained for experiment ``i``.
## Script files
During the development and testing of `RAFF` several scripts and
pieces of Julia code have been created. Those files are mostly related
to the automated generation of test problems and visualization of the
solutions. All those files are located in the `test/scripts`
directory.
We explain each file below, so maybe more advanced users can modify
and re-use the code to generate their own problems.
- `calc_ratio.jl`: this script contains a function that generates
several tests for the same model and solve each one with
`RAFF`. Then it prints the ratio of outliers that have
successfully been detected by the method.
- `draw.jl`: This script draws the solution of a given problem and
also its data, which was taken from a file. Examples of such files
are located in `test/test_problems`.
- `run_raff.jl`: this script simply loads a problem data from a
file, selects a given model and runs `RAFF`. Examples of such
files are located in `test/test_problems`.
- `run_lmlovo.jl`: the same as before, but only for [`lmlovo`](@ref)
function. Used mostly for testing.
- `generate_fit_tests.jl`: script for generating random test problem
files, using the pre-defined models given by
[`RAFF.model_list`](@ref). This function cannot be called inside
Julia, since it uses `ArgParse` package.
- `gen_circle.jl`: specific script for generating random test problems
related to the detection of circles in the plane. It also provides
functions to draw the problem and the solution, which differ from
the `draw.jl` script above.
- `run_performance_tests.jl`: script for generating some performance
tests, so we can compare different versions of RAFF.
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 937 | ## Summary
There are four main RAFF structures:
1. *[Main functions](@ref):* directly called by user;
1. *[Auxiliary functions](@ref):* used like internal auxiliary functions;
1. *[Random generation](@ref):* used to generate random sets of data, in order to test `RAFF`
1. *[Output type](@ref):* type defined to manipulate output information.
## Main functions
```@docs
lmlovo
raff
praff
set_raff_output_level
set_lm_output_level
```
## Auxiliary functions
```@docs
RAFF.voting_strategy
RAFF.eliminate_local_min!
RAFF.sort_fun!
RAFF.update_best
RAFF.consume_tqueue
RAFF.check_and_close
RAFF.check_ftrusted
RAFF.interval_rand!
```
## Random generation
```@docs
RAFF.generate_test_problems
RAFF.get_unique_random_points
RAFF.get_unique_random_points!
RAFF.generate_noisy_data!
RAFF.generate_noisy_data
RAFF.generate_clustered_noisy_data!
RAFF.generate_clustered_noisy_data
RAFF.model_list
```
## Output type
```@docs
RAFFOutput
```
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 817 | # Examples
In addition to the examples given in the [Tutorial](@ref), the
`examples/` directory contains another ways of using RAFF. Currently,
we only provide an example on how to load a problem from file, solve
it using `RAFF` and visually check the results.
- `cubic.jl`: this example solves a problem using a cubic model,
with 4 parameters. The example also illustrates how to use
[`RAFF.model_list`](@ref) utility structure in order to load
pre-defined models.
- `draw_and_detect.jl`: this nice example uses
[`GtkReactive.jl`](https://github.com/JuliaGizmos/GtkReactive.jl)
to show a graphic application of `RAFF.jl` to the detection of
circles drawn by the user. The user can also see the difference
between the LOVO approach and the traditional least squares
technique.
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 2160 | # Overview
This page is devoted to document and make easier the use of RAFF-
**R**obust **A**lgebraic **F**itting **F**unction. Our intent is to
provide a package to determine fitting functions for a dataset with
ability to detect possible outliers of the dataset. All the code was
made in [Julia language](https://julialang.org), version 1.0.
This package **is not** an implentation of classical least squares
solvers. It is an optimization-based package, based on algorithms for Lower Order-Value Optimization (LOVO) which were introduced in [1] and revisited in [2] to fit the user-provided models to experimental data. Recently, a good review can be found in [3]. To find possible outliers, LOVO methods depend on the number of outliers as input information. `RAFF` differs in this point and has no dependence on this number of outliers to perform the fitting process. In order to find a robust adjustment, a voting system is used, which is also responsible for the detection of possible outliers.
## Current Status
The current status of this project is beta quality, don't use for
anything important. We provide support to serial and parallel
running.
## Developed by
This project was developed by the optimization group at Department of
Mathematics, State University of Maringá, Brazil.
* Francisco Sobral (Leader)
* Emerson Vitor Castelani
* Ronaldo Lopes
* Wesley Shirabayashi
The authors of this package were sponsored by **Fundação Araucária**,
project number 002/17 - 47223.
## References
[1] Andreani, R., Dunder, C. & Martínez, J.M. Math Meth Oper Res (2005) 61: 365. https://doi.org/10.1007/s001860400410
[2] Andreani, R., Martínez, J.M., Martínez, L. et al. J Glob Optim (2009) 43: 1. https://doi.org/10.1007/s10898-008-9280-3
[3] Martínez, J.M. TOP (2012) 20: 75. https://doi.org/10.1007/s11750-010-0169-1
## Citing this package
If you would like to cite this package, please use
> Castelani, E. V., Lopes, R., Shirabayashi, W., & Sobral,
> F. N. C. (2019). RAFF.jl: Robust Algebraic Fitting Function in
> Julia. *Journal of Open Source Software*,
> 4(39), 1385. https://doi.org/10.21105/joss.01385
[BibTex](assets/raff.bib)
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 1919 | # Random generation of problems
`RAFF.jl` contains several methods for the generation of artificial
datasets, in order to simulate noisy data with outliers. See the
[API](api.md) for details about the possibilities of random generation
of data with outliers.
## Simple example - the exponential function
First, it is necessary to load `RAFF.jl` and the desired model to
generate the problem.
```@repl docrepl
using RAFF
n, model, = RAFF.model_list["expon"]
```
If an *exact* solution is not provided, `RAFF.jl` will generate a
random one, but this can destroy the shape of some models. Therefore,
in this example, we will provide a *hint* for a nice exponential
model. In this example, we will generate 20 random points with 2
outliers in the interval ``[1, 30]``.
```@repl docrepl
exact_sol = [5000.0, 4000.0, 0.2]
interv = (1.0, 30.0)
np = 20
p = 18
```
Before calling the generating function, we fix the random seed, so
this example is the same everywhere.
```@repl docrepl
using Random
Random.seed!(12345678)
data, = generate_noisy_data(model, n, np, p, x_interval=interv, θSol=exact_sol)
```
Now we use the script `test/script/draw.jl` (see
[Advanced](advanced.md) section) to visualize the data
generated. `draw.jl` uses the `PyPlot.jl` package, so it is necessary
to install it before running this example.
```
julia> include("test/scripts/draw.jl")
julia> draw_problem(data, model_str="expon")
```
If you are interested in running `RAFF`, just call the [`raff`](@ref)
method. **Attention**: we have to drop the last column of `data`,
since it contains information regarding the noise added to the
outliers.
```@repl docrepl
r = raff(model, data[:, 1:end - 1], n, MAXMS=10)
```
If all the steps were successful, after command
```
julia> draw_problem(data, model_str="expon", raff_output=r)
```
the following picture should appear

| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 6886 | # Tutorial
```@setup docrepl
```
## Installation
This package is supported just for Julia version 1.0. Consequently,
it uses package 3.0. Currently `RAFF` is registered in [General Julia Registers](https://github.com/JuliaRegistries), so the
package can be installed using the Julia package manager.
From the Julia REPL, type `]` to enter into Pkg REPL mode and run:
```
pkg> add RAFF
```
In what follows, we provide some simple examples on how to solve
problems with `RAFF`. All the examples, and some other ones, are given
in the `examples/` directory as Julia scripts.
## Basic usage
Just to illustrate the potential and basic usage of `RAFF`, let us consider
the following data set given by an array ``A``:
```math
A=\left[ \begin{array}{cc}
-2.0 & 5.0 \\
-1.5 & 3.25\\
-1.0 & 2.0 \\
-0.5 & 1.25\\
0.0 & 1.0 \\
0.5 & 1.25\\
1.0 & 2.0 \\
1.5 & 3.25\\
2.0 & 5.0 \\
\end{array}\right]
```
Let's suppose the first column of ``A`` as an experimental measure with
result given by second column. It is easy to see that the fitting
function in this case is accurate and given by
```math
\phi(x) = x^2 + 1
```
Now let's perturb one result of second column of ``A``. For example,
consider ``A_{6,2} = 2.55``. Assuming the model for fitting given by
```math
\varphi(x; \theta) = \theta_1 x^2 + \theta_2
```
classical least squares gives the result `θ = [0.904329,
1.3039]`. On the other hand, when we consider the `RAFF` algorithm we
obtain the correct answer `θ = [1.0, 1.0]`. Moreover, we also have a
list of the possible outliers.
In order to run `RAFF` algorithm we need to setup
```@repl docrepl
using RAFF
```
and define the data set and model:
```@repl docrepl
A=[-2.0 5.0;
-1.5 3.25;
-1.0 2.0 ;
-0.5 1.25;
0.0 1.0 ;
0.5 2.55;
1.0 2.0 ;
1.5 3.25;
2.0 5.0 ;];
model(x, θ) = θ[1] * x[1]^2 + θ[2]
```
After that, we can run method [`raff`](@ref):
```@repl docrepl
raff(model, A, 2)
```
The number `2` above is the number of variables in model, i. e., the
number of parameters to adjust in the model. The output is a
[`RAFFOutput`](@ref) type. For example, to access only the parameters of
solution, we can use
```@repl docrepl
output = raff(model, A, 2)
output.solution
```
Note that `RAFF` algorithm detects and ignores possible outliers. In
order to see which points are outliers, we can access the `outliers`
attribute.
```@repl docrepl
output.outliers
```
More details about `RAFFOutput` type and other options can be obtained
in [API section](api.md).
By default `RAFF` uses automatic differentiation, more specifically
[ForwardDiff.jl package](https://github.com/JuliaDiff/ForwardDiff.jl). But
is possible to call `RAFF` methods with gradient vector of model. For
example, considering the above example, we have,
```math
\nabla \varphi(x; \theta) = [x^2, 1].
```
Programming this gradient and running `RAFF` we have
```@repl docrepl
gmodel!(g, x, θ) = begin
g[1] = x[1]^2
g[2] = 1.0
end
raff(model, gmodel!, A, 2)
```
Preliminary tests have shown that the use of explicit derivatives is
10 times faster than automatic differentiation.
## Multivariate models
`RAFF` supports the use of multivariate fitting functions to data sets
of different dimensions. To illustrate how this works, consider the
following example:
```@repl docrepl
data = [1.0 1.0 2.0
0.0 0.0 4.0
7.0 1.5 -4.5
2.0 2.0 -17.0 # outlier
0.0 8.6 -4.6]
```
and the following model
```@repl docrepl
model(x, θ) = θ[1] * x[1] + θ[2] * x[2] + θ[3]
```
Note that this model has two variables ``(x_1, x_2)`` and three
parameters ``(\theta_1, \theta_2, \theta_3)``. This problem has one
outlier (`data[4,:]`), so there are 4 trusted points. Let's run `RAFF`
and check the answer.
```@repl docrepl
output = raff(model, data, 3)
```
The right answer is `[-1.0, -1.0, 4.0]`. As we can note, `RAFF` get
a good fit for the data set. Handling the output follows the same
pattern as the one-dimensional case.
In order to get improvements in processing time, we can code the
gradient vector of model too:
```@repl docrepl
gmodel!(g, x, θ) = begin
g[1] = x[1]
g[2] = x[2]
g[3] = 1.0
end
```
```@repl docrepl
output = raff(model, gmodel!, data, 3)
```
## Changing some options
`RAFF` has tunning options like precision of gradient stopping
criteria and initial guess.
```@repl docrepl
output = raff(model, data, 3; initguess=[0.5,0.5,0.5], ε=1.0e-4)
```
`RAFF` is based on an optimization method. In this way, it is subject to
stopping at stationary points that are not global minimizers. For this
reason, heuristics were implemented to find global minimizers. Such
heuristics depend on random number generation. So, if you want to run
tests with more reliability this can be a useful strategy. To define
in `RAFF`, say, 1000 different starting points, is enough to redefine
the keyword argument `MAXMS`.
```@repl docrepl
output = raff(model, data, 3; MAXMS=1, initguess=[0.5,0.5,0.5], ε=1.0e-10)
```
In the above example, we have also changed the starting point for the
method. Also, the stopping criterion was changed to ``10^{-10}``,
which means high accuracy when solving the subproblems. See [`RAFF`
API](api.md#RAFF) for all the possible options that can be used.
## Parallel running
`RAFF` can be run in a parallel or distributed environment, using the
[Distributed](https://docs.julialang.org/en/v1.0/stdlib/Distributed/)
package and function [`praff`](@ref). Let's use `praff` to solve the
same problem from the beginning. First, the Distributed package has to
be loaded and the number of workers has to be added. It is also
possible to add the address of other machines.
```
using Distributed
addprocs(3) # Add 3 worker processes
```
This step can be replaced if Julia is initialized with the `-p`
option
```
julia -p 3
```
Now we have to load [`RAFF`](@ref Overview) and the fit function in all
workers:
```
@everywhere using RAFF
@everywhere function model(x, θ)
θ[1] * x[1]^2 + θ[2]
end
@everywhere function gmodel!(g, x, θ)
g[1] = x[1]^2
g[2] = 1.0
end
```
then, we call [`praff`](@ref) to solve the problem (note that we do
not need to send the `A` matrix to all workers, since it will be
automatically sent by `praff`).
```
A=[-2.0 5.0;
-1.5 3.25;
-1.0 2.0 ;
-0.5 1.25;
0.0 1.0 ;
0.5 2.55;
1.0 2.0 ;
1.5 3.25;
2.0 5.0 ;];
n = 2
output = praff(model, gmodel!, A, n)
RAFFOutput(1, [1.0, 0.999996], 6, 8, 4.0205772365906425e-11, [6])
```
The true effectiveness of parallelism occurs when option `MAXMS` is
set, which changes the number of random initial points that are tried
for each subproblem solved. Better solutions can be achieved with
higher values of `MAXMS`
```
n = 2
output = praff(model, gmodel!, A, n; MAXMS=1000)
RAFFOutput(1, [1.0, 1.0], 7, 8, 5.134133698545651e-13, [6])
```
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 0.6.4 | e716c75b85568625f4bd09aae9174e6f43aca981 | docs | 12582 | ---
title: 'RAFF.jl: Robust Algebraic Fitting Function in Julia'
tags:
- Julia
- Statistics
- Lower Order-Value Optimization
- Outlier detection
- Nonlinear optimization
authors:
- name: Emerson V. Castelani
orcid: 0000-0001-9718-6486
affiliation: 1
- name: Ronaldo Lopes
affiliation: 1
- name: Wesley V. I. Shirabayashi
orcid: 0000-0002-7790-6703
affiliation: 1
- name: Francisco N. C. Sobral
orcid: 0000-0003-4963-0946
affiliation: 1
affiliations:
- name: Department of Mathematics, State University of Maringá, Paraná, Brazil
index: 1
date: 19 May 2019
bibliography: paper.bib
---
# Summary
`RAFF.jl` is a Julia package for the adjustment of a function to a dataset
coming from some experiment. This package is an alternative to classical
adjustment techniques such as linear and nonlinear regression. The goal of this
package is to find robust adjustments free from the influence of possible outliers
(discrepant points of the adjustment).
# Motivation
Let $f : \mathbb{R}^n \to \mathbb{R}$ be a function whose mathematical
description is not available. This function can be, for example, a
black-box, a proprietary computer program or an experiment. Suppose
that a dataset $S = \{(x_1, y_1), \dots, (x_m, y_m)\}$ is available,
where $y_i$ is an approximation of $f(x_i)$ (from an experimental
procedure, numerical approximation, etc.) and we want to approximate
$f$ by a known model $\phi$. Model $\phi$ can be defined as $\phi(x,
\theta)$, where $x$ are the $n$ independent variables of $f$ and
$\theta$ represents some parameters of $\phi$. ``RAFF.jl`` (Robust
Algebraic Fitting Function) is a Julia package developed to find
parameters $\theta$ for $\phi$ in order to adjust it to the
observed values $S$ of the unknown function $f$. Following @Liu2008 and
@keles2018, in general, the adjustment can be related to
1. Classical least squares (algebraic fit): which considers the sum of deviations of type
$\vert \phi(x_i, \theta) - y_i \vert^2$, also known as regression;
2. Orthogonal least squares (geometric fit): which considers the sum of deviations of type
$\min_x \Vert (x, \phi(x, \theta))-(x_i, y_i)\Vert^2$ (orthogonal projection on the
curve to be adjusted).
``RAFF.jl`` was developed to solve a generalization of the first case.
Linear and nonlinear regression is essentially the adjustment of
mathematical functions to data and is a problem that appears in many
areas of science. When data comes from real experiments, non-expected
errors may cause the appearance of outliers, which might be
responsible for causing the regression calculated by sum of deviations
to result in misleading approximations. Regression is strongly
connected to Statistics but practical methods to detect outliers are
not very common. @Motulsky2006a, for example, develop
a method for outlier detection based on the assumption that the error
follows a Lorentzian distribution around the function and use
nonlinear regression based on least squares. ``RAFF.jl`` provides
automatic detection of outliers using a voting system. It is an
optimization-based package, based on algorithms for Lower Order-Value
Optimization (LOVO) which were introduced by @Andreani2005 and
revisited by @Andreani2009. Recently, @Martinez2012 performed a complete review
about LOVO problems considering theoretical aspects of algorithms to solve it
and potential applications.
# Background
To elucidate the essence of how ``RAFF.jl`` works, let us detail some aspects related
to the LOVO problem and its resolution. Let us consider $m$ functions
$F_i:\mathbb{R}^n \rightarrow \mathbb{R}$, $i=1,...,m$. Given $\theta \in
\mathbb{R}^n$, we can sort the set $\{F_i(\theta),i=1,...,m\}$ in ascending
order:
$$
F_{i_1(\theta)}(\theta)\leq F_{i_2(\theta)}(\theta)\leq ...\leq
F_{i_m(\theta)}(\theta).
$$
Considering a value $1\leq p \leq m$, we can define the LOVO function as
$$S_p(\theta)=\sum_{k=1}^{p} F_{i_k(\theta)}(\theta)$$
and the LOVO problem as
$$\min_{\theta \in \mathbb{R}^{n}}S_p(\theta).$$
Assuming that $F_i, i=1,...,m$ are continuous functions we have that
$S_p$ is a continuous function, but assuming that $F_i$'s are differentiable
functions we cannot conclude that $S_p$ is differentiable. This can be seen by
reformulating the LOVO problem as follows. Denoting
$\mathcal{C}=\{\mathcal{C}_1,...,\mathcal{C}_r\}$ as the set of all
combinations of $\{1,...,m\}$ taken $p$ at time, we can define for each $i\in
\{1,...,r\}$ the following function
$$f_i(\theta)=\sum_{k\in \mathcal{C}_i} F_k(\theta)$$
and
$$f_{min}(\theta)=\min\{f_1(\theta),...,f_r(\theta)\}.$$
It can be observed that, for a given $\theta$, $f_{min}(\theta)$ is obtained by a combination $\mathcal{C}_j$ which contains the smallest sum of $p$ elements of the set $\{F_i(\theta),i=1,...,m\}$. Therefore $f_{\min}(\theta)=S_p(\theta)$ and, consequently,
the LOVO function is non differentiable. The LOVO problem description can become
more clear by considering an example. In this sense, let us consider the dataset given by
$x$ $y$
------- ---------
-0.5 0.119447
0.0 0.3
0.5 0.203551
0.75 0.423998
and the model defined by $\phi(x,\theta)=\theta (\sin(x)+\cos(x))$.
Naturally, we have $m=4$ and let us consider $p=3$. The $F_i$'s can assume different forms. To keep the example closest to our approach, let us consider
$F_i$'s as:
$$
\begin{aligned}
F_1(\theta) & =(0.119447 -\phi(-0.5,\theta))^2, \\
F_2(\theta) & =(0.3 -\phi(0.0,\theta))^2, \\
F_3(\theta) & =(0.203551 -\phi(0.5,\theta))^2, \\
F_4(\theta) & =(0.423998 -\phi(-0.75,\theta))^2.
\end{aligned}
$$
Since $m=4$ and $p=3$, we have 4 possible subsets with 3 elements each from set
$\{1,2,3,4\}$:
$$\mathcal{C}_1=\{1,2,3\},\mathcal{C}_2=\{1,2,4\},\mathcal{C}_3=\{1,3,4\}\ \text{and}\ \mathcal{C}_4=\{2,3,4\}.$$
Thus, associated to each $\mathcal{C}_i,i=1,...,4$, we can define function $f_i$
as follows
$$
\begin{aligned}
f_1(\theta) & =F_1(\theta)+F_2(\theta)+F_3(\theta),\\
f_2(\theta) & =F_1(\theta)+F_2(\theta)+F_4(\theta),\\
f_3(\theta) & =F_1(\theta)+F_3(\theta)+F_4(\theta),\\
f_4(\theta) & =F_2(\theta)+F_3(\theta)+F_4(\theta),
\end{aligned}
$$
and consequently,
$$f_{min}(\theta)=\min\{f_1(\theta),f_2(\theta),f_3(\theta),f_4(\theta)\}=S_3(\theta).$$
As previously pointed out, this function is continuous but it is
not differentiable as illustrated in [Figure 2](#lovo).
![The red function represents the LOVO function. Observing the interval $[0.2,0.25]$ we can note a singular point even considering $f_1$, $f_2$, $f_3$ and $f_4$ as differentiable functions.](lovo_desc.png){#lovo width=60%,height=60%}
@Andreani2009 introduced line search methods and handled the possible
singularities in a clever way, using the following approximation for $\nabla f_{min}(\theta)$
$$\nabla f_{min}(\theta)=\nabla f_i(\theta),$$ where $i \in \mathcal{I}
(\theta)=\{k \in \{1,...,r\};f_k(\theta)=f_{min}(\theta)\}$. This
approach can naturally be extended for second order derivatives.
An important point for practical purposes is when we consider the LOVO
problem with $p=m$ and $F_i(\theta)=(\phi(x_i,\theta)- y_i)^2$. In
this case, the LOVO problem coincides with classical least squares
and, consequently, it can be seen as a generalization of the least
squares problem. When $p < m$ and $F_i(\theta)=(\phi(x_i,\theta)-
y_i)^2$, the solution $\theta$ provides a model $\phi(x,\theta)$ free
from influence of the $m-p$ points with the highest deviation. The
number $p$ can be interpreted as the number of trusted points, that
is, $m - p$ possible outliers were identified.
One of the most usual ways to solve the problem of nonlinear least
squares is by using the Levenberg-Marquardt method
[@more1978levenberg]. This method is a first-order method, where
derivatives of the model $\phi$ *with respect to $\theta$* are used to
compute the gradient of the objective function in the associated least
squares problem. The reason for the wide use of Levenberg-Marquardt
method is, in general, associated with quadratic convergence
properties even using only first-order derivatives. In this direction,
it is relevant to ask about Levenberg-Marquardt-based methods to solve
LOVO problems in the context of adjustment functions.
``RAFF.jl`` implements a Levenberg-Marquardt algorithm in the context
of LOVO problems, i.e., it solves the problem of minimizing
$f_{min}(\theta)$, where $F_i(\theta)=(\phi(x_i,\theta)- y_i)^2$, for
$i = 1,\dots, m$. In this sense, first-order derivatives are necessary
and the same strategy of @Andreani2009 is used. It uses first-order
derivatives of the model $\phi$ with respect to $\theta$ to
approximate the gradient of $f_{min}(\theta)$, which is a non
differentiable function. Moreover, LOVO problems have the limitation
that the number $p$ of possible trusted points needs to be given by
the user. ``RAFF.jl`` solves this limitation by implementing a voting
system. In this voting system, several LOVO subproblems are solved
with different values for $p$, the number of possible trusted
points. Each solution of a LOVO subproblem is associated to a vector
parameter $\theta$. The vector parameters are compared against each
other using the Euclidean distance, where small distances (using a
threshold) are considered the same solution. The parameter $\theta^*$
which most occurs among them is declared as the solution.
# Functionality
``RAFF.jl`` main methods expect as input a dataset of the observed
data and a model function, whose parameters one intends to adjust. The
model function is a regular Julia function with 2 arguments: $\theta$
represents the parameters of the model and $x$ represents the
arguments of function $f$. The following function is an example of a
model representing the logistic function $$ \phi(x, \theta) =
\theta_1 + \frac{\theta_2}{1.0 + \exp(- \theta_3 x + \theta_4)}.
$$ The observed data can be represented by the following table:
$x$ $y$
------- ---------
0.0000 1166.0892
3.3333 1384.4495
6.6666 4054.1959
10.0000 2692.4928
13.3333 3011.5096
16.6666 3882.4381
20.0000 4612.4603
23.3333 6605.6544
26.6666 5880.1774
30.0000 5506.3050
In this example, the true function was given by $$ f(x) = 1000 +
\frac{5000}{1.0 + \exp(- 0.2 x + 3)}. $$ The observed data was
generated as random normal perturbations around the graphic of $f$ and
is shown in [Figure 1](#logistic). The dots and triangles represent
the observed data, where the red triangles were manually set to be the
outliers. Using the least squares technique with the model above, the
green function is found. When `RAFF.jl` is applied to the same
problem, it correctly identifies the two outliers. The resulting
function is depicted as the red one, very close to $f$.
{#logistic
width=60%, height=60%}
# Additional features
The user may also provide more information to ``RAFF.jl``, such as an
rough approximation to the expected number of *trusted*
observations. Additional methods and options are also available to
more advanced users, such as generation of random test data and
multistart strategies. First-order derivatives of the model $\phi$
with respect to $\theta$ can also be provided, which results in a
faster executing time. When they are not provided by the user,
``RAFF.jl`` uses Julia's ``ForwardDiff.jl`` package [@Revels2016].
``RAFF.jl`` can be run in serial, parallel and distributed
environments. Parallel and distributed methods use the native
[``Distributed.jl``](https://docs.julialang.org/en/v1.0/stdlib/Distributed/)
package. The distributed version is a primary-worker implementation that
does not use shared arrays, therefore, can be run both locally or on a
cluster of computers.
This package is intended to be used by any experimental researcher
with a little knowledge about mathematical modeling and fitting
functions.
# Installation and usage
``RAFF.jl`` is an open-source software that can be
[downloaded from Github](https://github.com/fsobral/RAFF.jl). It is a
registered package and can be directly installed from Julia's package
repository. The whole description for first time usage or its API is
available at its
[documentation](https://fsobral.github.io/RAFF.jl/stable/).
# Acknowledgments
This project was supported by Fundação Araucária under grant 002/17.
# References
| RAFF | https://github.com/fsobral/RAFF.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | code | 231 |
# ITensorLattices: utilities for building lattice graphs and visualizing
# them, for use together with ITensors.
module ITensorLattices

using GraphRecipes
using Graphs
using ITensors
using Parameters
using Plots
using LinearAlgebra

# Each `include` pastes the file's definitions into this module's
# namespace; the files split the package by concern.
include("graphs.jl")
include("lattices.jl")
include("visualize.jl")

end
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | code | 5368 |
export Coord, get_neighbor_graph, nnodes, build_graph
export GraphParams, ChainGraphParams, TriangularGraphParams, SquareGraphParams, HoneycombGraphParams
# A planar coordinate: any 2-tuple of real numbers. Declared `const` so
# the global binding is type-stable when used in signatures and arrays.
const Coord = Tuple{<:Real, <:Real}
"""
    get_neighbor_graph(graph, n)

Return a graph connecting every pair of vertices of `graph` that are
exactly `n` hops apart. `n == 1` returns `graph` itself.
"""
function get_neighbor_graph(graph, n)
    n >= 1 || throw(ArgumentError("neighbor order n must be ≥ 1, got $n"))
    n == 1 && return graph

    A = adjacency_matrix(graph)

    # Accumulate walk information in a single sweep of matrix products:
    # after the loop, `closer` marks pairs joined by a walk of fewer than
    # n steps and `Ak` is A^n. This replaces recomputing A^k from scratch
    # for every k, which cost O(n^2) matrix multiplications.
    Ak = copy(A)
    closer = falses(size(A))
    for _ in 1:(n - 1)
        closer .|= Ak .!= 0
        Ak = Ak * A
    end

    # Pairs connected by a walk of exactly n steps...
    N = Ak .!= 0

    # ...minus pairs that are actually closer than n steps away.
    N .&= .!closer

    # Remove self-connections (every vertex has even-length closed walks).
    for i in axes(N, 1)
        N[i, i] = false
    end

    return SimpleGraph(N)
end
############################################################################################
### Graph Types
############################################################################################

# Abstract parent for all lattice-parameter bundles. Concrete subtypes
# select the lattice geometry through dispatch on `build_graph`/`nnodes`.
abstract type GraphParams end

# One-dimensional chain of `N` sites; `periodic` closes the chain into a
# ring by bonding the last site back to the first.
@with_kw struct ChainGraphParams <: GraphParams
    N::Int
    periodic::Bool=false
end

# Triangular lattice of `Nx × Ny` sites; `yperiodic` wraps bonds around
# in the y direction.
@with_kw struct TriangularGraphParams <: GraphParams
    Nx::Int
    Ny::Int
    yperiodic::Bool=false
end

# Honeycomb lattice of `Nx × Ny` sites; `yperiodic` wraps bonds around
# in the y direction.
@with_kw struct HoneycombGraphParams <: GraphParams
    Nx::Int
    Ny::Int
    yperiodic::Bool=false
end

# Square lattice of `Nx × Ny` sites; `yperiodic` wraps bonds around
# in the y direction.
@with_kw struct SquareGraphParams <: GraphParams
    Nx::Int
    Ny::Int
    yperiodic::Bool=false
end
# Get number of sites
# Fallback logs an error (does not throw) for parameter types without an implementation.
nnodes(params::GraphParams) = @error "nnodes for type $(typeof(params)) is not implemented"
nnodes(params::ChainGraphParams) = params.N
# 2D lattices have one site per (x, y) grid point.
nnodes(params::TriangularGraphParams) = params.Nx * params.Ny
nnodes(params::SquareGraphParams) = params.Nx * params.Ny
nnodes(params::HoneycombGraphParams) = params.Nx * params.Ny
# Build Graphs
# Fallback logs an error (does not throw) for parameter types without an implementation.
build_graph(params::GraphParams) = @error "build_graph for type $(typeof(params)) is not implemented"
"""
Build a 1D chain graph (optionally closed into a ring by `periodic`) and the
node coordinates laid out along the x-axis. Returns `(graph, coords)`.
"""
function build_graph(params::ChainGraphParams)
    N = params.N
    periodic = params.periodic
    node_coords = [(k - 1, 0) for k in 1:N]
    bond_list = [Edge(k, k + 1) for k in 1:(N - 1)]
    periodic && push!(bond_list, Edge(N, 1))
    return SimpleGraph(bond_list), node_coords
end
"""
Build an Nx-by-Ny triangular-lattice graph. Returns `(graph, coords)` where
`coords[i]` is the `(x, y)` grid position of site `i`. Sites are numbered
column-major in y: `i = (x - 1) * Ny + y`.
"""
function build_graph(params::TriangularGraphParams)
    @unpack Nx, Ny, yperiodic = params
    # y-periodicity only takes effect with more than two rows
    yperiodic = yperiodic && (Ny > 2)
    n = Nx * Ny
    # pre-computed total bond count so `edges` can be allocated up front
    nbonds = 3n - 2Ny + (yperiodic ? 0 : -2Nx + 1)
    edges = Vector{Tuple{Int, Int}}(undef, nbonds)
    coords = Vector{Coord}(undef, n)
    b = 0
    for i in 1:n
        x = (i - 1) ÷ Ny + 1
        y = mod1(i, Ny)
        # set coordinates
        coords[i] = (x - 1, y - 1)
        # x-direction bonds
        if x < Nx
            edges[b += 1] = (i, i + Ny)
        end
        # 2d bonds
        if Ny > 1
            # vertical / y-periodic diagonal bond
            if (i + 1 <= n) && ((y < Ny) || yperiodic)
                edges[b += 1] = (i, i + 1)
            end
            # periodic vertical bond
            if yperiodic && y == 1
                edges[b += 1] = (i, i + Ny - 1)
            end
            # diagonal bonds
            if x < Nx && y < Ny
                edges[b += 1] = (i, i + Ny + 1)
            end
        end
    end
    graph = SimpleGraph(Edge.(edges))
    return graph, coords
end
"""
Build an Nx-by-Ny square-lattice graph. Returns `(graph, coords)`; sites are
numbered column-major in y: `i = (x - 1) * Ny + y`.
"""
function build_graph(params::SquareGraphParams)
    @unpack Nx, Ny, yperiodic = params
    # y-periodicity only takes effect with more than two rows
    yperiodic = yperiodic && (Ny > 2)
    n = Nx * Ny
    # pre-computed total bond count for up-front allocation
    nbonds = 2n - Ny + (yperiodic ? 0 : -Nx)
    edges = Vector{Tuple{Int, Int}}(undef, nbonds)
    coords = Vector{Coord}(undef, n)
    b = 0
    for i in 1:n
        x = (i - 1) ÷ Ny + 1
        y = mod1(i, Ny)
        # set coordinates
        coords[i] = (x - 1, y - 1)
        # horizontal bond to the next column
        if x < Nx
            edges[b += 1] = (i, i + Ny)
        end
        if Ny > 1
            # vertical bond, or the wrap-around bond on the last row
            if y < Ny
                edges[b += 1] = (i, i + 1)
            elseif yperiodic
                edges[b += 1] = (i, i - Ny + 1)
            end
        end
    end
    graph = SimpleGraph(Edge.(edges))
    return graph, coords
end
"""
Build honeycomb lattice in armchair configuration
"""
function build_graph(params::HoneycombGraphParams)
@unpack Nx, Ny, yperiodic = params
getidx(x, y) = (x - 1) * Ny + y
wrap(x, k) = mod1(mod1(x, k) + k, k)
# Build graph
yperiodic = yperiodic && (Ny > 2)
edges = Vector{Tuple{Int, Int}}()
function add_edge!(x1, y1, x2, y2)
idx1 = getidx(x1, y1)
idx2 = getidx(x2, y2)
push!(edges, (idx1, idx2))
end
for x in 1:(Nx - 1)
for y in 1:Ny
# bind to next row
add_edge!(x, y, x + 1, y)
# Alternate between linking up and linking down on odd rows
if x % 2 == 1
crossy = y + ((x ÷ 2) % 2 == 0 ? 1 : -1)
if crossy ∈ 1:Ny
add_edge!(x, y, x + 1, crossy)
elseif yperiodic
add_edge!(x, y, x + 1, wrap(crossy, Ny))
end
end
end
end
graph = SimpleGraph(Edge.(edges))
# Calculate node coordinates
function getcoords(x, y)
xcoord = (x - 1) * 3/4 + ((x % 2) == 0 ? -1/4 : 0)
ycoord = (y - 1) * sqrt(3) + ((x ÷ 2) % 2 == 0 ? sqrt(3)/2 : 0)
return xcoord, ycoord
end
coords = Vector{Coord}(undef, Nx * Ny)
for x in 1:Nx
for y in 1:Ny
xcoord = (x - 1) * 3/4 + ((x % 2) == 0 ? -1/4 : 0)
ycoord = (y - 1) * sqrt(3) + ((x ÷ 2) % 2 == 0 ? sqrt(3) / 2 : 0)
coords[getidx(x, y)] = (xcoord, ycoord)
end
end
return graph, coords
end
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | code | 2106 |
export build_graph, build_lattice, get_coords, nsites
export LatticeParams, ChainLatticeParams, TriangularLatticeParams, SquareLatticeParams, HoneycombLatticeParams
"""
Aliases
"""
nsites = nnodes
const LatticeParams = GraphParams
const ChainLatticeParams = ChainGraphParams
const TriangularLatticeParams = TriangularGraphParams
const SquareLatticeParams = SquareGraphParams
const HoneycombLatticeParams = HoneycombGraphParams
"""
Build graph from lattice. This can be useful for analyzing the bond structure
with graph algorithms and visualization of the lattice as a sanity check
"""
function build_graph(lattice::Lattice)::SimpleGraph
edges = map(bond -> Edge(bond.s1, bond.s2), lattice)
return SimpleGraph(edges)
end
"""
Build ITensor Lattice from undirected graph
"""
function build_lattice(graph::Graph, coords::Union{Vector{<:Coord}, Nothing}=nothing)::Lattice
@assert !is_directed(graph) "Graph must be undirected"
es = edges(graph)
function buildbond(edge)
idx1 = src(edge)
idx2 = dst(edge)
(x1, y1), (x2, y2) = isnothing(coords) ? ((0, 0), (0, 0)) : (coords[idx1], coords[idx2])
return LatticeBond(idx1, idx2, x1, y1, x2, y2)
end
return buildbond.(es)
end
"""
Builds a dictionary mapping the site index to cartesian coordinates
"""
function get_coords(lattice::Lattice)::Vector{Coord}
isempty(lattice) && return []
# make index => (x, y) pairs
getcoord(bond) = [bond.s1 => (bond.x1, bond.y1), bond.s2 => (bond.x2, bond.y2)]
allcoords = vcat(getcoord.(lattice)...)
# create dictionary to remove duplicates
dict = Dict(allcoords)
# create array or coordinates indexed by the site index
indices = keys(dict)
maxidx = maximum(indices)
coordlist = Vector{Coord}(undef, maxidx)
for i in 1:maxidx
coordlist[i] = get(dict, i, (0, 0))
end
return coordlist
end
"""
Build a lattice directly from lattice parameters; `neighbor = k` produces the
k-th-neighbor bond structure via `get_neighbor_graph`.
"""
function build_lattice(params::LatticeParams; neighbor::Int=1)
    g, site_coords = build_graph(params)
    return build_lattice(get_neighbor_graph(g, neighbor), site_coords)
end
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | code | 777 |
export visualize
"""
Visualize ITensor lattice
Keyword arguments are GraphRecipes arguments: https://docs.juliaplots.org/stable/generated/graph_attributes/
"""
function visualize(lattice::Lattice; use_lattice_coords=true, kwargs...)
if isempty(lattice)
@warn "Empty lattice — nothing to visualize"
return
end
defaultargs = (
curves=false,
curvature_scalar=0.2,
nodesize=0.3,
nodeshape=:circle
)
graph = build_graph(lattice)
if use_lattice_coords
coords = get_coords(lattice)
x = [coord[1] for coord in coords]
y = [coord[2] for coord in coords]
graphplot(graph; x, y, defaultargs..., kwargs...)
else
graphplot(graph; defaultargs..., kwargs...)
end
end
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | code | 859 | using ITensorLattices
using Test
@testset "Lattice Tests" begin
params = ChainLatticeParams(N = 5, periodic = true)
n = nsites(params)
@test n == 5
l = build_lattice(params)
@test length(l) == 5
params = SquareLatticeParams(Nx = 10, Ny = 3)
n = nsites(params)
@test n == 30
l = build_lattice(params)
@test length(l) == 47
params = TriangularLatticeParams(Nx = 20, Ny = 30)
n = nsites(params)
@test n == 600
l = build_lattice(params)
@test length(l) == 1701
params = HoneycombLatticeParams(Nx = 100, Ny = 100, yperiodic = true)
n = nsites(params)
@test n == 10000
l = build_lattice(params)
@test length(l) == 14900
# neighbors
params = HoneycombLatticeParams(Nx = 20, Ny = 4, yperiodic = true)
l = build_lattice(params, neighbor=2)
@test length(l) == 224
end
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.0.0 | fba08372ca54b8ae2dd93b11fc1be477186d61fa | docs | 183 | # ITensorLattices
Library for generating and visualizing ITensor lattices for constructing Hamiltonians
## Up Next:
- [ ] Add zigzag configuration honeycomb
- [ ] Add kagome lattice
| ITensorLattices | https://github.com/Stanford-Condensed-Matter-Theory-Group/ITensorLattices.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 1925 | using BinDeps
isfile("deps.jl") && rm("deps.jl")
@BinDeps.setup
libtesselate = library_dependency("libtesselate", aliases = ["libtesselate.dylib"], runtime = true)
rootdir = BinDeps.depsdir(libtesselate)
srcdir = joinpath(rootdir, "src")
prefix = joinpath(rootdir, "usr")
libdir = joinpath(prefix, "lib")
headerdir = joinpath(prefix, "include")
if Sys.iswindows()
libfile = joinpath(libdir, "libtesselate.dll")
arch = "x86"
if Sys.WORD_SIZE == 64
arch = "x64"
end
@build_steps begin
FileRule(libfile, @build_steps begin
BinDeps.run(@build_steps begin
ChangeDirectory(srcdir)
`cmd /c compile.bat all $arch`
`cmd /c copy libtesselate.dll $libfile`
`cmd /c copy triangle.h $headerdir`
`cmd /c copy tesselate.h $headerdir`
`cmd /c copy commondefine.h $headerdir`
`cmd /c compile.bat clean $arch`
end) end) end
provides(Binaries, URI(libfile), libtesselate)
else
libname = "libtesselate.so"
if Sys.isapple()
libname = "libtesselate.dylib"
end
libfile = joinpath(libdir, libname)
provides(BinDeps.BuildProcess, (@build_steps begin
FileRule(libfile, @build_steps begin
BinDeps.ChangeDirectory(srcdir)
`make clean`
`make libtesselate`
`cp libtesselate.so $libfile`
`cp triangle.h $headerdir/`
`cp tesselate.h $headerdir/`
`cp commondefine.h $headerdir/`
`make clean`
end)
end), libtesselate)
end
@BinDeps.install Dict(:libtesselate => :libtesselate)
| TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 597 | push!(LOAD_PATH,"../src/")
using Documenter, TriangleMesh
# Build the HTML documentation from docs/src.
makedocs(
    modules = [TriangleMesh],
    doctest=false,
    clean=false,
    format = :html,
    assets = ["assets/logo.png"],
    sitename = "TriangleMesh.jl",
    pages = [
        "Home" => "index.md",
        "Workflow" => "man/examples.md",
        "Modules, Types and Methods" => "man/mtm.md",
        "Index" => "man/mtm_idx.md"
    ],
    authors = "K. Simon"
)
# Deploy the generated site to the gh-pages branch.
deploydocs(
    deps = Deps.pip("mkdocs", "python-markdown-math"),
    repo = "github.com/konsim83/TriangleMesh.jl.git",
    target = "build",
    branch = "gh-pages",
    make = nothing
)
| TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 2001 | #=
To run this script type 'include("path/to/this/file/TriangleMesh_plot.jl")'
=#
using TriangleMesh, PyPlot
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
plot_TriMesh(m :: TriMesh; <keyword arguments>)
Plot a mesh.
# Keyword arguments
- `linewidth :: Real = 1`: Width of edges
- `marker :: String = "None"`: Markertype can be `o`, `x`,`None`,... (see `lineplot` options in Python's matplotlib)
- `markersize :: Real = 10`: Size of marker
- `linestyle :: String = "-"`: Li9nestyle can be `-`,`--`,... (see `lineplot` options in Python's matplotlib)
- `color :: String = "red"`: Edge color (see `lineplot` options in Python's matplotlib)
"""
function plot_TriMesh(m :: TriMesh;
linewidth :: Real = 1.5,
marker :: String = "None",
markersize :: Real = 5,
linestyle :: String = "-",
color :: String = "blue")
fig = matplotlib[:pyplot][:figure]("2D Mesh Plot", figsize = (10,10))
ax = matplotlib[:pyplot][:axes]()
ax[:set_aspect]("equal")
# Connectivity list -1 for Python
tri = ax[:triplot](m.point[:,1], m.point[:,2], m.cell.-1 )
setp(tri, linestyle = linestyle,
linewidth = linewidth,
marker = marker,
markersize = markersize,
color = color)
fig[:canvas][:draw]()
return fig
end
# -----------------------------------------------------------
# -----------------------------------------------------------
# Create a mesh of the unit square with a hole and refine some cells
poly = polygon_unitSquareWithHole()
mesh = create_mesh(poly, info_str="my mesh", voronoi=true, delaunay=true, set_area_max=true)
mesh_refined = refine(mesh, ind_cell=[1;4;9], divide_cell_into=10, keep_edges=true)
plot_TriMesh(mesh, linewidth=4, linestyle="--")
plot_TriMesh(mesh_refined, color="blue") | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 2584 | """
Create and refine 2D unstructured triangular meshes. Interfaces
[Triangle](https://www.cs.cmu.edu/~quake/triangle.html) written by J.R. Shewchuk.
"""
module TriangleMesh
using ProgressMeter, Libdl, LinearAlgebra, DelimitedFiles
export TriMesh, Polygon_pslg,
        create_mesh, refine, refine_rg,
        polygon_unitSimplex, polygon_unitSquare, polygon_unitSquareWithHole, polygon_unitSquareWithRegion, polygon_unitSquareWithEnclosedRegion,
        polygon_regular, polygon_Lshape, polygon_struct_from_points,
        set_polygon_point!, set_polygon_point_marker!, set_polygon_point_attribute!,
        set_polygon_segment!, set_polygon_segment_marker!, set_polygon_region!, set_polygon_hole!,
        write_mesh,triangulate
# ----------------------------------------
# The library must be compiled and found by julia. (Check if this can be done in a nicer way.)
if ~isfile(string(Base.@__DIR__, "/../deps/deps.jl"))
    Base.@error("Triangle library not found. Please run `Pkg.build(\"TriangleMesh\")` first.")
end
# Pick the shared-library suffix for the current platform.
if Sys.iswindows()
    libsuffix = ".dll"
elseif Sys.isapple()
    libsuffix = ".dylib"
elseif Sys.islinux()
    libsuffix = ".so"
else
    Base.@error "Operating system not supported."
end
# Absolute path to the compiled Triangle wrapper library (built by deps/build.jl).
const libname = string(Base.@__DIR__, "/../deps/usr/lib/libtesselate")
const libtesselate = string(libname, libsuffix)
# ----------------------------------------
# --------------------------------------
# Contains Polygon struct
include("TriangleMesh_Polygon.jl")
# Some constructors of useful polygons
include("TriangleMesh_polygon_constructors.jl")
# Struct that corresponds to C-struct (only for talking to the Triangle
# library)
include("TriangleMesh_Mesh_ptr_C.jl")
# Julia struct to actually work with, contains all necessary information about
# the triangulation created by Triangle
include("TriangleMesh_TriMesh.jl")
# Write the triangulation in files (to be extended to different formats)
include("TriangleMesh_FileIO.jl")
# --------------------------------------
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# --------------------------------------
# input function: prints a prompt and reads one chomped line from stdin
function input(prompt::String="")
    print(prompt)
    return chomp(readline())
end
# --------------------------------------
# The actual code
include("TriangleMesh_create_mesh.jl")
include("TriangleMesh_refine.jl")
include("TriangleMesh_refine_rg.jl")
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
end # end module
| TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 3202 | """
write_mesh(m :: TriMesh, file_name :: String; <keyword arguments>)
Write mesh to disk.
# Arguments
- `file_name :: String`: Provide a string with path and filename
# Keyword arguments
- `format :: String = "triangle"`: Specify mesh format. Only option for now is `"triangle"` (Triangle's native mesh format)
"""
function write_mesh(m::TriMesh, file_name::String;
                    format::String="triangle")
    # Only Triangle's native format is supported for now.
    if format != "triangle"
        Base.@error("File output format not known.")
        return nothing
    end
    write_mesh_triangle(m, file_name)
    return nothing
end
"""
    write_mesh_triangle(m :: TriMesh, file_name :: String)

Write `m` in Triangle's native file format into the `meshfiles` folder below
the current working directory: `.node` (points), `.ele` (triangles),
`.neigh` (triangle neighbors) and `.edge` (edges). Sections without data are
skipped.
"""
function write_mesh_triangle(m::TriMesh, file_name::String)
    # Write files into $PWD/meshfiles folder
    if ~ispath(pwd() * "/meshfiles")
        mkdir("meshfiles")
    end
    path = pwd() * "/meshfiles/"
    file_name = path * basename(file_name)

    println("Writing files to $file_name.* .......")

    # write points
    if m.n_point > 0
        open(file_name * ".node", "w") do f
            write(f, "# Number of nodes, dimension, attributes and markers:\n")
            write(f, "$(m.n_point) 2 $(m.n_point_attribute) $(m.n_point_marker) \n")
            write(f, "# List of nodes, attributes (optional) and markers (optional):\n")
            # Columns: index, x, y, one column per attribute row, markers.
            # Built directly with `zip` instead of the previous
            # eval(Meta.parse(...)) on an interpolated code string, which
            # round-tripped all data through its printed representation.
            columns = Any[1:m.n_point, m.point[1, :], m.point[2, :]]
            for i in 1:m.n_point_attribute
                push!(columns, m.point_attribute[i, :])
            end
            push!(columns, m.point_marker)
            writedlm(f, zip(columns...), " ")
        end
    end

    # write triangles
    if m.n_cell > 0
        open(file_name * ".ele", "w") do f
            write(f, "# Number of triangles, nodes per triangle (always 3) and attributes:\n")
            write(f, "$(m.n_cell) 3 0 \n")
            write(f, "# List of triangles and attributes (optional):\n")
            writedlm(f, zip(1:m.n_cell, m.cell[1, :], m.cell[2, :], m.cell[3, :]), " ")
        end
    end

    # write triangle neighbors
    if length(m.cell_neighbor) > 0
        open(file_name * ".neigh", "w") do f
            write(f, "# Number of triangles, number of neigbors (always 3):\n")
            write(f, "$(m.n_cell) 3 \n")
            write(f, "# List of triangle neighbors:\n")
            writedlm(f,
                     zip(1:m.n_cell,
                         m.cell_neighbor[1, :],
                         m.cell_neighbor[2, :],
                         m.cell_neighbor[3, :]),
                     " ")
        end
    end

    # write edges
    if m.n_edge > 0
        open(file_name * ".edge", "w") do f
            write(f, "# Number of edes and boudary markers:\n")
            write(f, "$(m.n_edge) 1 \n")
            write(f, "# List of edges and boundary markers (optional):\n")
            writedlm(f, zip(1:m.n_edge, m.edge[1, :], m.edge[2, :], m.edge_marker), " ")
        end
    end

    println("...done!")
end
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 10080 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
Julia struct for that corresponds to C struct of Triangle. Only for internal use.
"""
mutable struct Mesh_ptr_C
    # Triangulation part
    pointlist::Ptr{Cdouble}               # 2 coordinates per point
    pointattributelist::Ptr{Cdouble}      # attributes per point (may be NULL)
    pointmarkerlist::Ptr{Cint}            # boundary markers per point (may be NULL)
    numberofpoints::Cint
    numberofpointattributes::Cint
    trianglelist::Ptr{Cint}               # corner indices per triangle
    triangleattributelist::Ptr{Cdouble}
    trianglearealist::Ptr{Cdouble}        # per-triangle area constraints
    neighborlist::Ptr{Cint}               # 3 neighbor triangles per triangle
    numberoftriangles::Cint
    numberofcorners::Cint                 # corners per triangle (3 here)
    numberoftriangleattributes::Cint
    segmentlist::Ptr{Cint}                # 2 endpoint indices per segment
    segmentmarkerlist::Ptr{Cint}
    numberofsegments::Cint
    holelist::Ptr{Cdouble}                # one (x, y) point inside each hole
    numberofholes::Cint
    regionlist::Ptr{Cdouble}              # 4 values per region
    numberofregions::Cint
    edgelist::Ptr{Cint}
    edgemarkerlist::Ptr{Cint}
    normlist::Ptr{Cdouble}                # edge normals (Voronoi output)
    numberofedges::Cint
end # end struct
"""
Mesh_ptr_C(p :: Polygon_pslg)
Constructor for `Mesh_ptr_C` from polygon. Only for internal use.
"""
function Mesh_ptr_C(p::Polygon_pslg)
    # NOTE: `pointer(...)` yields raw pointers into `p`'s arrays — `p` must
    # stay rooted (GC-reachable) for as long as the C side uses this struct.
    numberofpoints = Cint(p.n_point)
    pointlist = pointer(p.point)
    # Optional fields use C_NULL as the "absent" sentinel.
    if p.n_point_marker == 0
        pointmarkerlist = convert(Ptr{Cint}, C_NULL)
    else
        pointmarkerlist = pointer(p.point_marker)
    end
    numberofpointattributes = Cint(p.n_point_attribute)
    if numberofpointattributes == 0
        pointattributelist = convert(Ptr{Cdouble}, C_NULL)
    else
        pointattributelist = pointer(p.point_attribute)
    end
    # A polygon carries no triangulation yet: all triangle fields are empty.
    numberoftriangles = Cint(0)
    numberoftriangleattributes = Cint(0)
    numberofcorners = Cint(3)
    trianglelist = convert(Ptr{Cint}, C_NULL)
    triangleattributelist = convert(Ptr{Cdouble}, C_NULL)
    trianglearealist = convert(Ptr{Cdouble}, C_NULL)
    neighborlist = convert(Ptr{Cint}, C_NULL)
    # (A large block of commented-out experimental code that pre-filled the
    # triangle fields from the polygon was removed here; the triangle fields
    # are always NULL for polygon input.)
    numberofsegments = Cint(p.n_segment)
    if numberofsegments > 0
        segmentlist = pointer(p.segment)
        segmentmarkerlist = pointer(p.segment_marker)
    else
        segmentlist = convert(Ptr{Cint}, C_NULL)
        segmentmarkerlist = convert(Ptr{Cint}, C_NULL)
    end
    numberofregions = Cint(p.n_region)
    if numberofregions == 0
        regionlist = convert(Ptr{Cdouble}, C_NULL)
    else
        regionlist = pointer(p.region)
    end
    numberofholes = Cint(p.n_hole)
    if numberofholes == 0
        holelist = convert(Ptr{Cdouble}, C_NULL)
    else
        holelist = pointer(p.hole)
    end
    # Edges are output-only here.
    edgelist = convert(Ptr{Cint}, C_NULL)
    edgemarkerlist = convert(Ptr{Cint}, C_NULL)
    normlist = convert(Ptr{Cdouble}, C_NULL)
    numberofedges = Cint(0)
    # Field order must match the struct declaration exactly.
    mesh_C = Mesh_ptr_C(pointlist,
                        pointattributelist,
                        pointmarkerlist,
                        numberofpoints,
                        numberofpointattributes,
                        trianglelist,
                        triangleattributelist,
                        trianglearealist,
                        neighborlist,
                        numberoftriangles,
                        numberofcorners,
                        numberoftriangleattributes,
                        segmentlist,
                        segmentmarkerlist,
                        numberofsegments,
                        holelist,
                        numberofholes,
                        regionlist,
                        numberofregions,
                        edgelist,
                        edgemarkerlist,
                        normlist,
                        numberofedges)
    return mesh_C
end
"""
Mesh_ptr_C(n_point :: Cint, point :: Array{Float64,2},
n_point_marker :: Cint, point_marker :: Array{Cint,2},
n_point_attribute :: Cint, point_attribute :: Array{Float64,2},
n_cell :: Cint, cell :: Array{Cint,2}, cell_area_constraint :: Array{Float64,1},
n_edge :: Cint, edge :: Array{Cint,2}, edge_marker :: Array{Cint,1},
n_segment :: Cint, segment :: Array{Cint,2}, segment_marker :: Array{Cint,1},
n_hole :: Cint, hole :: Array{Float64,2},
n_region :: Cint, region :: Array{Float64,2}, triangle_attribute :: Array{Float64,2}, n_triangle_attribute :: Cint )
Constructor for `Mesh_ptr_C` from mesh data. Only for internal use.
"""
function Mesh_ptr_C(n_point::Cint, point::Array{Float64,2},
n_point_marker::Cint, point_marker::Array{Cint,2},
n_point_attribute::Cint, point_attribute::Array{Float64,2},
n_cell::Cint, cell::Array{Cint,2}, cell_area_constraint::Array{Float64,1},
n_edge::Cint, edge::Array{Cint,2}, edge_marker::Array{Cint,1},
n_segment::Cint, segment::Array{Cint,2}, segment_marker::Array{Cint,1},
n_hole::Cint, hole::Array{Float64,2},
n_region::Cint, region::Array{Float64,2}, triangle_attribute::Array{Float64,2}, n_triangle_attribute::Cint )
numberofpoints = Cint(n_point)
pointlist = pointer(point)
if n_point_marker == 0
pointmarkerlist = convert(Ptr{Cint}, C_NULL)
else
pointmarkerlist = pointer(point_marker)
end
numberofpointattributes = Cint(n_point_attribute)
if numberofpointattributes == 0
pointattributelist = convert(Ptr{Cdouble}, C_NULL)
else
pointattributelist = pointer(point_attribute)
end
numberoftriangles = Cint(n_cell)
trianglelist = pointer(cell)
trianglearealist = pointer(cell_area_constraint)
numberofcorners = Cint(3)
# numberoftriangleattributes = Cint(0)
# triangleattributelist = convert(Ptr{Cdouble}, C_NULL)
neighborlist = convert(Ptr{Cint}, C_NULL)
if n_region > 0
numberoftriangleattributes = Cint(n_cell)
else
numberoftriangleattributes = Cint(0)
end
numberoftriangleattributes = Cint(n_triangle_attribute)
if numberoftriangleattributes > 0
triangleattributelist = pointer(triangle_attribute)
else
triangleattributelist = convert(Ptr{Cdouble}, C_NULL)
end
numberofsegments = Cint(n_segment)
if numberofsegments > 0
segmentlist = pointer(segment)
segmentmarkerlist = pointer(segment_marker)
else
segmentlist = convert(Ptr{Cint}, C_NULL)
segmentmarkerlist = convert(Ptr{Cint}, C_NULL)
end
numberofregions = Cint(n_region)
if numberofregions == 0
regionlist = convert(Ptr{Cdouble}, C_NULL)
else
regionlist = pointer(region)
end
numberofholes = Cint(n_hole)
if numberofholes == 0
holelist = convert(Ptr{Cdouble}, C_NULL)
else
holelist = pointer(hole)
end
numberofedges = Cint(n_segment)
if numberofedges > 0
edgelist = pointer(segment)
edgemarkerlist = pointer(segment_marker)
else
edgelist = convert(Ptr{Cint}, C_NULL)
edgemarkerlist = convert(Ptr{Cint}, C_NULL)
end
normlist = convert(Ptr{Cdouble}, C_NULL)
mesh_C = Mesh_ptr_C(pointlist,
pointattributelist,
pointmarkerlist,
numberofpoints,
numberofpointattributes,
trianglelist,
triangleattributelist,
trianglearealist,
neighborlist,
numberoftriangles,
numberofcorners,
numberoftriangleattributes,
segmentlist,
segmentmarkerlist,
numberofsegments,
holelist,
numberofholes,
regionlist,
numberofregions,
edgelist,
edgemarkerlist,
normlist,
numberofedges)
return mesh_C
end
"""
Mesh_ptr_C()
Constructor for `Mesh_ptr_C`. Initialize everything as `NULL`. Only for internal use.
"""
function Mesh_ptr_C()
return Mesh_ptr_C(C_NULL, C_NULL, C_NULL, 0, 0,
C_NULL, C_NULL, C_NULL, C_NULL, 0, 0, 0,
C_NULL, C_NULL, 0,
C_NULL, 0,
C_NULL, 0,
C_NULL, C_NULL, C_NULL, 0)
end
# -----------------------------------------------------------
# -----------------------------------------------------------
# ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Int32,2}, ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Int32,2}, ::Array{Float64,1}, ::Int32, ::Array{Int32,2}, ::Array{Int32,1}, ::Int32, ::Array{Int32,2}, ::Array{Int32,1}, ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Float64,2}, ::Array{Float64,2}, ::Int32)
# ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Int32,2}, ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Int32,2}, ::Array{Float64,1}, ::Int32, ::Array{Int32,2}, ::Array{Int32,1}, ::Int32, ::Array{Int32,2}, ::Array{Int32,1}, ::Int32, ::Array{Float64,2}, ::Int32, ::Array{Float64,2}, !Matched::Array{Float64,1}, ::Int32 | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 6502 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
Struct describes a planar straight-line graph (PSLG) of a polygon. It contains points, point markers,
attributes, segments, segment markers and holes.
"""
struct Polygon_pslg
    n_point :: Cint # number of points
    point :: Array{Cdouble, 2} # (2, n_point)-array
    n_point_marker :: Cint # either 0 or 1
    point_marker :: Array{Cint,2} # (n_point_marker, n_point)-array
    n_point_attribute :: Cint # number of attributes
    point_attribute :: Array{Cdouble,2} # (n_point_attribute, n_point)-array
    n_segment :: Cint # number of segments
    segment :: Array{Cint,2} # (2, n_segment)-array of endpoint indices
    segment_marker :: Array{Cint,1} # one marker per segment (defaults to 1)
    n_hole :: Cint # number of holes
    hole :: Array{Cdouble,2} # (2, n_hole)-array: one point inside each hole
    n_region :: Cint # number of regions
    region :: Array{Cdouble,2} # (4, n_region)-array
    n_triangle_attribute :: Cint # number of triangle attributes
end
"""
Polygon_pslg(n_point :: Int, n_point_marker :: Int, n_point_attribute :: Int, n_segment :: Int, n_hole :: Int, n_region :: Int)
Outer constructor that only reserves space for points, markers, attributes, holes and regions.
Input data is converted to hold C-data structures (Cint and Cdouble arrays) for internal use.
"""
function Polygon_pslg(n_point :: Int, n_point_marker :: Int, n_point_attribute :: Int,
n_segment :: Int, n_hole :: Int, n_region :: Int = 0, n_triangle_attribute :: Int = 0)
n_point<1 ? Base.@error("Number of polygon points must be positive.") :
n_point = n_point
point = Array{Cdouble,2}(undef, 2, n_point)
if n_point_marker>1
Base.@info "Number of point markers > 1. Only 0 or 1 admissible. Set to 1."
n_point_marker = 1
end
point_marker = Array{Cint,2}(undef, n_point_marker, n_point)
n_point_attribute<0 ? Base.@error("Number of point attributes must be positive.") :
n_point_attribute = n_point_attribute
point_attribute = Array{Cdouble,2}(undef, n_point_attribute, n_point)
n_segment = n_segment
segment = Array{Cint,2}(undef, 2,n_segment)
# Set segment marker to 1 by default.
segment_marker = ones(Cint,n_segment)
n_hole<0 ? Base.@error("Number of holes must be a nonnegative integer.") :
n_hole = n_hole
hole = Array{Cdouble,2}(undef, 2, n_hole)
n_region<0 ? Base.@error("Number of regions must be a nonnegative integer.") :
n_region = n_region
region = Array{Cdouble,2}(undef, 4, n_region)
# Call inner constructor
poly = Polygon_pslg(n_point, point,
n_point_marker, point_marker,
n_point_attribute, point_attribute,
n_segment, segment, segment_marker,
n_hole, hole,
n_region, region, n_triangle_attribute )
return poly
end
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
set_polygon_point!(poly :: Polygon_pslg, p :: AbstractArray{Float64,2})
Set `poly.point` appropriately. Input must have dimensions `n_point`-by-`2`.
"""
function set_polygon_point!(poly :: Polygon_pslg, p :: AbstractArray{Float64,2})
    # Empty input: leave the polygon untouched.
    if length(p) > 0
        if size(poly.point) != size(p')
            Base.@error("Polygon constructor: Point size mismatch...")
        else
            # Stored transposed as a (2, n_point) Cdouble array.
            poly.point[:,:] = convert(Array{Cdouble,2}, p)'
        end
    end
    return nothing
end
"""
set_polygon_point_marker!(poly :: Polygon_pslg, pm :: AbstractArray{Int,2})
Set `poly.point_marker` appropriately. Input must have dimensions `n_point`-by-`n_point_marker`. `n_point_marker` can be 1 or 0.
"""
function set_polygon_point_marker!(poly :: Polygon_pslg, pm :: AbstractArray{Int,2})
    # Empty input: leave the polygon untouched.
    if length(pm) > 0
        if size(poly.point_marker) != (size(pm, 2), size(pm, 1))
            Base.@error("Polygon constructor: Point marker mismatch...")
        else
            poly.point_marker[:,:] = convert(Array{Cint,2}, pm)'
        end
    end
    return nothing
end
"""
set_polygon_point_attribute!(poly :: Polygon_pslg, pa :: AbstractArray{Float64,2})
Set `poly.point_attribute` appropriately. Input must have dimensions `n_point`-by-`n_point_attribute`.
"""
function set_polygon_point_attribute!(poly :: Polygon_pslg, pa :: AbstractArray{Float64,2})
    # Empty input: leave the polygon untouched.
    if length(pa) > 0
        if size(poly.point_attribute) != size(pa')
            Base.@error("Polygon constructor: Point attribute mismatch...")
        else
            poly.point_attribute[:,:] = convert(Array{Cdouble,2}, pa)'
        end
    end
    return nothing
end
"""
set_polygon_segment!(poly :: Polygon_pslg, s :: AbstractArray{Int,2})
Set `poly.segment` appropriately. Input must have dimensions `n_segment`-by-`2`.
"""
function set_polygon_segment!(poly :: Polygon_pslg, s :: AbstractArray{Int,2})
    # Empty input: leave the polygon untouched.
    if length(s) > 0
        if size(poly.segment) != size(s')
            Base.@error("Polygon constructor: Segment size mismatch...")
        else
            poly.segment[:,:] = convert(Array{Cint,2}, s)'
        end
    end
    return nothing
end
"""
set_polygon_segment_marker!(poly :: Polygon_pslg, sm :: AbstractArray{Int,1})
Set `poly.segment_marker` appropriately. Input must have dimensions `n_segment`-by-`1`. If not set every segemnt will have marker equal to 1.
"""
function set_polygon_segment_marker!(poly :: Polygon_pslg, sm :: AbstractArray{Int,1})
    # Empty input keeps the default markers (all 1).
    if length(sm) > 0
        if size(poly.segment_marker) != size(sm)
            Base.@error("Polygon constructor: Segment marker mismatch...")
        else
            poly.segment_marker[:] = convert(Array{Cint,1}, sm)
        end
    end
    return nothing
end
"""
set_polygon_hole!(poly :: Polygon_pslg, h :: AbstractArray{Float64,2})
Set `poly.hole` appropriately. Input must have dimensions `n_hole`-by-`2`.
!!!
Each hole must be enclosed by segments. Do not place holes on segments.
"""
function set_polygon_hole!(poly :: Polygon_pslg, h :: AbstractArray{Float64,2})
    # Empty input: leave the polygon untouched.
    if length(h) > 0
        if size(poly.hole) != size(h')
            Base.@error("Polygon constructor: Hole mismatch...")
        else
            poly.hole[:,:] = convert(Array{Cdouble,2}, h)'
        end
    end
    return nothing
end
"""
set_polygon_region!(poly :: Polygon_pslg, h :: AbstractArray{Float64,2})
Set `poly.region` appropriately. Input must have dimensions `n_region`-by-`2`.
!!!
Each region must be enclosed by segments. Do not place regions on segments.
"""
function set_polygon_region!(poly :: Polygon_pslg, h :: AbstractArray{Float64,2})
    # Empty input: leave the polygon untouched.
    if length(h) > 0
        if size(poly.region) != size(h')
            Base.@error("Polygon constructor: Region mismatch...")
        else
            poly.region[:,:] = convert(Array{Cdouble,2}, h)'
        end
    end
    return nothing
end
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 9627 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
Struct containing Voronoi diagram. If arrays do not contain
data their length is zero.
"""
struct VoronoiDiagram
vor_info::String
n_point::Int
point::Array{Float64,2}
n_point_attribute::Int
point_attribute::Array{Float64,2}
n_edge::Int
edge::Array{Int,2}
normal::Array{Float64,2}
end
"""
Struct containing triangular mesh and a Voronoi diagram. If arrays do not contain
data their length is zero.
"""
struct TriMesh
mesh_info::String
n_point::Int
point::Array{Float64,2}
n_point_marker::Int
point_marker::Array{Int,2}
n_point_attribute::Int
point_attribute::Array{Float64,2}
n_cell::Int
cell::Array{Int,2}
cell_neighbor::Array{Int,2}
n_edge::Int
edge::Array{Int,2}
n_edge_marker::Int
edge_marker::Array{Int,1}
n_segment::Int
segment::Array{Int,2}
n_segment_marker::Int
segment_marker::Array{Int,1}
n_hole::Int
hole::Array{Float64,2}
n_region::Int
region::Array{Float64,2}
triangle_attribute::Array{Float64,1}
n_triangle_attribute::Int
voronoi::VoronoiDiagram
end # end struct
# -----------------------------------------------------------
# -----------------------------------------------------------
# ---------------------------------------------------------------------------------------------
"""
TriMesh(mesh :: Mesh_ptr_C, vor :: Mesh_ptr_C, mesh_info :: String)
Outer constructor for TriMesh. Read the struct returned by `ccall(...)` to Triangle library.
Wrap a Julia arrays around mesh data if their pointer is not `C_NULL`.
"""
function TriMesh(mesh::Mesh_ptr_C, vor::Mesh_ptr_C, mesh_info::String, o2::Bool)
mesh_info = mesh_info
# Let julia take ownership of C memory
take_ownership = false
# Points
n_point = Int(mesh.numberofpoints)
n_point == 0 ? Base.@error("Number of points in mesh output is zero...") :
if mesh.pointlist != C_NULL
point = convert(Array{Float64,2},
Base.unsafe_wrap(Array, mesh.pointlist, (2, n_point), own=take_ownership))
else
Base.@error("Points could not be read from triangulation structure.")
end
if mesh.pointmarkerlist != C_NULL
n_point_marker = 1
point_marker = convert(Array{Int,2}, Base.unsafe_wrap(Array, mesh.pointmarkerlist, (1, n_point), own=take_ownership))
else
n_point_marker = 0
point_marker = Array{Int,2}(undef, 0, n_point)
end
n_point_attribute = Int(mesh.numberofpointattributes)
if n_point_attribute > 0
point_attribute = convert(Array{Float64,2},
Base.unsafe_wrap(Array, mesh.pointattributelist, (n_point_attribute, n_point), own=take_ownership))
else
point_attribute = Array{Float64,2}(undef, 0, n_point)
end
# Triangles
n_cell = Int(mesh.numberoftriangles)
n_cell == 0 ? Base.@error("Number of triangles in mesh output is zero...") :
if mesh.trianglelist != C_NULL
if o2
cell = convert(Array{Int,2},
Base.unsafe_wrap(Array, mesh.trianglelist, (6, n_cell), own=take_ownership))
else
cell = convert(Array{Int,2},
Base.unsafe_wrap(Array, mesh.trianglelist, (3, n_cell), own=take_ownership))
end
if minimum(cell) == 0
cell .+= 1
end
else
Base.@error("Cells could not be read.")
end
n_triangle_attribute = Int(mesh.numberoftriangleattributes)
if mesh.triangleattributelist != C_NULL
triangle_attribute = convert(Array{Float64,1},
Base.unsafe_wrap(Array, mesh.triangleattributelist, n_cell, own=take_ownership))
else
triangle_attribute = Array{Float64,1}(undef, 0)
end
if mesh.neighborlist != C_NULL
cell_neighbor = convert(Array{Int,2},
Base.unsafe_wrap(Array, mesh.neighborlist, (3, n_cell), own=take_ownership))
if minimum(cell_neighbor) == -1
cell_neighbor .+= 1
end
else
cell_neighbor = Array{Int,2}(undef, 0, n_cell)
end
# Edges
if mesh.edgelist != C_NULL
n_edge = Int(mesh.numberofedges)
edge = convert(Array{Int,2},
Base.unsafe_wrap(Array, mesh.edgelist, (2, n_edge), own=take_ownership))
if minimum(edge) == 0
edge .+= 1
end
else
n_edge = 0
edge = Array{Int,2}(undef, 2, 0)
end
if mesh.edgemarkerlist != C_NULL
n_edge_marker = 1
edge_marker = convert(Array{Int,1},
Base.unsafe_wrap(Array, mesh.edgemarkerlist, n_edge, own=take_ownership))
else
n_edge_marker = 0
edge_marker = Array{Int,1}(0)
end
# Segments
if mesh.segmentlist != C_NULL
n_segment = Int(mesh.numberofsegments)
segment = convert(Array{Int,2},
Base.unsafe_wrap(Array, mesh.segmentlist, (2, n_segment), own=take_ownership))
if minimum(segment) == 0
segment .+= 1
end
else
n_segment = 0
segment = Array{Int,2}(undef, 2, 0)
end
if mesh.segmentmarkerlist != C_NULL
n_segment_marker = 1
segment_marker = convert(Array{Int,1},
Base.unsafe_wrap(Array, mesh.segmentmarkerlist, n_segment, own=take_ownership))
else
n_segment_marker = 0
segment_marker = Array{Int,1}(undef, 0)
end
# holes
if mesh.holelist != C_NULL
n_hole = Int(mesh.numberofholes)
hole = convert(Array{Float64,2},
Base.unsafe_wrap(Array, mesh.holelist, (2, n_hole), own=take_ownership))
else
n_hole = 0
hole = Array{Float64,2}(undef, 2, 0)
end
# regions
if mesh.regionlist != C_NULL
n_region = Int(mesh.numberofregions)
region = convert(Array{Float64,2},
Base.unsafe_wrap(Array, mesh.regionlist, (4, n_region), own=take_ownership))
else
n_region = 0
region = Array{Float64,2}(undef, 4, 0)
end
# ----------------------------------------
# VoronoiDiagram
vor_info = mesh_info * " - Voronoi diagram"
# Read the pointers
if vor.pointlist != C_NULL
# Points
n_point_v = Int(vor.numberofpoints)
point_v = convert(Array{Float64,2},
Base.unsafe_wrap(Array, vor.pointlist, (2, n_point), own=take_ownership))
n_point_attribute_v = Int(vor.numberofpointattributes)
if n_point_attribute_v > 0
point_attribute_v = convert(Array{Float64,2},
Base.unsafe_wrap(Array, vor.pointattributelist, (n_point_attribute_v, n_point_v), own=take_ownership))
else
point_attribute_v = Array{Float64,2}(undef, 0, n_point_v)
end
# Edges
n_edge_v = Int(vor.numberofedges)
if n_edge_v > 0
edge_v = convert(Array{Int,2},
Base.unsafe_wrap(Array, vor.edgelist, (2, n_edge_v), own=take_ownership))
if minimum(edge_v) == 0
edge_v .+= 1
end
if vor.normlist != C_NULL
normal_v = convert(Array{Float64,2},
Base.unsafe_wrap(Array, vor.edgelist, (2, n_edge_v), own=take_ownership))
else
normal_v = Array{Float64,2}(undef, 2, 0)
end
else
edge_v = Array{Int,2}(undef, 0, 2)
normal_v = Array{Float64,2}(undef, 2, 0)
end
else
n_point_v = 0
point_v = Array{Float64,2}(undef, 2, 0)
n_point_attribute_v = 0
point_attribute_v = Array{Float64,2}(undef, 1, 0)
n_edge_v = 0
edge_v = Array{Int,2}(undef, 2, 0)
normal_v = Array{Float64,2}(undef, 2, 0)
end
voronoi = VoronoiDiagram(vor_info,
n_point_v, point_v,
n_point_attribute_v, point_attribute_v,
n_edge_v, edge_v,
normal_v)
# ----------------------------------------
mesh_out = TriMesh(mesh_info,
n_point, point,
n_point_marker, point_marker,
n_point_attribute, point_attribute,
n_cell, cell, cell_neighbor,
n_edge, edge,
n_edge_marker, edge_marker,
n_segment, segment,
n_segment_marker, segment_marker,
n_hole, hole,
n_region, region, triangle_attribute, n_triangle_attribute,
voronoi)
# clean C
if take_ownership
mesh.pointlist = C_NULL
mesh.pointattributelist = C_NULL
mesh.pointmarkerlist = C_NULL
mesh.trianglelist = C_NULL
mesh.triangleattributelist = C_NULL
mesh.trianglearealist = C_NULL
mesh.neighborlist = C_NULL
mesh.segmentlist = C_NULL
mesh.segmentmarkerlist = C_NULL
mesh.holelist = C_NULL
mesh.regionlist = C_NULL
mesh.edgelist = C_NULL
mesh.edgemarkerlist = C_NULL
mesh.normlist = C_NULL
end
return mesh_out
end # end constructor
# --------------------------------------------------------------------------------------------- | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 17135 | """
triangulate(mesh_in :: Mesh_ptr_C, mesh_out :: Mesh_ptr_C, vor_out :: Mesh_ptr_C, switches :: String)
Direct (raw) interface to triangle library. See triangle's documentation.
"""
function triangulate(mesh_in::Mesh_ptr_C, mesh_out::Mesh_ptr_C, vor_out::Mesh_ptr_C, switches::String)
    # Not using dlopen/dlsym here goes along with the
    # Julia documentation.
    # see https://docs.julialang.org/en/v1/manual/calling-c-and-fortran-code/
    # We need to ensure const-ness of the first argument tuple, though.
    # `mesh_out` and `vor_out` are filled in-place by the C library.
    ccall((:tesselate_pslg, libtesselate),
          Cvoid,
          (Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Cstring),
          Ref(mesh_in),
          Ref(mesh_out),
          Ref(vor_out),
          switches)
    # Using dlclose here would unload the library which may be not
    # the desired outcome when we need repeated calls.
end
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
create_mesh(poly :: Polygon_pslg; <keyword arguments>)
Creates a triangulation of a planar straight-line graph (PSLG) polygon.
# Keyword arguments
- `info_str :: String = "Triangular mesh of polygon (PSLG)"`: Some mesh info on the mesh
- `verbose :: Bool = false`: Print triangle's output
- `check_triangulation :: Bool = false`: Check triangulation for Delaunay property
after it is created
- `voronoi :: Bool = false`: Output a Voronoi diagram
- `delaunay :: Bool = false`: If true this option ensures that the mesh is Delaunay
instead of only constrained Delaunay. You can also set
it true if you want to ensure that all Voronoi vertices
are within the triangulation.
- `mesh_convex_hull :: Bool = false`: Mesh the convex hull of a `poly` (useful if the polygon does
not enclose a bounded area - its convex hull still does though)
- `output_edges :: Bool = true`: If true gives an edge list.
- `output_cell_neighbors :: Bool = true`: If true outputs a list of neighboring triangles
for each triangle
- `quality_meshing :: Bool = true`: If true avoids triangles with angles smaller that 20 degrees
- `prevent_steiner_points_boundary :: Bool = false`: If true no Steiner points are added on
boundary segnents.
- `prevent_steiner_points :: Bool = false`: If true no Steiner points are added on boundary segments
on inner segments.
- `set_max_steiner_points :: Bool = false`: If true the user will be asked to enter the maximum number
of Steiner points added. If the user inputs 0 this is
equivalent to `set_max_steiner_points = true`.
- `set_area_max :: Bool = false`: If true the user will be asked for the maximum triangle area.
- `set_angle_min :: Bool = false`: If true the user will be asked for a lower bound for minimum
angles in the triangulation.
- `add_switches :: String = ""`: The user can pass additional switches as described in triangle's
documentation. Only set this option if you know what you are doing.
"""
function create_mesh(poly::Polygon_pslg;
info_str::String="Triangular mesh of polygon (PSLG)",
verbose::Bool=false,
check_triangulation::Bool=false,
voronoi::Bool=false,
delaunay::Bool=false,
mesh_convex_hull::Bool=false,
output_edges::Bool=true,
output_cell_neighbors::Bool=true,
quality_meshing::Bool=true,
prevent_steiner_points_boundary::Bool=false,
prevent_steiner_points::Bool=false,
set_max_steiner_points::Bool=false,
set_area_max::Bool=false,
set_angle_min::Bool=false,
second_order_triangles::Bool=false,
add_switches::String="")
switches = "p"
if ~verbose
switches = switches * "Q"
end
if check_triangulation
switches = switches * "C"
end
if mesh_convex_hull
switches = switches * "c"
end
if voronoi
switches = switches * "v"
end
if delaunay
switches = switches * "D"
end
if output_edges
switches = switches * "e"
end
if output_cell_neighbors
switches = switches * "n"
end
if second_order_triangles
switches = switches * "o2"
end
# -------
if prevent_steiner_points
prevent_steiner_points_boundary = true
switches = switches * "YY"
else
if prevent_steiner_points_boundary
switches = switches * "Y"
end
end
# -------
# -------
if set_max_steiner_points
max_steiner_points_str = input("Maximum number of Steiner points allowed: ")
# Check if input makes sense
try
number = parse(Int, max_steiner_points_str)
if number < 0
Base.@error("Maximum number of Steiner points must be nonnegative.")
end
catch
Base.@error("Maximum number of Steiner points must be a nonnegative integer.")
end
switches = switches * "S" * max_steiner_points_str
end
# -------
# -------
if set_area_max
max_area_str = input("Maximum triangle area: ")
# Check if input makes sense
try
number = parse(Float64, max_area_str)
if number <= 0
Base.@error "Area must be positive."
end
catch
Base.@error "Area must be a positive real number."
end
switches = switches * "a" * max_area_str
end
# -------
# -------
if set_angle_min
quality_meshing = true
max_angle_str = input("Set angle constraint (choose whith care): ")
# Check if input makes sense
try
number = parse(Float64, max_angle_str)
if number <= 0
Base.@error "Minimum angle must be positive."
end
if number >= 34
Base.@warn "Minimum angle should not be larger than 34 degrees. For a larger angle TRIANGLE might not converge."
end
catch
Base.@error "Area must be a positive real number and should not be larger than 34 degrees."
end
switches = switches * "q" * max_angle_str
else
if quality_meshing
switches = switches * "q"
end
end
# -------
# This enables to use aditional switches and should be used with care
switches = switches * add_switches
if occursin("z", switches)
Base.@error("Triangle switches must not contain `z`. Zero based indexing is not allowed.")
end
mesh_in = Mesh_ptr_C(poly)
mesh_buffer = Mesh_ptr_C()
vor_buffer = Mesh_ptr_C()
triangulate(mesh_in, mesh_buffer, vor_buffer, switches)
mesh = TriMesh(mesh_buffer, vor_buffer, info_str, second_order_triangles)
return mesh
end
"""
create_mesh(poly :: Polygon_pslg, switches :: String; info_str :: String = "Triangular mesh of polygon (PSLG)")
Creates a triangulation of a planar straight-line graph (PSLG) polygon.
Options for the meshing algorithm are passed directly by command line switches
for Triangle. Use only if you know what you are doing.
"""
function create_mesh(poly::Polygon_pslg, switches::String;
info_str::String="Triangular mesh of polygon (PSLG)")
if occursin("z", switches)
error("Triangle switches must not contain `z`. Zero based indexing is not allowed.")
end
mesh_in = Mesh_ptr_C(poly)
mesh_buffer = Mesh_ptr_C()
vor_buffer = Mesh_ptr_C()
triangulate(mesh_in, mesh_buffer, vor_buffer, switches)
o2 = false
if occursin("o2", switches)
o2 = true
end
mesh = TriMesh(mesh_buffer, vor_buffer, info_str, o2)
return mesh
end
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
create_mesh(point :: Array{Float64,2}; <keyword arguments>)
Creates a triangulation of the convex hull of a point cloud.
# Keyword arguments
- `point_marker :: Array{Int,2} = Array{Int,2}(undef,0,size(point,1))`: Points can have a marker.
- `point_attribute :: Array{Float64,2} = Array{Float64,2}(undef,0,size(point,1))`: Points can be
given a number
of attributes.
- `info_str :: String = "Triangular mesh of convex hull of point cloud."`: Some mesh info on the mesh
- `verbose :: Bool = false`: Print triangle's output
- `check_triangulation :: Bool = false`: Check triangulation for Delaunay property
after it is created
- `voronoi :: Bool = false`: Output a Voronoi diagram
- `delaunay :: Bool = false`: If true this option ensures that the mesh is Delaunay
instead of only constrained Delaunay. You can also set
it true if you want to ensure that all Voronoi vertices
are within the triangulation.
- `output_edges :: Bool = true`: If true gives an edge list.
- `output_cell_neighbors :: Bool = true`: If true outputs a list of neighboring triangles
for each triangle
- `quality_meshing :: Bool = true`: If true avoids triangles with angles smaller that 20 degrees
- `prevent_steiner_points_boundary :: Bool = false`: If true no Steiner points are added on
boundary segnents.
- `prevent_steiner_points :: Bool = false`: If true no Steiner points are added on boundary segments
on inner segments.
- `set_max_steiner_points :: Bool = false`: If true the user will be asked to enter the maximum number
of Steiner points added. If the user inputs 0 this is
equivalent to `set_max_steiner_points = true`.
- `set_area_max :: Bool = false`: If true the user will be asked for the maximum triangle area.
- `set_angle_min :: Bool = false`: If true the user will be asked for a lower bound for minimum
angles in the triangulation.
- `add_switches :: String = ""`: The user can pass additional switches as described in triangle's
documentation. Only set this option if you know what you are doing.
"""
function create_mesh(point::Array{Float64,2};
point_marker::Array{Int,2}=Array{Int,2}(undef, 0, size(point, 1)),
point_attribute::Array{Float64,2}=Array{Float64,2}(undef, 0, size(point, 1)),
info_str::String="Triangular mesh of convex hull of point cloud.",
verbose::Bool=false,
check_triangulation::Bool=false,
voronoi::Bool=false,
delaunay::Bool=false,
output_edges::Bool=true,
output_cell_neighbors::Bool=true,
quality_meshing::Bool=true,
prevent_steiner_points_boundary::Bool=false,
prevent_steiner_points::Bool=false,
set_max_steiner_points::Bool=false,
set_area_max::Bool=false,
set_angle_min::Bool=false,
add_switches::String="")
switches = "c"
if ~verbose
switches = switches * "Q"
end
if check_triangulation
switches = switches * "C"
end
if voronoi
switches = switches * "v"
end
if delaunay
switches = switches * "D"
end
if output_edges
switches = switches * "e"
end
if output_cell_neighbors
switches = switches * "n"
end
# -------
if prevent_steiner_points
prevent_steiner_points_boundary = true
switches = switches * "YY"
else
if prevent_steiner_points_boundary
switches = switches * "Y"
end
end
# -------
# -------
if set_max_steiner_points
max_steiner_points_str = input("Maximum number of Steiner points allowed: ")
# Check if input makes sense
try
number = parse(Int, max_steiner_points_str)
if number < 0
Base.@error("Maximum number of Steiner points must be nonnegative.")
end
catch
Base.@error("Maximum number of Steiner points must be a nonnegative integer.")
end
switches = switches * "S" * max_steiner_points_str
end
# -------
# -------
if set_area_max
max_area_str = input("Maximum triangle area: ")
# Check if input makes sense
try
number = parse(Float64, max_area_str)
if number <= 0
Base.@error("Area must be positive.")
end
catch
Base.@error("Area must be a positive real number.")
end
switches = switches * "a" * max_area_str
end
# -------
# -------
if set_angle_min
quality_meshing = true
max_angle_str = input("Set angle constraint (choose whith care): ")
# Check if input makes sense
try
number = parse(Float64, max_angle_str)
if number <= 0
Base.@error("Minimum angle must be positive.")
end
if number >= 34
Base.@warn("Minimum angle should not be larger than 34 degrees. For a larger angle TRIANGLE might not converge.")
end
catch
Base.@error("Area must be a positive real number and should not be larger than 34 degrees.")
end
switches = switches * "q" * max_angle_str
else
if quality_meshing
switches = switches * "q"
end
end
# -------
# This enables to use aditional switches and should be used with care
switches = switches * add_switches
occursin("z", switches) ? Base.@error("Triangle switches must not contain `z`. Zero based indexing is not allowed.") :
poly = polygon_struct_from_points(point, point_marker, point_attribute)
mesh = create_mesh(poly, switches, info_str=info_str)
return mesh
end
"""
create_mesh(point :: Array{Float64,2}, switches :: String; <keyword arguments>)
Creates a triangulation of a planar straight-line graph (PSLG) polygon.
Options for the meshing algorithm are passed directly by command line switches
for Triangle. Use only if you know what you are doing.
# Keyword arguments
- `point_marker :: Array{Int,2} = Array{Int,2}(undef,0,size(point,1))`: Points can have a marker.
- `point_attribute :: Array{Float64,2} = Array{Float64,2}(undef,0,size(point,1))`: Points can be
given a number
of attributes.
- `info_str :: String = "Triangular mesh of convex hull of point cloud."`: Some mesh info on the mesh
"""
function create_mesh(point::Array{Float64,2}, switches::String;
point_marker::Array{Int,2}=Array{Int,2}(undef, 0, size(point, 1)),
point_attribute::Array{Float64,2}=Array{Float64,2}(undef, 0, size(point, 1)),
info_str::String="Triangular mesh of convex hull of point cloud.")
occursin("z", switches) ? Base.@error("Triangle switches must not contain `z`. Zero based indexing is not allowed.") :
if ~occursin("c", switches)
Base.@info "Option `-c` added. Triangle switches must contain the -c option for point clouds."
switches = switches * "c"
end
poly = polygon_struct_from_points(point, point_marker, point_attribute)
mesh = create_mesh(poly, switches, info_str=info_str)
return mesh
end
# -----------------------------------------------------------
# -----------------------------------------------------------
| TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 12605 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
polygon_unitSimplex()
Create a polygon of the unit simplex (example code).
"""
function polygon_unitSimplex()
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 3
n_point_marker = 1 # Set up one point marker
n_point_attribute = 0 # no special point attributes
n_segment = 3
n_hole = 0 # no holes
n_region = 0
n_triangle_attribute = 0
# Initialize a polygon and reserve memory ()
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 0.0 1.0]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = ones(Int, n_point, n_point_marker)
set_polygon_point_marker!(poly, pm)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 1]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
return poly
end
"""
polygon_unitSquare()
Create a polygon of the unit square (example code).
"""
function polygon_unitSquare()
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 4
n_point_marker = 1 # Set up one point marker
n_point_attribute = 0 # no special point attributes
n_segment = 4
n_hole = 0 # no holes
n_region = 0
n_triangle_attribute = 0
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = ones(Int, n_point, n_point_marker)
set_polygon_point_marker!(poly, pm)
# Create random attributes
pa = rand(n_point, n_point_attribute)
set_polygon_point_attribute!(poly, pa)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 4 ; 4 1]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
return poly
end
"""
polygon_regular(n_corner :: Int)
Create a polygon of a regular polyhedron with `n_corner` corners (example code).
"""
function polygon_regular(n_corner::Int)
# Choose the numbers. Everything that is zero does not need to be set.
n_point = n_corner
n_point_marker = 1 # Set up one point marker
n_point_attribute = 0 # no special point attributes
n_segment = n_point
n_hole = 0 # no holes
n_region = 0;
n_triangle_attribute = 0
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = zeros(n_point, 2)
phi = range(0, stop=2 * pi, length=n_point + 1)[1:end - 1]
point = [cos.(phi) sin.(phi)]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = ones(Int, n_point, n_point_marker)
set_polygon_point_marker!(poly, pm)
# 4 segments, indexing starts at one
s = [1:n_point circshift(1:n_point, -1)]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
return poly
end
"""
polygon_unitSquareWithHole()
Create a polygon of the unit square that has a squared hole in the middle (example code).
"""
function polygon_unitSquareWithHole()
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 8
n_point_marker = 1 # Set up one point marker
n_point_attribute = 2 # no special point attributes
n_segment = 8
n_hole = 1 # no holes
n_region = 0
n_triangle_attribute = 0
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0 ;
0.25 0.25 ; 0.75 0.25 ; 0.75 0.75 ; 0.25 0.75]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = [ones(Int, 4, n_point_marker) ; 2 * ones(Int, 4, n_point_marker)]
set_polygon_point_marker!(poly, pm)
# Create random attributes
pa = rand(n_point, n_point_attribute)
set_polygon_point_attribute!(poly, pa)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 4 ; 4 1 ;
5 6 ; 6 7 ; 7 8 ; 8 5]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = [ones(Int, 4) ; ones(Int, 4)]
set_polygon_segment_marker!(poly, sm)
# This hole is marked by the point (0.5,0.5). The hole is as big as the
# segment that encloses the point.
h = [0.5 0.5]
set_polygon_hole!(poly, h)
return poly
end
"""
polygon_unitSquareWithRegion()
Create a polygon of the unit square that has a squared hole in the middle (example code).
"""
function polygon_unitSquareWithRegion()
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 4
n_point_marker = 1 # Set up one point marker
n_point_attribute = 0 # no special point attributes
n_segment = 4
n_hole = 0 # no holes
n_region = 1
n_triangle_attribute = 1
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = ones(Int, n_point, n_point_marker)
set_polygon_point_marker!(poly, pm)
# Create random attributes
pa = rand(n_point, n_point_attribute)
set_polygon_point_attribute!(poly, pa)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 4 ; 4 1]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
# This region is marked by the point (0.5,0.5). The region is as big as the segment that encloses the point.
h = [0.5 0.5 0.0 0.0]
set_polygon_region!(poly, h)
return poly
end
"""
polygon_unitSquareWithEnclosedRegion()
Create a polygon of the unit square that has a squared hole in the middle (example code).
"""
function polygon_unitSquareWithEnclosedRegion()
# # Choose the numbers. Everything that is zero does not need to be set.
# n_point = 8
# n_point_marker = 1 # Set up one point marker
# n_point_attribute = 1 # no special point attributes
# n_segment = 8
# n_hole = 0 # no holes
# n_region = 1
# n_triangle_attribute = 1
# # Initialize a polygon and reserve memory
# poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# # 4 points
# point = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0 ;
# 0.25 0.25 ; 0.75 0.25 ; 0.75 0.75 ; 0.25 0.75]
# set_polygon_point!(poly, point)
# # Mark all input points with one (as boundary marker)
# pm = [ones(Int, 4,n_point_marker) ; 2*ones(Int, 4,n_point_marker)]
# set_polygon_point_marker!(poly, pm)
# # Create random attributes
# pa = rand(n_point, n_point_attribute)
# set_polygon_point_attribute!(poly, pa)
# # 4 segments, indexing starts at one
# s = [1 2 ; 2 3 ; 3 4 ; 4 1 ;
# 5 6 ; 6 7 ; 7 8 ; 8 5]
# set_polygon_segment!(poly, s)
# # Mark all input segments with one (as boundary marker)
# sm = [ones(Int, 4) ; ones(Int, 4)]
# set_polygon_segment_marker!(poly, sm)
# # This hole is marked by the point (0.5,0.5). The hole is as big as the
# # segment that encloses the point.
# h = [0.1 0.1]
# set_polygon_region!(poly, h)
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 8
n_point_marker = 1 # Set up one point marker
n_point_attribute = 0 # no special point attributes
n_segment = 8
n_hole = 0 # no holes
n_region = 2
n_triangle_attribute = 1
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0; 0.25 0.25 ; 0.75 0.25 ; 0.75 0.75 ; 0.25 0.75]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = [ones(Int, 4, n_point_marker) ; 2 * ones(Int, 4, n_point_marker)]
set_polygon_point_marker!(poly, pm)
# Create random attributes
pa = rand(n_point, n_point_attribute)
set_polygon_point_attribute!(poly, pa)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 4 ; 4 1 ;
5 6 ; 6 7 ; 7 8 ; 8 5]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
# This region is marked by the point (0.5,0.5). The region is as big as the segment that encloses the point.
h = [0.1 0.1 1.0 0.0; 0.5 0.5 2.0 0.0]
set_polygon_region!(poly, h)
return poly
end
"""
polygon_Lshape()
Create a polygon of an L-shaped domain (example code).
"""
function polygon_Lshape()
# Choose the numbers. Everything that is zero does not need to be set.
n_point = 6
n_point_marker = 1 # Set up one point marker
n_point_attribute = 2 # no special point attributes
n_segment = 6
n_hole = 0 # no holes
n_region = 0
n_triangle_attribute = 0
# Initialize a polygon and reserve memory
poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
# 4 points
point = [0.0 0.0 ; 1.0 0.0 ; 1.0 0.5 ; 0.5 0.5 ; 0.5 1.0 ; 0.0 1.0]
set_polygon_point!(poly, point)
# Mark all input points with one (as boundary marker)
pm = ones(Int, n_point, n_point_marker)
set_polygon_point_marker!(poly, pm)
# Create random attributes
pa = rand(n_point, n_point_attribute)
set_polygon_point_attribute!(poly, pa)
# 4 segments, indexing starts at one
s = [1 2 ; 2 3 ; 3 4 ; 4 5 ; 5 6 ; 6 1]
set_polygon_segment!(poly, s)
# Mark all input segments with one (as boundary marker)
sm = ones(Int, n_segment)
set_polygon_segment_marker!(poly, sm)
return poly
end
"""
    polygon_struct_from_points(point :: Array{Float64,2},
                               pm :: Array{Int,2},
                               pa :: Array{Float64,2})
Create a polygon from a set of points (example code). No segments or holes are set here.
# Arguments
- `point :: Array{Float64,2}`: point set (dimension n-by-2)
- `pm :: Array{Int,2}`: each point can have a marker (dimension either n-by-0 or n-by-1)
- `pa :: Array{Float64,2}`: each point can have a number of ``k>=0`` real attributes (dimension n-by-k)
"""
function polygon_struct_from_points(point::Array{Float64,2},
                                    pm::Array{Int,2},
                                    pa::Array{Float64,2})
    # Create a Polygon_pslg struct as the input for TRIANGLE. The Polygon_pslg
    # struct then only contains points, markers and attributes.
    #
    # Fix: this was a dangling ternary (`cond ? @error(...) :`) whose
    # else-branch swallowed the assignment below, so on bad input `n_point`
    # stayed undefined and a confusing UndefVarError followed.
    if size(point, 2) != 2
        Base.@error("Point set must have dimensions (n,2).")
    end
    # Choose the numbers. Everything that is zero does not need to be set.
    n_point = size(point, 1)
    n_point_marker = size(pm, 2)         # 0 or 1 marker columns
    n_point_attribute = size(pa, 2)      # k >= 0 attribute columns
    n_segment = 0
    n_hole = 0 # no holes
    n_region = 0
    n_triangle_attribute = 0
    # Initialize a polygon and reserve memory
    poly = Polygon_pslg(n_point, n_point_marker, n_point_attribute, n_segment, n_hole, n_region, n_triangle_attribute)
    # Copy the points, their markers and attributes into the struct.
    set_polygon_point!(poly, point)
    set_polygon_point_marker!(poly, pm)
    set_polygon_point_attribute!(poly, pa)
    return poly
end
# -----------------------------------------------------------
# ----------------------------------------------------------- | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 13037 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
    refine(m :: TriMesh ; <keyword arguments>)
Refines a triangular mesh according to user set constraints.
# Keyword arguments
- `divide_cell_into :: Int = 4`: Triangles listed in `ind_cell` are area constrained
                                    by 1/divide_cell_into * area(triangle[ind_cell]) in refined triangulation.
- `ind_cell :: Array{Int,1} = collect(1:m.n_cell)`: List of triangles to be refined.
- `keep_segments :: Bool = false`: Retain segments of input triangulations (although they may be subdivided).
- `keep_edges :: Bool = false`: Retain edges of input triangulations (although they may be subdivided).
- `verbose :: Bool = false`: Output triangle's commandline info.
- `check_triangulation :: Bool = false`: Check refined mesh.
- `voronoi :: Bool = false`: Output Voronoi diagram.
- `output_edges :: Bool = true`: Output edges.
- `output_cell_neighbors :: Bool = true`: Output cell neighbors.
- `quality_meshing :: Bool = true`: No angle is smaller than 20 degrees.
- `info_str :: String = "Refined mesh"`: Some info string.
# Remark
The switches `keep_segments` and `keep_edges` can not be true at the same time. If `keep_segments=true` area
constraints on triangles listed in `ind_cell` are rather local constraints than hard constraints on a triangle since
the original edges may not be preserved. For details see Triangle's documentation.
"""
function refine(m::TriMesh ; divide_cell_into::Int=4,
                ind_cell::Array{Int,1}=collect(1:m.n_cell),
                keep_segments::Bool=false,
                keep_edges::Bool=false,
                verbose::Bool=false,
                check_triangulation::Bool=false,
                voronoi::Bool=false,
                output_edges::Bool=true,
                output_cell_neighbors::Bool=true,
                quality_meshing::Bool=true,
                info_str::String="Refined mesh")
    # Do some sanity check of inputs
    if divide_cell_into < 1
        Base.@error "Option `divide_cell_into` must be at least 1."
    end
    if isempty(ind_cell)
        Base.@error "List of cells to be refined must not be empty. Leave this option blank to refine globally."
    end
    if keep_edges && keep_segments
        Base.@error "Options `keep_edges` and `keep_segments` can not both be true."
    end
    # Initial switch, indicates refinement, gives edges and triangle neighbors
    switches = "r"
    if output_edges
        switches = switches * "e"
    end
    if output_cell_neighbors
        switches = switches * "n"
    end
    if voronoi
        switches = switches * "v"
    end
    if quality_meshing
        switches = switches * "q"
    end
    # input points
    n_point = Cint(m.n_point)
    if n_point < 1
        Base.@error "No points provided for refinement."
    end
    point = convert(Array{Cdouble,2}, m.point)
    # input cells
    n_cell = Cint(m.n_cell)
    if n_cell < 1
        Base.@error "No cells provided for refinement."
    end
    cell = convert(Array{Cint,2}, m.cell)
    # If the list of triangles to be refined is not empty set up
    # `cell_area_constraint` for input and `-a` to switch without a number
    # following. Unconstrained cells are marked with a negative area.
    cell_area_constraint = -ones(m.n_cell)
    for i in ind_cell
        # Triangle area via the determinant formula, shrunk by divide_cell_into.
        cell_area_constraint[i] = abs(det([m.point[:,m.cell[:,i]] ; ones(1, 3)])) / (2 * divide_cell_into)
    end
    switches = switches * "a"
    # Check verbose option
    if ~verbose
        switches = switches * "Q"
    end
    # Check_triangulation
    if check_triangulation
        switches = switches * "C"
    end
    # Either keep segments or not (provided there are any)
    if keep_segments
        n_segment = Cint(m.n_segment)
        if n_segment == 0
            Base.@info "No segments provided by mesh. Option `keep_segments` disabled."
            keep_segments = false
        elseif n_segment > 0
            segment = convert(Array{Cint,2}, m.segment)
            segment_marker = convert(Array{Cint,1}, m.segment_marker)
            switches = switches * "p"
        else
            segment = Array{Cint,2}(undef, 2, 0)
            segment_marker = Array{Cint,1}(undef, 0)
        end
    elseif keep_edges
        # If there are edges use them
        n_segment = Cint(m.n_edge)
        if n_segment == 0
            Base.@info "No edges provided by mesh. Option `keep_edges` disabled."
            keep_edges = false
        end
        if n_segment > 0
            segment = convert(Array{Cint,2}, m.edge)
            segment_marker = convert(Array{Cint,1}, m.edge_marker)
            switches = switches * "p"
        else
            segment = Array{Cint,2}(undef, 2, 0)
            segment_marker = Array{Cint,1}(undef, 0)
        end
    else
        n_segment = Cint(0)
        segment = Array{Cint,2}(undef, 2, 0)
        segment_marker = Array{Cint,1}(undef, 0)
        Base.@info "Neither segments nor edges will be kept during the refinement."
    end
    # If there are edges use them (not necessary but does not harm)
    n_edge = Cint(m.n_edge)
    if n_edge == 0 && keep_edges
        Base.@info "No edges provided by mesh. Option `keep_edges` disabled."
        keep_edges = false
    end
    if n_edge > 0
        edge = convert(Array{Cint,2}, m.edge)
        edge_marker = convert(Array{Cint,1}, m.edge_marker)
    else
        # Fix: this assigned `edges` (typo), leaving `edge` undefined when
        # n_edge == 0 and crashing at the Mesh_ptr_C construction below.
        edge = Array{Cint,2}(undef, 2, 0)
        edge_marker = Array{Cint,1}(undef, 0)
    end
    # If there are point marker then use them
    n_point_marker = Cint(m.n_point_marker)
    if n_point_marker == 1
        point_marker = convert(Array{Cint,2}, m.point_marker)
    elseif n_point_marker == 0
        point_marker = Array{Cint,2}(undef, 0, n_point)
    else
        Base.@error "Number of n_point_marker must either be 0 or 1."
    end
    # If there are point attributes then use them
    n_point_attribute = Cint(m.n_point_attribute)
    if n_point_attribute > 0
        point_attribute = convert(Array{Cdouble,2}, m.point_attribute)
    else
        point_attribute = Array{Cdouble,2}(undef, 0, n_point)
    end
    n_hole = Cint(m.n_hole)
    if n_hole > 0
        hole = convert(Array{Cdouble,2}, m.hole)
    else
        hole = Array{Cdouble,2}(undef, 2, n_hole)
    end
    n_region = Cint(m.n_region)
    if n_region > 0
        region = convert(Array{Cdouble,2}, m.region)
    else
        region = Array{Cdouble,2}(undef, 4, n_region)
    end
    n_triangle_attribute = Cint(m.n_triangle_attribute)
    if n_triangle_attribute > 0
        triangle_attribute = convert(Array{Cdouble,2}, m.triangle_attribute)
    else
        triangle_attribute = Array{Cdouble,2}(undef, 1, n_triangle_attribute)
    end
    # Assemble the C-compatible input struct and call Triangle's refinement.
    mesh_in = Mesh_ptr_C(n_point, point,
                         n_point_marker, point_marker,
                         n_point_attribute, point_attribute,
                         n_cell, cell, cell_area_constraint,
                         n_edge, edge, edge_marker,
                         n_segment, segment, segment_marker,
                         n_hole, hole,
                         n_region, region, triangle_attribute, n_triangle_attribute)
    mesh_buffer = Mesh_ptr_C()
    vor_buffer = Mesh_ptr_C()
    lib_ptr = Libdl.dlopen(libtesselate)
    refine_trimesh_ptr = Libdl.dlsym(lib_ptr, :refine_trimesh)
    ccall(refine_trimesh_ptr,
          Cvoid,
          (Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Cstring),
          Ref(mesh_in),
          Ref(mesh_buffer), Ref(vor_buffer),
          switches)
    Libdl.dlclose(lib_ptr)
    mesh = TriMesh(mesh_buffer, vor_buffer, info_str, false)
    return mesh
end
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
    refine(m :: TriMesh, switches :: String; <keyword arguments>)
Refines a triangular mesh according to user set constraints. Command line switches are passed directly.
Use this function only if you know what you are doing.
# Keyword arguments
- `divide_cell_into :: Int = 4`: Triangles listed in `ind_cell` are area constrained
                                    by 1/divide_cell_into * area(triangle[ind_cell]) in refined triangulation.
- `ind_cell :: Array{Int,1} = collect(1:m.n_cell)`: List of triangles to be refined.
- `info_str :: String = "Refined mesh"`: Some info string.
"""
function refine(m::TriMesh, switches::String;
                divide_cell_into::Int=4,
                ind_cell::Array{Int,1}=collect(1:m.n_cell),
                info_str::String="Refined mesh")
    # Do some sanity check of inputs
    if isempty(ind_cell)
        Base.@error("List of cells to be refined must not be empty. Leave this option blank to refine globally.")
    end
    n_point = Cint(m.n_point)
    point = convert(Array{Cdouble,2}, m.point)
    # input cells
    n_cell = Cint(m.n_cell)
    cell = convert(Array{Cint,2}, m.cell)
    # If the list of triangles to be refined is not empty set up
    # `cell_area_constraint` for input and `-a` to switch without a number
    # following. Unconstrained cells are marked with a negative area.
    cell_area_constraint = -ones(m.n_cell)
    for i in ind_cell
        # Triangle area via the determinant formula, shrunk by divide_cell_into.
        cell_area_constraint[i] = abs(det([m.point[:,m.cell[:,i]] ; ones(1, 3)])) / (2 * divide_cell_into)
    end
    switches = switches * "a"
    n_segment = Cint(m.n_segment)
    if n_segment > 0
        segment = convert(Array{Cint,2}, m.segment)
        segment_marker = convert(Array{Cint,1}, m.segment_marker)
    else
        segment = Array{Cint,2}(undef, 2, 0)
        segment_marker = Array{Cint,1}(undef, 0)
    end
    # If there are edges use them (not necessary but does not harm)
    n_edge = Cint(m.n_edge)
    if n_edge > 0
        edge = convert(Array{Cint,2}, m.edge)
        edge_marker = convert(Array{Cint,1}, m.edge_marker)
    else
        edge = Array{Cint,2}(undef, 2, 0)
        edge_marker = Array{Cint,1}(undef, 0)
    end
    # If there are point marker then use them
    n_point_marker = Cint(m.n_point_marker)
    if n_point_marker == 1
        point_marker = convert(Array{Cint,2}, m.point_marker)
    elseif n_point_marker == 0
        # Fix: the old `Array{Cint,2}(n_point_marker, n_point)` is pre-1.0
        # constructor syntax and errors on current Julia; use the `undef` form
        # (matches the keyword-based `refine` method above).
        point_marker = Array{Cint,2}(undef, 0, n_point)
    else
        Base.@error("Number of n_point_marker must either be 0 or 1.")
    end
    # If there are point attributes then use them
    n_point_attribute = Cint(m.n_point_attribute)
    # Fix: this previously tested `n_point_marker`, so a mesh with markers but
    # no attributes tried to convert an empty attribute array (and vice versa).
    if n_point_attribute > 0
        point_attribute = convert(Array{Cdouble,2}, m.point_attribute)
    else
        point_attribute = Array{Cdouble,2}(undef, n_point_attribute, n_point)
    end
    n_hole = Cint(m.n_hole)
    if n_hole > 0
        hole = convert(Array{Cdouble,2}, m.hole)
    else
        hole = Array{Cdouble,2}(undef, 2, n_hole)
    end
    n_region = Cint(m.n_region)
    if n_region > 0
        region = convert(Array{Cdouble,2}, m.region)
    else
        region = Array{Cdouble,2}(undef, 4, n_region)
    end
    n_triangle_attribute = Cint(m.n_triangle_attribute)
    if n_triangle_attribute > 0
        triangle_attribute = convert(Array{Cdouble,2}, m.triangle_attribute)
    else
        triangle_attribute = Array{Cdouble,2}(undef, 1, n_triangle_attribute)
    end
    # Assemble the C-compatible input struct and call Triangle's refinement.
    mesh_in = Mesh_ptr_C(n_point, point,
                         n_point_marker, point_marker,
                         n_point_attribute, point_attribute,
                         n_cell, cell, cell_area_constraint,
                         n_edge, edge, edge_marker,
                         n_segment, segment, segment_marker,
                         n_hole, hole,
                         n_region, region, triangle_attribute, n_triangle_attribute)
    mesh_buffer = Mesh_ptr_C()
    vor_buffer = Mesh_ptr_C()
    lib_ptr = Libdl.dlopen(libtesselate)
    refine_trimesh_ptr = Libdl.dlsym(lib_ptr, :refine_trimesh)
    ccall(refine_trimesh_ptr,
          Cvoid,
          (Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Ref{Mesh_ptr_C},
           Cstring),
          Ref(mesh_in),
          Ref(mesh_buffer), Ref(vor_buffer),
          switches)
    Libdl.dlclose(lib_ptr)
    mesh = TriMesh(mesh_buffer, vor_buffer, info_str, false)
    return mesh
end
# -----------------------------------------------------------
# ----------------------------------------------------------- | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
] | 1.1.3 | fb6df4003cb65831e8b4603fda10eb136fda2631 | code | 5304 | # -----------------------------------------------------------
# -----------------------------------------------------------
"""
    refine_rg(m :: TriMesh)
Refine triangular mesh by subdivision of each edge into 2 (uniform "red"
refinement). Very slow for large meshes.
"""
function refine_rg(m::TriMesh)
    ####################################################
    # This needs an improvement because it is very slow.
    ####################################################
    # Step 1: Set up a new poly structure with points and segements. Ignore
    # point attributes for now (could be taken care of by interpolation,
    # depends on the attribute).
    segment = copy(m.edge)
    point = copy(m.point)
    segment_marker = copy(m.edge_marker)
    point_marker = copy(m.point_marker)
    N = m.n_edge
    progress = Progress(N, 0.01, "Subdividing all edges...", 10)
    for i in 1:m.n_edge
        # New point from edge subdivision (edge midpoint)
        p = (point[:,segment[1,i]] + point[:,segment[2,i]]) / 2
        # Change one end point: edge i now ends at the new midpoint
        seg_tmp_2 = segment[2,i]
        segment[2,i] = size(point, 2) + 1
        # Push the new segment (midpoint -> old endpoint) and point.
        # Note: `segment` and `point` grow via hcat each iteration, which is
        # the main reason this routine is slow.
        segment = hcat(segment, [size(point, 2) + 1 ; seg_tmp_2])
        point = hcat(point, p)
        # Mark segments and points as before.
        # NOTE(review): `point_marker[i]` uses linear indexing with an *edge*
        # index `i` -- confirm the midpoint is meant to inherit that marker.
        point_marker = [point_marker point_marker[i]]
        segment_marker = push!(segment_marker, segment_marker[i])
        next!(progress)
    end
    # Step 2: Build a polygon from the above (inputs are transposed to the
    # n-by-2 layout expected by the set_polygon_* functions)
    poly = Polygon_pslg(size(point, 2), 1, 0,
                        size(segment, 2), m.n_hole, m.n_region, m.n_triangle_attribute)
    set_polygon_point!(poly, point')
    set_polygon_point_marker!(poly, point_marker')
    set_polygon_segment!(poly, segment')
    set_polygon_segment_marker!(poly, segment_marker)
    set_polygon_hole!(poly, m.hole')
    # Step 3: Triangulate the new Polygon_pslg with the -YY switch. This
    # forces the divided edges into the triangulation.
    switches = "pYYQenv"
    info_str = "Red refined triangular mesh"
    mesh = create_mesh(poly, switches)
    return mesh
end
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
"""
    refine_rg(m :: TriMesh, ind_red :: Array{Int,1})
Refine triangular mesh by subdivision of each edge into 2. Only triangles listed in `ind_red` are refined. Very slow for large meshes.
"""
function refine_rg(m::TriMesh, ind_red::Array{Int,1})
    ####################################################
    # This needs an improvement because it is very slow.
    ####################################################
    # Step 1: For all cells to be refined mark the edges that are to be
    # divided
    refinement_marker = zeros(Bool, m.n_edge)
    N = length(ind_red)
    progress = Progress(N, 0.01, "Marking edges to be refined...", 10)
    for i in ind_red
        # The three (undirected) edges of triangle i, as vertex pairs
        e = [m.cell[1,i] m.cell[2,i] ;
             m.cell[2,i] m.cell[3,i] ;
             m.cell[3,i] m.cell[1,i]]
        for j in 1:3
            # Look the edge up in m.edge in either orientation
            ind_found = findall(vec(all(m.edge .== [e[j,1] ; e[j,2]], dims=1)))
            if isempty(ind_found)
                ind_found = findall(vec(all(m.edge .== [e[j,2] ; e[j,1]], dims=1)))
            end
            refinement_marker[ind_found] = [true]
            end
        next!(progress)
    end
    ind_refine_edge = findall(refinement_marker)
    # Step 2: Set up a new poly structure with points and segements. Ignore
    # point attributes for now (could be taken care of by interpolation,
    # depends on the attribute).
    segment = copy(m.edge)
    point = copy(m.point)
    segment_marker = copy(m.edge_marker)
    point_marker = copy(m.point_marker)
    N = length(ind_refine_edge)
    progress = Progress(N, 0.01, "Subdividing marked edges...", 10)
    for i in ind_refine_edge
        # New point from edge subdivision (edge midpoint)
        p = (point[:,segment[1,i]] + point[:,segment[2,i]]) / 2
        # Change one end point: edge i now ends at the new midpoint
        seg_tmp_2 = segment[2,i]
        segment[2,i] = size(point, 2) + 1
        # Push the new segment (midpoint -> old endpoint) and point
        segment = hcat(segment, [size(point, 2) + 1 ; seg_tmp_2])
        point = hcat(point, p)
        # Mark segments and points as before.
        # NOTE(review): `point_marker[i]` uses linear indexing with an *edge*
        # index `i` -- confirm the midpoint is meant to inherit that marker.
        point_marker = [point_marker point_marker[i]]
        segment_marker = push!(segment_marker, segment_marker[i])
        next!(progress)
    end
    # Step 3: Build a polygon from the above (inputs are transposed to the
    # n-by-2 layout expected by the set_polygon_* functions)
    poly = Polygon_pslg(size(point, 2), 1, 0,
                        size(segment, 2), m.n_hole, m.n_region, m.n_triangle_attribute)
    set_polygon_point!(poly, point')
    set_polygon_point_marker!(poly, point_marker')
    set_polygon_segment!(poly, segment')
    set_polygon_segment_marker!(poly, segment_marker)
    set_polygon_hole!(poly, m.hole')
    # Step 4: Triangulate the new Polygon_pslg with the -YY switch. This
    # forces the divided edges into the triangulation.
    switches = "pYYQenv"
    info_str = "Red-green refined triangular mesh"
    mesh = create_mesh(poly, switches)
    return mesh
end
# -----------------------------------------------------------
# ----------------------------------------------------------- | TriangleMesh | https://github.com/konsim83/TriangleMesh.jl.git |
|
[
"MIT"
# Unit tests for the polygon constructors and the predefined example polygons.
@testset "Polygon constructors" begin
    @testset "Polygon struct constructor" begin
        # Reserve space for 5 points (1 marker, 2 attributes), 6 segments.
        poly = TriangleMesh.Polygon_pslg(5, 1, 2, 6, 0, 0, 0)
        @test size(poly.point) == (2, 5)
        @test size(poly.point_marker) == (1, 5)
        @test size(poly.point_attribute) == (2, 5)
        @test size(poly.segment) == (2, 6)
        @test size(poly.hole) == (2, 0)
    end # end "Polygon constructors"
    # --------------------------------------------------------
    @testset "Standard polygons" begin
        @testset "Unit simplex" begin
            expected = [0.0 0.0 ; 1.0 0.0 ; 0.0 1.0]'
            poly = polygon_unitSimplex()
            @test poly.point == expected
            @test size(poly.segment) == (2, 3)
            @test poly.n_region == 0
        end
        @testset "Unit square" begin
            expected = [0.0 0.0 ; 1.0 0.0 ; 1.0 1.0 ; 0.0 1.0]'
            poly = polygon_unitSquare()
            @test poly.point == expected
            @test size(poly.segment) == (2, 4)
            @test poly.n_region == 0
        end
        @testset "Unit square with hole" begin
            poly = polygon_unitSquareWithHole()
            @test size(poly.point) == (2, 8)
            @test size(poly.segment) == (2, 8)
            @test poly.n_hole == 1
            @test vec(poly.hole) == [0.5 ; 0.5]
            @test poly.n_region == 0
        end
        @testset "Regular unit polyhedron" begin
            # A few random vertex counts; all vertices must lie on the unit circle.
            for n in rand(5:10, 5)
                poly = polygon_regular(n)
                radii_sq = sum(poly.point.^2, dims=1)[:]
                @test isapprox(radii_sq, ones(n))
                @test size(poly.segment) == (2, n)
                @test poly.n_hole == 0
                @test poly.n_region == 0
            end
        end
        @testset "L shape" begin
            poly = polygon_Lshape()
            @test size(poly.point) == (2, 6)
            @test size(poly.segment) == (2, 6)
            @test poly.n_hole == 0
            @test poly.n_region == 0
        end
    end # end "Standard polygons"
end # end "Polygon constructors"
|
[
"MIT"
# Integration tests: mesh example polygons / point clouds and check basic
# invariants (counts of points, cells, edges, segments, holes, regions).
@testset "Create mesh" begin
    @testset "Mesh of unit simplex" begin
        p = polygon_unitSimplex()
        m = create_mesh(p, info_str = "Mesh test",
                        verbose = false,
                        check_triangulation = true,
                        voronoi = true,
                        delaunay = true,
                        output_edges = true,
                        output_cell_neighbors = true,
                        quality_meshing = true,
                        prevent_steiner_points_boundary = false,
                        prevent_steiner_points = false,
                        set_max_steiner_points = false,
                        set_area_max = false,
                        set_angle_min = false,
                        add_switches = "")
        # Without area/angle constraints the simplex is meshed by exactly
        # one triangle whose vertices coincide with the input points.
        @test isapprox(m.point, p.point)
        @test m.n_cell == 1
        @test m.n_edge == 3
        @test m.n_segment == 3
        @test m.n_region == 0
    end
    @testset "Mesh of unit square with hole" begin
        p = polygon_unitSquareWithHole()
        m = create_mesh(p, info_str = "Mesh test",
                        verbose = false,
                        check_triangulation = true,
                        voronoi = true,
                        delaunay = true,
                        output_edges = true,
                        output_cell_neighbors = true,
                        quality_meshing = true,
                        prevent_steiner_points_boundary = false,
                        prevent_steiner_points = false,
                        set_max_steiner_points = false,
                        set_area_max = false,
                        set_angle_min = false,
                        add_switches = "")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 1
        @test m.voronoi.n_point > 1
        @test m.n_region == 0
    end
    @testset "Mesh of unit square with hole (manual switch passing)" begin
        p = polygon_unitSquareWithHole()
        switches = "penvQa0.01"
        m = create_mesh(p, switches, info_str = "Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 1
        @test m.voronoi.n_point > 1
        @test m.n_region == 0
    end
    @testset "Mesh of point cloud" begin
        # random points
        point = [0.266666 0.321577 ;
                 0.615941 0.507234 ;
                 0.943039 0.90487 ;
                 0.617956 0.501991 ;
                 0.442223 0.396445]
        m = create_mesh(point, info_str = "Mesh test",
                        verbose = false,
                        check_triangulation = true,
                        voronoi = true,
                        delaunay = true,
                        output_edges = true,
                        output_cell_neighbors = true,
                        quality_meshing = true,
                        prevent_steiner_points_boundary = true,
                        prevent_steiner_points = true,
                        set_max_steiner_points = false,
                        set_area_max = false,
                        set_angle_min = false,
                        add_switches = "")
        # Steiner points are prevented, so the counts are fully determined.
        @test m.n_point == 5
        @test m.n_cell == 4
        @test m.n_edge == 8
        @test m.n_segment == 4
        @test m.n_hole == 0
        @test m.voronoi.n_point == 4
        @test m.voronoi.n_edge == 8
        @test m.n_region == 0
    end
    # NOTE(review): this testset repeats the earlier "manual switch passing"
    # testset almost verbatim (only the Voronoi assertion differs, >= vs >).
    # Possibly a copy-paste leftover -- verify whether it can be removed.
    @testset "Mesh of unit square with hole (manual switch passing)" begin
        p = polygon_unitSquareWithHole()
        switches = "penvQa0.01"
        m = create_mesh(p, switches, info_str = "Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 1
        @test m.voronoi.n_point >= 1
        @test m.n_region == 0
    end
    @testset "Mesh of point cloud (manual switch passing)" begin
        # random points
        point = [0.266666 0.321577 ;
                 0.615941 0.507234 ;
                 0.943039 0.90487 ;
                 0.617956 0.501991 ;
                 0.442223 0.396445]
        switches = "cpenvQa0.01"
        m = create_mesh(point, switches, info_str = "Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 0
        @test m.voronoi.n_point >= 1
        @test m.n_region == 0
    end
end # end "Create Mesh"
|
[
"MIT"
# Integration tests for second-order (6-node) triangles and region handling.
@testset "Create mesh" begin
    @testset "Mesh of unit square with hole" begin
        println("1st pass with TriangleMesh switches 'second_order_triangles' ")
        p = polygon_unitSquareWithHole()
        m = create_mesh(p, info_str="Mesh test",
                        verbose=false,
                        check_triangulation=true,
                        voronoi=true,
                        delaunay=true,
                        output_edges=true,
                        output_cell_neighbors=true,
                        quality_meshing=true,
                        prevent_steiner_points_boundary=false,
                        prevent_steiner_points=false,
                        set_max_steiner_points=false,
                        set_area_max=false,
                        set_angle_min=false,
                        second_order_triangles=true,
                        add_switches="")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 1
        @test m.voronoi.n_point > 1
        # Print connectivity so a human can eyeball the 6-node layout.
        println("Node connectivity, first triangle: ", m.cell[:,1])
        println("Node connectivity, last triangle: ", m.cell[:,end])
    end
    @testset "Mesh of unit square with hole (manual switch passing)" begin
        println("2nd pass with Triangle switch 'o2' ")
        p = polygon_unitSquareWithHole()
        # "o2" is Triangle's native switch for second-order triangles.
        switches = "penvQa0.01o2"
        m = create_mesh(p, switches, info_str="Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_hole == 1
        @test m.voronoi.n_point > 1
        println("Node connectivity, first triangle: ", m.cell[:,1])
        println("Node connectivity, last triangle: ", m.cell[:,end])
    end
    @testset "Mesh of unit square with region (manual switch passing)" begin
        p = polygon_unitSquareWithRegion()
        switches = "QDpeq33o2Aa0.01"
        m = create_mesh(p, switches, info_str="Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_region == 1
        println("number of regions: ", m.n_region)
        println("Minimum triangle_attribute: ", minimum(m.triangle_attribute))
        println("Maximum triangle_attribute: ", maximum(m.triangle_attribute))
    end
    @testset "Mesh of unit square with enclosed region (manual switch passing)" begin
        p = polygon_unitSquareWithEnclosedRegion()
        switches = "QDpeq33o2Aa0.01"
        m = create_mesh(p, switches, info_str="Mesh test")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
        @test m.n_region == 2
        println("number of regions: ", m.n_region)
        println("Minimum triangle_attribute: ", minimum(m.triangle_attribute))
        println("Maximum triangle_attribute: ", maximum(m.triangle_attribute))
    end
end # end "Create Mesh"
|
[
"MIT"
# Re-runs the PSLG construction example from the package documentation and
# checks that all counts end up in the resulting struct.
@testset "Documentation examples" begin
    @testset "Create a PSLG" begin
        # example from the documentation
        println("Testing documentation example")
        poly = Polygon_pslg(8, 1, 2, 8, 1)
        println("PSLG created")
        # Outer diamond (points 1-4) and inner diamond (points 5-8)
        pts = [1.0 0.0 ; 0.0 1.0 ; -1.0 0.0 ; 0.0 -1.0 ;
               0.25 0.25 ; -0.25 0.25 ; -0.25 -0.25 ; 0.25 -0.25]
        segs = [1 2 ; 2 3 ; 3 4 ; 4 1 ; 5 6 ; 6 7 ; 7 8 ; 8 5]
        # Outer boundary marked 1, inner boundary marked 2
        pt_marker = vcat(ones(Int, 4, 1), 2 * ones(Int, 4, 1))
        seg_marker = vcat(ones(Int, 4), 2 * ones(Int, 4))
        pt_attribute = rand(8, 2)
        hole_seed = [0.5 0.5]
        set_polygon_point!(poly, pts)
        set_polygon_point_marker!(poly, pt_marker)
        set_polygon_point_attribute!(poly, pt_attribute)
        set_polygon_segment!(poly, segs)
        set_polygon_segment_marker!(poly, seg_marker)
        set_polygon_hole!(poly, hole_seed)
        @test poly.n_point == 8
        @test poly.n_segment == 8
        @test poly.n_point_marker == 1
        @test poly.n_point_attribute == 2
        @test poly.n_hole == 1
        @test poly.n_region == 0
        @test poly.n_triangle_attribute == 0
    end
end # end Documentation examples
|
[
"MIT"
# Integration tests for the refinement routines (`refine` and `refine_rg`).
@testset "Refine mesh" begin
    @testset "Mesh of unit simplex (simple refinement)" begin
        p = polygon_unitSimplex()
        m0 = create_mesh(p, info_str="Mesh test",
                         verbose=false,
                         check_triangulation=true,
                         voronoi=true,
                         delaunay=true,
                         output_edges=true,
                         output_cell_neighbors=true,
                         quality_meshing=true,
                         prevent_steiner_points_boundary=false,
                         prevent_steiner_points=false,
                         set_max_steiner_points=false,
                         set_area_max=false,
                         set_angle_min=false,
                         add_switches="")
        m = refine(m0, divide_cell_into=4,
                   ind_cell=collect(1:m0.n_cell),
                   keep_segments=false,
                   keep_edges=false,
                   verbose=false,
                   check_triangulation=false,
                   voronoi=false,
                   output_edges=true,
                   output_cell_neighbors=true,
                   quality_meshing=true,
                   info_str="Refined mesh")
        # Refinement must strictly increase all element counts.
        @test m.n_point > m0.n_point
        @test m.n_cell > m0.n_cell
        @test m.n_edge > m0.n_edge
    end
    @testset "Mesh of unit square with hole (simple refinement)" begin
        p = polygon_unitSquareWithHole()
        m0 = create_mesh(p, info_str="Mesh test",
                         verbose=false,
                         check_triangulation=true,
                         voronoi=true,
                         delaunay=true,
                         output_edges=true,
                         output_cell_neighbors=true,
                         quality_meshing=true,
                         prevent_steiner_points_boundary=false,
                         prevent_steiner_points=false,
                         set_max_steiner_points=false,
                         set_area_max=false,
                         set_angle_min=false,
                         add_switches="")
        m = refine(m0, divide_cell_into=4,
                   ind_cell=collect(1:m0.n_cell),
                   keep_segments=false,
                   keep_edges=false,
                   verbose=false,
                   check_triangulation=false,
                   voronoi=false,
                   output_edges=true,
                   output_cell_neighbors=true,
                   quality_meshing=true,
                   info_str="Refined mesh")
        @test m.n_point > m0.n_point
        @test m.n_cell > m0.n_cell
        @test m.n_edge > m0.n_edge
    end
    @testset "Mesh of unit square with hole (simple refinement, manual switch passing)" begin
        p = polygon_unitSquareWithHole()
        switches = "penvQa0.01"
        m0 = create_mesh(p, switches, info_str="Mesh test")
        # "r" is Triangle's refinement switch.
        switches = switches * "r"
        m = refine(m0, switches, info_str="Refined mesh")
        @test m.n_point > 1
        @test m.n_cell > 1
        @test m.n_edge > 1
        @test m.n_segment > 1
    end
    @testset "Mesh of unit square with hole (red-green refinement, all cells)" begin
        p = polygon_unitSquareWithHole()
        switches = "penvQa0.01"
        m0 = create_mesh(p, switches, info_str="Mesh test")
        m = refine_rg(m0)
        # Uniform red refinement: one new point per edge, each cell split
        # into 4, each edge split into 2 plus 3 interior edges per cell.
        @test m.n_point == m0.n_point + m0.n_edge
        @test m.n_cell == 4 * m0.n_cell
        @test m.n_edge == 2 * m0.n_edge + 3 * m0.n_cell
    end
    @testset "Mesh of unit square with hole (red-green refinement, only first three cells cells)" begin
        p = polygon_unitSquareWithHole()
        switches = "penvQa0.01"
        m0 = create_mesh(p, switches, info_str="Mesh test")
        m = refine_rg(m0, [1;2;3])
        @test m.n_point > m0.n_point
        @test m.n_cell > m0.n_cell
        @test m.n_edge > m0.n_edge
    end
end # end "Refine mesh"
|
[
"MIT"
# Checks that write_mesh emits all four Triangle output files, then cleans up.
@testset "Writing mesh to file" begin
    poly = polygon_unitSquareWithHole()
    mesh = create_mesh(poly, "penvQa0.01", info_str="Mesh test")
    write_mesh(mesh, "test_mesh_write")
    outdir = pwd() * "/meshfiles/"
    for ext in ("node", "ele", "edge", "neigh")
        @test isfile(outdir * "test_mesh_write." * ext)
    end
    rm(outdir, recursive=true)
end
Subsets and Splits