licenses (sequencelengths 1-3) | version (stringclasses, 677 values) | tree_hash (stringlengths 40) | path (stringclasses, 1 value) | type (stringclasses, 2 values) | size (stringlengths 2-8) | text (stringlengths 25-67.1M) | package_name (stringlengths 2-41) | repo (stringlengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | code | 25357 | function cudss_version()
@test CUDSS.version() == v"0.3.0"
end
function cudss_dense()
n = 20
p = 4
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
@testset "CuVector" begin
A_cpu = rand(T, n)
A_gpu = CuVector(A_cpu)
matrix = CudssMatrix(A_gpu)
format = Ref{CUDSS.cudssMatrixFormat_t}()
CUDSS.cudssMatrixGetFormat(matrix, format)
@test format[] == CUDSS.CUDSS_MFORMAT_DENSE
A_cpu2 = rand(T, n)
A_gpu2 = CuVector(A_cpu2)
cudss_set(matrix, A_gpu2)
end
@testset "CuMatrix" begin
A_cpu = rand(T, n, p)
A_gpu = CuMatrix(A_cpu)
matrix = CudssMatrix(A_gpu)
format = Ref{CUDSS.cudssMatrixFormat_t}()
CUDSS.cudssMatrixGetFormat(matrix, format)
@test format[] == CUDSS.CUDSS_MFORMAT_DENSE
A_cpu2 = rand(T, n, p)
A_gpu2 = CuMatrix(A_cpu2)
cudss_set(matrix, A_gpu2)
end
end
end
function cudss_sparse()
n = 20
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
A_cpu = sprand(T, n, n, 1.0)
A_cpu = A_cpu + A_cpu'
A_gpu = CuSparseMatrixCSR(A_cpu)
@testset "view = $view" for view in ('L', 'U', 'F')
@testset "structure = $structure" for structure in ("G", "S", "H", "SPD", "HPD")
matrix = CudssMatrix(A_gpu, structure, view)
format = Ref{CUDSS.cudssMatrixFormat_t}()
CUDSS.cudssMatrixGetFormat(matrix, format)
@test format[] == CUDSS.CUDSS_MFORMAT_CSR
A_cpu2 = sprand(T, n, n, 1.0)
A_cpu2 = A_cpu2 + A_cpu2'
A_gpu2 = CuSparseMatrixCSR(A_cpu2)
cudss_set(matrix, A_gpu2)
end
end
end
end
function cudss_solver()
n = 20
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
A_cpu = sprand(T, n, n, 1.0)
A_cpu = A_cpu + A_cpu'
A_gpu = CuSparseMatrixCSR(A_cpu)
@testset "structure = $structure" for structure in ("G", "S", "H", "SPD", "HPD")
@testset "view = $view" for view in ('L', 'U', 'F')
solver = CudssSolver(A_gpu, structure, view)
x_cpu = zeros(T, n)
x_gpu = CuVector(x_cpu)
b_cpu = rand(T, n)
b_gpu = CuVector(b_cpu)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
@testset "config parameter = $parameter" for parameter in CUDSS_CONFIG_PARAMETERS
@testset "cudss_get" begin
val = cudss_get(solver, parameter)
end
@testset "cudss_set" begin
(parameter == "matching_type") && cudss_set(solver, parameter, 0)
(parameter == "solve_mode") && cudss_set(solver, parameter, 0)
(parameter == "ir_n_steps") && cudss_set(solver, parameter, 1)
(parameter == "ir_tol") && cudss_set(solver, parameter, 1e-8)
(parameter == "pivot_threshold") && cudss_set(solver, parameter, 2.0)
(parameter == "pivot_epsilon") && cudss_set(solver, parameter, 1e-12)
(parameter == "max_lu_nnz") && cudss_set(solver, parameter, 10)
(parameter == "hybrid_device_memory_limit") && cudss_set(solver, parameter, 2048)
for algo in ("default", "algo1", "algo2", "algo3")
(parameter == "reordering_alg") && cudss_set(solver, parameter, algo)
(parameter == "factorization_alg") && cudss_set(solver, parameter, algo)
(parameter == "solve_alg") && cudss_set(solver, parameter, algo)
end
for flag in (0, 1)
(parameter == "hybrid_mode") && cudss_set(solver, parameter, flag)
(parameter == "use_cuda_register_memory") && cudss_set(solver, parameter, flag)
end
for pivoting in ('C', 'R', 'N')
(parameter == "pivot_type") && cudss_set(solver, parameter, pivoting)
end
end
end
@testset "data parameter = $parameter" for parameter in CUDSS_DATA_PARAMETERS
parameter ∈ ("perm_row", "perm_col", "perm_reorder_row", "perm_reorder_col", "diag") && continue
if (parameter != "user_perm") && (parameter != "comm")
(parameter == "inertia") && !(structure ∈ ("S", "H")) && continue
val = cudss_get(solver, parameter)
else
perm_cpu = Cint[i for i=n:-1:1]
cudss_set(solver, parameter, perm_cpu)
perm_gpu = CuVector{Cint}(perm_cpu)
cudss_set(solver, parameter, perm_gpu)
end
end
end
end
end
end
function cudss_execution()
n = 100
p = 5
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
R = real(T)
@testset "Unsymmetric -- Non-Hermitian" begin
@testset "Pivoting = $pivot" for pivot in ('C', 'R', 'N')
A_cpu = sprand(T, n, n, 0.02) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
matrix = CudssMatrix(A_gpu, "G", 'F')
config = CudssConfig()
data = CudssData()
solver = CudssSolver(matrix, config, data)
cudss_set(solver, "pivot_type", pivot)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
@test norm(r_gpu) ≤ √eps(R)
# In-place LU
d_gpu = rand(T, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
c_cpu = rand(T, n)
c_gpu = CuVector(c_cpu)
cudss("refactorization", solver, x_gpu, c_gpu)
cudss("solve", solver, x_gpu, c_gpu)
r_gpu = c_gpu - A_gpu * x_gpu
@test norm(r_gpu) ≤ √eps(R)
end
end
@testset "Symmetric -- Hermitian" begin
@testset "view = $view" for view in ('F', 'L', 'U')
@testset "Pivoting = $pivot" for pivot in ('C', 'R', 'N')
A_cpu = sprand(T, n, n, 0.01) + I
A_cpu = A_cpu + A_cpu'
X_cpu = zeros(T, n, p)
B_cpu = rand(T, n, p)
(view == 'L') && (A_gpu = CuSparseMatrixCSR(A_cpu |> tril))
(view == 'U') && (A_gpu = CuSparseMatrixCSR(A_cpu |> triu))
(view == 'F') && (A_gpu = CuSparseMatrixCSR(A_cpu))
X_gpu = CuMatrix(X_cpu)
B_gpu = CuMatrix(B_cpu)
structure = T <: Real ? "S" : "H"
matrix = CudssMatrix(A_gpu, structure, view)
config = CudssConfig()
data = CudssData()
solver = CudssSolver(matrix, config, data)
cudss_set(solver, "pivot_type", pivot)
cudss("analysis", solver, X_gpu, B_gpu)
cudss("factorization", solver, X_gpu, B_gpu)
cudss("solve", solver, X_gpu, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
# In-place LDLᵀ / LDLᴴ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
cudss("refactorization", solver, X_gpu, C_gpu)
cudss("solve", solver, X_gpu, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
end
end
end
@testset "SPD -- HPD" begin
@testset "view = $view" for view in ('F', 'L', 'U')
@testset "Pivoting = $pivot" for pivot in ('C', 'R', 'N')
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
X_cpu = zeros(T, n, p)
B_cpu = rand(T, n, p)
(view == 'L') && (A_gpu = CuSparseMatrixCSR(A_cpu |> tril))
(view == 'U') && (A_gpu = CuSparseMatrixCSR(A_cpu |> triu))
(view == 'F') && (A_gpu = CuSparseMatrixCSR(A_cpu))
X_gpu = CuMatrix(X_cpu)
B_gpu = CuMatrix(B_cpu)
structure = T <: Real ? "SPD" : "HPD"
matrix = CudssMatrix(A_gpu, structure, view)
config = CudssConfig()
data = CudssData()
solver = CudssSolver(matrix, config, data)
cudss_set(solver, "pivot_type", pivot)
cudss("analysis", solver, X_gpu, B_gpu)
cudss("factorization", solver, X_gpu, B_gpu)
cudss("solve", solver, X_gpu, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
# In-place LLᵀ / LLᴴ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
cudss("refactorization", solver, X_gpu, C_gpu)
cudss("solve", solver, X_gpu, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
end
end
end
end
end
function cudss_generic()
n = 100
p = 5
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
R = real(T)
@testset "Unsymmetric -- Non-Hermitian" begin
A_cpu = sprand(T, n, n, 0.02) + I
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
b_gpu = CuVector(b_cpu)
@testset "ldiv!" begin
x_cpu = zeros(T, n)
x_gpu = CuVector(x_cpu)
solver = lu(A_gpu)
ldiv!(x_gpu, solver, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
@test norm(r_gpu) ≤ √eps(R)
A_gpu2 = rand(T) * A_gpu
lu!(solver, A_gpu2)
x_gpu .= b_gpu
ldiv!(solver, x_gpu)
r_gpu2 = b_gpu - A_gpu2 * x_gpu
@test norm(r_gpu2) ≤ √eps(R)
end
@testset "\\" begin
solver = lu(A_gpu)
x_gpu = solver \ b_gpu
r_gpu = b_gpu - A_gpu * x_gpu
@test norm(r_gpu) ≤ √eps(R)
A_gpu2 = rand(T) * A_gpu
lu!(solver, A_gpu2)
x_gpu = solver \ b_gpu
r_gpu2 = b_gpu - A_gpu2 * x_gpu
@test norm(r_gpu2) ≤ √eps(R)
end
end
@testset "Symmetric -- Hermitian" begin
@testset "view = $view" for view in ('F', 'L', 'U')
A_cpu = sprand(T, n, n, 0.01) + I
A_cpu = A_cpu + A_cpu'
B_cpu = rand(T, n, p)
(view == 'L') && (A_gpu = CuSparseMatrixCSR(A_cpu |> tril))
(view == 'U') && (A_gpu = CuSparseMatrixCSR(A_cpu |> triu))
(view == 'F') && (A_gpu = CuSparseMatrixCSR(A_cpu))
B_gpu = CuMatrix(B_cpu)
@testset "ldiv!" begin
X_cpu = zeros(T, n, p)
X_gpu = CuMatrix(X_cpu)
solver = ldlt(A_gpu; view)
ldiv!(X_gpu, solver, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
c = rand(R)
A_cpu2 = c * A_cpu
A_gpu2 = c * A_gpu
ldlt!(solver, A_gpu2)
X_gpu .= B_gpu
ldiv!(solver, X_gpu)
R_gpu2 = B_gpu - CuSparseMatrixCSR(A_cpu2) * X_gpu
@test norm(R_gpu2) ≤ √eps(R)
end
@testset "\\" begin
solver = ldlt(A_gpu; view)
X_gpu = solver \ B_gpu
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
c = rand(R)
A_cpu2 = c * A_cpu
A_gpu2 = c * A_gpu
ldlt!(solver, A_gpu2)
X_gpu = solver \ B_gpu
R_gpu2 = B_gpu - CuSparseMatrixCSR(A_cpu2) * X_gpu
@test norm(R_gpu2) ≤ √eps(R)
end
end
end
@testset "SPD -- HPD" begin
@testset "view = $view" for view in ('F', 'L', 'U')
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
B_cpu = rand(T, n, p)
(view == 'L') && (A_gpu = CuSparseMatrixCSR(A_cpu |> tril))
(view == 'U') && (A_gpu = CuSparseMatrixCSR(A_cpu |> triu))
(view == 'F') && (A_gpu = CuSparseMatrixCSR(A_cpu))
B_gpu = CuMatrix(B_cpu)
@testset "ldiv!" begin
X_cpu = zeros(T, n, p)
X_gpu = CuMatrix(X_cpu)
solver = cholesky(A_gpu; view)
ldiv!(X_gpu, solver, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
c = rand(R)
A_cpu2 = c * A_cpu
A_gpu2 = c * A_gpu
cholesky!(solver, A_gpu2)
X_gpu .= B_gpu
ldiv!(solver, X_gpu)
R_gpu2 = B_gpu - CuSparseMatrixCSR(A_cpu2) * X_gpu
@test norm(R_gpu2) ≤ √eps(R)
end
@testset "\\" begin
solver = cholesky(A_gpu; view)
X_gpu = solver \ B_gpu
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
@test norm(R_gpu) ≤ √eps(R)
c = rand(R)
A_cpu2 = c * A_cpu
A_gpu2 = c * A_gpu
cholesky!(solver, A_gpu2)
X_gpu = solver \ B_gpu
R_gpu2 = B_gpu - CuSparseMatrixCSR(A_cpu2) * X_gpu
@test norm(R_gpu2) ≤ √eps(R)
end
end
end
end
end
function user_permutation()
function permutation_lu(T, A_cpu, x_cpu, b_cpu, permutation)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
solver = CudssSolver(A_gpu, "G", 'F')
cudss_set(solver, "user_perm", permutation)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
nz = cudss_get(solver, "lu_nnz")
return nz
end
function permutation_ldlt(T, A_cpu, x_cpu, b_cpu, permutation, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "S" : "H"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "user_perm", permutation)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
nz = cudss_get(solver, "lu_nnz")
return nz
end
function permutation_llt(T, A_cpu, x_cpu, b_cpu, permutation, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "SPD" : "HPD"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "user_perm", permutation)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
nz = cudss_get(solver, "lu_nnz")
return nz
end
n = 1000
perm1_cpu = Vector{Cint}(undef, n)
perm2_cpu = Vector{Cint}(undef, n)
for i = 1:n
perm1_cpu[i] = i
perm2_cpu[i] = n-i+1
end
perm1_gpu = CuVector{Cint}(perm1_cpu)
perm2_gpu = CuVector{Cint}(perm2_cpu)
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
@testset "LU" begin
A_cpu = sprand(T, n, n, 0.05) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
nz1_cpu = permutation_lu(T, A_cpu, x_cpu, b_cpu, perm1_cpu)
nz2_cpu = permutation_lu(T, A_cpu, x_cpu, b_cpu, perm2_cpu)
nz1_gpu = permutation_lu(T, A_cpu, x_cpu, b_cpu, perm1_gpu)
nz2_gpu = permutation_lu(T, A_cpu, x_cpu, b_cpu, perm2_gpu)
@test nz1_cpu == nz1_gpu
@test nz2_cpu == nz2_gpu
@test nz1_cpu != nz2_cpu
end
@testset "LDLᵀ / LDLᴴ" begin
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
nz1_cpu = permutation_ldlt(T, A_cpu, x_cpu, b_cpu, perm1_cpu, uplo)
nz2_cpu = permutation_ldlt(T, A_cpu, x_cpu, b_cpu, perm2_cpu, uplo)
nz1_gpu = permutation_ldlt(T, A_cpu, x_cpu, b_cpu, perm1_gpu, uplo)
nz2_gpu = permutation_ldlt(T, A_cpu, x_cpu, b_cpu, perm2_gpu, uplo)
@test nz1_cpu == nz1_gpu
@test nz2_cpu == nz2_gpu
@test nz1_cpu != nz2_cpu
end
end
@testset "LLᵀ / LLᴴ" begin
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
nz1_cpu = permutation_llt(T, A_cpu, x_cpu, b_cpu, perm1_cpu, uplo)
nz2_cpu = permutation_llt(T, A_cpu, x_cpu, b_cpu, perm2_cpu, uplo)
nz1_gpu = permutation_llt(T, A_cpu, x_cpu, b_cpu, perm1_gpu, uplo)
nz2_gpu = permutation_llt(T, A_cpu, x_cpu, b_cpu, perm2_gpu, uplo)
@test nz1_cpu == nz1_gpu
@test nz2_cpu == nz2_gpu
@test nz1_cpu != nz2_cpu
end
end
end
end
function iterative_refinement()
function ir_lu(T, A_cpu, x_cpu, b_cpu, ir)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
solver = CudssSolver(A_gpu, "G", 'F')
cudss_set(solver, "ir_n_steps", ir)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
return norm(r_gpu)
end
function ir_ldlt(T, A_cpu, x_cpu, b_cpu, ir, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "S" : "H"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "ir_n_steps", ir)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
function ir_llt(T, A_cpu, x_cpu, b_cpu, ir, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "SPD" : "HPD"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "ir_n_steps", ir)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
n = 100
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
R = real(T)
@testset "number of iterative refinement: $ir" for ir in (1, 2)
@testset "LU" begin
A_cpu = sprand(T, n, n, 0.05) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
res = ir_lu(T, A_cpu, x_cpu, b_cpu, ir)
@test res ≤ √eps(R)
end
@testset "LDLᵀ / LDLᴴ" begin
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = ir_ldlt(T, A_cpu, x_cpu, b_cpu, ir, uplo)
@test res ≤ √eps(R)
end
end
@testset "LLᵀ / LLᴴ" begin
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = ir_llt(T, A_cpu, x_cpu, b_cpu, ir, uplo)
@test res ≤ √eps(R)
end
end
end
end
end
function small_matrices()
function cudss_lu(T, A_cpu, x_cpu, b_cpu)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
solver = CudssSolver(A_gpu, "G", 'F')
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
return norm(r_gpu)
end
function cudss_ldlt(T, A_cpu, x_cpu, b_cpu, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "S" : "H"
solver = CudssSolver(A_gpu, structure, uplo)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
function cudss_llt(T, A_cpu, x_cpu, b_cpu, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "SPD" : "HPD"
solver = CudssSolver(A_gpu, structure, uplo)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
R = real(T)
@testset "Size of the linear system: $n" for n in 1:16
@testset "LU" begin
A_cpu = sprand(T, n, n, 0.05) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
res = cudss_lu(T, A_cpu, x_cpu, b_cpu)
@test res ≤ √eps(R)
end
@testset "LDLᵀ / LDLᴴ" begin
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = cudss_ldlt(T, A_cpu, x_cpu, b_cpu, uplo)
@test res ≤ √eps(R)
end
end
@testset "LLᵀ / LLᴴ" begin
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = cudss_llt(T, A_cpu, x_cpu, b_cpu, uplo)
@test res ≤ √eps(R)
end
end
end
end
end
function hybrid_mode()
function hybrid_lu(T, A_cpu, x_cpu, b_cpu)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
solver = CudssSolver(A_gpu, "G", 'F')
cudss_set(solver, "hybrid_mode", 1)
cudss("analysis", solver, x_gpu, b_gpu)
nbytes_gpu = cudss_get(solver, "hybrid_device_memory_min")
cudss_set(solver, "hybrid_device_memory_limit", nbytes_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
return norm(r_gpu)
end
function hybrid_ldlt(T, A_cpu, x_cpu, b_cpu, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "S" : "H"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "hybrid_mode", 1)
cudss("analysis", solver, x_gpu, b_gpu)
nbytes_gpu = cudss_get(solver, "hybrid_device_memory_min")
cudss_set(solver, "hybrid_device_memory_limit", nbytes_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
function hybrid_llt(T, A_cpu, x_cpu, b_cpu, uplo)
if uplo == 'L'
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
elseif uplo == 'U'
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
else
A_gpu = CuSparseMatrixCSR(A_cpu)
end
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
structure = T <: Real ? "SPD" : "HPD"
solver = CudssSolver(A_gpu, structure, uplo)
cudss_set(solver, "hybrid_mode", 1)
cudss("analysis", solver, x_gpu, b_gpu)
nbytes_gpu = cudss_get(solver, "hybrid_device_memory_min")
cudss_set(solver, "hybrid_device_memory_limit", nbytes_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
return norm(r_gpu)
end
n = 20
@testset "precision = $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
R = real(T)
@testset "LU" begin
A_cpu = sprand(T, n, n, 0.05) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
res = hybrid_lu(T, A_cpu, x_cpu, b_cpu)
@test res ≤ √eps(R)
end
@testset "LDLᵀ / LDLᴴ" begin
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = hybrid_ldlt(T, A_cpu, x_cpu, b_cpu, uplo)
@test res ≤ √eps(R)
end
end
@testset "LLᵀ / LLᴴ" begin
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
@testset "uplo = $uplo" for uplo in ('L', 'U', 'F')
res = hybrid_llt(T, A_cpu, x_cpu, b_cpu, uplo)
@test res ≤ √eps(R)
end
end
end
end
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | docs | 4404 | # CUDSS.jl: Julia interface for NVIDIA cuDSS
[![docs-dev][docs-dev-img]][docs-dev-url]
[docs-dev-img]: https://img.shields.io/badge/docs-dev-purple.svg
[docs-dev-url]: https://exanauts.github.io/CUDSS.jl/dev
## Overview
[CUDSS.jl](https://github.com/exanauts/CUDSS.jl) is a Julia interface to the NVIDIA [cuDSS](https://developer.nvidia.com/cudss) library.
NVIDIA cuDSS provides three factorizations (LDU, LDLᵀ, LLᵀ) for solving sparse linear systems on GPUs.
### Why CUDSS.jl?
Unlike other CUDA libraries that are commonly bundled together, cuDSS is currently in preview. For this reason, it is not included in [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl).
To maintain consistency with the naming conventions used for other CUDA libraries (such as CUBLAS, CUSOLVER, CUSPARSE, etc.), we have named this interface CUDSS.jl.
## Installation
CUDSS.jl can be installed and tested through the Julia package manager:
```julia
julia> ]
pkg> add CUDSS
pkg> test CUDSS
```
## Content
CUDSS.jl provides a structured approach for leveraging NVIDIA cuDSS functionalities.
It introduces the `CudssSolver` type along with three core routines: `cudss`, `cudss_set`, and `cudss_get`.
Additionally, specialized methods for the `CuSparseMatrixCSR` type have been incorporated for `cholesky`, `ldlt`, `lu` and `\`.
In-place variants (`cholesky!`, `ldlt!`, `lu!` and `ldiv!`) are also provided.
They reuse the symbolic factorization as well as the allocated storage, which makes repeated factorizations and solves of sparse linear systems on GPUs efficient.
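As a quick illustration, here is a minimal sketch of the generic interface (assuming `A_gpu` is a square `CuSparseMatrixCSR`, `A_gpu2` a matrix with the same sparsity pattern, and `b_gpu`, `x_gpu` matching `CuVector`s; complete examples follow below):
```julia
F = lu(A_gpu)           # analysis + numerical factorization
x_gpu = F \ b_gpu       # solve Ax = b on the GPU
lu!(F, A_gpu2)          # refactorize, reusing the symbolic analysis
ldiv!(x_gpu, F, b_gpu)  # in-place solve
```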
## Examples
### Example 1: Sparse unsymmetric linear system with one right-hand side
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using SparseArrays, LinearAlgebra
T = Float64
n = 100
A_cpu = sprand(T, n, n, 0.05) + I
x_cpu = zeros(T, n)
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
x_gpu = CuVector(x_cpu)
b_gpu = CuVector(b_cpu)
solver = CudssSolver(A_gpu, "G", 'F')
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - A_gpu * x_gpu
norm(r_gpu)
# In-place LU
d_gpu = rand(T, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
c_cpu = rand(T, n)
c_gpu = CuVector(c_cpu)
cudss("refactorization", solver, x_gpu, c_gpu)
cudss("solve", solver, x_gpu, c_gpu)
r_gpu = c_gpu - A_gpu * x_gpu
norm(r_gpu)
```
### Example 2: Sparse symmetric linear system with multiple right-hand sides
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using SparseArrays, LinearAlgebra
T = Float64
R = real(T)
n = 100
p = 5
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
X_cpu = zeros(T, n, p)
B_cpu = rand(T, n, p)
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
X_gpu = CuMatrix(X_cpu)
B_gpu = CuMatrix(B_cpu)
structure = T <: Real ? "S" : "H"
solver = CudssSolver(A_gpu, structure, 'L')
cudss("analysis", solver, X_gpu, B_gpu)
cudss("factorization", solver, X_gpu, B_gpu)
cudss("solve", solver, X_gpu, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
norm(R_gpu)
# In-place LDLᵀ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
cudss("refactorization", solver, X_gpu, C_gpu)
cudss("solve", solver, X_gpu, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
norm(R_gpu)
```
### Example 3: Sparse hermitian positive definite linear system with multiple right-hand sides
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using SparseArrays, LinearAlgebra
T = ComplexF64
R = real(T)
n = 100
p = 5
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
X_cpu = zeros(T, n, p)
B_cpu = rand(T, n, p)
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
X_gpu = CuMatrix(X_cpu)
B_gpu = CuMatrix(B_cpu)
structure = T <: Real ? "SPD" : "HPD"
solver = CudssSolver(A_gpu, structure, 'U')
cudss("analysis", solver, X_gpu, B_gpu)
cudss("factorization", solver, X_gpu, B_gpu)
cudss("solve", solver, X_gpu, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
norm(R_gpu)
# In-place LLᴴ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cudss_set(solver, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
cudss("refactorization", solver, X_gpu, C_gpu)
cudss("solve", solver, X_gpu, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
norm(R_gpu)
```
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | docs | 3206 | ## LLᵀ and LLᴴ
```@docs
LinearAlgebra.cholesky(A::CuSparseMatrixCSR{T,Cint}; view::Char='F') where T <: LinearAlgebra.BlasFloat
LinearAlgebra.cholesky!(solver::CudssSolver{T}, A::CuSparseMatrixCSR{T,Cint}) where T <: LinearAlgebra.BlasFloat
```
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
T = ComplexF64
R = real(T)
n = 100
p = 5
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu * A_cpu' + I
B_cpu = rand(T, n, p)
A_gpu = CuSparseMatrixCSR(A_cpu |> triu)
B_gpu = CuMatrix(B_cpu)
X_gpu = similar(B_gpu)
F = cholesky(A_gpu, view='U')
X_gpu = F \ B_gpu
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
norm(R_gpu)
# In-place LLᴴ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
cholesky!(F, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
ldiv!(X_gpu, F, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
norm(R_gpu)
```
!!! note
If we only store one triangle of `A_gpu`, we can also use the wrappers `Symmetric` and `Hermitian` instead of using the keyword argument `view` in `cholesky`. For real matrices, both wrappers are allowed but only `Hermitian` can be used for complex matrices.
```julia
H_gpu = Hermitian(A_gpu, :U)
F = cholesky(H_gpu)
```
## LDLᵀ and LDLᴴ
```@docs
LinearAlgebra.ldlt(A::CuSparseMatrixCSR{T,Cint}; view::Char='F') where T <: LinearAlgebra.BlasFloat
LinearAlgebra.ldlt!(solver::CudssSolver{T}, A::CuSparseMatrixCSR{T,Cint}) where T <: LinearAlgebra.BlasFloat
```
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
T = Float64
R = real(T)
n = 100
p = 5
A_cpu = sprand(T, n, n, 0.05) + I
A_cpu = A_cpu + A_cpu'
B_cpu = rand(T, n, p)
A_gpu = CuSparseMatrixCSR(A_cpu |> tril)
B_gpu = CuMatrix(B_cpu)
X_gpu = similar(B_gpu)
F = ldlt(A_gpu, view='L')
X_gpu = F \ B_gpu
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
norm(R_gpu)
# In-place LDLᵀ
d_gpu = rand(R, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
ldlt!(F, A_gpu)
C_cpu = rand(T, n, p)
C_gpu = CuMatrix(C_cpu)
ldiv!(X_gpu, F, C_gpu)
R_gpu = C_gpu - ( CuSparseMatrixCSR(A_cpu) + Diagonal(d_gpu) ) * X_gpu
norm(R_gpu)
```
!!! note
If we only store one triangle of `A_gpu`, we can also use the wrappers `Symmetric` and `Hermitian` instead of using the keyword argument `view` in `ldlt`. For real matrices, both wrappers are allowed but only `Hermitian` can be used for complex matrices.
```julia
S_gpu = Symmetric(A_gpu, :L)
F = ldlt(S_gpu)
```
## LU
```@docs
LinearAlgebra.lu(A::CuSparseMatrixCSR{T,Cint}) where T <: LinearAlgebra.BlasFloat
LinearAlgebra.lu!(solver::CudssSolver{T}, A::CuSparseMatrixCSR{T,Cint}) where T <: LinearAlgebra.BlasFloat
```
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
T = Float64
n = 100
A_cpu = sprand(T, n, n, 0.05) + I
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
b_gpu = CuVector(b_cpu)
F = lu(A_gpu)
x_gpu = F \ b_gpu
r_gpu = b_gpu - A_gpu * x_gpu
norm(r_gpu)
# In-place LU
d_gpu = rand(T, n) |> CuVector
A_gpu = A_gpu + Diagonal(d_gpu)
lu!(F, A_gpu)
c_cpu = rand(T, n)
c_gpu = CuVector(c_cpu)
ldiv!(x_gpu, F, c_gpu)
r_gpu = c_gpu - A_gpu * x_gpu
norm(r_gpu)
```
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | docs | 611 | # [CUDSS.jl documentation](@id Home)
## Overview
[CUDSS.jl](https://github.com/exanauts/CUDSS.jl) is a Julia interface to the NVIDIA [cuDSS](https://developer.nvidia.com/cudss) library.
NVIDIA cuDSS provides three factorizations (LDU, LDLᵀ, LLᵀ) for solving sparse linear systems on GPUs.
For more details on using cuDSS, refer to the official [cuDSS documentation](https://docs.nvidia.com/cuda/cudss/index.html).
## Installation
```julia
julia> ]
pkg> add CUDSS
pkg> test CUDSS
```
## Types
```@docs
CudssMatrix
CudssConfig
CudssData
CudssSolver
```
## Functions
```@docs
cudss_set
cudss_get
cudss
```
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | docs | 2158 | ## Iterative refinement
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
T = Float64
n = 100
p = 5
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu + I
B_cpu = rand(T, n, p)
A_gpu = CuSparseMatrixCSR(A_cpu)
B_gpu = CuMatrix(B_cpu)
X_gpu = similar(B_gpu)
solver = CudssSolver(A_gpu, "G", 'F')
# Perform one step of iterative refinement
ir = 1
cudss_set(solver, "ir_n_steps", ir)
cudss("analysis", solver, X_gpu, B_gpu)
cudss("factorization", solver, X_gpu, B_gpu)
cudss("solve", solver, X_gpu, B_gpu)
R_gpu = B_gpu - CuSparseMatrixCSR(A_cpu) * X_gpu
norm(R_gpu)
```
## User permutation
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
using AMD
T = ComplexF64
n = 100
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu' * A_cpu + I
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
b_gpu = CuVector(b_cpu)
x_gpu = similar(b_gpu)
solver = CudssSolver(A_gpu, "HPD", 'F')
# Provide a user permutation
permutation = amd(A_cpu) |> Vector{Cint}
cudss_set(solver, "user_perm", permutation)
cudss("analysis", solver, x_gpu, b_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
norm(r_gpu)
```
## Hybrid mode
```julia
using CUDA, CUDA.CUSPARSE
using CUDSS
using LinearAlgebra
using SparseArrays
T = Float64
n = 100
A_cpu = sprand(T, n, n, 0.01)
A_cpu = A_cpu + A_cpu' + I
b_cpu = rand(T, n)
A_gpu = CuSparseMatrixCSR(A_cpu)
b_gpu = CuVector(b_cpu)
x_gpu = similar(b_gpu)
solver = CudssSolver(A_gpu, "S", 'F')
# Use the hybrid mode (host and device memory)
cudss_set(solver, "hybrid_mode", 1)
cudss("analysis", solver, x_gpu, b_gpu)
# Minimal amount of device memory required in the hybrid memory mode.
nbytes_gpu = cudss_get(solver, "hybrid_device_memory_min")
# Device memory limit for the hybrid memory mode.
# Only use it if you don't want to rely on the internal default heuristic.
cudss_set(solver, "hybrid_device_memory_limit", nbytes_gpu)
cudss("factorization", solver, x_gpu, b_gpu)
cudss("solve", solver, x_gpu, b_gpu)
r_gpu = b_gpu - CuSparseMatrixCSR(A_cpu) * x_gpu
norm(r_gpu)
```
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.3.2 | 11cb7c9c06435cfadcc6d94d34c07501df32ce55 | docs | 712 | # Wrapping headers
This directory contains a script `wrapper.jl` that can be used to automatically
generate wrappers from C headers of NVIDIA cuDSS. This is done using Clang.jl.
In CUDSS.jl, the wrappers need to know whether pointers passed into the
library point to CPU or GPU memory (i.e. `Ptr` or `CuPtr`). This information is
not available from the headers, and instead should be provided by the developer.
The specific information is embedded in the TOML file `cudss.toml`.
# Usage
Either run `julia wrapper.jl` directly, or include it and call the `main()` function.
Be sure to activate the project environment in this folder (`julia --project`), which will install `Clang.jl` and `JuliaFormatter.jl`.
| CUDSS | https://github.com/exanauts/CUDSS.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | code | 367 | using Documenter
push!(LOAD_PATH,"../src/")
using FindPeaks1D
makedocs(
sitename = "FindPeaks1D.jl",
format = Documenter.HTML(prettyurls = false),
pages = [
"index.md",
"findpeaks1d.md",
],
)
deploydocs(
repo = "github.com/ymtoo/FindPeaks1D.jl",
versions = ["stable" => "v^", "v#.#", "dev" => "master"],
branch = "gh-pages",
) | FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | code | 10297 | module FindPeaks1D
export findpeaks1d, peakprominences1d, peakwidths1d
include("utils.jl")
"""
localmaxima1d(x)
Finds all local maxima in a 1-D signal. The first and the last sample can't be maxima. Returns the
midpoints of the maxima together with the left and right edges of flat peaks (plateaus).
"""
function localmaxima1d(x::AbstractVector{T}) where {T<:Real}
midpts = Vector{Int}(undef, 0)
leftedges = Vector{Int}(undef, 0)
rightedges = Vector{Int}(undef, 0)
i = 2
imax = length(x)
while i < imax
if x[i-1] < x[i]
iahead = i+1
while (iahead < imax) && (x[iahead] == x[i])
iahead += 1
end
if x[iahead] < x[i]
push!(leftedges, i)
push!(rightedges, iahead-1)
push!(midpts, (i+iahead-1)÷2)
i = iahead
end
end
i += 1
end
midpts, leftedges, rightedges
end
"""
findpeaks1d(x;
height=nothing,
distance=nothing,
prominence=nothing,
width=nothing,
wlen=nothing,
relheight=0.5)
Find all local maxima in a 1-D signal with specified `height`, `distance`, `prominence`, `width`.
# Arguments
- `x`: 1-D signal
- `height`: the first element is the minimal and the second, if supplied, is the maximal peak height
- `distance`: the minimal peak distance
- `prominence`: the first element is the minimal and the second, if supplied, is the maximal peak prominence
- `width`: the first element is the minimal and the second, if supplied, is the maximal peak width
- `wlen`: used for calculation of the peak prominence
- `relheight`: used for calculation of peak width
# Returns
Peak indices and properties
# Examples
```julia-repl
julia> x = [13, 12, 14, 18, 19, 19, 19, 15, 11, 6, 4, 10, 8, 13, 8, 13, 3, 18, 7, 4];
julia> pkindices, pkproperties = findpeaks1d(x)
([6, 12, 14, 16, 18], Dict{String,Any}())
julia> pkindices, pkproperties = findpeaks1d(x, height=11)
([6, 14, 16, 18], Dict{String,Any}("peak_heights" => [19, 13, 13, 18]))
julia> pkindices, pkproperties = findpeaks1d(x, height=11, distance=3)
([6, 14, 18], Dict{String,Any}("peak_heights" => [19, 13, 18]))
```
"""
function findpeaks1d(x::AbstractVector{T};
height::Union{Nothing,<:Real,NTuple{2,<:Real}}=nothing,
distance::Union{Nothing,I}=nothing,
prominence::Union{Nothing,Real,NTuple{2,Real}}=nothing,
width::Union{Nothing,Real,NTuple{2,Real}}=nothing,
wlen::Union{Nothing,I}=nothing,
relheight::Real=0.5) where {T<:Real,I<:Integer}
pkindices, leftedges, rightedges = localmaxima1d(x)
properties = Dict{String,Any}()
isempty(pkindices) && (return pkindices, properties)
if !isnothing(height)
pkheights = x[pkindices]
hmin, hmax = height isa Number ? (height, nothing) : height
keepheight = selectbyproperty(pkheights, hmin, hmax)
pkindices = pkindices[keepheight]
properties["peak_heights"] = pkheights
properties = Dict{String,Any}(key => array[keepheight] for (key, array) in properties)
end
if !isnothing(distance)
keepdist = selectbypeakdistance(pkindices, x[pkindices], distance)
pkindices = pkindices[keepdist]
properties = Dict{String,Any}(key => array[keepdist] for (key, array) in properties)
end
if !isnothing(prominence) || !isnothing(width)
prominences, leftbases, rightbases = peakprominences1d(x, pkindices, wlen)
properties["prominences"] = prominences
properties["leftbases"] = leftbases
properties["rightbases"] = rightbases
end
if !isnothing(prominence)
pmin, pmax = prominence isa Number ? (prominence, nothing) : prominence
keepprom = selectbyproperty(prominences, pmin, pmax)
pkindices = pkindices[keepprom]
properties = Dict{String,Any}(key => array[keepprom] for (key, array) in properties)
end
if !isnothing(width)
widths, widthheights, leftips, rightips = peakwidths1d(x,
pkindices,
relheight,
properties["prominences"],
properties["leftbases"],
properties["rightbases"])
properties["widths"] = widths
properties["widthheights"] = widthheights
properties["leftips"] = leftips
properties["rightips"] = rightips
wmin, wmax = width isa Number ? (width, nothing) : width
keepwidth = selectbyproperty(widths, wmin, wmax)
pkindices = pkindices[keepwidth]
properties = Dict{String,Any}(key => array[keepwidth] for (key, array) in properties)
end
pkindices, properties
end
"""
peakprominences1d(x, pkindices, wlen=nothing)
Calculate the prominence of each peak in a 1-D signal.
# Arguments
- `x`: 1-D signal
- `pkindices`: peak indices
- `wlen`: a window length in samples to restrict peak finding to a window around the current peak
# Returns
- `prominences`: prominences for each peaks
- `leftbases`, `rightbases`: indices of the left and right bases of each peak
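# Examples
Using the same signal as in the `findpeaks1d` example:
```julia-repl
julia> x = [13, 12, 14, 18, 19, 19, 19, 15, 11, 6, 4, 10, 8, 13, 8, 13, 3, 18, 7, 4];
julia> pkindices, _ = findpeaks1d(x);
julia> prominences, leftbases, rightbases = peakprominences1d(x, pkindices)
([7, 2, 9, 9, 14], [2, 11, 11, 11, 17], [17, 13, 17, 17, 20])
```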
"""
function peakprominences1d(x::AbstractVector{T},
pkindices::AbstractVector{I},
wlen::Union{Nothing,I}=nothing) where {T<:Real,I<:Integer}
wlen = argwlenasexpected(wlen)
prominences = Vector{T}(undef, length(pkindices))
leftbases = Vector{Int}(undef, length(pkindices))
rightbases = Vector{Int}(undef, length(pkindices))
for pknr ∈ eachindex(pkindices)
pkindex = pkindices[pknr]
imin = 1
imax = length(x)
!(imin <= pkindex <= imax) && throw(ArgumentError("peak $(pkindex) is not a valid index for `x`"))
if 2 <= wlen
imin = max(pkindex-wlen÷2, imin)
imax = min(pkindex+wlen÷2, imax)
end
i = leftbases[pknr] = pkindex
leftmin = x[pkindex]
while (imin <= i) && (x[i] <= x[pkindex])
if x[i] < leftmin
leftmin = x[i]
leftbases[pknr] = i
end
i -= 1
end
i = rightbases[pknr] = pkindex
rightmin = x[pkindex]
while (i <= imax) && (x[i] <= x[pkindex])
if x[i] < rightmin
rightmin = x[i]
rightbases[pknr] = i
end
i += 1
end
prominences[pknr] = x[pkindex]-max(leftmin, rightmin)
end
prominences, leftbases, rightbases
end
"""
peakwidths1d(x, pkindices, relheight=0.5, prominencedata=nothing, wlen=nothing)
Calculate the width of each peak in a 1-D signal.
# Arguments
- `x`: 1-D signal
- `pkindices`: peak indices
- `relheight`: relative height with respect to the peak heights and prominences
- `wlen`: a window length in samples to restrict peak finding to a window around the current peak
# Returns
- `widths`: width for each peak in samples
- `widthheights`: height at which the `widths` are evaluated
- `leftips`, `rightips`: interpolated left and right intersection points of a horizontal line at the
respective `widthheights`
"""
function peakwidths1d(x::AbstractVector{T},
pkindices::AbstractVector{I},
relheight::Real=0.5,
prominencedata::Union{Nothing,Tuple}=nothing,
wlen::Union{Nothing,I}=nothing) where {T<:Real,I<:Integer}
if isnothing(prominencedata)
prominencedata = peakprominences1d(x, pkindices, wlen)
end
prominences, leftbases, rightbases = prominencedata
peakwidths1d(x, pkindices, relheight, prominences, leftbases, rightbases)
end
"""
peakwidths1d(x, pkindices, relheight, prominences, leftbases, rightbases)
Calculate the width of each peak in a 1-D signal.
# Arguments
- `x`: 1-D signal
- `pkindices`: peak indices
- `relheight`: relative height with respect to the peak heights and prominences
- `prominences`: peak prominences, as returned by `peakprominences1d`
- `leftbases`, `rightbases`: indices of the left and right bases of each peak
# Returns
- `widths`: width for each peak in samples
- `widthheights`: height at which the `widths` are evaluated
- `leftips`, `rightips`: interpolated left and right intersection points of a horizontal line at the respective `widthheights`
"""
function peakwidths1d(x::AbstractVector{T},
pkindices::AbstractVector{I},
relheight::Real,
prominences,
leftbases,
rightbases) where {T<:Real,I<:Integer}
npkindices = length(pkindices)
(relheight < 0) && throw(ArgumentError("`relheight` must be greater or equal to zero"))
!(npkindices == length(prominences) == length(leftbases) == length(rightbases)) && throw(ArgumentError("arrays in `prominencedata` must have the same length as `pkindices`"))
widths = Vector{Float64}(undef, npkindices)
widhtheights = Vector{Float64}(undef, npkindices)
leftips = Vector{Float64}(undef, npkindices)
rightips = Vector{Float64}(undef, npkindices)
for p ∈ 1:npkindices
imin = leftbases[p]
imax = rightbases[p]
pkindex = pkindices[p]
!(1 <= imin <= pkindex <= imax <= length(x)) && throw(ArgumentError("prominence data is invalid for peak $(pkindex)"))
height = widhtheights[p] = x[pkindex] - prominences[p] * relheight
i = pkindex
while (imin < i) && (height < x[i])
i -= 1
end
leftip = convert(Float64, i)
if x[i] < height
leftip += (height-x[i])/(x[i+1]-x[i])
end
i = pkindex
while (i < imax) && height < x[i]
i += 1
end
rightip = convert(Float64, i)
if x[i] < height
rightip -= (height-x[i])/(x[i-1]-x[i])
end
widths[p] = rightip - leftip
leftips[p] = leftip
rightips[p] = rightip
end
widths, widhtheights, leftips, rightips
end
end # module
| FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | code | 1063 | function selectbyproperty(pkproperties, pmin, pmax)
keep = trues(length(pkproperties))
if !isnothing(pmin)
keep .&= (pmin .<= pkproperties)
end
if !isnothing(pmax)
keep .&= (pkproperties .<= pmax)
end
keep
end
function selectbypeakdistance(pkindices, priority, distance)
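# visit peaks from highest to lowest priority; for each kept peak, discard neighbouring peaks closer than `distance` samples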
npkindices = length(pkindices)
keep = trues(npkindices)
prioritytoposition = sortperm(priority)
for i ∈ npkindices:-1:1
j = prioritytoposition[i]
iszero(keep[j]) && continue
k = j-1
while (1 <= k) && ((pkindices[j]-pkindices[k]) < distance)
keep[k] = 0
k -= 1
end
k = j+1
while (k<=npkindices) && ((pkindices[k]-pkindices[j]) < distance)
keep[k] = 0
k += 1
end
end
keep
end
function argwlenasexpected(value)
if isnothing(value)
value = -1
elseif 1 < value
value = ceil(Int, value)
else
throw(ArgumentError("`wlen` must be larger than 1, was $(value)"))
end
value
end
| FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | code | 2381 | using FindPeaks1D
using Test
@testset "FindPeaks1D" begin
x = [13, 12, 14, 18, 19, 19, 19, 15, 11, 6, 4, 10, 8, 13, 8, 13, 3, 18, 7, 4]
pkindices, _ = FindPeaks1D.localmaxima1d(x)
@test pkindices == [6, 12, 14, 16, 18]
pkindicesh1, _ = findpeaks1d(x; height=11)
pkindicesh2, _ = findpeaks1d(x; height=(11, 16))
@test pkindicesh1 == [6, 14, 16, 18]
@test pkindicesh2 == [14, 16]
pkindicesd1, _ = findpeaks1d(x, distance=1)
pkindicesd2, _ = findpeaks1d(x, distance=3)
@test pkindicesd1 == [6, 12, 14, 16, 18]
@test pkindicesd2 == [6, 14, 18]
prominences1, leftbases1, rightbases1 = peakprominences1d(x, pkindices)
prominences2, leftbases2, rightbases2 = peakprominences1d(x, pkindices, 2)
@test prominences1 == [7, 2, 9, 9, 14]
@test leftbases1 == [2, 11, 11, 11, 17]
@test rightbases1 == [17, 13, 17, 17, 20]
@test prominences2 == [0, 2, 5, 5, 11]
@test leftbases2 == [6, 11, 13, 15, 17]
@test rightbases2 == [6, 13, 15, 17, 19]
widths1, widthheight1, leftips1, rightips1 = peakwidths1d(x, pkindices, 1.0)
@test all(isapprox.(widths1 ,[6.75, 1.3333, 5.900, 5.900, 2.9333], atol=0.001))
@test widthheight1 == [12, 8, 4, 4, 4]
@test all(isapprox.(leftips1 ,[2.0, 11.6667, 11.0, 11.0, 17.0667], atol=0.001))
@test all(isapprox.(rightips1 ,[8.75, 13.0, 16.9, 16.9, 20.0], atol=0.001))
pkindices1, _ = findpeaks1d(x, width=3.0, relheight=1.0)
@test pkindices1 == [6,14,16]
pkindices2, _ = findpeaks1d(x, width=2.0, relheight=0.5)
@test pkindices2 == [6]
pkindices3, _ = findpeaks1d(x, prominence=3)
@test pkindices3 == [6,14,16,18]
pkindices4, _ = findpeaks1d(x, prominence=100)
@test pkindices4 == Int64[]
# test for less strict type signature
pkindices1, _ = findpeaks1d(x, width=3//1, relheight=1.0)
@test pkindices1 == [6,14,16]
pkindices2, _ = findpeaks1d(x, width=2, relheight=1//2)
@test pkindices2 == [6]
pkindices3, _ = findpeaks1d(x, prominence=3//1)
@test pkindices3 == [6,14,16,18]
pkindices4, _ = findpeaks1d(x, prominence=100//1)
@test pkindices4 == Int64[]
@test_throws ArgumentError pkindiceswlenerror, _ = findpeaks1d(x, prominence=0.1, wlen=0)
x = [0, 1, 1, 1, 1, 1, 0]
pkindices, _ = FindPeaks1D.localmaxima1d(x)
@test length(pkindices) == 1
@test pkindices[1] == 4
end
| FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | docs | 2089 | # FindPeaks1D

[](https://codecov.io/gh/ymtoo/FindPeaks1D.jl)
[](https://ymtoo.github.io/FindPeaks1D.jl/stable)
[](https://ymtoo.github.io/FindPeaks1D.jl/dev)
Finding peaks in a 1-D signal in Julia. The implementation is based on [`find_peaks`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html) in `SciPy`.
## Installation
```julia-repl
julia>]
pkg> add FindPeaks1D
```
## Usage
```julia
using FindPeaks1D, ImageFiltering, Plots
n = 48001
s1 = ImageFiltering.Kernel.gaussian((1000,), (n,))
s2 = ImageFiltering.Kernel.gaussian((500,), (n,))
s = s1.parent/maximum(s1.parent) + 0.5 * circshift(
s2.parent/maximum(s2.parent), (10000,))
```
```julia
pkindices, properties = findpeaks1d(s;
height=0.1,
prominence=0.2,
width=1000.0,
relheight=0.9)
plot(s; color="black", label=false)
scatter!(pkindices, s[pkindices]; color="red", markersize=5, label="peaks")
vline!(properties["leftips"]; color="blue", width=2, label="peak edges")
vline!(properties["rightips"]; color="blue", width=2, label=false)
xlabel!("Sample")
```

```julia
pkindices, properties = findpeaks1d(s;
height=0.1,
distance=12000,
prominence=0.2,
width=1000.0,
relheight=0.9)
plot(s; color="black", label=false)
scatter!(pkindices, s[pkindices]; color="red", markersize=5, label="peaks")
vline!(properties["leftips"]; color="blue", width=2, label="peak edges")
vline!(properties["rightips"]; color="blue", width=2, label=false)
xlabel!("Sample")
```
 | FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | docs | 79 | # Manual
```@autodocs
Modules = [FindPeaks1D]
Pages = ["FindPeaks1D.jl"]
``` | FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"MIT"
] | 0.1.8 | 103c0a3502725e3b2e49afe0035bd68834655785 | docs | 203 | # FindPeaks1D Package
Finding peaks in a 1-D signal in Julia. The implementation is based on [`find_peaks`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html) in `SciPy`. | FindPeaks1D | https://github.com/ymtoo/FindPeaks1D.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 369 | using Documenter, QuantumAnnealingAnalytics
makedocs(
modules = [QuantumAnnealingAnalytics],
sitename = "QuantumAnnealingAnalytics",
authors = "Zach Morrell, Carleton Coffrin, Marc Vuffray",
pages = [
"Home" => "index.md",
"Library" => "api.md"
]
)
deploydocs(
repo = "github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git",
)
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 334 | module QuantumAnnealingAnalytics
import LinearAlgebra
import Plots
import Plots.Measures
import GraphRecipes
import QuantumAnnealing
import JSON
const _QA = QuantumAnnealing
include("base.jl")
include("plot.jl")
include("export.jl")
import Plots: savefig
export savefig
end #module
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 1159 | function spin_hamming_distance(spin_1, spin_2)
x = _QA.spin_to_int(spin_1)
y = _QA.spin_to_int(spin_2)
#Kernighan's Algorithm
diff_bits = xor(x,y)
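# each set bit of diff_bits marks a spin on which the two states differ; n & (n-1) clears the lowest set bit, so the loop below counts the set bits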
hamming_dist = 0
while diff_bits != 0
hamming_dist += 1
diff_bits = diff_bits & (diff_bits-1)
end
return hamming_dist
end
function _get_states(ising_model, energy_levels; n = 0)
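# returns the integer encodings of the states to keep: all 2^n states when energy_levels == 0, otherwise only the states in the lowest `energy_levels` energy levels of the ising model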
states = nothing
if energy_levels < 0
error("cannot have a negative number of energy levels")
elseif (ising_model == nothing && n == 0)
error("both ising model and n were not specified, could not compute states")
elseif ising_model == nothing && energy_levels != 0
error("must provide ising model if energy_levels != 0")
elseif ising_model != nothing && energy_levels != 0
energies = _QA.compute_ising_energy_levels(ising_model)
energy_levels = min(energy_levels, length(energies))
states = sort(collect(foldl(union, [energies[i].states for i = 1:energy_levels])))
else
if n == 0
n = _QA._check_ising_model_ids(ising_model)
end
states = 0:2^n-1
end
return states
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 856 | # QuantumAnnealingAnalytics exports everything except internal symbols, which are defined as
# those whose name starts with an underscore. If you don't want all of these
# symbols in your environment, then use `import QuantumAnnealingAnalytics` instead of
# `using QuantumAnnealingAnalytics`.
# Do not add QuantumAnnealingAnalytics-defined symbols to this exclude list. Instead, rename
# them with an underscore.
const _EXCLUDE_SYMBOLS = [Symbol(@__MODULE__), :eval, :include]
for sym in names(@__MODULE__, all=true)
sym_string = string(sym)
if sym in _EXCLUDE_SYMBOLS || startswith(sym_string, "_") || startswith(sym_string, "@_")
continue
end
if !(Base.isidentifier(sym) || (startswith(sym_string, "@") &&
Base.isidentifier(sym_string[2:end])))
continue
end
#println("$(sym)")
@eval export $sym
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 11060 | """
function to plot an annealing schedule from QuantumAnnealing.jl. kwargs are for Plots.plot
"""
function plot_annealing_schedule(annealing_schedule::_QA.AnnealingSchedule; s_steps=0.0:0.001:1.0, kwargs...)
plotted = hcat(annealing_schedule.A.(s_steps), annealing_schedule.B.(s_steps))
plt = Plots.plot(s_steps, plotted; title="Annealing Schedule", xlabel="s", label=["A(s)" "B(s)"], legend=:right, kwargs...)
return plt
end
"""
function to plot the states present in a density matrix output by QuantumAnnealing.simulate
kwargs are for Plots.bar
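For example (a minimal sketch; `AS_LINEAR` is assumed to be an annealing schedule exported by QuantumAnnealing.jl):
```julia
using QuantumAnnealing, QuantumAnnealingAnalytics
ising_model = Dict((1,) => 1.0, (1,2) => -1.0)             # single-qubit field and two-qubit coupling
ρ = QuantumAnnealing.simulate(ising_model, 100.0, AS_LINEAR)  # density matrix at the end of the anneal
plt = plot_states(ρ, order=:prob, num_states=4)
```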
"""
function plot_states(ρ; order=:numeric, spin_comp=ones(Int(log2(size(ρ)[1]))), num_states=0, ising_model=nothing, energy_levels=0, kwargs...)
state_probs = _QA.z_measure_probabilities(ρ)
n = Int(log2(length(state_probs)))
state_spin_vecs = map((x) -> _QA.int_to_spin(x,pad=n), 0:2^n-1)
states = [(prob = state_probs[i], spin_vec = state_spin_vecs[i]) for i = 1:length(state_probs)]
sortby = nothing
if order == :numeric
sortby = (x) -> _QA.spin_to_int(x.spin_vec)
elseif order == :hamming
if length(spin_comp) != n
error("invalid spin_comp length")
end
sortby = (x) -> spin_hamming_distance(x.spin_vec, spin_comp)
elseif order == :prob
#sorting highest to lowest prob
sortby = (x) -> -x.prob
else
error("order must be either :numeric or :hamming or :prob")
end
kept_states = _get_states(ising_model, energy_levels, n = n)
filter_function = (x) -> _QA.spin_to_int(x.spin_vec) in kept_states
states = filter(filter_function, states)
states = sort(states, by=sortby)
probs = [states[i].prob for i = 1:length(states)]
strings = map(x -> join(string.(x.spin_vec),"\n"), states)
num_state_bars = length(states)
if num_states > 0
num_state_bars = min(num_states, num_state_bars)
end
mm = Measures.mm
plt = Plots.bar(probs[1:num_state_bars]; xticks = (1:num_state_bars,strings[1:num_state_bars]),
size = (900,600),bottom_margin=10mm, xlabel="spin state",
ylabel = "probability",title="State Probabilities of ρ",label="prob",
legend = :none, kwargs...
)
return plt
end
"""
function to plot only the lowest-energy states in a dwisc bqpjson format file, String, or Dict.
possible orderings are :numeric, :hamming, or :prob. spin_comp specifies a state in spin format
for comparison in hamming distance calculations. kwargs are for Plots.bar
"""
function plot_ground_states_dwisc(dw::String; kwargs...)
if length(dw) <= 255 && isfile(dw)
dwisc_data = JSON.parsefile(dw)
else
dwisc_data = JSON.parse(dw)
end
return plot_ground_states_dwisc(dwisc_data; kwargs...)
end
function plot_ground_states_dwisc(dw::Dict{String,<:Any}; order=:numeric, spin_comp=[], kwargs...)
least_energy = dw["solutions"][1]["energy"]
groundstates = filter(x -> x["energy"] ≈ least_energy,dw["solutions"])
n = length(groundstates[1]["solution"])
sortby = nothing
if order == :numeric
sortby = (x) -> _QA.spin_to_int(x["solution"])
elseif order == :hamming
if spin_comp == []
spin_comp = ones(n)
elseif length(spin_comp) != n
error("invalid spin_comp length")
end
sortby = (x) -> spin_hamming_distance(x["solution"], spin_comp)
elseif order == :prob
sortby = (x) -> -x["num_occurrences"]
else
error("order must be either :numeric or :hamming")
end
groundstates = sort(groundstates, by=sortby)
groundprobs = nothing
if "prob" in keys(groundstates[1])
groundprobs = map(x -> x["prob"], groundstates)
else
shots = map(x -> x["num_occurrences"],dw["solutions"])
numshots = sum(shots)
groundprobs = map(x -> x["num_occurrences"]/numshots, groundstates)
end
groundstrings = map(x -> join(string.(x["solution"]),"\n"), groundstates)
mm = Measures.mm
plt = Plots.bar(groundprobs; xticks = (1:length(groundprobs),groundstrings), size = (900,600),bottom_margin=10mm,
xlabel="spin state", ylabel = "probability",title="Ground States, Energy = $least_energy",label="prob",
legend = :none, kwargs...)
return plt
end
"""
function to plot all states in a dwisc bqpjson format file, String, or Dict.
possible orderings are :numeric, :hamming, :energy, or :prob. spin_comp specifies a state in spin format
for comparison in hamming distance calculations. kwargs are for Plots.bar
"""
function plot_states_dwisc(dw::String; kwargs...)
if length(dw) <= 255 && isfile(dw)
dwisc_data = JSON.parsefile(dw)
else
dwisc_data = JSON.parse(dw)
end
return plot_states_dwisc(dwisc_data; kwargs...)
end
function plot_states_dwisc(dw::Dict{String,<:Any}; order=:numeric, spin_comp=[], num_states=16, kwargs...)
least_energy = dw["solutions"][1]["energy"]
states = dw["solutions"]
n = length(states[1]["solution"])
sortby = nothing
if order == :numeric
sortby = (x) -> _QA.spin_to_int(x["solution"])
elseif order == :hamming
if spin_comp == []
spin_comp = ones(n)
elseif length(spin_comp) != n
error("invalid spin_comp length")
end
sortby = (x) -> spin_hamming_distance(x["solution"], spin_comp)
elseif order == :energy
sortby = (x) -> x["energy"]
elseif order == :prob
sortby = (x) -> -x["num_occurrences"]
else
error("order must be :numeric, :hamming, or :energy")
end
states = sort(states, by=sortby)
probs = nothing
if "prob" in keys(states[1])
probs = map(x -> x["prob"], states)
else
shots = map(x -> x["num_occurrences"],dw["solutions"])
numshots = sum(shots)
probs = map(x -> x["num_occurrences"]/numshots, states)
end
strings = map(x -> join(string.(x["solution"]),"\n"), states)
mm = Measures.mm
num_states = min(length(states),num_states)
plt = Plots.bar(probs[1:num_states]; xticks = (1:num_states,strings[1:num_states]),
size = (900,600), bottom_margin=10mm, xlabel="spin state",
ylabel = "probability",title="States, Least Energy = $least_energy",label="prob",
legend = :none, kwargs...)
return plt
end
"""
function to plot the state steps, generated from calling simulate(..., state_steps=[]). This
is used to see instantaneous measurement values throughout the anneal. kwargs are for Plots.plot
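For example (a minimal sketch; `AS_LINEAR` is assumed to be an annealing schedule exported by QuantumAnnealing.jl):
```julia
using QuantumAnnealing, QuantumAnnealingAnalytics
ising_model = Dict((1,) => 1.0, (1,2) => -1.0)
state_steps = []  # filled by `simulate` with the intermediate states of the anneal
ρ = QuantumAnnealing.simulate(ising_model, 100.0, AS_LINEAR, state_steps=state_steps)
plt = plot_state_steps(state_steps)
```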
"""
function plot_state_steps(state_steps; ising_model=nothing, energy_levels=0, kwargs...)
n = Int(log2(size(state_steps[1])[1]))
ss = range(0,1,length=length(state_steps))
state_probs = map(x -> [real(x[i,i]) for i = 1:2^n], state_steps)
plotted_states = foldl(hcat,state_probs)
kept_states = _get_states(ising_model, energy_levels, n = n)
kept_indices = kept_states .+ 1
int2braket(i) = _QA.spin_to_braket(_QA.int_to_spin(i,pad=n))
labels = map(int2braket, reshape(kept_states,1,:))
plotted_states = plotted_states[kept_indices,:]
xlabel = "s"
ylabel = "probability"
title = "Spin State Trajectories"
legend = :topleft
plt = Plots.plot(ss, plotted_states'; title=title, label=labels, xlabel=xlabel, ylabel=ylabel, legend=legend, kwargs...)
return plt
end
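# Hypothetical usage sketch, assuming QuantumAnnealing is loaded so that simulate and
# AS_LINEAR are available:
#   state_steps = []
#   ρ = simulate(ising_model, 2.0, AS_LINEAR, state_steps=state_steps)
#   plt = plot_state_steps(state_steps, ising_model=ising_model, energy_levels=1)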
function plot_varied_time_simulations(ising_model::Dict, annealing_schedule::_QA.AnnealingSchedule, time_range::Tuple; num_points=50, xscale=:identity, energy_levels = 0, kwargs...)
n = _QA._check_ising_model_ids(ising_model)
plotted_values = zeros(num_points, 2^n)
annealing_times = nothing
if xscale == :identity
annealing_times = range(time_range[1], time_range[2], length=num_points)
elseif xscale == :log10
lower = log10(time_range[1])
upper = log10(time_range[2])
exponents = range(lower,upper,length=num_points)
annealing_times = 10 .^ exponents
else
error("xscale must be :identity or :log10")
end
for (i, annealing_time) in enumerate(annealing_times)
ρ = _QA.simulate(ising_model, annealing_time, annealing_schedule, silence=true)
probs = _QA.z_measure_probabilities(ρ)
plotted_values[i,:] = probs
end
kept_states = _get_states(ising_model, energy_levels, n = n)
kept_indices = kept_states .+ 1
plotted_values = plotted_values[:,kept_indices]
title = "Time Varying State Probabilities"
xlabel = "annealing time"
ylabel = "probability"
int2braket(i) = _QA.spin_to_braket(_QA.int_to_spin(i,pad=n))
labels = map(int2braket, reshape(kept_states,1,:))
legend = :topleft
plt = Plots.plot(annealing_times, plotted_values; title=title, xlabel=xlabel, ylabel=ylabel, label=labels, legend=legend, xscale=xscale, kwargs...)
return plt
end
function plot_hamiltonian_energy_spectrum(hamiltonian::Function; s_range = (0,1), num_points = 50, shift=false, kwargs...)
ss = range(s_range[1],s_range[2],length=num_points)
n = size(hamiltonian(ss[1]))[1]
energies = zeros(num_points, n)
for (i,s) in enumerate(ss)
hs = hamiltonian(s)
evals, evecs = LinearAlgebra.eigen(Matrix(hs))
energies[i,:] = evals
end
if shift
ground_energies = energies[:,1]
for i in 1:size(energies)[2]
energies[:,i] = energies[:,i] - ground_energies
end
end
title = "Time Varying Spectrum of H"
xlabel = "s"
ylabel = "energy"
legend = :none
plt = Plots.plot(ss, energies; title=title, xlabel=xlabel, ylabel=ylabel, legend=legend, kwargs...)
return plt
end
function plot_ising_model(ising_model; color_nodes=true, curves=false, nodeshape=:circle, kwargs...)
n = _QA._check_ising_model_ids(ising_model)
edges = fill(0, (n,n))
nodes = fill(0.0, n)
edge_labels = Dict()
for (k,v) in ising_model
if length(k) == 1
nodes[k[1]] = v
elseif length(k) == 2
edge_labels[k] = v
edges[k[1],k[2]] = 1
edges[k[2],k[1]] = 1
else
@warn("cannot display qubit couplings of more than two qubits, omitting coupling")
end
end
nodecolor = 1
if color_nodes
color_options = Plots.palette([:blue, :white, :red], 21)
color_choices = collect(color_options)
for (i,node) in enumerate(nodes)
color_val = round(node,digits=1)
if color_val < -1
color_val = -1
elseif color_val > 1
color_val = 1
end
color_index = round(Int64, ((color_val+1) * 10)+1)
color_choices[i] = color_options[color_index]
end
nodecolor = color_choices
end
if length(edge_labels) == 0
error("GraphRecipes.jl cannot currently take graphs without edges, so the ising model must have at least one nonzero coupling")
end
plt = GraphRecipes.graphplot(edges; nodecolor=nodecolor, names=nodes, curves=curves, edge_label=edge_labels, nodeshape=nodeshape, kwargs...)
return plt
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 3154 | @testset "state encoding and transformation" begin
@testset "spin_hamming_distance" begin
@test spin_hamming_distance([1, 1, 1], [1, 1, 1]) == 0
@test spin_hamming_distance([1, 1, 1], [-1, 1, 1]) == 1
@test spin_hamming_distance([1, 1, 1], [1, -1, 1]) == 1
@test spin_hamming_distance([1, 1, 1], [1, 1, -1]) == 1
@test spin_hamming_distance([1, 1, 1], [1, -1, -1]) == 2
@test spin_hamming_distance([1, 1, 1], [-1, 1, -1]) == 2
@test spin_hamming_distance([1, 1, 1], [-1, -1, 1]) == 2
@test spin_hamming_distance([1, 1, 1], [-1, -1, -1]) == 3
@test spin_hamming_distance([-1, -1, -1], [1, 1, 1]) == 3
@test spin_hamming_distance([-1, -1, -1], [-1, 1, 1]) == 2
@test spin_hamming_distance([-1, -1, -1], [1, -1, 1]) == 2
@test spin_hamming_distance([-1, -1, -1], [1, 1, -1]) == 2
@test spin_hamming_distance([-1, -1, -1], [1, -1, -1]) == 1
@test spin_hamming_distance([-1, -1, -1], [-1, 1, -1]) == 1
@test spin_hamming_distance([-1, -1, -1], [-1, -1, 1]) == 1
@test spin_hamming_distance([-1, -1, -1], [-1, -1, -1]) == 0
end
end
@testset "get_states functionality" begin
ising_model = nothing; energy_levels = 0; n = 0
try
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test false
catch
@test true
end
ising_model = nothing; energy_levels = 0; n = 2
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [0, 1, 2, 3]
ising_model = nothing; energy_levels = 1; n = 0
try
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test false
catch
@test true
end
ising_model = nothing; energy_levels = 1; n = 2
try
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test false
catch
@test true
end
ising_model = Dict((1,) => 1, (2,) => 1); energy_levels = 0; n = 0
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [0, 1, 2, 3]
ising_model = Dict((1,) => 1, (2,) => 1); energy_levels = 0; n = 2
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [0, 1, 2, 3]
ising_model = Dict((1,) => 1, (2,) => 1); energy_levels = 1; n = 0
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [3]
ising_model = Dict((1,) => 1, (2,) => 1); energy_levels = 1; n = 2
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [3]
ising_model = nothing; energy_levels = -1; n = 2
try
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test false
catch
@test true
end
ising_model = Dict((1,) => 1, (2,) => 1); energy_levels = 10; n = 0
states = QuantumAnnealingAnalytics._get_states(ising_model, energy_levels, n = n)
@test states == [0, 1, 2, 3]
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 22385 | single_qubit_dwisc_file = "data/dwisc_1q.json"
two_qubit_dwisc_file = "data/dwisc_2q.json"
single_qubit_dwisc_dict = JSON.parsefile(single_qubit_dwisc_file)
two_qubit_dwisc_dict = JSON.parsefile(two_qubit_dwisc_file)
single_qubit_dwisc_string = JSON.json(single_qubit_dwisc_dict)
two_qubit_dwisc_string = JSON.json(two_qubit_dwisc_dict)
# just testing that nothing errors out
@testset "plotting annealing schedule" begin
@testset "AS_CIRCULAR, no kwargs" begin
plt = plot_annealing_schedule(AS_CIRCULAR)
@test true
end
@testset "AS_CIRCULAR, kwargs" begin
plt = plot_annealing_schedule(AS_CIRCULAR,xlabel="x")
@test true
end
end
@testset "plotting states from ising dict, 1q" begin
ising = Dict((1,) => 1)
ρ = simulate(ising, 10, AS_CIRCULAR, silence=true)
@testset "single qubit plot, numeric sorting, no kwargs" begin
plt = plot_states(ρ, order=:numeric)
@test true
end
@testset "single qubit plot, hamming sorting, no kwargs" begin
plt = plot_states(ρ,order=:hamming)
@test true
plt = plot_states(ρ,order=:hamming,spin_comp=[-1])
@test true
end
@testset "single qubit plot, prob sorting, no kwargs" begin
plt = plot_states(ρ,order=:prob)
@test true
end
@testset "single qubit plot, numeric sorting, with kwargs" begin
plt = plot_states(ρ,order=:numeric,xlabel="x")
@test true
end
@testset "single qubit plot, hamming sorting, with kwargs" begin
plt = plot_states(ρ,order=:hamming,xlabel="x")
@test true
plt = plot_states(ρ,order=:hamming,spin_comp=[-1],xlabel="x")
@test true
end
@testset "single qubit plot, prob sorting, with kwargs" begin
plt = plot_states(ρ,order=:prob,xlabel="x")
@test true
end
end
@testset "plotting states from ising dict, 2q" begin
ising = Dict((1,) => 1, (2,) => 1, (1,2) => -1)
ρ = simulate(ising, 10, AS_CIRCULAR, silence=true)
@testset "two qubit plot, numeric sorting, no kwargs" begin
plt = plot_states(ρ,order=:numeric)
@test true
end
@testset "two qubit plot, hamming sorting, no kwargs" begin
plt = plot_states(ρ,order=:hamming)
@test true
plt = plot_states(ρ,order=:hamming,spin_comp=[-1, -1])
@test true
end
@testset "two qubit plot, prob sorting, no kwargs" begin
plt = plot_states(ρ,order=:prob)
@test true
end
@testset "two qubit plot, num_states" begin
plt = plot_states(ρ,num_states=2)
@test true
end
@testset "two qubit plot, num_states" begin
plt = plot_states(ρ,ising_model=ising,energy_levels=1)
@test true
end
@testset "two qubit plot, numeric sorting, with kwargs" begin
plt = plot_states(ρ,order=:numeric,xlabel="x")
@test true
end
@testset "two qubit plot, hamming sorting, with kwargs" begin
plt = plot_states(ρ,order=:hamming,xlabel="x")
@test true
plt = plot_states(ρ,order=:hamming,spin_comp=[-1,-1],xlabel="x")
@test true
end
@testset "two qubit plot, prob sorting, with kwargs" begin
plt = plot_states(ρ,order=:prob,xlabel="x")
@test true
end
end
@testset "plotting from dwisc file" begin
@testset "single qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:numeric)
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:numeric)
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:hamming)
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:hamming,spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:hamming)
@test true
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:hamming, spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:prob)
@test true
end
@testset "single qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:prob)
@test true
end
@testset "single qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:hamming,spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:hamming, spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_file,order=:prob, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_file,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:numeric)
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:numeric)
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:hamming)
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:hamming,spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:hamming)
@test true
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:hamming, spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:prob)
@test true
end
@testset "two qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:prob)
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:hamming,spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:hamming, spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_file,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_file,order=:prob, xlabel="x")
@test true
end
end
@testset "plotting from dwisc dict" begin
@testset "single qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:numeric)
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:numeric)
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:hamming)
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:hamming,spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:hamming)
@test true
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:hamming, spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:prob)
@test true
end
@testset "single qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:prob)
@test true
end
@testset "single qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:hamming,spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:hamming, spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_dict,order=:prob, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_dict,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:numeric)
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:numeric)
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:hamming)
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:hamming,spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:hamming)
@test true
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:hamming, spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:prob)
@test true
end
@testset "two qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:prob)
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:hamming,spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:hamming, spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_dict,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_dict,order=:prob, xlabel="x")
@test true
end
end
@testset "plotting from dwisc string" begin
@testset "single qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:numeric)
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:numeric)
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:hamming)
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:hamming,spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:hamming)
@test true
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:hamming, spin_comp=[-1])
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:prob)
@test true
end
@testset "single qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:prob)
@test true
end
@testset "single qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:numeric, xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:hamming,spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:hamming, spin_comp=[-1], xlabel="x")
@test true
end
@testset "single qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(single_qubit_dwisc_string,order=:prob, xlabel="x")
@test true
end
@testset "single qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(single_qubit_dwisc_string,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:numeric)
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:numeric)
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:hamming)
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:hamming,spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:hamming)
@test true
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:hamming, spin_comp=[-1,-1])
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, no kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:prob)
@test true
end
@testset "two qubit dwisc, all states, prob sorting, no kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:prob)
@test true
end
@testset "two qubit dwisc, ground states, numeric sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, numeric sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:numeric, xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, hamming sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:hamming, xlabel="x")
@test true
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:hamming,spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, hamming sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:hamming, xlabel="x")
@test true
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:hamming, spin_comp=[-1,-1], xlabel="x")
@test true
end
@testset "two qubit dwisc, ground states, prob sorting, with kwargs" begin
plt = plot_ground_states_dwisc(two_qubit_dwisc_string,order=:prob, xlabel="x")
@test true
end
@testset "two qubit dwisc, all states, prob sorting, with kwargs" begin
plt = plot_states_dwisc(two_qubit_dwisc_string,order=:prob, xlabel="x")
@test true
end
end
@testset "plotting single qubit state steps" begin
state_steps = []
ising = Dict((1,) => 1)
ρ = simulate(ising, 10, AS_CIRCULAR, silence=true, state_steps=state_steps)
@testset "single qubit, no kwargs" begin
plt = plot_state_steps(state_steps)
@test true
end
@testset "single qubit, with kwargs" begin
plt = plot_state_steps(state_steps,xlabel="x")
@test true
end
end
@testset "plotting single qubit state steps" begin
state_steps = []
ising = Dict((1,) => 1, (2,) => 1, (1,2) => -1)
ρ = simulate(ising, 10, AS_CIRCULAR, silence=true, state_steps=state_steps)
@testset "single qubit, no kwargs" begin
plt = plot_state_steps(state_steps)
@test true
end
@testset "single qubit, with kwargs" begin
plt = plot_state_steps(state_steps,xlabel="x")
@test true
end
end
@testset "plotting varied time simulations" begin
ising_model = Dict((1,) => 1, (2,) => -.25, (1,2) => -.9)
plot_varied_time_simulations(ising_model, AS_CIRCULAR, (0.5, 10), num_points=10)
@test true
end
@testset "plotting hamiltonian energy spectrum" begin
ising_model = Dict((1,) => 1, (2,) => -.25, (1,2) => -.9)
H(s) = hamiltonian_transverse_ising(ising_model, AS_CIRCULAR, s)
@testset "No parameters" begin
plt = plot_hamiltonian_energy_spectrum(H)
@test true
end
@testset "Changing kwargs" begin
plt = plot_hamiltonian_energy_spectrum(H, xlabel = "x")
@test true
end
@testset "changing s_range" begin
plt = plot_hamiltonian_energy_spectrum(H, s_range=(0.5,0.7))
@test true
end
@testset "shifting by ground state" begin
plt = plot_hamiltonian_energy_spectrum(H, shift=true)
@test true
end
end
@testset "plotting ising model graphs" begin
ising_model = Dict((1,2) => 1)
@testset "no fields, one coupling" begin
plt = plot_ising_model(ising_model, color_nodes=false)
@test true
plt = plot_ising_model(ising_model)
@test true
end
ising_model = Dict((1,) => 1.1, (2,) => -0.4, (1,2) => -1.1)
@testset "with fields, one coupling" begin
plt = plot_ising_model(ising_model, color_nodes=false)
@test true
plt = plot_ising_model(ising_model)
@test true
end
ising_model = Dict((1,2) => 1, (1,2,3) => 1)
@testset "no fields, one two qubit coupling, one three qubit coupling" begin
plt = plot_ising_model(ising_model, color_nodes=false)
@test true
plt = plot_ising_model(ising_model)
@test true
end
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 175 | using QuantumAnnealingAnalytics
using QuantumAnnealing
using JSON
using Test
@testset "QuantumAnnealingAnalytics" begin
include("base.jl")
include("plot.jl")
end
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | code | 352 | using QuantumAnnealing
qa_json_path = joinpath(dirname(pathof(QuantumAnnealing)), "..")
bqp1 = "$(qa_json_path)/test/data/bqpjson_1q.json"
bqp2 = "$(qa_json_path)/test/data/bqpjson_2q.json"
dwout1 = "dwisc_1q.json"
dwout2 = "dwisc_2q.json"
simulate_bqpjson(bqp1, dwout1, 100, AS_CIRCULAR, 1000)
simulate_bqpjson(bqp2, dwout2, 100, AS_CIRCULAR, 1000)
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | docs | 345 | QuantumAnnealingAnalytics.jl Change Log
=======================================
### Staged
- nothing
### v0.2.1
- Change `plot_states` to show all states by default (fix)
- Add ground energy shift option to hamiltonian energy plots
- Add Ising model graph plotting
### v0.2.0
- Updates for QuantumAnnealing v0.2
### v0.1.0
- Initial release
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | docs | 2117 | # QuantumAnnealingAnalytics.jl
[](https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/lanl-ansi/QuantumAnnealingAnalytics.jl)
[](https://lanl-ansi.github.io/QuantumAnnealingAnalytics.jl/dev/)
Tools for Visualization of Quantum Annealing
## Dependencies
This package assumes that you have installed [QuantumAnnealing.jl](https://github.com/lanl-ansi/QuantumAnnealing.jl)
## Quick Start
Install the packages,
```
] add QuantumAnnealing, QuantumAnnealingAnalytics
```
Load the package and build a two spin ferromagnetic Ising model for simulation,
```
using QuantumAnnealing, QuantumAnnealingAnalytics
ising_model = Dict((1,) => 0.1, (1,2) => -1.0)
```
Plot an annealing schedule
```
plt = plot_annealing_schedule(AS_LINEAR)
```
Perform a basic simulation with an annealing time of `2.0` and the linear annealing schedule, and plot the probability distribution
```
ρ = simulate(ising_model, 2.0, AS_LINEAR)
plt = plot_states(ρ)
```
Increase the annealing time to approach the adiabatic limit,
```
ρ = simulate(ising_model, 5.0, AS_LINEAR)
plt = plot_states(ρ)
```
Change the annealing schedule, observe different state probabilities, and save the plot to a file,
```
ρ = simulate(ising_model, 5.0, AS_QUADRATIC)
plt = plot_states(ρ)
savefig(plt, "file.pdf")
```
Store intermediate steps in the anneal and plot instantaneous state measurement probabilities
```
ρ_steps=[]
ρ = simulate(ising_model, 5.0, AS_LINEAR, state_steps=ρ_steps)
plt = plot_state_steps(ρ_steps)
```
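The Ising model's interaction graph can also be drawn (a minimal sketch; `plot_ising_model` requires at least one nonzero coupling)
```
plt = plot_ising_model(ising_model)
```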
# License
This software is provided under a BSD-ish license with a "modifications must be indicated" clause. See the `LICENSE.md` file for the full text. This package is part of the Hybrid Quantum-Classical Computing suite, known internally as LA-CC-16-032.
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | docs | 267 | # Base Functions
```@autodocs
Modules = [QuantumAnnealingAnalytics]
Pages = ["base.jl"]
Order = [:function]
Private = true
```
# Plotting Functions
```@autodocs
Modules = [QuantumAnnealingAnalytics]
Pages = ["plot.jl"]
Order = [:function]
Private = true
```
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.1 | 07fe5392a26ddc2b44503250a0972fa92ac1a40f | docs | 892 | # QuantumAnnealingAnalytics Documentation
```@meta
CurrentModule = QuantumAnnealingAnalytics
```
## Overview
QuantumAnnealingAnalytics is a Julia package for plotting outputs from the QuantumAnnealing package.
For more information on Quantum Annealing, see the documentation page for QuantumAnnealing.jl, found
[here](https://lanl-ansi.github.io/QuantumAnnealing.jl/stable/). This Package is effectively a wrapper
around Plots.jl, and can handle the kwargs passed to Plots.jl in addition to those explicitely listed.
## Installation
The latest stable release of QuantumAnnealing can be installed using the Julia package manager with
```julia
] add QuantumAnnealingAnalytics
```
For the current development version, "checkout" this package with
```julia
] add QuantumAnnealingAnalytics#master
```
Test that the package works by running
```julia
] test QuantumAnnealingAnalytics
```
| QuantumAnnealingAnalytics | https://github.com/lanl-ansi/QuantumAnnealingAnalytics.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 895 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.LinearGaussian: defaultLGModel, makeLGLOPModel,
makeLGAPFModel
setRNGs(0)
n = 1000
modelBootstrap, theta, ys, ko = defaultLGModel(n)
N = 1024*256
nt = Threads.nthreads()
println(ko.logZhats[n])
smcio = SMCIO{modelBootstrap.particle, modelBootstrap.pScratch}(N, n, nt, false, 2.0)
@time smc!(modelBootstrap, smcio)
@time smc!(modelBootstrap, smcio)
println([smcio.logZhats[n], N*smcio.Vhat1s[n]])
modelLOP = makeLGLOPModel(theta, ys)
smcio = SMCIO{modelLOP.particle, modelLOP.pScratch}(N, n, nt, false, 2.0)
@time smc!(modelLOP, smcio)
@time smc!(modelLOP, smcio)
println([smcio.logZhats[n], N*smcio.Vhat1s[n]])
modelAPF = makeLGAPFModel(theta, ys)
smcio = SMCIO{modelAPF.particle, modelAPF.pScratch}(N, n, nt, false, 2.0)
@time smc!(modelAPF, smcio)
@time smc!(modelAPF, smcio)
println([smcio.logZhats[n], N*smcio.Vhat1s[n]])
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 480 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.LinearGaussian.defaultLGModel
include("test.jl")
setRNGs(0)
model, theta, ys, ko = defaultLGModel(10)
println(ko.logZhats)
numParticles = 1024*1024
numTrials = 2
## just run the algorithm a few times
testSMC(model, numParticles, numTrials, false)
testSMC(model, numParticles, numTrials, false, 0.5)
testSMCParallel(model, numParticles, numTrials, false)
testSMCParallel(model, numParticles, numTrials, false, 0.5)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 2516 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.LinearGaussian.defaultLGModel
import SMCExamples.Particles.Float64Particle
import Statistics: mean, var
setRNGs(0)
model, theta, ys, ko = defaultLGModel(100)
smcio = SMCIO{model.particle, model.pScratch}(1024*1024, model.maxn,
Threads.nthreads(), false)
smc!(model, smcio)
predMeanSMC = SequentialMonteCarlo.eta(smcio, p -> p.x, false, smcio.n)
predVarSMC = SequentialMonteCarlo.eta(smcio, p -> p.x^2, false, smcio.n) -
predMeanSMC^2
println("Predictive mean: $(ko.predictionMeans[smcio.n])")
println("Estimate: $predMeanSMC")
println("Predictive variance: $(ko.predictionVariances[smcio.n])")
println("Estimate: $predVarSMC")
filtMeanSMC = SequentialMonteCarlo.eta(smcio, p -> p.x, true, smcio.n)
filtVarSMC = SequentialMonteCarlo.eta(smcio, p -> p.x^2, true, smcio.n) -
filtMeanSMC^2
println("Filtering mean: $(ko.filteringMeans[smcio.n])")
println("Estimate: $filtMeanSMC")
println("Filtering variance: $(ko.filteringVariances[smcio.n])")
println("Estimate: $filtVarSMC")
println("Running many particle filters with only 128 particles in parallel...")
m = 10000
lZs = Vector{Float64}(undef, m)
Vs = Vector{Float64}(undef, m)
lZhats = Vector{Float64}(undef, m)
Vhats = Vector{Float64}(undef, m)
f1(p::Float64Particle) = 1.0
nthreads = Threads.nthreads()
smcios = Vector{SMCIO}(undef, nthreads)
Threads.@threads for i = 1:nthreads
smcios[i] = SMCIO{model.particle, model.pScratch}(128, model.maxn, 1, true)
end
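# One small SMCIO per thread lets the m independent particle filters below run concurrently.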
@time Threads.@threads for i = 1:m
smcio = smcios[Threads.threadid()]
smc!(model, smcio)
lZs[i] = smcio.logZhats[smcio.n-1]
Vs[i] = SequentialMonteCarlo.V(smcio, f1, false, false, smcio.n)
lZhats[i] = smcio.logZhats[smcio.n]
Vhats[i] = SequentialMonteCarlo.V(smcio, f1, true, false, smcio.n)
end
print("Empirical relative variance of Z_n^N: ")
println(var(exp.(lZs.-ko.logZhats[smcio.n-1])))
print("Mean of unbiased estimator of relative variance of Z_n^N: ")
println(mean(Vs .* exp.(2*(lZs.-ko.logZhats[smcio.n-1]))))
print("Estimated standard deviation of the value above: ")
println(sqrt(var(Vs .* exp.(2*(lZs.-ko.logZhats[smcio.n-1])))/m))
print("Empirical relative variance of \\hat{Z}_n^N: ")
println(var(exp.(lZhats.-ko.logZhats[smcio.n])))
print("Mean of unbiased estimator of relative variance of \\hat{Z}_n^N: ")
println(mean(Vhats .* exp.(2*(lZhats.-ko.logZhats[smcio.n]))))
print("Estimated standard deviation of the value above: ")
println(sqrt(var(Vhats .* exp.(2*(lZhats.-ko.logZhats[smcio.n])))/m))
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 3018 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.LinearGaussian: LGTheta, Float64Particle, kalmanlogZ,
defaultLGModel, makeLGModel
import MonteCarloMarkovKernels: simulateChainProgress, simulateChain,
makeAMKernel, kde, estimateBM
using StaticArrays
using StatsBase
using LinearAlgebra
using Random
using Plots
Plots.gr()
setRNGs(0)
lgModel, theta, ys, ko = defaultLGModel(100)
const truex0 = theta.x0
const truev0 = theta.v0
const trueC = theta.C
@inline function toLGTheta(v::SVector{3, Float64})
return LGTheta(v[1], v[2], trueC, v[3], truex0, truev0)
end
t0 = SVector{3, Float64}(theta.A, theta.Q, theta.R)
const sigmaProp = SMatrix{3, 3, Float64}(Matrix{Float64}(I, 3, 3))
@inline function lglogprior(theta::LGTheta)
if theta.A < 0 || theta.A > 10 return -Inf end
if theta.Q < 0 || theta.Q > 10 return -Inf end
if theta.R < 0 || theta.R > 10 return -Inf end
return 0.0
end
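# Unnormalized log-posterior used by PMMH: a flat prior on a box combined with an estimate
# of the log marginal likelihood, from the particle filter (below) or the Kalman filter.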
function makelgsmcltd(ys::Vector{Float64}, N::Int64, nthreads::Int64)
smcio = SMCIO{Float64Particle, Nothing}(N, length(ys), nthreads, false, 0.5)
function ltd(in::SVector{3, Float64})
theta::LGTheta = toLGTheta(in)
lp::Float64 = lglogprior(theta)
if lp == -Inf return -Inf end
model::SMCModel = makeLGModel(theta, ys)
smc!(model, smcio)
return smcio.logZhats[length(ys)]
end
end
function makelgkalmanltd(ys::Vector{Float64})
function ltd(in::SVector{3, Float64})
theta::LGTheta = toLGTheta(in)
lp::Float64 = lglogprior(theta)
if lp == -Inf return -Inf end
return kalmanlogZ(theta, ys)
end
end
logtargetSMC = makelgsmcltd(ys, 1024, Threads.nthreads())
logtargetKalman = makelgkalmanltd(ys)
PSMC = makeAMKernel(logtargetSMC, sigmaProp)
PKalman = makeAMKernel(logtargetKalman, sigmaProp)
Random.seed!(12345)
chainSMC = simulateChainProgress(PSMC, t0, 2^15)
sar = PSMC(:acceptanceRate)
@time chainKalman = simulateChain(PKalman, t0, 2^20)
kar = PKalman(:acceptanceRate)
savefigures = false
vsKalman = (i->(x->x[i]).(chainKalman)).(1:3)
vsSMC = (i->(x->x[i]).(chainSMC)).(1:3)
plot(kde(vsSMC[1], sar))
plot!(kde(vsKalman[1], kar))
savefigures && savefig("pmmh_kde1.png")
plot(kde(vsSMC[2], sar))
plot!(kde(vsKalman[2], kar))
savefigures && savefig("pmmh_kde2.png")
plot(kde(vsSMC[3], sar))
plot!(kde(vsKalman[3], kar))
savefigures && savefig("pmmh_kde3.png")
contour(kde(vsSMC[1], vsSMC[2], sar))
contour!(kde(vsKalman[1], vsKalman[2], kar))
savefigures && savefig("pmmh_kde12.png")
contour(kde(vsSMC[1], vsSMC[3], sar))
contour!(kde(vsKalman[1], vsKalman[3], kar))
savefigures && savefig("pmmh_kde13.png")
contour(kde(vsSMC[2], vsSMC[3], sar))
contour!(kde(vsKalman[2], vsKalman[3], kar))
savefigures && savefig("pmmh_kde23.png")
plot(autocor(vsSMC[1]))
plot!(autocor(vsKalman[1]))
savefigures && savefig("pmmh_acf1.png")
plot(autocor(vsSMC[2]))
plot!(autocor(vsKalman[2]))
savefigures && savefig("pmmh_acf2.png")
plot(autocor(vsSMC[3]))
plot!(autocor(vsKalman[3]))
savefigures && savefig("pmmh_acf3.png")
estimateBM.(vsSMC)
estimateBM.(vsKalman)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 285 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.Lorenz96
using StaticArrays
include("test.jl")
setRNGs(0)
model, theta, ys = Lorenz96.defaultLorenzModel(8, 100)
## just run the algorithm a few times
testSMC(model, 1024, 2, false)
testSMCParallel(model, 1024, 2, false)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 423 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.MVLinearGaussian
using StaticArrays
include("test.jl")
setRNGs(0)
model, theta, ys, ko = MVLinearGaussian.defaultMVLGModel(2, 10)
println(ko.logZhats)
## just run the algorithm a few times
testSMC(model, 1024*1024, 2, false)
testSMC(model, 1024*1024, 2, false, 0.5)
testSMCParallel(model, 1024*1024, 2, false)
testSMCParallel(model, 1024*1024, 2, false, 0.5)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1035 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.MVLinearGaussian
using StaticArrays
setRNGs(0)
model, theta, ys, ko = MVLinearGaussian.defaultMVLGModel(2, 100)
smcio = SMCIO{model.particle, model.pScratch}(2^15, model.maxn,
Threads.nthreads(), true)
smc!(model, smcio)
predMeanSMC = SequentialMonteCarlo.eta(smcio, p -> p.x, false, smcio.n)
predVarSMC = SequentialMonteCarlo.eta(smcio,
p -> (p.x - predMeanSMC) * (p.x - predMeanSMC)', false, smcio.n)
println("Predictive mean: $(ko.predictionMeans[smcio.n])")
println("Estimate: $predMeanSMC")
println("Predictive variance: $(ko.predictionVariances[smcio.n])")
println("Estimate: $predVarSMC")
filtMeanSMC = SequentialMonteCarlo.eta(smcio, p -> p.x, true, smcio.n)
filtVarSMC = SequentialMonteCarlo.eta(smcio,
p -> (p.x - filtMeanSMC) * (p.x - filtMeanSMC)', true, smcio.n)
println("Filtering mean: $(ko.filteringMeans[smcio.n])")
println("Estimate: $filtMeanSMC")
println("Filtering variance: $(ko.filteringVariances[smcio.n])")
println("Estimate: $filtVarSMC")
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 948 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.MVLinearGaussian
import SMCExamples.Particles.MVFloat64Particle
using StaticArrays
using Plots
Plots.gr()
setRNGs(0)
model, theta, ys, ko = MVLinearGaussian.defaultMVLGModel(2, 10)
nsamples = 2^14
smcio = SMCIO{model.particle, model.pScratch}(16, model.maxn, 1, true, 2.0)
v = Vector{MVFloat64Particle{2}}(undef, 10)
for p = 1:10
v[p] = MVFloat64Particle{2}()
v[p].x .= zeros(MVector{2, Float64})
end
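# Estimate smoothing means by averaging the reference paths produced by repeated
# conditional SMC (csmc!) sweeps, each conditioned on the previous sweep's output path.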
function smoothingMeans(model, smcio, m, v)
tmp = Vector{MVector{2,Float64}}(undef, 10)
for p = 1:10
tmp[p] = zeros(MVector{2, Float64})
end
for i = 1:m
csmc!(model, smcio, v, v)
for p = 1:10
tmp[p] .+= v[p].x
end
end
return tmp ./ m
end
@time out = smoothingMeans(model, smcio, nsamples, v)
plot((x -> x[1]).(out))
plot!((x -> x[1]).(ko.smoothingMeans), color=:red)
plot((x -> x[2]).(out))
plot!((x -> x[2]).(ko.smoothingMeans), color=:red)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 453 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.Netto.defaultNettoModel
include("test.jl")
setRNGs(0)
model, theta, ys = defaultNettoModel(10)
numParticles = 1024*1024
numTrials = 2
## just run the algorithm a few times
testSMC(model, numParticles, numTrials, false)
testSMC(model, numParticles, numTrials, false, 0.5)
testSMCParallel(model, numParticles, numTrials, false)
testSMCParallel(model, numParticles, numTrials, false, 0.5)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 388 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.SMCSampler.defaultSMCSampler
using StaticArrays
include("test.jl")
setRNGs(0)
model, ltarget = defaultSMCSampler()
## just run the algorithm a few times
testSMC(model, 1024*1024, 2, false)
testSMC(model, 1024*1024, 2, false, 0.5)
testSMCParallel(model, 1024*1024, 2, false)
testSMCParallel(model, 1024*1024, 2, false, 0.5)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 666 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.SMCSampler.defaultSMCSampler
using MonteCarloMarkovKernels
using StaticArrays
using Plots
Plots.gr()
setRNGs(0)
model, ltarget = defaultSMCSampler()
smcio = SMCIO{model.particle, model.pScratch}(1024*1024, model.maxn,
Threads.nthreads(), false)
smc!(model, smcio)
xs = (p->p.x[1]).(smcio.zetas)
ys = (p->p.x[2]).(smcio.zetas)
## bimodal target needs a kde bandwidth adjustment since its variance is large
# x, y, f = MonteCarloMarkovKernels.kde(xs, ys, 0.005)
x, y, f = MonteCarloMarkovKernels.kde(xs, ys, 1.0, 0.001)
contour(x, y, f)
contour!(x,y, (x,y) -> exp(ltarget((SVector{2,Float64}(x,y)))))
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1222 | function testSMC(model::SMCModel, N::Int64, numTrials::Int64,
fullOutput = false, essThreshold::Float64 = 2.0)
smcio = SMCIO{model.particle, model.pScratch}(N, model.maxn, 1, fullOutput,
essThreshold)
println("Running SMC. N = $N, nthreads = 1, fullOutput = $fullOutput, essThreshold = $essThreshold")
for i=1:numTrials
@time smc!(model, smcio)
left = max(length(smcio.logZhats)-10,1)
right = length(smcio.logZhats)
println(smcio.logZhats[left:right])
println(sign.(smcio.Vhat1s[left:right]) .*
sqrt.(abs.(smcio.Vhat1s[left:right])))
end
end
function testSMCParallel(model::SMCModel, N::Int64, numTrials::Int64,
fullOutput = false, essThreshold::Float64 = 2.0)
nthreads = Threads.nthreads()
smcio = SMCIO{model.particle, model.pScratch}(N, model.maxn, nthreads,
fullOutput, essThreshold)
println("Running SMC. N = $N, nthreads = $nthreads, fullOutput = $fullOutput, essThreshold = $essThreshold")
for i = 1:numTrials
@time smc!(model, smcio)
left = max(length(smcio.logZhats)-10,1)
right = length(smcio.logZhats)
println(smcio.logZhats[left:right])
println(sign.(smcio.Vhat1s[left:right]) .*
sqrt.(abs.(smcio.Vhat1s[left:right])))
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 266 | module SMCExamples
using SequentialMonteCarlo
using RNGPool
using StaticArrays
include("particles.jl")
include("lgModel.jl")
include("mvlgModel.jl")
include("smcSampler.jl")
include("finiteFK.jl")
include("nettoModel.jl")
include("lorenz96Model.jl")
end # module
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 9622 | ## This model is primarily used for testing the SMC algorithm, since
## relevant quantities can be computed exactly
module FiniteFeynmanKac
using SequentialMonteCarlo
using RNGPool
using StaticArrays
import SMCExamples.Particles.Int64Particle
struct FiniteFK{d}
mu::SVector{d, Float64}
Ms::Vector{SMatrix{d, d, Float64}}
Gs::Vector{SVector{d, Float64}}
end
let
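# draw from a categorical distribution by inverting the CDF with a linear scan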
@inline function sample(probs::SVector{d, Float64}, u::Float64) where d
i::Int64 = 1
@inbounds while u > probs[i]
@inbounds u -= probs[i]
i += 1
end
return i
end
global function makeSMCModel(fk::FiniteFK{d}) where d
n::Int64 = length(fk.Gs)
mu::SVector{d, Float64} = fk.mu
Ms = Matrix{SVector{d, Float64}}(undef, n-1, d)
for p = 1:n-1
for i = 1:d
Ms[p,i] = fk.Ms[p][i, :]
end
end
lGVectors::Vector{SVector{d, Float64}} =
Vector{SVector{d, Float64}}(undef, n)
for p = 1:n
lGVectors[p] = log.(fk.Gs[p])
end
@inline function lG(p::Int64, particle::Int64Particle, ::Nothing)
@inbounds return lGVectors[p][particle.x]
end
@inline function M!(newParticle::Int64Particle, rng::RNG, p::Int64,
particle::Int64Particle, ::Nothing)
if p == 1
newParticle.x = sample(mu, rand(rng))
else
@inbounds probs::SVector{d, Float64} = Ms[p - 1, particle.x]
@inbounds newParticle.x = sample(probs, rand(rng))
end
end
return SMCModel(M!, lG, n, Int64Particle, Nothing)
end
end
function makelM(ffk::FiniteFeynmanKac.FiniteFK{d}) where d
n::Int64 = length(ffk.Gs)
lMs::Matrix{SVector{d, Float64}} = Matrix{SVector{d, Float64}}(undef, n-1, d)
for p in 1:n-1
for i = 1:d
lMs[p, i] = log.(ffk.Ms[p][i, :])
end
end
@inline function lM(p::Int64, particle::Int64Particle, newParticle::Int64Particle,
::Nothing)
x::Int64 = particle.x
y::Int64 = newParticle.x
@inbounds lMpx::SVector{d, Float64} = lMs[p-1, x]
@inbounds v::Float64 = lMpx[y]
return v
end
return lM
end
function randomFiniteFK(d::Int64, n::Int64)
rng = getRNG()
mu = rand(rng, d)
mu ./= sum(mu)
Ms = Vector{SMatrix{d, d, Float64}}(undef, n - 1)
Gs = Vector{SVector{d, Float64}}(undef, n)
for p = 1:n-1
tmp = rand(rng, d, d)
for i = 1:d
tmp[i,:] ./= sum(tmp[i,:])
end
Ms[p] = SMatrix{d, d}(tmp)
end
for p = 1:n
Gs[p] = SVector{d}(rand(rng, d))
end
return FiniteFK(SVector{d}(mu), Ms, Gs)
end
struct FiniteFKOut{d}
etas::Vector{SVector{d, Float64}}
etahats::Vector{SVector{d, Float64}}
logZhats::Vector{Float64}
etaGs::Vector{Float64}
pis::Vector{SVector{d, Float64}}
pihats::Vector{SVector{d, Float64}}
end
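# Exact forward-backward recursions: predicted (etas) and updated (etahats) marginals,
# log normalizing constants, and marginal smoothing distributions (pis, pihats).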
function calculateEtasZs(fk::FiniteFK{d}) where d
n::Int64 = length(fk.Gs)
etas = Vector{SVector{d, Float64}}(undef, n)
etahats = Vector{SVector{d, Float64}}(undef, n)
logZhats = Vector{Float64}(undef, n)
etaGs = Vector{Float64}(undef, n)
etas[1] = fk.mu
tmp = MVector{d, Float64}(undef)
tmp .= etas[1] .* fk.Gs[1]
etaGs[1] = sum(tmp)
logZhats[1] = log(etaGs[1])
etahats[1] = tmp ./ etaGs[1]
for p = 2:n
etas[p] = etahats[p-1]' * fk.Ms[p-1]
tmp .= etas[p] .* fk.Gs[p]
etaGs[p] = sum(tmp)
logZhats[p] = logZhats[p-1] + log(etaGs[p])
etahats[p] = tmp ./ etaGs[p]
end
pis = Vector{SVector{d, Float64}}(undef, n)
pihats = Vector{SVector{d, Float64}}(undef, n)
pis[n] = etas[n]
pihats[n] = etahats[n]
Mbar = MMatrix{d, d, Float64}(undef)
for p = n-1:-1:1
for i = 1:d
Mbar[i,:] = etahats[p] .* fk.Ms[p][:,i] ./ etas[p+1][i]
end
pis[p] = pis[p+1]' * Mbar
pihats[p] = pihats[p+1]' * Mbar
end
return FiniteFKOut{d}(etas, etahats, logZhats, etaGs, pis, pihats)
end
function eta(ffkout::FiniteFKOut{d}, fv::SVector{d, Float64}, hat::Bool,
p::Int64 = length(ffkout.etas)) where d
if hat
return sum(ffkout.etahats[p] .* fv)
else
return sum(ffkout.etas[p] .* fv)
end
end
function allEtas(ffkout::FiniteFKOut{d}, fv::SVector{d, Float64},
hat::Bool) where d
n::Int64 = length(ffkout.etas)
result::Vector{Float64} = Vector{Float64}(undef, n)
for p = 1:n
@inbounds result[p] = eta(ffkout, fv, hat, p)
end
return result
end
function gamma(ffkout::FiniteFKOut{d}, fv::SVector{d, Float64}, hat::Bool,
p::Int64 = length(ffkout.etas)) where d
idx::Int64 = p - 1 + hat
logval = idx == 0 ? 0.0 : ffkout.logZhats[idx]
v::Float64 = eta(ffkout, fv, hat, p)
logval += log(abs(v))
return (v >= 0, logval)
end
function allGammas(ffkout::FiniteFKOut{d}, fv::SVector{d, Float64},
hat::Bool) where d
n::Int64 = length(ffkout.etas)
result::Vector{Tuple{Bool, Float64}} =
Vector{Tuple{Bool, Float64}}(undef, n)
for p = 1:n
@inbounds result[p] = gamma(ffkout, fv, hat, p)
end
return result
end
function convertFunction(f::F, d::Int64) where F<:Function
fv = MVector{d, Float64}(undef)
v = Int64Particle()
for i = 1:d
v.x = i
fv[i] = f(v)
end
return SVector{d, Float64}(fv)
end
# function eta(ffkout::FiniteFKOut{d}, f::F, hat::Bool,
# p::Int64 = length(fk.Gs)) where {d, F<:Function}
# return eta(ffkout, convertFunction(f, d), hat, p)
# end
function allEtas(ffkout::FiniteFKOut{d}, f::F, hat::Bool) where {d, F<:Function}
return allEtas(ffkout, convertFunction(f, d), hat)
end
# function gamma(ffkout::FiniteFKOut{d}, f::F, hat::Bool,
# p::Int64 = length(fk.Gs)) where {d, F<:Function}
# return gamma(ffkout, convertFunction(f, d), hat, p)
# end
function allGammas(ffkout::FiniteFKOut{d}, f::F, hat::Bool) where {d,
F<:Function}
return allGammas(ffkout, convertFunction(f, d), hat)
end
function normalizeFiniteFK(fk::FiniteFK{d}, fkout::FiniteFKOut) where d
n = length(fk.Gs)
barGs = Vector{SVector{d, Float64}}(undef, n)
for p = 1:n
barGs[p] = fk.Gs[p] / fkout.etaGs[p]
end
return FiniteFK{d}(fk.mu,fk.Ms,barGs)
end
function _nuQpqf(fk::FiniteFK{d}, nu::SVector{d, Float64}, p::Int64, q::Int64,
f::SVector{d, Float64}) where d
if p == q
return sum(nu .* f)
end
v = MVector{d,Float64}(nu)
for i = p+1:q
v .*= fk.Gs[i-1]
v .= (v' * fk.Ms[i-1])'
end
return sum(v .* f)
end
function _Qpqf(fk::FiniteFK{d}, x::Int64, p::Int64, q::Int64,
f::SVector{d, Float64}) where d
v = zeros(MVector{d,Float64})
v[x] = 1.0
return _nuQpqf(fk, SVector{d, Float64}(v), p, q, f)
end
function _Qpqf(fk::FiniteFK{d}, p::Int64, q::Int64,
f::SVector{d, Float64}) where d
result = MVector{d, Float64}(undef)
for x = 1:d
result[x] = _Qpqf(fk, x, p, q, f)
end
return result
end
function _Qpnfs(fk::FiniteFK{d}, f::SVector{d, Float64}, n::Int64) where d
result = Vector{MVector{d, Float64}}(undef, n)
for p = 1:n
result[p] = _Qpqf(fk, p, n, f)
end
return result
end
function _correctedeta(fk::FiniteFK{d}, nu::SVector{d, Float64}, p::Int64,
n::Int64, resample::Vector{Bool}) where d
if p == n || resample[p]
return nu
else
q::Int64 = p
v::MVector{d, Float64} = nu
while !resample[q] && q < n
v .*= (fk.Gs[q]).^2
v = (v' * fk.Ms[q])'
q += 1
end
return v
end
end
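# Per-resampling-time terms of the asymptotic variance of the SMC estimate of
# eta_n(f) (or its hat/centred variants), for a given adaptive resampling schedule.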
function vpns(fk::FiniteFK{d}, fkout::FiniteFKOut, f::SVector{d, Float64},
hat::Bool, centred::Bool, resample::Vector{Bool},
n::Int64 = length(fk.Gs)) where d
if centred
return vpns(fk, fkout, f .- eta(fkout, f, hat, n), hat, false, resample, n)
end
if hat
return vpns(fk, fkout, f .* fk.Gs[n] / fkout.etaGs[n], false, false,
resample, n)
end
ps::Vector{Int64} = [1 ; findall(resample[1:n-1]) .+ 1]
nffk = normalizeFiniteFK(fk, fkout)
Qpnfs_values = _Qpnfs(nffk, f, n)
result = Vector{Float64}(undef, length(ps))
etanfSq = eta(fkout, f, false, n)^2
for i = 1:length(ps)
p = ps[i]
etap = fkout.etas[p]
correctedetap = _correctedeta(nffk, etap, p, n, resample)
if i == length(ps)
q = n
else
q = ps[i+1]-1
end
result[i] = sum(correctedetap .* Qpnfs_values[q].^2) - etanfSq
end
return result
end
function vpns(fk::FiniteFK{d}, fkout::FiniteFKOut, f::F, hat::Bool,
centred::Bool, resample::Vector{Bool},
n::Int64 = length(fk.Gs)) where {d, F<:Function}
return vpns(fk, fkout, convertFunction(f, d), hat, centred, resample, n)
end
function avar(fk::FiniteFK{d}, fkout::FiniteFKOut, f::SVector{d, Float64},
hat::Bool, centred::Bool, resample::Vector{Bool},
n::Int64 = length(fk.Gs)) where d
return sum(vpns(fk, fkout, f, hat, centred, resample, n))
end
function avar(fk::FiniteFK{d}, fkout::FiniteFKOut, f::F, hat::Bool,
centred::Bool, resample::Vector{Bool},
n::Int64 = length(fk.Gs)) where {d, F<:Function}
return sum(vpns(fk, fkout, convertFunction(f, d), hat, centred, resample, n))
end
function allavarhat1s(fk::FiniteFK{d}, fkout::FiniteFKOut,
resample::Vector{Bool}) where d
maxn = length(fk.Gs)
result = Vector{Float64}(undef, maxn)
f1 = ones(SVector{d, Float64})
for n = 1:maxn
result[n] = avar(fk, fkout, f1, true, false, resample, n)
end
return result
end
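# Bijection between length-n paths with components in {1,...,d} and the integers
# 1:d^n, used to index full path distributions.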
function Path2Int64(path::Vector{Int64Particle}, d::Int64)
v::Int64 = 1
for p = 1:length(path)
v += d^(p-1)*(path[p].x-1)
end
return v
end
function Int642Path(v::Int64, d::Int64, n::Int64)
path::Vector{Int64Particle} = Vector{Int64Particle}(undef, n)
y = v - 1
for p = 1:n
path[p] = Int64Particle()
path[p].x = 1 + mod(y, d)
y = div(y, d)
end
return path
end
function fullDensity(ffk::FiniteFK, vec::Vector{Int64Particle})
val::Float64 = ffk.mu[vec[1].x] * ffk.Gs[1][vec[1].x]
for p = 2:length(vec)
val *= ffk.Ms[p-1][vec[p-1].x,vec[p].x] * ffk.Gs[p][vec[p].x]
end
return val
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1650 | ## Makes the model associated with a fully adapted auxiliary particle filter for
## a linear Gaussian state space model. This was introduced in:
## M. K. Pitt and N. Shephard. Filtering via simulation: Auxiliary particle
## filters. J. Amer. Statist. Assoc., 94(446):590--599, 1999.
function makeLGAPFModel(theta::LGTheta, ys::Vector{Float64})
n::Int64 = length(ys)
RC2v0::Float64 = theta.R + theta.C * theta.C * theta.v0
v0RoverRC2v0::Float64 = theta.v0*theta.R/RC2v0
sqrtv0RoverRC2v0::Float64 = sqrt(v0RoverRC2v0)
mu1::Float64 = v0RoverRC2v0*(theta.x0/theta.v0 + theta.C*ys[1]/theta.R)
tmp1::Float64 = theta.C * theta.x0 - ys[1]
lG1::Float64 = -0.5 * log(2 * π * RC2v0) - tmp1 * 0.5/RC2v0 * tmp1
RC2Q::Float64 = theta.R + theta.C * theta.C * theta.Q
invRC2Qover2::Float64 = 0.5/RC2Q
QRoverRC2Q::Float64 = theta.Q * theta.R / RC2Q
sqrtQRoverRC2Q::Float64 = sqrt(QRoverRC2Q)
invRover2::Float64 = 0.5/theta.R
logncG::Float64 = -0.5 * log(2 * π * RC2Q)
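# In the fully adapted APF the time-p weight is the predictive density of y_{p+1} given x_p
# (the first weight also includes the density of y_1), and M! samples x_p from its
# conditional law given x_{p-1} and y_p.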
@inline function lG(p::Int64, particle::Float64Particle, ::Nothing)
if p == n
return p == 1 ? lG1 : 0.0
end
v::Float64 = theta.C * theta.A * particle.x - ys[p+1]
v = logncG - v * invRC2Qover2 * v
p == 1 && (v += lG1)
return v
end
@inline function M!(newParticle::Float64Particle, rng::RNG, p::Int64,
particle::Float64Particle, ::Nothing)
if p == 1
newParticle.x = mu1 + sqrtv0RoverRC2v0*randn(rng)
else
mu::Float64 = QRoverRC2Q*(theta.A*particle.x/theta.Q + theta.C*ys[p]/theta.R)
newParticle.x = mu + sqrtQRoverRC2Q*randn(rng)
end
end
return SMCModel(M!, lG, n, Float64Particle, Nothing)
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 2618 | ## Kalman filter for univariate linear Gaussian models
struct KalmanOut
predictionMeans::Vector{Float64}
predictionVariances::Vector{Float64}
filteringMeans::Vector{Float64}
filteringVariances::Vector{Float64}
smoothingMeans::Vector{Float64}
smoothingVariances::Vector{Float64}
logZhats::Vector{Float64}
end
function lognormpdf(y::Float64, μ::Float64, σ²::Float64)
v::Float64 = y-μ
lnc::Float64 = -0.5*log(2*π*σ²)
return lnc - 1/(2*σ²)*v*v
end
function kalman(theta::LGTheta, ys::Vector{Float64})
n = length(ys)
A = theta.A
C = theta.C
Q = theta.Q
R = theta.R
x0 = theta.x0
v0 = theta.v0
predictionMeans = Vector{Float64}(undef, n)
predictionVariances = Vector{Float64}(undef, n)
filteringMeans = Vector{Float64}(undef, n)
filteringVariances = Vector{Float64}(undef, n)
logZhats = Vector{Float64}(undef, n)
mutt1 = 0.0
mutt = 0.0
sigmatt1 = 0.0
sigmatt = 0.0
lZ = 0.0
for p = 1:n
if p == 1
mutt1 = x0
sigmatt1 = v0
else
mutt1 = A*mutt
sigmatt1 = A*sigmatt*A + Q
end
predictionMeans[p] = mutt1
predictionVariances[p] = sigmatt1
lZ += lognormpdf(ys[p], C*mutt1, C*sigmatt1*C + R)
logZhats[p] = lZ
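# Kalman gain, then measurement (filtering) update of the mean and variance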
K = sigmatt1 * C / (C * sigmatt1 * C + R)
mutt = mutt1 + K * (ys[p]-C*mutt1)
sigmatt = sigmatt1 - K * C * sigmatt1
filteringMeans[p] = mutt
filteringVariances[p] = sigmatt
end
smoothingMeans = Vector{Float64}(undef, n)
smoothingVariances = Vector{Float64}(undef, n)
smoothingMeans[n] = filteringMeans[n]
smoothingVariances[n] = filteringVariances[n]
for p = n:-1:2
J = filteringVariances[p-1] * A * inv(predictionVariances[p])
smoothingMeans[p-1] = filteringMeans[p-1] +
J * (smoothingMeans[p] - predictionMeans[p])
smoothingVariances[p-1] = filteringVariances[p-1] +
J * (smoothingVariances[p] - predictionVariances[p]) * J
end
return KalmanOut(predictionMeans, predictionVariances, filteringMeans,
filteringVariances, smoothingMeans, smoothingVariances, logZhats)
end
function kalmanlogZ(theta::LGTheta, ys::Vector{Float64})
n = length(ys)
A = theta.A
C = theta.C
Q = theta.Q
R = theta.R
x0 = theta.x0
v0 = theta.v0
mutt1 = 0.0
mutt = 0.0
sigmatt1 = 0.0
sigmatt = 0.0
lZ = 0.0
for p = 1:n
if p == 1
mutt1 = x0
sigmatt1 = v0
else
mutt1 = A*mutt
sigmatt1 = A*sigmatt*A + Q
end
lZ += lognormpdf(ys[p], C*mutt1, C*sigmatt1*C + R)
K = sigmatt1 * C / (C * sigmatt1 * C + R)
mutt = mutt1 + K * (ys[p]-C*mutt1)
sigmatt = sigmatt1 - K * C * sigmatt1
end
return lZ
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1756 | ## Makes the model associated with a particle filter using the "locally optimal
## proposal". Note that this is only optimal in the specific and limited sense
## that the weights are a function of xprev only, and hence the simulated value
## of x is not a source of variability.
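## For this model the locally optimal proposal at time p >= 2 is
## p(x_p | x_{p-1}, y_p) = Normal(mu_p, Q*R/(R + C^2*Q)) with
## mu_p = Q*R/(R + C^2*Q) * (A*x_{p-1}/Q + C*y_p/R), and the incremental weight depends on
## xprev only: G_p = p(y_p | x_{p-1}) = Normal(y_p; C*A*x_{p-1}, R + C^2*Q).
## The particle stores xprev so that lG can be evaluated after the mutation step.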
mutable struct LGLOPParticle
x::Float64
xprev::Float64
LGLOPParticle() = new()
end
function makeLGLOPModel(theta::LGTheta, ys::Vector{Float64})
n::Int64 = length(ys)
RC2v0::Float64 = theta.R + theta.C * theta.C * theta.v0
v0RoverRC2v0::Float64 = theta.v0*theta.R/RC2v0
sqrtv0RoverRC2v0::Float64 = sqrt(v0RoverRC2v0)
mu1::Float64 = v0RoverRC2v0*(theta.x0/theta.v0 + theta.C*ys[1]/theta.R)
tmp1::Float64 = theta.C * theta.x0 - ys[1]
lG1::Float64 = -0.5 * log(2 * π * RC2v0) - tmp1 * 0.5/RC2v0 * tmp1
RC2Q::Float64 = theta.R + theta.C * theta.C * theta.Q
invRC2Qover2::Float64 = 0.5/RC2Q
QRoverRC2Q::Float64 = theta.Q * theta.R / RC2Q
sqrtQRoverRC2Q::Float64 = sqrt(QRoverRC2Q)
invRover2::Float64 = 0.5/theta.R
logncG::Float64 = -0.5 * log(2 * π * RC2Q)
@inline function lG(p::Int64, particle::LGLOPParticle, ::Nothing)
if p == 1
return lG1
else
v::Float64 = theta.C * theta.A * particle.xprev - ys[p]
return logncG - v * invRC2Qover2 * v
end
end
@inline function M!(newParticle::LGLOPParticle, rng::RNG, p::Int64,
particle::LGLOPParticle, ::Nothing)
if p == 1
newParticle.x = mu1 + sqrtv0RoverRC2v0*randn(rng)
else
mu::Float64 = QRoverRC2Q*(theta.A*particle.x/theta.Q + theta.C*ys[p]/theta.R)
newParticle.x = mu + sqrtQRoverRC2Q*randn(rng)
newParticle.xprev = particle.x
end
end
model::SMCModel = SMCModel(M!, lG, n, LGLOPParticle, Nothing)
return model
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1991 | ## The standard univariate linear Gaussian state-space model example
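## Model: X_1 ~ Normal(x0, v0), X_p = A*X_{p-1} + V_p with V_p ~ Normal(0, Q), and
## observations Y_p = C*X_p + W_p with W_p ~ Normal(0, R).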
module LinearGaussian
using SequentialMonteCarlo
using RNGPool
import SMCExamples.Particles.Float64Particle
struct LGTheta
A::Float64
Q::Float64
C::Float64
R::Float64
x0::Float64
v0::Float64
end
function makeLGModel(theta::LGTheta, ys::Vector{Float64})
n::Int64 = length(ys)
sqrtQ::Float64 = sqrt(theta.Q)
invRover2::Float64 = 0.5/theta.R
sqrtv0::Float64 = sqrt(theta.v0)
logncG::Float64 = -0.5 * log(2 * π * theta.R)
@inline function lG(p::Int64, particle::Float64Particle, ::Nothing)
@inbounds v::Float64 = theta.C*particle.x - ys[p]
return logncG - v * invRover2 * v
end
@inline function M!(newParticle::Float64Particle, rng::RNG, p::Int64,
particle::Float64Particle, ::Nothing)
if p == 1
newParticle.x = theta.x0 + sqrtv0*randn(rng)
else
newParticle.x = theta.A*particle.x + sqrtQ*randn(rng)
end
end
return SMCModel(M!, lG, n, Float64Particle, Nothing)
end
function makelM(theta::LGTheta)
tQ::Float64 = theta.Q
negInvQover2::Float64 = -0.5/tQ
tA::Float64 = theta.A
@inline function lM(::Int64, particle::Float64Particle,
newParticle::Float64Particle, ::Nothing)
x::Float64 = particle.x
y::Float64 = newParticle.x
t::Float64 = tA*x - y
return negInvQover2 * t * t
end
return lM
end
function simulateLGModel(theta::LGTheta, n::Int64)
model = makeLGModel(theta, Vector{Float64}(undef, 0))
ys = Vector{Float64}(undef, n)
xParticle = Float64Particle()
rng = getRNG()
for p in 1:n
model.M!(xParticle, rng, p, xParticle, nothing)
ys[p] = theta.C*xParticle.x + sqrt(theta.R)*randn(rng)
end
return ys
end
function defaultLGModel(n::Int64)
theta = LGTheta(0.9, 0.6, 1.2, 1.5, 1.0, 2.0)
ys = simulateLGModel(theta, n)
ko = kalman(theta, ys)
lgModel = makeLGModel(theta, ys)
return lgModel, theta, ys, ko
end
include("lgKalman.jl")
include("lgLOPModel.jl")
include("lgAPFModel.jl")
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 2728 | ## Creates a simple Euler--Maruyama discretization of a diffusion in which the
## drift coefficient corresponds to the Lorenz96 model and the diffusion
## coefficient is constant.
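## Latent dynamics: over each observation interval Δ, `steps` Euler--Maruyama sub-steps of
## size h = Δ/steps are taken, X <- X + b(X)*h + σ*sqrt(h)*ξ with ξ ~ Normal(0, I), where b is
## the Lorenz96 drift computed by `lorenz` below. Observations are Y_p = X_p + δ*W_p with
## W_p ~ Normal(0, I) componentwise; the initial state is X_1 ~ Normal(0, σ²*I).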
module Lorenz96
using SequentialMonteCarlo
using RNGPool
using StaticArrays
import SMCExamples.Particles.MVFloat64Particle
using Random
struct LorenzTheta
σ::Float64
F::Float64
δ::Float64
Δ::Float64
steps::Int64
end
struct LorenzScratch{d}
t1::MVector{d, Float64}
t2::MVector{d, Float64}
end
LorenzScratch{d}() where d = LorenzScratch{d}(MVector{d, Float64}(undef),
MVector{d, Float64}(undef))
@inline function lorenz(F::Float64, x::MVector{d, Float64},
z::MVector{d, Float64}) where d
@inbounds z[1] = (x[2] - x[d-1]) * x[d]
@inbounds z[2] = (x[3] - x[d]) * x[1]
for i = 3:d-1
@inbounds z[i] = (x[i+1] - x[i-2]) * x[i-1]
end
@inbounds z[d] = (x[1] - x[d-2]) * x[d-1]
  z .-= x ## linear damping term of the Lorenz96 drift
  z .+= F
end
function makeLorenzModel(theta::LorenzTheta,
ys::Vector{SVector{d, Float64}}) where d
n::Int64 = length(ys)
σ::Float64 = theta.σ
F::Float64 = theta.F
invδ²over2 = 0.5 / (theta.δ * theta.δ)
logncG = - 0.5 * d * log(2 * π * theta.δ * theta.δ)
steps::Int64 = theta.steps
Δ::Float64 = theta.Δ
h::Float64 = Δ/steps
sqrth::Float64 = sqrt(Δ/steps)
@inline function lG(p::Int64, particle::MVFloat64Particle{d}, ::LorenzScratch{d})
r::Float64 = 0.0
for i = 1:d
@inbounds v::Float64 = particle.x[i] - ys[p][i]
r += logncG - v * invδ²over2 * v
end
return r
end
## Euler--Maruyama
@inline function M!(newParticle::MVFloat64Particle{d}, rng::RNG, p::Int64,
particle::MVFloat64Particle{d}, scratch::LorenzScratch{d})
if p == 1
randn!(rng, newParticle.x)
newParticle.x .*= σ
else
scratch.t1 .= particle.x
for i = 1:steps
  lorenz(F, scratch.t1, scratch.t2) ## t2 = b(X), the drift at the current state
  scratch.t2 .*= h                  ## t2 = b(X) * h
  scratch.t1 .+= scratch.t2         ## X + b(X) * h
  randn!(rng, scratch.t2)
  scratch.t2 .*= σ * sqrth
  scratch.t1 .+= scratch.t2         ## Euler--Maruyama step: X + b(X)*h + σ*sqrt(h)*ξ
end
newParticle.x .= scratch.t1
end
end
return SMCModel(M!, lG, length(ys), MVFloat64Particle{d}, LorenzScratch{d})
end
function simulateLorenzModel(theta::LorenzTheta, d::Int64, n::Int64)
model = makeLorenzModel(theta, Vector{SVector{d, Float64}}(undef, 0))
ys = Vector{SVector{d, Float64}}(undef, n)
xParticle = MVFloat64Particle{d}()
xScratch = LorenzScratch{d}()
rng = getRNG()
for p in 1:n
model.M!(xParticle, rng, p, xParticle, xScratch)
ys[p] = xParticle.x + theta.δ * randn(rng, d)
end
return ys
end
function defaultLorenzModel(d::Int64, n::Int64)
theta = LorenzTheta(0.5, 8.0, 0.7, 0.05, 20)
ys = simulateLorenzModel(theta, d, n)
model = makeLorenzModel(theta, ys)
return model, theta, ys
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 2462 | import LinearAlgebra.diagm
## Kalman filter for multivariate linear Gaussian models
struct KalmanMVOut{d}
predictionMeans::Vector{MVector{d, Float64}}
predictionVariances::Vector{MMatrix{d, d, Float64}}
filteringMeans::Vector{MVector{d, Float64}}
filteringVariances::Vector{MMatrix{d, d, Float64}}
smoothingMeans::Vector{MVector{d, Float64}}
smoothingVariances::Vector{MMatrix{d, d, Float64}}
logZhats::Vector{Float64}
end
function logmvnormpdf(y::StaticVector{d}, μ::StaticVector{d},
Σ::StaticMatrix{d,d}) where d
v::SVector{d,Float64} = y-μ
lnc::Float64 = - 0.5 * d * log(2 * π) - 0.5 * logdet(Σ)
return lnc - 0.5 * dot(v, Σ \ v)
end
function kalmanMV(theta::MVLGTheta, ys::Vector{SVector{d,Float64}}) where d
n = length(ys)
A = theta.A
C = theta.C
Q = theta.Q
R = theta.R
x0 = theta.x0
v0 = theta.v0
predictionMeans = Vector{MVector{d,Float64}}(undef, n)
predictionVariances = Vector{MMatrix{d,d,Float64}}(undef, n)
filteringMeans = Vector{MVector{d,Float64}}(undef, n)
filteringVariances = Vector{MMatrix{d,d,Float64}}(undef, n)
logZhats = Vector{Float64}(undef, n)
mutt1 = MVector{d,Float64}(undef)
mutt = MVector{d,Float64}(undef)
sigmatt1 = MMatrix{d,d,Float64}(undef)
sigmatt = MMatrix{d,d,Float64}(undef)
lZ = 0.0
for p = 1:n
if p == 1
mutt1 = x0
sigmatt1 = diagm(0 => v0)
else
mutt1 = A * mutt
sigmatt1 = A * sigmatt * A' + Q
end
predictionMeans[p] = mutt1
predictionVariances[p] = sigmatt1
lZ += logmvnormpdf(ys[p], C*mutt1, C*sigmatt1*C' + R)
logZhats[p] = lZ
K = sigmatt1 * C' * inv(C * sigmatt1 * C' + R)
mutt = mutt1 + K * (ys[p] - C * mutt1)
sigmatt = sigmatt1 - K * C * sigmatt1
filteringMeans[p] = mutt
filteringVariances[p] = sigmatt
end
smoothingMeans = Vector{MVector{d,Float64}}(undef, n)
smoothingVariances = Vector{MMatrix{d,d,Float64}}(undef, n)
smoothingMeans[n] = filteringMeans[n]
smoothingVariances[n] = filteringVariances[n]
for p = n:-1:2
J = filteringVariances[p-1] * A' * inv(predictionVariances[p])
smoothingMeans[p-1] = filteringMeans[p-1] +
J * (smoothingMeans[p] - predictionMeans[p])
smoothingVariances[p-1] = filteringVariances[p-1] +
J * (smoothingVariances[p] - predictionVariances[p]) * J'
end
return KalmanMVOut(predictionMeans, predictionVariances, filteringMeans,
filteringVariances, smoothingMeans, smoothingVariances, logZhats)
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 2890 | ## Multivariate linear Gaussian model
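## Model: X_1 ~ Normal(x0, Diagonal(v0)), X_p = A*X_{p-1} + V_p with V_p ~ Normal(0, Q), and
## observations Y_p = C*X_p + W_p with W_p ~ Normal(0, R).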
module MVLinearGaussian
using SequentialMonteCarlo
using RNGPool
using StaticArrays
import SMCExamples.Particles.MVFloat64Particle
using LinearAlgebra
using Random
struct MVLGTheta{d}
A::SMatrix{d, d, Float64}
Q::SMatrix{d, d, Float64}
C::SMatrix{d, d, Float64}
R::SMatrix{d, d, Float64}
x0::SVector{d, Float64}
v0::SVector{d, Float64}
end
## scratch space for computations
struct MVLGPScratch{d}
t1::MVector{d, Float64}
t2::MVector{d, Float64}
end
MVLGPScratch{d}() where d = MVLGPScratch{d}(MVector{d, Float64}(undef),
MVector{d, Float64}(undef))
function makeMVLGModel(theta::MVLGTheta, ys::Vector{SVector{d, Float64}}) where
d
n = length(ys)
cholQ = SMatrix{d, d, Float64}(cholesky(theta.Q).L)
invRover2 = 0.5 * inv(theta.R)
sqrtv0 = sqrt.(theta.v0)
logncG = - 0.5 * d * log(2 * π) - 0.5 * logdet(theta.R)
@inline function lG(p::Int64, particle::MVFloat64Particle{d},
scratch::MVLGPScratch{d})
mul!(scratch.t1, theta.C, particle.x)
@inbounds scratch.t2 .= scratch.t1 .- ys[p]
mul!(scratch.t1, invRover2, scratch.t2)
return logncG - dot(scratch.t1,scratch.t2)
end
@inline function M!(newParticle::MVFloat64Particle{d}, rng::RNG, p::Int64,
particle::MVFloat64Particle{d}, scratch::MVLGPScratch{d})
if p == 1
randn!(rng, scratch.t1)
scratch.t2 .= sqrtv0 .* scratch.t1
newParticle.x .= theta.x0 .+ scratch.t2
else
randn!(rng, scratch.t1)
mul!(scratch.t2, cholQ, scratch.t1)
mul!(scratch.t1, theta.A, particle.x)
newParticle.x .= scratch.t1 .+ scratch.t2
end
end
return SMCModel(M!, lG, length(ys), MVFloat64Particle{d}, MVLGPScratch{d})
end
function simulateMVLGModel(theta::MVLGTheta{d}, n::Int64) where d
model = makeMVLGModel(theta, Vector{SVector{d, Float64}}(undef, 0))
ys = Vector{SVector{d, Float64}}(undef, n)
xParticle = MVFloat64Particle{d}()
xScratch = MVLGPScratch{d}()
cholR = cholesky(theta.R).L
rng = getRNG()
for p in 1:n
model.M!(xParticle, rng, p, xParticle, xScratch)
ys[p] = theta.C*(xParticle.x) + cholR * randn(rng,d)
end
return ys
end
function defaultMVLGModel(d::Int64, n::Int64)
function toeplitz(d::Int64, a::Float64, C::Float64)
M = Matrix{Float64}(undef, d, d)
for i = 1:d
for j = 1:d
M[i,j] = C * a^abs(i-j)
end
end
return SMatrix{d, d, Float64}(M)
end
tA = SMatrix{d, d, Float64}(0.9 * Matrix{Float64}(I, d, d))
tC = toeplitz(d, 0.5, 1.2)
tQ = toeplitz(d, 0.2, 0.6)
tR = toeplitz(d, 0.3, 1.5)
tx0 = SVector{d,Float64}(range(1, step=1, length=d))
tv0 = SVector{d,Float64}(range(2, step=1, length=d))
theta = MVLGTheta(tA, tQ, tC, tR, tx0, tv0)
ys = simulateMVLGModel(theta, n)
ko = kalmanMV(theta, ys)
mvlgModel = makeMVLGModel(theta, ys)
return mvlgModel, theta, ys, ko
end
include("mvlgKalman.jl")
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1713 | ## Simple non-linear toy model proposed by:
## Netto, M., Gimeno, L. and Mendes, M., 1978. On the optimal and suboptimal
## nonlinear filtering problem for discrete-time systems. IEEE Transactions on
## Automatic Control, 23(6), pp.1062-1067.
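## Model: X_1 ~ Normal(0, σ²), X_p = X_{p-1}/2 + 25*X_{p-1}/(1 + X_{p-1}²) + 8*cos(1.2*p) + V_p
## with V_p ~ Normal(0, σ²), and observations Y_p = X_p²/20 + W_p with W_p ~ Normal(0, δ²).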
module Netto
using SequentialMonteCarlo
using RNGPool
import SMCExamples.Particles.Float64Particle
struct NettoTheta
σ²::Float64
δ²::Float64
end
function makeNettoModel(theta::NettoTheta, ys::Vector{Float64})
n::Int64 = length(ys)
σ::Float64 = sqrt(theta.σ²)
invδ²over2::Float64 = 0.5 / theta.δ²
logncG::Float64 = -0.5 * log(2 * π * theta.δ²)
@inline function lG(p::Int64, particle::Float64Particle, ::Nothing)
x::Float64 = particle.x
tmp::Float64 = x*x/20.0
v::Float64 = tmp - ys[p]
return logncG - v * invδ²over2 * v
end
@inline function M!(newParticle::Float64Particle, rng::RNG, p::Int64,
particle::Float64Particle, ::Nothing)
if p == 1
newParticle.x = σ*randn(rng)
else
x::Float64 = particle.x
newParticle.x = x/2.0 + 25*x/(1.0+x*x) + 8*cos(1.2*p) + σ*randn(rng)
end
end
model::SMCModel = SMCModel(M!, lG, n, Float64Particle, Nothing)
return model
end
function simulateNettoModel(theta::NettoTheta, n::Int64)
model = makeNettoModel(theta, Vector{Float64}(undef, 0))
ys = Vector{Float64}(undef, n)
xParticle = Float64Particle()
rng = getRNG()
for p in 1:n
model.M!(xParticle, rng, p, xParticle, nothing)
ys[p] = xParticle.x*xParticle.x/20.0 + sqrt(theta.δ²)*randn(rng) ## observation noise has variance δ², matching lG
end
return ys
end
function defaultNettoModel(n::Int64)
theta = NettoTheta(0.9, 0.6)
ys = simulateNettoModel(theta, n)
model = makeNettoModel(theta, ys)
return model, theta, ys
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 685 | ## these are pre-defined particle types, only for convenience / as examples
module Particles
using StaticArrays
mutable struct Int64Particle
x::Int64
Int64Particle() = new()
end
@inline function Base.:(==)(x::Int64Particle, y::Int64Particle)
return x.x == y.x
end
mutable struct Float64Particle
x::Float64
Float64Particle() = new()
end
@inline function Base.:(==)(x::Float64Particle, y::Float64Particle)
return x.x == y.x
end
struct MVFloat64Particle{d}
x::MVector{d, Float64}
MVFloat64Particle{d}() where d = new(MVector{d, Float64}(undef))
end
@inline function Base.:(==)(x::MVFloat64Particle{d}, y::MVFloat64Particle{d}) where d
return x.x == y.x
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 6828 | ## Provides a very basic SMC sampler using random walk Metropolis proposals
## in two variants: the first only allows one-dimensional states, the second
## allows states of any fixed dimension
## This could clearly be improved to allow proposal covariance matrices to be
## supplied; currently only multipliers for an identity covariance are
## permitted. Perhaps more importantly, I would like to implement a
## mechanism for adaptively choosing the temperatures, e.g. by using the
## conditional ESS of:
## Zhou, Y., Johansen, A.M. and Aston, J.A., 2016. Toward Automatic Model
## Comparison: An Adaptive Sequential Monte Carlo Approach. Journal of
## Computational and Graphical Statistics, 25(3), pp. 701--726.
## A functional, generic SMC sampler could then be included in
## SequentialMonteCarlo.jl
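## The sampler targets the tempered sequence π_p(x) ∝ π₀(x)^(1 - β_p) * π₁(x)^(β_p),
## mutating particles with a few random walk Metropolis steps that leave π_p invariant and
## weighting with G_p(x) = (π₁(x)/π₀(x))^(β_{p+1} - β_p); since π₀ and π₁ are both normalised
## densities here, the resulting normalising-constant estimate should be close to 1 (log Ẑ ≈ 0).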
module SMCSampler
using SequentialMonteCarlo
using RNGPool
using StaticArrays
using LinearAlgebra
using Random
## one-dimensional state
mutable struct SMCSP
x::Float64
lpibar0::Float64
lpibar1::Float64
SMCSP() = new()
end
## for one-dimensional states
function makeRWSMCSampler(mu, lpibar0, lpibar1, betas::Vector{Float64},
taus::Vector{Float64}, iterations::Vector{Int64})
n = length(betas)
@assert n == length(taus) == length(iterations)
@inline function lG(p::Int64, particle::SMCSP, ::Nothing)
if p == n
return 0.0
end
@inbounds return (betas[p + 1] - betas[p]) *
(particle.lpibar1 - particle.lpibar0);
end
@inline function M!(newParticle::SMCSP, rng::RNG, p::Int64,
particle::SMCSP, ::Nothing)
if p == 1
newParticle.x = mu(rng)
newParticle.lpibar0 = lpibar0(newParticle.x)
newParticle.lpibar1 = lpibar1(newParticle.x)
else
y::Float64 = particle.x
lv0::Float64 = particle.lpibar0
lv1::Float64 = particle.lpibar1
for i = 1:iterations[p]
@inbounds z::Float64 = y + taus[p] * randn(rng)
z_lv0::Float64 = lpibar0(z)
z_lv1::Float64 = lpibar1(z)
@inbounds vy::Float64 = (1.0 - betas[p]) * lv0 + betas[p] * lv1
@inbounds vz::Float64 = (1.0 - betas[p]) * z_lv0 + betas[p] * z_lv1
lu::Float64 = -randexp(rng)
if lu < vz - vy
y = z
lv0 = z_lv0
lv1 = z_lv1
end
end
newParticle.x = y
newParticle.lpibar0 = lv0
newParticle.lpibar1 = lv1
end
end
return SMCModel(M!, lG, n, SMCSP, Nothing)
end
function defaultSMCSampler1D()
function makenormlogpdf(mu::Float64, var::Float64)
c1::Float64 = - 0.5 / var
c2::Float64 = - 0.5 * log(2 * π * var)
@inline function lpdf(x::Float64)
v::Float64 = x - mu
return c1*v*v + c2
end
return lpdf
end
betas::Vector{Float64} = [0, 0.0005, 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05,
0.1, 0.25, 0.5, 1]
taus::Vector{Float64} = [0.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
1.0, 1.0]
iterations::Vector{Int64} = [0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
x0::Float64 = 0.0
v0::Float64 = 100.0
sqrtv0::Float64 = sqrt(v0)
lpibar0 = makenormlogpdf(x0, v0)
@inline function mu(rng::RNG)
return x0 + sqrtv0 * randn(rng)
end
lw1::Float64 = log(0.3)
lw2::Float64 = log(0.7)
lpdf1 = makenormlogpdf(-10.0, 0.01)
lpdf2 = makenormlogpdf(10.0, 0.04)
@inline function lpibar1(x::Float64)
v1::Float64 = lw1 + lpdf1(x)
v2::Float64 = lw2 + lpdf2(x)
m::Float64 = max(v1, v2)
return m + log(exp(v1 - m) + exp(v2 - m));
end
return makeRWSMCSampler(mu, lpibar0, lpibar1, betas, taus, iterations),
lpibar1
end
## fixed-dimensional state
mutable struct SMCSamplerParticle{d}
x::SVector{d, Float64}
lpibar0::Float64
lpibar1::Float64
SMCSamplerParticle{d}() where d = new()
end
## scratch space for computations
struct SMCSScratch{d}
tmp::MVector{d, Float64}
end
SMCSScratch{d}() where d = SMCSScratch{d}(MVector{d, Float64}(undef))
## for fixed-dimensional states
function makeRWSMCSampler(d::Int64, mu, lpibar0, lpibar1, betas::Vector{Float64},
taus::Vector{Float64}, iterations::Vector{Int64})
n = length(betas)
@assert n == length(taus) == length(iterations)
@inline function lG(p::Int64, particle::SMCSamplerParticle{d1},
scratch::SMCSScratch{d1}) where d1
if p == n
return 0.0
end
return (betas[p + 1] - betas[p]) * (particle.lpibar1 - particle.lpibar0);
end
@inline function M!(newParticle::SMCSamplerParticle{d1}, rng::RNG, p::Int64,
particle::SMCSamplerParticle{d1}, scratch::SMCSScratch{d1}) where d1
if p == 1
newParticle.x = mu(rng, scratch)
newParticle.lpibar0 = lpibar0(newParticle.x)
newParticle.lpibar1 = lpibar1(newParticle.x)
else
y::SVector{d1, Float64} = particle.x
lv0::Float64 = particle.lpibar0
lv1::Float64 = particle.lpibar1
for i = 1:iterations[p]
randn!(rng, scratch.tmp)
z::SVector{d1, Float64} = y + taus[p] * scratch.tmp
z_lv0::Float64 = lpibar0(z)
z_lv1::Float64 = lpibar1(z)
vy::Float64 = (1.0 - betas[p]) * lv0 + betas[p] * lv1
vz::Float64 = (1.0 - betas[p]) * z_lv0 + betas[p] * z_lv1
lu::Float64 = -randexp(rng)
if lu < vz - vy
y = z
lv0 = z_lv0
lv1 = z_lv1
end
end
newParticle.x = y
newParticle.lpibar0 = lv0
newParticle.lpibar1 = lv1
end
end
return SMCModel(M!, lG, n, SMCSamplerParticle{d}, SMCSScratch{d})
end
function defaultSMCSampler()
function makelogMVN(μ::SVector{2, Float64}, Σ::SMatrix{2, 2, Float64})
invΣ = inv(Σ)
lognc = - 0.5 * 2 * log(2 * π) - 0.5 * logdet(Σ)
function lpdf(x::SVector{d, Float64}) where d
v = x - μ
return lognc - 0.5*dot(v, invΣ * v)
end
return lpdf
end
betas = [0, 0.0005, 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05,
0.1, 0.25, 0.5, 1]
taus = [0.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 1.0]
iterations = [0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
μ0 = SVector{2, Float64}(0.0, 0.0)
Σ0 = SMatrix{2, 2, Float64}(100.0, 0.0, 0.0, 100.0)
A0 = SMatrix{2, 2, Float64}(cholesky(Σ0).L)
lpibar0 = makelogMVN(μ0, Σ0)
@inline function mu(rng::RNG, scratch::SMCSScratch{2})
randn!(rng, scratch.tmp)
return μ0 + A0 * scratch.tmp
end
lw1 = log(0.3)
lw2 = log(0.7)
μ11 = SVector{2, Float64}(-5.0, -5.0)
Σ11 = SMatrix{2, 2, Float64}(0.01, 0.005, 0.005, 0.01)
μ12 = SVector{2, Float64}(5.0, 5.0)
Σ12 = SMatrix{2, 2, Float64}(0.04, -0.01, -0.01, 0.04)
lpdf1 = makelogMVN(μ11, Σ11)
lpdf2 = makelogMVN(μ12, Σ12)
@inline function lpibar1(x::SVector{2, Float64})
v1::Float64 = lw1 + lpdf1(x)
v2::Float64 = lw2 + lpdf2(x)
m::Float64 = max(v1, v2)
return m + log(exp(v1 - m) + exp(v2 - m));
end
return makeRWSMCSampler(2, mu, lpibar0, lpibar1, betas, taus, iterations),
lpibar1
end
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1201 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.FiniteFeynmanKac
using Test
setRNGs(0)
d = 3
n = 10
ffk = FiniteFeynmanKac.randomFiniteFK(d, n)
ffkout = FiniteFeynmanKac.calculateEtasZs(ffk)
model = FiniteFeynmanKac.makeSMCModel(ffk)
smcio = SMCIO{model.particle, model.pScratch}(2^13, n, Threads.nthreads(),
false, 0.5)
smc!(model, smcio)
@test smcio.logZhats ≈ ffkout.logZhats atol=0.2
lM = FiniteFeynmanKac.makelM(ffk)
p1 = smcio.internal.zetaAncs[1]
p2 = smcio.zetas[1]
lM(n, p1, p2, smcio.internal.particleScratch)
## just test that the commands run; actual testing is part of the tests in
## SequentialMonteCarlo.jl
FiniteFeynmanKac.fullDensity(ffk, FiniteFeynmanKac.Int642Path(1, d, n))
FiniteFeynmanKac.allEtas(ffkout, p -> p.x, false)
FiniteFeynmanKac.allEtas(ffkout, p -> p.x, true)
FiniteFeynmanKac.allGammas(ffkout, p -> p.x, false)
FiniteFeynmanKac.allGammas(ffkout, p -> p.x, true)
FiniteFeynmanKac.vpns(ffk, ffkout, p -> p.x, true, true, smcio.resample)
FiniteFeynmanKac.avar(ffk, ffkout, p -> p.x, true, true, smcio.resample)
FiniteFeynmanKac.allavarhat1s(ffk, ffkout, smcio.resample)
@test FiniteFeynmanKac.Path2Int64(FiniteFeynmanKac.Int642Path(1, d, n), d) == 1
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 1944 | using SequentialMonteCarlo
using RNGPool
using SMCExamples.LinearGaussian: LGTheta, Float64Particle, kalmanlogZ,
defaultLGModel, makeLGModel
using MonteCarloMarkovKernels
using StaticArrays
using Test
using Random
using LinearAlgebra
import Statistics.mean
setRNGs(0)
lgModel, theta, ys, ko = defaultLGModel(10)
const truex0 = theta.x0
const truev0 = theta.v0
const trueC = theta.C
@inline function toLGTheta(v::SVector{3, Float64})
return LGTheta(v[1], v[2], trueC, v[3], truex0, truev0)
end
t0 = SVector{3, Float64}(theta.A, theta.Q, theta.R)
const sigmaProp = SMatrix{3, 3, Float64}(Matrix{Float64}(I, 3, 3))
@inline function lglogprior(theta::LGTheta)
if theta.A < 0 || theta.A > 10 return -Inf end
if theta.Q < 0 || theta.Q > 10 return -Inf end
if theta.R < 0 || theta.R > 10 return -Inf end
return 0.0
end
function makelgsmcltd(ys::Vector{Float64}, N::Int64, nthreads::Int64)
smcio = SMCIO{Float64Particle, Nothing}(N, length(ys), nthreads, false, 0.5)
function ltd(in::SVector{3, Float64})
theta::LGTheta = toLGTheta(in)
lp::Float64 = lglogprior(theta)
if lp == -Inf return -Inf end
model::SMCModel = makeLGModel(theta, ys)
smc!(model, smcio)
return smcio.logZhats[length(ys)]
end
end
function makelgkalmanltd(ys::Vector{Float64})
function ltd(in::SVector{3, Float64})
theta::LGTheta = toLGTheta(in)
lp::Float64 = lglogprior(theta)
if lp == -Inf return -Inf end
return kalmanlogZ(theta, ys)
end
end
logtargetSMC = makelgsmcltd(ys, 128, Threads.nthreads())
logtargetKalman = makelgkalmanltd(ys)
PSMC = makeAMKernel(logtargetSMC, sigmaProp)
PKalman = makeAMKernel(logtargetKalman, sigmaProp)
Random.seed!(12345)
chainSMC = simulateChainProgress(PSMC, t0, 1024*32)
chainKalman = simulateChain(PKalman, t0, 1024*1024)
@test mean(chainSMC) ≈ mean(chainKalman) rtol=0.1
@test MonteCarloMarkovKernels.cov(chainSMC) ≈
MonteCarloMarkovKernels.cov(chainKalman) rtol=0.1
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 907 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.LinearGaussian: defaultLGModel, makeLGLOPModel,
makeLGAPFModel, kalmanlogZ, makelM
using Test
setRNGs(0)
N = 32768
n = 10
nt = Threads.nthreads()
model, theta, ys, ko = defaultLGModel(n)
@test ko.logZhats[end] ≈ kalmanlogZ(theta, ys)
smcio = SMCIO{model.particle, model.pScratch}(N, n, nt, false)
smc!(model, smcio)
@test smcio.logZhats ≈ ko.logZhats atol=0.1
lM = makelM(theta)
p1 = smcio.internal.zetaAncs[1]
p2 = smcio.zetas[1]
lM(n, p1, p2, smcio.internal.particleScratch)
modelLOP = makeLGLOPModel(theta, ys)
smcio = SMCIO{modelLOP.particle, modelLOP.pScratch}(N, n, nt, false)
smc!(modelLOP, smcio)
@test smcio.logZhats ≈ ko.logZhats atol=0.1
modelAPF = makeLGAPFModel(theta, ys)
smcio = SMCIO{modelAPF.particle, modelAPF.pScratch}(N, n, nt, false, 2.0)
smc!(modelAPF, smcio)
@test smcio.logZhats[1:n-1] ≈ ko.logZhats[2:n] atol=0.1
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 506 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.Lorenz96: defaultLorenzModel, LorenzTheta, makeLorenzModel
using Test
setRNGs(0)
N = 2^10
n = 10
nt = Threads.nthreads()
model, theta, ys = defaultLorenzModel(8, 100)
smcio = SMCIO{model.particle, model.pScratch}(N, n, nt, false)
smc!(model, smcio)
lzh1 = copy(smcio.logZhats)
theta2 = LorenzTheta(theta.σ, theta.F, theta.δ, theta.Δ, theta.steps*2)
model = makeLorenzModel(theta2, ys)
smc!(model, smcio)
@test smcio.logZhats ≈ lzh1 atol=1.0
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 331 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.MVLinearGaussian.defaultMVLGModel
using Test
setRNGs(0)
N = 2^16
n = 10
nt = Threads.nthreads()
model, theta, ys, ko = defaultMVLGModel(2, n)
smcio = SMCIO{model.particle, model.pScratch}(N, n, nt, false)
smc!(model, smcio)
@test smcio.logZhats ≈ ko.logZhats atol=0.1
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 272 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.Netto.defaultNettoModel
using Test
setRNGs(0)
N = 8192
n = 10
nt = Threads.nthreads()
model, theta, ys = defaultNettoModel(n)
smcio = SMCIO{model.particle, model.pScratch}(N, n, nt, false)
smc!(model, smcio)
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 459 | using Test
@testset "Finite FK Model tests" begin
include("ffk_test.jl")
end
@testset "LGModel tests" begin
include("lg_test.jl")
end
@testset "MVLGModel tests" begin
include("mvlg_test.jl")
end
@testset "SMC Sampler tests" begin
include("smcs_test.jl")
end
@testset "Lorenz96 tests" begin
include("lorenz96_test.jl")
end
@testset "Netto tests" begin
include("netto_test.jl")
end
@testset "PMMH tests" begin
include("lgPMMH_test.jl")
end
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | code | 532 | using SequentialMonteCarlo
using RNGPool
import SMCExamples.SMCSampler: defaultSMCSampler, defaultSMCSampler1D
using Test
setRNGs(0)
model, ltarget = defaultSMCSampler()
n = model.maxn
smcio = SMCIO{model.particle, model.pScratch}(2^16, n, Threads.nthreads(),
false)
smc!(model, smcio)
@test smcio.logZhats[n] ≈ 0.0 atol=0.1
model, ltarget = defaultSMCSampler1D()
n = model.maxn
smcio = SMCIO{model.particle, model.pScratch}(2^16, n, Threads.nthreads(),
false)
smc!(model, smcio)
@test smcio.logZhats[n] ≈ 0.0 atol=0.1
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.6.1 | c574fb31f332ab93a888e669dbb7626c827da1a3 | docs | 858 | # SMCExamples
<!-- badges: start -->
[](https://github.com/awllee/SMCExamples.jl/actions)
[](https://codecov.io/gh/awllee/SMCExamples.jl)
<!-- badges: end -->
This package provides example models for use with the [SequentialMonteCarlo.jl](https://github.com/awllee/SequentialMonteCarlo.jl) package.
Some of the models in src/ are also used to test SequentialMonteCarlo.jl, particularly the Finite Feynman--Kac model which is otherwise not likely to be very interesting.
The demos in demo/ can be run as scripts or in the REPL. Some involve plotting. They are intended only to show how to run the algorithm and look at some of its outputs.
The tests in test/ also demonstrate some aspects of usage.
| SMCExamples | https://github.com/awllee/SMCExamples.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 77 | println("Building the Beta Machine Learning Toolkit package (BetaML.jl)...")
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 6125 |
# To build the documentation:
# - julia --project="." make.jl
# - empty!(ARGS); include("make.jl")
# To build the documentation without running the tests:
# - julia --project="." make.jl preview
# - push!(ARGS,"preview"); include("make.jl")
# Format notes:
# # A markdown H1 title
# A non-code markdown normal line
## A comment within the code chunk
#src: line exclusive to the source code and thus filtered out unconditionally
using Test
using Documenter, Literate, BetaML
import BetaML.Bmlj
if "preview" in ARGS
println("*** Attention: code in the tutorial will not be run/tested")
else
println("*** Building documentation and testing tutorials...")
end
# Documentation source and temp dir
const SRC_ROOTDIR = joinpath(@__DIR__, "src/")
#const SRC_TEMPDIR = joinpath(@__DIR__, "srctemp/")
push!(LOAD_PATH,"../src/") # this is the source of the code, not the documentation
const _TUTORIAL_DIR = joinpath(SRC_ROOTDIR, "tutorials")
# Important: If some tutorial is removed but the md file is left, this may still be used by Documenter
_TUTORIAL_SUBDIR = [
#"Getting started",
"Classification - cars",
"Regression - bike sharing",
"Clustering - Iris",
"Multi-branch neural network",
"Feature importance"
]
function link_example(content)
edit_url = match(r"EditURL = \"(.+?)\"", content)[1]
footer = match(r"^(---\n\n\*This page was generated using)"m, content)[1]
content = replace(
content, footer => "[View this file on Github]($(edit_url)).\n\n" * footer
)
return content
end
function _file_list(full_dir, relative_dir, extension)
return map(
file -> joinpath(relative_dir, file),
filter(file -> endswith(file, extension), sort(readdir(full_dir))),
)
end
"""
_include_sandbox(filename)
Include the `filename` in a temporary module that acts as a sandbox. (Ensuring
no constants or functions leak into other files.)
"""
function _include_sandbox(filename)
mod = @eval module $(gensym()) end
return Base.include(mod, filename)
end
function literate_directory(dir)
rm.(_file_list(dir, dir, ".md"))
for filename in _file_list(dir, dir, ".jl")
# `include` the file to test it before `#src` lines are removed. It is
# in a testset to isolate local variables between files.
if ! ("preview" in ARGS)
@testset "$(filename)" begin
_include_sandbox(filename)
end
Literate.markdown(
filename,
dir;
documenter = true,
postprocess = link_example
)
# Literate.notebook(
# filename,
# dir;
# documenter = true,
# postprocess = link_example
# )
else
Literate.markdown(
filename,
dir;
documenter = true,
postprocess = link_example,
codefence = "```text" => "```"
)
# Literate.notebook(
# filename,
# dir;
# documenter = true,
# postprocess = link_example,
# codefence = "```text" => "```"
# )
end
end
return nothing
end
function preprocessDoc()
rm(SRC_TEMPDIR,recursive=true,force=true)
mkdir(SRC_TEMPDIR)
cp(SRC_ROOTDIR, SRC_TEMPDIR; force=true)
repl = r"`\?(\w+)`" => s"[`\1`](@ref)"
for (root, dirs, files) in walkdir(SRC_TEMPDIR)
#println("Files in $root")
for file in files
if endswith(file,".md") || endswith(file,".jl")
pathfile = joinpath(root, file)
#println(pathfile) # path to files
write(pathfile, replace(read(pathfile, String), repl))
end
end
end
touch(joinpath(SRC_TEMPDIR,".gitkeep"))
end
println("Starting literating tutorials (.jl --> .md)...")
literate_directory.(joinpath.(_TUTORIAL_DIR, _TUTORIAL_SUBDIR))
println("Starting making the documentation...")
#preprocessDoc()
makedocs(sitename="BetaML.jl Documentation",
authors = "Antonello Lobianco",
pages = [
"Index" => "index.md",
"Tutorial" => vcat("tutorials/Betaml_tutorial_getting_started.md",map(
subdir -> subdir => map(
file -> joinpath("tutorials", subdir, file),
filter(
file -> endswith(file, ".md"),
sort(readdir(joinpath(_TUTORIAL_DIR, subdir))),
),
),
_TUTORIAL_SUBDIR,
),
),
"API (Reference manual)" => [
"API V2 (current)" => [
"Introduction for user" => "Api_v2_user.md",
"For developers" => [
"API implementation" => "Api_v2_developer.md",
"Style guide" => "StyleGuide_templates.md"],
"The Api module" => "Api.md",
],
"Perceptron" => "Perceptron.md",
"Trees" => "Trees.md",
"Nn" => "Nn.md",
"Clustering" => "Clustering.md",
"GMM" => "GMM.md",
"Imputation" => "Imputation.md",
"Utils" => "Utils.md",
],
"MLJ interface" => "MLJ_interface.md",
# "Benchmarks" => "Benchmarks.md", #TODO: until https://github.com/IanButterworth/SystemBenchmark.jl/issues/64 is solved
],
format = Documenter.HTML(prettyurls = false, analytics = "G-JYKX8QY5JW"),
warnonly = true,
#strict = true,
#doctest = false,
#source = SRC_TEMPDIR, # Attention here !!!!!!!!!!!
)
println("Starting deploying the documentation...")
deploydocs(
repo = "github.com/sylvaticus/BetaML.jl.git",
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 109 |
# ## This is markdown title
# This is also markdown
## This is a normal comment
#
a = 1
b = a + 1
println(b)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 27099 | # # [A classification task when labels are known - determining the country of origin of cars given the cars characteristics](@id classification_tutorial)
# In this exercise we are provided with several technical characteristics (mpg, horsepower, weight, model year...) for several car models, together with the country of origin of each model, and we would like to create a machine learning model such that the country of origin can be accurately predicted given the technical characteristics.
# As the information to predict is a multi-class one, this is a _[classification](https://en.wikipedia.org/wiki/Statistical_classification)_ task.
# It is a challenging exercise due to the simultaneous presence of three factors: (1) presence of missing data; (2) unbalanced data - 254 out of 406 cars are US made; (3) small dataset.
#
# Data origin:
# - dataset description: [https://archive.ics.uci.edu/ml/datasets/auto+mpg](https://archive.ics.uci.edu/ml/datasets/auto+mpg)
#src Also useful: https://www.rpubs.com/dksmith01/cars
# - data source we use here: [https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data](https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data-original)
# Field description:
# 1. mpg: _continuous_
# 2. cylinders: _multi-valued discrete_
# 3. displacement: _continuous_
# 4. horsepower: _continuous_
# 5. weight: _continuous_
# 6. acceleration: _continuous_
# 7. model year: _multi-valued discrete_
# 8. origin: _multi-valued discrete_
# 9. car name: _string (unique for each instance)_
# The car name is not used in this tutorial, so that the country is inferred only from technical data. As this field also includes the car maker, and there are several car models from the same car maker, a more sophisticated machine learning model could exploit this information, e.g. using a bag-of-words encoding.
using Dates #src
println(now(), " ", "*** Starting car classification tutorial..." ) #src
# ## Library loading and initialisation
# Activating the local environment specific to BetaML documentation
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
# We load a bunch of packages that we'll use during this tutorial..
using Random, HTTP, Plots, CSV, DataFrames, BenchmarkTools, StableRNGs, BetaML
import DecisionTree, Flux
import Pipe: @pipe
using Test #src
println(now(), " - getting the data..." ) #src
# Machine Learning workflows include stochastic components in several steps: in the data sampling, in the model initialisation and often in the models' own algorithms (and sometimes also in the prediction step).
# BetaML provides a random number generator (RNG) in order to simplify reproducibility ([`FIXEDRNG`](@ref BetaML.Utils.FIXEDRNG). This is nothing else than an instance of `StableRNG(123)` defined in the [`BetaML.Utils`](@ref utils_module) sub-module, but you can of course choose your own "fixed" RNG). See the [Dealing with stochasticity](@ref stochasticity_reproducibility) section in the [Getting started](@ref getting_started) tutorial for details.
# Here we are explicit and we use our own fixed RNG:
seed = 123 # The table at the end of this tutorial has been obtained with seeds 123, 1000 and 10000
AFIXEDRNG = StableRNG(seed)
# ## Data loading and preparation
# To load the data from the internet our workflow is
# (1) Retrieve the data --> (2) Clean it --> (3) Load it --> (4) Output it as a DataFrame.
# For step (1) we use `HTTP.get()`, for step (2) we use `replace!`, for steps (3) and (4) we use the `CSV` package, and we use the "pipe" `|>` operator to chain these operations, so that no file is ever saved on disk:
urlDataOriginal = "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data-original"
data = @pipe HTTP.get(urlDataOriginal).body |>
replace!(_, UInt8('\t') => UInt8(' ')) |> # the original dataset has mixed field delimiters !
CSV.File(_, delim=' ', missingstring="NA", ignorerepeated=true, header=false) |>
DataFrame;
println(now(), " ", "- data wrangling..." ) #src
# This results in a table where the rows are the observations (the various cars' models) and the column the fields. All BetaML models expect this layout.
# As the dataset is ordered, we randomly shuffle the data.
idx = randperm(copy(AFIXEDRNG),size(data,1))
data = data[idx, :]
describe(data)
# Columns 1 to 7 contain characteristics of the car, while column 8 encodes the country or origin ("1" -> US, "2" -> EU, "3" -> Japan). That's the variable we want to be able to predict.
# Columns 9 contains the car name, but we are not going to use this information in this tutorial.
# Note also that some fields have missing data.
# Our first step is hence to divide the dataset in features (the x) and the labels (the y) we want to predict. The `x` is then a Julia standard `Matrix` of 406 rows by 7 columns and the `y` is a vector of the 406 observations:
x = Matrix{Union{Missing,Float64}}(data[:,1:7]);
y = Vector{Int64}(data[:,8]);
x = fit!(Scaler(),x)
# Some algorithms that we will use today don't accept missing data, so we need to _impute_ them. BetaML provides several imputation models in the [`Imputation`](@ref) module. Note that many of these imputation models can be used for Collaborative Filtering / Recommendation Systems. Models such as [`GaussianMixtureImputer`](@ref) have the advantage over traditional algorithms such as k-nearest neighbors (KNN) that they can "detect" the hidden structure of the observed data, where some observations can be similar to a certain pool of other observations for a certain characteristic, but similar to another pool of observations for other characteristics.
# Here we use [`RandomForestImputer`](@ref). While the model allows for reproducible multiple imputations (with the parameter `multiple_imputation=an_integer`) and multiple passages through the various columns (fields) containing missing data (with the option `recursive_passages=an_integer`), we use here just a single imputation and a single passage.
# As all `BetaML` models, `RandomForestImputer` follows the pattern `m=ModelConstruction(pars); fit!(m,x,[y]); est = predict(m,x)` where `est` can be an estimation of some labels or some characteristics of x itself (the imputed version, as in this case, or a reprojected version, as in [`PCAEncoder`](@ref)), depending on whether the model is supervised or not. See the [API user documentation](@ref api_usage) for more details.
# For imputers, the output of `predict` is the matrix with the imputed values replacing the missing ones, and we write here the model in a single line using a convenience feature: when the default `cache` parameter is used in the model constructor, the `fit!` function itself returns the prediction over the trained data:
x = fit!(RandomForestImputer(rng=copy(AFIXEDRNG)),x) # Same as `m = RandomForestImputer(rng=copy(AFIXEDRNG)); fit!(m,x); x= predict(m,x)`
# Further, some models don't work with categorical data, so we need to represent our `y` as a matrix with a separate column for each possible categorical value (the so called "one-hot" representation).
# For example, within a three classes field, the individual value `2` (or `"Europe"` for what it matters) would be represented as the vector `[0 1 0]`, while `3` (or `"Japan"`) would become the vector `[0 0 1]`.
# To encode as one-hot we use the [`OneHotEncoder`](@ref) in [`BetaML.Utils`](@ref utils_module), using the same shortcut as for the imputer we used earlier:
y_oh = fit!(OneHotEncoder(),y)
# In supervised machine learning it is good practice to partition the available data in a _training_, _validation_, and _test_ subsets, where the first one is used to train the ML algorithm, the second one to train any eventual "hyper-parameters" of the algorithm and the _test_ subset is finally used to evaluate the quality of the algorithm.
# Here, for brevity, we use only the _train_ and the _test_ subsets, implicitly assuming we already know the best hyper-parameters. Please refer to the [regression tutorial](@ref regression_tutorial) for examples of the auto-tune feature of BetaML models to "automatically" train the hyper-parameters (hint: in most cases just add the parameter `autotune=true` in the model constructor), or the [clustering tutorial](@ref clustering_tutorial) for an example of using the [`cross_validation`](@ref) function to do it manually.
# We then use the [`partition`](@ref) function in [BetaML.Utils](@ref utils_module), where we can specify the different data to partition (each matrix or vector to partition must have the same number of observations) and the shares of observations that we want in each subset. Here we keep 80% of observations for training (`xtrain`, and `ytrain`) and we use 20% of them for testing (`xtest`, and `ytest`):
((xtrain,xtest),(ytrain,ytest),(ytrain_oh,ytest_oh)) = partition([x,y,y_oh],[0.8,1-0.8],rng=copy(AFIXEDRNG));
# We finally set up a dataframe to store the accuracies of the various models we'll use.
results = DataFrame(model=String[],train_acc=Float64[],test_acc=Float64[])
# ## Random Forests
println(now(), " ", "- random forests..." ) #src
# We are now ready to use our first model, the [`RandomForestEstimator`](@ref). Random Forests build a "forest" of decision tree models and then average their predictions in order to make an overall prediction, whether for a regression or a classification task.
# While here the missing data has been imputed and the dataset is comprised of only numerical values, one attractive feature of the BetaML `RandomForestEstimator` is that it can work directly with missing and categorical data without any prior processing required.
# However as the labels are encoded using integers, we need also to specify the parameter `force_classification=true`, otherwise the model would undergo a _regression_ job instead.
rfm = RandomForestEstimator(force_classification=true, rng=copy(AFIXEDRNG))
# Unlike the `RandomForestImputer` and `OneHotEncoder` models used earlier, to train a `RandomForestEstimator` model we need to provide it with both the training feature matrix and the associated "true" training labels. We use the same shortcut to get the training predictions directly from the `fit!` function. In this case the predictions correspond to the labels:
ŷtrain = fit!(rfm,xtrain,ytrain)
# You can notice that for each record the result is reported in terms of a dictionary with the possible categories and their associated probabilities.
# !!! warning
#     Only categories with non-zero probabilities are reported for each record and, being a dictionary, the order of the categories is undefined
# For example `ŷtrain[1]` is a `Dict(2 => 0.0333333, 3 => 0.933333, 1 => 0.0333333)`, indicating an overwhelming probability that that car model originates from Japan.
# To retrieve the predictions with the highest probabilities use `mode(ŷ)`:
ŷtrain_top = mode(ŷtrain,rng=copy(AFIXEDRNG))
# Why does `mode` (optionally) take an RNG? I leave the answer to you :-)
# To obtain the predicted labels for the test set we simply run the `predict` function over the features of the test set:
ŷtest = predict(rfm,xtest)
# Finally we can measure the _accuracy_ of our predictions with the [`accuracy`](@ref) function. We don't need to explicitly use `mode`, as `accuracy` does it itself when it is passed with predictions expressed as a dictionary:
trainAccuracy,testAccuracy = accuracy.([ytrain,ytest],[ŷtrain,ŷtest],rng=copy(AFIXEDRNG))
#src (0.9969230769230769, 0.8271604938271605) without autotuning, (0.8646153846153846, 0.7530864197530864) with it
@test testAccuracy > 0.70 #src
# We are now ready to store our first model accuracies in the `results` dataframe:
push!(results,["RF",trainAccuracy,testAccuracy]);
# The predictions are quite good: for the training set the algorithm predicted almost all cars' origins correctly, while for the testing set (i.e. those records that have **not** been used to train the algorithm), the correct prediction level is still quite high, at around 80% (depending on the random seed)
# While accuracy can sometimes suffice, we may often want to better understand which categories our model has trouble predicting correctly.
# We can investigate the output of a multi-class classifier in more depth with a [`ConfusionMatrix`](@ref) where the true values (`y`) are given in rows and the predicted ones (`ŷ`) in columns, together with some per-class metrics like the _precision_ (true class _i_ over predicted in class _i_), the _recall_ (predicted class _i_ over the true class _i_) and others.
# We first build the [`ConfusionMatrix`](@ref) model, we train it with `y` and `ŷ` and then we print it (we do it here for the test subset):
cfm = ConfusionMatrix(categories_names=Dict(1=>"US",2=>"EU",3=>"Japan"),rng=copy(AFIXEDRNG))
fit!(cfm,ytest,ŷtest) # the output is by default the confusion matrix in relative terms
print(cfm)
# From the report we can see that Japanese cars have more trouble in being correctly classified, and in particular many Japanese cars are classified as US ones. This is likely a result of the class imbalance of the data set, and could be solved by balancing the dataset with various sampling techniques before training the model.
# If you prefer a more graphical approach, we can also plot the confusion matrix. In order to do so, we pick up information from the `info(cfm)` function. Indeed most BetaML models can be queried with `info(model)` to retrieve additional information, in terms of a dictionary, that is not necessary to the prediction, but could still be relevant. Other functions that you can use with BetaML models are `parameters(m)` and `hyperparameters(m)`.
res = info(cfm)
heatmap(string.(res["categories"]),string.(res["categories"]),res["normalised_scores"],seriescolor=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix (normalised scores)")
#src # When we benchmark the resourse used (time and memory) we find that Random Forests remain pretty fast, expecially when we compare them with neural networks.
#src # @btime buildForest(xtrain,ytrain,30, rng=copy(AFIXEDRNG),force_classification=true);
#src # 134.096 ms (781027 allocations: 196.30 MiB)
# ### Comparison with DecisionTree.jl
println(now(), " ", "- DecisionTree.jl..." ) #src
# We now compare the BetaML [`RandomForestEstimator`](@ref) with the random forest estimator of the package [`DecisionTree.jl`](https://github.com/JuliaAI/DecisionTree.jl). The two are similar in usage: we first "build" (train) the forest and we then make predictions out of the trained model.
#src # They are much faster than [`RandomForestEstimator`], but they don't work with missing or fully categorical (unordered) data. As we will see the accuracy is roughly the same, if not a bit lower.
## We train the model...
model = DecisionTree.build_forest(ytrain, xtrain,rng=seed)
## ..and we generate predictions and measure their error
(ŷtrain,ŷtest) = DecisionTree.apply_forest.([model],[xtrain,xtest]);
(trainAccuracy,testAccuracy) = accuracy.([ytrain,ytest],[ŷtrain,ŷtest])
#src (0.9846153846153847, 0.8518518518518519)
push!(results,["RF (DecisionTree.jl)",trainAccuracy,testAccuracy]);
#src nothing; cm = ConfusionMatrix(ŷtest,ytest,classes=[1,2,3],labels=["US","EU","Japan"])
#src nothing; println(cm)
@test testAccuracy > 0.70 #src
# While the accuracy on the training set is almost the same as for the `BetaML` random forests, `DecisionTree.jl` random forests are slightly less accurate in the testing sample.
# Where however `DecisionTree.jl` excels is in efficiency: it is extremely fast and memory-thrifty, even if we should also consider the resources needed to impute the missing values, as it doesn't work with missing data.
# Also, one of the reasons `DecisionTree.jl` is so efficient is that internally the data is sorted to avoid repeated comparisons, but in this way it works only with features that are sortable, while BetaML random forests accept virtually any kind of input without the need to process it.
#src @btime DecisionTree.build_forest(ytrain, xtrain_full,-1,30,rng=123);
#src 1.431 ms (10875 allocations: 1.52 MiB)
# ### Neural network
println(now(), " ", "- neutal networks..." ) #src
# Neural networks (NN) can be very powerful, but have two "inconveniences" compared with random forests: first, they are a bit "picky": we need to do a bit of work to provide data in a specific format. Note that this is _not_ feature engineering. One of the advantages of neural networks is that for the most part feature engineering is not needed. However we still need to "clean" the data. One issue is that NN don't like missing data, so we need to provide them with a feature matrix "clean" of missing values. Secondly, they work only with numerical data, so we need to use the one-hot encoding we saw earlier.
# Further, they work best if the features are scaled such that each feature has mean zero and standard deviation 1. This is why we scaled the data back at the beginning of this tutorial.
# We first measure the dimensions of our data in input (i.e. the columns of the feature matrix) and the dimensions of our output, i.e. the number of categories or columns in our one-hot encoded y.
D = size(xtrain,2)
classes = unique(y)
nCl = length(classes)
# The second "inconvenience" of NN is that, while not requiring feature engineering, they still need a bit of practice in the way the structure of the network is built. It's not as simple as `fit!(Model(),x,y)` (although BetaML provides a "default" neural network structure that can be used, it often isn't adapted to the specific task). We need instead to specify how we want our layers, _chain_ the layers together and then choose an overall _loss_ function. Only when we have done these steps do we have the model ready for training.
# Here we define 2 [`DenseLayer`](@ref)s where, for each of them, we specify the number of neurons in input (for the first layer equal to the dimensions of the data), the number of neurons in output (for a classification task, the last layer's output size being equal to the number of classes) and an _activation function_ for each layer (by default the `identity` function).
ls = 50 # number of neurons in the inner layer
l1 = DenseLayer(D,ls,f=relu,rng=copy(AFIXEDRNG))
l2 = DenseLayer(ls,nCl,f=relu,rng=copy(AFIXEDRNG))
# For a classification task, the last layer is a [`VectorFunctionLayer`](@ref) that has no learnable parameters but whose activation function is applied to the ensemble of the neurons, rather than individually on each neuron. In particular, for classification we pass the [`softmax`](@ref) function whose output has the same size as the input (i.e. the number of classes to predict), but we can use the `VectorFunctionLayer` with any function, including the [`pool1d`](@ref) function to create a "pooling" layer (using maximum, mean or whatever other sub-function we pass to `pool1d`)
l3 = VectorFunctionLayer(nCl,f=softmax) ## Add a (parameterless) layer whose activation function (softmax in this case) is defined to all its nodes at once
# Finally we _chain_ the layers and assign a loss function and the number of epochs we want to train the model to the constructor of [`NeuralNetworkEstimator`](@ref):
nn = NeuralNetworkEstimator(layers=[l1,l2,l3],loss=crossentropy,rng=copy(AFIXEDRNG),epochs=500)
# Aside from the layer structure and size and the number of epochs, other hyper-parameters you may want to try are the `batch_size` and the optimisation algorithm to employ (`opt_alg`).
# Now we can train our network:
ŷtrain = fit!(nn, xtrain, ytrain_oh)
# Predictions are in the form of an _n_records_ by _n_classes_ matrix of the probabilities of each record being in that class. To retrieve the classes with the highest probabilities we can use again the `mode` function:
ŷtrain_top = mode(ŷtrain)
# Once trained, we can predict the test labels. As the training was based on the scaled feature matrix, so must be the predictions
ŷtest = predict(nn,xtest)
# And finally we can measure the accuracies and store them in the `results` dataframe:
trainAccuracy, testAccuracy = accuracy.([ytrain,ytest],[ŷtrain,ŷtest],rng=copy(AFIXEDRNG))
#src (0.8923076923076924, 0.7654320987654321
push!(results,["NN",trainAccuracy,testAccuracy]);
#-
@test testAccuracy > 0.70 #src
cfm = ConfusionMatrix(categories_names=Dict(1=>"US",2=>"EU",3=>"Japan"),rng=copy(AFIXEDRNG))
fit!(cfm,ytest,ŷtest)
print(cfm)
res = info(cfm)
heatmap(string.(res["categories"]),string.(res["categories"]),res["normalised_scores"],seriescolor=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix (normalised scores)")
# While accuracies are a bit lower, the distribution of misclassification is similar, with many Japanese cars misclassified as US ones (here we also have some EU cars misclassified as Japanese ones).
# ### Comparisons with Flux
println(now(), " ", "- Flux.jl..." ) #src
# As we did for Random Forests, we compare BetaML neural networks with the leading package for deep learning in Julia, [`Flux.jl`](https://github.com/FluxML/Flux.jl).
# In Flux the input must be in the form (fields, observations), so we transpose our original matrices
xtrainT, ytrain_ohT = transpose.([xtrain, ytrain_oh])
xtestT, ytest_ohT = transpose.([xtest, ytest_oh])
# We define the Flux neural network model in a similar way to BetaML, load it with data, train it, predict and measure the accuracies on the training and the test sets:
#src function poolForFlux(x,wsize=5)
#src hcat([pool1d(x[:,i],wsize;f=maximum) for i in 1:size(x,2)]...)
#src end
# We fix the random seed for Flux, altough you may still get different results depending on the number of threads used.. this is a problem we solve in BetaML with [`generate_parallel_rngs`](@ref).
Random.seed!(seed)
l1 = Flux.Dense(D,ls,Flux.relu)
l2 = Flux.Dense(ls,nCl,Flux.relu)
Flux_nn = Flux.Chain(l1,l2)
fluxloss(x, y) = Flux.logitcrossentropy(Flux_nn(x), y)
ps = Flux.params(Flux_nn)
nndata = Flux.Data.DataLoader((xtrainT, ytrain_ohT),shuffle=true)
begin for i in 1:500 Flux.train!(fluxloss, ps, nndata, Flux.ADAM()) end end
ŷtrain = Flux.onecold(Flux_nn(xtrainT),1:3)
ŷtest = Flux.onecold(Flux_nn(xtestT),1:3)
trainAccuracy, testAccuracy = accuracy.([ytrain,ytest],[ŷtrain,ŷtest])
#-
push!(results,["NN (Flux.jl)",trainAccuracy,testAccuracy]);
#src 0.9384615384615385, 0.7283950617283951
# While the train accuracy is a little bit higher than BetaML's, the test accuracy remains comparable
@test testAccuracy > 0.65 #src
#src # However the time is again lower than BetaML, even if here for "just" a factor 2
#src # @btime begin for i in 1:500 Flux.train!(loss, ps, nndata, Flux.ADAM()) end end;
#src # 5.665 s (8943640 allocations: 1.07 GiB)
# ## Perceptron-like classifiers.
println(now(), " ", "- perceptrons-like classifiers..." ) #src
# We finally test 3 "perceptron-like" classifiers, the "classical" Perceptron ([`PerceptronClassifier`](@ref)), one of the first ML algorithms (a linear classifier), a "kernelised" version of it ([`KernelPerceptronClassifier`](@ref), which defaults to using the radial kernel) and the Pegasos classifier ([`PegasosClassifier`](@ref)), another linear algorithm that moves toward a gradient-based optimisation, although without the regularisation term used in Support Vector Machines (SVM).
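# As a reminder of the textbook formulations (the BetaML implementations add several options on top of these): with classes coded as $y_n \in \{-1,+1\}$ and an intercept included in $x_n$, the classical perceptron updates its weights only on misclassified records, i.e. whenever $y_n (w \cdot x_n) \leq 0$ it sets $w \leftarrow w + y_n x_n$; the kernelised version replaces the dot product with a kernel evaluation on the stored training records, while Pegasos performs instead (sub)gradient-based updates with a decreasing learning rate.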
# As for the previous classifiers, we construct the model objects, train and predict, and compute the train and test accuracies:
pm = PerceptronClassifier(rng=copy(AFIXEDRNG))
ŷtrain = fit!(pm, xtrain, ytrain)
ŷtest = predict(pm, xtest)
(trainAccuracy,testAccuracy) = accuracy.([ytrain,ytest],[ŷtrain,ŷtest])
#src (0.7784615384615384, 0.7407407407407407) without autotune, (0.796923076923077, 0.7777777777777778) with it
push!(results,["Perceptron",trainAccuracy,testAccuracy]);
kpm = KernelPerceptronClassifier(rng=copy(AFIXEDRNG))
ŷtrain = fit!(kpm, xtrain, ytrain)
ŷtest = predict(kpm, xtest)
(trainAccuracy,testAccuracy) = accuracy.([ytrain,ytest],[ŷtrain,ŷtest])
#src (0.9661538461538461, 0.6790123456790124) without autotune, (1.0, 0.7037037037037037) with it
push!(results,["KernelPerceptronClassifier",trainAccuracy,testAccuracy]);
pegm = PegasosClassifier(rng=copy(AFIXEDRNG))
ŷtrain = fit!(pegm, xtrain, ytrain)
ŷtest = predict(pegm, xtest)
(trainAccuracy,testAccuracy) = accuracy.([ytrain,ytest],[ŷtrain,ŷtest])
#src (0.6984615384615385, 0.7407407407407407) without autotune, (0.6615384615384615, 0.7777777777777778) with it
push!(results,["Pegasaus",trainAccuracy,testAccuracy]);
# ## Summary
# This is the summary of the results we had trying to predict the country of origin of the cars, based on their technical characteristics:
println(results)
# If you clone the BetaML repository you can re-run this script yourself with different seeds.
# Model accuracies on my machine with seeds 123, 1000 and 10000 respectively:
# | model | train 1 | test 1 | train 2 | test 2 | train 3 | test 3 |
# | --------------------- | --------- | -------- | -------- | -------- | -------- | -------- |
# | RF | 0.996923 | 0.765432 | 1.000000 | 0.802469 | 1.000000 | 0.888889 |
# | RF (DecisionTrees.jl) | 0.975385 | 0.765432 | 0.984615 | 0.777778 | 0.975385 | 0.864198 |
# | NN | 0.886154 | 0.728395 | 0.916923 | 0.827160 | 0.895385 | 0.876543 |
# | NN (Flux.jl)          | 0.793846 | 0.654321 | 0.938462 | 0.790123 | 0.935385 | 0.851852 |
# | Perceptron            | 0.778462 | 0.703704 | 0.720000 | 0.753086 | 0.670769 | 0.654321 |
# | KernelPerceptronClassifier | 0.987692 | 0.703704 | 0.978462 | 0.777778 | 0.944615 | 0.827160 |
# | Pegasos               | 0.732308 | 0.703704 | 0.633846 | 0.753086 | 0.575385 | 0.654321 |
# We warn that this table just provides a rough idea of the various algorithms' performances. Indeed there is a large amount of stochasticity both in the sampling of the data used for training/testing and in the initial settings of the parameters of the algorithms. For a statistically significant comparison we would have to repeat the analysis with multiple samplings (e.g. by cross-validation, see the [clustering tutorial](@ref clustering_tutorial) for an example) and different initial random parameters.
# Nevertheless the table above shows that, when we compare BetaML with the algorithm-specific leading packages, we find similar results in terms of accuracy, but often the leading packages are better optimised and run more efficiently (sometimes at the cost of being less versatile).
# Also, for this dataset, Random Forests seem to remain marginally more accurate than Neural Networks, although of course this depends on the hyper-parameters and, with a single run of the models, we don't know if this difference is significant.
println(now(), " ", "- DONE classification tutorial..." ) #src | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 24463 | # # [A clustering task: the prediction of plant species from floral measurements (the iris dataset)](@id clustering_tutorial)
# The task is to estimate the species of a plant given some floral measurements. It uses the classical "Iris" dataset.
# Note that in this example we are using clustering approaches, so we try to understand the "structure" of our data without relying on actually knowing the true labels ("classes" or "factors"). However we have chosen a dataset for which the true labels are actually known, so that we can compare the accuracy of the algorithms we use, but these labels will not be used during the algorithms' training.
#
# Data origin:
# - dataset description: [https://en.wikipedia.org/wiki/Iris_flower_data_set](https://en.wikipedia.org/wiki/Iris_flower_data_set)
# - data source we use here: [https://github.com/JuliaStats/RDatasets.jl](https://github.com/JuliaStats/RDatasets.jl)
# ## Library and data loading
using Dates #src
println(now(), " ", "*** Start iris clustering tutorial..." ) #src
# Activating the local environment specific to BetaML documentation
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
# We load the Beta Machine Learning Toolkit as well as some other packages that we use in this tutorial
using BetaML
using Random, Statistics, Logging, BenchmarkTools, StableRNGs, RDatasets, Plots, DataFrames
# We are also going to compare our results with two other leading packages in Julia for clustering analysis, [`Clustering.jl`](https://github.com/JuliaStats/Clustering.jl) that provides (inter alia) kmeans and kmedoids algorithms and [`GaussianMixtures.jl`](https://github.com/davidavdav/GaussianMixtures.jl) that provides, as the name says, Gaussian Mixture Models. So we import them (we "import" them, rather than "use" them, so as not to bind their full names into the namespace, as some would collide with BetaML).
import Clustering, GaussianMixtures
using Test #src
# Here we are explicit and we use our own fixed RNG:
seed = 123 # The table at the end of this tutorial has been obtained with seeds 123, 1000 and 10000
AFIXEDRNG = StableRNG(seed)
# We do a few tweaks for the Clustering and GaussianMixtures packages. Note that in BetaML we can also control both the random seed and the verbosity in the algorithm call, not only globally
Random.seed!(seed)
#logger = Logging.SimpleLogger(stdout, Logging.Error); global_logger(logger); ## For suppressing GaussianMixtures output
println(now(), " ", "- data wrangling..." ) #src
# Differently from the [regression tutorial](@ref regression_tutorial), we load the data here from [`RDatasets`](https://github.com/JuliaStats/RDatasets.jl), a package providing standard datasets.
iris = dataset("datasets", "iris")
describe(iris)
# The iris dataset provides floral measurements in columns 1 to 4 and the assigned species name in column 5. There are no missing values.
# ## Data preparation
# The first step is to prepare the data for the analysis. We collect the first 4 columns as our _feature_ `x` matrix and the last one as our `y` label vector.
# As we are using clustering algorithms, we are not actually using the labels to train the algorithms, we'll behave like we do not know them, we'll just let the algorithm "learn" from the structure of the data itself. We'll however use it to judge the accuracy that the various algorithms reach.
x = Matrix{Float64}(iris[:,1:4]);
yLabels = unique(iris[:,5])
# As the labels are expressed as strings, the first thing we do is to encode them as integers for our analysis using the [`OrdinalEncoder`](@ref) model (the data doesn't actually need to be ordered):
y = fit!(OrdinalEncoder(categories=yLabels),iris[:,5])
# The dataset from RDatasets is ordered by species, so we need to shuffle it to avoid biases.
# Shuffling happens by default in cross_validation, but we are keeping here a copy of the shuffled version for later.
# Note that the version of [`consistent_shuffle`](@ref) that is included in BetaML accepts several n-dimensional arrays and shuffles them (by default on rows, but we can specify the dimension) keeping the association between the various arrays in the shuffled output.
(xs,ys) = consistent_shuffle([x,y], rng=copy(AFIXEDRNG));
# ## Main analysis
println(now(), " ", "- main analysis..." ) #src
# We will try 3 BetaML models ([`KMeansClusterer`](@ref), [`KMedoidsClusterer`](@ref) and [`GaussianMixtureClusterer`](@ref)) and we compare them with `kmeans` from Clustering.jl and `GMM` from GaussianMixtures.jl
#
# `KMeansClusterer` and `KMedoidsClusterer` work by first initialising the centers of the k clusters (step a). These centers, also known as the "representatives", must be selected within the data for kmedoids, while for kmeans they are the geometrical centers.
#
# Then (step b) the algorithms iterate over each point to assign it to the cluster of the closest representative (according to a user-defined distance metric, defaulting to the Euclidean one), and (step c) move each representative to the center of its newly acquired cluster (where "center" depends again on the metric).
#
# Steps _b_ and _c_ are reiterated until the algorithm converges, i.e. until the tentative k representative points (and their relative clusters) don't move any more. The result (output of the algorithm) is that each point is assigned to one of the clusters (classes).
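# To make steps _a_, _b_ and _c_ more concrete, here is a minimal, purely illustrative k-means sketch (this is _not_ the BetaML implementation, which among other things supports custom distance functions and several initialisation strategies):
# ```
# using Statistics
# function simple_kmeans(X, K; niters=100)
#     N = size(X,1)
#     reps = X[rand(1:N,K),:]            # (a) initialise the representatives (here from random points)
#     assignments = ones(Int,N)
#     for _ in 1:niters
#         for n in 1:N                   # (b) assign each point to the cluster of the closest representative
#             assignments[n] = argmin([sum(abs2, X[n,:] .- reps[k,:]) for k in 1:K])
#         end
#         for k in 1:K                   # (c) move each representative to the centre of its cluster
#             members = X[assignments .== k,:]
#             isempty(members) || (reps[k,:] = vec(mean(members,dims=1)))
#         end
#     end
#     return assignments, reps
# end
# ```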
#
# The algorithm in `GaussianMixtureClusterer` is similar in that it employs an iterative approach (the Expectation-Maximisation algorithm, "em"), but here we make the hypothesis that the data points are the observed outcomes of some _mixture_ probabilistic model, where we have first a k-categorical variable whose outcomes are the (unobservable) parameters of a probabilistic distribution from which the data is finally drawn. Because the parameters of each of the k possible distributions are unobservable, this is also called a model with latent variables.
#
# Most `gmm` models use the Gaussian distribution as the family of the mixture components, so we can take the `gmm` acronym to indicate _Gaussian Mixture Model_. In BetaML we have currently implemented only Gaussian components, but any distribution could be used by just subclassing `AbstractMixture` and implementing a couple of methods (you are invited to contribute or just ask for a distribution family you are interested in), so I prefer to think of "gmm" as an acronym for _Generative Mixture Model_.
#
# The algorithm tries to find the mixture that maximises the likelihood that the data has indeed been generated from such a mixture, where the "E" step refers to computing the probability that each point belongs to each of the k components (somehow similar to step _b_ in the kmeans/kmedoids algorithms), and the "M" step estimates, given the association probabilities from step "E", the parameters of the mixture and of the individual components (similar to step _c_).
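# In formulas, calling $\pi_k$ the mixture weights and $f_k$ the density of component $k$ (Gaussian in our case), the "E" step computes for each point $x_n$ the responsibilities $p_{nk} = \frac{\pi_k f_k(x_n)}{\sum_{j=1}^{K} \pi_j f_j(x_n)}$, while the "M" step re-estimates $\pi_k$ and the parameters of each $f_k$ as responsibility-weighted averages of the data.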
#
# The result here is that each point has a categorical distribution (PMF) representing the probabilities that it belongs to any of the k components (our classes or clusters). This is interesting, as `gmm` can be used for many other things than clustering. It forms the backbone of the [`GaussianMixtureImputer`](@ref) model to impute missing values (on some or all dimensions) based on how close the record seems to its peers. For the same reasons, `GaussianMixtureImputer` can also be used to predict users' behaviours (or users' appreciation) according to the behaviour/ranking made by peers ("collaborative filtering").
#
# While the result of `GaussianMixtureClusterer` is a vector of PMFs (one for each record), error measures and reports with the true values (if known) can be directly applied, as in BetaML they internally call `mode()` to retrieve the class with the highest probability for each record.
#
#
# As we are here, we also try different versions of the BetaML models, even if the default "versions" should be fine. For `KMeansClusterer` and `KMedoidsClusterer` we will try different initialisation strategies ("grid", the default one, "random" and "shuffle"), while for the `GaussianMixtureClusterer` model we'll choose different distributions of the Gaussian family (`SphericalGaussian` - where the variance is a scalar, `DiagonalGaussian` - with a vector variance, and `FullGaussian`, where the covariance is a matrix).
#
# As the result would depend on stochasticity both in the data selected and in the random initialisation, we use a cross-validation approach to run our models several times (with different data) and then we average their results.
# Cross-Validation in BetaML is very flexible and it is done using the [`cross_validation`](@ref) function. It is used by default for hyperparameters autotuning of the BetaML supervised models.
# `cross_validation` works by calling the function `f`, defined by the user, passing to it the tuple `trainData`, `valData` and `rng`, and collecting the results of the function `f`. The specific way in which `trainData` and `valData` are selected at each iteration depends on the specific `sampler`.
#
# We start by selecting a k-fold sampler that splits our data in 5 different parts, using 4 of them for training and 1 part (not used here) for validation. We repeat the sampling three times and, to be sure to have replicable results, we fix the random seed (at the whole cross-validation level, not on each iteration).
sampler = KFold(nsplits=5,nrepeats=3,shuffle=true, rng=copy(AFIXEDRNG))
# We can now run the cross-validation with our models. Note that instead of defining the function `f` and then calling `cross_validation(f,[x,y],...)` we use the Julia `do` block syntax and we write directly the content of the `f` function in the `do` block.
# Also, by default `cross_validation` already returns the mean and the standard deviation of the output of the user-provided `f` function (or the `do` block). However this requires that the `f` function returns a single scalar. Here we are returning a vector of the accuracies of the different models (so we can run the cross-validation only once), and hence we use `return_statistics=false` to tell `cross_validation` not to attempt to generate statistics but rather to report the whole output.
# We'll compute the statistics ex-post.
# Inside the `do` block we do 4 things:
# - we recover from `trainData` (a tuple, as we passed a tuple to `cross_validation` too) the `xtrain` features and `ytrain` labels;
# - we run the various clustering algorithms
# - we use the real labels to compute the model accuracy. Note that the clustering algorithms know nothing about the specific label names or even their order. This is why [`accuracy`](@ref) has the parameter `ignorelabels` to compute the accuracy over any possible permutation of the classes found.
# - we return the various models' accuracies
cOut = cross_validation([x,y],sampler,return_statistics=false) do trainData,testData,rng
## For unsupervised learning we use only the train data.
## Also, we use the associated labels only to measure the performances
(xtrain,ytrain) = trainData;
## We run the clustering algorithm and then and we compute the accuracy using the real labels:
estcl = fit!(KMeansClusterer(n_classes=3,initialisation_strategy="grid",rng=rng),xtrain)
kMeansGAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(KMeansClusterer(n_classes=3,initialisation_strategy="random",rng=rng),xtrain)
kMeansRAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(KMeansClusterer(n_classes=3,initialisation_strategy="shuffle",rng=rng),xtrain)
kMeansSAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(KMedoidsClusterer(n_classes=3,initialisation_strategy="grid",rng=rng),xtrain)
kMedoidsGAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(KMedoidsClusterer(n_classes=3,initialisation_strategy="random",rng=rng),xtrain)
kMedoidsRAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(KMedoidsClusterer(n_classes=3,initialisation_strategy="shuffle",rng=rng),xtrain)
kMedoidsSAccuracy = accuracy(ytrain,estcl,ignorelabels=true)
estcl = fit!(GaussianMixtureClusterer(n_classes=3,mixtures=SphericalGaussian,rng=rng,verbosity=NONE),xtrain)
gmmSpherAccuracy = accuracy(ytrain,estcl,ignorelabels=true, rng=rng)
estcl = fit!(GaussianMixtureClusterer(n_classes=3,mixtures=DiagonalGaussian,rng=rng,verbosity=NONE),xtrain)
gmmDiagAccuracy = accuracy(ytrain,estcl,ignorelabels=true, rng=rng)
estcl = fit!(GaussianMixtureClusterer(n_classes=3,mixtures=FullGaussian,rng=rng,verbosity=NONE),xtrain)
gmmFullAccuracy = accuracy(ytrain,estcl,ignorelabels=true, rng=rng)
    ## For comparison with Clustering.jl
clusteringOut = Clustering.kmeans(xtrain', 3)
kMeans2Accuracy = accuracy(ytrain,clusteringOut.assignments,ignorelabels=true)
    ## For comparison with GaussianMixtures.jl - sometimes GaussianMixtures.jl em! fails with a PosDefException
dGMM = GaussianMixtures.GMM(3, xtrain; method=:kmeans, kind=:diag)
GaussianMixtures.em!(dGMM, xtrain)
gmmDiag2Accuracy = accuracy(ytrain,GaussianMixtures.gmmposterior(dGMM, xtrain)[1],ignorelabels=true)
fGMM = GaussianMixtures.GMM(3, xtrain; method=:kmeans, kind=:full)
GaussianMixtures.em!(fGMM, xtrain)
gmmFull2Accuracy = accuracy(ytrain,GaussianMixtures.gmmposterior(fGMM, xtrain)[1],ignorelabels=true)
## Returning the accuracies
return kMeansGAccuracy,kMeansRAccuracy,kMeansSAccuracy,kMedoidsGAccuracy,kMedoidsRAccuracy,kMedoidsSAccuracy,gmmSpherAccuracy,gmmDiagAccuracy,gmmFullAccuracy,kMeans2Accuracy,gmmDiag2Accuracy,gmmFull2Accuracy
end
## We transform the output in matrix for easier analysis
accuracies = fill(0.0,(length(cOut),length(cOut[1])))
[accuracies[r,c] = cOut[r][c] for r in 1:length(cOut),c in 1:length(cOut[1])]
μs = mean(accuracies,dims=1)
σs = std(accuracies,dims=1)
@test all(μs .> 0.7) #src
@test μs[1] > 0.89 && μs[4] > 0.89 && μs[9] > 0.96 #src
modelLabels=["kMeansG","kMeansR","kMeansS","kMedoidsG","kMedoidsR","kMedoidsS","gmmSpher","gmmDiag","gmmFull","kMeans (Clustering.jl)","gmmDiag (GaussianMixtures.jl)","gmmFull (GaussianMixtures.jl)"]
report = DataFrame(mName = modelLabels, avgAccuracy = dropdims(round.(μs',digits=3),dims=2), stdAccuracy = dropdims(round.(σs',digits=3),dims=2))
# Accuracies (mean and its standard dev.) running this scripts with different random seeds (`123`, `1000` and `10000`):
#
# | model                         | μ 1   | σ 1   | μ 2   | σ 2   | μ 3   | σ 3   |
# | ------------------------------| ----- | ----- | ----- | ----- | ----- | ----- |
# | kMeansG                       | 0.891 | 0.017 | 0.892 | 0.012 | 0.893 | 0.017 |
# | kMeansR                       | 0.866 | 0.083 | 0.831 | 0.127 | 0.836 | 0.114 |
# | kMeansS                       | 0.764 | 0.174 | 0.822 | 0.145 | 0.779 | 0.170 |
# | kMedoidsG                     | 0.894 | 0.015 | 0.896 | 0.012 | 0.894 | 0.017 |
# | kMedoidsR                     | 0.804 | 0.144 | 0.841 | 0.123 | 0.825 | 0.134 |
# | kMedoidsS                     | 0.893 | 0.018 | 0.834 | 0.130 | 0.877 | 0.085 |
# | gmmSpher                      | 0.893 | 0.016 | 0.891 | 0.016 | 0.895 | 0.017 |
# | gmmDiag                       | 0.917 | 0.022 | 0.912 | 0.016 | 0.916 | 0.014 |
# | gmmFull                       | 0.970 | 0.035 | 0.982 | 0.013 | 0.981 | 0.009 |
# | kMeans (Clustering.jl)        | 0.856 | 0.112 | 0.873 | 0.083 | 0.873 | 0.089 |
# | gmmDiag (GaussianMixtures.jl) | 0.865 | 0.127 | 0.872 | 0.090 | 0.833 | 0.152 |
# | gmmFull (GaussianMixtures.jl) | 0.907 | 0.133 | 0.914 | 0.160 | 0.917 | 0.141 |
#
# We can see that running the script multiple times with different random seeds confirms the estimated standard deviations collected with the cross_validation, with the BetaML GMM-based models and the grid-based ones being the most stable.
#src plot(modelLabels,μs',seriestype=:scatter)
#src yerror=collect(zip(rand(12), rand(12))
# ### BetaML model accuracies
# From the output we see that, for this dataset, the gmm models generally perform better than the kmeans or kmedoids algorithms, and they further have very low variances.
# In detail, it is the (default) `grid` initialisation that leads to the best results for `kmeans` and `kmedoids`, while for the `gmm` models it is the `FullGaussian` that performs best.
# ### Comparisons with `Clustering.jl` and `GaussianMixtures.jl`
# For this specific case, both `Clustering.jl` and `GaussianMixtures.jl` report substantially worse accuracies, and with very high variances, but the ranking Full Gaussian gmm > Diagonal Gaussian gmm > kmeans in terms of accuracy is maintained.
# I suspect that the reason BetaML gmm works so well lies in the usage of the kmeans algorithm for the initialisation of the mixtures, itself initialised with a "grid" approach.
# The grid initialisation indeed "guarantees" that the initial means of the mixture components are well spread across the multidimensional space defined by the data, and it helps the EM algorithm avoid converging to a bad local optimum.
# ## Working without the labels
println(now(), " ", "- BIC based tuning of K..." ) #src
# Up to now we have used the real labels to compare the model accuracies. But in real clustering examples we don't have the true classes, or we wouldn't need to do clustering in the first place, so we don't even know the number of classes to use.
# There are several methods to judge the goodness of clustering algorithms. For likelihood-based algorithms such as `GaussianMixtureClusterer` we can use information criteria that trade off the goodness of the likelihood with the number of parameters used for the fit.
# BetaML provides by default in the gmm clustering outputs both the _Bayesian information criterion_ ([`BIC`](@ref bic)) and the _Akaike information criterion_ ([`AIC`](@ref aic)), where for both a lower value is better.
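# For reference, calling $\hat{L}$ the maximised likelihood of the model, $k$ the number of estimated parameters and $n$ the number of observations, the two criteria are $BIC = k \ln(n) - 2 \ln(\hat{L})$ and $AIC = 2k - 2 \ln(\hat{L})$, so the BIC penalises model complexity more heavily as the sample size grows.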
# We can then run the model with different number of classes and see which one leads to the lower BIC or AIC.
# We run hence `cross_validation` again with the `FullGaussian` gmm model.
# Note that we use the BIC/AIC criteria here for establishing the "best" number of classes, but we could have used them also to select the kind of Gaussian distribution to use. This is one example of hyper-parameter tuning that we developed in more detail using autotuning in the [regression tutorial](@ref regression_tutorial).
# Let's try up to 4 possible classes:
K = 4
sampler = KFold(nsplits=5,nrepeats=2,shuffle=true, rng=copy(AFIXEDRNG))
cOut = cross_validation([x,y],sampler,return_statistics=false) do trainData,testData,rng
(xtrain,ytrain) = trainData;
BICS = []
AICS = []
for k in 1:K
m = GaussianMixtureClusterer(n_classes=k,mixtures=FullGaussian,rng=rng,verbosity=NONE)
fit!(m,xtrain)
push!(BICS,info(m)["BIC"])
push!(AICS,info(m)["AIC"])
end
return (BICS,AICS)
end
## Transforming the output in matrices for easier analysis
Nit = length(cOut)
BICS = fill(0.0,(Nit,K))
AICS = fill(0.0,(Nit,K))
[BICS[r,c] = cOut[r][1][c] for r in 1:Nit,c in 1:K]
[AICS[r,c] = cOut[r][2][c] for r in 1:Nit,c in 1:K]
μsBICS = mean(BICS,dims=1)
#-
σsBICS = std(BICS,dims=1)
#-
μsAICS = mean(AICS,dims=1)
#-
σsAICS = std(AICS,dims=1)
#-
plot(1:K,[μsBICS' μsAICS'], labels=["BIC" "AIC"], title="Information criteria by number of classes", xlabel="number of classes", ylabel="lower is better")
# We see that following the "lowest AIC" rule we would indeed choose three classes, while following the "lowest BIC" criterion we would have chosen only two classes. This means that there are two classes that, concerning the floral measurements used in the database, are very similar, and our models are unsure about them. Perhaps the biologists will end up one day with the conclusion that it is indeed only one species :-).
# We could study this issue in more detail by analysing the [`ConfusionMatrix`](@ref), but the one used in BetaML does not account for the `ignorelabels` option (yet).
#
# ### Analysing the silhouette of the cluster
#
# A further metric to analyse the cluster output is the so-called [Silhouette method](https://en.wikipedia.org/wiki/Silhouette_(clustering))
#
# Silhouette is a distance-based metric and requires as its first argument a matrix of pairwise distances. This can be computed with the [`pairwise`](@ref) function, which defaults to using `l2_distance` (i.e. Euclidean). Many other distance functions are available in the [`Clustering`](@ref) sub-module, or one can use the efficiently implemented distances from the [`Distances`](https://github.com/JuliaStats/Distances.jl) package, as in this example.
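# For each point $i$, calling $a(i)$ the mean distance of $i$ from the other points of its own cluster and $b(i)$ the smallest mean distance of $i$ from the points of any other single cluster, the silhouette score of $i$ is $s(i) = \frac{b(i) - a(i)}{\max(a(i),b(i))}$, a value in $[-1,1]$ where values close to 1 indicate a well-clustered point.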
#
# We'll use here the [`silhouette`](@ref) function over a simple loop:
x,y = consistent_shuffle([x,y],dims=1)
import Distances
pd = pairwise(x,distance=Distances.euclidean) # we compute the pairwise distances
nclasses = 2:6
models = [KMeansClusterer, KMedoidsClusterer, GaussianMixtureClusterer]
println("Silhouette score by model type and class number:")
for ncl in nclasses, mtype in models
m = mtype(n_classes=ncl, verbosity=NONE)
ŷ = fit!(m,x)
if mtype == GaussianMixtureClusterer
ŷ = mode(ŷ)
end
s = mean(silhouette(pd,ŷ))
println("$mtype \t ($ncl classes): $s")
end
# Higher scores are better. We see again that 2 classes have better scores!
#src # ## Benchmarking computational efficiency
#src
#src # We now benchmark the time and memory required by the various models by using the `@btime` macro of the `BenchmarkTools` package:
#src
#src # ```
#src # @btime kmeans($xs,3);
#src # # 261.540 μs (3777 allocations: 442.53 KiB)
#src # @btime kmedoids($xs,3);
#src # 4.576 ms (97356 allocations: 10.42 MiB)
#src # @btime gmm($xs,3,mixtures=[SphericalGaussian() for i in 1:3], verbosity=NONE);
#src # # 5.498 ms (133365 allocations: 8.42 MiB)
#src # @btime gmm($xs,3,mixtures=[DiagonalGaussian() for i in 1:3], verbosity=NONE);
#src # # 18.901 ms (404333 allocations: 25.65 MiB)
#src # @btime gmm($xs,3,mixtures=[FullGaussian() for i in 1:3], verbosity=NONE);
#src # # 49.257 ms (351500 allocations: 61.95 MiB)
#src # @btime Clustering.kmeans($xs', 3);
#src # # 17.071 μs (23 allocations: 14.31 KiB)
#src # @btime begin dGMM = GaussianMixtures.GMM(3, $xs; method=:kmeans, kind=:diag); GaussianMixtures.em!(dGMM, $xs) end;
#src # # 530.528 μs (2088 allocations: 488.05 KiB)
#src # @btime begin fGMM = GaussianMixtures.GMM(3, $xs; method=:kmeans, kind=:full); GaussianMixtures.em!(fGMM, $xs) end;
#src # # 4.166 ms (58910 allocations: 3.59 MiB)
#src # ```
#src # (_note: the values reported here are of a local pc, not of the GitHub CI server, as sometimes - depending on data and random #src initialisation - `GaussainMixtures.em!`` fails with a `PosDefException`. This in turn would lead the whole documentation to fail to #src compile_)
#src
#src # Like for supervised models, dedicated models are much better optimized than BetaML models, and are order of magnitude more #src efficient. However even the slowest BetaML clusering model (gmm using full gaussians) is realtively fast and can handle mid-size #src datasets (tens to hundreds of thousand records) without significant slow downs.
# ## Conclusions
# We have shown in this tutorial how we can easily run clustering algorithms in BetaML with just one line of code, `fit!(ChosenClusterer(),x)`, but also how we can use cross-validation in order to help the model or parameter selection, with or without knowing the real classes.
# We retrieve here what we observed with supervised models. Globally the accuracies of BetaML models are comparable to those of leading specialised packages (in this case they are even better), but there is a significant gap in computational efficiency that restricts the practical usage of BetaML to datasets that fit in the PC memory. However we trade this relative inefficiency with very flexible model definitions and utility functions (for example `GaussianMixtureClusterer` works with missing data, allowing it to be used as the backbone of the [`GaussianMixtureImputer`](@ref) missing-value imputation model, or for collaborative recommendation systems).
println(now(), " ", "- DONE clustering tutorial..." ) #src | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 10593 | # # [Understanding variable importance in black-box machine learning models](@id variable_importance_tutorial)
# Often we want to understand the contribution of the different variables (x columns) to the prediction accuracy of a black-box machine learning model.
# To this end, BetaML 0.12 introduces [`FeatureRanker`](@ref), a flexible variable ranking estimator that employs multiple variable importance metrics.
# `FeatureRanker` helps to determine the importance of features in predictions from any black-box machine learning model (not necessarily the BetaML suit), internally using cross-validation to assess the quality of the predictions (`metric="mda"`), or the contribution of the variable to the variance of the predictions (`metric="sobol"`), with or without a given variable.
# By default, it ranks variables (columns) in a single pass without retraining on each one. However, it is possible to specify the model to use multiple passes (where on each pass the less important variable is permuted). This helps to assess importance in the presence of highly correlated variables.
# While the default strategy is to simply (temporarily) permute the "test" variable and predict the modified data set, it is possible to refit the model to be evaluated on each variable ("permute and relearn"), of course at a much higher computational cost.
# However, if the ML model to be evaluated supports ignoring variables during prediction (as BetaML tree models do), it is possible to specify the keyword argument for such an option in the target model prediction function and avoid refitting.
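# The core idea behind the default permutation ("mda") strategy can be summarised in a few lines of code. The sketch below is purely illustrative of the single-pass case (it omits the cross-validation, the recursive multiple passes and the Sobol metric that `FeatureRanker` adds on top):
# ```
# using Random, Statistics
# function single_pass_permutation_importance(m, x, y; loss = (y,ŷ) -> mean(abs2, y .- ŷ))
#     base_loss = loss(y, predict(m,x))       # loss with the original data
#     return map(1:size(x,2)) do j
#         xp      = copy(x)
#         xp[:,j] = shuffle(xp[:,j])          # temporarily permute the "test" variable
#         loss(y, predict(m,xp)) - base_loss  # importance = increase in loss
#     end
# end
# ```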
# In this tutorial we will use `FeatureRanker` first with some synthetic data, and then with the Boston dataset to determine the most important variables in determining house prices.
# We will compare the results with Shapley values using the [`ShapML`](https://github.com/nredell/ShapML.jl) package.
# Let's start by activating the local environment specific to the BetaML documentation and loading the necessary packages:
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
using Statistics, Random, Pipe, StableRNGs, HTTP, CSV, DataFrames, Plots, BetaML
import Distributions: Normal, Uniform, quantile
import ShapML
Random.seed!(123)
# ## Example with synthetic data
# In this example, we generate a dataset of 5 random variables, where `x1` is the most important in determining `y`, `x2` is somewhat less important, `x3` has interaction effects with `x1`, while `x4` and `x5` do not contribute at all to the calculation of `y`.
# We also add `x6` as a variable highly correlated with `x1`, but note that `x6` also does not contribute to `y`:
N = 2000
xa = rand(Uniform(0.0,10.0),N,5)
xb = xa[:,1] .* rand.(Normal(1,0.5))
x = hcat(xa,xb)
y = [10*r[1]-r[2]+0.1*r[3]*r[1] for r in eachrow(x) ];
# Aside from `y`, which is numerical, we also create a categorical version to test classification, and a further one-hot encoded version to test neural network models that, for classification tasks, work using one-hot encoded variables:
ysort = sort(y)
ycat = [(i < ysort[Int(round(N/3))]) ? "c" : ( (i < ysort[Int(round(2*N/3))]) ? "a" : "b") for i in y]
yoh = fit!(OneHotEncoder(),ycat);
# We run this example using a Random Forest regressor. The BetaML `RandomForestEstimator` model supports a `predict` function with the option to ignore specific dimensions. This allows us to "test" the various variables without retraining the model:
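# For example, a call sketched like the following (the `ignore_dims` keyword is the one named below in `ignore_dims_keyword`; check the model's `predict` documentation for the exact accepted values) would predict while treating the second column as if it were not available:
# ```
# predict(m, x, ignore_dims=[2])
# ```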
fr = FeatureRanker(model=RandomForestEstimator(),nsplits=5,nrepeats=1,recursive=false,metric="mda",ignore_dims_keyword="ignore_dims")
# We can now fit the `FeatureRanker` to our data. Note that, as for the other BetaML models, `fit!` by default returns the predictions, in this case the ranking, avoiding a separate `predict` call. The returned ranking goes from the least to the most important variable, according to the given metric.
rank = fit!(fr,x,y)
# As expected, the ranking shows `x1` as the most important variable. Let's look in detail at the metrics that we can obtain by querying the model with `info(fr)`:
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
loss_by_col_sd = info(fr)["loss_by_col_sd"]
sobol_by_col_sd = info(fr)["sobol_by_col_sd"]
loss_fullmodel = info(fr)["loss_all_cols"]
loss_fullmodel_sd = info(fr)["loss_all_cols_sd"]
ntrials_per_metric = info(fr)["ntrials_per_metric"]
# Since we chose `mda` as the reported metric, we must have that the reported rank is equal to the sortperm of `loss_by_col`:
sortperm(loss_by_col) == rank
# We can plot the loss per (omitted) column...
bar(string.(rank),loss_by_col[rank],label="Loss by col", yerror=quantile(Normal(1,0),0.975) .* (loss_by_col_sd[rank]./sqrt(ntrials_per_metric)))
# ..and the sobol values:
bar(string.(sortperm(sobol_by_col)),sobol_by_col[sortperm(sobol_by_col)],label="Sobol index by col", yerror=quantile(Normal(1,0),0.975) .* (sobol_by_col_sd[sortperm(sobol_by_col)]./sqrt(ntrials_per_metric)))
# As we can see from the graphs, the model did a good job of identifying the first variable as the most important one, ignoring the others and even giving a very low importance to the correlated one.
# ### Comparison with the Shapley values
# For Shapley values we first need to have a trained model:
m = RandomForestEstimator()
fit!(m,x,y);
# We then need to wrap the predict function, accounting for the fact that BetaML models work with standard arrays, while `ShapML` assumes data in DataFrame format:
function predict_function(model, data)
data_pred = DataFrame(y_pred = BetaML.predict(model, Matrix(data)))
return data_pred
end
# We set up other data related to the simulation..
explain = DataFrame(x[1:300, :],:auto)
reference = DataFrame(x,:auto)
sample_size = 60 ; # Number of Monte Carlo samples.
# ...and finally compute the stochastic Shapley values per individual record:
data_shap = ShapML.shap(explain = explain,
reference = reference,
model = m,
predict_function = predict_function,
sample_size = sample_size,
seed = 1
);
# We aggregate the Shapley values by feature and plot:
shap_aggregated =combine(groupby(data_shap,[:feature_name])) do subdf
(mean_effect = mean(abs.(subdf.shap_effect)), std = std(abs.(subdf.shap_effect)), n = size(subdf,1) )
end
shap_values = shap_aggregated.mean_effect
bar(string.(sortperm(shap_values)),shap_values[sortperm(shap_values)],label="Shapley values by col", yerror=quantile(Normal(1,0),0.975) .* (shap_aggregated.std[sortperm(shap_values)]./ sqrt.(shap_aggregated.n)))
# Note that the outputs using the Sobol index and the Shapley values are very similar. This shouldn't come as a surprise, as the two metrics are related.
# ### Classifications
# For classification tasks, the usage of `FeatureRanker` doesn't change:
fr = FeatureRanker(model=RandomForestEstimator(),nsplits=3,nrepeats=2,recursive=true,metric="mda",ignore_dims_keyword="ignore_dims")
rank = fit!(fr,x,ycat)
#-
fr = FeatureRanker(model=NeuralNetworkEstimator(verbosity=NONE),nsplits=3,nrepeats=1,recursive=false,metric="sobol",refit=false)
rank = fit!(fr,x,yoh)
# ## Determinants of house prices in the Boston area
# We start this example by first loading the data from a CSV file and splitting the data in features and labels:
#src dataURL = "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data"
#src data = @pipe HTTP.get(dataURL).body |> CSV.File(_, delim=' ', header=false, ignorerepeated=true) |> DataFrame
data = CSV.File(joinpath(@__DIR__,"data","housing.data"), delim=' ', header=false, ignorerepeated=true) |> DataFrame
var_names = [
"CRIM", # per capita crime rate by town
"ZN", # proportion of residential land zoned for lots over 25,000 sq.ft.
"INDUS", # proportion of non-retail business acres per town
"CHAS", # Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
"NOX", # nitric oxides concentration (parts per 10 million)
"RM", # average number of rooms per dwelling
"AGE", # proportion of owner-occupied units built prior to 1940
"DIS", # weighted distances to five Boston employment centres
"RAD", # index of accessibility to radial highways
"TAX", # full-value property-tax rate per $10,000
"PTRATIO", # pupil-teacher ratio by town
"B", # 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
"LSTAT", # % lower status of the population
]
y_name = "MEDV" ;# Median value of owner-occupied homes in $1000's
# Our features are a set of 13 explanatory variables, while the label that we want to estimate is the average housing price:
x = Matrix(data[:,1:13])
y = data[:,14];
# We use a Random Forest model as regressor and we compute the variable importance for this model as we did for the synthetic data:
fr = FeatureRanker(model=RandomForestEstimator(),nsplits=3,nrepeats=2,recursive=false)
rank = fit!(fr,x,y)
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
loss_by_col_sd = info(fr)["loss_by_col_sd"]
sobol_by_col_sd = info(fr)["sobol_by_col_sd"]
loss_fullmodel = info(fr)["loss_all_cols"]
loss_fullmodel_sd = info(fr)["loss_all_cols_sd"]
ntrials_per_metric = info(fr)["ntrials_per_metric"]
# Finally we can plot the variable importance:
bar(var_names[sortperm(loss_by_col)], loss_by_col[sortperm(loss_by_col)],label="Loss by var", permute=(:x,:y), yerror=quantile(Normal(1,0),0.975) .* (loss_by_col_sd[sortperm(loss_by_col)]./sqrt(ntrials_per_metric)), yrange=[0,0.6])
vline!([loss_fullmodel], label="Loss with all vars",linewidth=2)
vline!([loss_fullmodel-quantile(Normal(1,0),0.975) * loss_fullmodel_sd/sqrt(ntrials_per_metric),
loss_fullmodel+quantile(Normal(1,0),0.975) * loss_fullmodel_sd/sqrt(ntrials_per_metric),
], label=nothing,linecolor=:black,linestyle=:dot,linewidth=1)
#-
bar(var_names[sortperm(sobol_by_col)],sobol_by_col[sortperm(sobol_by_col)],label="Sobol index by col", permute=(:x,:y), yerror=quantile(Normal(1,0),0.975) .* (sobol_by_col_sd[sortperm(sobol_by_col)]./sqrt(ntrials_per_metric)), yrange=[0,0.5])
# As we can see, the two analyses agree on the most important variables, showing that the size of the house (number of rooms), the percentage of low-income population in the neighbourhood and, to a lesser extent, the distance to employment centres are the most important explanatory variables of house price in the Boston area. | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 6113 | # ## Library and data loading
using Dates #src
println(now(), " ", "*** Start image recognition tutorial..." ) #src
# Activating the local environment specific to BetaML documentation
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
using BetaML
using Random
Random.seed!(123);
using DelimitedFiles
using Statistics
using BenchmarkTools
using Plots
using Flux
using Flux: Data.DataLoader
using Flux: onehotbatch, onecold, crossentropy
using MLDatasets # For loading the training data
#using Images, FileIO, ImageTransformations # For loading the actual images
TESTRNG = FIXEDRNG # This could change...
x_train, y_train = MLDatasets.MNIST(split=:train)[:]
x_train = permutedims(x_train,(3,2,1))
x_train = convert(Array{Float64,3},x_train)
x_train = reshape(x_train,size(x_train,1),size(x_train,2)*size(x_train,3))
ohm = OneHotEncoder()
y_train_oh = fit!(ohm,y_train)
x_test, y_test = MLDatasets.MNIST(split=:test)[:]
x_test = permutedims(x_test,(3,2,1))
x_test = convert(Array{Float64,3},x_test)
x_test = reshape(x_test,size(x_test,1),size(x_test,2)*size(x_test,3))
y_test_oh = predict(ohm,y_test)
(N,D) = size(x_train)
# Building the model:
## 784x1 => 28x28x1
l1 = ReshaperLayer((D,1),(28,28,1))
## 28x28x1 => 14x14x8
l2 = ConvLayer(size(l1)[2],(5,5),8,stride=2,f=relu,rng=copy(TESTRNG))
## 14x14x8 => 7x7x16
l3 = ConvLayer(size(l2)[2],(3,3),16,stride=2,f=relu,rng=copy(TESTRNG))
## 7x7x16 => 4x4x32
l4 = ConvLayer(size(l3)[2],(3,3),32,stride=2,f=relu,rng=copy(TESTRNG))
## 4x4x32 => 2x2x32
l5 = ConvLayer(size(l4)[2],(3,3),32,stride=2,f=relu,rng=copy(TESTRNG))
## 2x2x32 => 1x1x32 (global per layer mean)
l6 = PoolingLayer(size(l5)[2],(2,2),stride=(2,2),f=mean)
## 1x1x32 => 32x1
l7 = ReshaperLayer(size(l6)[2])
## 32x1 => 10x1
l8 = DenseLayer(size(l7)[2][1],10,f=identity, rng=copy(TESTRNG))
## 10x1 => 10x1
l9 = VectorFunctionLayer(size(l8)[2][1],f=BetaML.softmax)
layers = [l1,l2,l3,l4,l5,l6,l7,l8,l9]
m = NeuralNetworkEstimator(layers=layers,loss=squared_cost,verbosity=HIGH,batch_size=128,epochs=4)
# We train the model only on a subset of the training data, otherwise it would take too long for the automated building of this page.
# Training on the whole MNIST set takes approximately 16 minutes on a mid-level laptop (on CPU), leading to a test accuracy of 0.969
(x_debug,x_other),(y_debug_oh,y_other_oh) = partition([x_train,y_train_oh],[0.01,0.99],rng=copy(TESTRNG))
#preprocess!.(layers)
# 0.131836 seconds (477.02 k allocations: 53.470 MiB, 72.73% compilation time)
#@code_warntype preprocess!(l5)
ŷ = fit!(m,x_debug,y_debug_oh)
#@btime fit!(m,x_debug,y_debug_oh)
# 1%: 15.909 s (1940246 allocations: 1.39 GiB)
# 17.509 s (1039126 allocations: 1.37 GiB)
# 15.766 s (1039111 allocations: 1.37 GiB)
# 14.669 s (3129139 allocations: 1.64 GiB) (w threads)
# 18.119 s (1039121 allocations: 1.37 GiB)
# 14.966 s (1039123 allocations: 1.37 GiB) (whout threads)
# 19.357 s (1039123 allocations: 1.37 GiB)
#println(now(), " ", "*** prefit..." ) #src
#ŷ = fit!(m,x_train,y_train_oh)
#println(now(), " ", "*** postfit..." ) #src
#y_true = inverse_predict(ohm,convert(Matrix{Bool},y_train_oh))
y_true = inverse_predict(ohm,convert(Matrix{Bool},y_debug_oh))
ŷ_nonoh = inverse_predict(ohm,ŷ)
accuracy(y_true,ŷ_nonoh)
hcat(y_true,ŷ_nonoh)
ŷtest = predict(m,x_test)
ytest_true = inverse_predict(ohm,convert(Matrix{Bool},y_test_oh))
ŷtest_nonoh = inverse_predict(ohm,ŷtest)
accuracy(ytest_true,ŷtest_nonoh)
hcat(ytest_true,ŷtest_nonoh)
cm = ConfusionMatrix()
fit!(cm,ytest_true,ŷtest_nonoh)
print(cm)
res = info(cm)
heatmap(string.(res["categories"]),string.(res["categories"]),res["normalised_scores"],seriescolor=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix (normalised scores)")
# -----------------------------------------------------------
# ## Flux implementation
# This is the equivalent workflow in Flux.
# Fitting on the whole training dataset leads to a test accuracy of 0.9658, so likely not statistically different from BetaML, but with a much faster computation time, as it takes only 2 minutes instead of 16...
x_train, y_train = MLDatasets.MNIST(split=:train)[:]
x_train = permutedims(x_train,(2,1,3)); # For correct img axis
#x_train = convert(Array{Float32,3},x_train);
x_train = reshape(x_train,(28,28,1,60000));
y_train = Flux.onehotbatch(y_train, 0:9)
train_data = Flux.Data.DataLoader((x_train, y_train), batchsize=128)
#x_test, y_test = MLDatasets.MNIST.testdata(dir = "data/MNIST")
x_test, y_test = MLDatasets.MNIST(split=:test)[:]
x_test = permutedims(x_test,(2,1,3)); # For correct img axis
#x_test = convert(Array{Float32,3},x_test);
x_test = reshape(x_test,(28,28,1,10000));
y_test = Flux.onehotbatch(y_test, 0:9)
model = Chain(
## 28x28 => 14x14
Conv((5, 5), 1=>8, pad=2, stride=2, Flux.relu),
## 14x14 => 7x7
Conv((3, 3), 8=>16, pad=1, stride=2, Flux.relu),
## 7x7 => 4x4
Conv((3, 3), 16=>32, pad=1, stride=2, Flux.relu),
## 4x4 => 2x2
Conv((3, 3), 32=>32, pad=1, stride=2, Flux.relu),
## Average pooling on each width x height feature map
GlobalMeanPool(),
Flux.flatten,
Dense(32, 10),
Flux.softmax
)
myaccuracy(y,ŷ) = (mean(Flux.onecold(ŷ) .== Flux.onecold(y)))
myloss(x, y) = Flux.crossentropy(model(x), y)
opt = Flux.ADAM()
ps = Flux.params(model)
number_epochs = 4
[(println(e); Flux.train!(myloss, ps, train_data, opt)) for e in 1:number_epochs]
ŷtrain = model(x_train)
ŷtest = model(x_test)
myaccuracy(y_train,ŷtrain)
myaccuracy(y_test,ŷtest)
plot(Gray.(x_train[:,:,1,2]))
cm = ConfusionMatrix()
fit!(cm,Flux.onecold(y_test) .-1, Flux.onecold(ŷtest) .-1 )
println(cm)
res = info(cm)
heatmap(string.(res["categories"]),string.(res["categories"]),res["normalised_scores"],seriescolor=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix (normalised scores)")
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 6892 | # # [A deep neural network with multi-branch architecture](@id multibranch_nn_tutorial)
# Often we can "divide" our feature sets into different groups, where for each group we have many, many variables whose importance in prediction we don't know, but for which using a fully dense layer would be too computationally expensive.
# For example, we want to predict the growth of forest trees based on soil characteristics, climate characteristics and a bunch of other data (species, age, density...).
#
# A soil (or climate) database may have hundreds of variables, how can we reduce them to a few that encode all the "soil" information?
# Sure, we could do a PCA or a clustering analysis, but a better way is to let our model itself find a way to _encode_ the soil information into a vector in a way that is optimal for our prediction goal, i.e. we target the encoding task at our prediction goal.
#
# So we run a multi-branch neural network where one branch is given by the soil variables - it starts from all the hundreds of variables and ends in a few neuron outputs, another branch in a similar way is for the climate variables, we merge them in a branch to take into account the soil-weather interrelation (for example, it is well known that the water retention capacity of a sandy soil is quite different from that of a clay soil) and finally we merge this branch with the other variable branch to arrive at a single predicted output.
# In this example we focus on building, training and predicting a multi-branch neural network. See the other examples for cross-validation, hyperparameter tuning, scaling, overfitting, encoding, etc.
#
# Data origin:
# - while we hope to apply this example soon on actual real world data, for now we work on synthetic random data just to assess the validity of the network configuration.
# ## Library and data generation
using Dates #src
println(now(), " ", "*** Starting multi-branch nn tutorial..." ) #src
# Activating the local environment specific to the tutorials
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
# We first load all the packages we are going to use
using StableRNGs, BetaML, Plots
using Test #src
println(now(), " ", "- Generating data and implementing the model..." ) #src
# Here we are explicit and we use our own fixed RNG:
seed = 123
AFIXEDRNG = StableRNG(seed)
# Here we generate the random data..
N = 100 # records
soilD = 20 # dimensions of the soil database
climateD = 30 # dimensions of the climate database
othervarD = 10 # dimensions of the other variables database
soilX = rand(StableRNG(seed),N,soilD)
climateX = rand(StableRNG(seed+10),N,climateD)
othervarX = rand(StableRNG(seed+20),N,othervarD)
X = hcat(soilX,climateX,othervarX)
Y = rand(StableRNG(seed+30),N)
# ## Model definition
# 
#
# In the figure above, each circle represents a multi-neuron layer, with the number of neurons (output dimensions) written inside. Dotted circles are `ReplicatorLayer`s, which simply "pass through" the information to the next layer.
# Red layers represent the layers responsible for the final step in encoding the information for a given branch. Subsequent layers will use this encoded information (i.e. decode it) to finally provide the prediction for the branch.
# We create a first branch for the soil variables, a second for the climate variables and finally a third for the other variables. We merge the soil and climate branches in layer 4 and the resulting branch and the other variables branch in layer 6. Finally, the single neuron layer 8 provides the prediction.
#
# The weights along the whole chain can be learned using the traditional backpropagation algorithm.
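# In essence, a `GroupedLayer` built from sub-layers with input sizes $n_1,\dots,n_g$ and output sizes $m_1,\dots,m_g$ takes an input vector of size $\sum_i n_i$, feeds the corresponding slice of it to each sub-layer, and concatenates the sub-layers' outputs into a single vector of size $\sum_i m_i$ (you can check that the sizes used below chain correctly in this way).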
# The whole model can be implemented with the following code:
# - layer 1:
l1_soil = DenseLayer(20,30,f=relu,rng=copy(AFIXEDRNG))
l1_climate = ReplicatorLayer(30)
l1_oth = ReplicatorLayer(10)
l1 = GroupedLayer([l1_soil,l1_climate,l1_oth])
# - layer 2:
l2_soil = DenseLayer(30,30,f=relu,rng=copy(AFIXEDRNG))
l2_climate = DenseLayer(30,40,f=relu,rng=copy(AFIXEDRNG))
l2_oth = ReplicatorLayer(10)
l2 = GroupedLayer([l2_soil,l2_climate,l2_oth])
# - layer 3:
l3_soil = DenseLayer(30,4,f=relu,rng=copy(AFIXEDRNG)) # encoding of soil properties
l3_climate = DenseLayer(40,4,f=relu,rng=copy(AFIXEDRNG)) # encoding of climate properties
l3_oth = DenseLayer(10,15,f=relu,rng=copy(AFIXEDRNG))
l3 = GroupedLayer([l3_soil,l3_climate,l3_oth])
# - layer 4:
l4_soilclim = DenseLayer(8,15,f=relu,rng=copy(AFIXEDRNG))
l4_oth = DenseLayer(15,15,f=relu,rng=copy(AFIXEDRNG))
l4 = GroupedLayer([l4_soilclim,l4_oth])
# - layer 5:
l5_soilclim = DenseLayer(15,6,f=relu,rng=copy(AFIXEDRNG)) # encoding of soil and climate properties together
l5_oth = DenseLayer(15,6,f=relu,rng=copy(AFIXEDRNG)) # encoding of other vars
l5 = GroupedLayer([l5_soilclim,l5_oth])
# - layer 6:
l6 = DenseLayer(12,15,f=relu,rng=copy(AFIXEDRNG))
# - layer 7:
l7 = DenseLayer(15,15,f=relu,rng=copy(AFIXEDRNG))
# - layer 8:
l8 = DenseLayer(15,1,f=relu,rng=copy(AFIXEDRNG))
# Finally we put the layers together and we create our `NeuralNetworkEstimator` model:
layers = [l1,l2,l3,l4,l5,l6,l7,l8]
m = NeuralNetworkEstimator(layers=layers,opt_alg=ADAM(),epochs=100,rng=copy(AFIXEDRNG))
# ## Fitting the model
println(now(), " ", "- model fitting..." ) #src
# We are now ready to fit the model to the data. By default BetaML models return directly the predictions on the training data as the output of the fitting call, so there is no need for a separate `predict(m,X)` call.
Ŷ = fit!(m,X,Y)
# ## Model quality assessment
println(now(), " ", "- assessing the model quality..." ) #src
# We can compute the relative mean error between the "true" Y and the Y estimated by the model.
rme = relative_mean_error(Y,Ŷ)
@test rme <0.1 #src
# Of course we know there is no actual relation here between the X and the Y, as both are randomly generated. The result above just tells us that the network has been able to find a path between the X and Y that has been used for training, but we hope that in a real application this learned path represents a true, general relation between the inputs and the outputs.
# Finally we can also plot Y against Ŷ and visualise how the average loss decreased during training:
scatter(Y,Ŷ,xlabel="vol observed",ylabel="vol estimated",label=nothing,title="Est vs. obs volumes")
#-
loss_per_epoch = info(m)["loss_per_epoch"]
plot(loss_per_epoch, xlabel="epoch", ylabel="loss per epoch", label=nothing, title="Loss per epoch")
println(now(), " ", "- Ended multi-branch nn example." ) #src | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 35394 | # # [A regression task: the prediction of bike sharing demand](@id regression_tutorial)
# The task is to estimate the influence of several variables (like the weather, the season, the day of the week..) on the demand for shared bicycles, so that the authority in charge of the service can organise it in the best way.
#
# Data origin:
# - original full dataset (by hour, not used here): [https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset](https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset)
# - simplified dataset (by day, with some simple scaling): [https://www.hds.utc.fr/~tdenoeux/dokuwiki/en/aec](https://www.hds.utc.fr/~tdenoeux/dokuwiki/en/aec)
# - description: [https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/exam_2019_ace_.pdf](https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/exam_2019_ace_.pdf)
# - data: [https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/bike_sharing_day.csv.zip](https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/bike_sharing_day.csv.zip)
# Note that even if we are estimating a time series, we are not using a recurrent neural network here, as we assume the temporal dependence to be negligible (i.e. $Y_t = f(X_t)$ alone).
# ## Library and data loading
using Dates #src
println(now(), " ", "*** Starting bike demand regression tutorial..." ) #src
# Activating the local environment specific to BetaML documentation
using Pkg
Pkg.activate(joinpath(@__DIR__,"..","..",".."))
# We first load all the packages we are going to use
using LinearAlgebra, Random, Statistics, StableRNGs, DataFrames, CSV, Plots, Pipe, BenchmarkTools, BetaML
import Distributions: Uniform, DiscreteUniform
import DecisionTree, Flux ## For comparisions
using Test #src
println(now(), " ", "- Loading, plotting, wrangling data..." ) #src
# Here we are explicit and we use our own fixed RNG:
seed = 123 # The table at the end of this tutorial has been obtained with seeds 123, 1000 and 10000
AFIXEDRNG = StableRNG(seed)
# Here we load the data from a CSV file provided by the BetaML package
basedir = joinpath(dirname(pathof(BetaML)),"..","docs","src","tutorials","Regression - bike sharing")
data = CSV.File(joinpath(basedir,"data","bike_sharing_day.csv"),delim=',') |> DataFrame
describe(data)
# The variable we want to learn to predict is `cnt`, the total demand of bikes for a given day. Even if it is indeed an integer, we treat it as a continuous variable, so each single prediction will be a scalar $Y \in \mathbb{R}$.
plot(data.cnt, title="Daily bike sharing rents (2Y)", label=nothing)
# ## Decision Trees
# We start our regression task with Decision Trees.
# Decision tree training consists in choosing the set of questions (in a hierarchical way, so as to form indeed a "decision tree") that "best" splits the dataset given for training, in the sense that each split generates the sub-samples (always 2 sub-samples in the BetaML implementation) that are, for the characteristic we want to predict, the most homogeneous possible. Decision trees are one of the few ML algorithms that have an intuitive interpretation and can be used for both regression and classification tasks.
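# For a regression task, a common way to define the "best" split is the reduction in variance of the target variable: a candidate question that splits a node with $n$ records into two sub-samples of $n_l$ and $n_r$ records is scored as $\Delta = Var(y) - \frac{n_l}{n} Var(y_l) - \frac{n_r}{n} Var(y_r)$, and the question with the highest $\Delta$ is selected; the `min_gain` hyperparameter we will meet below sets a minimum threshold on this kind of gain for a split to be accepted.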
# ### Data preparation
# The first step is to prepare the data for the analysis. This indeed depends on the model we want to employ, as some models "accept" almost everything as input, no matter if the data is numerical or categorical, if it has missing values or not... while other models are instead much more demanding, and require more work to "clean up" our dataset.
# The tutorial starts using Decision Tree and Random Forest models that definitely belong to the first group, so the only thing we have to do is to select the variables in input (the "feature matrix", that we will indicate with "X") and the variable representing our output (the information we want to learn to predict, we call it "y"):
x = Matrix{Float64}(data[:,[:instant,:season,:yr,:mnth,:holiday,:weekday,:workingday,:weathersit,:temp,:atemp,:hum,:windspeed]])
y = data[:,16];
# We finally set up a dataframe to store the relative mean errors of the various models we'll use.
results = DataFrame(model=String[],train_rme=Float64[],test_rme=Float64[])
# ### Model selection
# We can now split the dataset between the data that we will use for training the algorithm and selecting the hyperparameters (`xtrain`/`ytrain`) and those for testing the quality of the algorithm with the optimal hyperparameters (`xtest`/`ytest`). We use the `partition` function specifying the share we want to use for these two different subsets, here 75% and 25% respectively. As our data represents indeed a time series, we want our model to be able to predict _future_ demand of bike sharing from _past_, observed rented bikes, so we do not shuffle the dataset as it would be the default.
((xtrain,xtest),(ytrain,ytest)) = partition([x,y],[0.75,1-0.75],shuffle=false)
(ntrain, ntest) = size.([ytrain,ytest],1)
# Then we define the model we want to use, [`DecisionTreeEstimator`](@ref) in this case, and we create an instance of the model:
println(now(), " ", "- decision trees..." ) #src
m = DecisionTreeEstimator(autotune=true, rng=copy(AFIXEDRNG))
# Passing a fixed Random Number Generator (RNG) to the `rng` parameter guarantees that every time we use the model with the same data (from the model creation down to value prediction) we obtain the same results. In particular BetaML provides `FIXEDRNG`, an instance of `StableRNG` that guarantees reproducibility even across different Julia versions. See the section ["Dealing with stochasticity"](@ref stochasticity_reproducibility) for details.
# Note the `autotune` parameter. BetaML has perhaps what is the easiest method for automatically tuning the model hyperparameters (thus becoming in this way _learned_ parameters). Indeed, in most cases it is enought to pass the attribute `autotune=true` on the model constructor and hyperparameters search will be automatically performed on the first `fit!` call.
# If needed we can customise hyperparameter tuning, chosing the tuning method on the parameter `tunemethod`. The single-line above is equivalent to:
tuning_method = SuccessiveHalvingSearch(
hpranges = Dict("max_depth" =>[5,10,nothing], "min_gain"=>[0.0, 0.1, 0.5], "min_records"=>[2,3,5],"max_features"=>[nothing,5,10,30]),
loss = l2loss_by_cv,
res_shares = [0.05, 0.2, 0.3],
multithreads = true
)
m_dt = DecisionTreeEstimator(autotune=true, rng=copy(AFIXEDRNG), tunemethod=tuning_method)
# Note that the defaults change according to the specific model. For example, [`RandomForestEstimator`](@ref) autotuning defaults to not being multithreaded, as the individual model is already multithreaded.
# !!! tip
# Refer to the versions of this tutorial for BetaML <= 0.6 for a good exercise on how to perform model selection using the [`cross_validation`](@ref) function, or even by custom grid search.
# We can now fit the model, that is, learn the model parameters that lead to the best predictions from the data. By default (unless we use `cache=false` in the model constructor) the model also stores the training predictions, so we can just use `fit!()` instead of `fit!()` followed by `predict(model,xtrain)`.
ŷtrain = fit!(m_dt,xtrain,ytrain)
#src # Let's benchmark the time and memory usage of the training step of a decision tree:
#src # - including auto-tuning:
#src # ```
#src # @btime let
#src # m = DecisionTreeEstimator(autotune=true, rng=copy(AFIXEDRNG), verbosity=NONE, cache=false)
#src # fit!(m,$xtrain,$ytrain)
#src # end
#src # ```
#src # 323.560 ms (4514026 allocations: 741.38 MiB)
#src # - excluding autotuning:
#src # ```
#src # m = DecisionTreeEstimator(autotune=false, rng=copy(AFIXEDRNG), verbosity=NONE, cache=false)
#src # @btime let
#src # fit!(m,$xtrain,$ytrain)
#src # reset!(m)
#src # end
#src # ```
#src # 53.118 ms (242924 allocations: 91.54 MiB)
#src # Individual decision trees are blazing fast, among the fastest algorithms we could use.
#-
# The above code produces a fitted `DecisionTreeEstimator` object that can be used to make predictions given some new features, i.e. given a new X matrix of (number of observations x dimensions), predict the corresponding Y vector of scalars in R.
ŷtest = predict(m_dt, xtest)
# We now compute the relative mean error for the training and the test set. The [`relative_mean_error`](@ref) is a very flexible error function. Without additional parameters, it computes, as the name says, the _relative mean error_ between an estimated and a true vector.
# However it can also compute the _mean relative error_, also known as the "mean absolute percentage error" ([MAPE](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error)), or use a p-norm higher than 1.
# The _mean relative error_ emphasises the relativeness of the error, i.e. all observations and dimensions weigh the same, whether large or small. Conversely, in the _relative mean error_ the same relative error on larger observations (or dimensions) weighs more.
# In this tutorial we use the latter, as our data clearly has some outlier days with very small rents, and we care more about avoiding our customers finding empty bike racks than having unrented bikes on the rack. Targeting a low _mean relative error_ would push all our predictions down to try to accommodate the low-rent days (to avoid large relative errors on them), and that's not what we want.
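# To make the distinction concrete, here is a hand-made computation of the two quantities on the test set (only to illustrate the two definitions; in the rest of the tutorial we rely on the `relative_mean_error` function, whose exact implementation may differ in some details):
rme_hand  = sum(abs.(ŷtest .- ytest)) / sum(abs.(ytest))             # relative mean error: errors are aggregated first, then made relative
mape_hand = sum(abs.(ŷtest .- ytest) ./ abs.(ytest)) / length(ytest) # mean relative error (MAPE): each error is made relative first, then averaged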
# We can then compute the relative mean error for the decision tree
rme_train = relative_mean_error(ytrain,ŷtrain) # 0.1367
rme_test = relative_mean_error(ytest,ŷtest) # 0.1547
@test rme_test <= 0.3 #src
# And we save the relative mean errors in the `results` dataframe:
push!(results,["DT",rme_train,rme_test]);
# We can plot the true labels vs the estimated ones for the two subsets...
scatter(ytrain,ŷtrain,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in training period (DT)")
#-
scatter(ytest,ŷtest,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in testing period (DT)")
# Or we can visualise the true vs estimated bikes shared on a temporal basis.
# First on the full period (2 years) ...
ŷtrainfull = vcat(ŷtrain,fill(missing,ntest))
ŷtestfull = vcat(fill(missing,ntrain), ŷtest)
plot(data[:,:dteday],[data[:,:cnt] ŷtrainfull ŷtestfull], label=["obs" "train" "test"], legend=:topleft, ylabel="daily rides", title="Daily bike sharing demand observed/estimated across the\n whole 2-years period (DT)")
# ..and then focusing on the testing period
stc = ntrain
endc = size(x,1)
plot(data[stc:endc,:dteday],[data[stc:endc,:cnt] ŷtestfull[stc:endc]], label=["obs" "test"], legend=:bottomleft, ylabel="Daily rides", title="Focus on the testing period (DT)")
# The predictions aren't so bad in this case; however decision trees are highly unstable, and the output could have depended just on the specific initial random seed.
# ## Random Forests
# Rather than trying to solve this problem using a single Decision Tree model, let's now try to use a _Random Forest_ model. Random forests average the results of many different decision trees and provide a more "stable" result.
# Being made of many decision trees, random forests are however more computationally expensive to train.
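# As a minimal hand-made sketch of this averaging idea ("bagging"), using only functions already seen above and not the actual `RandomForestEstimator` internals, we could fit a few independent decision trees, each on a bootstrapped version of the training set, and average their predictions:
bag_rng   = copy(AFIXEDRNG)
n_bag     = 5
ŷtest_bag = zeros(ntest)
for i in 1:n_bag
    idx = rand(bag_rng, 1:ntrain, ntrain)                         # bootstrap sample, drawn with replacement
    m_i = DecisionTreeEstimator(rng=copy(AFIXEDRNG), verbosity=NONE)
    fit!(m_i, xtrain[idx,:], ytrain[idx])
    ŷtest_bag .+= predict(m_i, xtest) ./ n_bag                    # accumulate the average of the trees' predictions
end
relative_mean_error(ytest, ŷtest_bag)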
println(now(), " ", "- random forests..." ) #src
m_rf = RandomForestEstimator(autotune=true, oob=true, rng=copy(AFIXEDRNG))
ŷtrain = fit!(m_rf,xtrain,ytrain);
ŷtest = predict(m_rf,xtest);
rme_train = relative_mean_error(ytrain,ŷtrain) # 0.056
rme_test = relative_mean_error(ytest,ŷtest) # 0.161
push!(results,["RF",rme_train,rme_test]);
#src # Let's now benchmark the training of the BetaML Random Forest model
#src #
#src # - including auto-tuning:
#src # ```
#src # @btime let
#src # m = RandomForestEstimator(autotune=true, rng=copy(AFIXEDRNG), verbosity=NONE, cache=false)
#src # fit!(m,$xtrain,$ytrain)
#src # end
#src # ```
#src # 69.524 s (592717390 allocations: 80.28 GiB)
#src # - excluding autotuning:
#src # ```
#src # m = RandomForestEstimator(autotune=false, rng=copy(AFIXEDRNG), verbosity=NONE, cache=false)
#src # @btime let
#src # fit!(m,$xtrain,$ytrain)
#src # reset!(m)
#src # end
#src # ```
#src # 5124.769 ms (1400309 allocations: 466.66 MiB)
# While slower than individual decision trees, random forests remain relatively fast. We should also consider that they are by default efficiently parallelised, so their speed increases with the number of available cores (in building this documentation page, GitHub CI servers allow for a single core, so all the benchmarks you see in this tutorial are run with a single core available).
#-
# Random forests support the so-called "out-of-bag" error, an estimation of the error that we would have when the model is applied to a testing sample.
# However in this case the reported oob error is much smaller than the testing error we will actually find. This is due to the fact that the division between training/validation and testing in this exercise is not random, but has a temporal basis. It seems that in this example the data in validation/testing follows a different pattern/variance than those in training (in probabilistic terms, the daily observations are not i.i.d.).
info(m_rf)
oob_error, rme_test = info(m_rf)["oob_errors"],relative_mean_error(ytest,ŷtest)
#+
@test rme_test <= 0.20 #src
# In this case we found an error very similar to the one obtained employing a single decision tree. Let's plot the observed data vs the estimated one using the random forest, and then along the temporal axis:
scatter(ytrain,ŷtrain,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in training period (RF)")
#-
scatter(ytest,ŷtest,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in testing period (RF)")
# Full period plot (2 years):
ŷtrainfull = vcat(ŷtrain,fill(missing,ntest))
ŷtestfull = vcat(fill(missing,ntrain), ŷtest)
plot(data[:,:dteday],[data[:,:cnt] ŷtrainfull ŷtestfull], label=["obs" "train" "test"], legend=:topleft, ylabel="daily rides", title="Daily bike sharing demand observed/estimated across the\n whole 2-years period (RF)")
# Focus on the testing period:
stc = 620
endc = size(x,1)
plot(data[stc:endc,:dteday],[data[stc:endc,:cnt] ŷtrainfull[stc:endc] ŷtestfull[stc:endc]], label=["obs" "val" "test"], legend=:bottomleft, ylabel="Daily rides", title="Focus on the testing period (RF)")
# ### Comparison with DecisionTree.jl random forest
# We now compare our results with those obtained employing the same model in the [DecisionTree package](https://github.com/bensadeghi/DecisionTree.jl), using the hyperparameters of the optimal BetaML Random Forest model:
println(now(), " ", "- decision trees.jl..." ) #src
best_rf_hp = hyperparameters(m_rf)
# Hyperparameters of the DecisionTree.jl random forest model
#src # set of classification parameters and respective default values
#src # n_subfeatures: number of features to consider at random per split (default: -1, sqrt(# features))
#src # n_trees: number of trees to train (default: 10)
#src # partial_sampling: fraction of samples to train each tree on (default: 0.7)
#src # max_depth: maximum depth of the decision trees (default: no maximum)
#src # min_samples_leaf: the minimum number of samples each leaf needs to have (default: 5)
#src # min_samples_split: the minimum number of samples in needed for a split (default: 2)
#src # min_purity_increase: minimum purity needed for a split (default: 0.0)
#src # keyword rng: the random number generator or seed to use (default Random.GLOBAL_RNG)
#src # multi-threaded forests must be seeded with an `Int`
n_subfeatures=isnothing(best_rf_hp.max_features) ? -1 : best_rf_hp.max_features; n_trees=best_rf_hp.n_trees; partial_sampling=0.7; max_depth=isnothing(best_rf_hp.max_depth) ? typemax(Int64) : best_rf_hp.max_depth;
min_samples_leaf=best_rf_hp.min_records; min_samples_split=best_rf_hp.min_records; min_purity_increase=best_rf_hp.min_gain;
# We train the model..
model = DecisionTree.build_forest(ytrain, convert(Matrix,xtrain),
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng = seed)
# And we generate predictions and measure their error
(ŷtrain,ŷtest) = DecisionTree.apply_forest.([model],[xtrain,xtest]);
#src # Let's benchmark the DecisionTrees.jl Random Forest training
#src # ```
#src # @btime DecisionTree.build_forest(ytrain, convert(Matrix,xtrain),
#src # n_subfeatures,
#src # n_trees,
#src # partial_sampling,
#src # max_depth,
#src # min_samples_leaf,
#src # min_samples_split,
#src # min_purity_increase;
#src # rng = seed);
#src # 36.924 ms (70622 allocations: 10.09 MiB)
#src # ```
#src # DecisionTree.jl does a good job in optimising the Random Forest algorithm, as it is over 3 times faster than BetaML.
(rme_train, rme_test) = relative_mean_error.([ytrain,ytest],[ŷtrain,ŷtest]) # 0.022 and 0.304
push!(results,["RF (DecisionTree.jl)",rme_train,rme_test]);
# While the train error is very small, the error on the test set remains relatively high. The very low error level on the training set is a sign that it overspecialised on the training set, and we would have done better running a dedicated hyper-parameter tuning function for the DecisionTree.jl model (we did try using the default `DecisionTree.jl` parameters, but we obtained roughly the same results).
@test rme_test <= 0.32 #src
# Finally we plot the DecisionTree.jl predictions alongside the observed value:
ŷtrainfull = vcat(ŷtrain,fill(missing,ntest))
ŷtestfull = vcat(fill(missing,ntrain), ŷtest)
plot(data[:,:dteday],[data[:,:cnt] ŷtrainfull ŷtestfull], label=["obs" "train" "test"], legend=:topleft, ylabel="daily rides", title="Daily bike sharing demand observed/estimated across the\n whole 2-years period (DT.jl RF)")
# Again, focusing on the testing data:
stc = ntrain
endc = size(x,1)
plot(data[stc:endc,:dteday],[data[stc:endc,:cnt] ŷtestfull[stc:endc]], label=["obs" "test"], legend=:bottomleft, ylabel="Daily rides", title="Focus on the testing period (DT.jl RF)")
# ### Conclusions of Decision Trees / Random Forests methods
# The error obtained employing DecisionTree.jl is significantly larger than the one obtained using a BetaML random forest model, although, to be fair with DecisionTree.jl, we didn't tune its hyper-parameters. Also, the DecisionTree.jl random forest model is much faster.
# This is partially due to the fact that, internally, DecisionTree.jl models optimise the algorithm by sorting the observations. BetaML trees/forests don't employ this optimisation and hence they can work with true categorical data for which ordering is not defined. Another explanation of this difference in speed is that BetaML Random Forest models accept `missing` values within the feature matrix.
# To sum up, BetaML random forests are ideal algorithms when we want to obtain good predictions in the simplest way, even without manually tuning the hyper-parameters, and without spending time in cleaning ("munging") the feature matrix, as they accept almost "any kind" of data as it is.
# ## Neural Networks
println(now(), " ", "- neural networks..." ) #src
# BetaML provides only _deep forward neural networks_, artificial neural networks where the individual units ("nodes") are arranged in _layers_, from the _input layer_, where each unit holds the input coordinate, through various _hidden layer_ transformations, until the actual _output_ of the model:
# 
# In this layerwise computation, each unit in a particular layer takes input from _all_ the preceding layer units and has its own parameters that are adjusted to perform the overall computation. The _training_ of the network consists in retrieving the coefficients that minimise a _loss_ function between the output of the model and the known data.
# In particular, a _deep_ (feedforward) neural network refers to a neural network that contains not only the input and output layers, but also (a variable number of) hidden layers in between.
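# As a tiny numerical illustration of this layer-wise computation (a hand-made sketch, independent of the BetaML layer objects that we will use below), a dense layer with weight matrix W, bias vector b and activation function f maps an input vector x to f.(W*x + b):
toy_W   = [0.2 -0.1; 0.4 0.3]            # a layer of 2 units, each connected to both inputs
toy_b   = [0.1, -0.2]
toy_in  = [1.0, 2.0]
toy_out = relu.(toy_W * toy_in .+ toy_b) # the output of this layer becomes the input of the next one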
# Neural networks accept only numerical inputs. We hence need to convert all categorical data into numerical ones. A common approach is to use the so-called "one-hot-encoding", where the categorical values are converted into indicator variables (0/1), one for each possible value. This can be done in BetaML using the [`OneHotEncoder`](@ref) model:
seasonDummies = fit!(OneHotEncoder(),data.season)
weatherDummies = fit!(OneHotEncoder(),data.weathersit)
wdayDummies = fit!(OneHotEncoder(),data.weekday .+ 1)
## We compose the feature matrix with the new dimensions obtained from the onehotencoder functions
x = hcat(Matrix{Float64}(data[:,[:instant,:yr,:mnth,:holiday,:workingday,:temp,:atemp,:hum,:windspeed]]),
seasonDummies,
weatherDummies,
wdayDummies)
y = data[:,16];
# As we did for decision trees / random forests, we split the data in training and testing sets
((xtrain,xtest),(ytrain,ytest)) = partition([x,y],[0.75,1-0.75],shuffle=false)
(ntrain, ntest) = size.([ytrain,ytest],1)
# Another common operation with neural networks is to scale the feature vectors (X) and the labels (Y). The BetaML [`Scaler`](@ref) model, by default, scales the data such that each dimension has mean 0 and variance 1.
# Note that we can provide the `Scaler` model with different scale factors, or specify the columns that shouldn't be scaled (e.g. those resulting from the one-hot encoding). Finally, we can reverse the scaling (this is useful to retrieve the unscaled features from a model trained with scaled ones; a quick check of this is shown a few lines below).
cols_nottoscale = [2;4;5;10:23]
xsm = Scaler(skip=cols_nottoscale)
xtrain_scaled = fit!(xsm,xtrain)
xtest_scaled = predict(xsm,xtest)
ytrain_scaled = ytrain ./ 1000 # We just divide Y by 1000, as using full scaling of Y we may get negative demand.
ytest_scaled = ytest ./ 1000
D = size(xtrain,2)
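# As a quick check of the scaling reversal mentioned above, we can map the scaled features back with `inverse_predict` (the same function we will mention again for Y below); this should return the original `xtrain`, up to floating-point rounding:
xtrain_back = inverse_predict(xsm, xtrain_scaled)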
#-
# We can now build our feed-forward neural network. We create three layers: the first layer will always have an input size equal to the dimensions of our data (the number of columns), and the output layer, for a simple regression where the predictions are scalars, will always have size one. We will tune the size of the middle (hidden) layer.
# There are already several kinds of layers available (and you can build your own kind by defining a new `struct` and implementing a few functions; see the [`Nn`](@ref nn_module) module documentation for details). Here we use only _dense_ layers, those found in typical feed-forward neural networks.
# For each layer, on top of its size (in "neurons") we can specify an _activation function_. Here we use [`relu`](@ref) for the terminal layer (this will guarantee that our predictions are always positive) and `identity` for the hidden layer. Again, consult the `Nn` module documentation for the other activation functions already defined, or use any function of your choice.
# Initial weight parameters can also be specified if needed. By default [`DenseLayer`](@ref) uses the so-called _Xavier initialisation_.
# Let's hence build our candidate neural network structures, choosing between 5 and 10 nodes in the hidden layers:
candidate_structures = [
    [DenseLayer(D,k,f=relu,df=drelu,rng=copy(AFIXEDRNG)),         # Activation function is ReLU, its derivative is drelu
     DenseLayer(k,k,f=identity,df=didentity,rng=copy(AFIXEDRNG)), # This is the hidden layer whose size we want to tune
     DenseLayer(k,1,f=relu,df=drelu,rng=copy(AFIXEDRNG))] for k in 5:2:10]
# Note that specifying the derivatives of the activation functions (and of the loss function that we'll see in a moment) is totally optional, as without them BetaML will use [`Zygote.jl`](https://github.com/FluxML/Zygote.jl) for automatic differentiation.
# We also set a few other parameters as "tunable": the number of "epochs" to train the model (the number of iterations through the whole dataset), the batch size and the optimisation algorithm to use.
# Several optimisation algorithms are indeed available, and each accepts different parameters, like the _learning rate_ for the Stochastic Gradient Descent algorithm ([`SGD`](@ref), used by default) or the exponential decay rates for the moments estimates for the [`ADAM`](@ref) algorithm (that we use here, with the default parameters).
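# As a reminder of what the _learning rate_ λ controls, a plain SGD step updates a parameter vector w, given the gradient g of the loss on the current batch, as w ← w - λ*g (this is only a hand-made sketch of the idea; the actual BetaML optimisers may add refinements such as learning-rate schedules or, for ADAM, the adaptive moment estimates mentioned above):
sgd_w = [0.5, -0.3]
sgd_g = [0.2,  0.1]  # gradient of the loss with respect to sgd_w on the current batch
sgd_λ = 2.0          # the learning rate, e.g. one of the candidate SGD values below
sgd_w_updated = sgd_w .- sgd_λ .* sgd_g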
# The hyperparameter ranges will then look as follow:
hpranges = Dict("layers" => candidate_structures,
"epochs" => rand(copy(AFIXEDRNG),DiscreteUniform(50,100),3), # 3 values sampled at random between 50 and 100
"batch_size" => [4,8,16],
"opt_alg" => [SGD(λ=2),SGD(λ=1),SGD(λ=3),ADAM(λ=0.5),ADAM(λ=1),ADAM(λ=0.25)])
# Finally we can build the "neural network" [`NeuralNetworkEstimator`](@ref) model, where we "chain" the layers together and assign a final loss function (again, you can provide your own loss function, if those available in BetaML don't suit your needs):
nnm = NeuralNetworkEstimator(loss=squared_cost, descr="Bike sharing regression model", tunemethod=SuccessiveHalvingSearch(hpranges = hpranges), autotune=true,rng=copy(AFIXEDRNG)) # Build the NN model and use the squared cost (aka MSE) as error function by default
#src NN without any parameters:
#src nnm2 = NeuralNetworkEstimator(autotune=true)
#src ŷtrain_scaled = fit!(nnm2,xtrain_scaled,ytrain_scaled)
#src ŷtrain = ŷtrain_scaled .* 1000
#src ŷtest = @pipe predict(nnm2,xtest_scaled) .* 1000 |> dropdims(_,dims=2)
#src (rme_train, rme_test) = relative_mean_error.([ŷtrain,ŷtest],[ytrain,ytest]) #0.041, 0.236
#-
# We can now fit and autotune the model:
ŷtrain_scaled = fit!(nnm,xtrain_scaled,ytrain_scaled)
# The model training is one order of magnitude slower than random forests, although the memory requirement is approximately the same.
#-
# To obtain the neural network predictions we apply the function `predict` to the feature matrix X for which we want to generate predictions, and then we rescale y.
# Normally we would apply here the `inverse_predict` function, but as we simply divided by 1000, we multiply ŷ by the same amount:
ŷtrain = ŷtrain_scaled .* 1000
ŷtest = predict(nnm,xtest_scaled) .* 1000
#-
(rme_train, rme_test) = relative_mean_error.([ŷtrain,ŷtest],[ytrain,ytest])
push!(results,["NN",rme_train,rme_test]);
#src 0.134, 0.149
# The error is much lower. Let's plot our predictions:
@test rme_test < 0.25 #src
# Again, we can start by plotting the estimated vs the observed value:
scatter(ytrain,ŷtrain,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in training period (NN)")
#-
scatter(ytest,ŷtest,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in testing period (NN)")
#-
# We now plot across the time dimension, first plotting the whole period (2 years):
ŷtrainfull = vcat(ŷtrain,fill(missing,ntest))
ŷtestfull = vcat(fill(missing,ntrain), ŷtest)
plot(data[:,:dteday],[data[:,:cnt] ŷtrainfull ŷtestfull], label=["obs" "train" "test"], legend=:topleft, ylabel="daily rides", title="Daily bike sharing demand observed/estimated across the\n whole 2-years period (NN)")
# ...and then focusing on the testing data
stc = 620
endc = size(x,1)
plot(data[stc:endc,:dteday],[data[stc:endc,:cnt] ŷtestfull[stc:endc]], label=["obs" "val" "test"], legend=:bottomleft, ylabel="Daily rides", title="Focus on the testing period (NN)")
# ### Comparison with Flux.jl
println(now(), " ", "- Flux.jl..." ) #src
# We now apply the same Neural Network model using the [Flux](https://fluxml.ai/) framework, a dedicated neural network library, reusing the optimal parameters that we did learn from tuning `NeuralNetworkEstimator`:
hp_opt = hyperparameters(nnm)
opt_size = size(hp_opt.layers[1])[2][1]
opt_batch_size = hp_opt.batch_size
opt_epochs = hp_opt.epochs
# We fix the default random number generator so that the Flux example gives a reproducible output
Random.seed!(seed)
# We define the Flux neural network model and load it with data...
l1 = Flux.Dense(D,opt_size,Flux.relu)
l2 = Flux.Dense(opt_size,opt_size,identity)
l3 = Flux.Dense(opt_size,1,Flux.relu)
Flux_nn = Flux.Chain(l1,l2,l3)
fluxloss(x, y) = Flux.mse(Flux_nn(x), y)
ps = Flux.params(Flux_nn)
nndata = Flux.Data.DataLoader((xtrain_scaled', ytrain_scaled'), batchsize=opt_batch_size,shuffle=true)
#src Flux_nn2 = deepcopy(Flux_nn) ## A copy for the time benchmarking
#src ps2 = Flux.params(Flux_nn2) ## A copy for the time benchmarking
# We do the training of the Flux model...
[Flux.train!(fluxloss, ps, nndata, Flux.ADAM(0.001, (0.9, 0.8))) for i in 1:opt_epochs]
#src # ..and we benchmark it..
#src # ```
#src # @btime begin for i in 1:bestEpoch Flux.train!(loss, ps2, nndata, Flux.ADAM(0.001, (0.9, 0.8))) end end
#src # 690.231 ms (3349901 allocations: 266.76 MiB)
#src # ```
#src #src # Quite surprisingly, Flux training seems a bit slow. The actual results seem to depend on the actual hardware, and by default Flux seems not to use multi-threading. While I suspect Flux scales better with larger networks and/or data, for these small examples on my laptop it is still a bit slower than BetaML even on a single thread.
#src # On this small example the speed of Flux is of the same order as BetaML (the actual difference seems to depend on the specific RNG seed and hardware); however I suspect that Flux scales much better with larger networks and/or data.
# We obtain the predictions...
ŷtrainf = @pipe Flux_nn(xtrain_scaled')' .* 1000;
ŷtestf = @pipe Flux_nn(xtest_scaled')' .* 1000;
# ..and we compute the relative mean errors..
(rme_train, rme_test) = relative_mean_error.([ŷtrainf,ŷtestf],[ytrain,ytest])
push!(results,["NN (Flux.jl)",rme_train,rme_test]);
#src 0.102, 0.171
# .. finding an error not significantly different from the one obtained from BetaML.Nn.
#-
@test rme_test < 0.3 #src
# Plots:
scatter(ytrain,ŷtrainf,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in training period (Flux.NN)")
#-
scatter(ytest,ŷtestf,xlabel="daily rides",ylabel="est. daily rides",label=nothing,title="Est vs. obs in testing period (Flux.NN)")
#-
ŷtrainfullf = vcat(ŷtrainf,fill(missing,ntest))
ŷtestfullf = vcat(fill(missing,ntrain), ŷtestf)
plot(data[:,:dteday],[data[:,:cnt] ŷtrainfullf ŷtestfullf], label=["obs" "train" "test"], legend=:topleft, ylabel="daily rides", title="Daily bike sharing demand observed/estimated across the\n whole 2-years period (Flux.NN)")
#-
stc = 620
endc = size(x,1)
plot(data[stc:endc,:dteday],[data[stc:endc,:cnt] ŷtestfullf[stc:endc]], label=["obs" "val" "test"], legend=:bottomleft, ylabel="Daily rides", title="Focus on the testing period (Flux.NN)")
# ### Conclusions of Neural Network models
# If we strive for the most accurate predictions, deep neural networks are usually the best choice. However they are computationally expensive, so with limited resources we may get better results by fine-tuning and running many repetitions of "simpler" decision trees or even random forest models than a large neural network with insufficient hyper-parameter tuning.
# Also, we should consider that decision trees/random forests are much simpler to work with.
# That said, specialised neural network libraries, like Flux, allow the use of GPUs and specialised hardware, letting neural networks scale to very large datasets.
# Still, for small and medium datasets, BetaML provides simpler yet customisable solutions that are accurate and fast.
# ## GMM-based regressors
println(now(), " ", "- GMM regressor..." ) #src
# BetaML 0.8 introduces new regression algorithms based on Gaussian Mixture Models.
# Specifically, there are two variants available, `GaussianMixtureRegressor2` and `GaussianMixtureRegressor`; this example uses `GaussianMixtureRegressor`.
# As for neural networks, they work on numerical data only, so we reuse the datasets we prepared for the neural networks.
# As usual we first define the model.
m = GaussianMixtureRegressor(rng=copy(AFIXEDRNG),verbosity=NONE)
# !!! info
#     We disabled autotune here, as this code is run by GitHub continuous integration servers on each code update, and GitHub servers seem to have some strange problem with it, taking almost 4 hours instead of a few seconds on my machine.
#src # @btime begin fit!(m,xtrainScaled,ytrainScaled); reset!(m) end
#src # 13.584 ms (103690 allocations: 25.08 MiB)
# We then fit the model to the training data..
ŷtrainGMM_unscaled = fit!(m,xtrain_scaled,ytrain_scaled)
# And we predict...
ŷtrainGMM = ŷtrainGMM_unscaled .* 1000;
ŷtestGMM = predict(m,xtest_scaled) .* 1000;
(rme_train, rme_test) = relative_mean_error.([ŷtrainGMM,ŷtestGMM],[ytrain,ytest])
push!(results,["GMM",rme_train,rme_test]);
# ## Summary
# This is the summary of the results (train and test relative mean error) we had trying to predict the daily bike sharing demand, given weather and calendar information:
println(results)
# You may ask how stable these results are, and how much they depend on the specific RNG seed. We re-evaluated the whole script a couple of times, changing the random seed (to `1000` and `10000`):
# | Model | Train rme1 | Test rme1 | Train rme2 | Test rme2 | Train rme3 | Test rme3 |
# |:-------------------- |:----------:|:---------:|:----------:|:---------:|:----------:|:---------:|
# | DT | 0.1366960 | 0.154720 | 0.0233044 | 0.249329 | 0.0621571 | 0.161657 |
# | RF | 0.0421267 | 0.180186 | 0.0535776 | 0.136920 | 0.0386144 | 0.141606 |
# | RF (DecisionTree.jl) | 0.0230439 | 0.235823 | 0.0801040 | 0.243822 | 0.0168764 | 0.219011 |
# | NN | 0.1604000 | 0.169952 | 0.1091330 | 0.121496 | 0.1481440 | 0.150458 |
# | NN (Flux.jl) | 0.0931161 | 0.166228 | 0.0920796 | 0.167047 | 0.0907810 | 0.122469 |
# | GaussianMixtureRegressor* | 0.1432800 | 0.293891 | 0.1380340 | 0.295470 | 0.1477570 | 0.284567 |
# * GMM is a deterministic model; the variations are due to the different random sampling in choosing the best hyperparameters
# Neural networks can be more precise than random forest models, but are more computationally expensive (and tricky to set up). When we compare BetaML with the algorithm-specific leading packages, we found similar results in terms of accuracy, but often the leading packages are better optimised and run more efficiently (but sometimes at the cost of being less versatile).
# GMM-based regressors are very computationally cheap and a good compromise if accuracy can be traded off for performance.
println(now(), " ", "- DONE regression tutorial..." ) #src
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 12452 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
Api
The Api Module (currently v2)
This module includes the API shared across the various BetaML submodules, i.e. names used by more than one submodule.
Modules are free to use other functions, but these are defined here to avoid name conflicts and to allow multiple dispatch to handle them.
For a user-perspective overall description of the BetaML API see the page `API V2` → [`Introduction for users`](@ref api_usage), while for the implementation of the API see the page `API V2` → [`For developers`](@ref api_implementation)
"""
module Api
using StableRNGs, DocStringExtensions, Random
import Base.show
import JLD2
export Verbosity, NONE, LOW, STD, HIGH, FULL,
FIXEDSEED, FIXEDRNG,
BetaMLModel, BetaMLSupervisedModel, BetaMLUnsupervisedModel,
BetaMLOptionsSet, BML_options, BetaMLHyperParametersSet, BetaMLLearnableParametersSet,
AutoTuneMethod,
predict, inverse_predict, fit!, fit_ex, info, reset!, reset_ex, parameters,hyperparameters, options, sethp!,
model_save, model_load
abstract type BetaMLModel end
abstract type BetaMLSupervisedModel <: BetaMLModel end
abstract type BetaMLUnsupervisedModel <: BetaMLModel end
abstract type BetaMLOptionsSet end
abstract type BetaMLHyperParametersSet end
abstract type BetaMLLearnableParametersSet end
abstract type AutoTuneMethod end
"""
$(TYPEDEF)
Many models and functions accept a `verbosity` parameter.
Choose between: `NONE`, `LOW`, `STD` [default], `HIGH` and `FULL`.
"""
@enum Verbosity NONE=0 LOW=10 STD=20 HIGH=30 FULL=40
"""
const FIXEDSEED
Fixed seed to allow reproducible results.
This is the seed used to obtain the same results under unit tests.
Use it with:
- `myAlgorithm(;rng=MyChoosenRNG(FIXEDSEED))` # always produce the same sequence of results on each run of the script ("pulling" from the same rng object on different calls)
- `myAlgorithm(;rng=copy(MyChoosenRNG(FIXEDSEED)))` # always produce the same result (new rng object on each call)
"""
const FIXEDSEED = 123
"""
$(TYPEDEF)
Fixed RNG to allow reproducible results
Use it with:
- `myAlgorithm(;rng=FIXEDRNG)` # always produce the same sequence of results on each run of the script ("pulling" from the same rng object on different calls)
- `myAlgorithm(;rng=copy(FIXEDRNG))` # always produce the same result (new rng object on each function call)
"""
const FIXEDRNG = StableRNG(FIXEDSEED)
"""
$(TYPEDEF)
A struct defining the options used by default by the algorithms that do not override it with their own option sets.
# Fields:
$(TYPEDFIELDS)
# Notes:
- even if a model doesn't override `BML_options`, it may not use all its options; for example deterministic models would not make use of the `rng` parameter. Passing such parameters in these cases would simply have no influence.
# Example:
```
julia> options = BML_options(cache=false,descr="My model")
```
"""
Base.@kwdef mutable struct BML_options <: BetaMLOptionsSet
"Cache the results of the fitting stage, as to allow predict(mod) [default: `true`]. Set it to `false` to save memory for large data."
cache::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"0ption for hyper-parameters autotuning [def: `false`, i.e. not autotuning performed]. If activated, autotuning is performed on the first `fit!()` call. Controll auto-tuning trough the option `tunemethod` (see the model hyper-parameters)"
autotune::Bool = false
"The verbosity level to be used in training or prediction: `NONE`, `LOW`, `STD` [default], `HIGH` or `FULL`"
verbosity::Verbosity = STD
"Random Number Generator (see [`?FIXEDSEED`](@ref FIXEDSEED)) [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG = Random.GLOBAL_RNG
end
"""
fit!(m::BetaMLModel,X,[y])
Fit ("train") a `BetaMLModel` (i.e. learn the algorithm's parameters) based on data, either only features or features and labels.
Each specific model implements its own version of `fit!(m,X,[Y])`, but the usage is consistent across models.
# Notes:
- For online algorithms, i.e. models that support updating of the learned parameters with new data, `fit!` can be repeated as new data arrive, although not all algorithms guarantee that training one record at a time is equivalent to training all the records at once.
- If the model has been trained while having the `cache` option set to `true` (the default), `fit!` returns `ŷ` instead of `nothing`, effectively making it behave like a _fit-and-transform_ function.
- In Python and other languages that don't allow the exclamation mark within the function name, use `fit_ex(⋅)` instead of `fit!(⋅)`
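# Example
A minimal sketch of the fit-and-transform behaviour described above, using a `Scaler` model on some hypothetical data `x` and `x2`:
```
julia> m = Scaler()
julia> x_scaled  = fit!(m, x)      # as `cache` is true by default, fit! directly returns the transformation of x
julia> x2_scaled = predict(m, x2)  # further data can then be transformed with predict
```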
"""
fit!(::BetaMLModel,args...;kargs...) = nothing
fit_ex(m::BetaMLModel,args...;kargs...) = fit!(m,args...;kargs...) # version for Python interface that doesn't like the exclamation mark
"""
predict(m::BetaMLModel,[X])
Predict new information (including transformation) based on a fitted `BetaMLModel`, possibly applied to new features when the algorithm generalises to new data.
# Notes:
- As a convenience, if the model has been trained while having the `cache` option set to `true` (the default), the predictions associated with the last training of the model are retained in the model object and can be retrieved simply with `predict(m)`.
"""
function predict(m::BetaMLModel)
if m.fitted
return m.cres
else
if m.opt.verbosity > NONE
@warn "Trying to predict an unfitted model. Run `fit!(model,X,[Y])` before!"
end
return nothing
end
end
"""
inverse_predict(m::BetaMLModel,X)
Given a model `m` that, fitted on `x`, produces `xnew`, it takes `xnew` to return (possibly an approximation of) `x`.
For example, when `OneHotEncoder` is fitted with a subset of the possible categories and the `handle_unknown` option is set to `infrequent`, `inverse_transform` will aggregate all the _other_ categories as specified in `other_categories_name`.
# Notes:
- Implemented only in a few models.
"""
inverse_predict(m::BetaMLModel,X) = nothing
"""
$(TYPEDSIGNATURES)
Return a string-keyed dictionary of "additional" information stored during model fitting.
"""
function info(m::BetaMLModel)
return m.info
end
"""
reset!(m::BetaMLModel)
Reset the parameters of a trained model.
Notes:
- In Python and other languages that don't allow the exclamation mark within the function name, use `reset_ex(⋅)` instead of `reset!(⋅)`
"""
function reset!(m::BetaMLModel)
m.par = nothing
m.cres = nothing
m.info = Dict{Symbol,Any}()
m.fitted = false
return nothing
end
reset_ex(m::BetaMLModel,args...;kargs...) = reset!(m,args...;kargs...) # version for Python interface that doesn't like the exclamation mark
function show(io::IO, ::MIME"text/plain", m::BetaMLModel)
if m.fitted == false
print(io,"A $(typeof(m)) BetaMLModel (unfitted)")
else
print(io,"A $(typeof(m)) BetaMLModel (fitted)")
end
end
function show(io::IO, m::BetaMLModel)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"A $(typeof(m)) BetaMLModel (unfitted)")
else
println(io,"A $(typeof(m)) BetaMLModel (fitted)")
println(io,"Output of `info(model)`:")
for (k,v) in info(m)
print(io,"- ")
print(io,k)
print(io,":\t")
println(io,v)
end
end
end
function show(io::IO, ::MIME"text/plain", m::BetaMLHyperParametersSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLHyperParametersSet struct)")
for f in fields
println(io,"- ",f,": ",getfield(m,f))
end
end
function show(io::IO, m::BetaMLHyperParametersSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLHyperParametersSet struct)")
for f in fields
println(io,"- ",f, " (", typeof(getfield(m,f)),"): ",getfield(m,f))
end
end
function show(io::IO, ::MIME"text/plain", m::BetaMLLearnableParametersSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLLearnableParametersSet struct)")
for f in fields
println(io,"- ",f,": ",getfield(m,f))
end
end
function show(io::IO, m::BetaMLLearnableParametersSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLLearnableParametersSet struct)")
for f in fields
println(io,"- ",f, " (", typeof(getfield(m,f)),"): ",getfield(m,f))
end
end
function show(io::IO, ::MIME"text/plain", m::BetaMLOptionsSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLOptionsSet struct)")
for f in fields
println(io,"- ",f,": ",getfield(m,f))
end
end
function show(io::IO, m::BetaMLOptionsSet)
fields = fieldnames(typeof(m))
println(io,"$(typeof(m)) (a BetaMLOptionsSet struct)")
for f in fields
println(io,"- ",f, " (", typeof(getfield(m,f)),"): ",getfield(m,f))
end
end
#partition() = nothing
"""
parameters(m::BetaMLModel)
Returns the learned parameters of a BetaML model.
!!! warning
    The returned object is a reference, so if it is modified, the corresponding object within the model will change too.
"""
parameters(m::BetaMLModel) = m.par
"""
hyperparameters(m::BetaMLModel)
Returns the hyperparameters of a BetaML model. See also [`?options`](@ref options) for the parameters that do not directly affect learning.
!!! warning
    The returned object is a reference, so if it is modified, the corresponding object within the model will change too.
"""
hyperparameters(m::BetaMLModel) = m.hpar
"""
$(TYPEDSIGNATURES)
Set the hyperparameters of model `m` as specified in the `hp` dictionary.
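# Example
A minimal sketch, using a `DecisionTreeEstimator`; the valid keys are the field names of the specific model's hyper-parameters struct (see [`hyperparameters`](@ref)):
```
julia> mod = DecisionTreeEstimator()
julia> sethp!(mod, Dict("max_depth" => 4, "min_records" => 3))
```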
"""
function sethp!(m::BetaMLModel,hp::Dict)
hpobj = hyperparameters(m)
for (k,v) in hp
setproperty!(hpobj,Symbol(k),v)
end
end
"""
options(m::BetaMLModel)
Returns the non-learning related options of a BetaML model. See also [`?hyperparameters`](@ref hyperparameters) for the parameters that directly affect learning.
!!! warning
    The returned object is a reference, so if it is modified, the corresponding object within the model will change too.
"""
options(m::BetaMLModel) = m.opt
#function model_save(filename::AbstractString;names...)
# JLD2.jldsave(filename;names...)
#end
"""
model_save(filename::AbstractString,overwrite_file::Bool=false;kwargs...)
Allows saving one or more BetaML models (whether fitted or not), optionally specifying a name for each of them.
# Parameters:
- `filename`: Name of the destination file
- `overwrite_file`: Whether to overwrite the file if it already exists, or preserve it (for the objects other than those that are going to be saved) [def: `false`, i.e. preserve the file]
- `kwargs`: model objects to be saved, optionally associated with a different name to save them with (e.g. `mod1Name=mod1,mod2`)
# Notes:
- If an object with the given name already exists in the destination JLD2 file it will be overwritten.
- If the file exists, but it is not a JLD2 file and the option `overwrite_file` is set to `false`, an error will be raised.
- Use the semicolon `;` to separate the filename from the model(s) to save
- For further options see the documentation of the [`JLD2`](https://juliaio.github.io/JLD2.jl/stable/) package
# Examples
```
julia> model_save("fittedModels.jl"; mod1Name=mod1,mod2)
```
"""
function model_save(filename,overwrite_file::Bool=false;kargs...)
flag = overwrite_file ? "w" : "a+"
JLD2.jldopen(filename, flag) do f
for (k,v) in kargs
ks = string(k)
if ks in keys(f)
delete!(f, ks)
end
f[ks] = v
end
end
end
"""
model_load(filename::AbstractString)
model_load(filename::AbstractString,args::AbstractString...)
Load from file one or more BetaML models (whether fitted or not).
# Notes:
- If no model names to retrieve are specified, it returns a dictionary keyed with the model names
- If multiple models are requested, a tuple is returned
- For further options see the documentation of the function `load` of the [`JLD2`](https://juliaio.github.io/JLD2.jl/stable/) package
# Examples:
```
julia> models = model_load("fittedModels.jl")
julia> mod1 = model_load("fittedModels.jl","mod1Name")
julia> (mod1,mod2) = model_load("fittedModels.jl","mod1Name", "mod2")
```
"""
const model_load = JLD2.load
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3558 | """
BetaML
The Beta Machine Learning toolkit
https://github.com/sylvaticus/BetaML.jl
Licence is MIT
For documentation, please look at the individual modules or online.
While the code is organised in different sub-modules, all objects are re-exported at the BetaML root level, hence the functionality of this package can be accessed by simply `using BetaML` and then employing the required function directly.
"""
module BetaML
import MLJModelInterface
const MMI = MLJModelInterface
#import StatsBase
using ForceImport, Reexport, PrecompileTools
include("Api.jl") # Shared names across modules
include("Utils/Utils.jl") # Utility function
include("Stats/Stats.jl") # Classical statistical functions
include("Nn/Nn.jl") # Neural Networks
include("Perceptron/Perceptron.jl") # Perceptron-like algorithms
include("Trees/Trees.jl") # Decision Trees and ensembles (Random Forests)
include("Clustering/Clustering.jl") # Clustering (hard) algorithms
include("GMM/GMM.jl") # GMM-based learners (clustering, fitter, regression)
include("Imputation/Imputation.jl") # (Missing) imputation algorithms
include("Utils/Utils_extra.jl") # Utility functions that depend on some BetaML functionality. Set them here to avoid recursive dependence
include("Bmlj/Bmlj.jl") # MLJ Interface module
# "Merging" of the modules...
@force using .Api
@reexport using .Api
@force using .Utils
@reexport using .Utils
@force using .Stats
@reexport using .Stats
@force using .Nn
@reexport using .Nn
@force using .Perceptron
@reexport using .Perceptron
@force using .Trees
@reexport using .Trees
@force using .Clustering
@reexport using .Clustering
@force using .GMM
@reexport using .GMM
@force using .Imputation
@reexport using .Imputation
import .Bmlj # some MLJ models have the same name as BetaML models, set them in a separate interface submodule
# ------------------------------------------------------------------------------
#MLJ interface...
const MLJ_PERCEPTRON_MODELS = (Bmlj.PerceptronClassifier, Bmlj.KernelPerceptronClassifier, Bmlj.PegasosClassifier)
const MLJ_TREES_MODELS = (Bmlj.DecisionTreeClassifier, Bmlj.DecisionTreeRegressor, Bmlj.RandomForestClassifier, Bmlj.RandomForestRegressor)
const MLJ_CLUSTERING_MODELS = (Bmlj.KMeansClusterer, Bmlj.KMedoidsClusterer, Bmlj.GaussianMixtureClusterer)
const MLJ_IMPUTERS_MODELS = (Bmlj.SimpleImputer, Bmlj.GaussianMixtureImputer, Bmlj.RandomForestImputer,Bmlj.GeneralImputer) # these are the name of the MLJ models, not the BetaML ones...
const MLJ_NN_MODELS = (Bmlj.NeuralNetworkRegressor,Bmlj.MultitargetNeuralNetworkRegressor, Bmlj.NeuralNetworkClassifier)
const MLJ_OTHER_MODELS = (Bmlj.GaussianMixtureRegressor,Bmlj.MultitargetGaussianMixtureRegressor,Bmlj.AutoEncoder)
const MLJ_INTERFACED_MODELS = (MLJ_PERCEPTRON_MODELS..., MLJ_TREES_MODELS..., MLJ_CLUSTERING_MODELS..., MLJ_IMPUTERS_MODELS..., MLJ_NN_MODELS..., MLJ_OTHER_MODELS...)
#function __init__()
MMI.metadata_pkg.(MLJ_INTERFACED_MODELS,
name = "BetaML",
uuid = "024491cd-cc6b-443e-8034-08ea7eb7db2b", # see your Project.toml
url = "https://github.com/sylvaticus/BetaML.jl", # URL to your package repo
julia = true, # is it written entirely in Julia?
license = "MIT", # your package license
is_wrapper = false, # does it wrap around some other package?
)
#end
include("Precompilation.jl")
end # module
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1404 | @setup_workload begin
# Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the
# precompile file and potentially make loading faster.
x = [1 3 10; 0.8 2.8 8; 1.2 3.2 12; 2 6 20; 1.8 5 18; 2.2 7 22; 0.5 1.5 5; 0.45 1.3 4; 0.55 1.8 6]
y = [0.5, 0.45, 0.55, 1, 0.9, 1.1, 0.25, 0.23, 0.27]
ycat = ["b","b","b","a","a","a","c","c","c"]
@compile_workload begin
# all calls in this block will be precompiled, regardless of whether
# they belong to your package or not (on Julia 1.8 and higher)
@info "Beginning BetaML PrecompileTool workflow...."
yoh = fit!(OneHotEncoder(verbosity=NONE),ycat)
fit!(NeuralNetworkEstimator(verbosity=NONE,epochs=10),x,y)
fit!(NeuralNetworkEstimator(verbosity=NONE,epochs=10),x,yoh)
fit!(RandomForestEstimator(verbosity=NONE,n_trees=5),x,y)
fit!(RandomForestEstimator(verbosity=NONE,n_trees=5),x,ycat)
fit!(PerceptronClassifier(verbosity=NONE,epochs=10),x,ycat)
fit!(KernelPerceptronClassifier(verbosity=NONE,epochs=10),x,ycat)
fit!(PegasosClassifier(verbosity=NONE,epochs=10),x,ycat)
fit!(KMeansClusterer(verbosity=NONE),x)
fit!(KMedoidsClusterer(verbosity=NONE),x)
fit!(GaussianMixtureClusterer(verbosity=NONE,tol=0.01),x)
@info "...done BetaML PrecompileTool workflow."
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1946 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
# MLJ interface for BetaML models
In this module we define the interface of several BetaML models so that they can be used within the [MLJ framework](https://github.com/alan-turing-institute/MLJ.jl).
Note that MLJ models (whose name could be the same as the underlying BetaML model) are not exported. You can access them with `BetaML.Bmlj.ModelXYZ`.
"""
module Bmlj
using Random, LinearAlgebra, Statistics
using CategoricalArrays, DocStringExtensions
import MLJModelInterface # It seems that having done this in the top module is not enough
const MMI = MLJModelInterface # We need to repeat it here
#@force using ..Nn
#import ..Api
using ..Api
import ..Api: AutoTuneMethod, fit!
import ..BetaML
import ..Utils # can't using it as it exports some same-name models
import ..Perceptron
import ..Nn: AbstractLayer, ADAM, SGD, NeuralNetworkEstimator, OptimisationAlgorithm, DenseLayer, NN
import ..Utils: AbstractRNG, squared_cost, SuccessiveHalvingSearch, radial_kernel
import ..GMM
export mljverbosity_to_betaml_verbosity
"""
$(TYPEDSIGNATURES)
Convert any integer (short scale) to one of the defined BetaML verbosity levels.
Currently "steps" are 0, 1, 2 and 3
"""
function mljverbosity_to_betaml_verbosity(i::Integer)
if i <= 0
return NONE
elseif i == 1
return LOW
elseif i == 2
return STD
elseif i == 3
return HIGH
else
return FULL
end
end
include("Perceptron_mlj.jl") # Perceptron-like algorithms
include("Trees_mlj.jl") # Decision Trees and ensembles (Random Forests)
include("Clustering_mlj.jl") # Clustering (hard) algorithms
include("GMM_mlj.jl") # GMM-based learners (clustering, fitter, regression)
include("Imputation_mlj.jl") # Imputation models
include("Nn_mlj.jl") # Neural network models
include("Utils_mlj.jl") # Various transformers/encorders
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 9554 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for hard clustering models
export KMeansClusterer, KMedoidsClusterer
# ------------------------------------------------------------------------------
# Model Structure declarations..
"""
$(TYPEDEF)
The classical KMeansClusterer clustering algorithm, from the Beta Machine Learning Toolkit (BetaML).
# Parameters:
$(TYPEDFIELDS)
# Notes:
- data must be numerical
- online fitting (re-fitting with new data) is supported
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load KMeansClusterer pkg = "BetaML" verbosity=0
BetaML.Clustering.KMeansClusterer
julia> model = modelType()
KMeansClusterer(
n_classes = 3,
dist = BetaML.Clustering.var"#34#36"(),
initialisation_strategy = "shuffle",
initial_representatives = nothing,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(KMeansClusterer(n_classes = 3, …), …).
julia> classes_est = predict(mach, X);
julia> hcat(y,classes_est)
150×2 CategoricalArrays.CategoricalArray{Union{Int64, String},2,UInt32}:
"setosa" 2
"setosa" 2
"setosa" 2
⋮
"virginica" 3
"virginica" 3
"virginica" 1
```
"""
mutable struct KMeansClusterer <: MMI.Unsupervised
"Number of classes to discriminate the data [def: 3]"
n_classes::Int64
"Function to employ as distance. Default to the Euclidean distance. Can be one of the predefined distances (`l1_distance`, `l2_distance`, `l2squared_distance`), `cosine_distance`), any user defined function accepting two vectors and returning a scalar or an anonymous function with the same characteristics. Attention that, contrary to `KMedoidsClusterer`, the `KMeansClusterer` algorithm is not guaranteed to converge with other distances than the Euclidean one."
dist::Function
"""
The computation method of the vector of the initial representatives.
One of the following:
- "random": randomly in the X space
- "grid": using a grid approach
- "shuffle": selecting randomly within the available points [default]
- "given": using a provided set of initial representatives provided in the `initial_representatives` parameter
"""
initialisation_strategy::String
"Provided (K x D) matrix of initial representatives (useful only with `initialisation_strategy=\"given\"`) [default: `nothing`]"
initial_representatives::Union{Nothing,Matrix{Float64}}
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
KMeansClusterer(;
n_classes = 3,
    dist = (x,y) -> norm(x-y),
initialisation_strategy = "shuffle",
initial_representatives = nothing,
rng = Random.GLOBAL_RNG,
) = KMeansClusterer(n_classes,dist,initialisation_strategy,initial_representatives,rng)
"""
$(TYPEDEF)
The K-medoids clustering algorithm with customisable distance function, from the Beta Machine Learning Toolkit (BetaML).
Similar to K-Means, but the "representatives" (the medoids) are guaranteed to be one of the training points. The algorithm works with any arbitrary distance measure.
# Parameters:
$(TYPEDFIELDS)
# Notes:
- data must be numerical
- online fitting (re-fitting with new data) is supported
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load KMedoidsClusterer pkg = "BetaML" verbosity=0
BetaML.Clustering.KMedoidsClusterer
julia> model = modelType()
KMedoidsClusterer(
n_classes = 3,
dist = BetaML.Clustering.var"#39#41"(),
initialisation_strategy = "shuffle",
initial_representatives = nothing,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(KMedoidsClusterer(n_classes = 3, …), …).
julia> classes_est = predict(mach, X);
julia> hcat(y,classes_est)
150×2 CategoricalArrays.CategoricalArray{Union{Int64, String},2,UInt32}:
"setosa" 3
"setosa" 3
"setosa" 3
⋮
"virginica" 1
"virginica" 1
"virginica" 2
```
"""
mutable struct KMedoidsClusterer <: MMI.Unsupervised
"Number of classes to discriminate the data [def: 3]"
n_classes::Int64
"Function to employ as distance. Default to the Euclidean distance. Can be one of the predefined distances (`l1_distance`, `l2_distance`, `l2squared_distance`), `cosine_distance`), any user defined function accepting two vectors and returning a scalar or an anonymous function with the same characteristics."
dist::Function
"""
The computation method of the vector of the initial representatives.
One of the following:
- "random": randomly in the X space
- "grid": using a grid approach
- "shuffle": selecting randomly within the available points [default]
- "given": using a provided set of initial representatives provided in the `initial_representatives` parameter
"""
initialisation_strategy::String
"Provided (K x D) matrix of initial representatives (useful only with `initialisation_strategy=\"given\"`) [default: `nothing`]"
initial_representatives::Union{Nothing,Matrix{Float64}}
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
KMedoidsClusterer(;
n_classes = 3,
dist = (x,y) -> norm(x-y),
initialisation_strategy = "shuffle",
initial_representatives = nothing,
rng = Random.GLOBAL_RNG,
) = KMedoidsClusterer(n_classes,dist,initialisation_strategy,initial_representatives,rng)
# ------------------------------------------------------------------------------
# Fit functions...
function MMI.fit(m::Union{KMeansClusterer,KMedoidsClusterer}, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
# Using low level API here. We could switch to APIV2...
typeof(verbosity) <: Integer || error("Verbosity must be a integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
if typeof(m) == KMeansClusterer
(assignedClasses,representatives) = BetaML.Clustering.kmeans(x,m.n_classes,dist=m.dist,initialisation_strategy=m.initialisation_strategy,initial_representatives=m.initial_representatives,rng=m.rng,verbosity=verbosity)
else
(assignedClasses,representatives) = BetaML.Clustering.kmedoids(x,m.n_classes,dist=m.dist,initialisation_strategy=m.initialisation_strategy,initial_representatives=m.initial_representatives,rng=m.rng, verbosity=verbosity)
end
cache=nothing
report=nothing
return ((classes=assignedClasses,centers=representatives,distanceFunction=m.dist), cache, report)
end
MMI.fitted_params(model::Union{KMeansClusterer,KMedoidsClusterer}, fitresults) = (centers=fitresults[2], cluster_labels=CategoricalArrays.categorical(fitresults[1]))
# ------------------------------------------------------------------------------
# Transform functions...
""" fit(m::KMeansClusterer, fitResults, X) - Given a fitted clustering model and some observations, return the distances to each centroids """
function MMI.transform(m::Union{KMeansClusterer,KMedoidsClusterer}, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
(N,D) = size(x)
nCl = size(fitResults.centers,1)
distances = Array{Float64,2}(undef,N,nCl)
for n in 1:N
for c in 1:nCl
distances[n,c] = fitResults.distanceFunction(x[n,:],fitResults[2][c,:])
end
end
return MMI.table(distances)
end
# ------------------------------------------------------------------------------
# Predict functions...
""" predict(m::KMeansClusterer, fitResults, X) - Given a fitted clustering model and some observations, predict the class of the observation"""
function MMI.predict(m::Union{KMeansClusterer,KMedoidsClusterer}, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
(N,D) = size(x)
nCl = size(fitResults.centers,1)
distances = MMI.matrix(MMI.transform(m, fitResults, X))
mindist = argmin(distances,dims=2)
assignedClasses = [Tuple(mindist[n,1])[2] for n in 1:N]
return CategoricalArray(assignedClasses,levels=1:nCl)
end
# ------------------------------------------------------------------------------
# Model metadata for registration in MLJ...
MMI.metadata_model(KMeansClusterer,
input_scitype = Union{ # scitype of the inputs
MMI.Table(MMI.Continuous),
AbstractMatrix{<: MMI.Continuous},
},
output_scitype = MMI.Table(MMI.Continuous), # scitype of the output of `transform`
target_scitype = AbstractArray{<:MMI.Multiclass}, # scitype of the output of `predict`
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.KMeansClusterer"
)
MMI.metadata_model(KMedoidsClusterer,
input_scitype = Union{ # scitype of the inputs
MMI.Table(MMI.Continuous),
AbstractMatrix{<: MMI.Continuous},
},
output_scitype = MMI.Table(MMI.Continuous), # scitype of the output of `transform`
target_scitype = AbstractArray{<:MMI.Multiclass}, # scitype of the output of `predict`
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.KMedoidsClusterer"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 21092 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for GMM based models
export GaussianMixtureClusterer, GaussianMixtureRegressor, MultitargetGaussianMixtureRegressor
# ------------------------------------------------------------------------------
# Model Structure declarations..
"""
$(TYPEDEF)
An Expectation-Maximisation clustering algorithm with customisable mixtures, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load GaussianMixtureClusterer pkg = "BetaML" verbosity=0
BetaML.GMM.GaussianMixtureClusterer
julia> model = modelType()
GaussianMixtureClusterer(
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = BetaML.GMM.DiagonalGaussian{Float64}[BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing)],
tol = 1.0e-6,
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = 9223372036854775807,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(GaussianMixtureClusterer(n_classes = 3, …), …).
Iter. 1: Var. of the post 10.800150114964184 Log-likelihood -650.0186451891216
julia> classes_est = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, Int64, UInt32, Float64}:
UnivariateFinite{Multiclass{3}}(1=>1.0, 2=>4.17e-15, 3=>2.1900000000000003e-31)
UnivariateFinite{Multiclass{3}}(1=>1.0, 2=>1.25e-13, 3=>5.87e-31)
UnivariateFinite{Multiclass{3}}(1=>1.0, 2=>4.5e-15, 3=>1.55e-32)
UnivariateFinite{Multiclass{3}}(1=>1.0, 2=>6.93e-14, 3=>3.37e-31)
⋮
UnivariateFinite{Multiclass{3}}(1=>5.39e-25, 2=>0.0167, 3=>0.983)
UnivariateFinite{Multiclass{3}}(1=>7.5e-29, 2=>0.000106, 3=>1.0)
UnivariateFinite{Multiclass{3}}(1=>1.6e-20, 2=>0.594, 3=>0.406)
```
"""
mutable struct GaussianMixtureClusterer <: MMI.Unsupervised
"Number of mixtures (latent classes) to consider [def: 3]"
n_classes::Int64
"Initial probabilities of the categorical distribution (n_classes x 1) [default: `[]`]"
initial_probmixtures::AbstractArray{Float64,1}
"""An array (of length `n_classes`) of the mixtures to employ (see the [`?GMM`](@ref GMM) module).
Each mixture object can be provided with or without its parameters (e.g. mean and variance for the gaussian ones). Fully qualified mixtures are useful only if the `initialisation_strategy` parameter is set to \"given\".
This parameter can also be given simply in terms of a _type_. In this case it is automatically extended to a vector of `n_classes` mixtures of the specified type.
Note that mixing of different mixture types is not currently supported.
[def: `[DiagonalGaussian() for i in 1:n_classes]`]"""
mixtures::Union{Type,Vector{<: BetaML.GMM.AbstractMixture}}
"Tolerance to stop the algorithm [default: 10^(-6)]"
tol::Float64
"Minimum variance for the mixtures [default: 0.05]"
minimum_variance::Float64
"Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance (see notes)."
minimum_covariance::Float64
"""
The computation method of the vector of the initial mixtures.
One of the following:
- "grid": using a grid approach
- "given": using the mixture provided in the fully qualified `mixtures` parameter
- "kmeans": use first kmeans (itself initialised with a "grid" strategy) to set the initial mixture centers [default]
Note that currently "random" and "shuffle" initialisations are not supported in gmm-based algorithms.
"""
initialisation_strategy::String
"Maximum number of iterations [def: `typemax(Int64)`, i.e. ∞]"
maximum_iterations::Int64
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
function GaussianMixtureClusterer(;
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = [BetaML.GMM.DiagonalGaussian() for i in 1:n_classes],
tol = 10^(-6),
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = typemax(Int64),
rng = Random.GLOBAL_RNG,
)
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:n_classes]
end
return GaussianMixtureClusterer(n_classes,initial_probmixtures,mixtures, tol, minimum_variance, minimum_covariance,initialisation_strategy,maximum_iterations,rng)
end
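# The constructor above accepts `mixtures` either as an explicit vector of mixture objects or as a
# bare (parametric) mixture type, which is then expanded to `n_classes` instances. A hedged sketch
# (hyper-parameter values are illustrative):
#=
m1 = GaussianMixtureClusterer(n_classes=4, mixtures=BetaML.GMM.DiagonalGaussian)   # type, expanded to 4 mixtures
m2 = GaussianMixtureClusterer(mixtures=[BetaML.GMM.FullGaussian() for i in 1:3])   # explicit vector
=#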
"""
$(TYPEDEF)
A non-linear regressor derived from fitting the data on a probabilistic model (Gaussian Mixture Model). Relatively fast but generally not very precise, except for data with a structure matching the chosen underlying mixture.
This is the single-target version of the model. If you want to predict several labels (y) at once, use the MLJ model [`MultitargetGaussianMixtureRegressor`](@ref).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> modelType = @load GaussianMixtureRegressor pkg = "BetaML" verbosity=0
BetaML.GMM.GaussianMixtureRegressor
julia> model = modelType()
GaussianMixtureRegressor(
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = BetaML.GMM.DiagonalGaussian{Float64}[BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing)],
tol = 1.0e-6,
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = 9223372036854775807,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(GaussianMixtureRegressor(n_classes = 3, …), …).
Iter. 1: Var. of the post 21.74887448784976 Log-likelihood -21687.09917379566
julia> ŷ = predict(mach, X)
506-element Vector{Float64}:
24.703442835305577
24.70344283512716
⋮
17.172486989759676
17.172486989759644
```
"""
mutable struct GaussianMixtureRegressor <: MMI.Deterministic
"Number of mixtures (latent classes) to consider [def: 3]"
n_classes::Int64
"Initial probabilities of the categorical distribution (n_classes x 1) [default: `[]`]"
initial_probmixtures::Vector{Float64}
"""An array (of length `n_classes``) of the mixtures to employ (see the [`?GMM`](@ref GMM) module).
Each mixture object can be provided with or without its parameters (e.g. mean and variance for the gaussian ones). Fully qualified mixtures are useful only if the `initialisation_strategy` parameter is set to \"gived\"`
This parameter can also be given symply in term of a _type_. In this case it is automatically extended to a vector of `n_classes`` mixtures of the specified type.
Note that mixing of different mixture types is not currently supported.
[def: `[DiagonalGaussian() for i in 1:n_classes]`]"""
mixtures::Union{Type,Vector{<: BetaML.GMM.AbstractMixture}}
"Tolerance to stop the algorithm [default: 10^(-6)]"
tol::Float64
"Minimum variance for the mixtures [default: 0.05]"
minimum_variance::Float64
"Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance (see notes)."
minimum_covariance::Float64
"""
The computation method of the vector of the initial mixtures.
One of the following:
- "grid": using a grid approach
- "given": using the mixture provided in the fully qualified `mixtures` parameter
- "kmeans": use first kmeans (itself initialised with a "grid" strategy) to set the initial mixture centers [default]
Note that currently "random" and "shuffle" initialisations are not supported in gmm-based algorithms.
"""
initialisation_strategy::String
"Maximum number of iterations [def: `typemax(Int64)`, i.e. ∞]"
maximum_iterations::Int64
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
function GaussianMixtureRegressor(;
n_classes = 3,
initial_probmixtures = [],
mixtures = [BetaML.GMM.DiagonalGaussian() for i in 1:n_classes],
tol = 10^(-6),
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = typemax(Int64),
rng = Random.GLOBAL_RNG
)
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:n_classes]
end
return GaussianMixtureRegressor(n_classes,initial_probmixtures,mixtures,tol,minimum_variance,minimum_covariance,initialisation_strategy,maximum_iterations,rng)
end
"""
$(TYPEDEF)
A non-linear regressor derived from fitting the data on a probabilistic model (Gaussian Mixture Model). Relatively fast but generally not very precise, except for data with a structure matching the chosen underlying mixture.
This is the multi-target version of the model. If you want to predict a single label (y), use the MLJ model [`GaussianMixtureRegressor`](@ref).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> ydouble = hcat(y, y .*2 .+5);
julia> modelType = @load MultitargetGaussianMixtureRegressor pkg = "BetaML" verbosity=0
BetaML.GMM.MultitargetGaussianMixtureRegressor
julia> model = modelType()
MultitargetGaussianMixtureRegressor(
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = BetaML.GMM.DiagonalGaussian{Float64}[BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing)],
tol = 1.0e-6,
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = 9223372036854775807,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, ydouble);
julia> fit!(mach);
[ Info: Training machine(MultitargetGaussianMixtureRegressor(n_classes = 3, …), …).
Iter. 1: Var. of the post 20.46947926187522 Log-likelihood -23662.72770575145
julia> ŷdouble = predict(mach, X)
506×2 Matrix{Float64}:
23.3358 51.6717
23.3358 51.6717
⋮
16.6843 38.3686
16.6843 38.3686
```
"""
mutable struct MultitargetGaussianMixtureRegressor <: MMI.Deterministic
"Number of mixtures (latent classes) to consider [def: 3]"
n_classes::Int64
"Initial probabilities of the categorical distribution (n_classes x 1) [default: `[]`]"
initial_probmixtures::Vector{Float64}
"""An array (of length `n_classes``) of the mixtures to employ (see the [`?GMM`](@ref GMM) module).
Each mixture object can be provided with or without its parameters (e.g. mean and variance for the gaussian ones). Fully qualified mixtures are useful only if the `initialisation_strategy` parameter is set to \"gived\"`
This parameter can also be given symply in term of a _type_. In this case it is automatically extended to a vector of `n_classes`` mixtures of the specified type.
Note that mixing of different mixture types is not currently supported.
[def: `[DiagonalGaussian() for i in 1:n_classes]`]"""
mixtures::Union{Type,Vector{<: BetaML.GMM.AbstractMixture}}
"Tolerance to stop the algorithm [default: 10^(-6)]"
tol::Float64
"Minimum variance for the mixtures [default: 0.05]"
minimum_variance::Float64
"Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance (see notes)."
minimum_covariance::Float64
"""
The computation method of the vector of the initial mixtures.
One of the following:
- "grid": using a grid approach
- "given": using the mixture provided in the fully qualified `mixtures` parameter
- "kmeans": use first kmeans (itself initialised with a "grid" strategy) to set the initial mixture centers [default]
Note that currently "random" and "shuffle" initialisations are not supported in gmm-based algorithms.
"""
initialisation_strategy::String
"Maximum number of iterations [def: `typemax(Int64)`, i.e. ∞]"
maximum_iterations::Int64
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
function MultitargetGaussianMixtureRegressor(;
n_classes = 3,
initial_probmixtures = [],
mixtures = [BetaML.GMM.DiagonalGaussian() for i in 1:n_classes],
tol = 10^(-6),
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
maximum_iterations = typemax(Int64),
rng = Random.GLOBAL_RNG
)
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:n_classes]
end
return MultitargetGaussianMixtureRegressor(n_classes,initial_probmixtures,mixtures,tol,minimum_variance,minimum_covariance,initialisation_strategy,maximum_iterations,rng)
end
# ------------------------------------------------------------------------------
# Fit functions...
function MMI.fit(m::GaussianMixtureClusterer, verbosity, X)
# X is nothing, y is the data: https://alan-turing-institute.github.io/MLJ.jl/dev/adding_models_for_general_use/#Models-that-learn-a-probability-distribution-1
x = MMI.matrix(X) # convert table to matrix
#=
if m.mixtures == :diag_gaussian
mixtures = [DiagonalGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :full_gaussian
mixtures = [FullGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :spherical_gaussian
mixtures = [SphericalGaussian() for i in 1:m.n_classes]
else
error("Usupported mixture. Supported mixtures are either `:diag_gaussian`, `:full_gaussian` or `:spherical_gaussian`.")
end
=#
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
mixtures = m.mixtures
res = BetaML.GMM.gmm(x,m.n_classes,initial_probmixtures=deepcopy(m.initial_probmixtures),mixtures=mixtures, minimum_variance=m.minimum_variance, minimum_covariance=m.minimum_covariance,initialisation_strategy=m.initialisation_strategy,verbosity=verbosity,maximum_iterations=m.maximum_iterations,rng=m.rng)
fitResults = (pₖ=res.pₖ,mixtures=res.mixtures) # res.pₙₖ
cache = nothing
report = (res.ϵ,res.lL,res.BIC,res.AIC)
return (fitResults, cache, report)
end
MMI.fitted_params(model::GaussianMixtureClusterer, fitresults) = (weights=fitresults.pₖ, mixtures=fitresults.mixtures)
function MMI.fit(m::GaussianMixtureRegressor, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
ndims(y) < 2 || error("Trying to fit `GaussianMixtureRegressor` with a multidimensional target. Use `MultitargetGaussianMixtureRegressor` instead.")
#=
if typeof(y) <: AbstractMatrix
y = MMI.matrix(y)
end
if m.mixtures == :diag_gaussian
mixtures = [DiagonalGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :full_gaussian
mixtures = [FullGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :spherical_gaussian
mixtures = [SphericalGaussian() for i in 1:m.n_classes]
else
error("Usupported mixture. Supported mixtures are either `:diag_gaussian`, `:full_gaussian` or `:spherical_gaussian`.")
end
=#
mixtures = m.mixtures
betamod = BetaML.GMM.GaussianMixtureRegressor(
n_classes = m.n_classes,
initial_probmixtures = m.initial_probmixtures,
mixtures = mixtures,
tol = m.tol,
minimum_variance = m.minimum_variance,
initialisation_strategy = m.initialisation_strategy,
maximum_iterations = m.maximum_iterations,
verbosity = verbosity,
rng = m.rng
)
fit!(betamod,x,y)
cache = nothing
return (betamod, cache, info(betamod))
end
function MMI.fit(m::MultitargetGaussianMixtureRegressor, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
ndims(y) >= 2 || @warn "Trying to fit `MultitargetGaussianMixtureRegressor` with a single-dimensional target. You may want to consider `GaussianMixtureRegressor` instead."
#=
if typeof(y) <: AbstractMatrix
y = MMI.matrix(y)
end
if m.mixtures == :diag_gaussian
mixtures = [DiagonalGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :full_gaussian
mixtures = [FullGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :spherical_gaussian
mixtures = [SphericalGaussian() for i in 1:m.n_classes]
else
error("Usupported mixture. Supported mixtures are either `:diag_gaussian`, `:full_gaussian` or `:spherical_gaussian`.")
end
=#
mixtures = m.mixtures
betamod = BetaML.GMM.GaussianMixtureRegressor(
n_classes = m.n_classes,
initial_probmixtures = m.initial_probmixtures,
mixtures = mixtures,
tol = m.tol,
minimum_variance = m.minimum_variance,
initialisation_strategy = m.initialisation_strategy,
maximum_iterations = m.maximum_iterations,
verbosity = verbosity,
rng = m.rng
)
fit!(betamod,x,y)
cache = nothing
return (betamod, cache, info(betamod))
end
# ------------------------------------------------------------------------------
# Predict functions...
function MMI.predict(m::GaussianMixtureClusterer, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
(N,D) = size(x)
(pₖ,mixtures) = (fitResults.pₖ, fitResults.mixtures)
nCl = length(pₖ)
# Compute the probabilities that maximise the likelihood given the existing mixtures and a single iteration (i.e. the mixtures are not updated)
thisOut = BetaML.GMM.gmm(x,nCl,initial_probmixtures=pₖ,mixtures=mixtures,tol=m.tol,verbosity=NONE,minimum_variance=m.minimum_variance,minimum_covariance=m.minimum_covariance,initialisation_strategy="given",maximum_iterations=1,rng=m.rng)
classes = CategoricalArray(1:nCl)
predictions = MMI.UnivariateFinite(classes, thisOut.pₙₖ)
return predictions
end
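# Prediction above runs a single EM iteration with `initialisation_strategy="given"`, so the fitted
# mixtures stay fixed and only the posterior probabilities pₙₖ are recomputed for the new data.
# A hedged sketch of how such a probability matrix maps to MLJ predictions (values are made up):
#=
probs   = [0.9 0.05 0.05;
           0.1 0.30 0.60]                        # n_records × n_classes posteriors
classes = CategoricalArray(1:3)
preds   = MMI.UnivariateFinite(classes, probs)   # one finite distribution per record
=#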
function MMI.predict(m::GaussianMixtureRegressor, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
betamod = fitResults
return dropdims(BetaML.Api.predict(betamod,x),dims=2)
end
function MMI.predict(m::MultitargetGaussianMixtureRegressor, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
betamod = fitResults
return BetaML.Api.predict(betamod,x)
end
# ------------------------------------------------------------------------------
# Model metadata for registration in MLJ...
MMI.metadata_model(GaussianMixtureClusterer,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Missing}},
},
output_scitype = AbstractArray{<:MMI.Multiclass}, # scitype of the output of `transform`
target_scitype = AbstractArray{<:MMI.Multiclass}, # scitype of the output of `predict`
#prediction_type = :probabilistic, # option not added to metadata_model function, need to do it separately
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.GaussianMixtureClusterer"
)
MMI.prediction_type(::Type{<:GaussianMixtureClusterer}) = :probabilistic
MMI.metadata_model(GaussianMixtureRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Infinite,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Infinite,MMI.Missing}},
},
target_scitype = AbstractVector{<: MMI.Continuous}, # for a supervised model, what target?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.GaussianMixtureRegressor"
)
MMI.metadata_model(MultitargetGaussianMixtureRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Infinite,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Infinite,MMI.Missing}},
},
target_scitype = AbstractMatrix{<: MMI.Continuous}, # for a supervised model, what target?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.MultitargetGaussianMixtureRegressor"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 22988 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for imputers models
export SimpleImputer,GaussianMixtureImputer, RandomForestImputer, GeneralImputer
"""
$(TYPEDEF)
Impute missing values using feature (column) mean, with optional record normalisation (using l-`norm` norms), from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4] |> table ;
julia> modelType = @load SimpleImputer pkg = "BetaML" verbosity=0
BetaML.Imputation.SimpleImputer
julia> model = modelType(norm=1)
SimpleImputer(
statistic = Statistics.mean,
norm = 1)
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(SimpleImputer(statistic = mean, …), …).
julia> X_full = transform(mach) |> MLJ.matrix
9×2 Matrix{Float64}:
1.0 10.5
1.5 0.295466
1.8 8.0
1.7 15.0
3.2 40.0
0.280952 1.69524
3.3 38.0
0.0750839 -2.3
5.2 -2.4
```
"""
mutable struct SimpleImputer <: MMI.Unsupervised
"The descriptive statistic of the column (feature) to use as imputed value [def: `mean`]"
statistic::Function
"Normalise the feature mean by l-`norm` norm of the records [default: `nothing`]. Use it (e.g. `norm=1` to use the l-1 norm) if the records are highly heterogeneus (e.g. quantity exports of different countries)."
norm::Union{Nothing,Int64}
end
SimpleImputer(;
statistic::Function = mean,
norm::Union{Nothing,Int64} = nothing,
) = SimpleImputer(statistic,norm)
"""
$(TYPEDEF)
Impute missing values using a probabilistic approach (Gaussian Mixture Models) fitted using the Expectation-Maximisation algorithm, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example :
```julia
julia> using MLJ
julia> X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4] |> table ;
julia> modelType = @load GaussianMixtureImputer pkg = "BetaML" verbosity=0
BetaML.Imputation.GaussianMixtureImputer
julia> model = modelType(initialisation_strategy="grid")
GaussianMixtureImputer(
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = BetaML.GMM.DiagonalGaussian{Float64}[BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing), BetaML.GMM.DiagonalGaussian{Float64}(nothing, nothing)],
tol = 1.0e-6,
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "grid",
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(GaussianMixtureImputer(n_classes = 3, …), …).
Iter. 1: Var. of the post 2.0225921341714286 Log-likelihood -42.96100103213314
julia> X_full = transform(mach) |> MLJ.matrix
9×2 Matrix{Float64}:
1.0 10.5
1.5 14.7366
1.8 8.0
1.7 15.0
3.2 40.0
2.51842 15.1747
3.3 38.0
2.47412 -2.3
5.2 -2.4
```
"""
mutable struct GaussianMixtureImputer <: MMI.Unsupervised
"Number of mixtures (latent classes) to consider [def: 3]"
n_classes::Int64
"Initial probabilities of the categorical distribution (n_classes x 1) [default: `[]`]"
initial_probmixtures::Vector{Float64}
"""An array (of length `n_classes``) of the mixtures to employ (see the [`?GMM`](@ref GMM) module in BetaML).
Each mixture object can be provided with or without its parameters (e.g. mean and variance for the gaussian ones). Fully qualified mixtures are useful only if the `initialisation_strategy` parameter is set to \"gived\"`
This parameter can also be given symply in term of a _type_. In this case it is automatically extended to a vector of `n_classes`` mixtures of the specified type.
Note that mixing of different mixture types is not currently supported and that currently implemented mixtures are `SphericalGaussian`, `DiagonalGaussian` and `FullGaussian`.
[def: `DiagonalGaussian`]"""
mixtures::Union{Type,Vector{<: BetaML.GMM.AbstractMixture}}
"Tolerance to stop the algorithm [default: 10^(-6)]"
tol::Float64
"Minimum variance for the mixtures [default: 0.05]"
minimum_variance::Float64
"Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance."
minimum_covariance::Float64
"""
The computation method of the vector of the initial mixtures.
One of the following:
- "grid": using a grid approach
- "given": using the mixture provided in the fully qualified `mixtures` parameter
- "kmeans": use first kmeans (itself initialised with a "grid" strategy) to set the initial mixture centers [default]
Note that currently "random" and "shuffle" initialisations are not supported in gmm-based algorithms.
"""
initialisation_strategy::String
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
function GaussianMixtureImputer(;
n_classes = 3,
initial_probmixtures = Float64[],
mixtures = BetaML.GMM.DiagonalGaussian, #[DiagonalGaussian() for i in 1:n_classes],
tol = 10^(-6),
minimum_variance = 0.05,
minimum_covariance = 0.0,
initialisation_strategy = "kmeans",
rng = Random.GLOBAL_RNG,
)
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:n_classes]
end
return GaussianMixtureImputer(n_classes,initial_probmixtures,mixtures, tol, minimum_variance, minimum_covariance,initialisation_strategy,rng)
end
"""
$(TYPEDEF)
Impute missing values using Random Forests, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4] |> table ;
julia> modelType = @load RandomForestImputer pkg = "BetaML" verbosity=0
BetaML.Imputation.RandomForestImputer
julia> model = modelType(n_trees=40)
RandomForestImputer(
n_trees = 40,
max_depth = nothing,
min_gain = 0.0,
min_records = 2,
max_features = nothing,
forced_categorical_cols = Int64[],
splitting_criterion = nothing,
recursive_passages = 1,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(RandomForestImputer(n_trees = 40, …), …).
julia> X_full = transform(mach) |> MLJ.matrix
9×2 Matrix{Float64}:
1.0 10.5
1.5 10.3909
1.8 8.0
1.7 15.0
3.2 40.0
2.88375 8.66125
3.3 38.0
3.98125 -2.3
5.2 -2.4
```
"""
mutable struct RandomForestImputer <: MMI.Unsupervised
"Number of (decision) trees in the forest [def: `30`]"
n_trees::Int64
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `nothing`, i.e. no limits]"
max_depth::Union{Nothing,Int64}
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64
"The maximum number of (random) features to consider at each partitioning [def: `nothing`, i.e. square root of the data dimension]"
max_features::Union{Nothing,Int64}
"Specify the positions of the integer columns to treat as categorical instead of cardinal. [Default: empty vector (all numerical cols are treated as cardinal by default and the others as categorical)]"
forced_categorical_cols::Vector{Int64}
"Either `gini`, `entropy` or `variance`. This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `nothing`, i.e. `gini` for categorical labels (classification task) and `variance` for numerical labels(regression task)]. It can be an anonymous function."
splitting_criterion::Union{Nothing,Function}
"Define the times to go trough the various columns to impute their data. Useful when there are data to impute on multiple columns. The order of the first passage is given by the decreasing number of missing values per column, the other passages are random [default: `1`]."
recursive_passages::Int64
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
RandomForestImputer(;
n_trees = 30,
max_depth = nothing,
min_gain = 0.0,
min_records = 2,
max_features = nothing,
forced_categorical_cols = Int64[],
splitting_criterion = nothing,
recursive_passages = 1,
#multiple_imputations = 1,
rng = Random.GLOBAL_RNG,
) = RandomForestImputer(n_trees, max_depth, min_gain, min_records, max_features, forced_categorical_cols, splitting_criterion, recursive_passages, rng)
"""
$(TYPEDEF)
Impute missing values using arbitrary learning models, from the Beta Machine Learning Toolkit (BetaML).
Impute missing values using a vector (one per column) of arbitrary learning models (classifiers/regressors, not necessarily from BetaML) that implement the interface `m = Model([options])`, `train!(m,X,Y)` and `predict(m,X)`.
# Hyperparameters:
$(TYPEDFIELDS)
# Examples :
- *Using BetaML models*:
```julia
julia> using MLJ;
julia> import BetaML # The library from which to get the individual estimators to be used for each column imputation
julia> X = ["a" 8.2;
"a" missing;
"a" 7.8;
"b" 21;
"b" 18;
"c" -0.9;
missing 20;
"c" -1.8;
missing -2.3;
"c" -2.4] |> table ;
julia> modelType = @load GeneralImputer pkg = "BetaML" verbosity=0
BetaML.Imputation.GeneralImputer
julia> model = modelType(estimator=BetaML.DecisionTreeEstimator(),recursive_passages=2);
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(GeneralImputer(cols_to_impute = auto, …), …).
julia> X_full = transform(mach) |> MLJ.matrix
10×2 Matrix{Any}:
"a" 8.2
"a" 8.0
"a" 7.8
"b" 21
"b" 18
"c" -0.9
"b" 20
"c" -1.8
"c" -2.3
"c" -2.4
```
- *Using third party packages* (in this example `DecisionTree`):
```julia
julia> using MLJ;
julia> import DecisionTree # An example of external estimators to be used for each column imputation
julia> X = ["a" 8.2;
"a" missing;
"a" 7.8;
"b" 21;
"b" 18;
"c" -0.9;
missing 20;
"c" -1.8;
missing -2.3;
"c" -2.4] |> table ;
julia> modelType = @load GeneralImputer pkg = "BetaML" verbosity=0
BetaML.Imputation.GeneralImputer
julia> model = modelType(estimator=[DecisionTree.DecisionTreeClassifier(),DecisionTree.DecisionTreeRegressor()], fit_function=DecisionTree.fit!,predict_function=DecisionTree.predict,recursive_passages=2);
julia> mach = machine(model, X);
julia> fit!(mach);
[ Info: Training machine(GeneralImputer(cols_to_impute = auto, …), …).
julia> X_full = transform(mach) |> MLJ.matrix
10×2 Matrix{Any}:
"a" 8.2
"a" 7.51111
"a" 7.8
"b" 21
"b" 18
"c" -0.9
"b" 20
"c" -1.8
"c" -2.3
"c" -2.4
```
"""
mutable struct GeneralImputer <: MMI.Unsupervised
"Columns in the matrix for which to create an imputation model, i.e. to impute. It can be a vector of columns IDs (positions), or the keywords \"auto\" (default) or \"all\". With \"auto\" the model automatically detects the columns with missing data and impute only them. You may manually specify the columns or use \"all\" if you want to create a imputation model for that columns during training even if all training data are non-missing to apply then the training model to further data with possibly missing values."
cols_to_impute::Union{String,Vector{Int64}}
"An entimator model (regressor or classifier), with eventually its options (hyper-parameters), to be used to impute the various columns of the matrix. It can also be a `cols_to_impute`-length vector of different estimators to consider a different estimator for each column (dimension) to impute, for example when some columns are categorical (and will hence require a classifier) and some others are numerical (hence requiring a regressor). [default: `nothing`, i.e. use BetaML random forests, handling classification and regression jobs automatically]."
estimator
"Wheter the estimator(s) used to predict the missing data support itself missing data in the training features (X). If not, when the model for a certain dimension is fitted, dimensions with missing data in the same rows of those where imputation is needed are dropped and then only non-missing rows in the other remaining dimensions are considered. It can be a vector of boolean values to specify this property for each individual estimator or a single booleann value to apply to all the estimators [default: `false`]"
missing_supported::Union{Vector{Bool},Bool}
"The function used by the estimator(s) to fit the model. It should take as fist argument the model itself, as second argument a matrix representing the features, and as third argument a vector representing the labels. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.fit!`]"
fit_function::Union{Vector{Function},Function}
"The function used by the estimator(s) to predict the labels. It should take as fist argument the model itself and as second argument a matrix representing the features. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.predict`]"
predict_function::Union{Vector{Function},Function}
"Define the number of times to go trough the various columns to impute their data. Useful when there are data to impute on multiple columns. The order of the first passage is given by the decreasing number of missing values per column, the other passages are random [default: `1`]."
recursive_passages::Int64
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]. Note that this influence only the specific GeneralImputer code, the individual estimators may have their own rng (or similar) parameter."
rng::AbstractRNG
end
GeneralImputer(;
cols_to_impute = "auto",
estimator = nothing,
missing_supported = false,
fit_function = fit!,
predict_function = predict,
recursive_passages = 1,
rng = Random.GLOBAL_RNG,
) = GeneralImputer(cols_to_impute, estimator, missing_supported, fit_function,predict_function, recursive_passages, rng)
# ------------------------------------------------------------------------------
# Fit functions...
function MMI.fit(m::SimpleImputer, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
mod = BetaML.Imputation.SimpleImputer(
statistic = m.statistic,
norm = m.norm,
verbosity = verbosity,
)
fit!(mod,x)
#fitResults = MMI.table(predict(mod))
fitResults = mod
cache = nothing
report = BetaML.Api.info(mod)
return (fitResults, cache, report)
end
function MMI.fit(m::GaussianMixtureImputer, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
#=if m.mixtures == :diag_gaussian
mixtures = [DiagonalGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :full_gaussian
mixtures = [FullGaussian() for i in 1:m.n_classes]
elseif m.mixtures == :spherical_gaussian
mixtures = [SphericalGaussian() for i in 1:m.n_classes]
else
error("Usupported mixture. Supported mixtures are either `:diag_gaussian`, `:full_gaussian` or `:spherical_gaussian`.")
end
=#
mod = BetaML.Imputation.GaussianMixtureImputer(
n_classes = m.n_classes,
initial_probmixtures = m.initial_probmixtures,
mixtures = m.mixtures,
tol = m.tol,
minimum_variance = m.minimum_variance,
minimum_covariance = m.minimum_covariance,
initialisation_strategy = m.initialisation_strategy,
verbosity = verbosity,
rng = m.rng
)
fit!(mod,x)
#fitResults = MMI.table(predict(mod))
fitResults = mod
cache = nothing
report = BetaML.Api.info(mod)
return (fitResults, cache, report)
end
function MMI.fit(m::RandomForestImputer, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
mod = BetaML.Imputation.RandomForestImputer(
n_trees = m.n_trees,
max_depth = m.max_depth,
min_gain = m.min_gain,
min_records = m.min_records,
max_features = m.max_features,
forced_categorical_cols = m.forced_categorical_cols,
splitting_criterion = m.splitting_criterion,
verbosity = verbosity,
recursive_passages = m.recursive_passages,
#multiple_imputations = m.multiple_imputations,
rng = m.rng,
)
BetaML.Api.fit!(mod,x)
#if m.multiple_imputations == 1
# fitResults = MMI.table(predict(mod))
#else
# fitResults = MMI.table.(predict(mod))
#end
fitResults = mod
cache = nothing
report = BetaML.Api.info(mod)
return (fitResults, cache, report)
end
function MMI.fit(m::GeneralImputer, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
mod = BetaML.Imputation.GeneralImputer(
cols_to_impute = m.cols_to_impute,
estimator = m.estimator,
missing_supported = m.missing_supported,
fit_function = m.fit_function,
predict_function = m.predict_function,
recursive_passages = m.recursive_passages,
rng = m.rng,
verbosity = verbosity,
)
BetaML.Api.fit!(mod,x)
#if m.multiple_imputations == 1
# fitResults = MMI.table(predict(mod))
#else
# fitResults = MMI.table.(predict(mod))
#end
fitResults = mod
cache = nothing
report = BetaML.Api.info(mod)
return (fitResults, cache, report)
end
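# The MLJ `fit` above is a thin wrapper: it forwards the hyper-parameters to the BetaML-level model
# and trains it in place. A hedged sketch of the equivalent direct (non-MLJ) usage, on made-up data:
#=
Xmiss = [1.0 10.5; 1.5 missing; 1.8 8.0; 1.7 15.0; 3.2 40.0]
gimp  = BetaML.Imputation.GeneralImputer(recursive_passages=2)
BetaML.Api.fit!(gimp, Xmiss)
Xfull = BetaML.Api.predict(gimp)   # the training matrix with missing values imputed
=#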
# ------------------------------------------------------------------------------
# Transform functions...
""" transform(m, fitResults, X) - Given a trained imputator model fill the missing data of some new observations"""
function MMI.transform(m::Union{SimpleImputer,GaussianMixtureImputer,RandomForestImputer}, fitResults, X)
x = MMI.matrix(X) # convert table to matrix
mod = fitResults
return MMI.table(BetaML.Api.predict(mod,x))
end
"""
transform(m, fitResults, X)
Given a trained imputer model, fill the missing data of some new observations.
Note that with multiple recursive imputations and inner estimators that don't support missing data, this function works only for the X the model has been trained with, i.e. it cannot be applied to new matrices with missing values using a model trained on other matrices.
"""
function MMI.transform(m::GeneralImputer, fitResults, X)
cols2imp = fitResults.par.cols_to_impute_actual
nD2Imp = length(cols2imp)
missing_supported = typeof(fitResults.hpar.missing_supported) <: AbstractArray ? fitResults.hpar.missing_supported : fill(fitResults.hpar.missing_supported,nD2Imp)
if fitResults.hpar.recursive_passages == 1 || all(missing_supported)
x = MMI.matrix(X) # convert table to matrix
mod = fitResults
return MMI.table(BetaML.Api.predict(mod,x))
else
mod = fitResults
return MMI.table(BetaML.Api.predict(mod))
end
end
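# A hedged sketch of the two code paths above (variable names are illustrative): new data can be
# imputed directly only when a single recursive passage is used or every inner estimator supports
# missing data; otherwise only the matrix the machine was trained on can be completed.
#=
X_filled_train = transform(mach)          # always available (returns the completed training matrix)
X_filled_new   = transform(mach, X_new)   # only if recursive_passages == 1 or all(missing_supported)
=#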
# ------------------------------------------------------------------------------
# Model metadata for registration in MLJ...
MMI.metadata_model(SimpleImputer,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Missing}},
},
output_scitype = MMI.Table(MMI.Continuous), # for an unsupervised, what output?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.SimpleImputer"
)
MMI.metadata_model(GaussianMixtureImputer,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Missing}},
},
output_scitype = MMI.Table(MMI.Continuous), # for an unsupervised, what output?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.GaussianMixtureImputer"
)
MMI.metadata_model(RandomForestImputer,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
output_scitype = MMI.Table(MMI.Known), # for an unsupervised, what output?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.RandomForestImputer"
)
MMI.metadata_model(GeneralImputer,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
output_scitype = MMI.Table(MMI.Known), # for an unsupervised, what output?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.GeneralImputer"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 22507 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for Neural Networks models
using CategoricalArrays
export NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor, NeuralNetworkClassifier
# Model Structure declarations..
"""
$(TYPEDEF)
A simple but flexible Feedforward Neural Network, from the Beta Machine Learning Toolkit (BetaML) for regression of a single dimensional target.
# Parameters:
$(FIELDS)
# Notes:
- data must be numerical
- the label should be a _n-records_ vector.
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> modelType = @load NeuralNetworkRegressor pkg = "BetaML" verbosity=0
BetaML.Nn.NeuralNetworkRegressor
julia> layers = [BetaML.DenseLayer(12,20,f=BetaML.relu),BetaML.DenseLayer(20,20,f=BetaML.relu),BetaML.DenseLayer(20,1,f=BetaML.relu)];
julia> model = modelType(layers=layers,opt_alg=BetaML.ADAM());
NeuralNetworkRegressor(
layers = BetaML.Nn.AbstractLayer[BetaML.Nn.DenseLayer([-0.23249759178069676 -0.4125090172711131 … 0.41401934928739 -0.33017881111237535; -0.27912169279319965 0.270551221249931 … 0.19258414323473344 0.1703002982374256; … ; 0.31186742456482447 0.14776438287394805 … 0.3624993442655036 0.1438885872964824; 0.24363744610286758 -0.3221033024934767 … 0.14886090419299408 0.038411663101909355], [-0.42360286004241765, -0.34355377040029594, 0.11510963232946697, 0.29078650404397893, -0.04940236502546075, 0.05142849152316714, -0.177685375947775, 0.3857630523957018, -0.25454667127064756, -0.1726731848206195, 0.29832456225553444, -0.21138505291162835, -0.15763643112604903, -0.08477044513587562, -0.38436681165349196, 0.20538016429104916, -0.25008157754468335, 0.268681800562054, 0.10600581996650865, 0.4262194464325672], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.08534180387478185 0.19659398307677617 … -0.3413633217504578 -0.0484925247381256; 0.0024419192794883915 -0.14614102508129 … -0.21912059923003044 0.2680725396694708; … ; 0.25151545823147886 -0.27532269951606037 … 0.20739970895058063 0.2891938885916349; -0.1699020711688904 -0.1350423717084296 … 0.16947589410758873 0.3629006047373296], [0.2158116357688406, -0.3255582642532289, -0.057314442103850394, 0.29029696770539953, 0.24994080694366455, 0.3624239027782297, -0.30674318230919984, -0.3854738338935017, 0.10809721838554087, 0.16073511121016176, -0.005923262068960489, 0.3157147976348795, -0.10938918304264739, -0.24521229198853187, -0.307167732178712, 0.0808907777008302, -0.014577497150872254, -0.0011287181458157214, 0.07522282588658086, 0.043366500526073104], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.021367697115938555 -0.28326652172347155 … 0.05346175368370165 -0.26037328415871647], [-0.2313659199724562], BetaML.Utils.relu, BetaML.Utils.drelu)],
loss = BetaML.Utils.squared_cost,
dloss = BetaML.Utils.dsquared_cost,
epochs = 100,
batch_size = 32,
opt_alg = BetaML.Nn.ADAM(BetaML.Nn.var"#90#93"(), 1.0, 0.9, 0.999, 1.0e-8, BetaML.Nn.Learnable[], BetaML.Nn.Learnable[]),
shuffle = true,
descr = "",
cb = BetaML.Nn.fitting_info,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
julia> ŷ = predict(mach, X);
julia> hcat(y,ŷ)
506×2 Matrix{Float64}:
24.0 30.7726
21.6 28.0811
34.7 31.3194
⋮
23.9 30.9032
22.0 29.49
11.9 27.2438
```
"""
Base.@kwdef mutable struct NeuralNetworkRegressor <: MMI.Deterministic
"Array of layer objects [def: `nothing`, i.e. basic network]. See `subtypes(BetaML.AbstractLayer)` for supported layers"
layers::Union{Array{BetaML.Nn.AbstractLayer,1},Nothing} = nothing
"""Loss (cost) function [def: `BetaML.squared_cost`]. Should always assume y and ŷ as matrices, even if the regression task is 1-D
!!! warning
If you change the parameter `loss`, you need to either provide its derivative on the parameter `dloss` or use autodiff with `dloss=nothing`.
"""
loss::Union{Nothing,Function} = BetaML.Utils.squared_cost
"Derivative of the loss function [def: `BetaML.dsquared_cost`, i.e. use the derivative of the squared cost]. Use `nothing` for autodiff."
dloss::Union{Function,Nothing} = BetaML.Utils.dsquared_cost
"Number of epochs, i.e. passages trough the whole training sample [def: `200`]"
epochs::Int64 = 200
"Size of each individual batch [def: `16`]"
batch_size::Int64 = 16
"The optimisation algorithm to update the gradient at each batch [def: `BetaML.ADAM()`]. See `subtypes(BetaML.OptimisationAlgorithm)` for supported optimizers"
opt_alg::OptimisationAlgorithm = BetaML.Nn.ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"A call back function to provide information during training [def: `fitting_info`]"
cb::Function=BetaML.Nn.fitting_info
"Random Number Generator (see [`FIXEDSEED`](@ref)) [deafult: `Random.GLOBAL_RNG`]
"
rng::AbstractRNG = Random.GLOBAL_RNG
end
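# The `loss`/`dloss` warning above means a custom loss must come with a matching derivative, or with
# `dloss=nothing` to fall back on automatic differentiation. A hedged sketch (the custom loss below
# is purely illustrative):
#=
mae(y, ŷ) = sum(abs.(ŷ .- y)) / size(y, 1)                          # symmetric, so argument order is irrelevant
model = NeuralNetworkRegressor(loss=mae, dloss=nothing, epochs=50)  # gradient obtained via autodiff
=#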
"""
$(TYPEDSIGNATURES)
For the `verbosity` parameter see [`Verbosity`](@ref))
"""
function MMI.fit(m::NeuralNetworkRegressor, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
ndims(y) > 1 && error("The label should have only 1 dimension. Use `MultitargetNeuralNetworkRegressor` or `NeuralNetworkClassifier` for multi-dimensional outputs.")
mi = BetaML.Nn.NeuralNetworkEstimator(;layers=m.layers,loss=m.loss, dloss=m.dloss, epochs=m.epochs, batch_size=m.batch_size, opt_alg=m.opt_alg,shuffle=m.shuffle, cache=false, descr=m.descr, cb=m.cb, rng=m.rng, verbosity=verbosity)
fit!(mi,x,y)
fitresults = mi
cache = nothing
report = nothing
return fitresults, cache, report
end
MMI.predict(m::NeuralNetworkRegressor, fitresult, Xnew) = BetaML.Api.predict(fitresult, MMI.matrix(Xnew))
MMI.metadata_model(NeuralNetworkRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Count}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Count}},
},
target_scitype = AbstractVector{<: Union{MMI.Continuous,MMI.Count}},
supports_weights = false,
load_path = "BetaML.Bmlj.NeuralNetworkRegressor"
)
# ------------------------------------------------------------------------------
# Model Structure declarations..
"""
$(TYPEDEF)
A simple but flexible Feedforward Neural Network, from the Beta Machine Learning Toolkit (BetaML) for regression of multiple dimensional targets.
# Parameters:
$(FIELDS)
# Notes:
- data must be numerical
- the label should be a _n-records_ by _n-dimensions_ matrix
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> ydouble = hcat(y, y .*2 .+5);
julia> modelType = @load MultitargetNeuralNetworkRegressor pkg = "BetaML" verbosity=0
BetaML.Nn.MultitargetNeuralNetworkRegressor
julia> layers = [BetaML.DenseLayer(12,50,f=BetaML.relu),BetaML.DenseLayer(50,50,f=BetaML.relu),BetaML.DenseLayer(50,50,f=BetaML.relu),BetaML.DenseLayer(50,2,f=BetaML.relu)];
julia> model = modelType(layers=layers,opt_alg=BetaML.ADAM(),epochs=500)
MultitargetNeuralNetworkRegressor(
layers = BetaML.Nn.AbstractLayer[BetaML.Nn.DenseLayer([-0.2591582523441157 -0.027962845131416225 … 0.16044535560124418 -0.12838827994676857; -0.30381834909561184 0.2405495243851402 … -0.2588144861880588 0.09538577909777807; … ; -0.017320292924711156 -0.14042266424603767 … 0.06366999105841187 -0.13419651752478906; 0.07393079961409338 0.24521350531110264 … 0.04256867886217541 -0.0895506802948175], [0.14249427336553644, 0.24719379413682485, -0.25595911822556566, 0.10034088778965933, -0.017086404878505712, 0.21932184025609347, -0.031413516834861266, -0.12569076082247596, -0.18080140982481183, 0.14551901873323253 … -0.13321995621967364, 0.2436582233332092, 0.0552222336976439, 0.07000814133633904, 0.2280064379660025, -0.28885681475734193, -0.07414214246290696, -0.06783184733650621, -0.055318068046308455, -0.2573488383282579], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.0395424111703751 -0.22531232360829911 … -0.04341228943744482 0.024336206858365517; -0.16481887432946268 0.17798073384748508 … -0.18594039305095766 0.051159225856547474; … ; -0.011639475293705043 -0.02347011206244673 … 0.20508869536159186 -0.1158382446274592; -0.19078069527757857 -0.007487540070740484 … -0.21341165344291158 -0.24158671316310726], [-0.04283623889330032, 0.14924461547060602, -0.17039563392959683, 0.00907774027816255, 0.21738885963113852, -0.06308040225941691, -0.14683286822101105, 0.21726892197970937, 0.19784321784707126, -0.0344988665714947 … -0.23643089430602846, -0.013560425201427584, 0.05323948910726356, -0.04644175812567475, -0.2350400292671211, 0.09628312383424742, 0.07016420995205697, -0.23266392927140334, -0.18823664451487, 0.2304486691429084], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.11504184627266828 0.08601794194664503 … 0.03843129724045469 -0.18417305624127284; 0.10181551438831654 0.13459759904443674 … 0.11094951365942118 -0.1549466590355218; … ; 0.15279817525427697 0.0846661196058916 … -0.07993619892911122 0.07145402617285884; -0.1614160186346092 -0.13032002335149 … -0.12310552194729624 -0.15915773071049827], [-0.03435885900946367, -0.1198543931290306, 0.008454985905194445, -0.17980887188986966, -0.03557204910359624, 0.19125847393334877, -0.10949700778538696, -0.09343206702591, -0.12229583511781811, -0.09123969069220564 … 0.22119233518322862, 0.2053873143308657, 0.12756489387198222, 0.11567243705173319, -0.20982445664020496, 0.1595157838386987, -0.02087331046544119, -0.20556423263489765, -0.1622837764237961, -0.019220998739847395], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.25796717031347993 0.17579536633402948 … -0.09992960168785256 -0.09426177454620635; -0.026436330246675632 0.18070899284865127 … -0.19310119102392206 -0.06904005900252091], [0.16133004882307822, -0.3061228721091248], BetaML.Utils.relu, BetaML.Utils.drelu)],
loss = BetaML.Utils.squared_cost,
dloss = BetaML.Utils.dsquared_cost,
epochs = 500,
batch_size = 32,
opt_alg = BetaML.Nn.ADAM(BetaML.Nn.var"#90#93"(), 1.0, 0.9, 0.999, 1.0e-8, BetaML.Nn.Learnable[], BetaML.Nn.Learnable[]),
shuffle = true,
descr = "",
cb = BetaML.Nn.fitting_info,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, ydouble);
julia> fit!(mach);
julia> ŷdouble = predict(mach, X);
julia> hcat(ydouble,ŷdouble)
506×4 Matrix{Float64}:
24.0 53.0 28.4624 62.8607
21.6 48.2 22.665 49.7401
34.7 74.4 31.5602 67.9433
33.4 71.8 33.0869 72.4337
⋮
23.9 52.8 23.3573 50.654
22.0 49.0 22.1141 48.5926
11.9 28.8 19.9639 45.5823
```
"""
Base.@kwdef mutable struct MultitargetNeuralNetworkRegressor <: MMI.Deterministic
"Array of layer objects [def: `nothing`, i.e. basic network]. See `subtypes(BetaML.AbstractLayer)` for supported layers"
layers::Union{Array{BetaML.Nn.AbstractLayer,1},Nothing} = nothing
"""Loss (cost) function [def: `BetaML.squared_cost`]. Should always assume y and ŷ as matrices.
!!! warning
If you change the parameter `loss`, you need to either provide its derivative on the parameter `dloss` or use autodiff with `dloss=nothing`.
"""
loss::Union{Nothing,Function} = BetaML.Utils.squared_cost
"Derivative of the loss function [def: `BetaML.dsquared_cost`, i.e. use the derivative of the squared cost]. Use `nothing` for autodiff."
dloss::Union{Function,Nothing} = BetaML.Utils.dsquared_cost
"Number of epochs, i.e. passages trough the whole training sample [def: `300`]"
epochs::Int64 = 300
"Size of each individual batch [def: `16`]"
batch_size::Int64 = 16
"The optimisation algorithm to update the gradient at each batch [def: `BetaML.ADAM()`]. See `subtypes(BetaML.OptimisationAlgorithm)` for supported optimizers"
opt_alg::OptimisationAlgorithm = BetaML.Nn.ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"A call back function to provide information during training [def: `BetaML.fitting_info`]"
cb::Function=BetaML.Nn.fitting_info
"Random Number Generator (see [`FIXEDSEED`](@ref)) [deafult: `Random.GLOBAL_RNG`]
"
rng::AbstractRNG = Random.GLOBAL_RNG
end
"""
$(TYPEDSIGNATURES)
For the `verbosity` parameter see [`Verbosity`](@ref))
"""
function MMI.fit(m::MultitargetNeuralNetworkRegressor, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
ndims(y) > 1 || error("The label should have multiple dimensions. Use `NeuralNetworkRegressor` for single-dimensional outputs.")
mi = BetaML.Nn.NeuralNetworkEstimator(;layers=m.layers,loss=m.loss, dloss=m.dloss, epochs=m.epochs, batch_size=m.batch_size, opt_alg=m.opt_alg,shuffle=m.shuffle, cache=false, descr=m.descr, cb=m.cb, rng=m.rng, verbosity=verbosity)
BetaML.Api.fit!(mi,x,y)
fitresults = mi
cache = nothing
report = nothing
return fitresults, cache, report
end
MMI.predict(m::MultitargetNeuralNetworkRegressor, fitresult, Xnew) = BetaML.Api.predict(fitresult, MMI.matrix(Xnew))
MMI.metadata_model(MultitargetNeuralNetworkRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Count}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Count}},
},
target_scitype = AbstractMatrix{<: Union{MMI.Continuous,MMI.Count}},
supports_weights = false,
load_path = "BetaML.Bmlj.MultitargetNeuralNetworkRegressor"
)
# ------------------------------------------------------------------------------
"""
$(TYPEDEF)
A simple but flexible Feedforward Neural Network, from the Beta Machine Learning Toolkit (BetaML) for classification problems.
# Parameters:
$(FIELDS)
# Notes:
- data must be numerical
- the label should be a _n-records_ by _n-dimensions_ matrix (e.g. a one-hot-encoded data for classification), where the output columns should be interpreted as the probabilities for each categories.
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load NeuralNetworkClassifier pkg = "BetaML" verbosity=0
BetaML.Nn.NeuralNetworkClassifier
julia> layers = [BetaML.DenseLayer(4,8,f=BetaML.relu),BetaML.DenseLayer(8,8,f=BetaML.relu),BetaML.DenseLayer(8,3,f=BetaML.relu),BetaML.VectorFunctionLayer(3,f=BetaML.softmax)];
julia> model = modelType(layers=layers,opt_alg=BetaML.ADAM())
NeuralNetworkClassifier(
layers = BetaML.Nn.AbstractLayer[BetaML.Nn.DenseLayer([-0.376173352338049 0.7029289511758696 -0.5589563304592478 -0.21043274001651874; 0.044758889527899415 0.6687689636685921 0.4584331114653877 0.6820506583840453; … ; -0.26546358457167507 -0.28469736227283804 -0.164225549922154 -0.516785639164486; -0.5146043550684141 -0.0699113265130964 0.14959906603941908 -0.053706860039406834], [0.7003943613125758, -0.23990840466587576, -0.23823126271387746, 0.4018101580410387, 0.2274483050356888, -0.564975060667734, 0.1732063297031089, 0.11880299829896945], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([-0.029467850439546583 0.4074661266592745 … 0.36775675246760053 -0.595524555448422; 0.42455597698371306 -0.2458082732997091 … -0.3324220683462514 0.44439454998610595; … ; -0.2890883863364267 -0.10109249362508033 … -0.0602680568207582 0.18177278845097555; -0.03432587226449335 -0.4301192922760063 … 0.5646018168286626 0.47269177680892693], [0.13777442835428688, 0.5473306726675433, 0.3781939472904011, 0.24021813428130567, -0.0714779477402877, -0.020386373530818958, 0.5465466618404464, -0.40339790713616525], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.DenseLayer([0.6565120540082393 0.7139211611842745 … 0.07809812467915389 -0.49346311403373844; -0.4544472987041656 0.6502667641568863 … 0.43634608676548214 0.7213049952968921; 0.41212264783075303 -0.21993289366360613 … 0.25365007887755064 -0.5664469566269569], [-0.6911986792747682, -0.2149343209329364, -0.6347727539063817], BetaML.Utils.relu, BetaML.Utils.drelu), BetaML.Nn.VectorFunctionLayer{0}(fill(NaN), 3, 3, BetaML.Utils.softmax, BetaML.Utils.dsoftmax, nothing)],
loss = BetaML.Utils.crossentropy,
dloss = BetaML.Utils.dcrossentropy,
epochs = 100,
batch_size = 32,
opt_alg = BetaML.Nn.ADAM(BetaML.Nn.var"#90#93"(), 1.0, 0.9, 0.999, 1.0e-8, BetaML.Nn.Learnable[], BetaML.Nn.Learnable[]),
shuffle = true,
descr = "",
cb = BetaML.Nn.fitting_info,
categories = nothing,
handle_unknown = "error",
other_categories_name = nothing,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
julia> classes_est = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt8, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>0.575, versicolor=>0.213, virginica=>0.213)
UnivariateFinite{Multiclass{3}}(setosa=>0.573, versicolor=>0.213, virginica=>0.213)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>0.236, versicolor=>0.236, virginica=>0.529)
UnivariateFinite{Multiclass{3}}(setosa=>0.254, versicolor=>0.254, virginica=>0.492)
```
"""
Base.@kwdef mutable struct NeuralNetworkClassifier <: MMI.Probabilistic
"Array of layer objects [def: `nothing`, i.e. basic network]. See `subtypes(BetaML.AbstractLayer)` for supported layers. The last \"softmax\" layer is automatically added."
layers::Union{Array{BetaML.Nn.AbstractLayer,1},Nothing} = nothing
"""Loss (cost) function [def: `BetaML.crossentropy`]. Should always assume y and ŷ as matrices.
!!! warning
If you change the parameter `loss`, you need to either provide its derivative on the parameter `dloss` or use autodiff with `dloss=nothing`.
"""
loss::Union{Nothing,Function} = BetaML.Utils.crossentropy
"Derivative of the loss function [def: `BetaML.dcrossentropy`, i.e. the derivative of the cross-entropy]. Use `nothing` for autodiff."
dloss::Union{Function,Nothing} = BetaML.Utils.dcrossentropy
"Number of epochs, i.e. passages trough the whole training sample [def: `200`]"
epochs::Int64 = 200
"Size of each individual batch [def: `16`]"
batch_size::Int64 = 16
"The optimisation algorithm to update the gradient at each batch [def: `BetaML.ADAM()`]. See `subtypes(BetaML.OptimisationAlgorithm)` for supported optimizers"
opt_alg::OptimisationAlgorithm = BetaML.Nn.ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"A call back function to provide information during training [def: `BetaML.fitting_info`]"
cb::Function=BetaML.Nn.fitting_info
"The categories to represent as columns. [def: `nothing`, i.e. unique training values]."
categories::Union{Vector,Nothing} = nothing
"How to handle categories not seens in training or not present in the provided `categories` array? \"error\" (default) rises an error, \"infrequent\" adds a specific column for these categories."
handle_unknown::String = "error"
"Which value during prediction to assign to this \"other\" category (i.e. categories not seen on training or not present in the provided `categories` array? [def: ` nothing`, i.e. typemax(Int64) for integer vectors and \"other\" for other types]. This setting is active only if `handle_unknown=\"infrequent\"` and in that case it MUST be specified if Y is neither integer or strings"
other_categories_name = nothing
"Random Number Generator [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG = Random.GLOBAL_RNG
end
"""
MMI.fit(model::NeuralNetworkClassifier, verbosity, X, y)
For the `verbosity` parameter see [`Verbosity`](@ref).
"""
function MMI.fit(m::NeuralNetworkClassifier, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
categories = deepcopy(m.categories)
if isnothing(categories)
#if occursin("CategoricalVector",string(typeof(y))) # to avoid dependency to CategoricalArrays or MLJBase
if typeof(y) <: CategoricalVector
categories = levels(y)
end
end
ohmod = BetaML.Utils.OneHotEncoder(categories=categories,handle_unknown=m.handle_unknown,other_categories_name=m.other_categories_name, verbosity=verbosity)
Y_oh = BetaML.Api.fit!(ohmod,y)
nR,nD = size(x)
(nRy,nDy) = size(Y_oh)
nR == nRy || error("X and Y have different number of records (rows)")
if isnothing(m.layers)
layers = nothing
else
layers = deepcopy(m.layers)
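# append the final softmax layer automatically (see the `layers` docstring) so the network outputs class probabilities matching the one-hot encoded target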
push!(layers,BetaML.Nn.VectorFunctionLayer(nDy,f=BetaML.Utils.softmax))
end
mi = BetaML.Nn.NeuralNetworkEstimator(;layers=layers,loss=m.loss, dloss=m.dloss, epochs=m.epochs, batch_size=m.batch_size, opt_alg=m.opt_alg,shuffle=m.shuffle, cache=false, descr=m.descr, cb=m.cb, rng=m.rng, verbosity=verbosity)
BetaML.Api.fit!(mi,x,Y_oh)
fitresults = (mi,ohmod)
cache = nothing
report = nothing
return fitresults, cache, report
end
function MMI.predict(m::NeuralNetworkClassifier, fitresult, Xnew)
nnmod, ohmod = fitresult
yhat = BetaML.Api.predict(nnmod, MMI.matrix(Xnew))
classes = BetaML.Api.parameters(ohmod).categories
predictions = MMI.UnivariateFinite(classes, yhat,pool=missing)
#return yhat
return predictions
end
MMI.metadata_model(NeuralNetworkClassifier,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Count}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Count}},
},
target_scitype = AbstractVector{<: Union{MMI.Multiclass,MMI.Finite,MMI.Count}},
supports_weights = false,
load_path = "BetaML.Bmlj.NeuralNetworkClassifier"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 14227 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for the BetaML Perceptron models
export PerceptronClassifier, KernelPerceptronClassifier, PegasosClassifier
# ------------------------------------------------------------------------------
# Model Structure declarations..
"""
$(TYPEDEF)
The classical perceptron algorithm using one-vs-all for multiclass, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load PerceptronClassifier pkg = "BetaML"
[ Info: For silent loading, specify `verbosity=0`.
import BetaML ✔
BetaML.Perceptron.PerceptronClassifier
julia> model = modelType()
PerceptronClassifier(
initial_coefficients = nothing,
initial_constant = nothing,
epochs = 1000,
shuffle = true,
force_origin = false,
return_mean_hyperplane = false,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(PerceptronClassifier(initial_coefficients = nothing, …), …).
*** Avg. error after epoch 2 : 0.0 (all elements of the set has been correctly classified)
julia> est_classes = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt8, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>2.53e-34, virginica=>0.0)
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>1.27e-18, virginica=>1.86e-310)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>2.77e-57, versicolor=>1.1099999999999999e-82, virginica=>1.0)
UnivariateFinite{Multiclass{3}}(setosa=>3.09e-22, versicolor=>4.03e-25, virginica=>1.0)
```
"""
mutable struct PerceptronClassifier <: MMI.Probabilistic
"N-classes by D-dimensions matrix of initial linear coefficients [def: `nothing`, i.e. zeros]"
initial_coefficients::Union{Matrix{Float64},Nothing}
"N-classes vector of initial contant terms [def: `nothing`, i.e. zeros]"
initial_constant::Union{Vector{Float64},Nothing}
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `1000`]"
epochs::Int64
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool
"Whether to force the parameter associated with the constant term to remain zero [def: `false`]"
force_origin::Bool
"Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]"
return_mean_hyperplane::Bool
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
PerceptronClassifier(;
initial_coefficients=nothing,
initial_constant=nothing,
epochs=1000,
shuffle=true,
force_origin=false,
return_mean_hyperplane=false,
rng = Random.GLOBAL_RNG,
) = PerceptronClassifier(initial_coefficients,initial_constant,epochs,shuffle,force_origin,return_mean_hyperplane,rng)
"""
$(TYPEDEF)
The kernel perceptron algorithm using one-vs-one for multiclass, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load KernelPerceptronClassifier pkg = "BetaML"
[ Info: For silent loading, specify `verbosity=0`.
import BetaML ✔
BetaML.Perceptron.KernelPerceptronClassifier
julia> model = modelType()
KernelPerceptronClassifier(
kernel = BetaML.Utils.radial_kernel,
epochs = 100,
initial_errors = nothing,
shuffle = true,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
julia> est_classes = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt8, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>0.665, versicolor=>0.245, virginica=>0.09)
UnivariateFinite{Multiclass{3}}(setosa=>0.665, versicolor=>0.245, virginica=>0.09)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>0.09, versicolor=>0.245, virginica=>0.665)
UnivariateFinite{Multiclass{3}}(setosa=>0.09, versicolor=>0.665, virginica=>0.245)
```
"""
mutable struct KernelPerceptronClassifier <: MMI.Probabilistic
"Kernel function to employ. See `?radial_kernel` or `?polynomial_kernel` (once loaded the BetaML package) for details or check `?BetaML.Utils` to verify if other kernels are defined (you can alsways define your own kernel) [def: [`radial_kernel`](@ref)]"
kernel::Function
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `100`]"
epochs::Int64
"Initial distribution of the number of errors errors [def: `nothing`, i.e. zeros]. If provided, this should be a nModels-lenght vector of nRecords integer values vectors , where nModels is computed as `(n_classes * (n_classes - 1)) / 2`"
initial_errors::Union{Nothing,Vector{Vector{Int64}}}
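# For example, with the 3-class iris data the one-vs-one scheme trains (3 * 2) / 2 = 3 binary models, so 3 error vectors would be expected here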
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
KernelPerceptronClassifier(;
kernel=BetaML.Utils.radial_kernel,
epochs=100,
initial_errors = nothing,
shuffle=true,
rng = Random.GLOBAL_RNG,
) = KernelPerceptronClassifier(kernel,epochs,initial_errors,shuffle,rng)
"""
$(TYPEDEF)
The gradient-based linear "pegasos" classifier using one-vs-all for multiclass, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load PegasosClassifier pkg = "BetaML" verbosity=0
BetaML.Perceptron.PegasosClassifier
julia> model = modelType()
PegasosClassifier(
initial_coefficients = nothing,
initial_constant = nothing,
learning_rate = BetaML.Perceptron.var"#71#73"(),
learning_rate_multiplicative = 0.5,
epochs = 1000,
shuffle = true,
force_origin = false,
return_mean_hyperplane = false,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
julia> est_classes = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt8, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>0.817, versicolor=>0.153, virginica=>0.0301)
UnivariateFinite{Multiclass{3}}(setosa=>0.791, versicolor=>0.177, virginica=>0.0318)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>0.254, versicolor=>0.5, virginica=>0.246)
UnivariateFinite{Multiclass{3}}(setosa=>0.283, versicolor=>0.51, virginica=>0.207)
```
"""
mutable struct PegasosClassifier <: MMI.Probabilistic
"N-classes by D-dimensions matrix of initial linear coefficients [def: `nothing`, i.e. zeros]"
initial_coefficients::Union{Matrix{Float64},Nothing}
"N-classes vector of initial contant terms [def: `nothing`, i.e. zeros]"
initial_constant::Union{Vector{Float64},Nothing}
"Learning rate [def: (epoch -> 1/sqrt(epoch))]"
learning_rate::Function
"Multiplicative term of the learning rate [def: `0.5`]"
learning_rate_multiplicative::Float64
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `1000`]"
epochs::Int64
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool
"Whether to force the parameter associated with the constant term to remain zero [def: `false`]"
force_origin::Bool
"Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]"
return_mean_hyperplane::Bool
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
PegasosClassifier(;
initial_coefficients=nothing,
initial_constant=nothing,
learning_rate = (t -> 1/sqrt(t)),
learning_rate_multiplicative = 0.5,
epochs=1000,
shuffle=true,
force_origin=false,
return_mean_hyperplane=false,
rng = Random.GLOBAL_RNG,
) = PegasosClassifier(initial_coefficients,initial_constant,learning_rate,learning_rate_multiplicative,epochs,shuffle,force_origin,return_mean_hyperplane,rng)
# ------------------------------------------------------------------------------
# Fit functions...
function MMI.fit(model::PerceptronClassifier, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
allClasses = levels(y)
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
#initial_coefficients = length(model.initial_coefficients) == 0 ? zeros(size(x,2)) : model.initial_coefficients
fitresult = BetaML.Perceptron.perceptron(x, y; θ=model.initial_coefficients, θ₀=model.initial_constant, T=model.epochs, nMsgs=0, shuffle=model.shuffle, force_origin=model.force_origin, return_mean_hyperplane=model.return_mean_hyperplane,rng=model.rng, verbosity=verbosity)
cache=nothing
report=nothing
return (fitresult,allClasses), cache, report
end
function MMI.fit(model::KernelPerceptronClassifier, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
allClasses = levels(y)
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
#initial_errors = length(model.initial_errors) == 0 ? zeros(Int64,length(y)) : model.initial_errors
fitresult = BetaML.Perceptron.kernel_perceptron_classifier(x, y; K=model.kernel, T=model.epochs, α=model.initial_errors, nMsgs=0, shuffle=model.shuffle,rng=model.rng, verbosity=verbosity)
cache = nothing
report = nothing
return (fitresult,allClasses), cache, report
end
function MMI.fit(model::PegasosClassifier, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
allClasses = levels(y)
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
#initial_coefficients = length(model.initial_coefficients) == 0 ? zeros(size(x,2)) : model.initial_coefficients
fitresult = BetaML.Perceptron.pegasos(x, y; θ=model.initial_coefficients,θ₀=model.initial_constant, λ=model.learning_rate_multiplicative,η=model.learning_rate, T=model.epochs, nMsgs=0, shuffle=model.shuffle, force_origin=model.force_origin, return_mean_hyperplane=model.return_mean_hyperplane,rng=model.rng, verbosity=verbosity)
cache=nothing
report=nothing
return (fitresult,allClasses), cache, report
end
# ------------------------------------------------------------------------------
# Predict functions....
function MMI.predict(model::Union{PerceptronClassifier,PegasosClassifier}, fitresult, Xnew)
fittedModel = fitresult[1]
#classes = CategoricalVector(fittedModel.classes)
classes = fittedModel.classes
allClasses = fitresult[2] # as classes does not include classes unseen at training time
nLevels = length(allClasses)
nRecords = MMI.nrows(Xnew)
modelPredictions = BetaML.Perceptron.predict(MMI.matrix(Xnew), fittedModel.θ, fittedModel.θ₀, fittedModel.classes)
predMatrix = zeros(Float64,(nRecords,nLevels))
# Transform the predictions from a vector of dictionaries to a matrix
# where the rows are the PMF of each record
for n in 1:nRecords
for (c,cl) in enumerate(allClasses)
predMatrix[n,c] = get(modelPredictions[n],cl,0.0)
end
end
#predictions = [MMI.UnivariateFinite(classes, predMatrix[i,:])
# for i in 1:nRecords]
predictions = MMI.UnivariateFinite(allClasses,predMatrix,pool=missing)
return predictions
end
function MMI.predict(model::KernelPerceptronClassifier, fitresult, Xnew)
fittedModel = fitresult[1]
#classes = CategoricalVector(fittedModel.classes)
classes = fittedModel.classes
allClasses = fitresult[2] # as classes does not include classes unseen at training time
nLevels = length(allClasses)
nRecords = MMI.nrows(Xnew)
#ŷtrain = Perceptron.predict([10 10; 2.2 2.5],model.x,model.y,model.α, model.classes,K=model.K)
modelPredictions = BetaML.Perceptron.predict(MMI.matrix(Xnew), fittedModel.x, fittedModel.y, fittedModel.α, fittedModel.classes, K=fittedModel.K)
predMatrix = zeros(Float64,(nRecords,nLevels))
# Transform the predictions from a vector of dictionaries to a matrix
# where the rows are the PMF of each record
for n in 1:nRecords
for (c,cl) in enumerate(allClasses)
predMatrix[n,c] = get(modelPredictions[n],cl,0.0)
end
end
#predictions = [MMI.UnivariateFinite(classes, predMatrix[i,:])
# for i in 1:nRecords]
#predictions = MMI.UnivariateFinite(classes, predMatrix)
predictions = MMI.UnivariateFinite(allClasses,predMatrix,pool=missing)
#predictions4 = MMI.UnivariateFinite(modelPredictions,pool=classes,ordered=false)
#predictions = MMI.UnivariateFinite(modelPredictions,pool=fittedModel.classes)
return predictions
end
# ------------------------------------------------------------------------------
# Model metadata for registration in MLJ...
MMI.metadata_model(PerceptronClassifier,
input_scitype = Union{
MMI.Table(MMI.Infinite),
AbstractMatrix{<: MMI.Infinite},
},
target_scitype = AbstractVector{<: MMI.Finite},
supports_weights = false,
load_path = "BetaML.Bmlj.PerceptronClassifier"
)
MMI.metadata_model(KernelPerceptronClassifier,
input_scitype = Union{
MMI.Table(MMI.Infinite),
AbstractMatrix{<: MMI.Infinite},
},
target_scitype = AbstractVector{<: MMI.Finite},
supports_weights = false,
load_path = "BetaML.Bmlj.KernelPerceptronClassifier"
)
MMI.metadata_model(PegasosClassifier,
input_scitype = Union{
MMI.Table(MMI.Infinite),
AbstractMatrix{<: MMI.Infinite},
},
target_scitype = AbstractVector{<: MMI.Finite},
supports_weights = false,
load_path = "BetaML.Bmlj.PegasosClassifier"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 17559 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for Decision Trees/Random Forests models
export DecisionTreeRegressor, RandomForestRegressor, DecisionTreeClassifier, RandomForestClassifier
# ------------------------------------------------------------------------------
# Model Structure declarations..
"""
$(TYPEDEF)
A simple Decision Tree model for regression with support for Missing data, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> modelType = @load DecisionTreeRegressor pkg = "BetaML" verbosity=0
BetaML.Trees.DecisionTreeRegressor
julia> model = modelType()
DecisionTreeRegressor(
max_depth = 0,
min_gain = 0.0,
min_records = 2,
max_features = 0,
splitting_criterion = BetaML.Utils.variance,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(DecisionTreeRegressor(max_depth = 0, …), …).
julia> ŷ = predict(mach, X);
julia> hcat(y,ŷ)
506×2 Matrix{Float64}:
24.0 26.35
21.6 21.6
34.7 34.8
⋮
23.9 23.75
22.0 22.2
11.9 13.2
```
"""
mutable struct DecisionTreeRegressor <: MMI.Deterministic
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `0`, i.e. no limits]"
max_depth::Int64
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64
"The maximum number of (random) features to consider at each partitioning [def: `0`, i.e. look at all features]"
max_features::Int64
"This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `variance`]. Either `variance` or a custom function. It can also be an anonymous function."
splitting_criterion::Function
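# A sketch of the criterion described above: gain ≈ impurity(parent) - (n_left/n) * impurity(left) - (n_right/n) * impurity(right), with impurity measured by `splitting_criterion`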
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
DecisionTreeRegressor(;
max_depth=0, #typemax(Int)
min_gain=0.0,
min_records=2,
max_features=0,
splitting_criterion=BetaML.Utils.variance,
rng = Random.GLOBAL_RNG,
) = DecisionTreeRegressor(max_depth,min_gain,min_records,max_features,splitting_criterion,rng)
"""
$(TYPEDEF)
A simple Decision Tree model for classification with support for Missing data, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load DecisionTreeClassifier pkg = "BetaML" verbosity=0
BetaML.Trees.DecisionTreeClassifier
julia> model = modelType()
DecisionTreeClassifier(
max_depth = 0,
min_gain = 0.0,
min_records = 2,
max_features = 0,
splitting_criterion = BetaML.Utils.gini,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(DecisionTreeClassifier(max_depth = 0, …), …).
julia> cat_est = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt32, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>0.0, versicolor=>0.0, virginica=>1.0)
UnivariateFinite{Multiclass{3}}(setosa=>0.0, versicolor=>0.0, virginica=>1.0)
UnivariateFinite{Multiclass{3}}(setosa=>0.0, versicolor=>0.0, virginica=>1.0)
```
"""
mutable struct DecisionTreeClassifier <: MMI.Probabilistic
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `0`, i.e. no limits]"
max_depth::Int64
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64
"The maximum number of (random) features to consider at each partitioning [def: `0`, i.e. look at all features]"
max_features::Int64
"This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `gini`]. Either `gini`, `entropy` or a custom function. It can also be an anonymous function."
splitting_criterion::Function
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
DecisionTreeClassifier(;
max_depth=0,
min_gain=0.0,
min_records=2,
max_features=0,
splitting_criterion=BetaML.Utils.gini,
rng = Random.GLOBAL_RNG,
) = DecisionTreeClassifier(max_depth,min_gain,min_records,max_features,splitting_criterion,rng)
"""
$(TYPEDEF)
A simple Random Forest model for regression with support for Missing data, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example:
```julia
julia> using MLJ
julia> X, y = @load_boston;
julia> modelType = @load RandomForestRegressor pkg = "BetaML" verbosity=0
BetaML.Trees.RandomForestRegressor
julia> model = modelType()
RandomForestRegressor(
n_trees = 30,
max_depth = 0,
min_gain = 0.0,
min_records = 2,
max_features = 0,
splitting_criterion = BetaML.Utils.variance,
β = 0.0,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(RandomForestRegressor(n_trees = 30, …), …).
julia> ŷ = predict(mach, X);
julia> hcat(y,ŷ)
506×2 Matrix{Float64}:
24.0 25.8433
21.6 22.4317
34.7 35.5742
33.4 33.9233
⋮
23.9 24.42
22.0 22.4433
11.9 15.5833
```
"""
mutable struct RandomForestRegressor <: MMI.Deterministic
"Number of (decision) trees in the forest [def: `30`]"
n_trees::Int64
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `0`, i.e. no limits]"
max_depth::Int64
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64
"The maximum number of (random) features to consider at each partitioning [def: `0`, i.e. square root of the data dimension]"
max_features::Int64
"This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `variance`]. Either `variance` or a custom function. It can also be an anonymous function."
splitting_criterion::Function
"Parameter that regulate the weights of the scoring of each tree, to be (optionally) used in prediction based on the error of the individual trees computed on the records on which trees have not been trained. Higher values favour \"better\" trees, but too high values will cause overfitting [def: `0`, i.e. uniform weigths]"
β::Float64
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
RandomForestRegressor(;
n_trees=30,
max_depth=0,
min_gain=0.0,
min_records=2,
max_features=0,
splitting_criterion=BetaML.Utils.variance,
β=0.0,
rng = Random.GLOBAL_RNG,
) = RandomForestRegressor(n_trees,max_depth,min_gain,min_records,max_features,splitting_criterion,β,rng)
"""
$(TYPEDEF)
A simple Random Forest model for classification with support for Missing data, from the Beta Machine Learning Toolkit (BetaML).
# Hyperparameters:
$(TYPEDFIELDS)
# Example :
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load RandomForestClassifier pkg = "BetaML" verbosity=0
BetaML.Trees.RandomForestClassifier
julia> model = modelType()
RandomForestClassifier(
n_trees = 30,
max_depth = 0,
min_gain = 0.0,
min_records = 2,
max_features = 0,
splitting_criterion = BetaML.Utils.gini,
β = 0.0,
rng = Random._GLOBAL_RNG())
julia> mach = machine(model, X, y);
julia> fit!(mach);
[ Info: Training machine(RandomForestClassifier(n_trees = 30, …), …).
julia> cat_est = predict(mach, X)
150-element CategoricalDistributions.UnivariateFiniteVector{Multiclass{3}, String, UInt32, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
⋮
UnivariateFinite{Multiclass{3}}(setosa=>0.0, versicolor=>0.0, virginica=>1.0)
UnivariateFinite{Multiclass{3}}(setosa=>0.0, versicolor=>0.0667, virginica=>0.933)
```
"""
mutable struct RandomForestClassifier <: MMI.Probabilistic
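"Number of (decision) trees in the forest [def: `30`]"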
n_trees::Int64
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `0`, i.e. no limits]"
max_depth::Int64
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64
"The maximum number of (random) features to consider at each partitioning [def: `0`, i.e. square root of the data dimensions]"
max_features::Int64
"This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `gini`]. Either `gini`, `entropy` or a custom function. It can also be an anonymous function."
splitting_criterion::Function
"Parameter that regulate the weights of the scoring of each tree, to be (optionally) used in prediction based on the error of the individual trees computed on the records on which trees have not been trained. Higher values favour \"better\" trees, but too high values will cause overfitting [def: `0`, i.e. uniform weigths]"
β::Float64
"A Random Number Generator to be used in stochastic parts of the code [deafult: `Random.GLOBAL_RNG`]"
rng::AbstractRNG
end
RandomForestClassifier(;
n_trees=30,
max_depth=0,
min_gain=0.0,
min_records=2,
max_features=0,
splitting_criterion=BetaML.Utils.gini,
β=0.0,
rng = Random.GLOBAL_RNG,
) = RandomForestClassifier(n_trees,max_depth,min_gain,min_records,max_features,splitting_criterion,β,rng)
#=
# skipped for now..
# ------------------------------------------------------------------------------
# Hyperparameters ranges definition (for automatic tuning)
MMI.hyperparameter_ranges(::Type{<:DecisionTreeRegressor}) = (
# (range(Float64, :alpha, lower=0, upper=1, scale=:log),
# range(Int, :beta, lower=1, upper=Inf, origin=100, unit=50, scale=:log),
# nothing)
range(Int64,:max_depth,lower=0,upper=Inf,scale=:log),
range(Float64,:min_gain,lower=0,upper=Inf,scale=:log),
range(Int64,:min_records,lower=0,upper=Inf,scale=:log),
range(Int64,:max_features,lower=0,upper=Inf,scale=:log),
nothing
)
=#
# ------------------------------------------------------------------------------
# Fit functions...
function MMI.fit(model::Union{DecisionTreeRegressor,RandomForestRegressor}, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
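# max_depth == 0 means "no limit": use the number of records as a safe upper bound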
max_depth = model.max_depth == 0 ? size(x,1) : model.max_depth
# Using low level API here. We could switch to APIV2...
if (typeof(model) == DecisionTreeRegressor)
max_features = model.max_features == 0 ? size(x,2) : model.max_features
fitresult = BetaML.Trees.buildTree(x, y, max_depth=max_depth, min_gain=model.min_gain, min_records=model.min_records, max_features=max_features, splitting_criterion=model.splitting_criterion,rng=model.rng, verbosity=verbosity)
else
max_features = model.max_features == 0 ? Int(round(sqrt(size(x,2)))) : model.max_features
fitresult = BetaML.Trees.buildForest(x, y, model.n_trees, max_depth=max_depth, min_gain=model.min_gain, min_records=model.min_records, max_features=max_features, splitting_criterion=model.splitting_criterion, β=model.β,rng=model.rng,verbosity=verbosity)
end
cache=nothing
report=nothing
return fitresult, cache, report
end
function MMI.fit(model::Union{DecisionTreeClassifier,RandomForestClassifier}, verbosity, X, y)
x = MMI.matrix(X) # convert table to matrix
a_target_element = y[1] # a CategoricalValue or CategoricalString
#y_plain = MMI.int(y) .- 1 # integer relabeling should start at 0
yarray = convert(Vector{eltype(levels(y))},y) # convert to a simple Array{T}
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
max_depth = model.max_depth == 0 ? size(x,1) : model.max_depth
# Using low level API here. We could switch to APIV2...
if (typeof(model) == DecisionTreeClassifier)
max_features = model.max_features == 0 ? size(x,2) : model.max_features
fittedmodel = BetaML.Trees.buildTree(x, yarray, max_depth=max_depth, min_gain=model.min_gain, min_records=model.min_records, max_features=max_features, splitting_criterion=model.splitting_criterion, force_classification=true,rng=model.rng, verbosity=verbosity)
else
max_features = model.max_features == 0 ? Int(round(sqrt(size(x,2)))) : model.max_features
fittedmodel = BetaML.Trees.buildForest(x, yarray, model.n_trees, max_depth=max_depth, min_gain=model.min_gain, min_records=model.min_records, max_features=max_features, splitting_criterion=model.splitting_criterion, force_classification=true, β=model.β,rng=model.rng, verbosity=verbosity)
end
cache = nothing
report = nothing
fitresult = (fittedmodel,a_target_element)
return (fitresult, cache, report)
end
# ------------------------------------------------------------------------------
# Predict functions....
MMI.predict(model::Union{DecisionTreeRegressor,RandomForestRegressor}, fitresult, Xnew) = BetaML.Trees.predict(fitresult, MMI.matrix(Xnew))
function MMI.predict(model::Union{DecisionTreeClassifier,RandomForestClassifier}, fitresult, Xnew)
fittedModel = fitresult[1]
a_target_element = fitresult[2]
decode = MMI.decoder(a_target_element)
classes = MMI.classes(a_target_element)
nLevels = length(classes)
nRecords = MMI.nrows(Xnew)
treePredictions = BetaML.Trees.predict(fittedModel, MMI.matrix(Xnew),rng=model.rng)
predMatrix = zeros(Float64,(nRecords,nLevels))
# Transform the predictions from a vector of dictionaries to a matrix
# where the rows are the PMF of each record
for n in 1:nRecords
for (c,cl) in enumerate(classes)
predMatrix[n,c] = get(treePredictions[n],string(cl),0.0)
end
end
predictions = MMI.UnivariateFinite(classes, predMatrix)
return predictions
end
# ------------------------------------------------------------------------------
# Model metadata for registration in MLJ...
MMI.metadata_model(DecisionTreeRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
target_scitype = AbstractVector{<: MMI.Continuous}, # for a supervised model, what target?
supports_weights = false, # does the model support sample weights?
load_path = "BetaML.Bmlj.DecisionTreeRegressor"
)
MMI.metadata_model(RandomForestRegressor,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
target_scitype = AbstractVector{<: MMI.Continuous},
supports_weights = false,
load_path = "BetaML.Bmlj.RandomForestRegressor"
)
MMI.metadata_model(DecisionTreeClassifier,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
target_scitype = AbstractVector{<: Union{MMI.Missing,MMI.Finite}},
supports_weights = false,
load_path = "BetaML.Bmlj.DecisionTreeClassifier"
)
MMI.metadata_model(RandomForestClassifier,
input_scitype = Union{
MMI.Table(Union{MMI.Known,MMI.Missing}),
AbstractMatrix{<:Union{MMI.Known,MMI.Missing}},
},
target_scitype = AbstractVector{<: Union{MMI.Missing,MMI.Finite}},
supports_weights = false,
load_path = "BetaML.Bmlj.RandomForestClassifier"
)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 7844 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# MLJ interface for the Utils models of BetaML
export AutoEncoder
# ------------------------------------------------------------------------------
# Start AutoEncoder
# Model Structure declarations..
"""
$(TYPEDEF)
A ready-to-use AutoEncoder, from the Beta Machine Learning Toolkit (BetaML), for encoding and decoding of data using neural networks
# Parameters:
$(FIELDS)
# Notes:
- data must be numerical
- use `transform` to obtain the encoded data, and `inverse_transform` to decode to the original data
# Example:
```julia
julia> using MLJ
julia> X, y = @load_iris;
julia> modelType = @load AutoEncoder pkg = "BetaML" verbosity=0;
julia> model = modelType(encoded_size=2,layers_size=10);
julia> mach = machine(model, X)
untrained Machine; caches model-specific representations of data
model: AutoEncoder(e_layers = nothing, …)
args:
1: Source @334 ⏎ Table{AbstractVector{Continuous}}
julia> fit!(mach,verbosity=2)
[ Info: Training machine(AutoEncoder(e_layers = nothing, …), …).
***
*** Training for 200 epochs with algorithm BetaML.Nn.ADAM.
Training.. avg loss on epoch 1 (1): 35.48243542158747
Training.. avg loss on epoch 20 (20): 0.07528042222678126
Training.. avg loss on epoch 40 (40): 0.06293071729378613
Training.. avg loss on epoch 60 (60): 0.057035588828991145
Training.. avg loss on epoch 80 (80): 0.056313167754822875
Training.. avg loss on epoch 100 (100): 0.055521461091809436
Training the Neural Network... 52%|██████████████████████████████████████ | ETA: 0:00:01Training.. avg loss on epoch 120 (120): 0.06015206472927942
Training.. avg loss on epoch 140 (140): 0.05536835903285201
Training.. avg loss on epoch 160 (160): 0.05877560142428245
Training.. avg loss on epoch 180 (180): 0.05476302769966953
Training.. avg loss on epoch 200 (200): 0.049240864053557445
Training the Neural Network... 100%|█████████████████████████████████████████████████████████████████████████| Time: 0:00:01
Training of 200 epoch completed. Final epoch error: 0.049240864053557445.
trained Machine; caches model-specific representations of data
model: AutoEncoder(e_layers = nothing, …)
args:
1: Source @334 ⏎ Table{AbstractVector{Continuous}}
julia> X_latent = transform(mach, X)
150×2 Matrix{Float64}:
7.01701 -2.77285
6.50615 -2.9279
6.5233 -2.60754
⋮
6.70196 -10.6059
6.46369 -11.1117
6.20212 -10.1323
julia> X_recovered = inverse_transform(mach,X_latent)
150×4 Matrix{Float64}:
5.04973 3.55838 1.43251 0.242215
4.73689 3.19985 1.44085 0.295257
4.65128 3.25308 1.30187 0.244354
⋮
6.50077 2.93602 5.3303 1.87647
6.38639 2.83864 5.54395 2.04117
6.01595 2.67659 5.03669 1.83234
julia> BetaML.relative_mean_error(MLJ.matrix(X),X_recovered)
0.03387721261716176
```
"""
Base.@kwdef mutable struct AutoEncoder <: MMI.Unsupervised
"The number of neurons (i.e. dimensions) of the encoded data. If the value is a float it is consiered a percentual (to be rounded) of the dimensionality of the data [def: `0.33`]"
encoded_size::Union{Float64,Int64} = 0.333
"Inner layer dimension (i.e. number of neurons). If the value is a float it is considered a percentual (to be rounded) of the dimensionality of the data [def: `nothing` that applies a specific heuristic]. Consider that the underlying neural network is trying to predict multiple values at the same times. Normally this requires many more neurons than a scalar prediction. If `e_layers` or `d_layers` are specified, this parameter is ignored for the respective part."
layers_size::Union{Int64,Float64,Nothing} = nothing
"The layers (vector of `AbstractLayer`s) responsable of the encoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `layers_size`]. See `subtypes(BetaML.AbstractLayer)` for supported layers"
e_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"The layers (vector of `AbstractLayer`s) responsable of the decoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `layers_size`]. See `subtypes(BetaML.AbstractLayer)` for supported layers"
d_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"""Loss (cost) function [def: `BetaML.squared_cost`]. Should always assume y and ŷ as (n x d) matrices.
!!! warning
If you change the parameter `loss`, you need to either provide its derivative on the parameter `dloss` or use autodiff with `dloss=nothing`.
"""
loss::Union{Nothing,Function} = BetaML.Utils.squared_cost
"Derivative of the loss function [def: `BetaML.dsquared_cost` if `loss==squared_cost`, `nothing` otherwise, i.e. use the derivative of the squared cost or autodiff]"
dloss::Union{Function,Nothing} = nothing
"Number of epochs, i.e. passages trough the whole training sample [def: `200`]"
epochs::Int64 = 200
"Size of each individual batch [def: `8`]"
batch_size::Int64 = 8
"The optimisation algorithm to update the gradient at each batch [def: `BetaML.ADAM()`] See `subtypes(BetaML.OptimisationAlgorithm)` for supported optimizers"
opt_alg::OptimisationAlgorithm = BetaML.Nn.ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and optionally change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = BetaML.Utils.SuccessiveHalvingSearch(hpranges = Dict("epochs"=>[100,150,200],"batch_size"=>[8,16,32],"encoded_size"=>[0.2,0.3,0.5],"layers_size"=>[1.3,2.0,5.0]),multithreads=false)
"An optional title and/or description for this model"
descr::String = ""
"Random Number Generator (see [`FIXEDSEED`](@ref)) [deafult: `Random.GLOBAL_RNG`]
"
rng::AbstractRNG = Random.GLOBAL_RNG
end
"""
$(TYPEDSIGNATURES)
For the `verbosity` parameter see [`Verbosity`](@ref).
"""
function MMI.fit(m::AutoEncoder, verbosity, X)
x = MMI.matrix(X) # convert table to matrix
typeof(verbosity) <: Integer || error("Verbosity must be an integer. Current \"steps\" are 0, 1, 2 and 3.")
verbosity = mljverbosity_to_betaml_verbosity(verbosity)
mi = BetaML.Utils.AutoEncoder(;encoded_size=m.encoded_size,layers_size=m.layers_size,e_layers=m.e_layers,d_layers=m.d_layers,loss=m.loss, dloss=m.dloss, epochs=m.epochs, batch_size=m.batch_size, opt_alg=m.opt_alg,shuffle=m.shuffle, tunemethod=m.tunemethod, cache=false, descr=m.descr, rng=m.rng, verbosity=verbosity)
Api.fit!(mi,x)
fitresults = mi
cache = nothing
report = nothing
return fitresults, cache, report
end
#MMI.predict(m::AutoEncoder, fitresult, Xnew) = predict(fitresult, MMI.matrix(Xnew))
# MMI.transform(m::AutoEncoder, fitresult, Xnew) = MMI.predict(m::AutoEncoder, fitresult, Xnew)
MMI.transform(m::AutoEncoder, fitresult, Xnew) = BetaML.Api.predict(fitresult, MMI.matrix(Xnew))
MMI.inverse_transform(m::AutoEncoder, fitresult, Xnew) = BetaML.Api.inverse_predict(fitresult, MMI.matrix(Xnew))
MMI.metadata_model(AutoEncoder,
input_scitype = Union{
MMI.Table(Union{MMI.Continuous,MMI.Count}),
AbstractMatrix{<:Union{MMI.Continuous,MMI.Count}},
},
output_scitype = AbstractMatrix{<: Union{MMI.Continuous,MMI.Count}},
supports_weights = false,
load_path = "BetaML.Bmlj.AutoEncoder"
) | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1198 | """
Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT.
"""
"""
Clustering module (WIP)
(Hard) Clustering algorithms
Provides hard clustering methods using K-means and K-medoids. Please see also the `GMM` module for GMM-based soft clustering (i.e. where a probability distribution of belonging to the various classes is assigned to each record instead of a single class), as well as for missing values imputation / collaborative filtering / recommendation systems using clustering methods as a backend.
The module provides the following models. Use `?[model]` to access their documentation:
- [`KMeansClusterer`](@ref): Classical K-means algorithm
- [`KMedoidsClusterer`](@ref): K-medoids algorithm with configurable distance metric
Some metrics of the clustered output are available (e.g. [`silhouette`](@ref)).
"""
module Clustering
using LinearAlgebra, Random, Statistics, StatsBase, Reexport, CategoricalArrays, DocStringExtensions
import Distributions
using ForceImport
@force using ..Api
@force using ..Utils
import Base.print
import Base.show
export KMeansC_hp, KMedoidsC_hp, KMeansClusterer, KMedoidsClusterer
include("Clustering_hard.jl") # K-means and k-medoids
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 22339 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
init_representatives(X,K;initialisation_strategy,initial_representatives)
Initialise the representatives for a K-Means or K-Medoids algorithm
# Parameters:
* `X`: a (N x D) matrix of data to cluster
* `K`: Number of clusters wanted
* `initialisation_strategy`: How to select the initial representative vectors:
* `random`: randomly in the X space
* `grid`: using a grid approach [default]
* `shuffle`: selecting randomly within the available points
* `given`: using a set of initial representatives provided in the `initial_representatives` parameter
* `initial_representatives`: Provided (K x D) matrix of initial representatives (used only together with the `given` initialisation_strategy) [default: `nothing`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Returns:
* A (K x D) matrix of initial representatives
# Example:
```julia
julia> initial_representatives = init_representatives([1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.6 38],2,initialisation_strategy="given",initial_representatives=[1.7 15; 3.6 40])
```
"""
function init_representatives(X,n_classes;initialisation_strategy="grid",initial_representatives=nothing,rng = Random.GLOBAL_RNG)
X = makematrix(X)
(N,D) = size(X)
K = n_classes
# Random choice of initial representative vectors (any point, not just in X!)
minX = minimum(X,dims=1)
maxX = maximum(X,dims=1)
Z = zeros(K,D)
if initialisation_strategy == "random"
for i in 1:K
for j in 1:D
Z[i,j] = rand(rng,Distributions.Uniform(minX[j],maxX[j]))
end
end
elseif initialisation_strategy == "grid"
for d in 1:D
# same "space" for each class on each dimension
Z[:,d] = collect(range(minX[d] + (maxX[d]-minX[d])/(K*2) , stop=maxX[d] - (maxX[d]-minX[d])/(K*2) , length=K))
#ex: collect(range(minX[d], stop=maxX[d], length=K))
#collect(range(s+(e-s)/(K*2), stop=e-(e-s)/(K*2), length=K))
end
elseif initialisation_strategy == "given"
if isnothing(initial_representatives) error("With the `given` strategy you need to provide the initial set of representatives in the initial_representatives parameter.") end
initial_representatives = makematrix(initial_representatives)
Z = initial_representatives
elseif initialisation_strategy == "shuffle"
zIdx = StatsBase.sample(rng, 1:size(X,1), K, replace=false)
Z = X[zIdx, :]
else
error("initialisation_strategy \"$initialisation_strategy\" not implemented")
end
return Z
end
function classAssignation(X,Z,dist)
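# Assign each row of X to the index of the closest representative in Z according to the distance function `dist`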
cIdx = zeros(Int64,size(X,1))
for (i,x) in enumerate(eachrow(X))
cost = Inf
for (k,z) in enumerate(eachrow(Z))
if (dist(x,z) < cost)
cost = dist(x,z)
cIdx[i] = k
end
end
end
return cIdx
end
function updateKMeansRepresentatives!(Z,X,cIdx)
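# Recompute each representative as the mean of its assigned points; empty clusters are nudged toward the overall data mean so they can attract points in later iterations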
K,D = size(Z)
for j in 1:K
Cⱼ = X[cIdx .== j,:] # Selecting the constituency by boolean selection
if size(Cⱼ)[1] > 0
Z[j,:] = sum(Cⱼ,dims=1) ./ size(Cⱼ)[1]
else
# move toward the center if no constituency
xAvg = mean(X,dims=1)'
Z[j,:] = Z[j,:] .+ ((xAvg - Z[j,:]) .* 0.01)
end
end
end
## Basic K-Means Algorithm (Lecture/segment 13.7 of https://www.edx.org/course/machine-learning-with-python-from-linear-models-to)
"""
kmeans(X,K;dist,initialisation_strategy,initial_representatives)
Compute the K-Means algorithm to identify K clusters of X using the Euclidean distance
!!! warning
This function is no longer exported. Use `KMeansClusterer` instead.
# Parameters:
* `X`: a (N x D) matrix of data to cluster
* `K`: Number of clusters wanted
* `dist`: Function to employ as distance (see notes). Defaults to the Euclidean distance.
* `initialisation_strategy`: How to select the initial representative vectors:
* `random`: randomly in the X space
* `grid`: using a grid approach [default]
* `shuffle`: selecting randomly within the available points
* `given`: using a set of initial representatives provided in the `initial_representatives` parameter
* `initial_representatives`: Provided (K x D) matrix of initial representatives (used only together with the `given` initialisation_strategy) [default: `nothing`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Returns:
* A tuple of two items, the first one being a vector of size N of ids of the clusters associated to each point and the second one the (K x D) matrix of representatives
# Notes:
* Some returned clusters could be empty
* The `dist` parameter can be:
* Any user defined function accepting two vectors and returning a scalar
* An anonymous function with the same characteristics (e.g. `dist = (x,y) -> norm(x-y)^2`)
* One of the predefined distances: `l1_distance`, `l2_distance`, `l2squared_distance`, `cosine_distance`
# Example:
```julia
julia> (clIdx,Z) = kmeans([1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.3 38; 5.1 -2.3; 5.2 -2.4],3)
```
"""
function kmeans(X,K;dist=(x,y) -> norm(x-y),initialisation_strategy="grid",initial_representatives=nothing,verbosity=STD,rng = Random.GLOBAL_RNG)
X = makematrix(X)
(N,D) = size(X)
# Random choice of initial representative vectors (any point, not just in X!)
minX = minimum(X,dims=1)
maxX = maximum(X,dims=1)
initial_representatives = init_representatives(X,K,initialisation_strategy=initialisation_strategy,initial_representatives=initial_representatives,rng=rng)
Z = initial_representatives
cIdx_prev = zeros(Int64,N)
# Looping
while true
# Determining the constituency of each cluster
cIdx = classAssignation(X,Z,dist)
# Determining the new representative by each cluster
# for (j,z) in enumerate(eachrow(Z))
updateKMeansRepresentatives!(Z,X,cIdx)
# Checking termination condition: clusters didn't move any more
if cIdx == cIdx_prev
return (cIdx,Z)
else
cIdx_prev = cIdx
end
end
end
function updateKMedoidsRepresentatives!(Z,X,cIdx,dist)
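# For each cluster, set the representative to the member point that minimises the sum of distances to all other members (the medoid)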
K,D = size(Z)
for j in 1:K
Cⱼ = X[cIdx .== j,:] # Selecting the constituency by boolean selection
nⱼ = size(Cⱼ)[1] # Size of the cluster
if nⱼ == 0 continue end # empty constituency. Let's not do anything. Still, in the next iteration other representatives could move away and points could enter this cluster
bestCost = Inf
bestCIdx = 0
for cIdx in 1:nⱼ # candidate index
candidateCost = 0.0
for tIdx in 1:nⱼ # target index
candidateCost += dist(Cⱼ[cIdx,:],Cⱼ[tIdx,:])
end
if candidateCost < bestCost
bestCost = candidateCost
bestCIdx = cIdx
end
end
Z[j,:] = reshape(Cⱼ[bestCIdx,:],1,D)
end
end
## Basic K-Medoids Algorithm (Lecture/segment 14.3 of https://www.edx.org/course/machine-learning-with-python-from-linear-models-to)
"""
kmedoids(X,K;dist,initialisation_strategy,initial_representatives)
Compute the K-Medoids algorithm to identify K clusters of X using the distance definition `dist`
!!! warning
This function is no longer exported. Use `KMedoidsClusterer` instead.
# Parameters:
* `X`: a (n x d) matrix of data to cluster
* `K`: Number of clusters wanted
* `dist`: Function to employ as distance (see notes). Defaults to the Euclidean distance.
* `initialisation_strategy`: How to select the initial representative vectors:
* `random`: randomly in the X space
* `grid`: using a grid approach
* `shuffle`: selecting randomly within the available points [default]
* `given`: using a set of initial representatives provided in the `initial_representatives` parameter
* `initial_representatives`: Provided (K x D) matrix of initial representatives (used only together with the `given` initialisation_strategy) [default: `nothing`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Returns:
* A tuple of two items, the first one being a vector of size N of ids of the clusters associated to each point and the second one the (K x D) matrix of representatives
# Notes:
* Some returned clusters could be empty
* The `dist` parameter can be:
* Any user defined function accepting two vectors and returning a scalar
* An anonymous function with the same characteristics (e.g. `dist = (x,y) -> norm(x-y)^2`)
* One of the predefined distances: `l1_distance`, `l2_distance`, `l2squared_distance`, `cosine_distance`
# Example:
```julia
julia> (clIdx,Z) = kmedoids([1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.3 38; 5.1 -2.3; 5.2 -2.4],3,initialisation_strategy="grid")
```
"""
function kmedoids(X,K;dist=(x,y) -> norm(x-y),initialisation_strategy="grid",initial_representatives=nothing, verbosity=STD, rng = Random.GLOBAL_RNG)
X = makematrix(X)
(n,d) = size(X)
# Random choice of initial representative vectors
initial_representatives = init_representatives(X,K,initialisation_strategy=initialisation_strategy,initial_representatives=initial_representatives,rng=rng)
Z = initial_representatives
cIdx_prev = zeros(Int64,n)
# Looping
while true
# Determining the constituency of each cluster
cIdx = classAssignation(X,Z,dist)
# Determining the new representative by each cluster (within the points member)
#for (j,z) in enumerate(eachrow(Z))
updateKMedoidsRepresentatives!(Z,X,cIdx,dist)
# Checking termination condition: clusters didn't move any more
if cIdx == cIdx_prev
return (cIdx,Z)
else
cIdx_prev = cIdx
end
end
end
# ------------------------------------------------------------------------------
# Api v2..
"""
$(TYPEDEF)
Hyperparameters for the [`KMeansClusterer`](@ref) model
# Parameters:
$(TYPEDFIELDS)
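# Example:
A minimal construction sketch (the same keywords can be passed directly to [`KMeansClusterer`](@ref)):
```julia
julia> using BetaML
julia> hp  = KMeansC_hp(n_classes=2, initialisation_strategy="shuffle")
julia> mod = KMeansClusterer(n_classes=2, initialisation_strategy="shuffle")
```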
"""
Base.@kwdef mutable struct KMeansC_hp <: BetaMLHyperParametersSet
"Number of classes to discriminate the data [def: 3]"
n_classes::Int64 = 3
"Function to employ as distance. Default to the Euclidean distance. Can be one of the predefined distances (`l1_distance`, `l2_distance`, `l2squared_distance`, `cosine_distance`), any user defined function accepting two vectors and returning a scalar or an anonymous function with the same characteristics. Attention that the `KMeansClusterer` algorithm is not guaranteed to converge with other distances than the Euclidean one."
dist::Function = (x,y) -> norm(x-y)
"""
The computation method of the vector of the initial representatives.
One of the following:
- "random": randomly in the X space [default]
- "grid": using a grid approach
- "shuffle": selecting randomly within the available points
- "given": using a provided set of initial representatives provided in the `initial_representatives` parameter
"""
initialisation_strategy::String = "grid"
"Provided (K x D) matrix of initial representatives (useful only with `initialisation_strategy=\"given\"`) [default: `nothing`]"
initial_representatives::Union{Nothing,Matrix{Float64}} = nothing
end
"""
$(TYPEDEF)
Hyperparameters for the [`KMedoidsClusterer`](@ref) model
# Parameters:
$(TYPEDFIELDS)
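# Example:
A minimal construction sketch (the same keywords can be passed directly to [`KMedoidsClusterer`](@ref)); the anonymous L1 distance below is just an illustration:
```julia
julia> using BetaML, LinearAlgebra
julia> hp  = KMedoidsC_hp(n_classes=3, dist=(x,y) -> norm(x-y,1))
julia> mod = KMedoidsClusterer(n_classes=3, dist=(x,y) -> norm(x-y,1))
```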
"""
Base.@kwdef mutable struct KMedoidsC_hp <: BetaMLHyperParametersSet
"Number of classes to discriminate the data [def: 3]"
n_classes::Int64 = 3
"Function to employ as distance. Default to the Euclidean distance. Can be one of the predefined distances (`l1_distance`, `l2_distance`, `l2squared_distance`, `cosine_distance`), any user defined function accepting two vectors and returning a scalar or an anonymous function with the same characteristics. Attention that the `KMeansClusterer` algorithm is not guaranteed to converge with other distances than the Euclidean one."
dist::Function = (x,y) -> norm(x-y)
"""
The computation method of the vector of the initial representatives.
One of the following:
- "random": randomly in the X space
- "grid": using a grid approach
- "shuffle": selecting randomly within the available points [default]
- "given": using a provided set of initial representatives provided in the `initial_representatives` parameter
"""
initialisation_strategy::String = "shuffle"
"Provided (K x D) matrix of initial representatives (useful only with `initialisation_strategy=\"given\"`) [default: `nothing`]"
initial_representatives::Union{Nothing,Matrix{Float64}} = nothing
end
Base.@kwdef mutable struct KMeansMedoids_lp <: BetaMLLearnableParametersSet
representatives::Union{Nothing,Matrix{Float64}} = nothing
end
"""
$(TYPEDEF)
The classical "K-Means" clustering algorithm (unsupervised).
Learn to partition the data and assign each record to one of the `n_classes` classes according to a distance metric (default Euclidean).
For the parameters see [`?KMeansC_hp`](@ref KMeansC_hp) and [`?BML_options`](@ref BML_options).
# Notes:
- data must be numerical
- online fitting (re-fitting with new data) is supported by using the "old" representatives as init ones
# Example :
```julia
julia> using BetaML
julia> X = [1.1 10.1; 0.9 9.8; 10.0 1.1; 12.1 0.8; 0.8 9.8]
5×2 Matrix{Float64}:
1.1 10.1
0.9 9.8
10.0 1.1
12.1 0.8
0.8 9.8
julia> mod = KMeansClusterer(n_classes=2)
KMeansClusterer - A K-Means Model (unfitted)
julia> classes = fit!(mod,X)
5-element Vector{Int64}:
1
1
2
2
1
julia> newclasses = fit!(mod,[11 0.9])
1-element Vector{Int64}:
2
julia> info(mod)
Dict{String, Any} with 3 entries:
"fitted_records" => 6
"av_distance_last_fit" => 0.0
"xndims" => 2
julia> parameters(mod)
BetaML.Clustering.KMeansMedoids_lp (a BetaMLLearnableParametersSet struct)
- representatives: [1.13366 9.7209; 11.0 0.9]
```
"""
mutable struct KMeansClusterer <: BetaMLUnsupervisedModel
hpar::KMeansC_hp
opt::BML_options
par::Union{Nothing,KMeansMedoids_lp}
cres::Union{Nothing,Vector{Int64}}
fitted::Bool
info::Dict{String,Any}
end
"""
$(TYPEDEF)
The classical "K-Medoids" clustering algorithm (unsupervised).
Similar to K-Means, learn to partition the data and assign each record to one of the `n_classes` classes according to a distance metric, but the "representatives" (the centroids) are guaranteed to be one of the training points. The algorithm works with any arbitrary distance measure (default Euclidean).
For the parameters see [`?KMedoidsC_hp`](@ref KMedoidsC_hp) and [`?BML_options`](@ref BML_options).
# Notes:
- data must be numerical
- online fitting (re-fitting with new data) is supported by using the "old" representatives as init ones
- with `initialisation_strategy` different from `shuffle` (the default initialisation for K-Medoids) the representatives may not be one of the training points when the algorithm doesn't perform enough iterations. This can happen for example when the number of classes is close to the number of records to cluster.
# Example:
```julia
julia> using BetaML
julia> X = [1.1 10.1; 0.9 9.8; 10.0 1.1; 12.1 0.8; 0.8 9.8]
5×2 Matrix{Float64}:
1.1 10.1
0.9 9.8
10.0 1.1
12.1 0.8
0.8 9.8
julia> mod = KMedoidsClusterer(n_classes=2)
KMedoidsClusterer - A K-Medoids Model (unfitted)
julia> classes = fit!(mod,X)
5-element Vector{Int64}:
1
1
2
2
1
julia> newclasses = fit!(mod,[11 0.9])
1-element Vector{Int64}:
2
julia> info(mod)
Dict{String, Any} with 3 entries:
"fitted_records" => 6
"av_distance_last_fit" => 0.0
"xndims" => 2
julia> parameters(mod)
BetaML.Clustering.KMeansMedoids_lp (a BetaMLLearnableParametersSet struct)
- representatives: [0.9 9.8; 11.0 0.9]
```
"""
mutable struct KMedoidsClusterer <: BetaMLUnsupervisedModel
hpar::KMedoidsC_hp
opt::BML_options
par::Union{Nothing,KMeansMedoids_lp}
cres::Union{Nothing,Vector{Int64}}
fitted::Bool
info::Dict{String,Any}
end
function KMeansClusterer(;kwargs...)
m = KMeansClusterer(KMeansC_hp(),BML_options(),KMeansMedoids_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
function KMedoidsClusterer(;kwargs...)
m = KMedoidsClusterer(KMedoidsC_hp(),BML_options(),KMeansMedoids_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit the [`KMeansClusterer`](@ref) model to data
"""
function fit!(m::KMeansClusterer,x)
# Parameter alias..
K = m.hpar.n_classes
dist = m.hpar.dist
initialisation_strategy = m.hpar.initialisation_strategy
initial_representatives = m.hpar.initial_representatives
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
        # Note that in doing this we give a lot of importance to the new data, even if it is just a few records while the model has been fitted with millions of records.
        # So, training with 1000 records doesn't give the same output as training with 990 records and then training again with the remaining 10 records
verbosity >= HIGH && @info "Continuing training of a pre-fitted model"
(clIdx,Z) = kmeans(x,K,dist=dist,initial_representatives=m.par.representatives,initialisation_strategy="given",verbosity=verbosity,rng=rng)
else
(clIdx,Z) = kmeans(x,K,dist=dist,initialisation_strategy=initialisation_strategy,initial_representatives=initial_representatives,verbosity=verbosity,rng=rng)
end
m.par = KMeansMedoids_lp(representatives=Z)
m.cres = cache ? clIdx : nothing
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.info["av_distance_last_fit"] = sum(dist(x[i,:],Z[clIdx[i],:]) for i in 1:size(x,1)) / size(x,1)
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Fit the [`KMedoidsClusterer`](@ref) model to data
"""
function fit!(m::KMedoidsClusterer,x)
# Parameter alias..
K = m.hpar.n_classes
dist = m.hpar.dist
initialisation_strategy = m.hpar.initialisation_strategy
initial_representatives = m.hpar.initial_representatives
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
        # Note that in doing this we give a lot of importance to the new data, even if it is just a few records while the model has been fitted with millions of records.
        # So, training with 1000 records doesn't give the same output as training with 990 records and then training again with the remaining 10 records
verbosity >= HIGH && @info "Continuing training of a pre-fitted model"
(clIdx,Z) = kmedoids(x,K,dist=dist,initial_representatives=m.par.representatives,initialisation_strategy="given",verbosity=verbosity,rng=rng)
else
(clIdx,Z) = kmedoids(x,K,dist=dist,initialisation_strategy=initialisation_strategy,initial_representatives=initial_representatives,verbosity=verbosity,rng=rng)
end
m.par = KMeansMedoids_lp(representatives=Z)
m.cres = cache ? clIdx : nothing
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.info["av_distance_last_fit"] = sum(dist(x[i,:],Z[clIdx[i],:]) for i in 1:size(x,1)) / size(x,1)
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Assign the class of new data using the representatives learned by fitting a [`KMeansClusterer`](@ref) or [`KMedoidsClusterer`](@ref) model.
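# Example (a minimal sketch; `mod` is assumed to be an already fitted `KMeansClusterer` or `KMedoidsClusterer`, as in the examples of those models):
```julia
classes = predict(mod, [0.9 9.9; 11.5 0.7]) # vector with the index of the assigned class for each new record
```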
"""
function predict(m::Union{KMeansClusterer,KMedoidsClusterer},X)
X = makematrix(X)
representatives = m.par.representatives
classes = classAssignation(X,representatives,m.hpar.dist)
return classes
end
function show(io::IO, ::MIME"text/plain", m::KMeansClusterer)
if m.fitted == false
print(io,"KMeansClusterer - A K-Means Model (unfitted)")
else
print(io,"KMeansClusterer - A K-Means Model (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, ::MIME"text/plain", m::KMedoidsClusterer)
if m.fitted == false
print(io,"KMedoidsClusterer - A K-Medoids Model (unfitted)")
else
print(io,"KMedoidsClusterer - A K-Medoids Model (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::KMeansClusterer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"KMeansClusterer - A $(m.hpar.n_classes)-classes K-Means Model (unfitted)")
else
println(io,"KMeansClusterer - A $(m.info["xndims"])-dimensions $(m.hpar.n_classes)-classes K-Means Model (fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
println(io,"Representatives:")
println(io,m.par.representatives)
end
end
function show(io::IO, m::KMedoidsClusterer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"KMedoidsClusterer - A $(m.hpar.n_classes)-classes K-Medoids Model (unfitted)")
else
println(io,"KMedoidsClusterer - A $(m.info["xndims"])-dimensions $(m.hpar.n_classes)-classes K-Medoids Model (fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
println(io,"Distance function used:")
println(io,m.hpar.dist)
println(io,"Representatives:")
println(io,m.par.representatives)
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 2123 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
GMM module
Generative (Gaussian) Mixed Model learners (supervised/unsupervised)
Provides clustering and regressors using (Generative) Gaussian Mixture Models (probabilistic).
Collaborative filtering / missing values imputation / recommendation systems based on GMM are available in the `Imputation` module.
The module provides the following models. Use `?[model]` to access their documentation:
- [`GaussianMixtureClusterer`](@ref): soft-clustering using GMM
- [`GaussianMixtureRegressor2`](@ref): regressor using GMM as back-end (first algorithm)
- [`GaussianMixtureRegressor`](@ref): regressor using GMM as back-end (second algorithm)
All the algorithms work with arbitrary mixture distributions, although only {Spherical|Diagonal|Full} Gaussian mixtures have been implemented. User-defined mixtures can be used by defining a struct as a subtype of `AbstractMixture` and implementing for that mixture the following functions (a sketch is given at the end of this docstring):
- `init_mixtures!(mixtures, X; minimum_variance, minimum_covariance, initialisation_strategy)`
- `lpdf(m,x,mask)` (for the e-step)
- `update_parameters!(mixtures, X, pₙₖ; minimum_variance, minimum_covariance)` (the m-step)
- `npar(mixtures::Array{T,1})` (for the BIC/AIC computation)
All the GMM-based algorithms work only with numerical data, but they do accept missing values.
The `GaussianMixtureClusterer` algorithm reports the `BIC` and the `AIC` in its `info(model)`, but some metrics of the clustered output are also available, for example the [`silhouette`](@ref) score.
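As an illustration of the user-defined mixture interface described above, here is a minimal sketch (the `UnitVarGaussian` type, its fixed unit variance and its crude initialisation are all made up for the example; real mixtures should do better):
```julia
import BetaML.GMM: init_mixtures!, lpdf, update_parameters!, npar
mutable struct UnitVarGaussian <: AbstractMixture
    μ::Union{Nothing,Vector{Float64}}
    UnitVarGaussian(μ=nothing) = new(μ)
end
function init_mixtures!(mixtures::Vector{UnitVarGaussian}, X; minimum_variance=0.25, minimum_covariance=0.0, initialisation_strategy="grid", rng=nothing)
    (N,D) = size(X)
    for (k,m) in enumerate(mixtures)
        isnothing(m.μ) && (m.μ = fill(0.1k, D)) # crude spread of the initial centers
    end
end
# `x` contains only the observed dimensions of the record; `mask` tells which ones they are
lpdf(m::UnitVarGaussian, x, mask) = -length(x)/2*log(2π) - sum(abs2, x .- m.μ[mask])/2
function update_parameters!(mixtures::Vector{UnitVarGaussian}, X, pₙₖ; minimum_variance=0.25, minimum_covariance=0.0)
    (N,D) = size(X); Xmask = .! ismissing.(X)
    for (k,m) in enumerate(mixtures), d in 1:D
        den = sum(pₙₖ[Xmask[:,d],k])
        den > 0 && (m.μ[d] = sum(pₙₖ[Xmask[:,d],k] .* X[Xmask[:,d],d]) / den)
    end
end
npar(mixtures::Vector{UnitVarGaussian}) = length(mixtures) * length(mixtures[1].μ) # only the means are learned
```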
"""
module GMM
using LinearAlgebra, Random, Statistics, Reexport, CategoricalArrays, DocStringExtensions
import Distributions
using ForceImport
@force using ..Api
@force using ..Utils
@force using ..Clustering
import Base.print
import Base.show
#export gmm,
export AbstractMixture,
GaussianMixtureClusterer,
GaussianMixtureRegressor2, GaussianMixtureRegressor,
GaussianMixture_hp
abstract type AbstractMixture end
include("GMM_clustering.jl")
include("Mixtures.jl")
include("GMM_regression.jl")
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 20537 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
estep(X,pₖ,mixtures)
E-step: assign the posterior probabilities p(j|xᵢ) and compute the log-likelihood of the parameters given the data (the latter is used only for reporting and for terminating the algorithm)
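In formulas, the responsibility of mixture `k` for record `n` is `pₙₖ[n,k] = pₖ[k] * pdf(xₙ|mixtureₖ) / Σⱼ pₖ[j] * pdf(xₙ|mixtureⱼ)`; it is computed in the log-domain (log-sum-exp trick) for numerical stability, and the dimensions of `xₙ` that are missing are simply skipped.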
"""
function estep(X,pₖ,mixtures)
(N,D) = size(X)
K = length(mixtures)
Xmask = .! ismissing.(X)
logpₙₖ = zeros(N,K)
lL = 0
for n in 1:N
if any(Xmask[n,:]) # if at least one true
Xu = X[n,Xmask[n,:]]
logpx = lse([log(pₖ[k] + 1e-16) + lpdf(mixtures[k],Xu,Xmask[n,:]) for k in 1:K])
lL += logpx
for k in 1:K
logpₙₖ[n,k] = log(pₖ[k] + 1e-16)+lpdf(mixtures[k],Xu,Xmask[n,:])-logpx
end
else
logpₙₖ[n,:] = log.(pₖ)
end
end
pₙₖ = exp.(logpₙₖ)
return (pₙₖ,lL)
end
## The gmm algorithm (Lecture/segment 16.5 of https://www.edx.org/course/machine-learning-with-python-from-linear-models-to)
# no longer true with the numerical trick implemented
# - For mixtures with full covariance matrix (i.e. `FullGaussian(μ,σ²)`) the minimum_covariance should NOT be set equal to the minimum_variance, or if the covariance matrix goes too low, it will become singular and not invertible.
"""
gmm(X,K;initial_probmixtures,mixtures,tol,verbosity,minimum_variance,minimum_covariance,initialisation_strategy)
Compute Expectation-Maximisation algorithm to identify K clusters of X data, i.e. employ a Generative Mixture Model as the underlying probabilistic model.
!!! warning
This function is no longer exported. Use one of the various models that use GMM as backend instead.
X can contain missing values in some or all of its dimensions. In such case the learning is done only with the available data.
Implemented in the log-domain for better numerical accuracy with many dimensions.
# Parameters:
* `X` : A (n x d) data to clusterise
* `K` : Number of clusters wanted
* `initial_probmixtures` : Initial probabilities of the categorical distribution (K x 1) [default: `[]`]
* `mixtures`: An array (of length K) of the mixture to employ (see notes) [def: `[DiagonalGaussian() for i in 1:K]`]
* `tol`: Tolerance to stop the algorithm [default: 10^(-6)]
* `verbosity`: A verbosity parameter regulating the information messages frequency [def: `STD`]
* `minimum_variance`: Minimum variance for the mixtures [default: 0.05]
* `minimum_covariance`: Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance (see notes).
* `initialisation_strategy`: Mixture initialisation algorithm [def: `kmeans`]
* `maximum_iterations`: Maximum number of iterations [def: `typemax(Int64)`, i.e. ∞]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Returns:
* A named tuple of:
 * `pₙₖ`: Matrix of size (N x K) of the probabilities of each point `n` of belonging to cluster `k`
* `pₖ`: Probabilities of the categorical distribution (K x 1)
* `mixtures`: Vector (K x 1) of the estimated underlying distributions
* `ϵ`: Vector of the discrepancy (matrix norm) between pⱼₓ and the lagged pⱼₓ at each iteration
* `lL`: The log-likelihood (without considering the last mixture optimisation)
* `BIC`: The Bayesian Information Criterion (lower is better)
* `AIC`: The Akaike Information Criterion (lower is better)
# Notes:
- The mixtures currently implemented are `SphericalGaussian(μ,σ²)`,`DiagonalGaussian(μ,σ²)` and `FullGaussian(μ,σ²)`
- Reasonable choices for the minimum_variance/covariance depend on the mixture. For example 0.25 seems a reasonable value for the SphericalGaussian, 0.05 seems better for the DiagonalGaussian, and FullGaussian seems to prefer either very low values of variance/covariance (e.g. `(0.05,0.05)` ) or very big but similar ones (e.g. `(100,100)` ).
- For `initialisation_strategy`, look at the documentation of `init_mixtures!` for the mixture you want. The provided gaussian mixtures support `grid`, `kmeans` or `given`. `grid` is faster (especially if X contains missing values), but `kmeans` often provides better results.
# Resources:
- [Paper describing gmm with missing values](https://doi.org/10.1016/j.csda.2006.10.002)
- [Class notes from MITx 6.86x (Sec 15.9)](https://stackedit.io/viewer#!url=https://github.com/sylvaticus/MITx_6.86x/raw/master/Unit 04 - Unsupervised Learning/Unit 04 - Unsupervised Learning.md)
- [Limitations of gmm](https://www.r-craft.org/r-news/when-not-to-use-gaussian-mixture-model-gmm-clustering/)
# Example:
```julia
julia> clusters = gmm([1 10.5;1.5 0; 1.8 8; 1.7 15; 3.2 40; 0 0; 3.3 38; 0 -2.3; 5.2 -2.4],3,verbosity=HIGH)
```
"""
function gmm(X,K;initial_probmixtures=Float64[],mixtures=[DiagonalGaussian() for i in 1:K],tol=10^(-6),verbosity=STD,minimum_variance=0.05,minimum_covariance=0.0,initialisation_strategy="kmeans",maximum_iterations=typemax(Int64),rng = Random.GLOBAL_RNG)
# TODO: benchmark with this one: https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-022-04740-9
if verbosity > STD
@codelocation
end
# debug:
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#K = 3
#initial_probmixtures=nothing; tol=0.0001; msgStep=1; minimum_variance=0.25; initialisation_strategy="grid"
#mixtures = [SphericalGaussian() for i in 1:K]
# ---------
X = makematrix(X)
(N,D) = size(X)
pₖ = isempty(initial_probmixtures) ? fill(1/K,K) : copy(initial_probmixtures)
# no longer true with the numerical trick implemented
#if (minimum_variance == minimum_covariance)
# @warn("Setting the minimum_variance equal to the minimum_covariance may lead to singularity problems for mixtures with full covariance matrix.")
#end
msgStepMap = Dict(NONE => 0, LOW=>100, STD=>20, HIGH=>5, FULL=>1)
msgStep = msgStepMap[verbosity]
# Initialisation of the parameters of the mixtures
    mixtures = identity.(deepcopy(mixtures)) # narrow the container to the minimum common element type; the deepcopy avoids changing the function argument
#mixtures = identity.(mixtures)
init_mixtures!(mixtures,X,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy=initialisation_strategy,rng=rng)
pₙₖ = zeros(Float64,N,K) # The posteriors, i.e. the prob that item n belong to cluster k
ϵ = Float64[]
# Checking dimensions only once (but adding then inbounds doesn't change anything. Still good
# to provide a nice informative message)
if size(pₖ,1) != K || length(mixtures) != K
error("Error in the dimensions of the inputs. Please check them.")
end
# finding empty/non_empty values
#Xmask = .! ismissing.(X)
lL = -Inf
iter = 1
while(true)
oldlL = lL
# E Step: assigning the posterior prob p(j|xi) and computing the log-Likelihood of the parameters given the set of data
pₙₖlagged = copy(pₙₖ)
pₙₖ, lL = estep(X,pₖ,mixtures)
push!(ϵ,norm(pₙₖlagged - pₙₖ))
# M step: find parameters that maximise the likelihood
# Updating the probabilities of the different mixtures
nₖ = sum(pₙₖ,dims=1)'
n = sum(nₖ)
pₖ = nₖ ./ n
update_parameters!(mixtures, X, pₙₖ; minimum_variance=minimum_variance,minimum_covariance=minimum_covariance)
        # Information. Note the likelihood is without accounting for the new mu, sigma
if msgStep != 0 && (length(ϵ) % msgStep == 0 || length(ϵ) == 1)
println("Iter. $(length(ϵ)):\tVar. of the post $(ϵ[end]) \t Log-likelihood $(lL)")
end
        # Closing conditions. Note that the log-likelihood is the one computed before considering the new mu, sigma
if ((lL - oldlL) <= (tol * abs(lL))) || (iter >= maximum_iterations)
npars = npar(mixtures) + (K-1)
#BIC = lL - (1/2) * npars * log(N)
BICv = bic(lL,npars,N)
AICv = aic(lL,npars)
#if (ϵ[end] < tol)
return (pₙₖ=pₙₖ,pₖ=pₖ,mixtures=mixtures,ϵ=ϵ,lL=lL,BIC=BICv,AIC=AICv)
else
iter += 1
end
end # end while loop
end # end function
# - For mixtures with full covariance matrix (i.e. `FullGaussian(μ,σ²)`) the minimum_covariance should NOT be set equal to the minimum_variance, or if the covariance matrix goes too low, it will become singular and not invertible.
# Avi v2..
"""
$(TYPEDEF)
Hyperparameters for GMM clusters and other GMM-based algorithms
## Parameters:
$(FIELDS)
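# Example (a minimal sketch of the two ways to specify the mixtures; the `mod1`/`mod2` names are arbitrary):
```julia
mod1 = GaussianMixtureClusterer(n_classes=2, mixtures=SphericalGaussian)            # a type: expanded to 2 unfitted SphericalGaussian
mod2 = GaussianMixtureClusterer(mixtures=[DiagonalGaussian(), DiagonalGaussian()])  # a vector: n_classes is inferred from its length
```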
"""
mutable struct GaussianMixture_hp <: BetaMLHyperParametersSet
"Number of mixtures (latent classes) to consider [def: 3]"
n_classes::Int64
"Initial probabilities of the categorical distribution (n_classes x 1) [default: `[]`]"
initial_probmixtures::Vector{Float64}
"""An array (of length `n_classes`) of the mixtures to employ (see the [`?GMM`](@ref GMM) module).
    Each mixture object can be provided with or without its parameters (e.g. mean and variance for the gaussian ones). Fully qualified mixtures are useful only if the `initialisation_strategy` parameter is set to \"given\".
    This parameter can also be given simply in terms of a _type_. In this case it is automatically extended to a vector of `n_classes` mixtures of the specified type.
    Note that mixing of different mixture types is not currently supported and that the currently implemented mixtures are `SphericalGaussian`, `DiagonalGaussian` and `FullGaussian`.
[def: `DiagonalGaussian`]"""
mixtures::Union{Type,Vector{<: AbstractMixture}}
"Tolerance to stop the algorithm [default: 10^(-6)]"
tol::Float64
"Minimum variance for the mixtures [default: 0.05]"
minimum_variance::Float64
"Minimum covariance for the mixtures with full covariance matrix [default: 0]. This should be set different than minimum_variance."
minimum_covariance::Float64
"""
The computation method of the vector of the initial mixtures.
One of the following:
- "grid": using a grid approach
- "given": using the mixture provided in the fully qualified `mixtures` parameter
- "kmeans": use first kmeans (itself initialised with a "grid" strategy) to set the initial mixture centers [default]
Note that currently "random" and "shuffle" initialisations are not supported in gmm-based algorithms.
"""
initialisation_strategy::String
"Maximum number of iterations [def: 5000]"
maximum_iterations::Int64
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method (suitable for the GMM-based regressors)
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and eventually change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod
function GaussianMixture_hp(;
n_classes::Union{Nothing,Int64} = nothing, # def: 3
initial_probmixtures::Vector{Float64} = Float64[],
mixtures::Union{Type,Vector{<: AbstractMixture},Nothing} = nothing, # DiagonalGaussian
tol::Float64 = 10^(-6),
minimum_variance::Float64 = 0.05,
minimum_covariance::Float64 = 0.0,
initialisation_strategy::String = "kmeans",
maximum_iterations::Int64 = 5000,
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(
hpranges = Dict("n_classes" =>[2,3,4,5,6],
"mixtures"=>[SphericalGaussian,DiagonalGaussian,FullGaussian],
"initialisation_strategy"=>["grid","kmeans"],
"minimum_covariance"=>[0.01,0.02],
"minimum_variance"=>[0.05,0.07,0.1])
,multithreads=true)
)
if isnothing(mixtures) && isnothing(n_classes)
n_classes = 3
mixtures = [DiagonalGaussian() for i in 1:3]
elseif isnothing(mixtures) && !isnothing(n_classes)
mixtures = [DiagonalGaussian() for i in 1:n_classes]
elseif typeof(mixtures) <: UnionAll && isnothing(n_classes)
n_classes = 3
mixtures = [mixtures() for i in 1:n_classes]
elseif typeof(mixtures) <: UnionAll && !isnothing(n_classes)
mixtures = [mixtures() for i in 1:n_classes]
elseif typeof(mixtures) <: AbstractVector && isnothing(n_classes)
println("sfsdfsdf")
n_classes = length(mixtures)
elseif typeof(mixtures) <: AbstractVector && !isnothing(n_classes)
n_classes == length(mixtures) || error("The length of the mixtures vector must be equal to the number of classes")
end
return new(n_classes,initial_probmixtures,mixtures,tol,minimum_variance,minimum_covariance,initialisation_strategy,maximum_iterations,tunemethod)
end
end
Base.@kwdef mutable struct GMMCluster_lp <: BetaMLLearnableParametersSet
mixtures::Union{Type,Vector{<: AbstractMixture}} = AbstractMixture[] # attention that this is set up at model construction, as it has the same name as the hyperparameter
initial_probmixtures::Vector{Float64} = []
#probRecords::Union{Nothing,Matrix{Float64}} = nothing
end
"""
$(TYPEDEF)
Assign class probabilities to records (i.e. _soft_ clustering) assuming a probabilistic generative model of observed data using mixtures.
For the parameters see [`?GaussianMixture_hp`](@ref GaussianMixture_hp) and [`?BML_options`](@ref BML_options).
# Notes:
- Data must be numerical
- Mixtures can be user defined: see the [`?GMM`](@ref GMM) module documentation for a discussion on provided vs custom mixtures.
- Online fitting (re-fitting with new data) is supported by setting the old learned mixtures as the starting values
- The model is fitted using an Expectation-Maximisation (EM) algorithm that supports missing data and is implemented in the log-domain for better numerical accuracy with many dimensions
# Example:
```julia
julia> using BetaML
julia> X = [1.1 10.1; 0.9 9.8; 10.0 1.1; 12.1 0.8; 0.8 9.8];
julia> mod = GaussianMixtureClusterer(n_classes=2)
GaussianMixtureClusterer - A Generative Mixture Model (unfitted)
julia> prob_belong_classes = fit!(mod,X)
Iter. 1: Var. of the post 2.15612140465882 Log-likelihood -29.06452054772657
5×2 Matrix{Float64}:
1.0 0.0
1.0 0.0
0.0 1.0
0.0 1.0
1.0 0.0
julia> new_probs = fit!(mod,[11 0.9])
Iter. 1: Var. of the post 1.0 Log-likelihood -1.3312256125240092
1×2 Matrix{Float64}:
0.0 1.0
julia> info(mod)
Dict{String, Any} with 6 entries:
"xndims" => 2
"error" => [1.0, 0.0, 0.0]
"AIC" => 15.7843
"fitted_records" => 6
"lL" => 1.10786
"BIC" => -2.21571
julia> parameters(mod)
BetaML.GMM.GMMCluster_lp (a BetaMLLearnableParametersSet struct)
- mixtures: DiagonalGaussian{Float64}[DiagonalGaussian{Float64}([0.9333333333333332, 9.9], [0.05, 0.05]), DiagonalGaussian{Float64}([11.05, 0.9500000000000001], [0.05, 0.05])]
- initial_probmixtures: [0.0, 1.0]
```
"""
mutable struct GaussianMixtureClusterer <: BetaMLUnsupervisedModel
hpar::GaussianMixture_hp
opt::BML_options
par::Union{Nothing,GMMCluster_lp}
cres::Union{Nothing,Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function GaussianMixtureClusterer(;kwargs...)
m = GaussianMixtureClusterer(GaussianMixture_hp(),BML_options(),GMMCluster_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
# Special correction for GaussianMixture_hp
kwkeys = keys(kwargs) #in(2,[1,2,3])
if !in(:mixtures,kwkeys) && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [DiagonalGaussian() for i in 1:3]
elseif !in(:mixtures,kwkeys) && in(:n_classes,kwkeys)
m.hpar.mixtures = [DiagonalGaussian() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: UnionAll && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:3]
elseif typeof(kwargs[:mixtures]) <: UnionAll && in(:n_classes,kwkeys)
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: AbstractVector && !in(:n_classes,kwkeys)
m.hpar.n_classes = length(kwargs[:mixtures])
elseif typeof(kwargs[:mixtures]) <: AbstractVector && in(:n_classes,kwkeys)
kwargs[:n_classes] == length(kwargs[:mixtures]) || error("The length of the mixtures vector must be equal to the number of classes")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit the [`GaussianMixtureClusterer`](@ref) model to data
# Notes:
- re-fitting is a new complete fitting but starting with mixtures computed in the previous fitting(s)
"""
function fit!(m::GaussianMixtureClusterer,x)
# Parameter alias..
K = m.hpar.n_classes
initial_probmixtures = m.hpar.initial_probmixtures
mixtures = m.hpar.mixtures
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:K]
end
tol = m.hpar.tol
minimum_variance = m.hpar.minimum_variance
minimum_covariance = m.hpar.minimum_covariance
initialisation_strategy = m.hpar.initialisation_strategy
maximum_iterations = m.hpar.maximum_iterations
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
verbosity >= HIGH && @info "Continuing training of a pre-fitted model"
gmmOut = gmm(x,K;initial_probmixtures=m.par.initial_probmixtures,mixtures=m.par.mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="given",maximum_iterations=maximum_iterations,rng = rng)
else
gmmOut = gmm(x,K;initial_probmixtures=initial_probmixtures,mixtures=mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy=initialisation_strategy,maximum_iterations=maximum_iterations,rng = rng)
end
probRecords = gmmOut.pₙₖ
m.par = GMMCluster_lp(mixtures = gmmOut.mixtures, initial_probmixtures=makecolvector(gmmOut.pₖ))
m.cres = cache ? probRecords : nothing
m.info["error"] = gmmOut.ϵ
m.info["lL"] = gmmOut.lL
m.info["BIC"] = gmmOut.BIC
m.info["AIC"] = gmmOut.AIC
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the classes probabilities associated to new data assuming the mixtures computed in fitting a [`GaussianMixtureClusterer`](@ref) model.
"""
function predict(m::GaussianMixtureClusterer,X)
X = makematrix(X)
mixtures = m.par.mixtures
initial_probmixtures = m.par.initial_probmixtures
probRecords, lL = estep(X,initial_probmixtures,mixtures)
return probRecords
end
function show(io::IO, ::MIME"text/plain", m::GaussianMixtureClusterer)
if m.fitted == false
print(io,"GaussianMixtureClusterer - A Generative Mixture Model (unfitted)")
else
print(io,"GaussianMixtureClusterer - A Generative Mixture Model (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::GaussianMixtureClusterer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"GaussianMixtureClusterer - A $(m.hpar.n_classes)-classes Generative Mixture Model (unfitted)")
else
print(io,"GaussianMixtureClusterer - A $(m.hpar.n_classes)-classes Generative Mixture Model(fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
println(io,"Mixtures:")
println(io,m.par.mixtures)
println(io,"Probability of each mixture:")
println(io,m.par.initial_probmixtures)
end
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 16692 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
import BetaML.Utils.allowmissing
# ------------------------------------------------------------------------------
# GaussianMixtureRegressor2
Base.@kwdef mutable struct GaussianMixtureRegressor2_lp <: BetaMLLearnableParametersSet
mixtures::Union{Type,Vector{<: AbstractMixture}} = DiagonalGaussian[] # The type is only temporary, it should always be replaced by an actual mixture
initial_probmixtures::Vector{Float64} = []
#probRecords::Union{Nothing,Matrix{Float64}} = nothing
meanYByMixture::Union{Nothing,Matrix{Float64}} = nothing
end
"""
$(TYPEDEF)
A multi-dimensional, missing data friendly non-linear regressor based on Generative (Gaussian) Mixture Model (strategy "1").
The training data is used to fit a probabilistic model with latent mixtures (Gaussian distributions with different covariances are already implemented) and predictions for new data are then obtained by fitting the new data to the mixtures.
For hyperparameters see [`GaussianMixture_hp`](@ref) and [`BML_options`](@ref).
This strategy (`GaussianMixtureRegressor2`) works by fitting the EM algorithm on the feature matrix X.
Once the data has been probabilistically assigned to the various classes, a mean value of the fitting values Y is computed for each cluster (using the probabilities as weights).
At predict time, the new data is first fitted to the learned mixtures using the e-step part of the EM algorithm to obtain the probabilistic assignment of each record to the various mixtures. These probabilities are then multiplied by the mixture averages for the Y dimensions learned at training time to obtain the predicted value(s) for each record.
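In formulas, if `pₙₖ` is the (N x K) matrix of record-to-mixture probabilities and `Ȳ` is the (K x DY) matrix of the per-mixture (probability-weighted) means of Y, the returned prediction is simply the matrix product `pₙₖ * Ȳ`.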
# Notes:
- Predicted values are always a matrix, even when a single variable is predicted (use `dropdims(ŷ,dims=2)` to get a single vector).
# Example:
```julia
julia> using BetaML
julia> X = [1.1 10.1; 0.9 9.8; 10.0 1.1; 12.1 0.8; 0.8 9.8];
julia> Y = X[:,1] .* 2 - X[:,2]
5-element Vector{Float64}:
-7.8999999999999995
-8.0
18.9
23.4
-8.200000000000001
julia> mod = GaussianMixtureRegressor2(n_classes=2)
GaussianMixtureRegressor2 - A regressor based on Generative Mixture Model (unfitted)
julia> ŷ = fit!(mod,X,Y)
Iter. 1: Var. of the post 2.15612140465882 Log-likelihood -29.06452054772657
5×1 Matrix{Float64}:
-8.033333333333333
-8.033333333333333
21.15
21.15
-8.033333333333333
julia> new_probs = predict(mod,[11 0.9])
1×1 Matrix{Float64}:
21.15
julia> info(mod)
Dict{String, Any} with 6 entries:
"xndims" => 2
"error" => [2.15612, 0.118848, 4.19495e-7, 0.0, 0.0]
"AIC" => 32.7605
"fitted_records" => 5
"lL" => -7.38023
"BIC" => 29.2454
```
"""
mutable struct GaussianMixtureRegressor2 <: BetaMLUnsupervisedModel
hpar::GaussianMixture_hp
opt::BML_options
par::Union{Nothing,GaussianMixtureRegressor2_lp}
cres::Union{Nothing,Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function GaussianMixtureRegressor2(;kwargs...)
m = GaussianMixtureRegressor2(GaussianMixture_hp(),BML_options(),GaussianMixtureRegressor2_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
# Special correction for GaussianMixture_hp
kwkeys = keys(kwargs) #in(2,[1,2,3])
if !in(:mixtures,kwkeys) && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [DiagonalGaussian() for i in 1:3]
elseif !in(:mixtures,kwkeys) && in(:n_classes,kwkeys)
m.hpar.mixtures = [DiagonalGaussian() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: UnionAll && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:3]
elseif typeof(kwargs[:mixtures]) <: UnionAll && in(:n_classes,kwkeys)
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: AbstractVector && !in(:n_classes,kwkeys)
m.hpar.n_classes = length(kwargs[:mixtures])
elseif typeof(kwargs[:mixtures]) <: AbstractVector && in(:n_classes,kwkeys)
kwargs[:n_classes] == length(kwargs[:mixtures]) || error("The length of the mixtures vector must be equal to the number of classes")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit the [`GaussianMixtureRegressor2`](@ref) model to data
# Notes:
- re-fitting is a new complete fitting but starting with mixtures computed in the previous fitting(s)
"""
function fit!(m::GaussianMixtureRegressor2,x,y)
m.fitted || autotune!(m,(x,y))
x = makematrix(x)
y = makematrix(y)
# Parameter alias..
K = m.hpar.n_classes
initial_probmixtures = m.hpar.initial_probmixtures
mixtures = m.hpar.mixtures
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:K]
end
tol = m.hpar.tol
minimum_variance = m.hpar.minimum_variance
minimum_covariance = m.hpar.minimum_covariance
initialisation_strategy = m.hpar.initialisation_strategy
maximum_iterations = m.hpar.maximum_iterations
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
verbosity >= STD && @warn "Continuing training of a pre-fitted model"
gmmOut = gmm(x,K;initial_probmixtures=m.par.initial_probmixtures,mixtures=m.par.mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="given",maximum_iterations=maximum_iterations,rng = rng)
else
gmmOut = gmm(x,K;initial_probmixtures=initial_probmixtures,mixtures=mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy=initialisation_strategy,maximum_iterations=maximum_iterations,rng = rng)
end
probRecords = gmmOut.pₙₖ
sumProbrecords = sum(probRecords,dims=1)
ysum = probRecords' * y
ymean = vcat(transpose([ysum[r,:] / sumProbrecords[1,r] for r in 1:size(ysum,1)])...)
m.par = GaussianMixtureRegressor2_lp(mixtures = gmmOut.mixtures, initial_probmixtures=makecolvector(gmmOut.pₖ), meanYByMixture = ymean)
m.cres = cache ? probRecords * ymean : nothing
m.info["error"] = gmmOut.ϵ
m.info["lL"] = gmmOut.lL
m.info["BIC"] = gmmOut.BIC
m.info["AIC"] = gmmOut.AIC
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the classes probabilities associated to new data assuming the mixtures and average values per class computed in fitting a [`GaussianMixtureRegressor2`](@ref) model.
"""
function predict(m::GaussianMixtureRegressor2,X)
X = makematrix(X)
N,DX = size(X)
mixtures = m.par.mixtures
yByMixture = m.par.meanYByMixture
initial_probmixtures = m.par.initial_probmixtures
probRecords, lL = estep(X,initial_probmixtures,mixtures)
return probRecords * yByMixture
end
function show(io::IO, ::MIME"text/plain", m::GaussianMixtureRegressor2)
if m.fitted == false
print(io,"GaussianMixtureRegressor2 - A regressor based on Generative Mixture Model (unfitted)")
else
print(io,"GaussianMixtureRegressor2 - A regressor based on Generative Mixture Model (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::GaussianMixtureRegressor2)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"GaussianMixtureRegressor2 - A regressor based on Generative Mixture Model ($(m.hpar.n_classes) classes, unfitted)")
else
print(io,"GaussianMixtureRegressor2 - A regressor based on Generative Mixture Model ($(m.hpar.n_classes) classes, fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
println(io,"Mixtures:")
println(io,m.par.mixtures)
println(io,"Probability of each mixture:")
println(io,m.par.initial_probmixtures)
end
end
# ------------------------------------------------------------------------------
# GaussianMixtureRegressor
"""
$(TYPEDEF)
A multi-dimensional, missing data friendly non-linear regressor based on Generative (Gaussian) Mixture Model.
The training data is used to fit a probabilistic model with latent mixtures (Gaussian distributions with different covariances are already implemented) and predictions for new data are then obtained by fitting the new data to the mixtures.
For hyperparameters see [`GaussianMixture_hp`](@ref) and [`BML_options`](@ref).
This strategy (`GaussianMixtureRegressor`) works by training the EM algorithm on a combined (hcat) matrix of X and Y.
At predict time, the new data is first fitted to the learned mixtures using the e-step part of the EM algorithm (with missing values for the dimensions belonging to Y) to obtain the probabilistic assignment of each record to the various mixtures. These probabilities are then multiplied by the mixture averages for the Y dimensions to obtain the predicted value(s) for each record.
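Concretely, at predict time the new X records are extended with `missing` values in place of the Y dimensions (i.e. `hcat(X, fill(missing, N, DY))`) before running the e-step, so the fitted mixtures can be reused without any refitting.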
# Example:
```julia
julia> using BetaML
julia> X = [1.1 10.1; 0.9 9.8; 10.0 1.1; 12.1 0.8; 0.8 9.8];
julia> Y = X[:,1] .* 2 - X[:,2]
5-element Vector{Float64}:
-7.8999999999999995
-8.0
18.9
23.4
-8.200000000000001
julia> mod = GaussianMixtureRegressor(n_classes=2)
GaussianMixtureRegressor - A regressor based on Generative Mixture Model (unfitted)
julia> ŷ = fit!(mod,X,Y)
Iter. 1: Var. of the post 2.2191120060614065 Log-likelihood -47.70971887023561
5×1 Matrix{Float64}:
-8.033333333333333
-8.033333333333333
21.15
21.15
-8.033333333333333
julia> new_probs = predict(mod,[11 0.9])
1×1 Matrix{Float64}:
21.15
julia> info(mod)
Dict{String, Any} with 6 entries:
"xndims" => 3
"error" => [2.21911, 0.0260833, 3.19141e-39, 0.0]
"AIC" => 60.0684
"fitted_records" => 5
"lL" => -17.0342
"BIC" => 54.9911
julia> parameters(mod)
BetaML.GMM.GMMCluster_lp (a BetaMLLearnableParametersSet struct)
- mixtures: DiagonalGaussian{Float64}[DiagonalGaussian{Float64}([0.9333333333333332, 9.9, -8.033333333333333], [1.1024999999999996, 0.05, 5.0625]), DiagonalGaussian{Float64}([11.05, 0.9500000000000001, 21.15], [1.1024999999999996, 0.05, 5.0625])]
- initial_probmixtures: [0.6, 0.4]
```
"""
mutable struct GaussianMixtureRegressor <: BetaMLUnsupervisedModel
hpar::GaussianMixture_hp
opt::BML_options
par::Union{Nothing,GMMCluster_lp}
cres::Union{Nothing,Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function GaussianMixtureRegressor(;kwargs...)
m = GaussianMixtureRegressor(GaussianMixture_hp(),BML_options(),GMMCluster_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
# Special correction for GaussianMixture_hp
kwkeys = keys(kwargs) #in(2,[1,2,3])
if !in(:mixtures,kwkeys) && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [DiagonalGaussian() for i in 1:3]
elseif !in(:mixtures,kwkeys) && in(:n_classes,kwkeys)
m.hpar.mixtures = [DiagonalGaussian() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: UnionAll && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:3]
elseif typeof(kwargs[:mixtures]) <: UnionAll && in(:n_classes,kwkeys)
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: AbstractVector && !in(:n_classes,kwkeys)
m.hpar.n_classes = length(kwargs[:mixtures])
elseif typeof(kwargs[:mixtures]) <: AbstractVector && in(:n_classes,kwkeys)
kwargs[:n_classes] == length(kwargs[:mixtures]) || error("The length of the mixtures vector must be equal to the number of classes")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit the [`GaussianMixtureRegressor`](@ref) model to data
# Notes:
- re-fitting is a new complete fitting but starting with mixtures computed in the previous fitting(s)
"""
function fit!(m::GaussianMixtureRegressor,x,y)
m.fitted || autotune!(m,(x,y))
x = makematrix(x)
N,DX = size(x)
y = makematrix(y)
x = hcat(x,y)
DFull = size(x,2)
# Parameter alias..
K = m.hpar.n_classes
initial_probmixtures = m.hpar.initial_probmixtures
mixtures = m.hpar.mixtures
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:K]
end
tol = m.hpar.tol
minimum_variance = m.hpar.minimum_variance
minimum_covariance = m.hpar.minimum_covariance
initialisation_strategy = m.hpar.initialisation_strategy
maximum_iterations = m.hpar.maximum_iterations
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
verbosity >= HIGH && @info "Continuing training of a pre-fitted model"
gmmOut = gmm(x,K;initial_probmixtures=m.par.initial_probmixtures,mixtures=m.par.mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="given",maximum_iterations=maximum_iterations,rng = rng)
else
gmmOut = gmm(x,K;initial_probmixtures=initial_probmixtures,mixtures=mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy=initialisation_strategy,maximum_iterations=maximum_iterations,rng = rng)
end
probRecords = gmmOut.pₙₖ
m.par = GMMCluster_lp(mixtures = gmmOut.mixtures, initial_probmixtures=makecolvector(gmmOut.pₖ))
m.cres = cache ? probRecords * [gmmOut.mixtures[k].μ[d] for k in 1:K, d in DX+1:DFull] : nothing
m.info["error"] = gmmOut.ϵ
m.info["lL"] = gmmOut.lL
m.info["BIC"] = gmmOut.BIC
m.info["AIC"] = gmmOut.AIC
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the classes probabilities associated to new data assuming the mixtures computed fitting a [`GaussianMixtureRegressor`](@ref) model on a merged X and Y matrix
"""
function predict(m::GaussianMixtureRegressor,X)
X = makematrix(X)
X = allowmissing(X)
N,DX = size(X)
mixtures = m.par.mixtures
DFull = length(mixtures[1].μ)
K = length(mixtures)
X = hcat(X,fill(missing,N,DFull-DX))
yByMixture = [mixtures[k].μ[d] for k in 1:K, d in DX+1:DFull]
initial_probmixtures = m.par.initial_probmixtures
probRecords, lL = estep(X,initial_probmixtures,mixtures)
return probRecords * yByMixture
end
function show(io::IO, ::MIME"text/plain", m::GaussianMixtureRegressor)
if m.fitted == false
print(io,"GaussianMixtureRegressor - A regressor based on Generative Mixture Model (unfitted)")
else
print(io,"GaussianMixtureRegressor - A regressor based on Generative Mixture Model (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::GaussianMixtureRegressor)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"GaussianMixtureRegressor - A regressor based on Generative Mixture Model ($(m.hpar.n_classes) classes, unfitted)")
else
print(io,"GaussianMixtureRegressor - A regressor based on Generative Mixture Model ($(m.hpar.n_classes) classes, fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
println(io,"Mixtures:")
println(io,m.par.mixtures)
println(io,"Probability of each mixture:")
println(io,m.par.initial_probmixtures)
end
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 15911 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
using Statistics, LinearAlgebra, PDMats
import Distributions: IsoNormal, DiagNormal, FullNormal, logpdf
import PDMats: ScalMat, PDiagMat, PDMat
import BetaML.Clustering: kmeans
export SphericalGaussian, DiagonalGaussian, FullGaussian,
init_mixtures!,lpdf, update_parameters!
#export initVariances!, updateVariances!
abstract type AbstractGaussian <: AbstractMixture end
mutable struct SphericalGaussian{T <:Number} <: AbstractGaussian
μ ::Union{Array{T,1},Nothing}
σ² ::Union{T,Nothing}
#SphericalGaussian(;μ::Union{Array{T,1},Nothing},σ²::Union{T,Nothing}) where {T} = SphericalGaussian(μ,σ²)
@doc """
$(TYPEDSIGNATURES)
SphericalGaussian(μ,σ²) - Spherical Gaussian mixture with mean μ and (single) variance σ²
"""
SphericalGaussian(μ::Union{Array{T,1},Nothing},σ²::Union{T,Nothing}=nothing) where {T} = new{T}(μ,σ²)
SphericalGaussian(type::Type{T}=Float64) where {T} = new{T}(nothing, nothing)
end
mutable struct DiagonalGaussian{T <:Number} <: AbstractGaussian
μ::Union{Array{T,1},Nothing}
σ²::Union{Array{T,1},Nothing}
@doc """
$(TYPEDSIGNATURES)
DiagonalGaussian(μ,σ²) - Gaussian mixture with mean μ and variances σ² (and fixed zero covariances)
"""
DiagonalGaussian(μ::Union{Array{T,1},Nothing},σ²::Union{Array{T,1},Nothing}=nothing) where {T} = new{T}(μ,σ²)
DiagonalGaussian(::Type{T}=Float64) where {T} = new{T}(nothing, nothing)
end
mutable struct FullGaussian{T <:Number} <: AbstractGaussian
μ::Union{Array{T,1},Nothing}
σ²::Union{Array{T,2},Nothing}
@doc """
$(TYPEDSIGNATURES)
FullGaussian(μ,σ²) - Gaussian mixture with mean μ and variance/covariance matrix σ²"""
FullGaussian(μ::Union{Array{T,1},Nothing},σ²::Union{Array{T,2},Nothing}=nothing) where {T} = new{T}(μ,σ²)
FullGaussian(::Type{T}=Float64) where {T} = new{T}(nothing, nothing)
end
function initVariances!(mixtures::Array{T,1}, X; minimum_variance=0.25, minimum_covariance=0.0,rng = Random.GLOBAL_RNG) where {T <: SphericalGaussian}
(N,D) = size(X)
K = length(mixtures)
varX_byD = fill(0.0,D)
for d in 1:D
varX_byD[d] = var(skipmissing(X[:,d]))
end
varX = max(minimum_variance,mean(varX_byD)/K^2)
for (i,m) in enumerate(mixtures)
if isnothing(m.σ²)
m.σ² = varX
end
end
end
function initVariances!(mixtures::Array{T,1}, X; minimum_variance=0.25, minimum_covariance=0.0,rng = Random.GLOBAL_RNG) where {T <: DiagonalGaussian}
(N,D) = size(X)
K = length(mixtures)
varX_byD = fill(0.0,D)
for d in 1:D
varX_byD[d] = max(minimum_variance, var(skipmissing(X[:,d])))
end
for (i,m) in enumerate(mixtures)
if isnothing(m.σ²)
m.σ² = varX_byD
end
end
end
function initVariances!(mixtures::Array{T,1}, X; minimum_variance=0.25, minimum_covariance=0.0,rng = Random.GLOBAL_RNG) where {T <: FullGaussian}
(N,D) = size(X)
K = length(mixtures)
varX_byD = fill(0.0,D)
for d in 1:D
varX_byD[d] = max(minimum_variance, var(skipmissing(X[:,d])))
end
for (i,m) in enumerate(mixtures)
if isnothing(m.σ²)
m.σ² = fill(0.0,D,D)
for d1 in 1:D
for d2 in 1:D
if d1 == d2
m.σ²[d1,d2] = varX_byD[d1]
else
m.σ²[d1,d2] = minimum_covariance
end
end
end
end
end
end
"""
init_mixtures!(mixtures::Array{T,1}, X; minimum_variance=0.25, minimum_covariance=0.0, initialisation_strategy="grid",rng=Random.GLOBAL_RNG)
The parameter `initialisation_strategy` can be `grid`, `kmeans` or `given`:
- `grid`: Uniformly cover the space observed by the data
- `kmeans`: Use the kmeans algorithm. If the data contains missing values, a first run of `predictMissing` is done under init=`grid` to impute the missing values just to allow the kmeans algorithm. Then the EM algorithm is used with the kmeans output as init values.
- `given`: Leave the provided set of initial mixtures
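# Example (a minimal sketch, assuming `X` is an (N x D) data matrix, possibly with missing values):
```julia
mixtures = [SphericalGaussian() for i in 1:3]
init_mixtures!(mixtures, X, minimum_variance=0.25, initialisation_strategy="grid")
```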
"""
function init_mixtures!(mixtures::Array{T,1}, X; minimum_variance=0.25, minimum_covariance=0.0, initialisation_strategy="grid",rng = Random.GLOBAL_RNG) where {T <: AbstractGaussian}
# debug..
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing 2; 3.3 38; missing -2.3; 5.2 -2.4]
#mixtures = [SphericalGaussian() for i in 1:3]
# ---
if initialisation_strategy == "given"
return
end
(N,D) = size(X)
K = length(mixtures)
# count nothing mean mixtures
nMM = 0
for (i,m) in enumerate(mixtures)
if isnothing(m.μ)
nMM += 1
end
end
if initialisation_strategy == "grid"
minX = fill(-Inf,D)
maxX = fill(Inf,D)
for d in 1:D
minX[d] = minimum(skipmissing(X[:,d]))
maxX[d] = maximum(skipmissing(X[:,d]))
end
rangedμ = zeros(nMM,D)
for d in 1:D
rangedμ[:,d] = collect(range(minX[d] + (maxX[d]-minX[d])/(nMM*2) , stop=maxX[d] - (maxX[d]-minX[d])/(nMM*2) , length=nMM))
# ex: rangedμ[:,d] = collect(range(minX[d], stop=maxX[d], length=nMM))
end
j = 1
for m in mixtures
if isnothing(m.μ)
m.μ = rangedμ[j,:]
j +=1
end
end
elseif initialisation_strategy == "kmeans"
if !any(ismissing.(X)) # there are no missing
kmμ = kmeans(X,K,rng=rng)[2]
for (k,m) in enumerate(mixtures)
if isnothing(m.μ)
m.μ = kmμ[k,:]
end
end
else # missings are present
# First pass of predictMissing using initialisation_strategy=grid
#emOut1 = predictMissing(X,K;mixtures=mixtures,verbosity=NONE,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="grid",rng=rng,maximum_iterations=10)
#kmμ = kmeans(emOut1.X̂,K,rng=rng)[2]
            # replicate here the code of predictMissing, as it has been moved to the subsequent Imputation module and is not available here
emOutInner = gmm(X,K;mixtures=mixtures,verbosity=NONE,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="grid",rng=rng,maximum_iterations=10)
(N,D) = size(X)
XMask = .! ismissing.(X)
X̂ = [XMask[n,d] ? X[n,d] : sum([emOutInner.mixtures[k].μ[d] * emOutInner.pₙₖ[n,k] for k in 1:K]) for n in 1:N, d in 1:D ]
X̂ = identity.(X̂)
kmμ = kmeans(X̂,K,rng=rng)[2]
# TODO check how to use the new GMMIputer() but this is defined AFTER the Cluster module, problem !
for (k,m) in enumerate(mixtures)
if isnothing(m.μ)
m.μ = kmμ[k,:]
end
end
end
else
@error "initialisation_strategy $initialisation_strategy not supported by this mixture type"
end
initVariances!(mixtures,X,minimum_variance=minimum_variance, minimum_covariance=minimum_covariance,rng=rng)
end
"""lpdf(m::SphericalGaussian,x,mask) - Log PDF of the mixture given the observation `x`"""
function lpdf(m::SphericalGaussian,x,mask)
x = convert(Vector{nonmissingtype(eltype(x))},x)
μ = m.μ[mask]
σ² = m.σ²
#d = IsoNormal(μ,ScalMat(length(μ),σ²))
#return logpdf(d,x)
return (- (length(x)/2) * log(2π*σ²) - norm(x-μ)^2/(2σ²))
end
"""lpdf(m::DiagonalGaussian,x,mask) - Log PDF of the mixture given the observation `x`"""
function lpdf(m::DiagonalGaussian,x,mask)
x = convert(Vector{nonmissingtype(eltype(x))},x)
μ = m.μ[mask]
σ² = m.σ²[mask]
d = DiagNormal(μ,PDiagMat(σ²))
return logpdf(d,x)
end
"""lpdf(m::FullGaussian,x,mask) - Log PDF of the mixture given the observation `x`"""
function lpdf(m::FullGaussian,x,mask)
x = convert(Vector{nonmissingtype(eltype(x))},x)
μ = m.μ[mask]
nmd = length(μ)
σ² = reshape(m.σ²[mask*mask'],(nmd,nmd))
σ² = σ² + max(eps(), -2minimum(eigvals(σ²))) * I # Improve numerical stability https://stackoverflow.com/q/57559589/1586860 (-2 * minimum...) https://stackoverflow.com/a/35612398/1586860
#=
try
d = FullNormal(μ,PDMat(σ²))
return logpdf(d,x)
catch
println(σ²)
println(mask)
println(μ)
println(x)
println(σ²^(-1))
error("Failed PDMat")
end
=#
diff = x .- μ
#a = det(σ²)
#b = log(max(a,eps()))
#return -(nmd/2)*log(2pi)-(1/2)*b-(1/2)*diff'*σ²^(-1)*diff
return -(nmd/2)*log(2pi)-(1/2)log(max(det(σ²),eps()))-(1/2)*diff'*σ²^(-1)*diff
end
"""
$(TYPEDSIGNATURES)
Return the number of learnable parameters of the mixture model, that is the number of parameters of the individual distribution multiplied by the number of distributions used.
Used to compute the BIC/AIC
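For example, three `DiagonalGaussian` mixtures over 2-dimensional data have `3*2 + 3*2 = 12` learnable parameters (the means plus the variances).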
"""
npar(mixtures::Array{T,1}) where {T <: AbstractMixture} = nothing
npar(mixtures::Array{T,1}) where {T <: SphericalGaussian} = length(mixtures) * length(mixtures[1].μ) + length(mixtures) # K * D + K
npar(mixtures::Array{T,1}) where {T <: DiagonalGaussian} = length(mixtures) * length(mixtures[1].μ) + length(mixtures) * length(mixtures[1].μ) # K * D + K * D
npar(mixtures::Array{T,1}) where {T <: FullGaussian} = begin K = length(mixtures); D = length(mixtures[1].μ); K * D + K * (D^2+D)/2 end
function updateVariances!(mixtures::Array{T,1}, X, pₙₖ; minimum_variance=0.25, minimum_covariance = 0.0) where {T <: SphericalGaussian}
# debug stuff..
#X = [1 10 20; 1.2 12 missing; 3.1 21 41; 2.9 18 39; 1.5 15 25]
#m1 = SphericalGaussian(μ=[1.0,15,21],σ²=5.0)
#m2 = SphericalGaussian(μ=[3.0,20,30],σ²=10.0)
#mixtures= [m1,m2]
#pₙₖ = [0.9 0.1; 0.8 0.2; 0.1 0.9; 0.1 0.9; 0.4 0.6]
#Xmask = [true true true; true true false; true true true; true true true; true true true]
#minimum_variance=0.25
# ---
(N,D) = size(X)
K = length(mixtures)
Xmask = .! ismissing.(X)
XdimCount = sum(Xmask, dims=2)
# #σ² = [sum([pⱼₓ[n,j] * norm(X[n,:]-μ[j,:])^2 for n in 1:N]) for j in 1:K ] ./ (nⱼ .* D)
for k in 1:K
nom = 0.0
den = dot(XdimCount,pₙₖ[:,k])
m = mixtures[k]
for n in 1:N
if any(Xmask[n,:])
nom += pₙₖ[n,k] * norm(X[n,Xmask[n,:]]-m.μ[Xmask[n,:]])^2
end
end
if(den> 0 && (nom/den) > minimum_variance)
m.σ² = nom/den
else
m.σ² = minimum_variance
end
end
end
function updateVariances!(mixtures::Array{T,1}, X, pₙₖ; minimum_variance=0.25, minimum_covariance = 0.0) where {T <: DiagonalGaussian}
# debug stuff..
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#m1 = DiagonalGaussian([1.0,10.0],[5.0,5.0])
#m2 = DiagonalGaussian([4.0,40.0],[10.0,10.0])
#m3 = DiagonalGaussian([4.0,-2.0],[5.0,5.0])
#mixtures= [m1,m2,m3]
#pₙₖ = [0.9 0.1 0; 0.7 0.1 0.1; 0.8 0.2 0; 0.7 0.3 0; 0.1 0.9 0; 0.4 0.4 0.2; 0.1 0.9 0; 0.2 0.1 0.7 ; 0 0.1 0.9]
#minimum_variance=0.25
# ---
(N,D) = size(X)
K = length(mixtures)
Xmask = .! ismissing.(X)
#XdimCount = sum(Xmask, dims=2)
# #σ² = [sum([pⱼₓ[n,j] * norm(X[n,:]-μ[j,:])^2 for n in 1:N]) for j in 1:K ] ./ (nⱼ .* D)
for k in 1:K
m = mixtures[k]
for d in 1:D
nom = 0.0
den = 0.0
for n in 1:N
if Xmask[n,d]
nom += pₙₖ[n,k] * (X[n,d]-m.μ[d])^2
den += pₙₖ[n,k]
end
end
if(den > 0 )
m.σ²[d] = max(nom/den,minimum_variance)
else
m.σ²[d] = minimum_variance
end
end
end
end
function updateVariances!(mixtures::Array{T,1}, X, pₙₖ; minimum_variance=0.25, minimum_covariance = 0.0) where {T <: FullGaussian}
# debug stuff..
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#m1 = FullGaussian([1.0,10.0],[5.0 1; 1.0 5.0])
#m2 = FullGaussian([4.0,40.0],[10.0 1.0; 1.0 10.0])
#m3 = FullGaussian([4.0,-2.0],[5.0 1; 1.0 5.0])
#mixtures= [m1,m2,m3]
#pₙₖ = [0.9 0.1 0; 0.7 0.1 0.1; 0.8 0.2 0; 0.7 0.3 0; 0.1 0.9 0; 0.4 0.4 0.2; 0.1 0.9 0; 0.2 0.1 0.7 ; 0 0.1 0.9]
#minimum_variance=0.25
# ---
(N,D) = size(X)
K = length(mixtures)
    # NDDMask[n,d1,d2] is true only if both X[n,d1] and X[n,d2] are non-missing values
NDDMask = fill(false,N,D,D)
for n in 1:N
for d1 in 1:D
for d2 in 1:D
if !ismissing(X[n,d1]) && !ismissing(X[n,d2])
NDDMask[n,d1,d2] = true
end
end
end
end
# #σ² = [sum([pⱼₓ[n,j] * norm(X[n,:]-μ[j,:])^2 for n in 1:N]) for j in 1:K ] ./ (nⱼ .* D)
for k in 1:K
m = mixtures[k]
for d2 in 1:D # out var matrix col
for d1 in 1:D # out var matrix row
if d1 >= d2 # lower half of triang
nom = 0.0
den = 0.0
for n in 1:N
if NDDMask[n,d1,d2]
nom += pₙₖ[n,k] * (X[n,d1]-m.μ[d1])*(X[n,d2]-m.μ[d2])
den += pₙₖ[n,k]
end
end
if(den > 0 )
if d1 == d2
m.σ²[d1,d2] = max(nom/den,minimum_variance)
else
m.σ²[d1,d2] = max(nom/den,minimum_covariance)
end
else
if d1 == d2
m.σ²[d1,d2] = minimum_variance
else
#m.σ²[d1,d2] = minimum_variance-0.01 # to avoid singularity in all variances equal to minimum_variance
m.σ²[d1,d2] = minimum_covariance
end
end
else # upper half of the matrix
m.σ²[d1,d2] = m.σ²[d2,d1]
end
end
end
end
end
"""
update_parameters!(mixtures::Array{T,1}, X, pₙₖ; minimum_variance=0.25, minimum_covariance)
Find and set the parameters that maximise the likelihood (m-step in the EM algorithm)
"""
#https://github.com/davidavdav/GaussianMixtures.jl/blob/master/src/train.jl
function update_parameters!(mixtures::Array{T,1}, X, pₙₖ; minimum_variance=0.25, minimum_covariance = 0.0) where {T <: AbstractGaussian}
# debug stuff..
#X = [1 10 20; 1.2 12 missing; 3.1 21 41; 2.9 18 39; 1.5 15 25]
#m1 = SphericalGaussian(μ=[1.0,15,21],σ²=5.0)
#m2 = SphericalGaussian(μ=[3.0,20,30],σ²=10.0)
#mixtures= [m1,m2]
#pₙₖ = [0.9 0.1; 0.8 0.2; 0.1 0.9; 0.1 0.9; 0.4 0.6]
#Xmask = [true true true; true true false; true true true; true true true; true true true]
(N,D) = size(X)
K = length(mixtures)
Xmask = .! ismissing.(X)
#nₖ = sum(pₙₖ,dims=1)'
#n = sum(nₖ)
#pₖ = nₖ ./ n
nkd = fill(0.0,K,D)
#nkd = [sum(pₙₖ[Xmask[:,d],k]) for k in 1:K, d in 1:D] # number of point associated to a given mixture for a specific dimension
# updating μ...
for k in 1:K
m = mixtures[k]
for d in 1:D
nkd[k,d] = sum(pₙₖ[Xmask[:,d],k])
if nkd[k,d] > 1
m.μ[d] = sum(pₙₖ[Xmask[:,d],k] .* X[Xmask[:,d],d])/nkd[k,d]
end
end
end
updateVariances!(mixtures, X, pₙₖ; minimum_variance=minimum_variance, minimum_covariance=minimum_covariance)
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 51159 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
Imputation.jl file
Implement the BetaML.Imputation module
`?BetaML.Imputation` for documentation
- Go to [https://sylvaticus.github.io/BetaML.jl](https://sylvaticus.github.io/BetaML.jl/stable) for more general doc
"""
"""
Imputation module
Provide various imputation methods for missing data. Note that the interpretation of "missing" can be very wide.
For example, recommendation systems / collaborative filtering (e.g. suggesting which film to watch) can well be represented as a missing-data imputation problem, often with better results than traditional algorithms such as k-nearest neighbors (KNN)
Provided imputers:
- [`SimpleImputer`](@ref): Impute data using the feature (column) mean, optionally normalised by l-norms of the records (rows) (fastest)
- [`GaussianMixtureImputer`](@ref): Impute data using a Generative (Gaussian) Mixture Model (good trade off)
- [`RandomForestImputer`](@ref): Impute missing data using Random Forests, with optional replicable multiple imputations (most accurate).
- [`GeneralImputer`](@ref): Impute missing data using a vector (one per column) of arbitrary learning models (classifiers/regressors) that implement `m = Model([options])`, `fit!(m,X,Y)` and `predict(m,X)` (not necessarily from `BetaML`).
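As a purely illustrative sketch of that interface (the `MeanEstimator` model below is made up; how `GeneralImputer` is configured to use such a model is governed by its hyperparameters, see [`GeneralI_hp`](@ref)):
```julia
import BetaML: fit!, predict # extend BetaML's own verbs; a non-BetaML model may expose its own functions instead
mutable struct MeanEstimator
    μ::Float64
    MeanEstimator() = new(0.0)
end
fit!(m::MeanEstimator, X, Y) = (m.μ = sum(Y)/length(Y); m) # "learn" just the mean of Y
predict(m::MeanEstimator, X) = fill(m.μ, size(X,1))        # always predict it
```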
Imputations for all these models can be obtained by running `mod = ImputatorModel([options])`, `fit!(mod,X)`. The data with the missing values imputed can then be obtained with `predict(mod)`. Use `info(m::Imputer)` to retrieve further information concerning the imputation.
Trained models can be also used to impute missing values in new data with `predict(mod,xNew)`.
Note that if multiple imputations are run (for the supporting imputers) `predict()` will return a vector of predictions rather than a single one.
## Example
```julia
julia> using Statistics, BetaML
julia> X = [2 missing 10; 2000 4000 1000; 2000 4000 10000; 3 5 12 ; 4 8 20; 1 2 5]
6×3 Matrix{Union{Missing, Int64}}:
2 missing 10
2000 4000 1000
2000 4000 10000
3 5 12
4 8 20
1 2 5
julia> mod = RandomForestImputer(multiple_imputations=10, rng=copy(FIXEDRNG));
julia> fit!(mod,X);
julia> vals = predict(mod)
10-element Vector{Matrix{Union{Missing, Int64}}}:
[2 3 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 4 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 4 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 136 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 137 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 4 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 4 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 4 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 137 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
[2 137 10; 2000 4000 1000; … ; 4 8 20; 1 2 5]
julia> nR,nC = size(vals[1])
(6, 3)
julia> medianValues = [median([v[r,c] for v in vals]) for r in 1:nR, c in 1:nC]
6×3 Matrix{Float64}:
2.0 4.0 10.0
2000.0 4000.0 1000.0
2000.0 4000.0 10000.0
3.0 5.0 12.0
4.0 8.0 20.0
1.0 2.0 5.0
julia> infos = info(mod);
julia> infos["n_imputed_values"]
1
```
"""
module Imputation
using Statistics, Random, LinearAlgebra, StableRNGs, DocStringExtensions
using ForceImport
@force using ..Api
@force using ..Utils
@force using ..Clustering
@force using ..GMM
@force using ..Trees
import ..Trees: buildForest
import ..GMM: gmm, estep
import Base.print
import Base.show
#export predictMissing,
export SimpleI_hp,RandomForestI_hp,GeneralI_hp,
Imputer, SimpleImputer, GaussianMixtureImputer, RandomForestImputer, GeneralImputer
#fit!, predict, info
abstract type Imputer <: BetaMLModel end
# ------------------------------------------------------------------------------
# SimpleImputer
"""
$(TYPEDEF)
Hyperparameters for the [`SimpleImputer`](@ref) model
# Parameters:
$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SimpleI_hp <: BetaMLHyperParametersSet
"The descriptive statistic of the column (feature) to use as imputed value [def: `mean`]"
statistic::Function = mean
"Normalise the feature mean by l-`norm` norm of the records [default: `nothing`]. Use it (e.g. `norm=1` to use the l-1 norm) if the records are highly heterogeneus (e.g. quantity exports of different countries)."
norm::Union{Nothing,Int64} = nothing
end
Base.@kwdef mutable struct SimpleImputer_lp <: BetaMLLearnableParametersSet
cStats::Vector{Float64} = []
norms::Vector{Float64} = []
#imputedValues::Union{Nothing,Matrix{Float64}} = nothing
end
"""
$(TYPEDEF)
Simple imputer using the missing data's feature (column) statistic (def: `mean`), optionally normalised by l-norms of the records (rows)
# Parameters:
- `statistic`: The descriptive statistic of the column (feature) to use as imputed value [def: `mean`]
- `norm`: Normalise the feature mean by l-`norm` norm of the records [default: `nothing`]. Use it (e.g. `norm=1` to use the l-1 norm) if the records are highly heterogeneous (e.g. quantity exports of different countries).
# Limitations:
- data must be numerical
# Example:
```julia
julia> using BetaML
julia> X = [2.0 missing 10; 20 40 100]
2×3 Matrix{Union{Missing, Float64}}:
2.0 missing 10.0
20.0 40.0 100.0
julia> mod = SimpleImputer(norm=1)
SimpleImputer - A simple feature-stat based imputer (unfitted)
julia> X_full = fit!(mod,X)
2×3 Matrix{Float64}:
2.0 4.04494 10.0
20.0 40.0 100.0
julia> info(mod)
Dict{String, Any} with 1 entry:
"n_imputed_values" => 1
julia> parameters(mod)
BetaML.Imputation.SimpleImputer_lp (a BetaMLLearnableParametersSet struct)
- cStats: [11.0, 40.0, 55.0]
- norms: [6.0, 53.333333333333336]
```
"""
mutable struct SimpleImputer <: Imputer
hpar::SimpleI_hp
opt::BML_options
par::Union{Nothing,SimpleImputer_lp}
cres::Union{Nothing,Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function SimpleImputer(;kwargs...)
m = SimpleImputer(SimpleI_hp(),BML_options(),SimpleImputer_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a matrix with missing data using [`SimpleImputer`](@ref)
"""
function fit!(imputer::SimpleImputer,X)
(imputer.fitted == false ) || error("Multiple training unsupported on this model")
#X̂ = copy(X)
nR,nC = size(X)
cache = imputer.opt.cache
missingMask = ismissing.(X)
overallStat = mean(skipmissing(X))
statf = imputer.hpar.statistic
cStats = [sum(ismissing.(X[:,i])) == nR ? overallStat : statf(skipmissing(X[:,i])) for i in 1:nC]
if imputer.hpar.norm == nothing
adjNorms = []
X̂ = [missingMask[r,c] ? cStats[c] : X[r,c] for r in 1:nR, c in 1:nC]
else
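        # Row-adjusted imputation: each record gets an adjusted norm equal to its l-`norm` norm over the
        # non-missing entries divided by their count (fully-missing records take the average of these norms),
        # and a missing cell is imputed with the column statistic scaled by the record's share of the total of these norms.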
adjNorms = [sum(ismissing.(r)) == nC ? missing : norm(collect(skipmissing(r)),imputer.hpar.norm) / (nC - sum(ismissing.(r))) for r in eachrow(X)]
adjNormsMean = mean(skipmissing(adjNorms))
adjNorms[ismissing.(adjNorms)] .= adjNormsMean
X̂ = [missingMask[r,c] ? cStats[c]*adjNorms[r]/sum(adjNorms) : X[r,c] for r in 1:nR, c in 1:nC]
end
imputer.par = SimpleImputer_lp(cStats,adjNorms)
imputer.cres = cache ? X̂ : nothing
imputer.info["n_imputed_values"] = sum(missingMask)
imputer.fitted = true
return cache ? imputer.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the missing data using the feature averages (eventually normalised) learned by fitting a [`SimpleImputer`](@ref) model
"""
function predict(m::SimpleImputer,X)
nR,nC = size(X)
    m.fitted || error("Trying to predict from an untrained model")
    nC == length(m.par.cStats) || error("`SimpleImputer` can only predict missing values in matrices with the same number of columns as the matrix it has been trained with.")
(m.hpar.norm == nothing || nR == length(m.par.norms)) || error("If norms are used, `SimpleImputer` can predict only matrices with the same number of rows as the matrix it has been trained with.")
missingMask = ismissing.(X)
if m.hpar.norm == nothing
X̂ = [missingMask[r,c] ? m.par.cStats[c] : X[r,c] for r in 1:nR, c in 1:nC]
else
X̂ = [missingMask[r,c] ? m.par.cStats[c]*m.par.norms[r]/sum(m.par.norms) : X[r,c] for r in 1:nR, c in 1:nC]
end
return X̂
end
function show(io::IO, ::MIME"text/plain", m::SimpleImputer)
if m.fitted == false
print(io,"SimpleImputer - A simple feature-stat based imputer (unfitted)")
else
print(io,"SimpleImputer - A simple feature-stat based imputer (fitted)")
end
end
function show(io::IO, m::SimpleImputer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"SimpleImputer - A simple feature-stat based imputer (unfitted)")
else
print(io,"SimpleImputer - A simple feature-stat based imputer (fitted)")
println(io,m.info)
end
end
# ------------------------------------------------------------------------------
# GaussianMixtureImputer
Base.@kwdef mutable struct GaussianMixtureImputer_lp <: BetaMLLearnableParametersSet
mixtures::Union{Type,Vector{<: AbstractMixture}} = DiagonalGaussian[] # The type is only temporary, it should always be replaced by an actual mixture
initial_probmixtures::Vector{Float64} = []
probRecords::Union{Nothing,Matrix{Float64}} = nothing
#imputedValues = nothing
end
"""
$(TYPEDEF)
Missing data imputer that uses a Generative (Gaussian) Mixture Model.
For the parameters (`n_classes`,`mixtures`,..) see [`GaussianMixture_hp`](@ref).
# Limitations:
- data must be numerical
- the resulting matrix is a Matrix{Float64}
- currently the available mixtures do not support random initialisation for missing imputation, and the rest of the algorithm (Expectation-Maximisation) is deterministic, so there is no random component involved (i.e. no multiple imputations)
# Example:
```julia
julia> using BetaML
julia> X = [1 2.5; missing 20.5; 0.8 18; 12 22.8; 0.4 missing; 1.6 3.7];
julia> mod = GaussianMixtureImputer(mixtures=[SphericalGaussian() for i in 1:2])
GaussianMixtureImputer - A Gaussian Mixture Model based imputer (unfitted)
julia> X_full = fit!(mod,X)
Iter. 1: Var. of the post 2.373498171519511 Log-likelihood -29.111866299189792
6×2 Matrix{Float64}:
1.0 2.5
6.14905 20.5
0.8 18.0
12.0 22.8
0.4 4.61314
1.6 3.7
julia> info(mod)
Dict{String, Any} with 7 entries:
"xndims" => 2
"error" => [2.3735, 0.17527, 0.0283747, 0.0053147, 0.000981885]
"AIC" => 57.798
"fitted_records" => 6
"lL" => -21.899
"n_imputed_values" => 2
"BIC" => 56.3403
julia> parameters(mod)
BetaML.Imputation.GaussianMixtureImputer_lp (a BetaMLLearnableParametersSet struct)
- mixtures: AbstractMixture[SphericalGaussian{Float64}([1.0179819950570768, 3.0999990977255845], 0.2865287884295908), SphericalGaussian{Float64}([6.149053737674149, 20.43331198167713], 15.18664378248651)]
- initial_probmixtures: [0.48544987084082347, 0.5145501291591764]
- probRecords: [0.9999996039918224 3.9600817749531375e-7; 2.3866922376272767e-229 1.0; … ; 0.9127030246369684 0.08729697536303167; 0.9999965964161501 3.403583849794472e-6]
```
"""
mutable struct GaussianMixtureImputer <: Imputer
hpar::GaussianMixture_hp
opt::BML_options
par::Union{GaussianMixtureImputer_lp,Nothing}
cres::Union{Nothing,Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function GaussianMixtureImputer(;kwargs...)
m = GaussianMixtureImputer(GaussianMixture_hp(),BML_options(),GaussianMixtureImputer_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
# Special correction for GaussianMixture_hp
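    # Accepted combinations: neither `mixtures` nor `n_classes` given -> 3 DiagonalGaussian; only `n_classes`
    # given -> that many DiagonalGaussian; `mixtures` given as a type -> it is instantiated `n_classes` times
    # (3 if `n_classes` is not given); `mixtures` given as a vector -> `n_classes` is derived from (or checked
    # against) its length.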
kwkeys = keys(kwargs) #in(2,[1,2,3])
if !in(:mixtures,kwkeys) && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [DiagonalGaussian() for i in 1:3]
elseif !in(:mixtures,kwkeys) && in(:n_classes,kwkeys)
m.hpar.mixtures = [DiagonalGaussian() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: UnionAll && !in(:n_classes,kwkeys)
m.hpar.n_classes = 3
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:3]
elseif typeof(kwargs[:mixtures]) <: UnionAll && in(:n_classes,kwkeys)
m.hpar.mixtures = [kwargs[:mixtures]() for i in 1:kwargs[:n_classes]]
elseif typeof(kwargs[:mixtures]) <: AbstractVector && !in(:n_classes,kwkeys)
m.hpar.n_classes = length(kwargs[:mixtures])
elseif typeof(kwargs[:mixtures]) <: AbstractVector && in(:n_classes,kwkeys)
kwargs[:n_classes] == length(kwargs[:mixtures]) || error("The length of the mixtures vector must be equal to the number of classes")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a matrix with missing data using [`GaussianMixtureImputer`](@ref)
"""
function fit!(m::GaussianMixtureImputer,X)
# Parameter alias..
K = m.hpar.n_classes
initial_probmixtures = m.hpar.initial_probmixtures
mixtures = m.hpar.mixtures
if typeof(mixtures) <: UnionAll
mixtures = [mixtures() for i in 1:K]
end
tol = m.hpar.tol
minimum_variance = m.hpar.minimum_variance
minimum_covariance = m.hpar.minimum_covariance
initialisation_strategy = m.hpar.initialisation_strategy
maximum_iterations = m.hpar.maximum_iterations
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.opt.verbosity > STD
@codelocation
end
if m.fitted
verbosity >= STD && @warn "Continuing training of a pre-fitted model"
emOut = gmm(X,K;initial_probmixtures=m.par.initial_probmixtures,mixtures=m.par.mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy="given",maximum_iterations=maximum_iterations,rng = rng)
else
emOut = gmm(X,K;initial_probmixtures=initial_probmixtures,mixtures=mixtures,tol=tol,verbosity=verbosity,minimum_variance=minimum_variance,minimum_covariance=minimum_covariance,initialisation_strategy=initialisation_strategy,maximum_iterations=maximum_iterations,rng = rng)
end
(N,D) = size(X)
nDim = ndims(X)
nmT = nonmissingtype(eltype(X))
XMask = .! ismissing.(X)
nFill = (N * D) - sum(XMask)
n_imputed_values = nFill
m.par = GaussianMixtureImputer_lp(mixtures = emOut.mixtures, initial_probmixtures=makecolvector(emOut.pₖ), probRecords = emOut.pₙₖ)
if cache
X̂ = [XMask[n,d] ? X[n,d] : sum([emOut.mixtures[k].μ[d] * emOut.pₙₖ[n,k] for k in 1:K]) for n in 1:N, d in 1:D ]
m.cres = X̂
end
m.info["error"] = emOut.ϵ
m.info["lL"] = emOut.lL
m.info["BIC"] = emOut.BIC
m.info["AIC"] = emOut.AIC
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(X,1)
m.info["xndims"] = size(X,2)
m.info["n_imputed_values"] = n_imputed_values
m.fitted=true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the missing data using the mixtures learned by fitting a [`GaussianMixtureImputer`](@ref) model
"""
function predict(m::GaussianMixtureImputer,X)
m.fitted || error("Trying to predict from an untrained model")
X = makematrix(X)
N,D = size(X)
XMask = .! ismissing.(X)
mixtures = m.par.mixtures
initial_probmixtures = m.par.initial_probmixtures
probRecords, lL = estep(X,initial_probmixtures,mixtures)
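    # Impute each missing cell with its expected value under the fitted mixture: x̂[n,d] = Σₖ probRecords[n,k] * μₖ[d]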
X̂ = [XMask[n,d] ? X[n,d] : sum([mixtures[k].μ[d] * probRecords[n,k] for k in 1:m.hpar.n_classes]) for n in 1:N, d in 1:D ]
return X̂
end
function show(io::IO, ::MIME"text/plain", m::GaussianMixtureImputer)
if m.fitted == false
print(io,"GaussianMixtureImputer - A Gaussian Mixture Model based imputer (unfitted)")
else
print(io,"GaussianMixtureImputer - A Gaussian Mixture Model based imputer (fitted)")
end
end
function show(io::IO, m::GaussianMixtureImputer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"GaussianMixtureImputer - A Gaussian Mixture Model based imputer (unfitted)")
else
print(io,"GaussianMixtureImputer - A Gaussian Mixture Model based imputer (fitted)")
println(io,m.info)
end
end
# ------------------------------------------------------------------------------
# RandomForestImputer
"""
$(TYPEDEF)
Hyperparameters for [`RandomForestImputer`](@ref)
# Parameters:
$(TYPEDFIELDS)
# Example:
```
julia> mod = RandomForestImputer(n_trees=20,max_depth=10,recursive_passages=3)
```
"""
Base.@kwdef mutable struct RandomForestI_hp <: BetaMLHyperParametersSet
"For the underlying random forest algorithm parameters (`n_trees`,`max_depth`,`min_gain`,`min_records`,`max_features:`,`splitting_criterion`,`β`,`initialisation_strategy`, `oob` and `rng`) see [`RandomForestE_hp`](@ref) for the specific RF algorithm parameters"
rfhpar = RandomForestE_hp()
"Specify the positions of the integer columns to treat as categorical instead of cardinal. [Default: empty vector (all numerical cols are treated as cardinal by default and the others as categorical)]"
forced_categorical_cols::Vector{Int64} = Int64[] # like in RF, normally integers are considered ordinal
"Define the times to go trough the various columns to impute their data. Useful when there are data to impute on multiple columns. The order of the first passage is given by the decreasing number of missing values per column, the other passages are random [default: `1`]."
recursive_passages::Int64 = 1
"Determine the number of independent imputation of the whole dataset to make. Note that while independent, the imputations share the same random number generator (RNG)."
multiple_imputations::Int64 = 1
"Columns in the matrix for which to create an imputation model, i.e. to impute. It can be a vector of columns IDs (positions), or the keywords \"auto\" (default) or \"all\". With \"auto\" the model automatically detects the columns with missing data and impute only them. You may manually specify the columns or use \"auto\" if you want to create a imputation model for that columns during training even if all training data are non-missing to apply then the training model to further data with possibly missing values."
cols_to_impute::Union{String,Vector{Int64}} = "auto"
end
Base.@kwdef struct RandomForestImputer_lp <: BetaMLLearnableParametersSet
forests = nothing
cols_to_impute_actual = Int64[]
#imputedValues = nothing
#n_imputed_values::Int64
#oob::Vector{Vector{Float64}}
end
"""
$(TYPEDEF)
Impute missing data using Random Forests, with optional replicable multiple imputations.
See [`RandomForestI_hp`](@ref), [`RandomForestE_hp`](@ref) and [`BML_options`](@ref) for the parameters.
# Notes:
- Given a certain RNG and its status (e.g. `RandomForestImputer(...,rng=StableRNG(FIXEDSEED))`), the algorithm is completely deterministic, i.e. replicable.
- The algorithm accepts virtually any kind of data, sortable or not
# Example:
```julia
julia> using BetaML
julia> X = [1.4 2.5 "a"; missing 20.5 "b"; 0.6 18 missing; 0.7 22.8 "b"; 0.4 missing "b"; 1.6 3.7 "a"]
6×3 Matrix{Any}:
1.4 2.5 "a"
missing 20.5 "b"
0.6 18 missing
0.7 22.8 "b"
0.4 missing "b"
1.6 3.7 "a"
julia> mod = RandomForestImputer(n_trees=20,max_depth=10,recursive_passages=2)
RandomForestImputer - A Random-Forests based imputer (unfitted)
julia> X_full = fit!(mod,X)
** Processing imputation 1
6×3 Matrix{Any}:
1.4 2.5 "a"
0.504167 20.5 "b"
0.6 18 "b"
0.7 22.8 "b"
0.4 20.0837 "b"
1.6 3.7 "a"
```
"""
mutable struct RandomForestImputer <: Imputer
hpar::RandomForestI_hp
opt::BML_options
par::Union{RandomForestImputer_lp,Nothing}
cres
fitted::Bool
info::Dict{String,Any}
end
function RandomForestImputer(;kwargs...)
hps =RandomForestI_hp()
m = RandomForestImputer(hps,BML_options(),RandomForestImputer_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
# Looking for the fields of the fields...
thissubobjfields = fieldnames(nonmissingtype(typeof(fobj)))
for f2 in thissubobjfields
fobj2 = getproperty(fobj,f2)
if kw in fieldnames(typeof(fobj2))
setproperty!(fobj2,kw,kwv)
found = true
end
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a matrix with missing data using [`RandomForestImputer`](@ref)
"""
function fit!(m::RandomForestImputer,X)
nR,nC = size(X)
if m.fitted
m.opt.verbosity >= STD && @warn "This model has already been fitted and it doesn't support multiple training. This training will override the previous one(s)"
end
    # Setting default parameters that depend on the data...
max_depth = m.hpar.rfhpar.max_depth == nothing ? size(X,1) : m.hpar.rfhpar.max_depth
max_features = m.hpar.rfhpar.max_features == nothing ? Int(round(sqrt(size(X,2)-1))) : m.hpar.rfhpar.max_features
# Here only the hpar setting, later for each column
#splitting_criterion = m.hpar.splitting_criterion == nothing ? ( (Ty <: Number && !m.hpar.force_classification) ? variance : gini) : m.hpar.splitting_criterion
#splitting_criterion = m.hpar.rfhpar.splitting_criterion
    # Setting shortcuts to other hyperparameters/options....
min_gain = m.hpar.rfhpar.min_gain
min_records = m.hpar.rfhpar.min_records
#force_classification = m.hpar.rfhpar.force_classification
n_trees = m.hpar.rfhpar.n_trees
β = m.hpar.rfhpar.beta
oob = m.hpar.rfhpar.oob
cache = m.opt.cache
rng = m.opt.rng
verbosity = m.opt.verbosity
forced_categorical_cols = m.hpar.forced_categorical_cols
recursive_passages = m.hpar.recursive_passages
multiple_imputations = m.hpar.multiple_imputations
# determining cols_to_impute...
if m.hpar.cols_to_impute == "auto"
cols2imp = findall(i -> i==true, [any(ismissing.(c)) for c in eachcol(X)]) # ismissing.(sum.(eachcol(X))))
elseif m.hpar.cols_to_impute == "all"
cols2imp = collect(1:size(X,2))
else
cols2imp = m.hpar.cols_to_impute
end
imputed = fill(similar(X),multiple_imputations)
if max_features == typemax(Int64) && n_trees >1
max_features = Int(round(sqrt(size(X,2))))
end
max_features = min(nC,max_features)
max_depth = min(nR,max_depth)
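    # A column is treated as categorical (classification forest) if its non-missing eltype is not numeric
    # or if it is listed in `forced_categorical_cols`.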
catCols = [! (nonmissingtype(eltype(identity.(X[:,c]))) <: Number ) || c in forced_categorical_cols for c in 1:nC]
missingMask = ismissing.(X)
nonMissingMask = .! missingMask
n_imputed_values = sum(missingMask)
ooberrors = fill(convert(Vector{Union{Missing,Float64}},fill(missing,nC)),multiple_imputations) # by imputations and dimensions
forests = Array{Trees.Forest}(undef,multiple_imputations,nC)
for imputation in 1:multiple_imputations
verbosity >= STD && println("** Processing imputation $imputation")
Xout = copy(X)
sortedDims = reverse(sortperm(makecolvector(sum(missingMask,dims=1)))) # sorted from the dim with more missing values
ooberrorsImputation = convert(Vector{Union{Missing,Float64}},fill(missing,nC))
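        # At each passage, for every column to impute, a dedicated forest is (re)fitted on the other columns
        # (using the values imputed so far) and the column's missing cells are (re)predicted.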
for pass in 1:recursive_passages
m.opt.verbosity >= HIGH && println("- processing passage $pass")
if pass > 1
                shuffle!(rng, sortedDims) # randomise the order we go through the various dimensions at this passage
end
for d in sortedDims
!(d in cols2imp) && continue
verbosity >= FULL && println(" - processing dimension $d")
if m.hpar.rfhpar.splitting_criterion == nothing
splitting_criterion = catCols[d] ? gini : variance
else
                    splitting_criterion = m.hpar.rfhpar.splitting_criterion # use the user-provided criterion
end
nmy = nonMissingMask[:,d]
                y    = catCols[d] ? X[nmy,d] : identity.(X[nmy,d]) # without the identity it remains Any and always forces a classification
ty = nonmissingtype(eltype(y))
y = convert(Vector{ty},y)
Xd = Matrix(Xout[nmy,[1:(d-1);(d+1):end]])
dfor = buildForest(Xd,y, # forest model specific for this dimension
n_trees,
max_depth = max_depth,
min_gain = min_gain,
min_records = min_records,
max_features = max_features,
splitting_criterion = splitting_criterion,
β = β,
oob = false,
rng = rng,
force_classification = catCols[d])
# imputing missing values in d...
for i in 1:nR
if ! missingMask[i,d]
continue
end
xrow = permutedims(Vector(Xout[i,[1:(d-1);(d+1):end]]))
yest = predict(dfor,xrow)[1]
if ty <: Int
if catCols[d]
yest = parse(ty,mode(yest))
else
yest = Int(round(yest))
end
elseif !(ty <: Number)
yest = mode(yest)
end
Xout[i,d] = yest
#return Xout
end
                # This is the last passage: save the model and compute oob errors if requested
if pass == recursive_passages
forests[imputation,d] = dfor
if oob
ooberrorsImputation[d] = Trees.ooberror(dfor,Xd,y,rng=rng) # BetaML.Trees.ooberror(dfor,Xd,y)
end
end
end # end dimension
end # end recursive passage pass
imputed[imputation] = Xout
ooberrors[imputation] = ooberrorsImputation
end # end individual imputation
m.par = RandomForestImputer_lp(forests,cols2imp)
if cache
if multiple_imputations == 1
m.cres = Utils.disallowmissing(imputed[1])
else
m.cres = Utils.disallowmissing.(imputed)
end
end
m.info["n_imputed_values"] = n_imputed_values
m.info["oob_errors"] = ooberrors
m.fitted = true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Return the data with the missing values replaced with the imputed ones using the non-linear structure learned fitting a [`RandomForestImputer`](@ref) model.
# Notes:
- If `multiple_imputations` was set > 1 this is a vector of matrices (the individual imputations) instead of a single matrix.
"""
function predict(m::RandomForestImputer,X)
nR,nC = size(X)
missingMask = ismissing.(X)
nonMissingMask = .! missingMask
multiple_imputations = m.hpar.multiple_imputations
rng = m.opt.rng
forests = m.par.forests
verbosity = m.opt.verbosity
cols2imp = m.par.cols_to_impute_actual
imputed = fill(similar(X),multiple_imputations)
for imputation in 1:multiple_imputations
verbosity >= STD && println("** Processing imputation $imputation")
Xout = copy(X)
for d in 1:nC
!(d in cols2imp) && continue
verbosity >= FULL && println(" - processing dimension $d")
dfor = forests[imputation,d]
is_regression = dfor.is_regression
nmy = nonMissingMask[:,d]
y = is_regression ? identity.(X[nmy,d]) : X[nmy,d]
ty = nonmissingtype(eltype(y))
y = convert(Vector{ty},y)
Xd = Matrix(Xout[nmy,[1:(d-1);(d+1):end]])
dfor = forests[imputation,d]
# imputing missing values in d...
for i in 1:nR
if ! missingMask[i,d]
continue
end
xrow = permutedims(Vector(Xout[i,[1:(d-1);(d+1):end]]))
yest = predict(dfor,xrow)[1]
if ty <: Int
if ! is_regression
yest = parse(ty,mode(yest))
else
yest = Int(round(yest))
end
elseif !(ty <: Number)
yest = mode(yest)
end
Xout[i,d] = yest
#return Xout
end
end # end dimension
imputed[imputation] = Xout
end # end individual imputation
multiple_imputations == 1 ? (return Utils.disallowmissing(imputed[1])) : return Utils.disallowmissing.(imputed)
end
function show(io::IO, ::MIME"text/plain", m::RandomForestImputer)
if m.fitted == false
print(io,"RandomForestImputer - A Random-Forests based imputer (unfitted)")
else
print(io,"RandomForestImputer - A Random-Forests based imputer (fitted)")
end
end
function show(io::IO, m::RandomForestImputer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"RandomForestImputer - A Random-Forests based imputer (unfitted)")
else
print(io,"RandomForestImputer - A Random-Forests based imputer (fitted)")
println(io,m.info)
end
end
# ------------------------------------------------------------------------------
# GeneralImputer
"""
$(TYPEDEF)
Hyperparameters for [`GeneralImputer`](@ref)
# Parameters:
$(FIELDS)
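# Example:
An illustrative configuration (any estimator implementing the fit/predict interface described in [`GeneralImputer`](@ref) can be used):
```julia
julia> mod = GeneralImputer(estimator=RandomForestEstimator(), recursive_passages=2, multiple_imputations=5)
```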
"""
Base.@kwdef mutable struct GeneralI_hp <: BetaMLHyperParametersSet
"Columns in the matrix for which to create an imputation model, i.e. to impute. It can be a vector of columns IDs (positions), or the keywords \"auto\" (default) or \"all\". With \"auto\" the model automatically detects the columns with missing data and impute only them. You may manually specify the columns or use \"all\" if you want to create a imputation model for that columns during training even if all training data are non-missing to apply then the training model to further data with possibly missing values."
cols_to_impute::Union{String,Vector{Int64}} = "auto"
"An entimator model (regressor or classifier), with eventually its options (hyper-parameters), to be used to impute the various columns of the matrix. It can also be a `cols_to_impute`-length vector of different estimators to consider a different estimator for each column (dimension) to impute, for example when some columns are categorical (and will hence require a classifier) and some others are numerical (hence requiring a regressor). [default: `nothing`, i.e. use BetaML random forests, handling classification and regression jobs automatically]."
estimator = nothing
"Wheter the estimator(s) used to predict the missing data support itself missing data in the training features (X). If not, when the model for a certain dimension is fitted, dimensions with missing data in the same rows of those where imputation is needed are dropped and then only non-missing rows in the other remaining dimensions are considered. It can be a vector of boolean values to specify this property for each individual estimator or a single booleann value to apply to all the estimators [default: `false`]"
missing_supported::Union{Vector{Bool},Bool} = false
"The function used by the estimator(s) to fit the model. It should take as fist argument the model itself, as second argument a matrix representing the features, and as third argument a vector representing the labels. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.fit!`]"
fit_function::Union{Vector{Function},Function} = fit!
"The function used by the estimator(s) to predict the labels. It should take as fist argument the model itself and as second argument a matrix representing the features. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.predict`]"
predict_function::Union{Vector{Function},Function} = predict
"Define the number of times to go trough the various columns to impute their data. Useful when there are data to impute on multiple columns. The order of the first passage is given by the decreasing number of missing values per column, the other passages are random [default: `1`]."
recursive_passages::Int64 = 1
"Determine the number of independent imputation of the whole dataset to make. Note that while independent, the imputations share the same random number generator (RNG)."
multiple_imputations::Int64 = 1
end
Base.@kwdef struct GeneralImputer_lp <: BetaMLLearnableParametersSet
fittedModels = nothing # by cols_to_imute only
cols_to_impute_actual = Int64[]
x_used_cols = Vector{Int64}[] # by all columns
#imputedValues = nothing
end
"""
$(TYPEDEF)
Impute missing values using arbitrary learning models.
Impute missing values using any learning model (classifier or regressor, not necessarily from BetaML) that implements the interface `m = Model([options])`, `fit!(m,X,Y)` and `predict(m,X)`. For non-BetaML supervised models the actual fit and predict functions must be specified in the `fit_function` and `predict_function` parameters respectively.
If needed (for example when some columns with missing data are categorical and some numerical) different models can be specified for each column.
Multiple imputations and multiple "passages" through the various columns for a single imputation are supported.
See [`GeneralI_hp`](@ref) for all the hyper-parameters.
# Examples:
- *Using BetaML models*:
```julia
julia> using BetaML
julia> X = [1.4 2.5 "a"; missing 20.5 "b"; 0.6 18 missing; 0.7 22.8 "b"; 0.4 missing "b"; 1.6 3.7 "a"]
6×3 Matrix{Any}:
1.4 2.5 "a"
missing 20.5 "b"
0.6 18 missing
0.7 22.8 "b"
0.4 missing "b"
1.6 3.7 "a"
julia> mod = GeneralImputer(recursive_passages=2,multiple_imputations=2)
GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted)
julia> mX_full = fit!(mod,X);
** Processing imputation 1
** Processing imputation 2
julia> mX_full[1]
6×3 Matrix{Any}:
1.4 2.5 "a"
0.546722 20.5 "b"
0.6 18 "b"
0.7 22.8 "b"
0.4 19.8061 "b"
1.6 3.7 "a"
julia> mX_full[2]
6×3 Matrix{Any}:
1.4 2.5 "a"
0.554167 20.5 "b"
0.6 18 "b"
0.7 22.8 "b"
0.4 20.7551 "b"
1.6 3.7 "a"
julia> info(mod)
Dict{String, Any} with 1 entry:
"n_imputed_values" => 3
```
- *Using third party packages* (in this example `DecisionTree`):
```julia
julia> using BetaML
julia> import DecisionTree
julia> X = [1.4 2.5 "a"; missing 20.5 "b"; 0.6 18 missing; 0.7 22.8 "b"; 0.4 missing "b"; 1.6 3.7 "a"]
6×3 Matrix{Any}:
1.4 2.5 "a"
missing 20.5 "b"
0.6 18 missing
0.7 22.8 "b"
0.4 missing "b"
1.6 3.7 "a"
julia> mod = GeneralImputer(estimator=[DecisionTree.DecisionTreeRegressor(),DecisionTree.DecisionTreeRegressor(),DecisionTree.DecisionTreeClassifier()], fit_function = DecisionTree.fit!, predict_function=DecisionTree.predict, recursive_passages=2)
GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted)
julia> X_full = fit!(mod,X)
** Processing imputation 1
6×3 Matrix{Any}:
1.4 2.5 "a"
0.94 20.5 "b"
0.6 18 "b"
0.7 22.8 "b"
0.4 13.5 "b"
1.6 3.7 "a"
```
"""
mutable struct GeneralImputer <: Imputer
hpar::GeneralI_hp
opt::BML_options
par::Union{GeneralImputer_lp,Nothing}
cres
fitted::Bool
info::Dict{String,Any}
end
function GeneralImputer(;kwargs...)
hps = GeneralI_hp()
m = GeneralImputer(hps,BML_options(),GeneralImputer_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
## Looking for the fields of the fields...
#thissubobjfields = fieldnames(nonmissingtype(typeof(fobj)))
#for f2 in thissubobjfields
# fobj2 = getproperty(fobj,f2)
# if kw in fieldnames(typeof(fobj2))
# setproperty!(fobj2,kw,kwv)
# end
#end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a matrix with missing data using [`GeneralImputer`](@ref)
"""
function fit!(m::GeneralImputer,X)
nR,nC = size(X)
multiple_imputations = m.hpar.multiple_imputations
recursive_passages = m.hpar.recursive_passages
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
# determining cols_to_impute...
if m.hpar.cols_to_impute == "auto"
cols2imp = findall(i -> i==true, [any(ismissing.(c)) for c in eachcol(X)]) # ismissing.(sum.(eachcol(X))))
elseif m.hpar.cols_to_impute == "all"
cols2imp = collect(1:size(X,2))
else
cols2imp = m.hpar.cols_to_impute
end
nD2Imp = length(cols2imp)
# Setting `estimators`, a matrix of multiple_imputations x nD2Imp individual models...
if ! m.fitted
if m.hpar.estimator == nothing
estimators = [RandomForestEstimator(rng = m.opt.rng, verbosity=verbosity) for i in 1:multiple_imputations, d in 1:nD2Imp]
elseif typeof(m.hpar.estimator) <: AbstractVector
length(m.hpar.estimator) == nD2Imp || error("I can't use $(length(m.hpar.estimator)) estimators to impute $(nD2Imp) columns.")
estimators = vcat([permutedims(deepcopy(m.hpar.estimator)) for i in 1:multiple_imputations]...)
else # single estimator
estimators = [deepcopy(m.hpar.estimator) for i in 1:multiple_imputations, j in 1:nD2Imp]
end
else
m.opt.verbosity >= STD && @warn "This imputer has already been fitted. Not all learners support multiple training."
estimators = m.par.fittedModels
end
missing_supported = typeof(m.hpar.missing_supported) <: AbstractArray ? m.hpar.missing_supported : fill(m.hpar.missing_supported,nD2Imp)
fit_functions = typeof(m.hpar.fit_function) <: AbstractArray ? m.hpar.fit_function : fill(m.hpar.fit_function,nD2Imp)
predict_functions = typeof(m.hpar.predict_function) <: AbstractArray ? m.hpar.predict_function : fill(m.hpar.predict_function,nD2Imp)
imputed = fill(similar(X),multiple_imputations)
missingMask = ismissing.(X)
nonMissingMask = .! missingMask
n_imputed_values = sum(missingMask)
x_used_cols = [Int64[] for d in 1:size(X,2)]
for imputation in 1:multiple_imputations
verbosity >= STD && println("** Processing imputation $imputation")
Xout = copy(X)
sortedDims = reverse(sortperm(makecolvector(sum(missingMask,dims=1)))) # sorted from the dim with more missing values
for pass in 1:recursive_passages
Xout_passage = copy(Xout)
m.opt.verbosity >= HIGH && println("- processing passage $pass")
if pass > 1
                shuffle!(rng, sortedDims) # randomise the order we go through the various dimensions at this passage
end
for d in sortedDims
!(d in cols2imp) && continue
dIdx = findfirst(x -> x == d, cols2imp)
verbosity >= FULL && println(" - processing dimension $d")
msup = missing_supported[dIdx]
                if msup # missing is supported, I consider all non-missing y rows and all dimensions..
nmy = nonMissingMask[:,d]
y = identity.(X[nmy,d]) # otherwise for some models it remains a classification
ty = nonmissingtype(eltype(y))
y = convert(Vector{ty},y)
Xd = Matrix(Xout[nmy,[1:(d-1);(d+1):end]])
x_used_cols[d] = setdiff(collect(1:nC),d)
else # missing is NOT supported, I consider only cols with nonmissing data in rows to impute and full rows in the remaining cols
nmy = nonMissingMask[:,d]
                    # Step 1: removing cols with missing values in the rows that we will need to impute (i.e. that are also missing in the y col)..
# I need to remove col and not row, as I need to impute this value, I can't just skip the row
candidates_d = setdiff(collect(1:nC),d)
for (ri,r) in enumerate(eachrow(Xout))
!nmy[ri] || continue # we want to look only where y is missing to remove cols
for dc in candidates_d
if ismissing(r[dc])
candidates_d = setdiff(candidates_d,dc)
end
end
end
x_used_cols[d] = candidates_d
Xd = Xout[:,candidates_d]
# Step 2: for training, consider only the rows where not-dropped cols values are all nonmissing
nmxrows = [all(.! ismissing.(r)) for r in eachrow(Xd)]
nmrows = nmxrows .& nmy # non missing both in Y and remained X rows
y = identity.(X[nmrows,d]) # otherwise for some models it remains a classification
ty = nonmissingtype(eltype(y))
y = convert(Vector{ty},y)
tX = nonmissingtype(eltype(Xd))
Xd = convert(Matrix{tX},Matrix(Xd[nmrows,:]))
end
dmodel = deepcopy(estimators[imputation,dIdx])
fit_functions[dIdx](dmodel,Xd,y)
# imputing missing values in d...
for i in 1:nR
if ! missingMask[i,d]
continue
end
xrow = Vector(Xout[i,x_used_cols[d]])
                    if !msup # no missing supported, the row shouldn't contain missing values
xrow = Utils.disallowmissing(xrow)
end
yest = predict_functions[dIdx](dmodel,xrow)
                    # handling some particular cases...
if typeof(yest) <: AbstractMatrix
yest = yest[1,1]
elseif typeof(yest) <: AbstractVector
yest = yest[1]
end
if typeof(yest) <: AbstractVector{<:AbstractDict}
yest = mode(yest[1],rng=rng)
elseif typeof(yest) <: AbstractDict
yest = mode(yest,rng=rng)
end
if ty <: Int
if typeof(yest) <: AbstractString
yest = parse(ty,yest)
elseif typeof(yest) <: Number
yest = Int(round(yest))
else
error("I don't know how to convert this type $(typeof(yest)) to an integer!")
end
end
Xout_passage[i,d] = yest
#return Xout
end
                # This is the last passage: save the model
if pass == recursive_passages
estimators[imputation,dIdx] = dmodel
end
end # end dimension
Xout = copy(Xout_passage)
end # end recursive passage pass
imputed[imputation] = Xout
end # end individual imputation
m.par = GeneralImputer_lp(estimators,cols2imp,x_used_cols)
if cache
if multiple_imputations == 1
m.cres = Utils.disallowmissing(imputed[1])
else
m.cres = Utils.disallowmissing.(imputed)
end
end
m.info["n_imputed_values"] = n_imputed_values
m.fitted = true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Return the data with the missing values replaced with the imputed ones using the non-linear structure learned fitting a [`GeneralImputer`](@ref) model.
# Notes:
- if `multiple_imputations` was set > 1 this is a vector of matrices (the individual imputations) instead of a single matrix.
- due to the fact that the final models are fitted with already imputed values when multiple passages are employed, these models cannot be used to impute "new" matrices if they do not themselves support missing values. In this case, use `X̂new = fit!(m::GeneralImputer,Xnew)` instead of `fit!(m::GeneralImputer,X); X̂new = predict(m,Xnew)`.
"""
function predict(m::GeneralImputer,X)
cols2imp = m.par.cols_to_impute_actual
nD2Imp = length(cols2imp)
missing_supported = typeof(m.hpar.missing_supported) <: AbstractArray ? m.hpar.missing_supported : fill(m.hpar.missing_supported,nD2Imp)
m.hpar.recursive_passages == 1 || all(missing_supported) || error("`predict(m::GeneralImputer,Xnew)` can not be used with multiple recursive passages in models that don't support missing values. Fit a new model for `Xnew` instead.")
nR,nC = size(X)
missingMask = ismissing.(X)
nonMissingMask = .! missingMask
multiple_imputations = m.hpar.multiple_imputations
rng = m.opt.rng
estimators = m.par.fittedModels
verbosity = m.opt.verbosity
x_used_cols = m.par.x_used_cols
predict_functions = typeof(m.hpar.predict_function) <: AbstractArray ? m.hpar.predict_function : fill(m.hpar.predict_function,nD2Imp)
imputed = fill(similar(X),multiple_imputations)
for imputation in 1:multiple_imputations
verbosity >= STD && println("** Processing imputation $imputation")
Xout = copy(X)
for d in 1:nC
!(d in cols2imp) && continue
verbosity >= FULL && println(" - processing dimension $d")
dIdx = findfirst(x -> x == d, cols2imp)
msup = missing_supported[dIdx]
nmy = nonMissingMask[:,d]
y = X[nmy,d]
ty = nonmissingtype(eltype(y))
y = convert(Vector{ty},y)
#Xd = Matrix(Xout[nmy,[1:(d-1);(d+1):end]])
dmod = estimators[imputation,dIdx]
# imputing missing values in d...
for i in 1:nR
if ! missingMask[i,d]
continue
end
xrow = Vector(Xout[i,x_used_cols[d]])
                if !msup # no missing supported, the row shouldn't contain missing values
xrow = Utils.disallowmissing(xrow)
end
yest = predict_functions[dIdx](dmod,xrow)
                # handling some particular cases...
if typeof(yest) <: AbstractMatrix
yest = yest[1,1]
elseif typeof(yest) <: AbstractVector
yest = yest[1]
end
if typeof(yest) <: AbstractVector{<:AbstractDict}
yest = mode(yest[1],rng=rng)
elseif typeof(yest) <: AbstractDict
yest = mode(yest,rng=rng)
end
if ty <: Int
if typeof(yest) <: AbstractString
yest = parse(ty,yest)
elseif typeof(yest) <: Number
yest = Int(round(yest))
else
error("I don't know how to convert this type $(typeof(yest)) to an integer!")
end
end
Xout[i,d] = yest
#return Xout
end
end # end dimension
imputed[imputation] = Xout
end # end individual imputation
multiple_imputations == 1 ? (return Utils.disallowmissing(imputed[1])) : return Utils.disallowmissing.(imputed)
end
function show(io::IO, ::MIME"text/plain", m::GeneralImputer)
if m.fitted == false
print(io,"GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted)")
else
print(io,"GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted) (fitted)")
end
end
function show(io::IO, m::GeneralImputer)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted) (unfitted)")
else
print(io,"GeneralImputer - A imputer based on an arbitrary regressor/classifier(unfitted) (fitted)")
println(io,m.info)
end
end
end # end Imputation module | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 43450 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
BetaML.Nn module
Implement the functionality required to define an artificial Neural Network, train it with data, forecast data and assess its performances.
Common type of layers and optimisation algorithms are already provided, but you can define your own ones subclassing respectively the `AbstractLayer` and `OptimisationAlgorithm` abstract types.
The module provide the following types or functions. Use `?[type or function]` to access their full signature and detailed documentation:
# Model definition:
- [`DenseLayer`](@ref): Classical feed-forward layer with user-defined activation function
- [`DenseNoBiasLayer`](@ref): Classical layer without the bias parameter
- [`VectorFunctionLayer`](@ref): Layer whose activation function runs over the ensemble of its nodes rather than on each one individually. No learnable weights on input, optional learnable weights as parameters of the activation function.
- [`ScalarFunctionLayer`](@ref): Layer whose activation function runs over each node individually, like a classic `DenseLayer`, but with no learnable weights on input and optional learnable weights as parameters of the activation function.
- [`ReplicatorLayer`](@ref): Alias for a `ScalarFunctionLayer` with no learnable parameters and identity as activation function
- [`ReshaperLayer`](@ref): Reshape the output of a layer (or the input data) to the shape needed for the next one
- [`PoolingLayer`](@ref): In the middle between `VectorFunctionLayer` and `ScalarFunctionLayer`, it applies a function to the set of nodes defined in a sliding kernel. Weightless.
- [`ConvLayer`](@ref): A generic N+1 (channels) dimensional convolutional layer
- [`GroupedLayer`](@ref): To stack several layers into a single layer, e.g. for multi-branches networks
- [`NeuralNetworkEstimator`](@ref): Build the chained network and define a cost function
Each layer can use a default activation function, one of the functions provided in the `Utils` module (`relu`, `tanh`, `softmax`,...) or one provided by you.
BetaML will try to recognise whether it is a "known" function for which it can set the exact derivative; otherwise you can provide the derivative to the layer yourself.
If the derivative of the activation function is not provided (either manually or automatically), AD will be used and training may be slower, although this difference tends to vanish with bigger datasets.
You can alternatively implement your own layer by defining a new type as a subtype of the abstract type `AbstractLayer`. Each user-implemented layer must define the following methods (a minimal sketch follows the list):
- A suitable constructor
- `forward(layer,x)`
- `backward(layer,x,next_gradient)`
- `get_params(layer)`
- `get_gradient(layer,x,next_gradient)`
- `set_params!(layer,w)`
- `size(layer)`
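For example, a minimal, illustrative sketch of a custom layer that simply rescales its input by a single learnable scalar (names and shapes are arbitrary) could look as follows:
```julia
import Base: size
import BetaML.Nn: AbstractLayer, Learnable, forward, backward, get_params, get_gradient, set_params!

struct ScaleLayer <: AbstractLayer
    s::Vector{Float64}   # the single learnable parameter, boxed in a vector so it can be updated in place
    n::Int               # number of nodes (input size == output size)
end
ScaleLayer(n::Int; s=1.0) = ScaleLayer([s],n)
forward(l::ScaleLayer,x)                    = l.s[1] .* x
backward(l::ScaleLayer,x,next_gradient)     = l.s[1] .* next_gradient                  # dloss/dinput
get_params(l::ScaleLayer)                   = Learnable(([l.s[1]],))
get_gradient(l::ScaleLayer,x,next_gradient) = Learnable(([sum(x .* next_gradient)],))  # dloss/ds
set_params!(l::ScaleLayer,w)                = (l.s[1] = w.data[1][1]; nothing)
size(l::ScaleLayer)                         = ((l.n,),(l.n,))
```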
# Model fitting:
- `fit!(nn,X,Y)`: fitting function
- `fitting_info(nn)`: Default callback function during fitting
- `SGD`: The classical optimisation algorithm
- `ADAM`: A faster moment-based optimisation algorithm
To define your own optimisation algorithm define a subtype of `OptimisationAlgorithm` and implement the function `single_update!(θ,▽;opt_alg)` and optionally `init_optalg!(⋅)` specific to it.
# Model predictions and assessment:
- `predict(nn)` or `predict(nn,X)`: Return the output given the data
While high-level functions operating on the dataset expect it to be in the standard format (n_records × n_dimensions matrices) it is customary to represent the chain of a neural network as a flow of column vectors, so all low-level operations (operating on a single datapoint) expect both the input and the output as a column vector.
"""
module Nn
#import Base.Threads.@spawn
using Random, LinearAlgebra, StaticArrays, LoopVectorization, Zygote, ProgressMeter, Reexport, DocStringExtensions
import Distributions: Uniform
using ForceImport
@force using ..Api
@force using ..Utils
import Base.size
import Base: +, -, *, /, sum, sqrt
import Base.show
# module own functions
export AbstractLayer, forward, backward, get_params, get_gradient, set_params!, size, preprocess! # layer API
#export forward_old, backward_old, get_gradient_old
export DenseLayer, DenseNoBiasLayer, VectorFunctionLayer, ScalarFunctionLayer, ReplicatorLayer # Available layers
export GroupedLayer
export ConvLayer, ReshaperLayer, PoolingLayer
export init_optalg!, single_update! # Optimizers API
export SGD,ADAM, DebugOptAlg # Available optimizers
export Learnable, fitting_info, NeuralNetworkEstimator, NeuralNetworkE_hp, NeuralNetworkE_options # NN API
# export get_nparams, NN, buildNetwork, predict, loss, train!, getindex, show # old
# for working on gradient as e.g [([1.0 2.0; 3.0 4.0], [1.0,2.0,3.0]),([1.0,2.0,3.0],1.0)]
"""
Learnable(data)
Structure representing the learnable parameters of a layer or its gradient.
The learnable parameters of a layer are given in the form of an N-tuple of Array{Float64,N2} where N2 can change (e.g. we can have a layer with the first parameter being a matrix, and the second one being a scalar).
We wrap the tuple in its own structure partly for some efficiency gain, but above all to define standard mathematical operations on the gradients without committing "type piracy" with respect to Base tuples.
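For example (an illustrative sketch with arbitrary shapes):
```julia
w     = Learnable(([1.0 2.0; 3.0 4.0], [0.5, 0.5]))  # e.g. a weight matrix and a bias vector
g     = Learnable(([0.1 0.1; 0.1 0.1], [0.0, 0.1]))  # a gradient with the same structure
w_new = w - 0.01 * g                                 # an SGD-style step using the operators defined below
```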
"""
mutable struct Learnable{ET}
#data::Union{Tuple{Vararg{Array{Float64,N} where N}},Vector{Tuple{Vararg{Array{Float64,N} where N}}}}
data::Tuple{Vararg{Array{ET,N} where N}}
function Learnable(data)
if data == ()
return new{Float64}(data)
else
return new{eltype(eltype(data))}(data)
end
end
end
function +(items::Learnable...)
values = collect(items[1].data)
N = length(values)
@inbounds for item in items[2:end]
@inbounds @simd for n in 1:N # @inbounds @simd
values[n] += item.data[n]
end
end
return Learnable(Tuple(values))
end
sum(items::Learnable...) = +(items...)
function -(items::Learnable...)
values = collect(items[1].data)
N = length(values)
@inbounds for item in items[2:end]
@inbounds @simd for n in 1:N # @simd
values[n] -= item.data[n]
end
end
return Learnable(Tuple(values))
end
function *(items::Learnable...)
values = collect(items[1].data)
N = length(values)
@inbounds for item in items[2:end]
@inbounds @simd for n in 1:N # @simd
values[n] = values[n] .* item.data[n]
end
end
return Learnable(Tuple(values))
end
+(item::Learnable,sc::Number) = Learnable(Tuple([item.data[i] .+ sc for i in 1:length(item.data)]))
+(sc::Number, item::Learnable) = +(item,sc)
-(item::Learnable,sc::Number) = Learnable(Tuple([item.data[i] .- sc for i in 1:length(item.data)]))
-(sc::Number, item::Learnable) = (-(item,sc)) * -1
*(item::Learnable,sc::Number) = Learnable(item.data .* sc)
*(sc::Number, item::Learnable) = Learnable(sc .* item.data)
/(item::Learnable,sc::Number) = Learnable(item.data ./ sc)
/(sc::Number,item::Learnable,) = Learnable(Tuple([sc ./ item.data[i] for i in 1:length(item.data)]))
sqrt(item::Learnable) = Learnable(Tuple([sqrt.(item.data[i]) for i in 1:length(item.data)]))
/(item1::Learnable,item2::Learnable) = Learnable(Tuple([item1.data[i] ./ item2.data[i] for i in 1:length(item1.data)]))
#=
# not needed ??
function Base.iterate(iter::Learnable, state=(iter.data[1], 1))
element, count = state
if count > length(iter)
return nothing
elseif count == length(iter)
return (element, (iter.data[count], count + 1))
end
return (element, (iter.data[count+1], count + 1))
end
Base.length(iter::Learnable) = length(iter.data)
#Base.eltype(iter::Learnable) = Int
=#
## Sckeleton for the layer functionality.
# See nn_default_layers.jl for actual implementations
abstract type AbstractLayer end
abstract type RecursiveLayer <: AbstractLayer end
include("default_layers/DenseLayer.jl")
include("default_layers/DenseNoBiasLayer.jl")
include("default_layers/VectorFunctionLayer.jl")
include("default_layers/ScalarFunctionLayer.jl")
include("default_layers/GroupedLayer.jl")
include("default_layers/ConvLayer.jl")
include("default_layers/PoolingLayer.jl")
include("default_layers/ReshaperLayer.jl")
include("default_layers/RNNLayer.jl")
"""
forward(layer,x)
Predict the output of the layer given the input
# Parameters:
* `layer`: Worker layer
* `x`: Input to the layer
# Return:
- An Array{T,1} of the prediction (even for a scalar)
"""
function forward(layer::AbstractLayer,x)
error("Not implemented for this kind of layer. Please implement `forward(layer,x)`.")
end
"""
backward(layer,x,next_gradient)
Compute backpropagation for this layer with respect to its inputs
# Parameters:
* `layer`: Worker layer
* `x`: Input to the layer
* `next_gradient`: Derivative of the overall loss with respect to the input of the next layer (output of this layer)
# Return:
* The evaluated gradient of the loss with respect to this layer's inputs
"""
function backward(layer::AbstractLayer,x,next_gradient)
error("Not implemented for this kind of layer. Please implement `backward(layer,x,next_gradient)`.")
end
"""
get_params(layer)
Get the layer's current value of its trainable parameters
# Parameters:
* `layer`: Worker layer
# Return:
* The current value of the layer's trainable parameters as a tuple of matrices. It is up to you to decide how to organise this tuple, as long as you are consistent with the `get_gradient()` and `set_params()` functions. Note that starting from BetaML 0.2.2 this tuple needs to be wrapped in its `Learnable` type.
"""
function get_params(layer::AbstractLayer)
error("Not implemented for this kind of layer. Please implement `get_params(layer)`.")
end
"""
get_gradient(layer,x,next_gradient)
Compute backpropagation for this layer with respect to the layer weights
# Parameters:
* `layer`: Worker layer
* `x`: Input to the layer
* `next_gradient`: Derivative of the overall loss with respect to the input of the next layer (output of this layer)
# Return:
* The evaluated gradient of the loss with respect to this layer's trainable parameters as a tuple of matrices. It is up to you to decide how to organise this tuple, as long as you are consistent with the `get_params()` and `set_params()` functions. Note that starting from BetaML 0.2.2 this tuple needs to be wrapped in its `Learnable` type.
"""
function get_gradient(layer::AbstractLayer,x,next_gradient)
error("Not implemented for this kind of layer. Please implement `get_gradient(layer,x,next_gradient)`.")
end
"""
set_params!(layer,w)
Set the trainable parameters of the layer with the given values
# Parameters:
* `layer`: Worker layer
* `w`: The new parameters to set (Learnable)
# Notes:
* The format of the tuple wrapped by Learnable must be consistent with those of the `get_params()` and `get_gradient()` functions.
"""
function set_params!(layer::AbstractLayer,w)
error("Not implemented for this kind of layer. Please implement `set_params!(layer,w)`.")
end
"""
size(layer)
Get the size of the layer in terms of (input size, output size), both as tuples
# Notes:
* You need to use `import Base.size` before defining this function for your layer
"""
function size(layer::AbstractLayer)
error("Not implemented for this kind of layer. Please implement `size(layer)`.")
end
"""get_nparams(layer)
Return the number of parameters of a layer.
It doesn't need to be implemented by each layer type, as it uses get_params().
"""
function get_nparams(layer::AbstractLayer)
pars = get_params(layer)
nP = 0
for p in pars.data
nP += *(size(p)...)
end
return nP
end
"""
$(TYPEDSIGNATURES)
Preprocess the layer with information known at layer creation (i.e. no data info used)
This function is used for some layers to cache some computation that doesn't require the data and it is called at the beginning of `fit!`.
For example, it is used in ConvLayer to store the ids of the convolution.
# Notes:
- as it doesn't depend on data, it is not reset by `reset!`
"""
function preprocess!(layer::AbstractLayer)
return nothing
end
# ------------------------------------------------------------------------------
# NN-related functions
"""
NN
Low-level representation of a Neural Network. Use the model `NeuralNetworkEstimator` instead.
# Fields:
* `layers`: Array of layers objects
* `cf`: Cost function
* `dcf`: Derivative of the cost function
* `trained`: Control flag for trained networks
"""
mutable struct NN
layers::Array{AbstractLayer,1}
cf::Function
dcf::Union{Function,Nothing}
trained::Bool
name::String
end
"""
buildNetwork(layers,cf;dcf,name)
Instantiate a new Feedforward Neural Network
!!! warning
This function has been de-exported in BetaML 0.9.
Use the model [`NeuralNetworkEstimator`](@ref) instead.
Parameters:
* `layers`: Array of layers objects
* `cf`: Cost function
* `dcf`: Derivative of the cost function [def: `nothing`]
* `name`: Name of the network [def: "Neural Network"]
# Notes:
* Even if the network ends with a single output node, the cost function and its derivative should always expect y and ŷ as column vectors.
"""
function buildNetwork(layers,cf;dcf=match_known_derivatives(cf),name="Neural Network")
return NN(layers,cf,dcf,false,name)
end
"""
predict(nn::NN,x)
Low-level network predictions. Use instead `predict(m::NeuralNetworkEstimator)`
# Parameters:
* `nn`: Worker network
* `x`: Input to the network (n × d)
"""
function predict(nn::NN,x)
#x = makematrix(x)
# get the output dimensions
n = size(x)[1]
lastlayer_size = size(nn.layers[end])[2]
    length(lastlayer_size) == 1 || error("The last NN layer should always be a single dimension vector. If needed, use `ReshaperLayer` to reshape its output as a vector.")
d = lastlayer_size[1]
out = zeros(eltype(x),n,d)
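    # feed each record through the chain of layers, one record at a time, as a column vector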
for i in 1:size(x)[1]
values = selectdim(x,1,i) # x[i,:]
for l in nn.layers
values = forward(l,values)
end
out[i,:] = values
end
return out
end
"""
loss(fnn,x,y)
Low-level function that computes the avg. network loss on a test set (or on a single (1 × d) data point)
# Parameters:
* `fnn`: Worker network
* `x`: Input to the network (n) or (n x d)
* `y`: Label input (n) or (n x d)
"""
function loss(nn::NN,x,y)
#x = makematrix(x) # TODO: check these two lines
y = makematrix(y)
(n,d) = size(x)
    #(nn.trained || n == 1) ? "" : @warn "Seems you are trying to test a neural network that has not been trained. Use first `train!(nn,x,y)`"
ϵ = 0.0
for i in 1:n
ŷ = predict(nn,x[i,:]')[1,:]
ϵ += nn.cf(y[i,:],ŷ)
end
return ϵ/n
end
"""
get_params(nn)
Retrieve the current weights
# Parameters:
* `nn`: Worker network
# Notes:
* The output is a vector of tuples of each layer's input weights and bias weights
"""
@inline function get_params(nn::NN)
return [get_params(l) for l in nn.layers]
end
"""
get_gradient(nn,x,y)
Low-level function that retrieves the current gradient of the weights (i.e. the derivative of the cost with respect to the weights). Unexported in BetaML >= v0.9
# Parameters:
* `nn`: Worker network
* `x`: Input to the network (d,1)
* `y`: Label input (d,1)
#Notes:
* The output is a vector of tuples of each layer's input weights and bias weights
"""
function get_gradient(nn::NN,x::Union{T,AbstractArray{T,N1}},y::Union{T2,AbstractArray{T2,N2}}) where { T <: Number, T2 <: Number, N1, N2}
#x = makecolvector(x)
#y = makecolvector(y)
nLayers = length(nn.layers)
    # Step 1: Forward pass
forwardStack = Vector{Array{Float64}}(undef,nLayers+1)
forwardStack[1] = x
@inbounds for (i,l) in enumerate(nn.layers)
#println(i)
forwardStack[i+1] = forward(l,forwardStack[i])
end
# Step 2: Backpropagation pass
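    # Walk the layers backwards, turning dloss/d(layer output) into dloss/d(layer input) via each layer's `backward`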
backwardStack = Vector{Array{Float64}}(undef,nLayers+1)
if nn.dcf != nothing
backwardStack[end] = nn.dcf(y,forwardStack[end]) # adding dϵ_dHatY
else
backwardStack[end] = gradient(nn.cf,y,forwardStack[end])[2] # using AD from Zygote
end
@inbounds for lidx in nLayers:-1:1
l = nn.layers[lidx]
#println(lidx)
#println(l)
dϵ_do = backward(l,forwardStack[lidx],backwardStack[lidx+1])
backwardStack[lidx] = dϵ_do
end
    # Step 3: Computing the gradient of the weights
dWs = Array{Learnable,1}(undef,nLayers)
@inbounds for lidx in 1:nLayers
dWs[lidx] = get_gradient(nn.layers[lidx],forwardStack[lidx],backwardStack[lidx+1])
end
return dWs
end
"""
get_batchgradient(nn,xbatch,ybatch)
Retrieve the current gradient of the weights (i.e. the derivative of the cost with respect to the weights)
# Parameters:
* `nn`: Worker network
* `xbatch`: Input to the network (n,d)
* `ybatch`: Label input (n,d)
#Notes:
* The output is a vector of tuples of each layer's input weights and bias weights
"""
function get_batchgradient(nn,xbatch::AbstractArray{T,N1},ybatch::AbstractArray{T2,N2}) where {T <: Number, T2 <: Number, N1, N2}
#return [get_gradient(nn,xbatch[j,:],ybatch[j,:]) for j in 1:size(xbatch,1)]
bsize = size(xbatch,1)
gradients = Array{Vector{Learnable},1}(undef,bsize)
# Note: in Julia 1.6 somehow the multithreading is less efficient than in Julia 1.5
# Using @inbounds @simd result faster than using 4 threads, so reverting to it.
# But to keep following the evolution, as there seems to be some issues on performances
# in Julia 1.6: https://discourse.julialang.org/t/drop-of-performances-with-julia-1-6-0-for-interpolationkernels/58085
# Maybe when that's solved it will be again more convenient to use multi-threading
#Threads.@threads
@inbounds for j in 1:bsize # @simd
gradients[j] = get_gradient(nn,xbatch[j,:],ybatch[j,:])
end
return gradients
end
"""
set_params!(nn,w)
Update the weights of the network
# Parameters:
* `nn`: Worker network
* `w`: The new weights to set
"""
function set_params!(nn::NN,w)
for lidx in 1:length(nn.layers)
set_params!(nn.layers[lidx],w[lidx])
end
end
"get_nparams(nn) - Return the number of trainable parameters of the neural network."
function get_nparams(nn::NN)
nP = 0
for l in nn.layers
nP += get_nparams(l)
end
return nP
end
function preprocess!(nn::NN)
for l in nn.layers
preprocess!(l)
end
end
Base.getindex(n::NN, i::AbstractArray) = NN(n.layers[i],n.cf,n.dcf,n.trained,n.name)
# ------------------------------------------------------------------------------
# Optimisation-related functions
"""
OptimisationAlgorithm
Abstract type representing an Optimisation algorithm.
Currently supported algorithms:
- `SGD` (Stochastic) Gradient Descent
- `ADAM` The ADAM algorithm, an adaptive moment estimation optimiser.
See `?[NAME OF THE ALGORITHM]` for details on each algorithm.
You can implement your own optimisation algorithm using a subtype of `OptimisationAlgorithm` and implementing its constructor and the update function `single_update!(⋅)` (type `?single_update!` for details).
"""
abstract type OptimisationAlgorithm end
include("Nn_default_optalgs.jl")
"""
fitting_info(nn,xbatch,ybatch,x,y;n,n_batches,epochs,epochs_ran,verbosity,n_epoch,n_batch)
Default callback function to display information during training, depending on the verbosity level
# Parameters:
* `nn`: Worker network
* `xbatch`: Batch input to the network (batch_size,din)
* `ybatch`: Batch label input (batch_size,dout)
* `x`: Full input to the network (n_records,din)
* `y`: Full label input (n_records,dout)
* `n`: Size of the full training set
* `n_batches`: Number of batches per epoch
* `epochs`: Number of epochs defined for the training
* `epochs_ran`: Number of epochs already ran in previous training sessions
* `verbosity`: Verbosity level defined for the training (NONE,LOW,STD,HIGH,FULL)
* `n_epoch`: Counter of the current epoch
* `n_batch`: Counter of the current batch
# Notes:
* Reporting of the error (loss of the network) is expensive. Use `verbosity=NONE` for better performance
"""
function fitting_info(nn,xbatch,ybatch,x,y;n,n_batches,epochs,epochs_ran,verbosity,n_epoch,n_batch)
if verbosity == NONE
return false # doesn't stop the training
end
nMsgDict = Dict(LOW => 0, STD => 10,HIGH => 100, FULL => n)
nMsgs = nMsgDict[verbosity]
if verbosity == FULL || ( n_batch == n_batches && ( n_epoch == 1 || n_epoch % ceil(epochs/nMsgs) == 0))
ϵ = loss(nn,x,y)
println("Training.. \t avg loss on epoch $n_epoch ($(n_epoch+epochs_ran)): \t $(ϵ)")
end
return false
end
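#= Illustrative sketch (hypothetical, not part of the package): a custom callback with
the same signature as `fitting_info`. Returning `true` stops the training; here we
stop early when the average loss on the full dataset falls below a threshold,
checking only on the last batch of each epoch to limit the (expensive) loss evaluations.
function my_early_stop(nn,xbatch,ybatch,x,y;n,n_batches,epochs,epochs_ran,verbosity,n_epoch,n_batch)
    n_batch == n_batches || return false
    return loss(nn,x,y) < 1e-4
end
# ...to be passed to the training function with `cb=my_early_stop`
=#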
"""
train!(nn,x,y;epochs,batch_size,sequential,opt_alg,verbosity,cb)
Low-level function that trains a neural network with the given x,y data.
!!! warning
This function is deprecated and has been unexported in BetaML v0.9.
Use the model [`NeuralNetworkEstimator`](@ref) instead.
# Parameters:
* `nn`: Worker network
* `x`: Training input to the network (records x dimensions)
* `y`: Label input (records x dimensions)
* `epochs`: Number of passages over the training set [def: `100`]
* `batch_size`: Size of each individual batch [def: `min(size(x,1),32)`]
* `sequential`: Whether to process the data sequentially instead of in random order [def: `false`]
* `opt_alg`: The optimisation algorithm to update the gradient at each batch [def: `ADAM()`]
* `verbosity`: A verbosity parameter regulating the trade-off between information and efficiency [def: `STD`]
* `cb`: A callback to provide information. [def: `fitting_info`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return:
- A named tuple with the following information
- `epochs`: Number of epochs actually ran
- `ϵ_epochs`: The average error on each epoch (if `verbosity > LOW`)
- `θ_epochs`: The parameters at each epoch (if `verbosity > STD`)
# Notes:
- Currently supported algorithms:
- `SGD`, the classical (Stochastic) Gradient Descent optimiser
- `ADAM`, an adaptive moment estimation optimiser
- Look at the individual optimisation algorithm (`?[NAME OF THE ALGORITHM]`) for info on its parameters, e.g. [`?SGD`](@ref SGD) for the Stochastic Gradient Descent.
- You can implement your own optimisation algorithm using a subtype of `OptimisationAlgorithm` and implementing its constructor and the update function `single_update!(⋅)` (type `?single_update!` for details).
- You can implement your own callback function, although the one provided by default is already pretty generic (its output depends on the `verbosity` parameter). See [`fitting_info`](@ref) for information on the callback parameters.
- Both the callback function and the [`single_update!`](@ref) function of the optimisation algorithm can be used to stop the training algorithm, respectively returning `true` or `stop=true`.
- The verbosity can be set to any of `NONE`,`LOW`,`STD`,`HIGH`,`FULL`.
- The update is done computing the average gradient for each batch and then calling `single_update!` to let the optimisation algorithm perform the parameters update
"""
function train!(nn::NN,x,y; epochs=100, batch_size=min(size(x,1),32), sequential=false, nepochs_ran=0,verbosity::Verbosity=STD, cb=fitting_info, opt_alg::OptimisationAlgorithm=ADAM(),rng = Random.GLOBAL_RNG)
if verbosity > STD
@codelocation
end
#x = makematrix(x)
#y = makematrix(y)
preprocess!(nn)
(n,d) = size(x)
batch_size = min(size(x,1),batch_size)
if verbosity > NONE # Note that there are two "Verbosity" type objects. To compare with numbers use Int(NONE) > 1
println("***\n*** Training $(nn.name) for $epochs epochs with algorithm $(typeof(opt_alg)).")
end
ϵ_epoch_l = Inf
θ_epoch_l = get_params(nn)
ϵ_epoch = loss(nn,x,y)
θ_epoch = get_params(nn)
ϵ_epochs = Float64[]
θ_epochs = []
init_optalg!(opt_alg::OptimisationAlgorithm;θ=get_params(nn),batch_size=batch_size,x=x,y=y)
if verbosity == NONE
showTime = typemax(Float64)
elseif verbosity <= LOW
showTime = 50
elseif verbosity <= STD
showTime = 1
elseif verbosity <= HIGH
showTime = 0.5
else
showTime = 0.2
end
@showprogress dt=showTime desc="Training the Neural Network..." for t in 1:epochs
batches = batch(n,batch_size,sequential=sequential,rng=rng)
n_batches = length(batches)
#if t == 1 # removed otherwise the array of losses/pars would be nepochs+1
# if (verbosity >= STD) push!(ϵ_epochs,ϵ_epoch); end
# if (verbosity > STD) push!(θ_epochs,θ_epoch); end
#end
for (i,batch) in enumerate(batches)
xbatch = x[batch, :]
ybatch = y[batch, :]
θ = get_params(nn)
# remove @spawn and fetch (on next row) to get single thread code
# note that there is no random number issue here..
#gradients = @spawn get_gradient(nn,xbatch,ybatch)
#sumGradient = sum(fetch(gradients))
gradients = get_batchgradient(nn,xbatch,ybatch)
sumGradient = sum(gradients)
▽ = sumGradient / length(batch)
#▽ = gradDiv.(gradSum([get_gradient(nn,xbatch[j,:],ybatch[j,:]) for j in 1:batch_size]), batch_size)
#println("****foooo")
#println(▽)
res = single_update!(θ,▽;n_epoch=t+nepochs_ran,n_batch=i,n_batches=n_batches,xbatch=xbatch,ybatch=ybatch,opt_alg=opt_alg)
set_params!(nn,res.θ)
cbOut = cb(nn,xbatch,ybatch,x,y,n=n,n_batches=n_batches,epochs=epochs,epochs_ran=nepochs_ran,verbosity=verbosity,n_epoch=t,n_batch=i)
if(res.stop==true || cbOut==true)
nn.trained = true
return (epochs=t,ϵ_epochs=ϵ_epochs,θ_epochs=θ_epochs)
end
end
if (verbosity >= STD)
ϵ_epoch_l = ϵ_epoch
ϵ_epoch = loss(nn,x,y)
push!(ϵ_epochs,ϵ_epoch);
end
if (verbosity > STD)
θ_epoch_l = θ_epoch
θ_epoch = get_params(nn)
push!(θ_epochs,θ_epoch); end
end
if (verbosity > NONE)
if verbosity > LOW
ϵ_epoch = loss(nn,x,y)
end
println("Training of $epochs epoch completed. Final epoch error: $(ϵ_epoch).");
end
nn.trained = true
return (epochs=epochs,ϵ_epochs=ϵ_epochs,θ_epochs=θ_epochs)
end
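#= Illustrative sketch (deprecated low-level API, unexported from BetaML v0.9): training
the `nn` of the earlier sketches directly with `train!`. Prefer `NeuralNetworkEstimator`.
res = train!(nn, x, y; epochs=50, batch_size=4, opt_alg=ADAM(), verbosity=NONE)
res.epochs      # number of epochs actually ran
res.ϵ_epochs    # per-epoch average loss (recorded only with verbosity >= STD)
=#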
"""
single_update!(θ,▽;n_epoch,n_batch,n_batches,xbatch,ybatch,opt_alg)
Perform the parameters update based on the average batch gradient.
# Parameters:
- `θ`: Current parameters
- `▽`: Average gradient of the batch
- `n_epoch`: Count of current epoch
- `n_batch`: Count of current batch
- `n_batches`: Number of batches per epoch
- `xbatch`: Data associated to the current batch
- `ybatch`: Labels associated to the current batch
- `opt_alg`: The Optimisation algorithm to use for the update
# Notes:
- This function is overridden so that each optimisation algorithm implements its
own version
- Most parameters are not used by any optimisation algorithm. They are provided
to support the largest possible class of optimisation algorithms
- Some optimisation algorithms may change their internal structure in this function
"""
function single_update!(θ,▽;n_epoch,n_batch,n_batches,xbatch,ybatch,opt_alg::OptimisationAlgorithm)
return single_update!(θ,▽,opt_alg;n_epoch=n_epoch,n_batch=n_batch,n_batches=n_batches,xbatch=xbatch,ybatch=ybatch)
end
function single_update!(θ,▽,opt_alg::OptimisationAlgorithm;n_epoch,n_batch,n_batches,xbatch,ybatch)
error("single_update!() not implemented for this optimisation algorithm")
end
"""
init_optalg!(opt_alg;θ,batch_size,x,y)
Initialize the optimisation algorithm
# Parameters:
- `opt_alg`: The Optimisation algorithm to use
- `θ`: Current parameters
- `batch_size`: The size of the batch
- `x`: The training (input) data
- `y`: The training "labels" to match
- `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- Only a few optimisers need this function and consequently override it. By default it does nothing, so if you write your own optimiser and don't need to initialise it, you don't have to override this method
"""
init_optalg!(opt_alg::OptimisationAlgorithm;θ,batch_size,x,y,rng = Random.GLOBAL_RNG) = nothing
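#= Illustrative sketch (hypothetical, not part of the package): a minimal custom
optimiser. It only needs to subtype `OptimisationAlgorithm` and implement
`single_update!` (and optionally `init_optalg!`). Here: plain gradient descent with a
fixed learning rate, relying on the arithmetic defined on `Learnable`.
struct FixedRateGD <: OptimisationAlgorithm
    η::Float64
    FixedRateGD(;η=0.01) = new(η)
end
function single_update!(θ,▽,opt_alg::FixedRateGD;n_epoch,n_batch,n_batches,xbatch,ybatch)
    return (θ = θ - ▽ .* opt_alg.η, stop=false)
end
=#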
#=
if rShuffle
# random shuffle x and y
ridx = shuffle(1:size(x)[1])
x = x[ridx, :]
y = y[ridx , :]
end
ϵ = 0
#η = dyn_η ? 1/(1+t) : η
ηₜ = η(t)*λ
for i in 1:size(x)[1]
xᵢ = x[i,:]'
yᵢ = y[i,:]'
W = get_params(nn)
dW = get_gradient(nn,xᵢ,yᵢ)
newW = gradientDescentSingleUpdate(W,dW,ηₜ)
set_params!(nn,newW)
ϵ += loss(nn,xᵢ,yᵢ)
end
if nMsgs != 0 && (t % ceil(epochs/nMsgs) == 0 || t == 1 || t == epochs)
println("Avg. error after epoch $t : $(ϵ/size(x)[1])")
end
if abs(ϵl/size(x)[1] - ϵ/size(x)[1]) < (tol * abs(ϵl/size(x)[1]))
if nMsgs != 0
println((tol * abs(ϵl/size(x)[1])))
println("*** Avg. error after epoch $t : $(ϵ/size(x)[1]) (convergence reached")
end
converged = true
break
else
ϵl = ϵ
end
end
if nMsgs != 0 && converged == false
println("*** Avg. error after epoch $epochs : $(ϵ/size(x)[1]) (convergence not reached)")
end
nn.trained = true
end
=#
# ------------------------------------------------------------------------------
# V2 Api
#$([println(\"- $(i)\" for i in subtypes(AbstractLayer)])
# $(subtypes(AbstractLayer))
#
"""
**`$(TYPEDEF)`**
Hyperparameters for the `Feedforward` neural network model
## Parameters:
$(FIELDS)
To see the available layers, type `subtypes(AbstractLayer)` and then `?LayerName` for information on how to use each layer.
"""
Base.@kwdef mutable struct NeuralNetworkE_hp <: BetaMLHyperParametersSet
"Array of layer objects [def: `nothing`, i.e. basic network]. See `subtypes(BetaML.AbstractLayer)` for supported layers"
layers::Union{Array{AbstractLayer,1},Nothing} = nothing
"""Loss (cost) function [def: `squared_cost`]
It must always assume y and ŷ as (n x d) matrices, if needed using `dropdims` inside.
"""
loss::Union{Nothing,Function} = squared_cost
"Derivative of the loss function [def: `dsquared_cost` if `loss==squared_cost`, `nothing` otherwise, i.e. use the derivative of the squared cost or autodiff]"
dloss::Union{Function,Nothing} = nothing
"Number of epochs, i.e. passes through the whole training sample [def: `200`]"
epochs::Int64 = 200
"Size of each individual batch [def: `16`]"
batch_size::Int64 = 16
"The optimisation algorithm to update the gradient at each batch [def: `ADAM()`]"
opt_alg::OptimisationAlgorithm = ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and, if needed, change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges = Dict("epochs"=>[50,100,150],"batch_size"=>[2,4,8,16,32],"opt_alg"=>[SGD(λ=2),SGD(λ=1),SGD(λ=3),ADAM(λ=0.5),ADAM(λ=1),ADAM(λ=0.25)], "shuffle"=>[false,true]),multithreads=false)
end
"""
NeuralNetworkE_options
A struct defining the options used by the Feedforward neural network model
## Parameters:
$(FIELDS)
"""
Base.@kwdef mutable struct NeuralNetworkE_options
"Cache the results of the fitting stage, as to allow predict(mod) [default: `true`]. Set it to `false` to save memory for large data."
cache::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"The verbosity level to be used in training or prediction (see [`Verbosity`](@ref)) [default: `STD`]
"
verbosity::Verbosity = STD
"A callback function to provide information during training [def: `fitting_info`]"
cb::Function=fitting_info
"Option for hyper-parameters autotuning [def: `false`, i.e. no autotuning performed]. If activated, autotuning is performed on the first `fit!()` call. Control auto-tuning through the option `tunemethod` (see the model hyper-parameters)"
autotune::Bool = false
"Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
"
rng::AbstractRNG = Random.GLOBAL_RNG
end
Base.@kwdef mutable struct NeuralNetworkEstimator_lp <: BetaMLLearnableParametersSet
nnstruct::Union{Nothing,NN} = nothing
end
"""
**`NeuralNetworkEstimator`**
A "feedforward" (but also multi-branch) neural network (supervised).
For the parameters see [`NeuralNetworkE_hp`](@ref) and for the training options [`NeuralNetworkE_options`](@ref) (we have a few more options for this specific estimator).
# Notes:
- data must be numerical
- the label can be a _n-records_ vector or a _n-records_ by _n-dimensions_ matrix, but the result is always a matrix.
- For one-dimension regressions drop the unnecessary dimension with `dropdims(ŷ,dims=2)`
- For classification tasks the columns should normally be interpreted as the probabilities for each category
# Examples:
- Classification...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> ohmod = OneHotEncoder()
A OneHotEncoder BetaMLModel (unfitted)
julia> y_oh = fit!(ohmod,y)
6×2 Matrix{Bool}:
1 0
0 1
0 1
0 1
0 1
1 0
julia> layers = [DenseLayer(2,6),DenseLayer(6,2),VectorFunctionLayer(2,f=softmax)];
julia> m = NeuralNetworkEstimator(layers=layers,opt_alg=ADAM(),epochs=300,verbosity=LOW)
NeuralNetworkEstimator - A Feed-forward neural network (unfitted)
julia> ŷ_prob = fit!(m,X,y_oh)
***
*** Training for 300 epochs with algorithm ADAM.
Training.. avg ϵ on (Epoch 1 Batch 1): 0.4116936481380642
Training of 300 epoch completed. Final epoch error: 0.44308719831108734.
6×2 Matrix{Float64}:
0.853198 0.146802
0.0513715 0.948629
0.0894273 0.910573
0.0367079 0.963292
0.00548038 0.99452
0.808334 0.191666
julia> ŷ = inverse_predict(ohmod,ŷ_prob)
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
"a"
```
- Regression...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = 2 .* X[:,1] .- X[:,2] .+ 3;
julia> layers = [DenseLayer(2,6),DenseLayer(6,6),DenseLayer(6,1)];
julia> m = NeuralNetworkEstimator(layers=layers,opt_alg=ADAM(),epochs=3000,verbosity=LOW)
NeuralNetworkEstimator - A Feed-forward neural network (unfitted)
julia> ŷ = fit!(m,X,y);
***
*** Training for 3000 epochs with algorithm ADAM.
Training.. avg ϵ on (Epoch 1 Batch 1): 33.30063874270561
Training of 3000 epoch completed. Final epoch error: 34.61265465430473.
julia> hcat(y,ŷ)
6×2 Matrix{Float64}:
4.1 4.11015
-16.5 -16.5329
-13.8 -13.8381
-18.4 -18.3876
-27.2 -27.1667
2.7 2.70542
```
"""
mutable struct NeuralNetworkEstimator <: BetaMLSupervisedModel
hpar::NeuralNetworkE_hp
opt::NeuralNetworkE_options
par::Union{Nothing,NeuralNetworkEstimator_lp}
cres::Union{Nothing,AbstractArray}
fitted::Bool
info::Dict{String,Any}
end
function NeuralNetworkEstimator(;kwargs...)
m = NeuralNetworkEstimator(NeuralNetworkE_hp(),NeuralNetworkE_options(),NeuralNetworkEstimator_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
# Special correction for NeuralNetworkE_hp
kwkeys = keys(kwargs) #in(2,[1,2,3])
#if !in(:dloss,kwkeys) # if dloss in not explicitly provided
# if (in(:loss,kwkeys) && kwargs[:loss] == squared_cost ) || # loss is explicitly provided and it is equal to squared_loss
# (!in(:loss,kwkeys) ) # (or) loss in not explicitly provided
# m.hpar.dloss = dsquared_cost
# end
#end
if !in(:dloss,kwkeys) # if dloss is not explicitly provided
m.hpar.dloss = match_known_derivatives(m.hpar.loss)
end
return m
end
function fit!(m::NeuralNetworkEstimator,X,Y)
(m.fitted) || autotune!(m,(X,Y))
# Parameter alias..
layers = m.hpar.layers
loss = m.hpar.loss
dloss = m.hpar.dloss
epochs = m.hpar.epochs
batch_size = m.hpar.batch_size
opt_alg = m.hpar.opt_alg
shuffle = m.hpar.shuffle
cache = m.opt.cache
descr = m.opt.descr
verbosity = m.opt.verbosity
cb = m.opt.cb
rng = m.opt.rng
fitted = m.fitted
nR,nD = size(X)
nRy, nDy = size(Y,1), size(Y,2)
nR == nRy || error("X and Y have different number of records (rows)")
if !fitted
if layers == nothing
# let's see if y is continuous, all positive or all in [0,1] in order to choose the last layer accordingly
allPos = all(Y .>= 0.0)
allSum1 = all(sum(Y,dims=2) .≈ 1.0)
allProbs = allPos && allSum1 && nDy >1
if nDy == 1 || allProbs
innerSize = (nD <= 4) ? Int(round(nD*2)) : Int(round(nD*1.5))
elseif nDy < 5
innerSize = (nD <= 4) ? Int(round(nD*2*nDy/1.5)) : Int(round(nD*1.5*nDy/1.5))
elseif nDy < 10
innerSize = Int(round(nD*1.3*nDy/3))
else
innerSize = Int(round(nD*1.3*log(2,nDy)))
end
#innerSize = nDy < 10 ? Int(round(nD*2)) : Int(round(nD*1.3))
#println("nD: $nD \t nDy: $nDy")
#println(innerSize)
l1 = DenseLayer(nD,innerSize, f=relu, df=drelu, rng=rng)
l2 = DenseLayer(innerSize,innerSize, f=relu, df=drelu, rng=rng)
if !allPos
l3 = DenseLayer(innerSize,nDy, f=identity, df=didentity, rng=rng)
layers = [l1,l2,l3]
elseif allPos && ! allProbs
l3 = DenseLayer(innerSize,nDy, f=relu, df=drelu, rng=rng)
layers = [l1,l2,l3]
else
l3 = DenseLayer(innerSize,nDy, f=relu, df=drelu, rng=rng)
l4 = VectorFunctionLayer(nDy,f=softmax)
layers = [l1,l2,l3,l4]
end
m.hpar.layers = layers
end
# Check that the first layer has the dimensions of X and the last layer has the output dimensions of Y
nn_isize_tuple = size(layers[1])[1]
#println(nn_isize_tuple)
nn_osize_tuple = size(layers[end])[2]
#length(nn_isize_tuple) == 1 || error("The input of a neural network should always be a single dimensional vector. Use eventually `ReshaperLayer` to reshape it to a vector.")
length(nn_osize_tuple) == 1 || error("The last neural network layer should always return a single-dimensional vector. If needed, use `ReshaperLayer` to reshape it to a vector.")
nn_isize = nn_isize_tuple[1]
nn_osize = nn_osize_tuple[1]
nn_isize == nD || error("The first layer of the network must have the ndims of the input data ($nD) instead of $(nn_isize).")
nn_osize == nDy || error("The last layer of the network must have the ndims of the output data ($nDy) instead of $(nn_osize). For classification tasks, this is normally the number of possible categories.")
m.par = NeuralNetworkEstimator_lp(NN(deepcopy(layers),loss,dloss,false,descr))
m.info["nepochs_ran"] = 0
m.info["loss_per_epoch"] = Float64[]
m.info["par_per_epoch"] = []
m.info["xndims"] = nD
m.info["yndims"] = nDy
#m.info["fitted_records"] = O
end
nnstruct = m.par.nnstruct
out = train!(nnstruct,X,Y; epochs=epochs, batch_size=batch_size, sequential=!shuffle, verbosity=verbosity, cb=cb, opt_alg=opt_alg,nepochs_ran=m.info["nepochs_ran"],rng = rng)
m.info["nepochs_ran"] += out.epochs
append!(m.info["loss_per_epoch"],out.ϵ_epochs)
append!(m.info["par_per_epoch"],out.θ_epochs)
m.info["xndims"] = nD
m.info["fitted_records"] = nR
m.info["nLayers"] = length(nnstruct.layers)
m.info["nPar"] = get_nparams(m.par.nnstruct)
if cache
ŷ = predict(nnstruct,X)
if ndims(ŷ) > 1 && size(layers[end])[2][1] == 1
m.cres = dropdims(ŷ,dims=2)
else
m.cres = ŷ
end
end
m.fitted = true
m.par.nnstruct.trained = true
return cache ? m.cres : nothing
end
function predict(m::NeuralNetworkEstimator,X)
ŷ = predict(m.par.nnstruct,X)
nn_osize = size(m.par.nnstruct.layers[end])[2][1]
if ndims(ŷ) > 1 && nn_osize == 1
return dropdims(ŷ,dims=2)
else
return ŷ
end
end
function show(io::IO, ::MIME"text/plain", m::NeuralNetworkEstimator)
if m.fitted == false
print(io,"NeuralNetworkEstimator - A Feed-forward neural network (unfitted)")
else
print(io,"NeuralNetworkEstimator - A Feed-forward neural network (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::NeuralNetworkEstimator)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
println(io,"NeuralNetworkEstimator - A $(length(m.hpar.layers))-layers feedforward neural network (unfitted)")
println(io,"Loss function:")
println(io,m.hpar.loss)
println(io,"Optimisation algorithm:")
println(io,m.hpar.opt_alg)
println(io,"Layers:")
println(io,"#\t # In \t\t # Out \t\t Type")
for (i,l) in enumerate(m.hpar.layers)
shapes = size(l)
println(io,"$i \t $(shapes[1]) \t\t $(shapes[2]) \t\t $(typeof(l)) ")
end
else
println(io,"NeuralNetworkEstimator - A $(m.info["xndims"])-dimensions $(m.info["nLayers"])-layers feedforward neural network (fitted on $(m.info["fitted_records"]) records)")
println(io,"Cost function:")
println(io,m.hpar.loss)
println(io,"Optimisation algorithm:")
println(io,m.hpar.opt_alg)
println(io, "Layers:")
println(io, "#\t # In \t\t # Out \t\t Type")
for (i,l) in enumerate(m.par.nnstruct.layers)
shapes = size(l)
println(io, "$i \t $(shapes[1]) \t\t $(shapes[2]) \t\t $(typeof(l)) ")
end
println(io,"Output of `info(model)`:")
for (k,v) in info(m)
print(io,"- ")
print(io,k)
print(io,":\t")
println(io,v)
end
end
end
end # end module
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3671 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# ------------------------------------------------------------------------------
# SGD
"""
SGD(;η=t -> 1/(1+t), λ=2)
Stochastic Gradient Descent algorithm (default)
# Fields:
- `η`: Learning rate, as a function of the current epoch [def: t -> 1/(1+t)]
- `λ`: Multiplicative constant to the learning rate [def: 2]
"""
struct SGD <: OptimisationAlgorithm
η::Function
λ::Float64
function SGD(;η=t -> 1/(1+t), λ=2)
return new(η,λ)
end
end
function single_update!(θ,▽,opt_alg::SGD;n_epoch,n_batch,n_batches,xbatch,ybatch)
η = opt_alg.η(n_epoch) * opt_alg.λ
#newθ = gradSub.(θ,gradMul.(▽,η))
θ = θ - ▽ .* η
#newθ = gradientDescentSingleUpdate(θ,▽,η)
return (θ=θ,stop=false)
end
#gradientDescentSingleUpdate(θ::Number,▽::Number,η) = θ .- (η .* ▽)
#gradientDescentSingleUpdate(θ::AbstractArray,▽::AbstractArray,η) = gradientDescentSingleUpdate.(θ,▽,Ref(η))
#gradientDescentSingleUpdate(θ::Tuple,▽::Tuple,η) = gradientDescentSingleUpdate.(θ,▽,Ref(η))
#epochs=1000, η=t -> 1/(1+t), λ=1, rShuffle=true, nMsgs=10, tol=0
# ------------------------------------------------------------------------------
# ADAM
#
"""
ADAM(;η, λ, β₁, β₂, ϵ)
The [ADAM](https://arxiv.org/pdf/1412.6980.pdf) algorithm, an adaptive moment estimation optimiser.
# Fields:
- `η`: Learning rate (stepsize, α in the paper), as a function of the current epoch [def: t -> 0.001 (i.e. fixed)]
- `λ`: Multiplicative constant to the learning rate [def: 1]
- `β₁`: Exponential decay rate for the first moment estimate [range: ∈ [0,1], def: 0.9]
- `β₂`: Exponential decay rate for the second moment estimate [range: ∈ [0,1], def: 0.999]
- `ϵ`: Epsilon value to avoid division by zero [def: 10^-8]
"""
mutable struct ADAM <: OptimisationAlgorithm
η::Function
λ::Float64
β₁::Float64
β₂::Float64
ϵ::Float64
m::Vector{Learnable}
v::Vector{Learnable}
function ADAM(;η=t -> 0.001, λ=1.0, β₁=0.9, β₂=0.999, ϵ=1e-8)
return new(η,λ,β₁,β₂,ϵ,[],[])
end
end
"""
init_optalg!(opt_alg::ADAM;θ,batch_size,x,y,rng)
Initialize the ADAM algorithm with the parameters m and v as zeros and check parameter bounds
"""
function init_optalg!(opt_alg::ADAM;θ,batch_size,x,y,rng = Random.GLOBAL_RNG)
opt_alg.m = θ .- θ # setting to zeros
opt_alg.v = θ .- θ # setting to zeros
if opt_alg.β₁ <= 0 || opt_alg.β₁ >= 1 @error "The parameter β₁ must be ∈ [0,1]" end
if opt_alg.β₂ <= 0 || opt_alg.β₂ >= 1 @error "The parameter β₂ must be ∈ [0,1]" end
end
function single_update!(θ,▽,opt_alg::ADAM;n_epoch,n_batch,n_batches,xbatch,ybatch)
β₁,β₂,ϵ = opt_alg.β₁, opt_alg.β₂, opt_alg.ϵ
η = opt_alg.η(n_epoch)*opt_alg.λ
t = (n_epoch-1)*n_batches+n_batch
opt_alg.m = β₁ .* opt_alg.m .+ (1 .- β₁) .* ▽
opt_alg.v = β₂ .* opt_alg.v .+ (1 .- β₂) .* (▽ .* ▽)
#opt_alg.v = [β₂ .* opt_alg.v.data[i] .+ (1-β₂) .* (▽.data[i] .* ▽.data[i]) for i in 1:size(opt_alg.v.data)]
m̂ = opt_alg.m ./ (1 .- β₁.^t)
v̂ = opt_alg.v ./ (1 .- β₂.^t)
θ = θ .- (η .* m̂) ./(sqrt.(v̂) .+ ϵ)
return (θ=θ,stop=false)
end
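#= Illustrative usage sketch: optimisers are constructed with keyword arguments and
passed to the training function or to `NeuralNetworkEstimator`, e.g.:
opt1 = SGD(η = t -> 1/(1+t), λ = 2)                 # the default decaying schedule
opt2 = ADAM(η = t -> 0.0005, β₁ = 0.9, β₂ = 0.999)  # a smaller, fixed, stepsize
=#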
# ------------------------------------------------------------------------------
# DebugOptAlg
struct DebugOptAlg <: OptimisationAlgorithm
dString::String
function DebugOptAlg(;dString="Hello World, I am a Debugging Algorithm. I did nothing to your Net.")
return new(dString)
end
end
function single_update!(θ,▽,opt_alg::DebugOptAlg;n_epoch,n_batch,batch_size,ϵ_epoch,ϵ_epoch_l)
println(opt_alg.dString)
return (θ=θ,stop=false)
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 17848 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Experimental
"""
$(TYPEDEF)
A generic N+1 (channels) dimensional convolutional layer
**EXPERIMENTAL**: Still too slow for practical applications
This convolutional layer has two constructors, one with the form `ConvLayer(input_size,kernel_size,nchannels_in,nchannels_out)`, and an alternative one as `ConvLayer(input_size_with_channel,kernel_size,nchannels_out)`.
If the input is a vector, use a [`ReshaperLayer`](@ref) in front.
# Fields:
$(TYPEDFIELDS)
"""
struct ConvLayer{ND,NDPLUS1,NDPLUS2,TF <: Function, TDF <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
"Input size (including nchannel_in as last dimension)"
input_size::SVector{NDPLUS1,Int64}
"Output size (including nchannel_out as last dimension)"
output_size::SVector{NDPLUS1,Int64}
"Weight tensor (aka \"filter\" or \"kernel\") with respect to the input from previous layer or data (kernel_size array augmented by the nchannels_in and nchannels_out dimensions)"
weight::Array{WET,NDPLUS2}
"Whether to use (and learn) a bias weight [def: true]"
usebias::Bool
"Bias (nchannels_out array)"
bias::Array{WET,1}
"Padding (initial)"
padding_start::SVector{ND,Int64}
"Padding (ending)"
padding_end::SVector{ND,Int64}
"Stride"
stride::SVector{ND,Int64}
"Number of dimensions (excluding input and output channels)"
ndims::Int64
"Activation function"
f::TF
"Derivative of the activation function"
df::TDF
"x ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
x_ids::Vector{SVector{NDPLUS1,Int64}}
"y ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
y_ids::Vector{SVector{NDPLUS1,Int64}}
"w ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
w_ids::Vector{SVector{NDPLUS2,Int64}}
"A y-dims array of vectors of ids of the x(s) contributing to the given y"
y_to_x_ids::Array{Vector{NTuple{NDPLUS1,Int64}},NDPLUS1}
"A y-dims array of vectors of the corresponding w(s) contributing to the given y"
y_to_w_ids::Array{Vector{NTuple{NDPLUS2,Int64}},NDPLUS1}
@doc """
$(TYPEDSIGNATURES)
Instantiate a new nD-dimensional, possibly multichannel ConvolutionalLayer
The input data is either a column vector (in which case it is reshaped) or an array of `input_size` augmented by the `n_channels` dimension; the output size depends on the `input_size`, `kernel_size`, `padding` and `striding` but always has `nchannels_out` as its last dimension.
# Positional arguments:
* `input_size`: Shape of the input layer (integer for 1D convolution, tuple otherwise). Do not consider the channels number here.
* `kernel_size`: Size of the kernel (aka filter or learnable weights) (integer for 1D or hypercube kernels or nD-sized tuple for asymmetric kernels). Do not consider the channels number here.
* `nchannels_in`: Number of channels in input
* `nchannels_out`: Number of channels in output
# Keyword arguments:
* `stride`: "Steps" to move the convolution window across the various tensor dimensions [def: `ones`]
* `padding`: Integer or 2-elements tuple of tuples of the starting and ending padding across the various dimensions [def: `nothing`, i.e. set the padding required to keep the same dimensions in output (with stride==1)]
* `f`: Activation function [def: `identity`]
* `df`: Derivative of the activation function [default: try to match a known function, AD otherwise. Use `nothing` to force AD]
* `kernel_eltype`: Kernel eltype [def: `Float64`]
* `kernel_init`: Initial weights with respect to the input [default: Xavier initialisation]. If explicitly provided, it should be a multidimensional array of `kernel_size` augmented by the `nchannels_in` and `nchannels_out` dimensions
* `bias_init`: Initial weights with respect to the bias [default: Xavier initialisation]. If given it should be a `nchannels_out` vector of scalars.
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- Xavier initialization is sampled from a `Uniform` distribution between `⨦ sqrt(6/(prod(input_size)*nchannels_in))`
- to retrieve the output size of the layer, use `size(ConvLayer)[2]`. The output size on each dimension _d_ (except the last one that is given by `nchannels_out`) is given by the following formula (floored): `output_size[d] = 1 + (input_size[d]+2*padding[d]-kernel_size[d])/stride[d]`
- with strides higher than 1, the automatic padding is set to keep `out_size = ceil(in_size/stride)`
"""
function ConvLayer(input_size,kernel_size,nchannels_in,nchannels_out;
stride = (ones(Int64,length(input_size))...,),
rng = Random.GLOBAL_RNG,
padding = nothing, # zeros(Int64,length(input_size)),
kernel_eltype = Float64,
kernel_init = xavier_init(
prod(input_size)*nchannels_in,
prod(input_size)*nchannels_in,
(kernel_size...,nchannels_in,nchannels_out),
rng=rng,
eltype=kernel_eltype),
usebias = true,
bias_init = usebias ? xavier_init(
prod(input_size)*nchannels_in,
prod(input_size)*nchannels_in,
nchannels_out,
rng=rng,
eltype=kernel_eltype) : zeros(kernel_eltype,nchannels_out),
f = identity,
df = match_known_derivatives(f))
# be sure all are tuples of right dimension...
if typeof(input_size) <: Integer
input_size = (input_size,)
end
nD = length(input_size)
if typeof(kernel_size) <: Integer
kernel_size = ([kernel_size for d in 1:nD]...,)
end
length(input_size) == length(kernel_size) || error("Number of dimensions of the kernel must equate number of dimensions of input data")
if typeof(stride) <: Integer
stride = ([stride for d in 1:nD]...,)
end
if typeof(padding) <: Integer
padding_start = ([padding for d in 1:nD]...,)
padding_end = ([padding for d in 1:nD]...,)
elseif isnothing(padding) # compute padding to keep same size/stride if not provided
target_out_size = [Int(ceil(input_size[d]/stride[d])) for d in 1:length(input_size)]
#target_out_size = [input_size[d]/stride[d] for d in 1:length(input_size)]
#println(target_out_size)
padding_total = [(target_out_size[d]-1)*stride[d] - input_size[d]+kernel_size[d] for d in 1:length(input_size)]
#println(padding_total)
padding_start = Int.(ceil.(padding_total ./ 2))
padding_end = padding_total .- padding_start
else
padding_start = padding[1]
padding_end = padding[2]
end
nD == length(stride) || error("`stride` must be either a scalar or a tuple that equates the number of dimensions of input data")
nD == length(padding_start) == length(padding_end) || error("`padding` must be: (a) the value `nothing` for automatic computation, (b) a scalar for same padding on all dimensions or (c) a 2-elements tuple where each elements are tuples that equate the number of dimensions of input data for indicating the padding to set in front of the data and the padding to set at the ending of the data")
#println(typeof(weight_init))
#println(weight_init)
#new{nD,nD+1}(weight_init,usebias,bias_init,padding,stride,nD,f,df)
# computation of the output size
#nchannels_out = kernel_size[end]
#in_size = (input_size...,nchannels_in)
out_size = ([1 + Int(floor((input_size[d]+padding_start[d]+padding_end[d]-size(kernel_init,d))/stride[d])) for d in 1:nD]...,nchannels_out)
#println(size(layer.weight[1],2))
y_to_x_ids = [Vector{NTuple{nD+1,Int64}}() for i in CartesianIndices(out_size)]
y_to_w_ids = [Vector{NTuple{nD+2,Int64}}() for i in CartesianIndices(out_size)]
#println(nchannels_out)
new{nD,nD+1,nD+2,typeof(f),typeof(df),kernel_eltype}((input_size...,nchannels_in),out_size,kernel_init,usebias,bias_init,padding_start,padding_end,stride,nD,f,df,[],[],[],y_to_x_ids,y_to_w_ids)
end
end
"""
$(TYPEDSIGNATURES)
Alternative constructor for a `ConvLayer` where the number of channels in input is specified as a further dimension in the input size instead of as a separate parameter, so that one can use `size(previous_layer)[2]` if one wishes.
For arguments and default values see the documentation of the main constructor.
"""
function ConvLayer(input_size_with_channel,kernel_size,nchannels_out;
stride = (ones(Int64,length(input_size_with_channel)-1)...,),
rng = Random.GLOBAL_RNG,
padding = nothing, # zeros(Int64,length(input_size)),
kernel_eltype = Float64,
kernel_init = xavier_init(
prod(input_size_with_channel),
prod(input_size_with_channel),
(kernel_size...,input_size_with_channel[end],nchannels_out),
rng=rng,
eltype=kernel_eltype),
usebias = true,
bias_init = usebias ? xavier_init(
prod(input_size_with_channel),
prod(input_size_with_channel),
nchannels_out,
rng=rng,
eltype=kernel_eltype) : zeros(kernel_eltype,nchannels_out),
f = identity,
df = match_known_derivatives(f))
return ConvLayer(input_size_with_channel[1:end-1],kernel_size,input_size_with_channel[end],nchannels_out; stride=stride,rng=rng,padding=padding, kernel_eltype = kernel_eltype,kernel_init=kernel_init,usebias=usebias,bias_init=bias_init,f=f,df=df)
end
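#= Illustrative sketch: a 2D convolutional layer for 8×8 single-channel inputs with
four 3×3 kernels. With the default padding and stride=1 the spatial size is preserved,
so `size(l)` should return ((8,8,1),(8,8,4)).
l = ConvLayer((8,8), (3,3), 1, 4, f=relu)
# equivalent, passing the input channels as part of the input size:
l2 = ConvLayer((8,8,1), (3,3), 4, f=relu)
=#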
function preprocess!(layer::ConvLayer{ND,NDPLUS1,NDPLUS2}) where {ND,NDPLUS1,NDPLUS2}
if length(layer.x_ids) > 0
return # layer already prepocessed
end
input_size, output_size = size(layer)
nchannels_out = output_size[end]
nchannels_in = input_size[end]
convsize = input_size[1:end-1]
ndims_conv = ND
wsize = size(layer.weight)
ysize = output_size
#println(ysize)
# preallocating temp variables
w_idx = Array{Int64,1}(undef,NDPLUS2)
y_idx = Array{Int64,1}(undef,NDPLUS1)
w_idx_conv = Array{Int64,1}(undef,ND)
y_idx_conv = Array{Int64,1}(undef,ND)
idx_x_source_padded = Array{Int64,1}(undef,ND)
checkstart = Array{Bool,1}(undef,ND)
checkend = Array{Bool,1}(undef,ND)
x_idx = Array{Int64,1}(undef,NDPLUS1)
@inbounds for nch_in in 1:nchannels_in
#println("Processing in layer :", nch_in)
@inbounds for nch_out in 1:nchannels_out
#println("- processing out layer :", nch_out)
@inbounds for w_idx_conv in CartesianIndices( ((wsize[1:end-2]...),) )
w_idx_conv = Tuple(w_idx_conv)
w_idx = (w_idx_conv...,nch_in,nch_out)
@inbounds for y_idx_conv in CartesianIndices( ((ysize[1:end-1]...),) )
y_idx_conv = Tuple(y_idx_conv)
y_idx = (y_idx_conv...,nch_out)
#println("y_idx: ",y_idx)
#println("w_idx: ",w_idx)
#println("layer.stride: ",layer.stride)
#quit(1)
check = true
@inbounds for d in 1:ndims_conv
idx_x_source_padded[d] = w_idx_conv[d] + (y_idx_conv[d] - 1 ) * layer.stride[d]
checkstart[d] = idx_x_source_padded[d] > layer.padding_start[d]
checkend[d] = idx_x_source_padded[d] <= layer.padding_start[d] .+ convsize[d]
checkstart[d] && checkend[d] || begin check = false; break; end
end
check || continue
@inbounds @simd for d in 1:ndims_conv
x_idx[d] = idx_x_source_padded[d] - layer.padding_start[d]
end
x_idx[ndims_conv+1] = nch_in
#println("---")
#println("x_idx: ", x_idx)
#println("w_idx: ", w_idx)
#println("y_idx: ", y_idx)
push!(layer.x_ids,((x_idx...,)))
push!(layer.w_ids,w_idx)
push!(layer.y_ids,y_idx)
#println(x_idx)
#println(typeof(x_idx))
#println(y_idx)
#println(typeof(y_idx))
push!(layer.y_to_x_ids[y_idx...],(x_idx...,))
push!(layer.y_to_w_ids[y_idx...],(w_idx...,))
#de_dx_ch_in[idx_x_source...] += dϵ_dz_ch_out[dy_idx...] * w_ch_in_out[w_idx...]
end
end
end
end
end
#=
function _zComp!(y,layer::ConvLayer{ND,NDPLUS1,NDPLUS2},x) where {ND,NDPLUS1,NDPLUS2}
if ndims(x) == 1
reshape(x,size(layer)[1])
end
for idx in 1:length(layer.y_ids)
@inbounds y[layer.y_ids[idx]...] += x[layer.x_ids[idx]...] * layer.weight[layer.w_ids[idx]...]
end
if(layer.usebias)
output_size = size(y)
for ch_out in 1:output_size[end]
y_ch_out = selectdim(y,NDPLUS1,ch_out)
y_ch_out .+= layer.bias[ch_out]
end
end
return nothing
end
=#
function _zComp!(y,layer::ConvLayer{ND,NDPLUS1,NDPLUS2,TF,TDF,WET},x) where {ND,NDPLUS1,NDPLUS2,TF,TDF,WET}
if ndims(x) == 1
reshape(x,size(layer)[1])
end
for y_idx in eachindex(y)
yi = zero(WET)
n = length(layer.y_to_x_ids[y_idx])
@inbounds for idx in 1:n
yi += x[layer.y_to_x_ids[y_idx][idx]...] * layer.weight[layer.y_to_w_ids[y_idx][idx]...]
end
y[y_idx] = yi
end
if(layer.usebias)
output_size = size(y)
for ch_out in 1:output_size[end]
y_ch_out = selectdim(y,NDPLUS1,ch_out)
y_ch_out .+= layer.bias[ch_out]
end
end
return nothing
end
function _dedxComp!(de_dx,layer::ConvLayer{ND,NDPLUS1,NDPLUS2},dϵ_dz) where {ND,NDPLUS1,NDPLUS2}
for idx in 1:length(layer.y_ids)
@inbounds de_dx[layer.x_ids[idx]...] += dϵ_dz[layer.y_ids[idx]...] * layer.weight[layer.w_ids[idx]...]
end
return nothing
end
function _dedwComp!(de_dw,layer::ConvLayer{ND,NDPLUS1,NDPLUS2},dϵ_dz,x) where {ND,NDPLUS1,NDPLUS2}
for idx in 1:length(layer.y_ids)
@inbounds de_dw[layer.w_ids[idx]...] += dϵ_dz[layer.y_ids[idx]...] * x[layer.x_ids[idx]...]
end
return nothing
end
#= no advantages
function _zComp!(y,x,w,bias,y_ids,x_ids,w_ids,usebias)
for idx in 1:length(y_ids)
@inbounds y[y_ids[idx]...] += x[x_ids[idx]...] * w[w_ids[idx]...]
end
if(usebias)
output_size = size(y)
for ch_out in 1:output_size[end]
y_ch_out = selectdim(y,length(output_size),ch_out)
y_ch_out .+= bias[ch_out]
end
end
return nothing
end
function _zComp!(y,layer::ConvLayer{2,3,4},x) # where {ND,NDPLUS1,NDPLUS2}
if ndims(x) == 1
reshape(x,size(layer)[1])
end
for idx in 1:length(layer.y_ids)
@inbounds y[layer.y_ids[idx][1],layer.y_ids[idx][2],layer.y_ids[idx][3]] +=
x[layer.x_ids[idx][1],layer.x_ids[idx][2],layer.x_ids[idx][3]] *
layer.weight[layer.w_ids[idx][1],layer.w_ids[idx][2],layer.w_ids[idx][3],layer.w_ids[idx][4]]
end
if(layer.usebias)
output_size = size(y)
for ch_out in 1:output_size[end]
y_ch_out = selectdim(y,3,ch_out)
y_ch_out .+= layer.bias[ch_out]
end
end
return nothing
end
=#
"""
$(TYPEDSIGNATURES)
Compute forward pass of a ConvLayer
"""
function forward(layer::ConvLayer{ND,NDPLUS1,NDPLUS2,TF,TDF,WET},x) where {ND,NDPLUS1,NDPLUS2,TF,TDF,WET}
z = zeros(WET,size(layer)[2])
_zComp!(z,layer,x)
return layer.f.(z)
end
function backward(layer::ConvLayer{ND,NDPLUS1,NDPLUS2,TF,TDF,WET},x, next_gradient) where {ND,NDPLUS1,NDPLUS2,TF,TDF,WET}
_, output_size = size(layer)
z = zeros(WET,output_size)
_zComp!(z,layer,x)
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient
de_dx = zeros(WET, layer.input_size...)
_dedxComp!(de_dx,layer,dϵ_dz)
return de_dx
end
function get_params(layer::ConvLayer)
if layer.usebias
return Learnable((layer.weight,layer.bias))
else
return Learnable((layer.weight,))
end
end
function get_gradient(layer::ConvLayer{ND,NDPLUS1,NDPLUS2,TF,TDF,WET},x, next_gradient) where {ND,NDPLUS1,NDPLUS2,TF,TDF,WET}
_, output_size = size(layer)
z = zeros(WET, output_size)
_zComp!(z, layer,x)
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient
de_dw = zeros(WET,size(layer.weight))
_dedwComp!(de_dw,layer,dϵ_dz,x)
if layer.usebias
dbias = zeros(WET,length(layer.bias))
for bias_idx in 1:length(layer.bias)
nchannel_out = bias_idx
dϵ_dz_nchannelOut = selectdim(dϵ_dz,NDPLUS1,nchannel_out)
dbias[bias_idx] = sum(dϵ_dz_nchannelOut)
end
return Learnable((de_dw,dbias))
else
return Learnable((de_dw,))
end
end
function set_params!(layer::ConvLayer,w)
layer.weight .= w.data[1]
layer.usebias && (layer.bias .= w.data[2])
end
"""
$(TYPEDSIGNATURES)
Get the dimensions of the layer in terms of (dimensions in input, dimensions in output), including the channels as last dimension
"""
function size(layer::ConvLayer)
#nchannels_in = layer.input_size[end]
#nchannels_out = size(layer.weight)[end]
#in_size = (layer.input_size...,)
#out_size = ([1 + Int(floor((layer.input_size[d]+layer.padding_start[d]+layer.padding_end[d]-size(layer.weight,d))/layer.stride[d])) for d in 1:layer.ndims]...,nchannels_out)
#println(size(layer.weight[1],2))
#return (in_size,out_size)
return ((layer.input_size...,),(layer.output_size...,))
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 4249 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
$(TYPEDEF)
Representation of a layer in the network
# Fields:
* `w`: Weights matrix with respect to the input from the previous layer or data (n x n pr. layer)
* `wb`: Biases (n)
* `f`: Activation function
* `df`: Derivative of the activation function
"""
struct DenseLayer{TF <: Function, TDF <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
w::Array{WET,2}
wb::Array{WET,1}
f::TF
df::TDF
@doc """
$(TYPEDSIGNATURES)
Instantiate a new DenseLayer
# Positional arguments:
* `nₗ`: Number of nodes of the previous layer
* `n`: Number of nodes
# Keyword arguments:
* `w_eltype`: Eltype of the weights [def: `Float64`]
* `w`: Initial weights with respect to input [default: Xavier initialisation, dims = (n,nₗ)]
* `wb`: Initial weights with respect to bias [default: Xavier initialisation, dims = (n)]
* `f`: Activation function [def: `identity`]
* `df`: Derivative of the activation function [default: try to match with well-known derivatives, resort to AD if `f` is unknown]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- Xavier initialization = `rand(Uniform(-sqrt(6)/sqrt(nₗ+n),sqrt(6)/sqrt(nₗ+n)))`
- Specify `df=nothing` to explicitly use AD
"""
function DenseLayer(nₗ,n;rng = Random.GLOBAL_RNG,
w_eltype = Float64,
w = xavier_init(nₗ,n,rng=rng,eltype=w_eltype),
wb = xavier_init(nₗ,n,n,rng=rng,eltype=w_eltype),
f=identity,df=match_known_derivatives(f))
size(w) == (n,nₗ) || error("If manually provided, w should have size (n,nₗ)")
size(wb) == (n,) || error("If manually provided, wb should have size (n)")
# To be sure w is a matrix and wb a column vector..
w = reshape(w,n,nₗ)
wb = reshape(wb,n)
return new{typeof(f),typeof(df),w_eltype}(w,wb,f,df)
end
end
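#= Illustrative sketch: a fully-connected layer with 4 inputs, 6 outputs and a `relu`
activation, with reproducible weights thanks to an explicit RNG.
using Random
l = DenseLayer(4, 6, f=relu, rng=MersenneTwister(123))
size(l)               # ((4,),(6,))
forward(l, rand(4))   # 6-element output vector
=#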
function _zComp(layer::DenseLayer{TF,DTF,WET},x) where {TF, DTF, WET}
w = layer.w
wb = layer.wb
z = zeros(WET,size(w,1))
@inbounds for n in axes(w,1)
zn = zero(eltype(x))
for nl in axes(x,1) # @turbo
zn += w[n,nl] * x[nl]
end
zn += wb[n]
z[n] = zn
end
return z
end
function _zComp!(z,layer::DenseLayer{TF,DTF,WET},x) where {TF, DTF, WET}
@inbounds for n in axes(layer.w,1)
zn = zero(WET)
for nl in axes(x,1) # @turbo
zn += layer.w[n,nl] * x[nl]
end
z[n] += zn
z[n] += layer.wb[n]
end
return nothing
end
function forward(layer::DenseLayer{TF,DTF,WET},x) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x) #@avx layer.w * x + layer.wb #_zComp(layer,x) #layer.w * x + layer.wb # _zComp(layer,x) # layer.w * x + layer.wb # testd @avx
return layer.f.(z)
end
function backward(layer::DenseLayer{TF,DTF,WET},x,next_gradient) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x)
#z = _zComp(layer,x) #@avx layer.w * x + layer.wb #_zComp(layer,x) # layer.w * x + layer.wb # _zComp(layer,x) # @avx layer.w * x + layer.wb # tested @avx
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = dfz .* next_gradient # @turbo
dϵ_dI = layer.w' * dϵ_dz # @turbo # @avx
return dϵ_dI
end
function get_params(layer::DenseLayer)
return Learnable((layer.w,layer.wb))
end
function get_gradient(layer::DenseLayer{TF,DTF,WET},x,next_gradient) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x)
#z = _zComp(layer,x) #@avx layer.w * x + layer.wb # _zComp(layer,x) #layer.w * x + layer.wb # @avx
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = dfz .* next_gradient # @turbo
dϵ_dw = dϵ_dz * x' # @turbo # @avx
dϵ_dwb = dϵ_dz
return Learnable((dϵ_dw,dϵ_dwb))
end
function set_params!(layer::DenseLayer,w)
layer.w .= w.data[1]
layer.wb .= w.data[2]
end
function size(layer::DenseLayer)
w_size = size(layer.w')
return ((w_size[1],),(w_size[2],))
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3291 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
$(TYPEDEF)
Representation of a layer without bias in the network
# Fields:
* `w`: Weights matrix with respect to the input from the previous layer or data (n x n pr. layer)
* `f`: Activation function
* `df`: Derivative of the activation function
"""
struct DenseNoBiasLayer{TF <: Function, TDF <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
w::Array{WET,2}
f::TF
df::TDF
@doc """
$(TYPEDSIGNATURES)
Instantiate a new DenseNoBiasLayer
# Positional arguments:
* `nₗ`: Number of nodes of the previous layer
* `n`: Number of nodes
# Keyword arguments:
* `w_eltype`: Eltype of the weights [def: `Float64`]
* `w`: Initial weights with respect to input [default: Xavier initialisation, dims = (n,nₗ)]
* `f`: Activation function [def: `identity`]
* `df`: Derivative of the activation function [default: try to match with well-known derivatives, resort to AD if `f` is unknown]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- Xavier initialization = `rand(Uniform(-sqrt(6)/sqrt(nₗ+n),sqrt(6)/sqrt(nₗ+n)))`
"""
function DenseNoBiasLayer(nₗ,n;rng = Random.GLOBAL_RNG,
w_eltype = Float64,
w = xavier_init(nₗ,n,rng=rng,eltype=w_eltype),
f=identity,df=match_known_derivatives(f))
# To be sure w is a matrix..
w = reshape(w,n,nₗ)
return new{typeof(f),typeof(df),w_eltype}(w,f,df)
end
end
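#= Illustrative sketch: same usage as `DenseLayer`, just without the bias term.
l = DenseNoBiasLayer(4, 6, f=relu)
size(l)               # ((4,),(6,))
=#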
function _zComp(layer::DenseNoBiasLayer,x)
w = layer.w
z = zeros(eltype(x),size(w,1))
@inbounds for n in axes(w,1)
zn = zero(eltype(x))
@turbo for nl in axes(x,1)
zn += w[n,nl] * x[nl]
end
z[n] = zn
end
return z
end
function _zComp!(z,layer::DenseNoBiasLayer{TF,DTF,WET},x) where {TF, DTF, WET}
@inbounds for n in axes(layer.w,1)
zn = zero(WET)
@turbo for nl in axes(x,1)
zn += layer.w[n,nl] * x[nl]
end
z[n] += zn
end
return nothing
end
function forward(layer::DenseNoBiasLayer{TF,DTF,WET},x) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x)
return layer.f.(z)
end
function backward(layer::DenseNoBiasLayer{TF,DTF,WET},x,next_gradient) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x)
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient
dϵ_dI = @turbo layer.w' * dϵ_dz
return dϵ_dI
end
function get_params(layer::DenseNoBiasLayer)
return Learnable((layer.w,))
end
function get_gradient(layer::DenseNoBiasLayer{TF,DTF,WET},x,next_gradient) where {TF, DTF, WET}
z = zeros(WET,size(layer)[2][1])
_zComp!(z,layer,x)
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient
dϵ_dw = @turbo dϵ_dz * x'
return Learnable((dϵ_dw,))
end
function set_params!(layer::DenseNoBiasLayer,w)
layer.w .= w.data[1]
end
function size(layer::DenseNoBiasLayer)
w_size = size(layer.w')
return ((w_size[1],),(w_size[2],))
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3202 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
$(TYPEDEF)
Representation of a "group" of layers, each of which operates on different inputs (features) and acting as a single layer in the network.
# Fields:
- `layers`: The individual layers that compose this grouped layer
"""
struct GroupedLayer <: AbstractLayer
layers::Array{AbstractLayer,1}
@doc """
$(TYPEDSIGNATURES)
Instantiate a new GroupedLayer, a layer made up of several other layers stacked together in order to cover all the data dimensions but without connecting all the inputs to all the outputs as a single `DenseLayer` would do.
# Positional arguments:
- `layers`: The individual layers that compose this grouped layer
# Notes:
- can be used to create composable neural networks with multiple branches
- tested only with 1 dimensional layers. For convolutional networks use ReshaperLayers before and/or after.
"""
function GroupedLayer(layers)
return new(layers)
end
end
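#= Illustrative sketch: a grouped layer where the first 3 input dimensions go through
one dense layer and the remaining 2 through another, for a total of 4+3=7 outputs.
Useful to build multi-branch networks.
gl = GroupedLayer([DenseLayer(3,4), DenseLayer(2,3)])
size(gl)               # ((5,),(7,))
forward(gl, rand(5))   # 7-element output vector
=#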
function _get_n_layers_weights(layer::GroupedLayer)
return [length(get_params(l).data) for l in layer.layers ]
end
function forward(layer::GroupedLayer,x)
nL = length(layer.layers)
isizes = [size(l)[1][1] for l in layer.layers]
isizes_swapped = vcat(0,isizes)
return vcat([forward(layer.layers[i],selectdim(x,1,sum(isizes_swapped[1:i])+1: sum(isizes_swapped[1:i+1]))) for i in 1:nL]...)
end
function backward(layer::GroupedLayer,x,next_gradient)
nL = length(layer.layers)
isizes = [size(l)[1][1] for l in layer.layers]
isizes_swapped = vcat(0,isizes)
osizes = [size(l)[2][1] for l in layer.layers]
osizes_swapped = vcat(0,osizes)
return vcat([backward(layer.layers[i], # todo: attention here if the first layer has zero parameters!
selectdim(x,1,sum(isizes_swapped[1:i])+1 : sum(isizes_swapped[1:i+1])),
selectdim(next_gradient,1,sum(osizes_swapped[1:i])+1: sum(osizes_swapped[1:i+1]))
) for i in 1:nL]...) # dϵ_dI
end
function get_params(layer::GroupedLayer)
return Learnable((vcat([ [get_params(l).data...] for l in layer.layers]...)...,))
end
function get_gradient(layer::GroupedLayer,x,next_gradient)
nL = length(layer.layers)
isizes = [size(l)[1][1] for l in layer.layers]
isizes_swapped = vcat(0,isizes)
osizes = [size(l)[2][1] for l in layer.layers]
osizes_swapped = vcat(0,osizes)
return Learnable((vcat([ [get_gradient(layer.layers[i],
selectdim(x,1,sum(isizes_swapped[1:i])+1: sum(isizes_swapped[1:i+1])),
selectdim(next_gradient,1,sum(osizes_swapped[1:i])+1: sum(osizes_swapped[1:i+1]))
).data...] for i in 1:nL]...)...,)) # [dϵ_dw]
end
function set_params!(layer::GroupedLayer,w)
nWs = _get_n_layers_weights(layer)
nWs_swapped = vcat(0,nWs)
nL = length(layer.layers)
for i in 1:length(layer.layers)
set_params!(layer.layers[i],Learnable(w.data[sum(nWs_swapped[1:i])+1:sum(nWs_swapped[1:i+1])]))
end
end
function size(layer::GroupedLayer)
isize = sum([size(l)[1][1] for l in layer.layers])
osize = sum([size(l)[2][1] for l in layer.layers])
return ((isize,),(osize,))
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 12599 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Experimental. Works well, but still too slow for any practical application.
"""
$(TYPEDEF)
Representation of a pooling layer in the network (weightless)
**EXPERIMENTAL**: Still too slow for practical applications
In the middle between `VectorFunctionLayer` and `ScalarFunctionLayer`, it applies a function to the set of nodes defined by a sliding kernel.
# Fields:
$(TYPEDFIELDS)
"""
struct PoolingLayer{ND,NDPLUS1,NDPLUS2,TF <: Function, TDF <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
"Input size (including nchannel_in as last dimension)"
input_size::SVector{NDPLUS1,Int64}
"Output size (including nchannel_out as last dimension)"
output_size::SVector{NDPLUS1,Int64}
"kernel_size augmented by the nchannels_in and nchannels_out dimensions"
kernel_size::SVector{NDPLUS2,Int64}
"Padding (initial)"
padding_start::SVector{ND,Int64}
"Padding (ending)"
padding_end::SVector{ND,Int64}
"Stride"
stride::SVector{ND,Int64}
"Number of dimensions (excluding input and output channels)"
ndims::Int64
"Activation function"
f::TF
"Derivative of the activation function"
df::TDF #Union{Function,Nothing}
"x ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
#x_ids::Array{NTuple{NDPLUS1,Int32},1}
"y ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
#y_ids::Array{NTuple{NDPLUS1,Int32},1}
"w ids of the convolution (computed in `preprocess!` - itself called at the beginning of `train!`)"
#w_ids::Array{NTuple{NDPLUS2,Int32},1}
"A x-dims array of vectors of ids of y reached by the given x"
#x_to_y_ids::Array{Vector{NTuple{NDPLUS1,Int32}},NDPLUS1} # not needed
"A y-dims array of vectors of ids of the x(s) contributing to the given y"
y_to_x_ids::Array{Vector{NTuple{NDPLUS1,Int64}},NDPLUS1}
@doc """
$(TYPEDSIGNATURES)
Instantiate a new nD-dimensional, possibly multichannel PoolingLayer
The input data is either a column vector (in which case it is reshaped) or an array of `input_size` augmented by the `n_channels` dimension; the output size depends on the `input_size`, `kernel_size`, `padding` and `striding` but always has `nchannels_out` as its last dimension.
# Positional arguments:
* `input_size`: Shape of the input layer (integer for 1D convolution, tuple otherwise). Do not consider the channels number here.
* `kernel_size`: Size of the kernel (aka filter) (integer for 1D or hypercube kernels or nD-sized tuple for asymmetric kernels). Do not consider the channels number here.
* `nchannels_in`: Number of channels in input (the output has the same number of channels)
# Keyword arguments:
* `stride`: "Steps" to move the convolution window across the various tensor dimensions [def: `kernel_size`, i.e. each x contributes to a single y]
* `padding`: Integer or 2-elements tuple of tuples of the starting and ending padding across the various dimensions [def: `nothing`, i.e. set the padding required to keep out_size = in_size / stride ]
* `kernel_eltype`: Kernel eltype [def: `Float64`]
* `f`: Activation function. It should take a vector as input and produce a scalar as output [def: `maximum`]
* `df`: Derivative (gradient) of the activation function for the various inputs. [default: `nothing` (i.e. use AD)]
# Notes:
- to retrieve the output size of the layer, use `size(PoolingLayer)[2]`. The output size on each dimension _d_ (except the last one that is given by `nchannels_out`) is given by the following formula (floored): `output_size[d] = 1 + (input_size[d]+2*padding[d]-kernel_size[d])/stride[d]`
- differently from a ConvLayer, the pooling applies always on a single channel level, so that the output has always the same number of channels of the input. If you want to reduce the channels number either use a `ConvLayer` with the desired number of channels in output or use a `ReshaperLayer` to add a further 1-element dimension that will be treated as "channel" and choose the desired stride for the last pooling dimension (the one that was originally the channel dimension)
"""
function PoolingLayer(input_size,kernel_size,nchannels_in;
stride = kernel_size,
kernel_eltype = Float64,
padding = nothing, # (zeros(Int64,length(input_size)),zeros(Int64,length(input_size))),
f = maximum,
df = match_known_derivatives(f))
nchannels_out = nchannels_in
# be sure all are tuples of right dimension...
if typeof(input_size) <: Integer
input_size = (input_size,)
end
nD = length(input_size)
if typeof(kernel_size) <: Integer
kernel_size = ([kernel_size for d in 1:nD]...,)
end
length(input_size) == length(kernel_size) || error("Number of dimensions of the kernel must equal the number of dimensions of the input data")
if typeof(stride) <: Integer
stride = ([stride for d in 1:nD]...,)
end
if typeof(padding) <: Integer
padding_start = ([padding for d in 1:nD]...,)
padding_end = ([padding for d in 1:nD]...,)
elseif isnothing(padding) # compute padding to keep same size/stride if not provided
target_out_size = [Int(ceil(input_size[d]/stride[d])) for d in 1:length(input_size)]
padding_total = [(target_out_size[d]-1)*stride[d] - input_size[d]+kernel_size[d] for d in 1:length(input_size)]
padding_start = Int.(ceil.(padding_total ./ 2))
padding_end = padding_total .- padding_start
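# Worked example (illustrative values): input_size=(7,), kernel_size=(3,), stride=(2,)
# target_out_size = ceil(7/2) = 4 ; padding_total = (4-1)*2 - 7 + 3 = 2
# hence padding_start = 1 and padding_end = 1, keeping out_side = in_side / stride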
else
padding_start = padding[1]
padding_end = padding[2]
end
nD == length(stride) || error("`stride` must be either a scalar or a tuple whose length equals the number of dimensions of the input data")
nD == length(padding_start) == length(padding_end) || error("`padding` must be: (a) the value `nothing` for automatic computation to keep the same size, (b) a scalar for same padding on all dimensions or (c) a 2-elements tuple where each element is a tuple whose length equals the number of dimensions of the input data, indicating respectively the padding to set in front of the data and the padding to set at the end of the data")
#println(typeof(weight_init))
#println(weight_init)
#new{nD,nD+1}(weight_init,usebias,bias_init,padding,stride,nD,f,df)
input_size_with_nchin = (input_size...,nchannels_in)
kernel_size_with_nchin_nchout = (kernel_size...,nchannels_in, nchannels_out)
# Computation of out_size. Code from size() that we can't yet use
output_size_with_nchout = ([1 + Int(floor((input_size[d]+padding_start[d]+padding_end[d]-kernel_size[d])/stride[d])) for d in 1:nD]...,nchannels_out)
#x_to_y_ids = [Vector{NTuple{nD+1,Int32}}() for i in CartesianIndices(input_size_with_nchin)] # not needed
y_to_x_ids = [Vector{NTuple{nD+1,Int64}}() for i in CartesianIndices(output_size_with_nchout)]
new{nD,nD+1,nD+2,typeof(f),typeof(df),kernel_eltype}(input_size_with_nchin,output_size_with_nchout,kernel_size_with_nchin_nchout,padding_start,padding_end,stride,nD,f,df,y_to_x_ids)
end
end
"""
$(TYPEDSIGNATURES)
Alternative constructor for a `PoolingLayer` where the number of channels in input is specified as a further dimension in the input size instead of as a separate parameter, so that one can directly use `size(previous_layer)[2]` if desired.
For arguments and default values see the documentation of the main constructor.
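# Example:
A minimal sketch (illustrative; it assumes `PoolingLayer` is accessible from `BetaML`):
```julia
julia> using BetaML
julia> pl = PoolingLayer((4,4,1),(2,2));  # same layer as PoolingLayer((4,4),(2,2),1), with the channels taken from the last dimension of the input size
```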
"""
function PoolingLayer(input_size_with_channel,kernel_size;
stride = kernel_size,
padding = nothing, # (zeros(Int64,length(input_size)),zeros(Int64,length(input_size))),
f = maximum,
kernel_eltype = Float64,
df = match_known_derivatives(f))
return PoolingLayer(input_size_with_channel[1:end-1],kernel_size,input_size_with_channel[end]; kernel_eltype = kernel_eltype,stride=stride,padding=padding,f=f,df=df)
end
function preprocess!(layer::PoolingLayer{ND,NDPLUS1,NDPLUS2}) where {ND,NDPLUS1,NDPLUS2}
if layer.y_to_x_ids != [Vector{NTuple{NDPLUS1,Int64}}() for i in CartesianIndices((layer.output_size...,))]
return # layer already preprocessed
end
input_size, output_size = size(layer)
nchannels_in = input_size[end]
convsize = input_size[1:end-1]
ndims_conv = ND
ksize = layer.kernel_size
ysize = output_size
#println(ysize)
# preallocating temp variables
k_idx = Array{Int32,1}(undef,NDPLUS2)
y_idx = Array{Int32,1}(undef,NDPLUS1)
k_idx_conv = Array{Int32,1}(undef,ND)
y_idx_conv = Array{Int32,1}(undef,ND)
idx_x_source_padded = Array{Int32,1}(undef,ND)
checkstart = Array{Bool,1}(undef,ND)
checkend = Array{Bool,1}(undef,ND)
x_idx = Array{Int32,1}(undef,NDPLUS1)
@inbounds for nch_in in 1:nchannels_in
#println("Processing in layer :", nch_in)
#@inbounds for nch_out in 1:nchannels_out
#println("- processing out layer :", nch_out)
@inbounds for k_idx_conv in CartesianIndices( ((ksize[1:end-2]...),) )
k_idx_conv = Tuple(k_idx_conv)
k_idx = (k_idx_conv...,nch_in,nch_in)
@inbounds for y_idx_conv in CartesianIndices( ((ysize[1:end-1]...),) )
y_idx_conv = Tuple(y_idx_conv)
y_idx = (y_idx_conv...,nch_in)
#println("y_idx: ",y_idx)
#println("k_idx: ",k_idx)
#println("layer.stride: ",layer.stride)
#quit(1)
check = true
@inbounds for d in 1:ndims_conv
idx_x_source_padded[d] = k_idx_conv[d] + (y_idx_conv[d] - 1 ) * layer.stride[d]
checkstart[d] = idx_x_source_padded[d] > layer.padding_start[d]
checkend[d] = idx_x_source_padded[d] <= layer.padding_start[d] .+ convsize[d]
checkstart[d] && checkend[d] || begin check = false; break; end
end
check || continue
@inbounds @simd for d in 1:ndims_conv
x_idx[d] = idx_x_source_padded[d] - layer.padding_start[d]
end
x_idx[ndims_conv+1] = nch_in
#println()
push!(layer.y_to_x_ids[y_idx...],((x_idx...,)))
#println("---")
#println("x_idx: ", x_idx)
#println("w_idx: ", w_idx)
#println("y_idx: ", y_idx)
#push!(layer.x_ids,((x_idx...,)))
#push!(layer.w_ids,w_idx)
#push!(layer.y_ids,y_idx)
#de_dx_ch_in[idx_x_source...] += dϵ_dz_ch_out[dy_idx...] * w_ch_in_out[w_idx...]
end
end
#end
end
end
"""
$(TYPEDSIGNATURES)
Compute the forward pass of a PoolingLayer
"""
function forward(layer::PoolingLayer{ND,NDPLUS1,NDPLUS2,TF, TDF, WET},x) where {ND,NDPLUS1,NDPLUS2,TF, TDF, WET}
_, output_size = size(layer)
y = zeros(WET,output_size)
for y_idx in eachindex(y)
x_ids = layer.y_to_x_ids[y_idx]
x_vals = [x[idx...] for idx in x_ids]
y[y_idx] = layer.f(x_vals)
end
return y
end
function backward(layer::PoolingLayer{ND,NDPLUS1,NDPLUS2},x, next_gradient) where {ND,NDPLUS1,NDPLUS2}
de_dx = zeros(layer.input_size...)
for y_idx in eachindex(next_gradient)
#println("----")
x_ids = layer.y_to_x_ids[y_idx]
x_vals = [x[idx...] for idx in x_ids]
df_val = layer.df(x_vals)
#println("y_idx: ",y_idx)
#println("x_idx: ",x_ids)
#println("x_vals: ",x_vals)
#println("df_val: ",df_val)
#println("next_gradient[y_idx]: ",next_gradient[y_idx])
for (i,x_idx) in enumerate(x_ids)
#println("- x_idx: ", x_idx)
de_dx[x_idx...] += next_gradient[y_idx] .* df_val[i]
end
end
return de_dx
end
function get_params(layer::PoolingLayer)
return Learnable(())
end
function get_gradient(layer::PoolingLayer{ND,NDPLUS1,NDPLUS2},x, next_gradient) where {ND,NDPLUS1,NDPLUS2}
return Learnable(())
end
function set_params!(layer::PoolingLayer,w)
end
"""
$(TYPEDSIGNATURES)
Get the dimensions of the layer in terms of (dimensions in input, dimensions in output), including channels as the last dimension
"""
function size(layer::PoolingLayer{ND,NDPLUS1,NDPLUS2}) where {ND,NDPLUS1,NDPLUS2}
return ((layer.input_size...,),(layer.output_size...,))
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3852 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Experimental
"""
RNNLayer
**Experimental - not working**
Representation of a layer in the network
# Fields:
* `wx`: Weights matrix with respect to the input from data (n by n_input)
* `ws`: Weights matrix with respect to the layer state (n x n )
* `wb`: Biases (n)
* `f`: Activation function
* `df`: Derivative of the activation function
* `s` : State
"""
mutable struct RNNLayer <: RecursiveLayer
wx::Array{Float64,2}
ws::Array{Float64,2}
wb::Array{Float64,1}
s::Array{Float64,1}
f::Function
df::Union{Function,Nothing}
"""
RNNLayer(nₗ,n;f,wx,ws,wb,df)
**Experimental - not working**
Instantiate a new RNNLayer
# Positional arguments:
* `nₗ`: Number of nodes of the input
* `n`: Number of nodes of the state (and the output)
# Keyword arguments:
* `wx`: Initial weights with respect to the input [default: Xavier initialisation, dims = (n,nₗ)]
* `ws`: Initial weights with respect to the layer state [default: Xavier initialisation, dims = (n,n)]
* `wb`: Initial weights of the bias [default: Xavier initialisation, dims = (n)]
* `s`: Initial states [def: zeros(n)]
* `f`: Activation function [def: `relu`]
* `df`: Derivative of the activation function [default: `nothing` (i.e. use AD)]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- Xavier initialization = `rand(Uniform(-sqrt(6)/sqrt(nₗ+n),sqrt(6)/sqrt(nₗ+n))`
"""
function RNNLayer(nₗ,n;rng = Random.GLOBAL_RNG,wx=rand(rng, Uniform(-sqrt(6)/sqrt(nₗ+n),sqrt(6)/sqrt(nₗ+n)),n,nₗ),ws=rand(rng, Uniform(-sqrt(6)/sqrt(n+n),sqrt(6)/sqrt(n+n)),n,n),wb=rand(rng, Uniform(-sqrt(6)/sqrt(nₗ+n),sqrt(6)/sqrt(nₗ+n)),n),s=zeros(n),f=relu,df=match_known_derivatives(f))
# To be sure wx and ws are matrices and wb/s column vectors..
wx = reshape(wx,n,nₗ)
ws = reshape(ws,n,n)
wb = reshape(wb,n)
s = reshape(s,n)
return new(wx,ws,wb,s,f,df)
end
end
#=
function _zComp(layer::RNNLayer,x)
w = layer.w
wb = layer.wb
z = zeros(eltype(x),size(w,1))
@inbounds for n in axes(w,1)
zn = zero(eltype(x))
@simd for nl in axes(x,1)
zn += w[n,nl] * x[nl]
end
zn += wb[n]
z[n] = zn
end
return z
end
=#
function forward(layer::RNNLayer,x)
#z = _zComp(layer,x) #@avx layer.w * x + layer.wb #_zComp(layer,x) #layer.w * x + layer.wb # _zComp(layer,x) # layer.w * x + layer.wb # testd @avx
z = layer.wb + layer.wx * x + layer.ws * layer.s
return layer.f.(z)
end
function backward(layer::RNNLayer,x,next_gradient) #TODO
z = layer.wb + layer.wx * x + layer.ws * layer.s
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient # tested @avx
dϵ_dI = layer.wx' * dϵ_dz # gradient with respect to the layer input
return dϵ_dI
end
function get_params(layer::RNNLayer)
return Learnable((layer.wb,layer.wx,layer.ws))
end
function get_gradient(layer::RNNLayer,x,next_gradient) #TODO
z = layer.wb + layer.wx * x + layer.ws * layer.s
if layer.df != nothing
dfz = layer.df.(z)
else
dfz = layer.f'.(z) # using AD
end
dϵ_dz = @turbo dfz .* next_gradient
dϵ_dwx = @turbo dϵ_dz * x' # gradient with respect to the input weights
dϵ_dws = dϵ_dz * layer.s' # gradient with respect to the state weights
dϵ_dwb = dϵ_dz # gradient with respect to the biases
return Learnable((dϵ_dwb,dϵ_dwx,dϵ_dws))
end
function set_params!(layer::RNNLayer,w)
layer.wb = w.data[1]
layer.wx = w.data[2]
layer.ws = w.data[3]
end
function size(layer::RNNLayer)
w_size = size(layer.wx')
return ((w_size[1],),(w_size[2],))
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1586 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Experimental
"""
$(TYPEDEF)
Representation of a "reshaper" (weigthless) layer in the network
Reshape the output of a layer (or the input data) to the shape needed for the next one.
# Fields:
$(TYPEDFIELDS)
"""
struct ReshaperLayer{NDIN,NDOUT} <: AbstractLayer
"Input size"
input_size::SVector{NDIN,Int64}
"Output size"
output_size::SVector{NDOUT,Int64}
@doc """
$(TYPEDSIGNATURES)
Instantiate a new ReshaperLayer
# Positional arguments:
* `input_size`: Shape of the input layer (tuple).
* `output_size`: Shape of the output layer (tuple) [def: `prod([input_size...])`, i.e. reshape to a vector of appropriate length].
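# Example:
A minimal sketch (illustrative; it assumes `ReshaperLayer` is accessible from `BetaML`):
```julia
julia> using BetaML
julia> rl = ReshaperLayer((4,4,1));  # flatten a 4x4x1 tensor into a 16-element vector
julia> size(rl)                      # input size (4, 4, 1), output size (16,)
```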
"""
function ReshaperLayer(input_size, output_size=prod([input_size...]))
NDIN = length(input_size)
if typeof(output_size) <: Integer
output_size = (output_size,)
end
NDOUT = length(output_size)
return new{NDIN,NDOUT}(input_size,output_size)
end
end
function forward(layer::ReshaperLayer,x)
return reshape(x,layer.output_size...)
end
function backward(layer::ReshaperLayer,x,next_gradient)
return reshape(next_gradient,layer.input_size...)
end
function get_params(layer::ReshaperLayer)
return Learnable(())
end
function get_gradient(layer::ReshaperLayer,x,next_gradient)
return Learnable(())
end
function set_params!(layer::ReshaperLayer,w)
return nothing
end
function size(layer::ReshaperLayer)
return (layer.input_size,layer.output_size)
end | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 5055 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
$(TYPEDEF)
Representation of a ScalarFunction layer in the network.
ScalarFunctionLayer applies the activation function directly to the output of
the previous layer (i.e., without passing through a weight matrix), but using an
optional learnable parameter (an array) used as second argument, similarly to
[`VectorFunctionLayer`](@ref).
Differently from `VectorFunctionLayer`, the function is applied scalarwise to
each node.
The number of nodes in input must be set to the same as in the previous layer
# Fields:
* `w`: Weights (parameter) array passed as second argument to the activation
function (if not empty)
* `n`: Number of nodes in output (≡ number of nodes in input )
* `f`: Activation function (vector)
* `dfx`: Derivative of the (vector) activation function with respect to the
layer inputs (x)
* `dfw`: Derivative of the (vector) activation function with respect to the
optional learnable weigths (w)
# Notes:
* The output `size` of this layer is the same as those of the previous layers.
"""
struct ScalarFunctionLayer{N, TF <: Function, TDFX <: Union{Nothing,Function}, TDFW <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
w::Array{WET,N}
n::Int64
f::TF
dfx::TDFX
dfw::TDFW
@doc """
$(TYPEDSIGNATURES)
Instantiate a new ScalarFunctionLayer
# Positional arguments:
* `nₗ`: Number of nodes (must be same as in the previous layer)
# Keyword arguments:
* `wsize`: A tuple or array specifying the size (number of elements) of the
learnable parameter [def: empty array]
* `w_eltype`: Eltype of the weights [def: `Float64`]
* `w`: Initial weights (if `wsize` is not empty) [default: Xavier initialisation, dims = `wsize`]
* `f`: Activation function [def: `softmax`]
* `dfx`: Derivative of the activation function with respect to the data [default: try to match with well-known derivatives, resort to AD if `f` is unknown]
* `dfw`: Derivative of the activation function with respect to the
learnable parameter [default: `nothing` (i.e. use AD)]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- If the derivative is provided, it should return the gradient as a (n,n) matrix (i.e. the Jacobian)
- Xavier initialization = `rand(Uniform(-sqrt(6)/sqrt(sum(wsize...)),sqrt(6)/sqrt(sum(wsize...))))`
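# Example:
A minimal sketch (illustrative; it assumes `ScalarFunctionLayer` and `relu` are accessible from `BetaML`):
```julia
julia> using BetaML
julia> l = ScalarFunctionLayer(3, f=relu);  # apply relu element-wise to the 3 nodes of the previous layer
julia> size(l)                              # ((3,), (3,))
```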
"""
function ScalarFunctionLayer(nₗ;rng = Random.GLOBAL_RNG,wsize=Int64[],
w_eltype = Float64,
w = xavier_init(sum(wsize)/2,sum(wsize)/2,rng=rng,Tuple(wsize),eltype=w_eltype),
f=softmax,
dfx=match_known_derivatives(f),
dfw=nothing)
nw = length(wsize)
return new{nw,typeof(f),typeof(dfx),typeof(dfw),w_eltype}(w,nₗ,f,dfx,dfw)
end
end
"""
$(TYPEDSIGNATURES)
Create a weightless layer whose output is equal to the input.
# Fields:
* `n`: Number of nodes in output (≡ number of nodes in input )
# Notes:
- The output `size` of this layer is the same as those of the previous layers.
- This is just an alias for a [`ScalarFunctionLayer`](@ref) with no weights and the identity function.
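# Example:
A minimal sketch (illustrative; it assumes `ReplicatorLayer` is accessible from `BetaML`):
```julia
julia> using BetaML
julia> l = ReplicatorLayer(4);  # identity pass-through layer for 4 nodes
julia> size(l)                  # ((4,), (4,))
```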
"""
function ReplicatorLayer(n)
return ScalarFunctionLayer(n,f=identity)
end
function forward(layer::ScalarFunctionLayer{N},x) where {N}
return N == 0 ? layer.f.(x) : layer.f.(x,layer.w)
end
function backward(layer::ScalarFunctionLayer{N},x,next_gradient) where N
if N == 0
if layer.dfx != nothing
dfz = layer.dfx.(x)
else # using AD
dfz = layer.f'.(x)
end
dϵ_dI = @turbo dfz .* next_gradient
return dϵ_dI
else
if layer.dfx != nothing
df_dx = layer.dfx.(x,Ref(layer.w))
else # using AD
#tempfunction(x) = layer.f.(x,Ref(layer.w))
df_dx = [gradient(xt -> layer.f(xt,layer.w),xi)[1] for xi in x]
end
dϵ_dI = @turbo df_dx .* next_gradient
return dϵ_dI
end
end
function get_params(layer::ScalarFunctionLayer{N}) where {N}
return N == 0 ? Learnable(()) : Learnable((layer.w,))
end
function get_gradient(layer::ScalarFunctionLayer{N},x,next_gradient) where {N}
if N == 0
return Learnable(()) # parameterless layer
else
if layer.dfw != nothing
dfz = [layer.dfw(xi,wj) for wj in layer.w, xi in x]
else # using AD
tempfunction(w) = [layer.f(xi,w) for xi in x]
dfz = (autojacobian(tempfunction,layer.w; nY=layer.n))'
end
dϵ_dw = @turbo dfz * next_gradient
return Learnable((dϵ_dw,))
end
end
function set_params!(layer::ScalarFunctionLayer{N},w) where {N}
if N > 0
layer.w .= w.data[1]
end
return nothing
end
function size(layer::ScalarFunctionLayer{N}) where {N}
# The output size of a ScalarFunctionLayer is always the same as its input size,
# as the activation function is applied scalarwise to each node
return ((layer.n,),(layer.n,))
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 5912 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
$(TYPEDEF)
Representation of a VectorFunction layer in the network. Vector
function layer expects a vector activation function, i.e. a function taking the
whole output of the previous layer as input rather than working on a single node
as "normal" activation functions would do.
Useful for example with the SoftMax function in classification or with the
`pool1D` function to implement a "pool" layer in 1 dimensions.
By default it is weightless, i.e. it doesn't apply any transformation to the
output coming from the previous layer except the activation function. However,
by passing the parameter `wsize` (a tuple or array - tested only 1D) you can
pass the learnable parameter to the activation function too. It is your
responsibility to be sure the activation function accepts either only X, or
also this learnable array (as second argument).
The number of nodes in input must be set to the same as in the previous layer
(and if you are using this for classification, to the number of classes, i.e.
the _previous_ layer must be set equal to the number of classes in the
predictions).
# Fields:
* `w`: Weights (parameter) array passed as second argument to the activation
function (if not empty)
* `nₗ`: Number of nodes in input (i.e. length of previous layer)
* `n`: Number of nodes in output (automatically inferred in the constructor)
* `f`: Activation function (vector)
* `dfx`: Derivative of the (vector) activation function with respect to the
layer inputs (x)
* `dfw`: Derivative of the (vector) activation function with respect to the
optional learnable weigths (w)
# Notes:
* The output `size` of this layer is given by the size of the output function,
which is not necessarily the same as that of the previous layers.
"""
struct VectorFunctionLayer{N, TF <: Function, TDFX <: Union{Nothing,Function}, TDFW <: Union{Nothing,Function}, WET <: Number} <: AbstractLayer
w::Array{WET,N}
nₗ::Int64
n::Int64
f::TF
dfx::TDFX
dfw::TDFW
@doc """
$(TYPEDSIGNATURES)
Instantiate a new VectorFunctionLayer
# Positional arguments:
* `nₗ`: Number of nodes (must be same as in the previous layer)
# Keyword arguments:
* `wsize`: A tuple or array specifying the size (number of elements) of the
learnable parameter [def: empty array]
* `w_eltype`: Eltype of the weights [def: `Float64`]
* `w`: Initial weights (if `wsize` is not empty) [default: Xavier initialisation, dims = `wsize`]
* `f`: Activation function [def: `softmax`]
* `dfx`: Derivative of the activation function with respect to the data
[default: try to match with well-known derivatives, resort to AD if `f` is unknown]
* `dfw`: Derivative of the activation function with respect to the
learnable parameter [default: `nothing` (i.e. use AD)]
* `dummyDataToTestOutputSize`: Dummy data to test the output size [def:
`ones(nₗ)`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
- If the derivative is provided, it should return the gradient as a (n,n) matrix (i.e. the Jacobian)
- To avoid recomputing the activation function just to determine its output size,
we compute the output size once here in the layer constructor by calling the
activation function with `dummyDataToTestOutputSize`. Feel free to change
it if it doesn't match with the activation function you are setting
- Xavier initialization = `rand(Uniform(-sqrt(6)/sqrt(sum(wsize...)),sqrt(6)/sqrt(sum(wsize...))))`
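# Example:
A minimal sketch (illustrative; it assumes `VectorFunctionLayer` is accessible from `BetaML`):
```julia
julia> using BetaML
julia> l = VectorFunctionLayer(3);  # default f=softmax: the 3 outputs of the previous layer are jointly normalised
julia> size(l)                      # ((3,), (3,))
```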
"""
function VectorFunctionLayer(nₗ;rng = Random.GLOBAL_RNG,wsize=Int64[],
w_eltype = Float64,
w = xavier_init(sum(wsize)/2,sum(wsize)/2,rng=rng,Tuple(wsize),eltype=w_eltype),
f=softmax,
dfx=match_known_derivatives(f),
dfw=nothing,
dummyDataToTestOutputSize=ones(nₗ))
nw = length(wsize)
if nw ==0
n = length(f(dummyDataToTestOutputSize))
else
n = length(f(dummyDataToTestOutputSize,w))
end
return new{nw,typeof(f),typeof(dfx),typeof(dfw),w_eltype}(w,nₗ,n,f,dfx,dfw)
end
end
function forward(layer::VectorFunctionLayer{N},x) where {N}
return N == 0 ? layer.f(x) : layer.f(x,layer.w)
end
function backward(layer::VectorFunctionLayer{N},x,next_gradient) where N
if N == 0
if layer.dfx != nothing
dfz = layer.dfx(x)'
else # using AD
dfz = (autojacobian(layer.f,x; nY=layer.n))'
end
dϵ_dI = @turbo dfz * next_gradient
return dϵ_dI
else
if layer.dfx != nothing
dfz = layer.dfx(x,layer.w)'
else # using AD
tempfunction(x) = layer.f(x,layer.w)
nYl = layer.n
dfz = (autojacobian(tempfunction,x; nY=nYl))'
end
dϵ_dI = @turbo dfz * next_gradient
return dϵ_dI
end
end
function get_params(layer::VectorFunctionLayer{N}) where {N}
return N == 0 ? Learnable(()) : Learnable((layer.w,))
end
function get_gradient(layer::VectorFunctionLayer{N},x,next_gradient) where {N}
if N == 0
return Learnable(()) # parameterless layer
else
if layer.dfw != nothing
dfz = layer.dfw(x,layer.w)'
else # using AD
dfz = (autojacobian(wt -> layer.f(x,wt),layer.w; nY=layer.n))'
end
dϵ_dw = @turbo dfz * next_gradient
return Learnable((dϵ_dw,))
end
end
function set_params!(layer::VectorFunctionLayer{N},w) where {N}
if N > 0
layer.w .= w.data[1]
end
return nothing
end
function size(layer::VectorFunctionLayer{N}) where {N}
# Output size for the VectorFunctionLayer is given by its activation function
# We test its length with dummy values
return ((layer.nₗ,),(layer.n,))
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1450 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
Perceptron module
Provide linear and kernel classifiers.
Provide the following supervised models:
- [`PerceptronClassifier`](@ref): Train data using the classical perceptron
- [`KernelPerceptronClassifier`](@ref): Train data using the kernel perceptron
- [`PegasosClassifier`](@ref): Train data using the pegasos algorithm
All algorithms are multiclass, with `PerceptronClassifier` and `PegasosClassifier` employing a one-vs-all strategy and `KernelPerceptronClassifier` employing a _one-vs-one_ approach; all of them return a "probability" for each class in terms of a dictionary for each record. Use `mode(ŷ)` to return a single class prediction per record.
These models are available in the MLJ framework as `PerceptronClassifier`, `KernelPerceptronClassifier` and `PegasosClassifier` respectively.
"""
module Perceptron
using LinearAlgebra, Random, ProgressMeter, Reexport, CategoricalArrays, DocStringExtensions
using ForceImport
@force using ..Api
@force using ..Utils
import Base.show
# export perceptron, perceptronBinary, KernelPerceptronClassifier, KernelPerceptronClassifierBinary, pegasos, pegasosBinary, predict
export PerceptronClassifier, KernelPerceptronClassifier, PegasosClassifier
export PerceptronC_hp, KernelPerceptronC_hp, PegasosC_hp
include("Perceptron_classic.jl")
include("Perceptron_kernel.jl")
include("Perceptron_pegasos.jl")
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 16055 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
perceptron(x,y;θ,θ₀,T,nMsgs,shuffle,force_origin,return_mean_hyperplane)
Train the multiclass classifier "perceptron" algorithm based on x and y (labels).
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model [`PerceptronClassifier`](@ref) instead.
The perceptron is a _linear_ classifier. Multiclass is supported using a one-vs-all approach.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data, can be in any format (string, integers..)
* `θ`: Initial value of the weights (parameter) [def: `zeros(d)`]
* `θ₀`: Initial value of the weight (parameter) associated to the constant
term [def: `0`]
* `T`: Maximum number of iterations across the whole set (if the set
is not fully classified earlier) [def: 1000]
* `nMsgs`: Maximum number of messages to show if all iterations are done [def: `0`]
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `force_origin`: Whether to force `θ₀` to remain zero [def: `false`]
* `return_mean_hyperplane`: Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return a named tuple with:
* `θ`: The weights of the classifier
* `θ₀`: The weight of the classifier associated to the constant term
* `classes`: The classes (unique values) of y
# Notes:
* The trained parameters can then be used to make predictions using the function `predict()`.
* This model is available in the MLJ framework as the `PerceptronClassifier`
# Example:
```jldoctest
julia> model = perceptron([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
julia> ŷ = predict([2.1 3.1; 7.3 5.2], model.θ, model.θ₀, model.classes)
```
"""
function perceptron(x::AbstractMatrix, y::AbstractVector; θ=nothing,θ₀=nothing, T=1000, nMsgs=0, shuffle=false, force_origin=false, return_mean_hyperplane=false, rng = Random.GLOBAL_RNG, verbosity=NONE)
yclasses = unique(y)
nCl = length(yclasses)
nD = size(x,2)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
#if nCl == 2
# outθ = Array{Vector{Float64},1}(undef,1)
# outθ₀ = Array{Float64,1}(undef,1)
#else
outθ = Array{Vector{Float64},1}(undef,nCl)
outθ₀ = Array{Float64,1}(undef,nCl)
#end
if θ₀ == nothing
θ₀ = zeros(nCl)
end
if θ == nothing
θ = [zeros(nD) for _ in 1:nCl]
end
for (i,c) in enumerate(yclasses)
ybin = ((y .== c) .*2 .-1) # conversion to -1/+1
outBinary = perceptronBinary(x, ybin; θ=θ[i],θ₀=θ₀[i], T=T, nMsgs=nMsgs, shuffle=shuffle, force_origin=force_origin, rng=rng, verbosity=verbosity)
if return_mean_hyperplane
outθ[i] = outBinary.avgθ
outθ₀[i] = outBinary.avgθ₀
else
outθ[i] = outBinary.θ
outθ₀[i] = outBinary.θ₀
end
if i == 1 && nCl == 2
outθ[2] = - outθ[1]
outθ₀[2] = .- outθ₀[1]
break # if there are only two classes we do compute only one passage, as A vs B would be the same as B vs A
end
end
return (θ=outθ,θ₀=outθ₀,classes=yclasses)
end
"""
perceptronBinary(x,y;θ,θ₀,T,nMsgs,shuffle,force_origin)
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model PerceptronClassifier() instead.
Train the binary classifier "perceptron" algorithm based on x and y (labels)
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data, in the format of ± 1
* `θ`: Initial value of the weights (parameter) [def: `zeros(d)`]
* `θ₀`: Initial value of the weight (parameter) associated to the constant
term [def: `0`]
* `T`: Maximum number of iterations across the whole set (if the set
is not fully classified earlier) [def: 1000]
* `nMsgs`: Maximum number of messages to show if all iterations are done
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `force_origin`: Whether to force `θ₀` to remain zero [def: `false`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return a named tuple with:
* `θ`: The final weights of the classifier
* `θ₀`: The final weight of the classifier associated to the constant term
* `avgθ`: The average weights of the classifier
* `avgθ₀`: The average weight of the classifier associated to the constant term
* `errors`: The number of errors in the last iteration
* `besterrors`: The minimum number of errors in classifying the data ever reached
* `iterations`: The actual number of iterations performed
* `separated`: Whether the data has been successfully separated
# Notes:
* The trained parameters can then be used to make predictions using the function `predict()`.
# Example:
```jldoctest
julia> model = perceptronBinary([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
```
"""
function perceptronBinary(x, y; θ=zeros(size(x,2)),θ₀=0.0, T=1000, nMsgs=10, shuffle=false, force_origin=false, rng = Random.GLOBAL_RNG, verbosity=NONE)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
if nMsgs > 5
@codelocation
println("***\n*** Training perceptron for maximum $T iterations. Random shuffle: $shuffle")
end
x = makematrix(x)
(n,d) = size(x)
ny = size(y,1)
ny == n || error("y has different number of records (rows) than x!")
bestϵ = Inf
lastϵ = Inf
if force_origin θ₀ = 0.0; end
sumθ = θ; sumθ₀ = θ₀
@showprogress dt=1 desc="Training Perceptron..." for t in 1:T
ϵ = 0
if shuffle
# random shuffle x and y
ridx = Random.shuffle(rng, 1:size(x,1))
x = x[ridx, :]
y = y[ridx]
end
@inbounds for i in 1:n
if y[i]*(θ' * x[i,:] + θ₀) <= eps()
θ = θ + y[i] * x[i,:]
θ₀ = force_origin ? 0.0 : θ₀ + y[i]
sumθ += θ; sumθ₀ += θ₀
ϵ += 1
end
end
if (ϵ == 0)
if nMsgs > 0
println("*** Avg. error after epoch $t : $(ϵ/size(x)[1]) (all elements of the set has been correctly classified)")
end
return (θ=θ,θ₀=θ₀,avgθ=sumθ/(n*T),avgθ₀=sumθ₀/(n*T),errors=0,besterrors=0,iterations=t,separated=true)
elseif ϵ < bestϵ
bestϵ = ϵ
end
lastϵ = ϵ
if nMsgs > 5 && (t % ceil(T/nMsgs) == 0 || t == 1 || t == T)
println("Avg. error after iteration $t : $(ϵ/size(x)[1])")
end
end
return (θ=θ,θ₀=θ₀,avgθ=sumθ/(n*T),avgθ₀=sumθ₀/(n*T),errors=lastϵ,besterrors=bestϵ,iterations=T,separated=false)
end
"""
predict(x,θ,θ₀)
Predict a binary label {-1,1} given the feature vector and the linear coefficients
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the `predict` function with your desired model instead.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `θ`: The trained parameters
* `θ₀`: The trained bias parameter [def: `0`]
# Return :
* `y`: Vector of the predicted labels
# Example:
```julia
julia> predict([1.1 2.1; 5.3 4.2; 1.8 1.7], [3.2,1.2])
```
"""
function predict(x::Matrix,θ,θ₀=0.0)
x = makematrix(x)
θ = makecolvector(θ)
(n,d) = size(x)
d2 = length(θ)
if (d2 != d) error("x and θ must have the same dimensions."); end
y = zeros(Int64,n)
for i in 1:n
y[i] = (θ' * x[i,:] + θ₀) > eps() ? 1 : -1 # no need to divide by the norm to get the sign!
end
return y
end
"""
predict(x,θ,θ₀,classes)
Predict a multiclass label given the feature vector, the linear coefficients and the classes vector
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the `predict` function of your desired model instead.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `θ`: Vector of the trained parameters for each one-vs-all model (i.e. `model.θ`)
* `θ₀`: Vector of the trained bias parameter for each one-vs-all model (i.e. `model.θ₀`)
* `classes`: The overall classes encountered in training (i.e. `model.classes`)
# Return :
* `ŷ`: Vector of dictionaries `label=>probability`
# Notes:
* Use `mode(ŷ)` if you want a single predicted label per record
# Example:
```julia
julia> model = perceptron([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
julia> ŷtrain = predict([10 10; 2.5 2.5],model.θ,model.θ₀, model.classes)
"""
function predict(x,θ::AbstractVector{T},θ₀::AbstractVector{Float64},classes::Vector{Tcl}) where {T<: AbstractVector{Float64},Tcl}
(n,d) = size(x)
nCl = length(classes)
y = Array{Dict{Tcl,Float64},1}(undef,n)
for i in 1:n
probRaw = Array{Float64,1}(undef,nCl)
for (c,cl) in enumerate(classes)
if nCl == 2 && c ==2
probRaw[2] = - probRaw[1]
else
probRaw[c] = (θ[c]' * x[i,:] + θ₀[c])
end
end
prob = softmax(probRaw)
y[i] = Dict(zip(classes,prob))
end
return y
end
# ----------------------------------------------
# API V2...
"""
$(TYPEDEF)
Hyperparameters for the [`PerceptronClassifier`](@ref) model
# Parameters:
$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct PerceptronC_hp <: BetaMLHyperParametersSet
"Initial parameters. If given, should be a matrix of n-classes by feature dimension + 1 (to include the constant term as the first element) [def: `nothing`, i.e. zeros]"
initial_parameters::Union{Nothing,Matrix{Float64}} = nothing
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `1000`]"
epochs::Int64 = 1000
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"Whether to force the parameter associated with the constant term to remain zero [def: `false`]"
force_origin::Bool = false
"Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]"
return_mean_hyperplane::Bool=false
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and optionally change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges=Dict("epochs" =>[50,100,1000,10000], "shuffle"=>[true,false], "force_origin"=>[true,false],"return_mean_hyperplane"=>[true,false]),multithreads=true)
end
Base.@kwdef mutable struct PerceptronClassifier_lp <: BetaMLLearnableParametersSet
weigths::Union{Nothing,Matrix{Float64}} = nothing
classes::Vector = []
end
"""
$(TYPEDEF)
The classical "perceptron" linear classifier (supervised).
For the parameters see [`?PerceptronC_hp`](@ref PerceptronC_hp) and [`?BML_options`](@ref BML_options).
# Notes:
- data must be numerical
- online fitting (re-fitting with new data) is not supported
# Example:
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> mod = PerceptronClassifier(epochs=100,return_mean_hyperplane=false)
PerceptronClassifier - The classic linear perceptron classifier (unfitted)
julia> ŷ = fit!(mod,X,y) |> mode
Running function BetaML.Perceptron.#perceptronBinary#84 at /home/lobianco/.julia/dev/BetaML/src/Perceptron/Perceptron_classic.jl:150
Type `]dev BetaML` to modify the source code (this would change its location on disk)
***
*** Training perceptron for maximum 100 iterations. Random shuffle: true
Avg. error after iteration 1 : 0.5
*** Avg. error after epoch 5 : 0.0 (all elements of the set have been correctly classified)
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
"a"
```
"""
mutable struct PerceptronClassifier <: BetaMLSupervisedModel
hpar::PerceptronC_hp
opt::BML_options
par::Union{Nothing,PerceptronClassifier_lp}
cres::Union{Nothing,Vector}
fitted::Bool
info::Dict{String,Any}
end
function PerceptronClassifier(;kwargs...)
m = PerceptronClassifier(PerceptronC_hp(),BML_options(),PerceptronClassifier_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit the [`PerceptronClassifier`](@ref) model to data
"""
function fit!(m::PerceptronClassifier,X,Y)
m.fitted || autotune!(m,(X,Y))
# Parameter alias..
initial_parameters = m.hpar.initial_parameters
epochs = m.hpar.epochs
shuffle = m.hpar.shuffle
force_origin = m.hpar.force_origin
return_mean_hyperplane = m.hpar.return_mean_hyperplane
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
nR,nD = size(X)
yclasses = unique(Y)
nCl = length(yclasses)
initial_parameters = (initial_parameters == nothing) ? zeros(nCl, nD+1) : initial_parameters
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
out = perceptron(X,Y; θ₀=initial_parameters[:,1], θ=[initial_parameters[c,2:end] for c in 1:nCl], T=epochs, nMsgs=nMsgs, shuffle=shuffle, force_origin=force_origin, return_mean_hyperplane=return_mean_hyperplane, rng = rng, verbosity=verbosity)
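# Store the fitted parameters as a (n_classes × (n_features+1)) matrix: the first column holds θ₀ (the constant term of each one-vs-all model), the remaining columns hold the corresponding θ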
weights = hcat(out.θ₀,vcat(out.θ' ...))
m.par = PerceptronClassifier_lp(weights,out.classes)
if cache
out = predict(X,out.θ,out.θ₀,out.classes)
m.cres = cache ? out : nothing
end
m.info["fitted_records"] = nR
m.info["xndims"] = nD
m.info["n_classes"] = size(weights,1)
m.fitted = true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict the labels associated to some feature data using the linear coefficients learned by fitting a [`PerceptronClassifier`](@ref) model
"""
function predict(m::PerceptronClassifier,X)
θ₀ = [i for i in m.par.weigths[:,1]]
θ = [r for r in eachrow(m.par.weigths[:,2:end])]
return predict(X,θ,θ₀,m.par.classes)
end
function show(io::IO, ::MIME"text/plain", m::PerceptronClassifier)
if m.fitted == false
print(io,"PerceptronClassifier - The classic linear perceptron classifier (unfitted)")
else
print(io,"PerceptronClassifier - The classic linear perceptron classifier (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::PerceptronClassifier)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
println(io,"PerceptronClassifier - A linear perceptron classifier (unfitted)")
else
println(io,"PerceptronClassifier - A $(m.info["xndims"])-dimensions $(m.info["n_classes"])-classes linear perceptron classifier (fitted on $(m.info["fitted_records"]) records)")
println(io,"Weights:")
println(io,m.par.weigths)
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 19834 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
kernel_perceptron_classifier(x,y;K,T,α,nMsgs,shuffle)
Train a multiclass kernel classifier "perceptron" algorithm based on x and y.
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model KernelPerceptronClassifier() instead.
`KernelPerceptronClassifier` is a (potentially) non-linear perceptron-style classifier employing user-defined kernel functions. Multiclass is supported using a one-vs-one approach.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data
* `K`: Kernel function to employ. See `?radial_kernel` or `?polynomial_kernel` for details or check `?BetaML.Utils` to verify if other kernels are defined (you can always define your own kernel) [def: [`radial_kernel`](@ref)]
* `T`: Maximum number of iterations (aka "epochs") across the whole set (if the set is not fully classified earlier) [def: 100]
* `α`: Initial distribution of the number of errors [def: `nothing`, i.e. zeros]. If provided, this should be a nModels-length vector of nRecords-length integer vectors, where nModels is computed as `(n_classes * (n_classes - 1)) / 2`
* `nMsgs`: Maximum number of messages to show if all iterations are done [def: `0`]
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return a named tuple with:
* `x`: The x data (eventually shuffled if `shuffle=true`)
* `y`: The label
* `α`: The errors associated to each record
* `classes`: The labels classes encountered in the training
# Notes:
* The trained model can then be used to make predictions using the function `predict()`.
* This model is available in the MLJ framework as the `KernelPerceptronClassifier`
# Example:
```jldoctest
julia> model = kernel_perceptron_classifier([1.1 1.1; 5.3 4.2; 1.8 1.7; 7.5 5.2;], ["a","c","b","c"])
julia> ŷtest = Perceptron.predict([10 10; 2.2 2.5; 1 1],model.x,model.y,model.α, model.classes,K=model.K)
```
"""
function kernel_perceptron_classifier(x, y; K=radial_kernel, T=100, α=nothing, nMsgs=0, shuffle=false, rng = Random.GLOBAL_RNG, verbosity=NONE)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
x = makematrix(x)
yclasses = unique(y)
nCl = length(yclasses)
nModels = Int((nCl * (nCl - 1)) / 2)
(n,d) = size(x)
ny = size(y,1)
ny == n || error("y and x have differnt number of records (rows)!")
outX = Array{typeof(x),1}(undef,nModels)
outY = Array{Array{Int64,1},1}(undef,nModels)
outα = Array{Array{Int64,1},1}(undef,nModels)
α = (α == nothing) ? [zeros(Int64,length(y)) for i in 1:nModels] : α
modelCounter = 1
for (i,c) in enumerate(yclasses)
for (i2,c2) in enumerate(yclasses)
if i2 <= i continue end # never false with a single class (always "continue")
ids = ( (y .== c) .| (y .== c2) )
thisx = x[ids,:]
thisy = y[ids]
thisα = α[modelCounter][ids]
ybin = ((thisy .== c) .*2 .-1) # conversion to +1 (if c) or -1 (if c2)
outBinary = kernel_perceptron_classifier_binary(thisx, ybin; K=K, T=T, α=thisα, nMsgs=nMsgs, shuffle=shuffle, rng = rng, verbosity=verbosity)
outX[modelCounter] = outBinary.x
outY[modelCounter] = outBinary.y
outα[modelCounter] = outBinary.α
modelCounter += 1
end
end
return (x=outX,y=outY,α=outα,classes=yclasses,K=K)
end
"""
kernel_perceptron_classifier_binary(x,y;K,T,α,nMsgs,shuffle)
Train a binary kernel classifier "perceptron" algorithm based on x and y
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model KernelPerceptronClassifier() instead.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data, in the format of ± 1
* `K`: Kernel function to employ. See `?radial_kernel` or `?polynomial_kernel` for details or check `?BetaML.Utils` to verify if other kernels are defined (you can always define your own kernel) [def: [`radial_kernel`](@ref)]
* `T`: Maximum number of iterations across the whole set (if the set is not fully classified earlier) [def: 1000]
* `α`: Initial distribution of the errors [def: `zeros(length(y))`]
* `nMsgs`: Maximum number of messages to show if all iterations are done
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return a named tuple with:
* `x`: the x data (eventually shuffled if `shuffle=true`)
* `y`: the label
* `α`: the errors associated to each record
* `errors`: the number of errors in the last iteration
* `besterrors`: the minimum number of errors in classifying the data ever reached
* `iterations`: the actual number of iterations performed
* `separated`: a flag if the data has been successfully separated
# Notes:
* The trained data can then be used to make predictions using the function `predict()`. **If the option `shuffle` has been used, it is important to use the returned (x,y,α) there, as these would have been shuffled compared with the original (x,y)**.
* Please see [`KernelPerceptronClassifier`](@ref) for a multi-class version
# Example:
```jldoctest
julia> model = kernel_perceptron_classifier_binary([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
```
"""
function kernel_perceptron_classifier_binary(x, y; K=radial_kernel, T=1000, α=zeros(Int64,length(y)), nMsgs=10, shuffle=false, rng = Random.GLOBAL_RNG, verbosity = NONE)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
if nMsgs > 5
@codelocation
println("***\n*** Training kernel perceptron for maximum $T iterations. Random shuffle: $shuffle")
end
x = makematrix(x)
α = deepcopy(α) # let's not modify the argument !
(n,d) = size(x)
ny = size(y,1)
ny == n || error("y and x have different number of records (rows)!")
bestϵ = Inf
lastϵ = Inf
if nMsgs == 0
showTime = typemax(Float64)
elseif nMsgs < 5
showTime = 50
elseif nMsgs < 10
showTime = 1
elseif nMsgs < 100
showTime = 0.5
else
showTime = 0.2
end
@showprogress dt=showTime desc="Training Kernel Perceptron..." for t in 1:T
ϵ = 0
if shuffle
# random shuffle x, y and alpha
ridx = Random.shuffle(rng, 1:size(x,1))
x = x[ridx, :]
y = y[ridx]
α = α[ridx]
end
@inbounds for i in 1:n
if y[i]*sum([α[j]*y[j]*K(x[j,:],x[i,:]) for j in 1:n]) <= 0 + eps()
α[i] += 1
ϵ += 1
end
end
if (ϵ == 0)
if nMsgs > 0
println("*** Avg. error after epoch $t : $(ϵ/size(x)[1]) (all elements of the set has been correctly classified)")
end
return (x=x,y=y,α=α,errors=0,besterrors=0,iterations=t,separated=true,K=K)
elseif ϵ < bestϵ
bestϵ = ϵ
end
lastϵ = ϵ
if nMsgs != 0 && (t % ceil(T/nMsgs) == 0 || t == 1 || t == T)
println("Avg. error after iteration $t : $(ϵ/size(x)[1])")
end
end
return (x=x,y=y,α=α,errors=lastϵ,besterrors=bestϵ,iterations=T,separated=false)
end
"""
predict(x,xtrain,ytrain,α;K)
Predict a binary label {-1,1} given the feature vector and the training data together with their errors (as trained by a kernel perceptron algorithm)
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the `predict` function with your desired model instead.
# Parameters:
* `x`: Feature matrix of the data to predict (n × d)
* `xtrain`: The feature vectors used for the training
* `ytrain`: The labels of the training set
* `α`: The errors associated to each record
* `K`: The kernel function used for the training and to be used for the prediction [def: [`radial_kernel`](@ref)]
# Return :
* `y`: Vector of the predicted labels
# Example:
```julia
julia> predict([1.1 2.1; 5.3 4.2; 1.8 1.7], [3.2,1.2])
```
"""
function predict(x,xtrain,ytrain,α;K=radial_kernel)
x = makematrix(x)
xtrain = makematrix(xtrain)
(n,d) = size(x)
(ntrain,d2) = size(xtrain)
if (d2 != d) error("xtrain and x must have the same dimensions."); end
# corner case all one category
if length(unique(ytrain)) == 1
return fill(unique(ytrain)[1],n)
end
if ( length(ytrain) != ntrain || length(α) != ntrain) error("xtrain, ytrain and α must all have the same length."); end
y = zeros(Int64,n)
for i in 1:n
y[i] = sum([ α[j] * ytrain[j] * K(x[i,:],xtrain[j,:]) for j in 1:ntrain]) > eps() ? 1 : -1
end
return y
end
"""
predict(x,xtrain,ytrain,α,classes;K)
Predict a multiclass label given the new feature vector and a trained kernel perceptron model.
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the `predict` function with your desired model instead.
# Parameters:
* `x`: Feature matrix of the data to predict (n × d)
* `xtrain`: A vector of the feature matrix used for training each of the one-vs-one class matches (i.e. `model.x`)
* `ytrain`: A vector of the label vector used for training each of the one-vs-one class matches (i.e. `model.y`)
* `α`: A vector of the errors associated to each record (i.e. `model.α`)
* `classes`: The overall classes encountered in training (i.e. `model.classes`)
* `K`: The kernel function used for the training and to be used for the prediction [def: [`radial_kernel`](@ref)]
# Return :
* `ŷ`: Vector of dictionaries `label=>probability` (warning: it isn't really a probability, it is just the standardized number of matches "won" by this class compared with the other classes)
# Notes:
* Use `mode(ŷ)` if you want a single predicted label per record
# Example:
```julia
julia> model = KernelPerceptronClassifier([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
julia> ŷtrain = Perceptron.predict([10 10; 2.2 2.5],model.x,model.y,model.α, model.classes,K=model.K)
```
"""
function predict(x,xtrain,ytrain,α,classes::AbstractVector{Tcl};K=radial_kernel) where {Tcl}
(n,d) = size(x)
nCl = length(classes)
y = Array{Dict{Tcl,Float64},1}(undef,n)
# corner case single class in training
if nCl == 1
return fill(Dict(classes[1] => 100.0),n)
end
nModels = Int((nCl * (nCl - 1)) / 2)
if !(nModels == length(xtrain) == length(ytrain) == length(α)) error("xtrain, ytrain or α have a length not compatible with the number of classes in this model."); end
x = makematrix(x)
d2 = size(xtrain[1],2)
if (d2 != d) error("xtrain and x must have the same dimensions."); end
for i in 1:n
#countByClass = zeros(Float64,nCl)
countByClass = zeros(Int64,nCl)
mCounter = 1
for (ic,c) in enumerate(classes)
for (ic2,c2) in enumerate(classes)
if ic2 <= ic
continue
end
nThisModel = size(xtrain[mCounter],1)
if ( length(ytrain[mCounter]) != nThisModel || length(α[mCounter]) != nThisModel) error("xtrain, ytrain and α must all have the same length."); end
# note that we assign "winning" scores between pair of classes matches only based on who win, not by how much he did
# todo check
#if sum([ α[mCounter][j] * ((ytrain[mCounter][j] .== c) .*2 .-1) * K(x[i,:],xtrain[mCounter][j,:]) for j in 1:nThisModel]) > eps()
score = sum([ α[mCounter][j] * (ytrain[mCounter][j]) * K(x[i,:],xtrain[mCounter][j,:]) for j in 1:nThisModel])
#println(score)
if score > eps()
countByClass[ic] += 1
#countByClass[ic] += score
else
countByClass[ic2] += 1
#countByClass[ic2] += -score
end
mCounter += 1
end
end
#println(countByClass)
prob = softmax(countByClass)
y[i] = Dict(zip(classes,prob))
end
return y
end
# ----------------------------------------------
# API V2...
"""
$(TYPEDEF)
Hyperparameters for the [`KernelPerceptronClassifier`](@ref) model
# Parameters:
$(FIELDS)
"""
Base.@kwdef mutable struct KernelPerceptronC_hp <: BetaMLHyperParametersSet
"Kernel function to employ. See `?radial_kernel` or `?polynomial_kernel` for details or check `?BetaML.Utils` to verify if other kernels are defined (you can alsways define your own kernel) [def: [`radial_kernel`](@ref)]"
kernel::Function = radial_kernel
"Initial distribution of the number of errors errors [def: `nothing`, i.e. zeros]. If provided, this should be a nModels-lenght vector of nRecords integer values vectors , where nModels is computed as `(n_classes * (n_classes - 1)) / 2`"
initial_errors::Union{Nothing,Vector{Vector{Int64}}} = nothing
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `100`]"
epochs::Int64 = 100
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and optionally change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges=Dict("kernel" =>[radial_kernel,polynomial_kernel, (x,y) -> polynomial_kernel(x,y,degree=3)], "epochs" =>[50,100,1000,10000], "shuffle"=>[true,false]),multithreads=true)
end
Base.@kwdef mutable struct KernelPerceptronClassifier_lp <: BetaMLLearnableParametersSet
xtrain::Union{Nothing,Vector{Matrix{Float64}}} = nothing
ytrain::Union{Nothing,Vector{Vector{Int64}}} = nothing
errors::Union{Nothing,Vector{Vector{Int64}}} = nothing
classes::Vector = []
end
"""
$(TYPEDEF)
A "kernel" version of the `Perceptron` model (supervised) with user configurable kernel function.
For the parameters see [`? KernelPerceptronC_hp`](@ref KernelPerceptronC_hp) and [`?BML_options`](@ref BML_options)
# Limitations:
- data must be numerical
- online training (retraining) is not supported
# Example:
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> quadratic_kernel(x,y) = polynomial_kernel(x,y;degree=2)
quadratic_kernel (generic function with 1 method)
julia> mod = KernelPerceptronClassifier(epochs=100, kernel= quadratic_kernel)
KernelPerceptronClassifier - A "kernelised" version of the perceptron classifier (unfitted)
julia> ŷ = fit!(mod,X,y) |> mode
Running function BetaML.Perceptron.#KernelPerceptronClassifierBinary#17 at /home/lobianco/.julia/dev/BetaML/src/Perceptron/Perceptron_kernel.jl:133
Type `]dev BetaML` to modify the source code (this would change its location on disk)
***
*** Training kernel perceptron for maximum 100 iterations. Random shuffle: true
Avg. error after iteration 1 : 0.5
Avg. error after iteration 10 : 0.16666666666666666
*** Avg. error after epoch 13 : 0.0 (all elements of the set have been correctly classified)
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
 "a"
```
"""
mutable struct KernelPerceptronClassifier <: BetaMLSupervisedModel
hpar:: KernelPerceptronC_hp
opt::BML_options
par::Union{Nothing,KernelPerceptronClassifier_lp}
cres::Union{Nothing,Vector}
fitted::Bool
info::Dict{String,Any}
end
function KernelPerceptronClassifier(;kwargs...)
m = KernelPerceptronClassifier( KernelPerceptronC_hp(),BML_options(),KernelPerceptronClassifier_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a [`KernelPerceptronClassifier`](@ref) model.
"""
function fit!(m::KernelPerceptronClassifier,X,Y)
m.fitted || autotune!(m,(X,Y))
# Parameter alias..
kernel = m.hpar.kernel
initial_errors = m.hpar.initial_errors
epochs = m.hpar.epochs
shuffle = m.hpar.shuffle
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
nR,nD = size(X)
yclasses = unique(Y)
nCl = length(yclasses)
nModels = Int((nCl * (nCl - 1)) / 2)
initial_errors = (initial_errors == nothing) ? [zeros(Int64,nR) for i in 1:nModels] : initial_errors
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
out = kernel_perceptron_classifier(X, Y; K=kernel, T=epochs, α=initial_errors, nMsgs=nMsgs, shuffle=shuffle, rng = rng, verbosity=verbosity)
m.par = KernelPerceptronClassifier_lp(out.x,out.y,out.α,out.classes)
if cache
out = predict(X,m.par.xtrain,m.par.ytrain,m.par.errors,m.par.classes;K=kernel)
m.cres = cache ? out : nothing
end
m.info["fitted_records"] = nR
m.info["xndims"] = nD
m.info["n_classes"] = nCl
m.info["nModels"] = nModels
m.fitted = true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict labels using a fitted [`KernelPerceptronClassifier`](@ref) model.
"""
function predict(m::KernelPerceptronClassifier,X)
return predict(X,m.par.xtrain,m.par.ytrain,m.par.errors,m.par.classes;K=m.hpar.kernel)
end
function show(io::IO, ::MIME"text/plain", m::KernelPerceptronClassifier)
if m.fitted == false
print(io,"KernelPerceptronClassifier - A \"kernelised\" version of the perceptron classifier (unfitted)")
else
print(io,"KernelPerceptronClassifier - A \"kernelised\" version of the perceptron classifier (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::KernelPerceptronClassifier)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
println(io,"KernelPerceptronClassifier - A \"kernelised\" version of the perceptron classifier (unfitted)")
else
println(io,"KernelPerceptronClassifier - A $(m.info["xndims"])-dimensions $(m.info["n_classes"])-classes \"kernelised\" version of the perceptron classifier (fitted on $(m.info["fitted_records"]) records)")
print(io,"Kernel: ")
print(io,m.hpar.kernel)
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |