"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
pegasos(x,y;θ,θ₀,λ,η,T,nMsgs,shuffle,force_origin,return_mean_hyperplane)
Train the multiclass "pegasos" classifier on the features x and the labels y.
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model `PegasosClassifier` instead.
PegasosClassifier is a _linear_, gradient-based classifier. Multiclass is supported using a one-vs-all approach.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data, can be in any format (string, integers..)
* `θ`: Initial value of the weights (parameter) [def: `zeros(d)`]
* `θ₀`: Initial value of the weight (parameter) associated to the constant term [def: `0`]
* `λ`: Multiplicative term of the learning rate
* `η`: Learning rate [def: (t -> 1/sqrt(t))]
* `T`: Maximum number of iterations across the whole set (if the set is not fully classified earlier) [def: 1000]
* `nMsgs`: Maximum number of messages to show if all iterations are done
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `force_origin`: Whether to force `θ₀` to remain zero [def: `false`]
* `return_mean_hyperplane`: Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Return a named tuple with:
* `θ`: The weights of the classifier
* `θ₀`: The weight of the classifier associated to the constant term
* `classes`: The classes (unique values) of y
# Notes:
* The trained parameters can then be used to make predictions using the function `predict()`.
* This model is available in the MLJ framework as the `PegasosClassifier`
# Example:
```jldoctest
julia> model = pegasos([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
julia> ŷ = predict([2.1 3.1; 7.3 5.2], model.θ, model.θ₀, model.classes)
```
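The one-vs-all reduction used internally simply recodes the labels of each class as ±1 and fits one binary separator per class. A minimal standalone sketch of that encoding (plain Julia, independent of BetaML internals):
```julia
y        = ["a","b","b","a","c"]
yclasses = unique(y)                      # ["a", "b", "c"]
# for each class c the labels become +1 (belongs to c) or -1 (any other class)
ybins    = [(y .== c) .* 2 .- 1 for c in yclasses]
# ybins[1] == [1, -1, -1, 1, -1]
```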
"""
function pegasos(x, y; θ=nothing,θ₀=nothing, λ=0.5,η= (t -> 1/sqrt(t)), T=1000, nMsgs=0, shuffle=false, force_origin=false,return_mean_hyperplane=false, rng = Random.GLOBAL_RNG, verbosity=NONE)
yclasses = unique(y)
nCl = length(yclasses)
nD = size(x,2)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
#if nCl == 2
# outθ = Array{Vector{Float64},1}(undef,1)
# outθ₀ = Array{Float64,1}(undef,1)
#else
outθ = Array{Vector{Float64},1}(undef,nCl)
outθ₀ = Array{Float64,1}(undef,nCl)
#end
if θ₀ == nothing
θ₀ = zeros(nCl)
end
if θ == nothing
θ = [zeros(nD) for _ in 1:nCl]
end
for (i,c) in enumerate(yclasses)
ybin = ((y .== c) .*2 .-1) # conversion to -1/+1
outBinary = pegasosBinary(x, ybin; θ=θ[i],θ₀=θ₀[i], λ=λ,η=η, T=T, nMsgs=nMsgs, shuffle=shuffle, force_origin=force_origin, rng=rng, verbosity=verbosity)
if return_mean_hyperplane
outθ[i] = outBinary.avgθ
outθ₀[i] = outBinary.avgθ₀
else
outθ[i] = outBinary.θ
outθ₀[i] = outBinary.θ₀
end
if nCl == 2
outθ[2] = - outθ[1]
outθ₀[2] = .- outθ₀[1]
break # if there are only two classes we do compute only one passage, as A vs B would be the same as B vs A
end
end
return (θ=outθ,θ₀=outθ₀,classes=yclasses)
end
"""
pegasosBinary(x,y;θ,θ₀,λ,η,T,nMsgs,shuffle,force_origin)
Train the binary "pegasos" classifier on the features x and the labels y.
!!! warning
Direct usage of this low-level function is deprecated. It has been unexported in BetaML 0.9.
Use the model `PegasosClassifier` instead.
# Parameters:
* `x`: Feature matrix of the training data (n × d)
* `y`: Associated labels of the training data, in the format of ±1
* `θ`: Initial value of the weights (parameter) [def: `zeros(d)`]
* `θ₀`: Initial value of the weight (parameter) associated to the constant term [def: `0`]
* `λ`: Multiplicative term of the learning rate
* `η`: Learning rate [def: (t -> 1/sqrt(t))]
* `T`: Maximum number of iterations across the whole set (if the set is not fully classified earlier) [def: 1000]
* `nMsgs`: Maximum number of messages to show if all iterations are done
* `shuffle`: Whether to randomly shuffle the data at each iteration [def: `false`]
* `force_origin`: Whether to force `θ₀` to remain zero [def: `false`]
# Return a named tuple with:
* `θ`: The final weights of the classifier
* `θ₀`: The final weight of the classifier associated to the constant term
* `avgθ`: The average weights of the classifier
* `avgθ₀`: The average weight of the classifier associated to the constant term
* `errors`: The number of errors in the last iteration
* `besterrors`: The minimum number of errors in classifying the data ever reached
* `iterations`: The actual number of iterations performed
* `separated`: Whether the data has been successfully separated
# Notes:
* The trained parameters can then be used to make predictions using the function `predict()`.
# Example:
```jldoctest
julia> pegasos([1.1 2.1; 5.3 4.2; 1.8 1.7], [-1,1,-1])
```
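The per-sample update implemented in the function body can be summarised, for a record `(xᵢ,yᵢ)` at step `t` with learning rate `ηₜ = η(t)`, as a shrink-and-correct step. A minimal standalone illustration (hypothetical helper working on plain vectors, mirroring the loop below):
```julia
function pegasos_step(θ::Vector{Float64}, θ₀::Float64, xi::Vector{Float64}, yi::Real, ηt::Real, λ::Real)
    if yi * (θ' * xi + θ₀) <= 0            # misclassified (or on the margin): shrink and correct
        θ  = (1 - ηt * λ) .* θ .+ (ηt * yi) .* xi
        θ₀ = θ₀ + ηt * yi
    else                                    # correctly classified: only shrink θ
        θ  = (1 - ηt * λ) .* θ
    end
    return θ, θ₀
end
```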
"""
function pegasosBinary(x, y; θ=zeros(size(x,2)),θ₀=0.0, λ=0.5,η= (t -> 1/sqrt(t)), T=1000, nMsgs=10, shuffle=false, force_origin=false, rng = Random.GLOBAL_RNG, verbosity=verbosity)
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
@codelocation
end
if nMsgs > 5
println("***\n*** Training pegasos for maximum $T iterations. Random shuffle: $shuffle")
end
x = makematrix(x)
(n,d) = size(x)
ny = size(y,1)
ny == n || error("y and x have different number of rows (records) !")
bestϵ = Inf
lastϵ = Inf
if force_origin θ₀ = 0.0; end
sumθ = θ; sumθ₀ = θ₀
@showprogress dt=1 desc="Training PegasosClassifier..." for t in 1:T
ϵ = 0
ηₜ = η(t)
if shuffle
# random shuffle x and y
ridx = Random.shuffle(rng, 1:size(x,1))
x = x[ridx, :]
y = y[ridx]
end
@inbounds for i in 1:n
if y[i]*(θ' * x[i,:] + θ₀) <= eps()
θ = (1-ηₜ*λ) * θ + ηₜ * y[i] * x[i,:]
θ₀ = force_origin ? 0.0 : θ₀ + ηₜ * y[i]
sumθ += θ; sumθ₀ += θ₀
ϵ += 1
else
θ = (1-ηₜ*λ) * θ
end
end
if (ϵ == 0)
if nMsgs > 5
println("*** Avg. error after epoch $t : $(ϵ/size(x)[1]) (all elements of the set has been correctly classified)")
end
return (θ=θ,θ₀=θ₀,avgθ=sumθ/(n*T),avgθ₀=sumθ₀/(n*T),errors=0,besterrors=0,iterations=t,separated=true)
elseif ϵ < bestϵ
bestϵ = ϵ
end
lastϵ = ϵ
if nMsgs != 0 && (t % ceil(T/nMsgs) == 0 || t == 1 || t == T)
println("Avg. error after iteration $t : $(ϵ/size(x)[1])")
end
end
return (θ=θ,θ₀=θ₀,avgθ=sumθ/(n*T),avgθ₀=sumθ₀/(n*T),errors=lastϵ,besterrors=bestϵ,iterations=T,separated=false)
end
# ----------------------------------------------
# API V2...
"""
$(TYPEDEF)
Hyperparameters for the [`PegasosClassifier`](@ref) model.
## Parameters:
$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct PegasosC_hp <: BetaMLHyperParametersSet
"Learning rate [def: (epoch -> 1/sqrt(epoch))]"
learning_rate::Function = (epoch -> 1/sqrt(epoch))
"Multiplicative term of the learning rate [def: `0.5`]"
learning_rate_multiplicative::Float64 = 0.5
"Initial parameters. If given, should be a matrix of n-classes by feature dimension + 1 (to include the constant term as the first element) [def: `nothing`, i.e. zeros]"
initial_parameters::Union{Nothing,Matrix{Float64}} = nothing
"Maximum number of epochs, i.e. passages trough the whole training sample [def: `1000`]"
epochs::Int64 = 1000
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"Whether to force the parameter associated with the constant term to remain zero [def: `false`]"
force_origin::Bool = false
" Whether to return the average hyperplane coefficients instead of the final ones [def: `false`]"
return_mean_hyperplane::Bool=false
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and eventually change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges=Dict("learning_rate" =>[(epoch -> 1/sqrt(epoch)),(epoch -> 1/epoch),(epoch -> 1)], "epochs" =>[50,100,1000,10000], "shuffle"=>[true,false], "force_origin"=>[true,false],"return_mean_hyperplane"=>[true,false]),multithreads=true)
end
Base.@kwdef mutable struct PegasosClassifier_lp <: BetaMLLearnableParametersSet
weights::Union{Nothing,Matrix{Float64}} = nothing
classes::Vector = []
end
"""
$(TYPEDEF)
The `PegasosClassifier` model, a _linear_, gradient-based classifier. Multiclass is supported using a one-vs-all approach.
See [`?PegasosC_hp`](@ref PegasosC_hp) and [`?BML_options`](@ref BML_options) for applicable hyperparameters and options.
# Example:
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> mod = PegasosClassifier(epochs=100,learning_rate = (epoch -> 0.05) )
PegasosClassifier - a loss-based linear classifier without regularisation term (unfitted)
julia> ŷ = fit!(mod,X,y) |> mode
***
*** Training pegasos for maximum 100 iterations. Random shuffle: true
Avg. error after iteration 1 : 0.5
*** Avg. error after epoch 3 : 0.0 (all elements of the set has been correctly classified)
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
"a"
```
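As a hypothetical continuation of the example above, the raw per-record predictions can also be kept instead of being collapsed to hard labels:
```julia
julia> scores = predict(mod,X);   # one prediction (scores over the classes) per record
julia> ŷ      = mode(scores);     # hard labels, equivalent to the pipeline above
```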
"""
mutable struct PegasosClassifier <: BetaMLSupervisedModel
hpar::PegasosC_hp
opt::BML_options
par::Union{Nothing,PegasosClassifier_lp}
cres::Union{Nothing,Vector}
fitted::Bool
info::Dict{String,Any}
end
function PegasosClassifier(;kwargs...)
m = PegasosClassifier(PegasosC_hp(),BML_options(),PegasosClassifier_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a [`PegasosClassifier`](@ref) model.
"""
function fit!(m::PegasosClassifier,X,Y)
m.fitted || autotune!(m,(X,Y))
# Parameter alias..
learning_rate = m.hpar.learning_rate
learning_rate_multiplicative = m.hpar.learning_rate_multiplicative
initial_parameters = m.hpar.initial_parameters
epochs = m.hpar.epochs
shuffle = m.hpar.shuffle
force_origin = m.hpar.force_origin
return_mean_hyperplane = m.hpar.return_mean_hyperplane
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
nR,nD = size(X)
yclasses = unique(Y)
nCl = length(yclasses)
initial_parameters = (initial_parameters == nothing) ? zeros(nCl, nD+1) : initial_parameters
if verbosity == NONE
nMsgs = 0
elseif verbosity <= LOW
nMsgs = 5
elseif verbosity <= STD
nMsgs = 10
elseif verbosity <= HIGH
nMsgs = 100
else
nMsgs = 100000
end
out = pegasos(X,Y; θ₀=initial_parameters[:,1], θ=[initial_parameters[c,2:end] for c in 1:nCl], λ=learning_rate_multiplicative, η=learning_rate, T=epochs, nMsgs=nMsgs, shuffle=shuffle, force_origin=force_origin, return_mean_hyperplane=return_mean_hyperplane, rng = rng, verbosity=verbosity)
weights = hcat(out.θ₀,vcat(out.θ' ...))
m.par = PegasosClassifier_lp(weights,out.classes)
if cache
out = predict(X,out.θ,out.θ₀,out.classes)
m.cres = cache ? out : nothing
end
m.info["fitted_records"] = nR
m.info["xndims"] = nD
m.info["n_classes"] = size(weights,1)
m.fitted = true
return cache ? m.cres : nothing
end
"""
$(TYPEDSIGNATURES)
Predict labels using a fitted [`PegasosClassifier`](@ref) model.
"""
function predict(m::PegasosClassifier,X)
θ₀ = [ i for i in m.par.weights[:,1]]
θ = [r for r in eachrow(m.par.weights[:,2:end])]
return predict(X,θ,θ₀,m.par.classes)
end
function show(io::IO, ::MIME"text/plain", m::PegasosClassifier)
if m.fitted == false
print(io,"PegasosClassifier - a loss-based linear classifier without regularisation term (unfitted)")
else
print(io,"PegasosClassifier - a loss-based linear classifier without regularisation term (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::PegasosClassifier)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
println(io,"PegasosClassifier - A loss-based linear classifier without regularisation term (unfitted)")
else
println(io,"PegasosClassifier - A $(m.info["xndims"])-dimensions $(m.info["n_classes"])-classes a loss-based linear classifier without regularisation term (fitted on $(m.info["fitted_records"]) records)")
println(io,"Weights:")
println(io,m.par.weights)
end
end
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
BetaML.Stats module
Implement classical statistical methods. EXPERIMENTAL!
The module provides the following functions. Use `?[type or function]` to access their full signature and detailed documentation:
# Hypothesis testing
Acknowledgments: most code is based on the MITx MOOC [Fundamentals of Statistics](https://www.edx.org/course/fundamentals-of-statistics)
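A hypothetical usage sketch (invented counts) of the chi-squared goodness-of-fit helper defined below:
```julia
# do 28/32/40 observed counts over three categories come from a uniform distribution?
res = goodnessOfFitDiscrete([28,32,40], [1/3,1/3,1/3]; α=0.05)
res.rejectedH₀    # true if the uniform hypothesis is rejected at the 5% level
```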
"""
module Stats
using LinearAlgebra, Random, Distributions
using ForceImport
@force using ..Api
@force using ..Utils
export welchSatterthwaite, huberLoss, check, mEstimationBruteForce, findQuantile, goodnessOfFitDiscrete, ksTest, computeDensity, computeKSTableValue
welchSatterthwaite(σx, σy,n,m) = Int(floor(((σx^2/n) + (σy^2/m))^2 / ( (σx^4/(n^2*(n-1)) + (σy^4/(m^2*(m-1)) ) ))))
huberLoss(x,δ=0.01) = abs(x) < δ ? x^2/2 : δ*(abs(x)-δ/2)
check(x,α) = x >=0 ? α * x : - (1- α) * x
"""
mEstimationBruteForce(obs,candidates,lossFunction=abs)
"Solve" m-estimation in 1-D by "brute-force", i.e. by trying all the candidates provided to the function.
"""
function mEstimationBruteForce(obs,candidates,lossFunction=abs)
score = +Inf
θstar = 0
for c in candidates
candidateScore = mean(lossFunction.(obs .- c))
if candidateScore < score
score = candidateScore
θstar = c
end
end
return θstar
end
# test
function findQuantile(obs,α;precision=0.001)
score = +Inf
quantile = 0
candidates = minimum(obs):precision:maximum(obs)
for c in candidates
candidateScore = mean(check.(obs .- c,α))
if candidateScore < score
score = candidateScore
quantile = c
end
end
return quantile
end
function goodnessOfFitDiscrete(data,p0=[1/length(data) for i in 1:length(data)];α=0.05)
K = length(p0)
N = sum(data)
if length(data) != K
@error "p0 and data must have the same number of categories!"
end
p̂ = data ./ N
T = N * sum((p̂[k] - p0[k])^2/p0[k] for k in 1:K)
χDist = Chisq(K-1)
rejectedH₀ = T > quantile(χDist,1-α)
p_value = 1 - cdf(χDist,T)
return (testValue=T, threshold=quantile(χDist,1-α),rejectedH₀=rejectedH₀, p_value=p_value)
end
function computeDensity(data,support)
counts = [count(i -> i==s,data) for s in support]
if length(data) > sum(counts)
shareDataNotInSupport = (length(data) - sum(counts)) / length(data)
if shareDataNotInSupport >= 0.0001
@warn "$shareDataNotInSupport of the data is not in the support"
end
end
return counts
end
"""
goodnessOfFitDiscrete(data,support,f₀;compressedData=true,α=0.05,d=0)
Perform a chi-squared goodness-of-fit test to check whether the data follow a particular discrete probability mass function.
The passed distribution must support the method `pdf(dist,x)` for the provided support.
H₀ can be either the PMF with a fully specified set of parameters or the PMF family in general. In the latter case the distribution object should be passed to this function with the MLE estimates that best fit the data (the estimation is NOT done inside this function), and the `d` parameter should be set to the number of estimated parameters in order to remove `d` degrees of freedom from the chi-squared test.
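A hypothetical sketch (invented counts) testing observed frequencies on the support `0:3` against a fully specified Binomial:
```julia
using Distributions
support = 0:3
counts  = [11, 35, 38, 16]          # observed frequencies for each support point
res     = goodnessOfFitDiscrete(counts, support, Binomial(3,0.5); compressedData=true, α=0.05, d=0)
(res.testValue, res.p_value)
```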
"""
function goodnessOfFitDiscrete(data,support,f₀;compressedData=true,α=0.05,d=0)
if !compressedData
data = computeDensity(data,support)
end
K = length(support)
N = sum(data)
p̂ = data ./ N
df = K - d - 1
p0 = pdf.(f₀,support)
T = N * sum((p̂[k] - p0[k])^2/p0[k] for k in 1:K)
χDist = Chisq(df)
rejectedH₀ = T > quantile(χDist,1-α)
p_value = 1 - cdf(χDist,T)
return (testValue=T, threshold=quantile(χDist,1-α),rejectedH₀=rejectedH₀, p_value=p_value)
end
"""
ksTest(data,f₀;α=0.05,asymptoticThreshold)
Perform the Kolmogorov-Smirnov goodness-of-fit test, using the asymptotic Kolmogorov distribution when N is above `asymptoticThreshold` (faster) or the exact, non-asymptotic KS distribution otherwise.
Note that as n → ∞, Distributions.quantile_bisect(distr,1-α) * sqrt(N) → quantile(distr,1-α)
For three-digit precision use an `asymptoticThreshold` >= 1000 (slower).
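A hypothetical sketch, testing a pseudo-random sample against a standard Normal:
```julia
using Distributions, Random
data = randn(MersenneTwister(123), 500)
res  = ksTest(data, Normal(0,1); α=0.05)
res.rejectedH₀     # expected to be false for most samples actually drawn from N(0,1)
```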
"""
function ksTest(data,f₀;α=0.05,asymptoticThreshold=100)
data = sort(data)
N = length(data)
cdfhat = collect(0:N) ./ N
maxDist = 0.0
for (n,x) in enumerate(data)
dist = max(abs(cdfhat[n]-cdf(f₀,x)), abs(cdfhat[n+1]-cdf(f₀,x)))
if dist > maxDist
maxDist = dist
end
end
T = sqrt(N) * maxDist
distr = N > asymptoticThreshold ? Kolmogorov() : KSDist(N)
q = N > asymptoticThreshold ? quantile(distr,1-α) : Distributions.quantile_bisect(distr,1-α) * sqrt(N)
rejectedH₀ = T > q
p_value = 1 - cdf(distr,T)
return (testValue=T, threshold=q,rejectedH₀=rejectedH₀, p_value=p_value)
end
"""
computeKSTableValue(f₀,N,α,repetitions=1000)
Compute the values of the Kolmogorov-Smirnov table by numerical simulation
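A hypothetical sketch: the simulated 5% critical value for 20 samples from a standard Normal should be close to the tabulated value of ≈ 0.29 (the result varies with the Monte-Carlo draws):
```julia
using Distributions
computeKSTableValue(Normal(0,1), 20, 0.05, 5000)
```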
"""
function computeKSTableValue(f₀,N,α,repetitions=1000)
Ts = Array{Float64,1}(undef,repetitions)
for rep in 1:repetitions
data = sort(rand(f₀,N))
N = length(data)
cdfhat = collect(0:N) ./ N
maxDist = 0.0
for (n,x) in enumerate(data)
dist = max(abs(cdfhat[n]-cdf(f₀,x)), abs(cdfhat[n+1]-cdf(f₀,x)))
if dist > maxDist
maxDist = dist
end
end
T = sqrt(N) * maxDist
Ts[rep] = T
end
Ts = sort(Ts)
return Ts[Int(ceil((1-α)*repetitions))]/sqrt(N)
end
end # end module
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
Implementation of the `AbstractTrees.jl`-interface
(see: [AbstractTrees.jl](https://github.com/JuliaCollections/AbstractTrees.jl)).
The functions `children` and `printnode` make up the interface traits of `AbstractTrees.jl`.
This enables the visualization of a `BetaML/DecisionTree` using a plot recipe.
For more information see [JuliaAI/DecisionTree.jl](https://github.com/JuliaAI/DecisionTree.jl).
The file `src/abstract_trees.jl` in that repo serves as a model implementation.
"""
export InfoNode, InfoLeaf, wrapdn, DecisionNode, Leaf
"""
These types are introduced so that additional information currently not present in
a `DecisionTree`-structure -- namely the feature names --
can be used for visualization.
"""
struct InfoNode{T} <: AbstractTrees.AbstractNode{DecisionNode{T}}
node :: DecisionNode{T}
info :: NamedTuple
end
AbstractTrees.nodevalue(n::InfoNode) = n.node # round(n.node,sigdigits=4)
struct InfoLeaf{T} <: AbstractTrees.AbstractNode{Leaf{T}}
leaf :: Leaf{T}
info :: NamedTuple
end
AbstractTrees.nodevalue(l::InfoLeaf) = l.leaf # round(l.leaf,sigdigits=4)
"""
wrapdn(node:: DecisionNode, ...)
Called on the root node of a `DecsionTree` `dc` in order to add visualization information.
In case of a `BetaML/DecisionTree` this is typically a list of feature names as follows:
`wdc = wrapdn(dc, featurenames = ["Colour","Size"])`
"""
wrapdn(node::DecisionNode, info::NamedTuple = NamedTuple()) = InfoNode(node, info)
wrapdn(leaf::Leaf, info::NamedTuple = NamedTuple()) = InfoLeaf(leaf, info)
wrapdn(mod::DecisionTreeEstimator, info::NamedTuple = NamedTuple()) = wrapdn(mod.par.tree, info)
wrapdn(m::Union{DecisionNode,Leaf,DecisionTreeEstimator};featurenames=[]) = wrapdn(m,(featurenames=featurenames,))
#### Implementation of the `AbstractTrees`-interface
AbstractTrees.children(node::InfoNode) = (
wrapdn(node.node.trueBranch, node.info),
wrapdn(node.node.falseBranch, node.info)
)
AbstractTrees.children(node::InfoLeaf) = ()
function AbstractTrees.printnode(io::IO, node::InfoNode)
q = node.node.question
condition = isa(q.value, Number) ? ">=" : "=="
col = :featurenames ∈ keys(node.info) ? node.info.featurenames[q.column] : q.column
print(io, "$(col) $condition $(q.value)?")
end
function AbstractTrees.printnode(io::IO, leaf::InfoLeaf)
for p in leaf.leaf.predictions
if isa(p, Pair)
println(io, Pair(p[1],round(p[2],sigdigits=4)))
elseif isa(p,Number)
println(io, round(p,sigdigits=4))
else
println(io, p)
end
end
end
function show(io::IO,node::Union{InfoNode,InfoLeaf})
#print(io, "Is col $(question.column) $condition $(question.value) ?")
print(io, "A wrapped Decision Tree")
end
function show(io::IO, ::MIME"text/plain", node::Union{InfoNode,InfoLeaf})
print(io, "A wrapped Decision Tree")
end
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# ------------------------------------------------------------------------------
# TYPE HIERARCHY AND DEFINITIONS
abstract type AbstractNode end
abstract type AbstractDecisionNode <: AbstractNode end
abstract type AbstractLeaf <: AbstractNode end
struct BetaMLClass
d::Int64
end
function convert(::Type{BetaMLClass},x::Integer)
return BetaMLClass(x)
end
function convert(::Type{BetaMLClass},x)
return x
end
function convert(::Type{T},x::BetaMLClass) where {T <:Integer}
return convert(T,x.d)
end
function convert(::Type{BetaMLClass}, x::BetaMLClass)
return x
end
"""
Question
A question used to partition a dataset.
This struct just records a 'column number' and a 'column value' (e.g., Green).
"""
abstract type AbstractQuestion end
struct Question{Tx} <: AbstractQuestion
column::Int64
value::Tx
end
"""
Leaf(y,depth)
A tree's leaf (terminal) node.
# Constructor's arguments:
- `y`: The labels associated with each record (either numerical or categorical)
- `depth`: The node's depth in the tree
# Struct members:
- `predictions`: Either the relative label's count (i.e. a PMF) or the mean
- `depth`: The node's depth in the tree
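For example (a minimal sketch using the unexported constructor): categorical labels produce a PMF over the classes, while numerical labels produce their mean.
```julia
Leaf(["a","a","b"], 1).predictions     # Dict("a" => 0.6666…, "b" => 0.3333…)
Leaf([1.0, 2.0, 3.0], 1).predictions   # 2.0
```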
"""
struct Leaf{Ty} <: AbstractLeaf
predictions::Union{Number,Dict{Ty,Float64}}
depth::Int64
npoints::Int64
function Leaf(y::AbstractArray{Ty,1},depth::Int64) where {Ty}
if eltype(y) <: Number
rawPredictions = y
predictions = mean(rawPredictions)
else
rawPredictions = class_counts_with_labels(y)
total = sum(values(rawPredictions))
predictions = Dict{Ty,Float64}()
[predictions[k] = rawPredictions[k] / total for k in keys(rawPredictions)]
end
new{Ty}(predictions,depth,length(y))
end
end
struct TempNode
trueBranch::Bool
parentNode::AbstractDecisionNode
depth::Int64
x
y
end
"""
A Decision Node asks a question.
This holds a reference to the question, and to the two child nodes.
"""
"""
DecisionNode(question,trueBranch,falseBranch, depth)
A tree's non-terminal node.
# Constructor's arguments and struct members:
- `question`: The question asked in this node
- `trueBranch`: A reference to the "true" branch of the trees
- `falseBranch`: A reference to the "false" branch of the trees
- `depth`: The nodes's depth in the tree
"""
mutable struct DecisionNode{Tx} <: AbstractDecisionNode
# Note that a decision node is indeed type unstable, as it host other decision nodes whose X type could be different (different X features can have different type)
question::Question{Tx}
trueBranch::Union{Nothing,AbstractNode}
falseBranch::Union{Nothing,AbstractNode}
depth::Int64
pTrue::Float64
function DecisionNode(question::Question{Tx},trueBranch::Union{Nothing,AbstractNode},falseBranch::Union{Nothing,AbstractNode}, depth,pTrue) where {Tx}
return new{Tx}(question,trueBranch,falseBranch, depth,pTrue)
end
end
# Avi v2..
"""
$(TYPEDEF)
Hyperparameters for [`DecisionTreeEstimator`](@ref) (Decision Tree).
## Parameters:
$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct DecisionTreeE_hp <: BetaMLHyperParametersSet
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `nothing`, i.e. no limits]"
max_depth::Union{Nothing,Int64} = nothing
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64 = 0.0
"The minimum number of records a node must holds to consider for a partition of it [def: `2`]"
min_records::Int64 = 2
"The maximum number of (random) features to consider at each partitioning [def: `nothing`, i.e. look at all features]"
max_features::Union{Nothing,Int64} = nothing
"Whether to force a classification task even if the labels are numerical (typically when labels are integers encoding some feature rather than representing a real cardinal measure) [def: `false`]"
force_classification::Bool = false
"This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference betwwen the \"impurity\" of the labels of the parent node with those of the two child nodes, weighted by the respective number of items. [def: `nothing`, i.e. `gini` for categorical labels (classification task) and `variance` for numerical labels(regression task)]. Either `gini`, `entropy`, `variance` or a custom function. It can also be an anonymous function."
splitting_criterion::Union{Nothing,Function} = nothing
"Use an experimental faster algoritm for looking up the best split in ordered fields (colums). Currently it brings down the fitting time of an order of magnitude, but predictions are sensibly affected. If used, control the meaning of integer fields with `integer_encoded_cols`."
fast_algorithm::Bool = false
"A vector of columns positions to specify which integer columns should be treated as encoding of categorical variables insteads of ordered classes/values. [def: `nothing`, integer columns with less than 20 unique values are considered categorical]. Useful in conjunction with `fast_algorithm`, little difference otherwise."
integer_encoded_cols::Union{Nothing,Array{Int64,1}} =nothing
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and eventually change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges=Dict("max_depth" =>[5,10,nothing], "min_gain"=>[0.0, 0.1, 0.5], "min_records"=>[2,3,5],"max_features"=>[nothing,5,10,30]),multithreads=true)
end
Base.@kwdef mutable struct DT_lp <: BetaMLLearnableParametersSet
tree::Union{Nothing,AbstractNode} = nothing
Ty::DataType = Any
end
"""
$(TYPEDEF)
A Decision Tree classifier and regressor (supervised).
Decision Tree works by finding the "best" question to split the fitting data (according to the metric specified by the parameter `splitting_criterion` on the associated labels) until either all the dataset is separated or a terminal condition is reached.
For the parameters see [`?DecisionTreeE_hp`](@ref DecisionTreeE_hp) and [`?BML_options`](@ref BML_options).
# Notes:
- Online fitting (re-fitting with new data) is not supported
- Missing data (in the feature dataset) is supported.
# Examples:
- Classification...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> mod = DecisionTreeEstimator(max_depth=5)
DecisionTreeEstimator - A Decision Tree model (unfitted)
julia> ŷ = fit!(mod,X,y) |> mode
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
"a"
julia> println(mod)
DecisionTreeEstimator - A Decision Tree classifier (fitted on 6 records)
Dict{String, Any}("job_is_regression" => 0, "fitted_records" => 6, "max_reached_depth" => 2, "avg_depth" => 2.0, "xndims" => 2)
*** Printing Decision Tree: ***
1. Is col 2 >= 18.0 ?
--> True : Dict("b" => 1.0)
--> False: Dict("a" => 1.0)
```
- Regression...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = 2 .* X[:,1] .- X[:,2] .+ 3;
julia> mod = DecisionTreeEstimator(max_depth=10)
DecisionTreeEstimator - A Decision Tree model (unfitted)
julia> ŷ = fit!(mod,X,y);
julia> hcat(y,ŷ)
6×2 Matrix{Float64}:
4.1 3.4
-16.5 -17.45
-13.8 -13.8
-18.4 -17.45
-27.2 -27.2
2.7 3.4
julia> println(mod)
DecisionTreeEstimator - A Decision Tree regressor (fitted on 6 records)
Dict{String, Any}("job_is_regression" => 1, "fitted_records" => 6, "max_reached_depth" => 4, "avg_depth" => 3.25, "xndims" => 2)
*** Printing Decision Tree: ***
1. Is col 2 >= 18.0 ?
--> True :
1.2. Is col 2 >= 31.0 ?
--> True : -27.2
--> False:
1.2.3. Is col 2 >= 20.5 ?
--> True : -17.450000000000003
--> False: -13.8
--> False: 3.3999999999999995
```
- Visualisation...
You can either text-print or plot a decision tree using the `AbstractTree` and `TreeRecipe` package..
```julia
julia> println(mod)
DecisionTreeEstimator - A Decision Tree regressor (fitted on 6 records)
Dict{String, Any}("job_is_regression" => 1, "fitted_records" => 6, "max_reached_depth" => 4, "avg_depth" => 3.25, "xndims" => 2)
*** Printing Decision Tree: ***
1. Is col 2 >= 18.0 ?
--> True :
1.2. Is col 2 >= 31.0 ?
--> True : -27.2
--> False:
1.2.3. Is col 2 >= 20.5 ?
--> True : -17.450000000000003
--> False: -13.8
--> False: 3.3999999999999995
julia> using Plots, TreeRecipe, AbstractTrees
julia> featurenames = ["Something", "Som else"];
julia> wrapped_tree = wrapdn(dtree, featurenames = featurenames); # featurenames is otional
julia> print_tree(wrapped_tree)
Som else >= 18.0?
├─ Som else >= 31.0?
│ ├─ -27.2
│ │
│ └─ Som else >= 20.5?
│ ├─ -17.450000000000003
│ │
│ └─ -13.8
│
└─ 3.3999999999999995
julia> plot(wrapped_tree)
```

"""
mutable struct DecisionTreeEstimator <: BetaMLSupervisedModel
hpar::DecisionTreeE_hp
opt::BML_options
par::Union{Nothing,DT_lp}
cres
fitted::Bool
info::Dict{String,Any}
end
function DecisionTreeEstimator(;kwargs...)
m = DecisionTreeEstimator(DecisionTreeE_hp(),BML_options(),DT_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
# ------------------------------------------------------------------------------
# MODEL ALGORITHMS AND TRAINING
"""
match(question, x)
Return the dichotomous (true/false) answer of a question when applied to a given feature record.
It compares the feature value in the given record to the value stored in the
question.
Numerical features are compared in terms of inequality (">="), while categorical features are compared in terms of equality ("==").
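A couple of hypothetical calls (unexported types) illustrating the two comparison modes:
```julia
match(Question(2, 18.0), [1.8, 2.5])          # false: 2.5 >= 18.0 does not hold
match(Question(1, "green"), ["green", 10])    # true : categorical equality
```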
"""
function match(question::Question{Tx}, x) where {Tx}
val = x[question.column]
if Tx <: Number
return val >= question.value
else
return val == question.value
end
end
"""
partition(question,x)
Dichotomously partition a dataset `x` given a question.
For each row in the dataset, check if it matches the question. If so, add it to 'true rows', otherwise, add it to 'false rows'.
Rows with missing values on the question column are assigned randomly proportionally to the assignment of the non-missing rows.
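A hypothetical sketch of the returned boolean mask (no missing values, so `mCols` is empty):
```julia
x       = [1.8 2.5; 0.5 20.5; 0.6 18.0]
trueIdx = partition(Question(2, 18.0), x, Int64[])   # [false, true, true]
```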
"""
function partition(question::Question{Tx},x,mCols;sorted=false,rng = Random.GLOBAL_RNG) where {Tx}
N = size(x,1)
# TODO: possible huge improvement: pass to partition only the individual column of x rather than the whole x on all the columns
trueIdx = fill(false,N);
if in(question.column,mCols) # do we have missings in this col ?
missingIdx = fill(false,N)
nFalse = 0
@inbounds for (rIdx,row) in enumerate(eachrow(x))
if(ismissing(row[question.column]))
missingIdx[rIdx] = true
elseif match(question,row)
trueIdx[rIdx] = true
else
nFalse += 1
end
end
# Assigning missing rows randomly proportionally to non-missing rows
p = sum(trueIdx)/(sum(trueIdx)+nFalse)
@inbounds for rIdx in 1:N
if missingIdx[rIdx]
r = rand(rng)
if r <= p
trueIdx[rIdx] = true
end
end
end
else
if sorted
#val = x[question.column]
@views idx = searchsorted(x[:,question.column], question.value)
if Tx <: Number
trueIdx[first(idx):end] .= true
else
trueIdx[idx] .= true
end
else
@inbounds for (rIdx,row) in enumerate(eachrow(x))
if match(question,row)
trueIdx[rIdx] = true
end
end
end
end
return trueIdx
end
"""
infoGain(left, right, parentUncertainty; splitting_criterion)
Compute the information gain of a specific partition.
The "information gain" is computed by measuring the difference between the "impurity" of the labels of the parent node and those of the two child nodes, weighted by the respective number of items.
# Parameters:
- `leftY`: Child #1 labels
- `rightY`: Child #2 labels
- `parentUncertainty`: "Impurity" of the labels of the parent node
- `splitting_criterion`: Metric to adopt to determine the "impurity" (see below)
You can use your own function as the metric. We provide the following built-in metrics:
- `gini` (categorical)
- `entropy` (categorical)
- `variance` (numerical)
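A worked example with the `gini` impurity (hypothetical labels): the parent `["a","a","b","b"]` has impurity 0.5, and a perfect split leaves both children pure, so the gain is 0.5.
```julia
infoGain(["a","a"], ["b","b"], 0.5, splitting_criterion=gini)   # ≈ 0.5
```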
"""
function infoGain(leftY, rightY, parentUncertainty; splitting_criterion=gini)
n_left = length(leftY)
n_right = length(rightY)
n_total = n_left + n_right
p = n_left / n_total
left_score = Float64(splitting_criterion(leftY))
right_score = Float64(splitting_criterion(rightY))
return parentUncertainty - p * left_score - (1 - p) * right_score
end
function infoGainOld(leftY, rightY, parentUncertainty; splitting_criterion=gini)
p = size(leftY,1) / (size(leftY,1) + size(rightY,1))
return parentUncertainty - p * splitting_criterion(leftY) - (1 - p) * splitting_criterion(rightY)
end
function findbestgain_sortedvector(x, y, d, candidates; mCols, currentUncertainty, splitting_criterion, rng)
n = length(candidates)
if n < 2
return candidates[1]
end
l = max(1, div(n, 4)) # lower bound candidate
u = min(n, div(3 * n, 4)) # upper bound candidate
lquestion = Question(d, candidates[l])
ltrueIdx = partition(lquestion, x, mCols, sorted=true, rng=rng)
lgain = (any(ltrueIdx) && !all(ltrueIdx)) ? infoGain(y[ltrueIdx], y[.!ltrueIdx], currentUncertainty, splitting_criterion=splitting_criterion) : 0.0
uquestion = Question(d, candidates[u])
utrueIdx = partition(uquestion, x, mCols, sorted=true, rng=rng)
ugain = (any(utrueIdx) && !all(utrueIdx)) ? infoGain(y[utrueIdx], y[.!utrueIdx], currentUncertainty, splitting_criterion=splitting_criterion) : 0.0
return (lgain > ugain) ? findbestgain_sortedvector(x, y, d, candidates[1:u-1]; mCols=mCols, currentUncertainty=currentUncertainty, splitting_criterion=splitting_criterion, rng=rng) :
findbestgain_sortedvector(x, y, d, candidates[l+1:end]; mCols=mCols, currentUncertainty=currentUncertainty, splitting_criterion=splitting_criterion, rng=rng)
end
function findbestgain_sortedvectorOLD(x,y,d,candidates;mCols,currentUncertainty,splitting_criterion,rng)
#println(splitting_criterion)
#println("HERE I AM CALLED ! Dimension $d, candidates: ", candidates)
n = size(candidates,1)
#println("n is: ",n)
#println("candidates is: ", candidates)
if n < 2
return candidates[1]
end
l = max(1,Int(round((1/4) * n ))) # lower bound candidate
u = min(n,Int(round((3/4) * n ))) # upper bound candidate
#println("l is: ",l)
#println("u is: ",u)
lquestion = Question(d, candidates[l])
ltrueIdx = partition(lquestion,x,mCols,sorted=true,rng=rng)
lgain = 0.0
if !all(ltrueIdx) && any(ltrueIdx)
lgain = infoGain(y[ltrueIdx], y[map(!,ltrueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
end
uquestion = Question(d, candidates[u])
utrueIdx = partition(uquestion,x,mCols,sorted=true,rng=rng)
ugain = 0.0
if !all(utrueIdx) && any(utrueIdx)
ugain = infoGain(y[utrueIdx], y[map(!,utrueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
end
if lgain > ugain
return findbestgain_sortedvector(x,y,d,candidates[1:u-1];mCols=mCols,currentUncertainty=currentUncertainty,splitting_criterion=splitting_criterion,rng=rng)
else
return findbestgain_sortedvector(x,y,d,candidates[l+1:n];mCols=mCols,currentUncertainty=currentUncertainty,splitting_criterion=splitting_criterion,rng=rng)
end
end
"""
findBestSplit(x,y;max_features,splitting_criterion)
Find the best possible split of the dataset.
Find the best question to ask by iterating over every feature / value and calculating the information gain.
# Parameters:
- `x`: The feature dataset
- `y`: The labels dataset
- `max_features`: Maximum number of (random) features to look up for the "best split"
- `splitting_criterion`: The metric to define the "impurity" of the labels
- `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
"""
function findBestSplit(x,y::AbstractArray{Ty,1}, mCols;max_features,splitting_criterion=gini, integer_encoded_cols, fast_algorithm, rng = Random.GLOBAL_RNG) where {Ty}
bestGain = 0.0 # keep track of the best information gain
bestQuestion = Question(1,1.0) # keep track of the feature / value that produced it
currentUncertainty = Float64(splitting_criterion(y))
(N,D) = size(x) # number of columns (the last column is the label)
left_buffer = Array{Ty,1}(undef,N)
right_buffer = Array{Ty,1}(undef,N)
featuresToConsider = (max_features >= D) ? (1:D) : sample(rng, 1:D, max_features, replace=false)
for d in featuresToConsider # for each feature (we consider only max_features features randomly)
values = unique(skipmissing(x[:,d])) # unique values in the column
sortable = Utils.issortable(x[:,d])
if(sortable && !in(d,integer_encoded_cols))
sortIdx = sortperm(x[:,d])
sortedx = x[sortIdx,:]
sortedy = y[sortIdx]
if fast_algorithm
bestvalue = findbestgain_sortedvector(sortedx,sortedy,d,sortedx;mCols=mCols,currentUncertainty=currentUncertainty,splitting_criterion=splitting_criterion,rng=rng)
bestQuestionD = Question(d,bestvalue)
btrueIdx = partition(bestQuestionD,sortedx,mCols,sorted=true,rng=rng)
bestGainD = 0.0 # keep track of the best information gain
if !all(btrueIdx) && any(btrueIdx)
bestGainD = infoGain(sortedy[btrueIdx], sortedy[map(!,btrueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
end
if bestGainD >= bestGain
bestGain, bestQuestion = bestGainD, bestQuestionD
end
else
for val in values # for each value- it is this one that I can optimize (when it is sortable)!
# try splitting the dataset
#println(question)
question = Question(d, val)
trueIdx = partition(question,sortedx,mCols,sorted=true,rng=rng)
# Skip this split if it doesn't divide the
# dataset.
if all(trueIdx) || ! any(trueIdx)
continue
end
# Calculate the information gain from this split
#=
@no_escape begin
left = @alloc(eltype(sortedy), length(trueIdx))
right = @alloc(eltype(sortedy), length(sortedy)-sum(trueIdx))
#println(length(left))
#println(length(right))
nl = 1; nr = 1
for i in 1:length(sortedy)
if trueIdx[i]
left[nl] = sortedy[i]
nl += 1
else
right[nr] = sortedy[i]
nr += 1
end
end
@views gain = infoGain(left, right , currentUncertainty, splitting_criterion=splitting_criterion)
end
=#
nl = 1; nr = 1
Nl = sum(trueIdx)
Nr = N - Nl
for i in 1:N
if trueIdx[i]
left_buffer[nl] = sortedy[i]
nl += 1
else
right_buffer[nr] = sortedy[i]
nr += 1
end
end
@views gain = infoGain(left_buffer[1:Nl], right_buffer[1:Nr] , currentUncertainty, splitting_criterion=splitting_criterion)
#=
left = @view sortedy[trueIdx]
right = @view sortedy[map(!,trueIdx)]
gain = infoGain(left, right , currentUncertainty, splitting_criterion=splitting_criterion)
=#
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= bestGain
# println("*** New best gain: ", question)
bestGain, bestQuestion = gain, question
#else
# println(" bad gain: ", question)
end
end
end
else
sortIdx = 1:N
sortedx = x
sortedy = y
for val in values # for each value - not optimisable
# try splitting the dataset
#println(question)
question = Question(d, val)
trueIdx = partition(question,sortedx,mCols,sorted=false,rng=rng)
# Skip this split if it doesn't divide the
# dataset.
if all(trueIdx) || ! any(trueIdx)
continue
end
nl = 1; nr = 1
Nl = sum(trueIdx)
Nr = N - Nl
for i in 1:N
if trueIdx[i]
left_buffer[nl] = sortedy[i]
nl += 1
else
right_buffer[nr] = sortedy[i]
nr += 1
end
end
# Calculate the information gain from this split
@views gain = infoGain(left_buffer[1:Nl], right_buffer[1:Nr] , currentUncertainty, splitting_criterion=splitting_criterion)
if gain >= bestGain
bestGain, bestQuestion = gain, question
end
end
end
end
return bestGain, bestQuestion
end
function findBestSplitOLD(x,y::AbstractArray{Ty,1}, mCols;max_features,splitting_criterion=gini, integer_encoded_cols, fast_algorithm, rng = Random.GLOBAL_RNG) where {Ty}
bestGain = 0.0 # keep track of the best information gain
bestQuestion = Question(1,1.0) # keep track of the feature / value that produced it
currentUncertainty = splitting_criterion(y)
(N,D) = size(x) # number of columns (the last column is the label)
featuresToConsider = (max_features >= D) ? (1:D) : StatsBase.sample(rng, 1:D, max_features, replace=false)
for d in featuresToConsider # for each feature (we consider only max_features features randomly)
values = Set(skipmissing(x[:,d])) # unique values in the column
sortable = Utils.issortable(x[:,d])
if(sortable && !in(d,integer_encoded_cols))
sortIdx = sortperm(x[:,d])
sortedx = x[sortIdx,:]
sortedy = y[sortIdx]
if fast_algorithm
bestvalue = findbestgain_sortedvector(sortedx,sortedy,d,sortedx;mCols=mCols,currentUncertainty=currentUncertainty,splitting_criterion=splitting_criterion,rng=rng)
bestQuestionD = Question(d,bestvalue)
btrueIdx = partition(bestQuestionD,sortedx,mCols,sorted=true,rng=rng)
bestGainD = 0.0 # keep track of the best information gain
if !all(btrueIdx) && any(btrueIdx)
bestGainD = infoGain(sortedy[btrueIdx], sortedy[map(!,btrueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
end
if bestGainD >= bestGain
bestGain, bestQuestion = bestGainD, bestQuestionD
end
else
for val in values # for each value- it is this one that I can optimize (when it is sortable)!
question = Question(d, val)
# try splitting the dataset
#println(question)
trueIdx = partition(question,sortedx,mCols,sorted=true,rng=rng)
# Skip this split if it doesn't divide the
# dataset.
if all(trueIdx) || ! any(trueIdx)
continue
end
# Calculate the information gain from this split
gain = infoGain(sortedy[trueIdx], sortedy[map(!,trueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= bestGain
# println("*** New best gain: ", question)
bestGain, bestQuestion = gain, question
#else
# println(" bad gain: ", question)
end
end
end
else
sortIdx = 1:N
sortedx = x
sortedy = y
for val in values # for each value - not optimisable
question = Question(d, val)
# try splitting the dataset
#println(question)
trueIdx = partition(question,sortedx,mCols,sorted=false,rng=rng)
# Skip this split if it doesn't divide the
# dataset.
if all(trueIdx) || ! any(trueIdx)
continue
end
# Calculate the information gain from this split
gain = infoGain(sortedy[trueIdx], sortedy[map(!,trueIdx)], currentUncertainty, splitting_criterion=splitting_criterion)
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= bestGain
bestGain, bestQuestion = gain, question
end
end
end
end
return bestGain, bestQuestion
end
"""
buildTree(x, y, depth; max_depth, min_gain, min_records, max_features, splitting_criterion, force_classification)
Builds (define and train) a Decision Tree.
!!! warning
Direct usage of this low-level function is deprecated and it has been unexported in BetaML 0.9.
Use [`DecisionTreeEstimator`](@ref) instead.
Given a dataset of features `x` and the corresponding dataset of labels `y`, recursively build a decision tree by finding at each node the best question to split the data until either all the dataset is separated or a terminal condition is reached.
The given tree is then returned.
# Parameters:
- `x`: The dataset's features (N × D)
- `y`: The dataset's labels (N × 1)
- `max_depth`: The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `N`, i.e. no limits]
- `min_gain`: The minimum information gain to allow for a node's partition [def: `0`]
- `min_records`: The minimum number of records a node must holds to consider for a partition of it [def: `2`]
- `max_features`: The maximum number of (random) features to consider at each partitioning [def: `D`, i.e. look at all features]
- `splitting_criterion`: Either `gini`, `entropy` or `variance`[def: `gini` for categorical labels (classification task) and `variance` for numerical labels(regression task)]
- `force_classification`: Whether to force a classification task even if the labels are numerical (typically when labels are integers encoding some feature rather than representing a real cardinal measure) [def: `false`]
- `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
Missing data (in the feature dataset) are supported.
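A hypothetical usage sketch of this (deprecated, unexported) function on a tiny dataset:
```julia
x    = [1.8 2.5; 0.5 20.5; 0.6 18.0; 0.7 22.8]
y    = ["a","b","b","b"]
tree = buildTree(x, y, max_depth=3)   # returns the root DecisionNode (or a Leaf)
```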
"""
function buildTree(x, y::AbstractArray{Ty,1}; max_depth = size(x,1), min_gain=0.0, min_records=2, max_features=size(x,2), force_classification=false, splitting_criterion = (Ty <: Number && !force_classification) ? variance : gini, integer_encoded_cols=nothing, fast_algorithm=false, mCols=nothing, rng = Random.GLOBAL_RNG, verbosity=NONE) where {Ty}
#println(depth)
# Force what would be a regression task into a classification task
if force_classification && Ty <: Number
y = string.(y)
end
if(mCols == nothing) mCols = cols_with_missing(x) end
nodes = TempNode[]
depth = 1
if isnothing(integer_encoded_cols)
integer_encoded_cols = Int64[]
for (d,c) in enumerate(eachcol(x))
if(all(isinteger_bml.(skipmissing(c)))) && length(unique(skipmissing(c))) < 20 # hardcoded: when using the automatic identification of integer-encoded cols, if less than 20 unique values, we consider the integers to be a categorically encoded variable
push!(integer_encoded_cols,d)
end
end
end
# Deciding if the root node is a Leaf itself or not
# Check whether this branch still has the minimum number of records required and whether we have reached the allowed max_depth. If so, declare it a leaf
if size(x,1) <= min_records || depth >= max_depth return Leaf(y, depth) end
# Try partitioning the dataset on each of the unique attribute values,
# calculate the information gain,
# and return the question that produces the highest gain.
gain, question = findBestSplit(x,y,mCols;max_features=max_features,splitting_criterion=splitting_criterion,integer_encoded_cols=integer_encoded_cols,fast_algorithm=fast_algorithm,rng=rng)
# Base case: no further info gain
# Since we can ask no further questions,
# we'll return a leaf.
if gain <= min_gain return Leaf(y, depth) end
trueIdx = partition(question,x,mCols,rng=rng)
rootNode = DecisionNode(question,nothing,nothing,1,sum(trueIdx)/length(trueIdx))
push!(nodes,TempNode(true,rootNode,depth+1,x[trueIdx,:],y[trueIdx]))
push!(nodes,TempNode(false,rootNode,depth+1,x[map(!,trueIdx),:],y[map(!,trueIdx)]))
while length(nodes) > 0
thisNode = pop!(nodes)
# Check whether this branch still has the minimum number of records required, that we haven't reached the allowed max_depth and that there is still a gain in splitting. If so, declare it a leaf
isLeaf = false
if size(thisNode.x,1) <= min_records || thisNode.depth >= max_depth
isLeaf = true
else
# Try partitioning the dataset on each of the unique attribute values,
# calculate the information gain,
# and return the question that produces the highest gain.
gain, question = findBestSplit(thisNode.x,thisNode.y,mCols;max_features=max_features,splitting_criterion=splitting_criterion,integer_encoded_cols=integer_encoded_cols,fast_algorithm=fast_algorithm,rng=rng)
if gain <= min_gain
isLeaf = true
end
end
if isLeaf
newNode = Leaf(thisNode.y, thisNode.depth)
else
trueIdx = partition(question,thisNode.x,mCols,rng=rng)
newNode = DecisionNode(question,nothing,nothing,thisNode.depth,sum(trueIdx)/length(trueIdx))
push!(nodes,TempNode(true,newNode,thisNode.depth+1,thisNode.x[trueIdx,:],thisNode.y[trueIdx]))
push!(nodes,TempNode(false,newNode,thisNode.depth+1,thisNode.x[map(!,trueIdx),:],thisNode.y[map(!,trueIdx)]))
end
thisNode.trueBranch ? (thisNode.parentNode.trueBranch = newNode) : (thisNode.parentNode.falseBranch = newNode)
end
return rootNode
end
# API V2
"""
$(TYPEDSIGNATURES)
Fit a [`DecisionTreeEstimator`](@ref) to the data
"""
function fit!(m::DecisionTreeEstimator,x,y::AbstractArray{Ty,1}) where {Ty}
if m.fitted
m.opt.verbosity >= STD && @warn "This model has already been fitted (trained) and it doesn't support multiple fitting. This fitting will override the previous one(s)"
else
autotune!(m,(x,y))
end
Tynm = nonmissingtype(Ty)
# Setting default parameters that depends from the data...
max_depth = m.hpar.max_depth == nothing ? size(x,1) : m.hpar.max_depth
max_features = m.hpar.max_features == nothing ? size(x,2) : m.hpar.max_features
splitting_criterion = m.hpar.splitting_criterion == nothing ? ( (Tynm <: Number && !m.hpar.force_classification) ? variance : gini) : m.hpar.splitting_criterion
if (Tynm <: Integer && m.hpar.force_classification)
y = convert.(BetaMLClass,y)
end
# Setting schortcuts to other hyperparameters/options....
min_gain = m.hpar.min_gain
min_records = m.hpar.min_records
force_classification = m.hpar.force_classification
fast_algorithm = m.hpar.fast_algorithm
integer_encoded_cols = m.hpar.integer_encoded_cols
cache = m.opt.cache
rng = m.opt.rng
verbosity = m.opt.verbosity
tree = buildTree(x, y; max_depth = max_depth, min_gain=min_gain, min_records=min_records, max_features=max_features, force_classification=force_classification, splitting_criterion = splitting_criterion, mCols=nothing, fast_algorithm=fast_algorithm, integer_encoded_cols=integer_encoded_cols, rng = rng)
m.par = DT_lp(tree,Tynm)
if cache
#println(Tynm)
#println("zzz")
rawout = [v[1] for v in predictSingle.(Ref(tree),eachrow(x),rng=rng)]
if (Tynm <: Integer && m.hpar.force_classification)
#println("foo")
#println(rawout)
#out = convert.(Dict{Tynm,Float64},rawout)
out = [ Dict([convert(Tynm,k) => v for (k,v) in e]) for e in rawout]
else
out = rawout
end
m.cres = out
else
m.cres = nothing
end
m.fitted = true
job_is_regression = (force_classification || ! (Tynm <: Number) ) ? false : true
m.info["fitted_records"] = size(x,1)
m.info["xndims"] = size(x,2)
m.info["job_is_regression"] = job_is_regression ? 1 : 0
(m.info["avg_depth"],m.info["max_reached_depth"]) = computeDepths(m.par.tree)
return cache ? m.cres : nothing
end
# ------------------------------------------------------------------------------
# MODEL PREDICTIONS
"""
predictSingle(tree,x)
Predict the label of a single feature record. See [`predict`](@ref).
"""
function predictSingle(node::Union{DecisionNode{Tx},Leaf{Ty}}, x;ignore_dims=[], rng = Random.GLOBAL_RNG) where {Tx,Ty}
# Base case: we've reached a leaf
if typeof(node) <: Leaf
return node.predictions, node.npoints
end
# Decide whether to follow the true-branch or the false-branch.
# Compare the feature / value stored in the node,
# to the example we're considering.
# If this node concerns a dimension to ignore, we follow both branches and merge their results
if in(node.question.column,ignore_dims)
#println("I am in ignore_cols")
true_vals,true_npoints = predictSingle(node.trueBranch,x,ignore_dims=ignore_dims,rng=rng)
false_vals,false_npoints = predictSingle(node.falseBranch,x,ignore_dims=ignore_dims,rng=rng)
total_npoints = true_npoints + false_npoints
if typeof(true_vals) <: Dict
# Not a number or an integer with force classification: a classification
# println("A classification task")
# println(true_vals)
#return true_vals, true_npoints # TODO
return mean_dicts([true_vals,false_vals],weights=[true_npoints,false_npoints]),total_npoints
else
#println("A regression task")
return true_vals * true_npoints/total_npoints + false_vals * false_npoints/total_npoints, total_npoints
end
# if (Ty <: Integer && m.hpar.force_classification)
# return [ Dict([convert(Ty,k) => v for (k,v) in e]) for e in rawout]
# else
# return rawout
# end
end
# If the feature on which to base prediction is missing, we follow the true branch with a probability equal to the share of true
# records over all the records during this node training..
if ismissing(x[node.question.column])
r = rand(rng)
return (node.pTrue >= r) ? predictSingle(node.trueBranch,x,ignore_dims=ignore_dims,rng=rng) : predictSingle(node.falseBranch,x,ignore_dims=ignore_dims,rng=rng)
end
if match(node.question,x)
return predictSingle(node.trueBranch,x,ignore_dims=ignore_dims,rng=rng)
else
return predictSingle(node.falseBranch,x,ignore_dims=ignore_dims,rng=rng)
end
end
"""
predict(tree,x)
Predict the labels of a feature dataset.
!!! warning
Direct usage of this low-level function is deprecated.
Use [`DecisionTreeEstimator`](@ref) and the associated `predict(m::Model,x)` function instead.
For each record of the dataset, recursively traverse the tree to find the most appropriate prediction for the given record.
If the labels the tree has been fitted with are numeric, the prediction is also numeric.
If the labels were categorical, the prediction is a dictionary with the probabilities of each item.
In the first case (numerical predictions) use `relative_mean_error(ŷ,y)` to assess the mean relative error, in the second case you can use `accuracy(ŷ,y)`.
"""
function predict(tree::Union{DecisionNode{Tx}, Leaf{Ty}}, x; ignore_dims=[],rng = Random.GLOBAL_RNG) where {Tx,Ty}
predictions = [v[1] for v in predictSingle.(Ref(tree),eachrow(x),ignore_dims=ignore_dims,rng=rng)]
return predictions
end
# API V2...
"""
$(TYPEDSIGNATURES)
Predict the labels associated to some feature data using a trained [`DecisionTreeEstimator`](@ref)
"""
function predict(m::DecisionTreeEstimator,x;ignore_dims=[])
Ty = m.par.Ty # this should already be the nonmissing type
# we want a row
if typeof(x) <: AbstractArray
if ndims(x) == 1
x = permutedims(x)
end
else
x = permutedims([x])
end
rawout = [v[1] for v in predictSingle.(Ref(m.par.tree),eachrow(x),ignore_dims=ignore_dims,rng=m.opt.rng)]
if (Ty <: Integer && m.hpar.force_classification)
return [ Dict([convert(Ty,k) => v for (k,v) in e]) for e in rawout]
else
return rawout
end
end
# ------------------------------------------------------------------------------
# OTHER (MODEL OPTIONAL PARTS, INFO, VISUALISATION,...)
function computeDepths(node::AbstractNode)
leafDepths = Int64[]
nodeQueue = AbstractNode[]
push!(nodeQueue,node)
while length(nodeQueue) > 0
thisNode = pop!(nodeQueue)
if(typeof(thisNode) <: AbstractLeaf )
push!(leafDepths, thisNode.depth)
else
push!(nodeQueue, thisNode.trueBranch)
push!(nodeQueue, thisNode.falseBranch)
end
end
return (mean(leafDepths),maximum(leafDepths))
end
"""
print(node)
Print a Decision Tree (textual)
"""
function _printNode(node::AbstractNode, rootDepth="")
depth = node.depth
fullDepth = rootDepth*string(depth)*"."
spacing = ""
if depth == 1
println("*** Printing Decision Tree: ***")
else
spacing = join(["\t" for i in 1:depth],"")
end
# Base case: we've reached a leaf
if typeof(node) <: Leaf
println(" $(node.predictions)")
return
end
# Print the question at this node
print("\n$spacing$fullDepth ")
print(node.question)
print("\n")
# Call this function recursively on the true branch
print(spacing * "--> True :")
_printNode(node.trueBranch, fullDepth)
# Call this function recursively on the false branch
print(spacing * "--> False:")
_printNode(node.falseBranch, fullDepth)
end
function show(io::IO, ::MIME"text/plain", m::DecisionTreeEstimator)
if m.fitted == false
print(io,"DecisionTreeEstimator - A Decision Tree model (unfitted)")
else
job = m.info["job_is_regression"] == 1 ? "regressor" : "classifier"
print(io,"DecisionTreeEstimator - A Decision Tree $job (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::DecisionTreeEstimator)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"DecisionTreeEstimator - A Decision Tree model (unfitted)")
else
job = m.info["job_is_regression"] == 1 ? "regressor" : "classifier"
println(io,"DecisionTreeEstimator - A Decision Tree $job (fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
_printNode(m.par.tree)
end
end
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# ------------------------------------------------------------------------------
# TYPE HIERARCHY AND DEFINITIONS
"""
Forest{Ty}
Low-level type representing a Random Forest.
Individual trees are stored in the array `trees`. The "type" of the forest is given by the type of the labels on which it has been trained.
# Struct members:
- `trees`: The individual Decision Trees
- `is_regression`: Whether the forest is to be used for regression jobs or classification
- `oobData`: For each tree, the row numbers of the data that have _not_ been used to train that specific tree
- `ooberror`: The out of bag error (if it has been computed)
- `weights`: A weight for each tree depending on the tree's score on the oobData (see [`buildForest`](@ref))
"""
mutable struct Forest{Ty} <: BetaMLLearnableParametersSet
trees::Array{Union{AbstractDecisionNode,Leaf{Ty}},1}
is_regression::Bool
oobData::Array{Array{Int64,1},1}
ooberror::Float64
weights::Array{Float64,1}
end
# Api V2..
"""
$(TYPEDEF)
Hyperparameters for [`RandomForestEstimator`](@ref) (Random Forest).
## Parameters:
$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct RandomForestE_hp <: BetaMLHyperParametersSet
"Number of (decision) trees in the forest [def: `30`]"
n_trees::Int64 = 30
"The maximum depth the tree is allowed to reach. When this is reached the node is forced to become a leaf [def: `nothing`, i.e. no limits]"
max_depth::Union{Nothing,Int64} = nothing
"The minimum information gain to allow for a node's partition [def: `0`]"
min_gain::Float64 = 0.0
"The minimum number of records a node must hold to consider a partition of it [def: `2`]"
min_records::Int64 = 2
"The maximum number of (random) features to consider when choosing the optimal partition of the dataset [def: `nothing`, i.e. square root of the dimensions of the training data]"
max_features::Union{Nothing,Int64} = nothing
"Share of samples to bootstrap for each individual tree [def: `1.0`]"
sampling_share::Float64 = 1.0
"Whether to force a classification task even if the labels are numerical (typically when labels are integers encoding some feature rather than representing a real cardinal measure) [def: `false`]"
force_classification::Bool = false
"Either `gini`, `entropy` or `variance`. This is the name of the function to be used to compute the information gain of a specific partition. This is done by measuring the difference between the \"impurity\" of the labels of the parent node and those of the two child nodes, weighted by the respective number of items. [def: `nothing`, i.e. `gini` for categorical labels (classification task) and `variance` for numerical labels (regression task)]. It can be an anonymous function."
splitting_criterion::Union{Nothing,Function} = nothing
"Use an experimental faster algorithm for looking up the best split in ordered fields (columns). Currently it brings down the fitting time by an order of magnitude, but predictions may be noticeably affected. If used, control the meaning of integer fields with `integer_encoded_cols`."
fast_algorithm::Bool = false
"A vector of column positions to specify which integer columns should be treated as encodings of categorical variables instead of ordered classes/values. [def: `nothing`, integer columns with fewer than 20 unique values are considered categorical]. Useful in conjunction with `fast_algorithm`, little difference otherwise."
integer_encoded_cols::Union{Nothing,Array{Int64,1}} =nothing
"Parameter that regulates the weights of the scoring of each tree, to be (optionally) used in prediction based on the error of the individual trees computed on the records on which they have not been trained. Higher values favour \"better\" trees, but too high values will cause overfitting [def: `0`, i.e. uniform weights]"
beta::Float64 = 0.0
"Whether to compute the _Out-Of-Bag_ error, an estimation of the validation error (the mismatching error for classification and the relative mean error for regression jobs)."
oob::Bool = false
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and optionally change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges=Dict("n_trees" => [10, 20, 30, 40], "max_depth" =>[5,10,nothing], "min_gain"=>[0.0, 0.1, 0.5], "min_records"=>[2,3,5],"max_features"=>[nothing,5,10,30],"beta"=>[0,0.01,0.1]),multithreads=false) # RF are already MT
end
Base.@kwdef mutable struct RF_lp <: BetaMLLearnableParametersSet
forest::Union{Nothing,Forest} = nothing #TODO: Forest contains info that is actually in report. Currently we duplicate it; we should just remove it from par by making a dedicated struct instead of Forest
Ty::DataType = Any
end
"""
$(TYPEDEF)
A Random Forest classifier and regressor (supervised).
Random forests are _ensembles_ of Decision Tree models (see [`?DecisionTreeEstimator`](@ref DecisionTreeEstimator)).
For the parameters see [`?RandomForestE_hp`](@ref RandomForestE_hp) and [`?BML_options`](@ref BML_options).
# Notes :
- Each individual decision tree is built using bootstrap over the data, i.e. "sampling N records with replacement" (hence, some records appear multiple times and some records do not appear in the specific tree training). The `max_features` parameter injects further variability and reduces the correlation between the forest trees.
- The predictions of the "forest" (using the function `predict()`) are then the aggregated predictions of the individual trees (from which the name "bagging": **b**ootstrap **agg**regat**ing**).
- The performance of each individual tree, as measured using the records it has not been trained with, can then be (optionally) used as a weight in the `predict` function. The parameter `beta ≥ 0` regulates the distribution of these weights: the larger `β`, the greater the importance (hence the weight) attached to the best-performing trees compared to the low-performing ones. Using these weights can significantly improve the forest performance (especially with small forests), however the correct value of `beta` depends on the problem at hand (and on the chosen characteristics of the random forest estimator) and should be cross-validated to avoid over-fitting.
- Note that training `RandomForestEstimator` uses multiple threads if these are available. You can check the number of threads available with `Threads.nthreads()`. To set the number of threads in Julia either set the environmental variable `JULIA_NUM_THREADS` (before starting Julia) or start Julia with the command line option `--threads` (most integrated development editors for Julia already set the number of threads to 4).
- Online fitting (re-fitting with new data) is not supported
- Missing data (in the feature dataset) is supported.
# Examples:
- Classification...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = ["a","b","b","b","b","a"];
julia> mod = RandomForestEstimator(n_trees=5)
RandomForestEstimator - A 5 trees Random Forest model (unfitted)
julia> ŷ = fit!(mod,X,y) |> mode
6-element Vector{String}:
"a"
"b"
"b"
"b"
"b"
"a"
julia> println(mod)
RandomForestEstimator - A 5 trees Random Forest classifier (fitted on 6 records)
Dict{String, Any}("job_is_regression" => 0, "avg_avg_depth" => 1.8, "fitted_records" => 6, "avg_max_reached_depth" => 1.8, "oob_errors" => Inf, "xndims" => 2)
```
- Regression...
```julia
julia> using BetaML
julia> X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
julia> y = 2 .* X[:,1] .- X[:,2] .+ 3;
julia> mod = RandomForestEstimator(n_trees=5)
RandomForestEstimator - A 5 trees Random Forest model (unfitted)
julia> ŷ = fit!(mod,X,y);
julia> hcat(y,ŷ)
6×2 Matrix{Float64}:
4.1 2.98
-16.5 -18.37
-13.8 -14.61
-18.4 -17.37
-27.2 -20.78
2.7 2.98
julia> println(mod)
RandomForestEstimator - A 5 trees Random Forest regressor (fitted on 6 records)
Dict{String, Any}("job_is_regression" => 1, "fitted_records" => 6, "avg_avg_depth" => 2.8833333333333333, "oob_errors" => Inf, "avg_max_reached_depth" => 3.4, "xndims" => 2)
```
"""
mutable struct RandomForestEstimator <: BetaMLSupervisedModel
hpar::RandomForestE_hp
opt::BML_options
par::Union{Nothing,RF_lp}
cres
fitted::Bool
info::Dict{String,Any}
end
function RandomForestEstimator(;kwargs...)
m = RandomForestEstimator(RandomForestE_hp(),BML_options(),RF_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
# ------------------------------------------------------------------------------
# MODEL ALGORITHMS AND TRAINING
"""
buildForest(x, y, n_trees; max_depth, min_gain, min_records, max_features, splitting_criterion, force_classification)
Builds (define and train) a "forest" of Decision Trees.
!!! warning
Direct usage of this low-level function is deprecated and it has been unexported in BetaML 0.9.
Use [`RandomForestEstimator`](@ref) instead.
# Parameters:
See [`buildTree`](@ref). The function has all the parameters of `buildTree` (with the `max_features` defaulting to `√D` instead of `D`) plus the following parameters:
- `n_trees`: Number of trees in the forest [def: `30`]
- `β`: Parameter that regulates the weights of the scoring of each tree, to be (optionally) used in prediction (see later) [def: `0`, i.e. uniform weights]
- `oob`: Whether to compute the out-of-bag error, an estimation of the generalization error [def: `false`]
- `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [deafult: `Random.GLOBAL_RNG`]
# Output:
- The function returns a Forest object.
- The forest weights default to an array of ones if `β ≤ 0` and the oob error to `+Inf` if `oob` == `false`.
# Notes :
- Each individual decision tree is built using bootstrap over the data, i.e. "sampling N records with replacement" (hence, some records appear multiple times and some records do not appear in the specific tree training). The `max_features` parameter injects further variability and reduces the correlation between the forest trees.
- The predictions of the "forest" (using the function `predict()`) are then the aggregated predictions of the individual trees (from which the name "bagging": **b**ootstrap **agg**regat**ing**).
- This function optionally reports a weight distribution of the performances of each individual tree, as measured using the records it has not been trained with. These weights can then be (optionally) used in the `predict` function. The parameter `β ≥ 0` regulates the distribution of these weights: the larger `β`, the greater the importance (hence the weight) attached to the best-performing trees compared to the low-performing ones. Using these weights can significantly improve the forest performance (especially with small forests), however the correct value of β depends on the problem at hand (and on the chosen characteristics of the random forest estimator) and should be cross-validated to avoid over-fitting.
- Note that this function uses multiple threads if these are available. You can check the number of threads available with `Threads.nthreads()`. To set the number of threads in Julia either set the environmental variable `JULIA_NUM_THREADS` (before starting Julia) or start Julia with the command line option `--threads` (most integrated development editors for Julia already set the number of threads to 4).
"""
function buildForest(x, y::AbstractArray{Ty,1}, n_trees=30; max_depth = size(x,1), min_gain=0.0, min_records=2, max_features=Int(round(sqrt(size(x,2)))), sampling_share=1.0, force_classification=false, splitting_criterion = (Ty <: Number && !force_classification) ? variance : gini, integer_encoded_cols=nothing, fast_algorithm=false, β=0, oob=false,rng = Random.GLOBAL_RNG, verbosity=NONE) where {Ty}
# Force what would be a regression task into a classification task
if force_classification && Ty <: Number
y = string.(y)
end
trees = Array{Union{AbstractDecisionNode,Leaf{Ty}},1}(undef,n_trees)
notSampledByTree = Array{Array{Int64,1},1}(undef,n_trees) # to later compute the Out of Bag Error
errors = Float64[]
job_is_regression = (force_classification || !(eltype(y) <: Number )) ? false : true # we don't need the ternary operator here, but it is clearer with it...
(N,D) = size(x)
if isnothing(integer_encoded_cols)
integer_encoded_cols = Int64[]
for (d,c) in enumerate(eachcol(x))
if(all(isinteger_bml.(skipmissing(c)))) && length(unique(skipmissing(c))) < 20 # hardcoded: when using automatic identifier of integer encoded cols, if more than XX values, we consider that is not a categorical variable
push!(integer_encoded_cols,d)
end
end
end
masterSeed = rand(rng,100:9999999999999) ## Some RNGs have problems with very small seeds. Also, the master seed has to be computed _before_ generate_parallel_rngs
rngs = generate_parallel_rngs(rng,Threads.nthreads())
#for i in 1:n_trees # for easier debugging/profiling...
Threads.@threads for i in 1:n_trees
tsrng = rngs[Threads.threadid()] # Thread safe random number generator
Random.seed!(tsrng,masterSeed+i*10)
toSample = rand(tsrng, 1:N, Int(round(N*sampling_share)))
notToSample = setdiff(1:N,toSample)
bootstrappedx = x[toSample,:] # "boosted" is different from "bootstrapped": https://towardsdatascience.com/random-forest-and-its-implementation-71824ced454f
bootstrappedy = y[toSample]
#controlx = x[notToSample,:]
#controly = y[notToSample]
tree = buildTree(bootstrappedx, bootstrappedy; max_depth = max_depth, min_gain=min_gain, min_records=min_records, max_features=max_features, splitting_criterion = splitting_criterion, force_classification=force_classification, integer_encoded_cols=integer_encoded_cols, fast_algorithm=fast_algorithm, rng = tsrng, verbosity=verbosity)
#ŷ = predict(tree,controlx)
trees[i] = tree
notSampledByTree[i] = notToSample
end
weights = ones(Float64,n_trees)
if β > 0
weights = updateTreesWeights!(Forest{Ty}(trees,job_is_regression,notSampledByTree,0.0,weights), x, y, β=β, rng=rng)
end
oobe = +Inf
if oob
oobe = ooberror(Forest{Ty}(trees,job_is_regression,notSampledByTree,0.0,weights),x,y,rng=rng)
end
return Forest{Ty}(trees,job_is_regression,notSampledByTree,oobe,weights)
end
# API V2
"""
$(TYPEDSIGNATURES)
Fit a [`RandomForestEstimator`](@ref) to the data
"""
function fit!(m::RandomForestEstimator,x,y::AbstractArray{Ty,1}) where {Ty}
if m.fitted
m.opt.verbosity >= STD && @warn "This model has already been fitted and it doesn't support multiple training. This training will override the previous one(s)"
else
autotune!(m,(x,y))
end
Tynm = nonmissingtype(Ty)
# Setting default parameters that depend on the data...
max_depth = m.hpar.max_depth == nothing ? size(x,1) : m.hpar.max_depth
max_features = m.hpar.max_features == nothing ? Int(round(sqrt(size(x,2)))) : m.hpar.max_features
splitting_criterion = m.hpar.splitting_criterion == nothing ? ( (Tynm <: Number && !m.hpar.force_classification) ? variance : gini) : m.hpar.splitting_criterion
if (Tynm <: Integer && m.hpar.force_classification)
y = convert.(BetaMLClass,y)
end
# Setting shortcuts to other hyperparameters/options....
min_gain = m.hpar.min_gain
min_records = m.hpar.min_records
sampling_share = m.hpar.sampling_share
force_classification = m.hpar.force_classification
n_trees = m.hpar.n_trees
fast_algorithm = m.hpar.fast_algorithm
integer_encoded_cols = m.hpar.integer_encoded_cols
β = m.hpar.beta
oob = m.hpar.oob
cache = m.opt.cache
rng = m.opt.rng
verbosity = m.opt.verbosity
forest = buildForest(x, y, n_trees; max_depth = max_depth, min_gain=min_gain, min_records=min_records, sampling_share=sampling_share, max_features=max_features, force_classification=force_classification, splitting_criterion = splitting_criterion, fast_algorithm=fast_algorithm, integer_encoded_cols=integer_encoded_cols, β=β, oob=false, rng = rng)
m.par = RF_lp(forest,Tynm)
if cache
rawout = predictSingle.(Ref(forest),eachrow(x),rng=rng) # TODO
if (Tynm <: Integer && m.hpar.force_classification)
out = [ Dict([convert(Tynm,k) => v for (k,v) in e]) for e in rawout]
else
out = rawout
end
m.cres = out
else
m.cres = nothing
end
if oob
m.par.forest.ooberror = ooberror(m.par.forest,x,y;rng = rng)
end
m.fitted = true
m.info["fitted_records"] = size(x,1)
m.info["xndims"] = size(x,2)
m.info["job_is_regression"] = m.par.forest.is_regression ? 1 : 0
m.info["oob_errors"] = m.par.forest.ooberror
depths = vcat([transpose([computeDepths(tree)[1],computeDepths(tree)[2]]) for tree in m.par.forest.trees]...)
(m.info["avg_avg_depth"],m.info["avg_max_reached_depth"]) = mean(depths,dims=1)[1], mean(depths,dims=1)[2]
return cache ? m.cres : nothing
end
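# Illustrative usage sketch (placeholder data and names, not actual output): requesting the
# out-of-bag error estimate and performance-based tree weights at construction time, then
# reading the oob estimate from `info` after fitting.
#
#   m = RandomForestEstimator(n_trees=30, oob=true, beta=0.1)
#   fit!(m, X, y)                 # X, y: your training features and labels
#   info(m)["oob_errors"]         # oob estimate of the validation error (Inf when oob=false)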
# ------------------------------------------------------------------------------
# MODEL PREDICTIONS
# Optionally a weighted mean of the trees' predictions is used if the parameter `weights` is given.
"""
predictSingle(forest,x)
Predict the label of a single feature record. See [`predict`](@ref).
"""
function predictSingle(forest::Forest{Ty}, x; ignore_dims=[], rng = Random.GLOBAL_RNG) where {Ty}
trees = forest.trees
weights = forest.weights
predictions = [v[1] for v in predictSingle.(trees,Ref(x),ignore_dims=ignore_dims,rng=rng)] # TODO
if eltype(predictions) <: AbstractDict # categorical
#weights = 1 .- treesErrors # back to the accuracy
return mean_dicts(predictions,weights=weights)
else
#weights = exp.( - treesErrors)
return dot(predictions,weights)/sum(weights)
end
end
"""
[predict(forest,x)](@id forest_prediction)
Predict the labels of a feature dataset.
!!! warning
Direct usage of this low-level function is deprecated and it has been unexported in BetaML 0.9.
Use [`RandomForestEstimator`](@ref) and the associated `predict(m::Model,x)` function instead.
For each record of the dataset and each tree of the "forest", recursively traverse the tree to find the most appropriate prediction for the given record.
If the labels the tree has been trained with are numeric, the prediction is also numeric (the mean of the different trees' predictions, each in turn the mean of the labels of the training records that ended in that leaf node).
If the labels were categorical, the prediction is a dictionary with the probabilities of each item, and in this case the probabilities of the different trees are averaged to compose the forest predictions. This is a bit different from most other implementations, where the mode is reported instead.
In the first case (numerical predictions) use `relative_mean_error(ŷ,y)` to assess the mean relative error, in the second case you can use `accuracy(ŷ,y)`.
"""
function predict(forest::Forest{Ty}, x;ignore_dims=[], rng = Random.GLOBAL_RNG) where {Ty}
predictions = predictSingle.(Ref(forest),eachrow(x),ignore_dims=ignore_dims,rng=rng)
return predictions
end
# API V2...
"""
$(TYPEDSIGNATURES)
Predict the labels associated to some feature data using a trained [`RandomForestEstimator`](@ref)
"""
function predict(m::RandomForestEstimator,x; ignore_dims=[])
#TODO: get Tynm here! and OrdinalEncoder!
#Ty = get_parametric_types(m.par)[1] |> nonmissingtype
Ty = m.par.Ty # this should already be the nonmissing type
# we want a row
if typeof(x) <: AbstractArray
if ndims(x) == 1
x = permutedims(x)
end
else
x = permutedims([x])
end
rawout = predictSingle.(Ref(m.par.forest),eachrow(x),ignore_dims=ignore_dims,rng=m.opt.rng)
if (Ty <: Integer && m.hpar.force_classification)
return [ Dict([convert(Ty,k) => v for (k,v) in e]) for e in rawout]
else
return rawout
end
end
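# Illustrative usage sketch (placeholder names): predicting while excluding the contribution
# of some feature columns, e.g. to gauge the importance of a variable without retraining
# (this is what `FeatureRanker` exploits when the model supports it).
#
#   ŷ_all = predict(m, xtest)                   # use all the features
#   ŷ_no2 = predict(m, xtest, ignore_dims=[2])  # ignore the 2nd feature column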
# ------------------------------------------------------------------------------
# OTHER (MODEL OPTIONAL PARTS, INFO, VISUALISATION,...)
"""
updateTreesWeights!(forest,x,y;β)
Update the weights of each tree (to use in the prediction of the forest) based on the error of the individual tree computed on the records on which it has not been trained.
As training a forest is expensive, this function can be used to "just" update the tree weights using different betas, without retraining the model.
"""
function updateTreesWeights!(forest::Forest{Ty},x,y;β=50,rng = Random.GLOBAL_RNG) where {Ty}
trees = forest.trees
notSampledByTree = forest.oobData
job_is_regression = forest.is_regression
weights = Float64[]
for (i,tree) in enumerate(trees)
yoob = y[notSampledByTree[i]]
if length(yoob) > 0
ŷ = predict(tree,x[notSampledByTree[i],:],rng=rng)
if job_is_regression
push!(weights,exp(- β*relative_mean_error(yoob,ŷ)))
else
push!(weights,accuracy(yoob,ŷ)*β)
end
else # there is no out-of-bag data for this tree because, by a (rare!) chance, the bootstrap sample included all the records
push!(weights,forest.weights[i])
end
end
forest.weights = weights
return weights
end
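# Illustrative usage sketch of this low-level (unexported, deprecated) API, with placeholder
# data: the forest is built once and then re-weighted with a different β without retraining.
#
#   forest = BetaML.Trees.buildForest(x, y, 30, β=0)      # uniform tree weights
#   BetaML.Trees.updateTreesWeights!(forest, x, y, β=50)  # weights now reflect oob performance
#   ŷ = predict(forest, x)                                # predictions use the new weights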
"""
ooberror(forest,x,y;rng)
Compute the Out-Of-Bag error, an estimation of the validation error.
This function is called when training the forest if the parameter `oob` is `true`, or it can be used later to get the oob error of an already trained forest.
The oob error reported is the mismatching error for classification and the relative mean error for regression.
"""
function ooberror(forest::Forest{Ty},x,y;rng = Random.GLOBAL_RNG) where {Ty}
trees = forest.trees
job_is_regression = forest.is_regression
notSampledByTree = forest.oobData
weights = forest.weights
B = length(trees)
N = size(x,1)
if job_is_regression
ŷ = Array{Float64,1}(undef,N)
else
ŷ = Array{Dict{Ty,Float64},1}(undef,N)
end
# Rarely, a given record n may have been used by all the trees of the forest, so there are no trees available to compute its oob error
# This serves as a mask to remove this n from the computation of the oob error
nMask = fill(true,N)
for (n,x) in enumerate(eachrow(x))
unseenTreesBools = in.(n,notSampledByTree)
if sum(unseenTreesBools) == 0 # this particular record has been visited by all trees of the forest
nMask[n] = false
continue
end
unseenTrees = trees[(1:B)[unseenTreesBools]]
unseenTreesWeights = weights[(1:B)[unseenTreesBools]]
ŷi = predictSingle(Forest{Ty}(unseenTrees,job_is_regression,forest.oobData,0.0,unseenTreesWeights),x,rng=rng)
if !job_is_regression && Ty <: Number # we are in the ugly case where we want integers but we have dict of Strings, need to convert
ŷi = Dict(map((k,v) -> parse(Int,k)=>v, keys(ŷi), values(ŷi)))
end
ŷ[n] = ŷi
end
if job_is_regression
return relative_mean_error(y[nMask],ŷ[nMask],normdim=false,normrec=false)
else
return error(y[nMask],ŷ[nMask])
end
end
function show(io::IO, ::MIME"text/plain", m::RandomForestEstimator)
if m.fitted == false
print(io,"RandomForestEstimator - A $(m.hpar.n_trees) trees Random Forest model (unfitted)")
else
job = m.info["job_is_regression"] == 1 ? "regressor" : "classifier"
print(io,"RandomForestEstimator - A $(m.hpar.n_trees) trees Random Forest $job (fitted on $(m.info["fitted_records"]) records)")
end
end
function show(io::IO, m::RandomForestEstimator)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"RandomForestEstimator - A $(m.hpar.n_trees) trees Random Forest model (unfitted)")
else
job = m.info["job_is_regression"] == 1 ? "regressor" : "classifier"
println(io,"RandomForestEstimator - A $(m.hpar.n_trees) trees Random Forest $job (fitted on $(m.info["fitted_records"]) records)")
println(io,m.info)
end
end
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
BetaML.Trees module
Implement the [`DecisionTreeEstimator`](@ref) and [`RandomForestEstimator`](@ref) models (Decision Trees and Random Forests).
Both Decision Trees and Random Forests can be used for regression or classification problems, based on the type of the labels (numerical or not). The automatic selection can be overridden with the parameter `force_classification=true`, typically if labels are integers representing some categories rather than numbers. For classification problems the output of `predict` is a dictionary with the keys being the labels with non-zero probability and the corresponding values their probabilities; for regression it is a numerical value.
Please be aware that, differently from most other implementations, the Random Forest algorithm collects and averages the probabilities from the trees, rather than just reporting the mode, i.e. no information is lost and the output of the forest classifier is still a PMF.
To retrieve the prediction with the highest probability use [`mode`](@ref) over the prediction returned by the model. Most error/accuracy measures in the [`Utils`](@ref) BetaML module work directly with this format.
Missing data and truly unordered types are supported in the features, both in training and in prediction.
The module provides the following functions. Use `?[type or function]` to access their full signatures and detailed documentation:
Features are expected to be in the standard format (nRecords × nDimensions matrices) and the labels (either categorical or numerical) as an nRecords column vector.
Acknowledgments: originally based on [Josh Gordon's code](https://www.youtube.com/watch?v=LDRbO9a6XPU)
"""
module Trees
using LinearAlgebra, Random, Statistics, StatsBase, Reexport, CategoricalArrays, DocStringExtensions
import AbstractTrees
using ForceImport
@force using ..Api
@force using ..Utils
import Base.print
import Base.show
import Base.convert
export DecisionTreeEstimator, DecisionTreeE_hp
# export AbstractDecisionNode,Leaf, DecisionNode,
# export buildTree
#predictSingle # TODO: to remove
export RandomForestEstimator, RandomForestE_hp
#export Forest
# export buildForest
# updateTreesWeights! # TODO:to remove
include("DecisionTrees.jl") # Decision Trees algorithm and API
include("AbstractTrees_BetaML_interface.jl") # Code to allow plotting of a DecisionTree
include("RandomForests.jl") # Random Forests algorithm and API
end # end module
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Part of submodule Utils of BetaML _ the Beta Machine Learning Toolkit
# Various utils to help in logging/debugging
"""
@codelocation()
Helper macro to print, at runtime, an info message concerning the position of the code being executed
"""
macro codelocation()
return quote
st = stacktrace(backtrace())
myf = ""
for frm in st
funcname = frm.func
if frm.func != :backtrace && frm.func!= Symbol("macro expansion")
myf = frm.func
break
end
end
println("Running function ", $("$(__module__)"),".$(myf) at ",$("$(__source__.file)"),":",$("$(__source__.line)"))
println("Type `]dev BetaML` to modify the source code (this would change its location on disk)")
end
end
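# Illustrative usage sketch: calling the macro inside your own function prints, at runtime,
# the enclosing module/function and the source file/line being executed.
#
#   function myfunction()
#       @codelocation()
#       # ... rest of the function ...
#   end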
"""
$(TYPEDSIGNATURES)
Convert any integer to one of the defined betaml verbosity levels.
Currently "steps" are 0, 10, 20 and 30
"""
function to_betaml_verbosity(i::Integer)
if i <= 0
return NONE
elseif i <= 10
return LOW
elseif i <= 20
return STD
elseif i <= 30
return HIGH
else
return FULL
end
end
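# Illustrative sketch of the mapping (values follow the thresholds above):
#   to_betaml_verbosity(0)    # NONE
#   to_betaml_verbosity(15)   # STD
#   to_betaml_verbosity(100)  # FULL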
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Part of submodule Utils of BetaML _ the Beta Machine Learning Toolkit
# Various measures of pairs (x,y) (including vectors or matrix pairs)
import StatsBase
# ------------------------------------------------------------------------------
# Some common distance measures
# https://weaviate.io/blog/distance-metrics-in-vector-search
"""L1 norm distance (aka _Manhattan Distance_)"""
l1_distance(x,y) = sum(abs.(x-y))
"""Euclidean (L2) distance"""
l2_distance(x,y) = norm(x-y)
"""Squared Euclidean (L2) distance"""
l2squared_distance(x,y) = norm(x-y)^2
"""Cosine distance"""
cosine_distance(x,y) = 1-dot(x,y)/(norm(x)*norm(y))
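# Illustrative sketch (hand-computed values) of the four measures on the same pair of vectors:
#   x = [1.0, 2.0]; y = [4.0, 6.0]
#   l1_distance(x,y)        # 7.0
#   l2_distance(x,y)        # 5.0
#   l2squared_distance(x,y) # 25.0
#   cosine_distance(x,y)    # ≈ 0.0077 (small: the two vectors point in similar directions)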
"""
$(TYPEDSIGNATURES)
Compute pairwise distance matrix between elements of an array identified across dimension `dims`.
# Parameters:
- `x`: the data array
- distance: a distance measure [def: `l2_distance`]
- dims: the dimension of the observations [def: `1`, i.e. records on rows]
# Returns:
- an n_records by n_records symmetric matrix of the pairwise distances
# Notes:
- if performance matters, you can use something like `Distances.pairwise(Distances.euclidean,x,dims=1)` from the [`Distances`](https://github.com/JuliaStats/Distances.jl) package.
"""
function pairwise(x::AbstractArray;distance=l2_distance,dims=1)
N = size(x,dims)
out = zeros(N,N)
for r in 1:N
for c in 1:r
out[r,c] = distance(selectdim(x,dims,r),selectdim(x,dims,c))
end
end
for r in 1:N
for c in r+1:N
out[r,c] = out[c,r]
end
end
return out
end
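# Illustrative usage sketch (placeholder data): pairwise distances between the rows of a
# matrix, with the default and with a custom metric.
#   x = [1.0 1.0; 2.0 2.0; 10.0 10.0]
#   pairwise(x)                        # 3×3 symmetric matrix of l2 distances
#   pairwise(x, distance=l1_distance)  # same, using the Manhattan distance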
################################################################################
### VARIOUS ERROR / LOSS / ACCURACY MEASURES
################################################################################
# ------------------------------------------------------------------------------
# Classification tasks...
# Used as neural network loss function
"""
crossentropy(y,ŷ; weight)
Compute the (weighted) cross-entropy between the predicted and the sampled probability distributions.
To be used in classification problems.
"""
crossentropy(y,ŷ ; weight = ones(eltype(y),length(y))) = -sum(y .* log.(ŷ .+ 1e-15) .* weight)
dcrossentropy(y,ŷ; weight = ones(eltype(y),length(y))) = - y .* weight ./ (ŷ .+ 1e-15)
"""
kl_divergence(distribution1,distribution2,base2=true)
Compute the Kullback–Leibler divergence between two PMFs
"""
# Note that the KL divergence can also be written as cross entropy - entropy:
# kl_divergence(d1,d2) = - sum(d1 .* log2.(d2 .+ 1e-15 )) - ( - sum(d1 .* log2.(d1 .+ 1e-15)) )
kl_divergence(d1,d2,base2=true) = base2 ? sum(d1 .* log2.((d1 .+ 1e-15) ./ (d2 .+ 1e-15)) ) : sum(d1 .* log.((d1 .+ 1e-15) ./ (d2 .+ 1e-15)) )
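# Illustrative sketch: the divergence is zero for identical PMFs and grows as the two
# distributions diverge; the third positional argument selects bits vs nats.
#   p = [0.5, 0.5]; q = [0.9, 0.1]
#   kl_divergence(p, p)         # ≈ 0.0
#   kl_divergence(p, q)         # > 0, in bits (base2 defaults to true)
#   kl_divergence(p, q, false)  # the same divergence in nats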
""" accuracy(ŷ,y;ignorelabels=false) - Categorical accuracy between two vectors (T vs T). """
function accuracy(y::AbstractArray{T,1},ŷ::AbstractArray{T,1}; ignorelabels=false) where {T}
# See here for better performances: https://discourse.julialang.org/t/permutations-of-a-vector-that-retain-the-vector-structure/56790/7
if(!ignorelabels)
return sum(ŷ .== y)/length(ŷ)
else
classes = unique(y)
nCl = length(classes)
N = size(y,1)
pSet = collect(permutations(1:nCl))
bestAcc = -Inf
yOrigIdx = [findfirst(x -> x == y[i] , classes) for i in 1:N]
ŷOrigIdx = [findfirst(x -> x == ŷ[i] , classes) for i in 1:N]
for perm in pSet
py = perm[yOrigIdx] # permuted specific version
acc = sum(ŷOrigIdx .== py)/N
if acc > bestAcc
bestAcc = acc
end
end
return bestAcc
end
end
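# Illustrative sketch: `ignorelabels=true` is useful to score clustering-like outputs, where
# the specific label identities are arbitrary.
#   y = ["a","a","b","b"]; ŷ = ["b","b","a","a"]
#   accuracy(y, ŷ)                     # 0.0
#   accuracy(y, ŷ, ignorelabels=true)  # 1.0 (labels match up to a permutation)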
""" error(y,ŷ;ignorelabels=false) - Categorical error (T vs T)"""
error(y::AbstractArray{T,1},ŷ::AbstractArray{T,1}; ignorelabels=false) where {T} = (1 - accuracy(y,ŷ;ignorelabels=ignorelabels) )
"""
accuracy(y,ŷ;tol)
Categorical accuracy with probabilistic prediction of a single datapoint (PMF vs Int).
Use the parameter tol [def: `1`] to determine the tolerance of the prediction, i.e. whether to consider "correct" only a prediction where the value with the highest probability is the true value (`tol` = 1), or instead the set of the `tol` maximum values.
"""
function accuracy(y_pos::Int64,ŷ::AbstractArray{T,1};tol=1,rng=Random.GLOBAL_RNG) where {T <: Number}
#if length(Set(ŷ) == 1 # all classes the same prob
# return rand(rng) < (1 / length(y)) ? 1 : 0 # If all values have the same prob, it returns 1 with prob 1/n_classes
#end
tol > 1 || return mode(ŷ;rng=rng) == y_pos ? 1 : 0 # if tol is one we delegate the choice of a single prediction to mode, that handles multimodal pmfs
sIdx = sortperm(ŷ)[end:-1:1]
if ŷ[y_pos] in ŷ[sIdx[1:min(tol,length(sIdx))]]
return 1
else
return 0
end
end
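# Illustrative sketch of the `tol` parameter: the true class (position 2) has only the
# second-highest probability, so it is counted as correct only with tol ≥ 2.
#   ŷ = [0.5, 0.3, 0.2]
#   accuracy(2, ŷ)         # 0 (the class with the highest probability is 1)
#   accuracy(2, ŷ, tol=2)  # 1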
"""
accuracy(y,ŷ;tol)
Categorical accuracy with probabilistic prediction of a single datapoint given in terms of a dictionary of probabilities (Dict{T,Float64} vs T).
# Parameters:
- `ŷ`: The returned probability mass function in terms of a Dictionary(Item1 => Prob1, Item2 => Prob2, ...)
- `tol`: The tolerance of the prediction, i.e. whether to consider "correct" only a prediction where the value with the highest probability is the true value (`tol` = 1), or instead the set of the `tol` maximum values [def: `1`].
"""
function accuracy(y::T,ŷ::AbstractDict{T,Float64};tol=1,rng=Random.GLOBAL_RNG) where {T}
if !(y in keys(ŷ)) return 0 end
tol > 1 || return (mode(ŷ;rng=rng) == y) ? 1 : 0 # if tol is one we delegate the choice of a single prediction to mode, that handles multimodal pmfs
sIdx = sortperm(collect(values(ŷ)))[end:-1:1] # sort by decreasing values of the dictionary values
sKeys = collect(keys(ŷ))[sIdx][1:min(tol,length(sIdx))] # retrieve the corresponding keys
return (y in sKeys) ? 1 : 0
end
@doc raw"""
accuracy(y,ŷ;tol,ignorelabels)
Categorical accuracy with probabilistic predictions of a dataset (PMF vs Int).
# Parameters:
- `y`: The N array with the correct category for each point $n$.
- `ŷ`: An (N,K) matrix of probabilities that each ``\hat y_n`` record with ``n \in 1,....,N`` being of category ``k`` with $k \in 1,...,K$.
- `tol`: The tolerance of the prediction, i.e. whether to consider "correct" only a prediction where the value with the highest probability is the true value (`tol` = 1), or instead the set of the `tol` maximum values [def: `1`].
- `ignorelabels`: Whether to ignore the specific label order in y. Useful for unsupervised learning algorithms where the specific label order doesn't make sense [def: false]
"""
function accuracy(y::AbstractArray{Int64,1},ŷ::AbstractArray{T,2};tol=1,ignorelabels=false,rng=Random.GLOBAL_RNG) where {T <: Number}
(N,D) = size(ŷ)
pSet = ignorelabels ? collect(permutations(1:D)) : [collect(1:D)]
bestAcc = -Inf
for perm in pSet
pŷ = hcat([ŷ[:,c] for c in perm]...)
acc = sum([accuracy(y[i],pŷ[i,:];tol=tol,rng=rng) for i in 1:N])/N
if acc > bestAcc
bestAcc = acc
end
end
return bestAcc
end
@doc raw"""
accuracy(y,ŷ;tol)
Categorical accuracy with probabilistic predictions of a dataset given in terms of a dictionary of probabilities (Dict{T,Float64} vs T).
# Parameters:
- `ŷ`: An array where each item is the estimated probability mass function in terms of a Dictionary(Item1 => Prob1, Item2 => Prob2, ...)
- `y`: The N array with the correct category for each point $n$.
- `tol`: The tolerance of the prediction, i.e. whether to consider "correct" only a prediction where the value with the highest probability is the true value (`tol` = 1), or instead the set of the `tol` maximum values [def: `1`].
"""
function accuracy(y::AbstractArray{T,1},ŷ::AbstractArray{Dict{T,Float64},1};tol=1,rng=Random.GLOBAL_RNG) where {T}
N = size(ŷ,1)
acc = sum([accuracy(y[i],ŷ[i];tol=tol,rng=rng) for i in 1:N])/N
return acc
end
"""
$(TYPEDSIGNATURES)
Compute the loss of a given model over a given (x,y) dataset, running cross-validation
"""
function l2loss_by_cv(m,data;nsplits=5,nrepeats=1,rng=Random.GLOBAL_RNG)
if length(data) == 2 # supervised model
x,y = data[1],data[2]
sampler = KFold(nsplits=nsplits,nrepeats=nrepeats,rng=rng)
if (ndims(y) == 1)
ohm = OneHotEncoder(handle_unknown="infrequent",cache=false)
fit!(ohm,y)
end
(μ,σ) = cross_validation([x,y],sampler) do trainData,valData,rng
(xtrain,ytrain) = trainData; (xval,yval) = valData
fit!(m,xtrain,ytrain)
ŷval = predict(m,xval)
if (eltype(ŷval) <: Dict)
yval = predict(ohm,yval)
ŷval = predict(ohm,ŷval)
end
ϵ = norm(yval-ŷval)/size(yval,1)
reset!(m)
return ismissing(ϵ) ? Inf : ϵ
end
return μ
elseif length(data) == 1 # unsupervised model with inverse_predict
x= data[1]
sampler = KFold(nsplits=nsplits,nrepeats=nrepeats,rng=rng)
(μ,σ) = cross_validation([x],sampler) do trainData,valData,rng
(xtrain,) = trainData; (xval,) = valData
fit!(m,xtrain)
x̂val_red = predict(m,xval)
x̂val = inverse_predict(m,x̂val_red)
ϵ = norm(xval .- x̂val)/size(xval,1)
reset!(m)
return ismissing(ϵ) ? Inf : ϵ
end
return μ
else
@error "Function `l2loss_by_cv` accepts only 1-length or 2-length data, for unsupervised and supervised models respectively"
end
end
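# Illustrative usage sketch (placeholder data and names): cross-validated loss of a candidate
# model, e.g. to compare estimators before the final fit. For unsupervised models the data is
# a 1-element collection and the model must implement `inverse_predict`.
#   m = RandomForestEstimator(n_trees=30)
#   l2loss_by_cv(m, (x, y), nsplits=5)   # mean loss over the 5 folds
#   l2loss_by_cv(m_unsup, (x,))          # m_unsup: any model implementing inverse_predict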
""" error(y,ŷ) - Categorical error with probabilistic prediction of a single datapoint (Int vs PMF). """
error(y::Int64,ŷ::Array{T,1};tol=1) where {T <: Number} = 1 - accuracy(y,ŷ;tol=tol)
""" error(y,ŷ) - Categorical error with probabilistic predictions of a dataset (Int vs PMF). """
error(y::Array{Int64,1},ŷ::Array{T,2};tol=1) where {T <: Number} = 1 - accuracy(y,ŷ;tol=tol)
""" error(y,ŷ) - Categorical error with with probabilistic predictions of a dataset given in terms of a dictionary of probabilities (T vs Dict{T,Float64}). """
error(y::Array{T,1},ŷ::Array{Dict{T,Float64},1};tol=1) where {T} = 1 - accuracy(y,ŷ;tol=tol)
"""
$(TYPEDSIGNATURES)
Provide Silhouette scoring for cluster outputs
# Parameters:
- `distances`: the nrecords by nrecords pairwise distance matrix
- `classes`: the vector of assigned classes to each record
# Notes:
- the matrix of pairwise distances can be obtained with the function [`pairwise`](@ref)
- this function doesn't sample the data; if needed, subsample beforehand
- to get the score for the cluster simply compute the `mean`
- see also the [Wikipedia article](https://en.wikipedia.org/wiki/Silhouette_(clustering))
# Example:
```julia
julia> x = [1 2 3 3; 1.2 3 3.1 3.2; 2 4 6 6.2; 2.1 3.5 5.9 6.3];
julia> s_scores = silhouette(pairwise(x),[1,2,2,2])
4-element Vector{Float64}:
0.0
-0.7590778795827623
0.5030093571833065
0.4936350560759424
```
"""
function silhouette(distances,classes)
uclasses = unique(classes)
K = length(uclasses)
N = size(distances,1)
out = Array{Float64,1}(undef,N)
positions = hcat([classes .== cl for cl in uclasses]...) # N by K
nByClass = reshape(sum(positions,dims=1),K)
#println(nByClass)
for n in 1:N
cl = classes[n]
a = 0.0
b = Inf
#println("---------")
#println("n: $n")
for clidx in 1:K
#print("- cl $clidx")
cldists = distances[n,positions[:,clidx]]
if cl == uclasses[clidx] # own cluster
a = sum(cldists)/ (nByClass[clidx]-1)
#println(" a: $a")
else
btemp = sum(cldists) / nByClass[clidx]
#println(" b: $btemp")
if btemp < b
b = btemp
end
end
end
if isnan(a)
out[n] = 0.0
elseif a < b
out[n] = 1-(a/b)
else
out[n] = (b/a) -1
end
#println("- s: $(out[n])")
end
return out
end
"""
$(TYPEDEF)
Hyperparameters for [`ConfusionMatrix`](@ref)
# Parameters:
$(FIELDS)
"""
Base.@kwdef mutable struct ConfusionMatrix_hp <: BetaMLHyperParametersSet
"The categories (aka \"levels\") to represent. [def: `nothing`, i.e. unique ground true values]."
categories::Union{Vector,Nothing} = nothing
"How to handle categories not seen in the ground truth values or not present in the provided `categories` array? \"error\" (default) raises an error, \"infrequent\" adds a specific category for these values."
handle_unknown::String = "error"
"How to handle missing values in either the ground truth or the predicted values? \"error\" [default] will raise an error, \"drop\" will drop the record"
handle_missing::String = "error"
"Which value to assign to the \"other\" category (i.e. categories not seen in the ground truth or not present in the provided `categories` array)? [def: `nothing`, i.e. typemax(Int64) for integer vectors and \"other\" for other types]. This setting is active only if `handle_unknown=\"infrequent\"` and in that case it MUST be specified if the categories are neither integers nor strings"
other_categories_name = nothing
"A dictionary to map categories to some custom names. Useful for example if categories are integers, or you want to use shorter names [def: `Dict()`, i.e. not used]. This option isn't currently compatible with missing values or when some record has a value not in this provided dictionary."
categories_names = Dict()
"Whether `predict` should return the normalised scores. Note that both unnormalised and normalised scores remain available using `info`. [def: `true`]"
normalise_scores = true
end
Base.@kwdef mutable struct ConfusionMatrix_lp <: BetaMLLearnableParametersSet
categories::Vector = []
original_vector_eltype::Union{Type,Nothing} = nothing
scores::Union{Nothing,Matrix{Int64}} = nothing
end
"""
$(TYPEDEF)
Compute a confusion matrix detailing the mismatch between observations and predictions of a categorical variable
For the parameters see [`ConfusionMatrix_hp`](@ref) and [`BML_options`](@ref).
The "predicted" values are either the scores or the normalised scores (depending on the parameter `normalise_scores` [def: `true`]).
# Notes:
- The Confusion matrix report can be printed (i.e. `print(cm_model)`). If you plan to print the Confusion Matrix report, be sure that the type of the data in `y` and `ŷ` can be converted to `String`.
- Information in a structured way is available through the `info(cm)` function that returns the following dictionary:
- `accuracy`: Overall accuracy rate
- `misclassification`: Overall misclassification rate
- `actual_count`: Array of counts per label in the actual data
- `predicted_count`: Array of counts per label in the predicted data
- `scores`: Matrix actual (rows) vs predicted (columns)
- `normalised_scores`: Normalised scores
- `tp`: True positive (by class)
- `tn`: True negative (by class)
- `fp`: False positive (by class)
- `fn`: False negative (by class)
- `precision`: True class i over predicted class i (by class)
- `recall`: Predicted class i over true class i (by class)
- `specificity`: Predicted not class i over true not class i (by class)
- `f1score`: Harmonic mean of precision and recall
- `mean_precision`: Mean by class, respectively unweighted and weighted by actual_count
- `mean_recall`: Mean by class, respectively unweighted and weighted by actual_count
- `mean_specificity`: Mean by class, respectively unweighted and weighted by actual_count
- `mean_f1score`: Mean by class, respectively unweighted and weighted by actual_count
- `categories`: The categories considered
- `fitted_records`: Number of records considered
- `n_categories`: Number of categories considered
# Example:
The confusion matrix can also be plotted, e.g.:
```julia
julia> using Plots, BetaML
julia> y = ["apple","mandarin","clementine","clementine","mandarin","apple","clementine","clementine","apple","mandarin","clementine"];
julia> ŷ = ["apple","mandarin","clementine","mandarin","mandarin","apple","clementine","clementine",missing,"clementine","clementine"];
julia> cm = ConfusionMatrix(handle_missing="drop")
A ConfusionMatrix BetaMLModel (unfitted)
julia> normalised_scores = fit!(cm,y,ŷ)
3×3 Matrix{Float64}:
1.0 0.0 0.0
0.0 0.666667 0.333333
0.0 0.2 0.8
julia> println(cm)
A ConfusionMatrix BetaMLModel (fitted)
-----------------------------------------------------------------
*** CONFUSION MATRIX ***
Scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "apple" "mandarin" "clementine"
"apple" 2 0 0
"mandarin" 0 2 1
"clementine" 0 1 4
Normalised scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "apple" "mandarin" "clementine"
"apple" 1.0 0.0 0.0
"mandarin" 0.0 0.666667 0.333333
"clementine" 0.0 0.2 0.8
*** CONFUSION REPORT ***
- Accuracy: 0.8
- Misclassification rate: 0.19999999999999996
- Number of classes: 3
N Class precision recall specificity f1score actual_count predicted_count
TPR TNR support
1 apple 1.000 1.000 1.000 1.000 2 2
2 mandarin 0.667 0.667 0.857 0.667 3 3
3 clementine 0.800 0.800 0.800 0.800 5 5
- Simple avg. 0.822 0.822 0.886 0.822
- Weighted avg. 0.800 0.800 0.857 0.800
-----------------------------------------------------------------
Output of `info(cm)`:
- mean_precision: (0.8222222222222223, 0.8)
- fitted_records: 10
- specificity: [1.0, 0.8571428571428571, 0.8]
- precision: [1.0, 0.6666666666666666, 0.8]
- misclassification: 0.19999999999999996
- mean_recall: (0.8222222222222223, 0.8)
- n_categories: 3
- normalised_scores: [1.0 0.0 0.0; 0.0 0.6666666666666666 0.3333333333333333; 0.0 0.2 0.8]
- tn: [8, 6, 4]
- mean_f1score: (0.8222222222222223, 0.8)
- actual_count: [2, 3, 5]
- accuracy: 0.8
- recall: [1.0, 0.6666666666666666, 0.8]
- f1score: [1.0, 0.6666666666666666, 0.8]
- mean_specificity: (0.8857142857142858, 0.8571428571428571)
- predicted_count: [2, 3, 5]
- scores: [2 0 0; 0 2 1; 0 1 4]
- tp: [2, 2, 4]
- fn: [0, 1, 1]
- categories: ["apple", "mandarin", "clementine"]
- fp: [0, 1, 1]
julia> res = info(cm);
julia> heatmap(string.(res["categories"]),string.(res["categories"]),res["normalised_scores"],seriescolor=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix (normalised scores)")
```

"""
mutable struct ConfusionMatrix <: BetaMLUnsupervisedModel
hpar::ConfusionMatrix_hp
opt::BML_options
par::Union{Nothing,ConfusionMatrix_lp}
cres::Union{Nothing,Matrix{Int64},Matrix{Float64}}
fitted::Bool
info::Dict{String,Any}
end
function ConfusionMatrix(;kwargs...)
m = ConfusionMatrix(ConfusionMatrix_hp(),BML_options(),ConfusionMatrix_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a [`ConfusionMatrix`](@ref) model to data.
!!! warning
Data is expected in the order "ground truth, predictions" (i.e. `fit!(cm_model,y,ŷ)`)
This model supports multiple training (but the categories, if not provided, are extracted from the first training y only), while prediction with new data (i.e. `predict(cm_model,ŷnew)`) is not supported.
"""
function fit!(m::ConfusionMatrix,Y,Ŷ)
nR = size(Y,1)
size(Ŷ,1) == nR || error("Y and Ŷ have different number of elements!")
rng = m.opt.rng
if eltype(Ŷ) <: Dict || ndims(Ŷ) > 1# allow probabilistic outputs
Ŷ = mode(Ŷ,rng=rng)
end
vtype = eltype(Y)
# Parameter aliases
categories = m.hpar.categories
handle_unknown = m.hpar.handle_unknown
handle_missing = m.hpar.handle_missing
other_categories_name = m.hpar.other_categories_name
categories_names = m.hpar.categories_names
if isnothing(other_categories_name)
if nonmissingtype(vtype) <: Integer
other_categories_name = typemax(Int64)
else
other_categories_name = "other"
end
end
normalise_scores = m.hpar.normalise_scores
cache = m.opt.cache
verbosity = m.opt.verbosity
fitted = m.fitted
if categories_names != Dict()
Y = map(x->categories_names[x], Y)
Ŷ = map(x->categories_names[x], Ŷ)
end
if fitted
categories = m.par.categories
nCl = length(categories)
scores = m.par.scores
else
categories = isnothing(categories) ? collect(skipmissing(unique(Y))) : deepcopy(categories)
handle_unknown == "infrequent" && push!(categories,other_categories_name)
nCl = length(categories)
scores = zeros(Int64,nCl,nCl)
end
for n in 1:nR
if (ismissing(Y[n]) || ismissing(Ŷ[n]))
if handle_missing == "error"
error("Found a `missing` value in the data. To automatically drop missing data use the option `handle_missing=\"drop\"` in the `ConfusionMatrix` constructor.")
else
continue
end
end
r = findfirst(x -> isequal(x,Y[n]),categories)
c = findfirst(x -> isequal(x,Ŷ[n]),categories)
if isnothing(r)
if handle_unknown == "error"
error("Found a category ($(Y[n])) not present in `categories` and the `handle_unknown` is set to `error`. Perhaps you want to switch it to `infrequent`.")
elseif handle_unknown == "infrequent"
r = length(categories)
else
error("I don't know how to process `handle_unknown == $(handle_unknown)`")
end
end
if isnothing(c)
if handle_unknown == "error"
error("Found a predicted category ($(Ŷ[n])) not present in `categories` or in the true categories and the `handle_unknown` is set to `error`. Perhaps you want to switch it to `infrequent`.")
elseif handle_unknown == "infrequent"
c = length(categories)
else
error("I don't know how to process `handle_unknown == $(handle_unknown)`")
end
end
scores[r,c] += 1
end
predicted_count = dropdims(sum(scores,dims=1)',dims=2)
actual_count = dropdims(sum(scores,dims=2),dims=2)
normalised_scores = zeros(nCl, nCl)
[normalised_scores[r,:] = scores[r,:] ./ actual_count[r] for r in 1:nCl]
tp = [scores[i,i] for i in 1:nCl]
tn = [sum(scores[r,c] for r in 1:nCl, c in 1:nCl if r != i && c != i) for i in 1:nCl]
fp = [sum(scores[r,c] for r in 1:nCl, c in 1:nCl if r != i && c == i) for i in 1:nCl]
fn = [sum(scores[r,c] for r in 1:nCl, c in 1:nCl if r == i && c != i) for i in 1:nCl]
precision = tp ./ (tp .+ fp)
recall = tp ./ (tp .+ fn)
specificity = tn ./ (tn .+ fp)
f1score = (2 .* tp) ./ (2 .* tp .+ fp .+ fn )
mean_precision = (mean(precision), sum(precision .* actual_count) / sum(actual_count) )
mean_recall = (mean(recall), sum(recall .* actual_count) / sum(actual_count) )
mean_specificity = (mean(specificity), sum(specificity .* actual_count) / sum(actual_count) )
mean_f1score = (mean(f1score), sum(f1score .* actual_count) / sum(actual_count) )
accuracy = sum(tp)/sum(scores)
misclassification = 1-accuracy
cache && (m.cres = normalise_scores ? normalised_scores : scores)
m.par = ConfusionMatrix_lp(categories,vtype,scores)
m.info["accuracy"] = accuracy # Overall accuracy rate
m.info["misclassification"] = misclassification # Overall misclassification rate
m.info["actual_count"] = actual_count # Array of counts per label in the actual data
m.info["predicted_count"] = predicted_count # Array of counts per label in the predicted data
m.info["scores"] = scores # Matrix actual (rows) vs predicted (columns)
m.info["normalised_scores"] = normalised_scores # Normalised scores
m.info["tp"] = tp # True positive (by class)
m.info["tn"] = tn # True negative (by class)
m.info["fp"] = fp # False positive (by class)
m.info["fn"] = fn # False negative (by class)
m.info["precision"] = precision # True class i over predicted class i (by class)
m.info["recall"] = recall # Predicted class i over true class i (by class)
m.info["specificity"] = specificity # Predicted not class i over true not class i (by class)
m.info["f1score"] = f1score # Harmonic mean of precision and recall
m.info["mean_precision"] = mean_precision # Mean by class, respectively unweighted and weighted by actual_count
m.info["mean_recall"] = mean_recall # Mean by class, respectively unweighted and weighted by actual_count
m.info["mean_specificity"] = mean_specificity # Mean by class, respectively unweighted and weighted by actual_count
m.info["mean_f1score"] = mean_f1score # Mean by class, respectively unweighted and weighted by actual_count
m.info["categories"] = categories
m.info["fitted_records"] = sum(scores)
m.info["n_categories"] = nCl
m.fitted = true
return cache ? m.cres : nothing
end
function show(io::IO, m::ConfusionMatrix)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"A $(typeof(m)) BetaMLModel (unfitted)")
else
println(io,"A $(typeof(m)) BetaMLModel (fitted)")
res = info(m)
labels = string.(res["categories"])
nCl = length(labels)
println(io,"\n-----------------------------------------------------------------\n")
println(io, "*** CONFUSION MATRIX ***")
println(io,"")
println(io,"Scores actual (rows) vs predicted (columns):\n")
displayScores = vcat(permutedims(labels),res["scores"])
displayScores = hcat(vcat("Labels",labels),displayScores)
show(io, "text/plain", displayScores)
println(io,"")
println(io,"Normalised scores actual (rows) vs predicted (columns):\n")
displayScores = vcat(permutedims(labels),res["normalised_scores"])
displayScores = hcat(vcat("Labels",labels),displayScores)
show(io, "text/plain", displayScores)
println(io,"\n\n *** CONFUSION REPORT ***\n")
labelWidth = max(8, maximum(length.(labels))+1 )
println(io,"- Accuracy: $(res["accuracy"])")
println(io,"- Misclassification rate: $(res["misclassification"])")
println(io,"- Number of classes: $(nCl)")
println(io,"")
println(io," N ",rpad("Class",labelWidth),"precision recall specificity f1score actual_count predicted_count")
println(io," ",rpad(" ",labelWidth), " TPR TNR support ")
println(io,"")
# https://discourse.julialang.org/t/printf-with-variable-format-string/3805/4
print_formatted(io, fmt, args...) = @eval @printf($io, $fmt, $(args...))
for i in 1:nCl
print_formatted(io, "%3d %-$(labelWidth)s %8.3f %8.3f %12.3f %8.3f %12i %15i\n", i, labels[i], res["precision"][i], res["recall"][i], res["specificity"][i], res["f1score"][i], res["actual_count"][i], res["predicted_count"][i])
end
println(io,"")
print_formatted(io, "- %-$(labelWidth+2)s %8.3f %8.3f %12.3f %8.3f\n", "Simple avg.", res["mean_precision"][1], res["mean_recall"][1], res["mean_specificity"][1], res["mean_f1score"][1])
print_formatted(io, "- %-$(labelWidth+2)s %8.3f %8.3f %12.3f %8.3f\n", "Weighted avg.", res["mean_precision"][2], res["mean_recall"][2], res["mean_specificity"][2], res["mean_f1score"][2])
println(io,"\n-----------------------------------------------------------------")
println(io,"Output of `info(cm)`:")
for (k,v) in info(m)
print(io,"- ")
print(io,k)
print(io,":\t")
println(io,v)
end
end
end
# OLD START HERE ---------------------------------------------------------------
# Resources concerning Confusion Matrices:
# https://towardsdatascience.com/confusion-matrix-for-your-multi-class-machine-learning-model-ff9aa3bf7826
# https://en.wikipedia.org/wiki/Confusion_matrix
# https://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
# ------------------------------------------------------------------------------
# Regression tasks...
# Used as neural network loss function
"""
squared_cost(y,ŷ)
Compute the squared cost between a vector of observations and one of predictions as (1/2)*norm(y - ŷ)^2.
Aside from the 1/2 term, it corresponds to the squared l-2 norm distance and, when averaged over multiple datapoints, to the Mean Squared Error ([MSE](https://en.wikipedia.org/wiki/Mean_squared_error)).
It is mostly used for regression problems.
"""
squared_cost(y,ŷ) = (1/2)*norm(y - ŷ)^2
dsquared_cost(y,ŷ) = ( ŷ - y)
"""
mse(y,ŷ)
Compute the mean squared error (MSE) (aka mean squared deviation - MSD) between two vectors y and ŷ.
Note that while the deviation is averaged by the length of `y`, it is not scaled to give it a relative meaning.
"""
mse(y,ŷ) = (sum((y-ŷ).^(2))/length(y))
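# Illustrative sketch (hand-computed values): mse is an absolute measure, while
# relative_mean_error (defined below) scales the error by the magnitude of y.
#   y = [10.0, 20.0, 30.0]; ŷ = [11.0, 19.0, 31.0]
#   mse(y, ŷ)                  # 1.0
#   relative_mean_error(y, ŷ)  # 0.05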
"""
relative_mean_error(y, ŷ;normdim=false,normrec=false,p=1)
Compute the relative mean error (l-1 based by default) between y and ŷ.
There are many ways to compute a relative mean error. In particular, if normrec (normdim) is set to true, the records (dimensions) are normalised, in the sense that it doesn't matter if a record (dimension) is bigger or smaller than the others, the relative error is first computed for each record (dimension) and then it is averaged.
With both `normdim` and `normrec` set to `false` (default) the function returns the relative mean error; with both set to `true` it returns the mean relative error (i.e. with p=1 the "[mean absolute percentage error (MAPE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error)")
The parameter `p` [def: `1`] controls the p-norm used to define the error.
The _mean relative error_ emphasises the relativeness of the error, i.e. all observations and dimensions weigh the same, whether large or small. Conversely, in the _relative mean error_ the same relative error on larger observations (or dimensions) weighs more.
For example, given `y = [1,44,3]` and `ŷ = [2,45,2]`, the _mean relative error_ `mean_relative_error(y,ŷ,normrec=true)` is `0.452`, while the _relative mean error_ `relative_mean_error(y,ŷ, normrec=false)` is "only" `0.0625`.
"""
function relative_mean_error(y,ŷ;normdim=false,normrec=false,p=1)
ŷ = makematrix(ŷ)
y = makematrix(y)
(n,d) = size(y)
#ϵ = abs.(ŷ-y) .^ p
if (!normdim && !normrec) # relative mean error
avgϵRel = (sum(abs.(ŷ-y).^p)^(1/p) / (n*d)) / (sum( abs.(y) .^p)^(1/p) / (n*d)) # (avg error) / (avg y)
# avgϵRel = (norm((ŷ-y),p)/(n*d)) / (norm(y,p) / (n*d))
elseif (!normdim && normrec) # normalised by record (i.e. all records play the same weight)
avgϵRel_byRec = (sum(abs.(ŷ-y) .^ (1/p),dims=2).^(1/p) ./ d) ./ (sum(abs.(y) .^ (1/p) ,dims=2) ./d)
avgϵRel = mean(avgϵRel_byRec)
elseif (normdim && !normrec) # normalised by dimensions (i.e. all dimensions play the same weight)
avgϵRel_byDim = (sum(abs.(ŷ-y) .^ (1/p),dims=1).^(1/p) ./ n) ./ (sum(abs.(y) .^ (1/p) ,dims=1) ./n)
avgϵRel = mean(avgϵRel_byDim)
else # mean relative error
avgϵRel = sum(abs.((ŷ-y)./ y).^p)^(1/p)/(n*d) # avg(error/y)
# avgϵRel = (norm((ŷ-y)./ y,p)/(n*d))
end
return avgϵRel
end
# ------------------------------------------------------------------------------
# FeatureRanker
# https://towardsdatascience.com/stop-permuting-features-c1412e31b63f
"""
$(TYPEDEF)
Hyperparameters for [`FeatureRanker`](@ref)
# Parameters:
$(FIELDS)
"""
Base.@kwdef mutable struct FeatureR_hp <: BetaMLHyperParametersSet
"The estimator model to test."
model = nothing
"""Metric used to calculate the default column ranking. Two metrics are currently provided: "sobol" uses the variance decomposition based Sobol (total) index comparing ŷ vs. ŷ₋ⱼ; "mda" uses the mean decrease in accuracy comparing y vs. ŷ. Note that regardless of this setting, both measures are available by querying the model with `info()`, this setting only determines which one to use for the default ranking of the prediction output and which columns to remove if `recursive` is true [def: `"sobol"`]."""
metric::String = "sobol"
"""Wheter to refit the estimator model for each omitted dimension. If false, the respective column is randomly shuffled but no "new" fit is performed. This option is ignored for models that support prediction with omitted dimensions [def: `false`]."""
refit = false
"The `sobol` and `mda` metrics treat integer y's as regression tasks. Use `force_classification = true` to force that integers to be treated as classes. Note that this has no effect on model training, where it has to be set eventually in the model's own hyperparameters [def: `false`]."
force_classification = false
"If `false` the variance importance is computed in a single stage over all the variables, otherwise the less important variable is removed (according to `metric`) and then the algorithm is run again with the remaining variables, recursively [def: `false`]."
recursive::Bool = false
"Number of splits in the cross-validation function used to judge the importance of each dimension [def: `5`]."
nsplits::Int64 = 5
"Number of different sample rounds in cross validation. Increase this if your dataset is very small [def: `1`]."
nrepeats::Int64 = 1
"""Minimum number of records (or share of it, if a float) to consider in the first loop used to retrieve the less important variable. The sample is then linearly increased up to `sample_max` to retrieve the most important variable.
This parameter is ignored if `recursive=false`.
Note that there is a fixed limit of `nsplits*5` that prevails if lower [def: `25`]."""
sample_min::Union{Float64,Int64} = 25
"""Maximum number of records (or share of it, if a float) to consider in the last loop used to retrieve the most important variable, or if `recursive=false` [def: `1.0`]."""
sample_max::Union{Float64,Int64} = 1.0
"The function used by the estimator(s) to fit the model. It should take as fist argument the model itself, as second argument a matrix representing the features, and as third argument a vector representing the labels. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.fit!`]"
fit_function::Function = fit!
"The function used by the estimator(s) to predict the labels. It should take as fist argument the model itself and as second argument a matrix representing the features. This parameter is mandatory for non-BetaML estimators and can be a single value or a vector (one per estimator) in case of different estimator packages used. [default: `BetaML.predict`]"
predict_function::Function = predict
"""The keyword to ignore specific dimensions in prediction. If the model supports this keyword in the prediction function, when we loop over the various dimensions we use only prediction with this keyword instead of re-training [def: `"ignore_dims"`]."""
# See https://towardsdatascience.com/variable-importance-in-random-forests-20c6690e44e0
ignore_dims_keyword::String = "ignore_dims"
end
Base.@kwdef struct FeatureR_lp <: BetaMLLearnableParametersSet
ranks::Vector{Int64} = Int64[]
end
"""
$(TYPEDEF)
A flexible feature ranking estimator using multiple feature importance metrics
FeatureRanker helps to determine the importance of features in predictions of any black-box machine learning model (not necessarily from the BetaML suite), internally using cross-validation.
By default, it ranks variables (columns) in a single pass, without retraining on each one. However, it is possible to run multiple passes (where at each pass the least important variable is permuted) or to retrain the model on each variable that is temporarily permuted to test the model without it ("permute and relearn").
Furthermore, if the ML model under evaluation supports ignoring variables during prediction (as BetaML tree models do), it is possible to specify the keyword argument for such an option in the prediction function of the target model.
See [`FeatureR_hp`](@ref) for all hyperparameters.
The `predict(m::FeatureRanker)` function returns the ranking of the features, from least to most important. Use `info(m)` for more information, such as the loss per (omitted) variable or the Sobol (total) indices and their standard deviations in the different cross-validation trials.
# Example:
```julia
julia> using BetaML, Distributions, Plots
julia> N = 1000;
julia> xa = rand(N,3);
julia> xb = xa[:,1] .* rand.(Normal(1,0.5)); # a correlated but uninfluential variable
julia> x = hcat(xa,xb);
julia> y = [10*r[1]^2-5 for r in eachrow(x)]; # only the first variable influences y
julia> fr = FeatureRanker(model=RandomForestEstimator(),nsplits=3); # any estimator with a BetaML-like API can be used
julia> rank = fit!(fr,x,y) # from the least important variable to the most important one
4-element Vector{Int64}:
3
2
4
1
julia> sobol_by_col = info(fr)["sobol_by_col"]
4-element Vector{Float64}:
0.705723128278327
0.003127023154446514
0.002676421850738828
0.018814767195347915
julia> sobol_by_col_sd = info(fr)["sobol_by_col_sd"];
julia> ntrials_per_metric = info(fr)["ntrials_per_metric"]
5
julia> bar(string.(rank),sobol_by_col[rank],label="Sobol by col", yerror=quantile(Normal(1,0),0.975) .* (sobol_by_col_sd[rank]./sqrt(ntrials_per_metric)))
```

# Notes:
- When `recursive=true`, the reported loss by column is the cumulative loss obtained when, at each loop, the dimensions already identified as unimportant plus the one under test are all permuted; the exception is the most important variable, whose reported metric comes from the same loop as the second most important variable.
- The reported ranking may not be equal to `sortperm([measure])` when `recursive=true`, because removing variables with very low power may, by chance, increase the accuracy of the model for the remaining tested variables.
- To use `FeatureRanker` with a third party estimator model, it needs to be wrapped in a BetaML-like API: `m=ModelName(hyperparameters...); fit_function(m,x,y); predict_function(m,x)` where `fit_function` and `predict_function` can be specified in the `FeatureRanker` options.
"""
mutable struct FeatureRanker <: BetaMLModel
hpar::FeatureR_hp
opt::BML_options
par::Union{FeatureR_lp,Nothing}
cres::Vector{Int64}
fitted::Bool
info::Dict{String,Any}
end
function FeatureRanker(;kwargs...)
hps = FeatureR_hp()
m = FeatureRanker(hps,BML_options(),FeatureR_lp(),[],false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
"""
$(TYPEDSIGNATURES)
Fit a Variable Importance model using [`FeatureRanker`](@ref)
"""
function fit!(m::FeatureRanker,X,y)
(m.fitted && m.opt.verbosity >= STD) && @warn "This FeatureRanker has already been fitted. This fit overrides the previous one(s)"
nR,nC = size(X)
rng = m.opt.rng
nsplits = m.hpar.nsplits
nrepeats = m.hpar.nrepeats
force_classification = m.hpar.force_classification
recursive = m.hpar.recursive
metric = m.hpar.metric
cache = m.opt.cache
sample_min = typeof(m.hpar.sample_min) <: AbstractFloat ? max(nsplits*5, Int64(round(m.hpar.sample_min * nR ))) : max(nsplits*5,m.hpar.sample_min)
sample_max = typeof(m.hpar.sample_max) <: AbstractFloat ? max(sample_min, Int64(round(m.hpar.sample_max * nR ))) : max(sample_min,m.hpar.sample_max)
sample_max <= nR || @error "Not enough records"
recursive && (sample_max - sample_min < nC) && @error "Not enough records for a recursive analysis"
# This is the number of samples for each recursive lookup of important cols
if recursive
nsamples = Int.(round.(collect(LinRange(sample_min,sample_max,nC-1))))
else
nsamples = [sample_max]
end
ohm = nothing
if ( (ndims(y) == 1) && ( !(eltype(y) <: Number) || (eltype(y) <: Integer && force_classification)) )
ohm = OneHotEncoder(handle_unknown="infrequent",cache=false,rng=rng)
fit!(ohm,y)
ohD = info(ohm)["n_categories"]
end
ŷfull = similar(y)
if ( !(eltype(y) <: Number) || (eltype(y) <: Integer && force_classification))
ŷfull = Array{Float64,2}(undef,nR,ohD)
end
# Output containers...
ranks = zeros(Int64,nC)
metric_scores = Dict(
"mda"=>zeros(nC),
"sobol"=>zeros(nC),
"mda_sd"=>zeros(nC),
"sobol_sd"=>zeros(nC),
)
μ_full_full = 0.0
σ_full_full = 0.0
# Loop for each repetition (could be just one)...
for (ia,ns) in enumerate(nsamples)
m.opt.verbosity > STD && @info "Processing round $ia over $(length(nsamples)).."
colids_nottotest = ranks[ranks.>0]
colids_ontest = setdiff(1:nC,colids_nottotest)
((μ_full,σ_full),(metric_mda,metric_sobol),(mda_sd,sobol_sd)) = compute_cols_losses(m,X,y,ia,ns,colids_nottotest,ohm)
# Storing full dims model output if this is the first round
if ia == 1
μ_full_full = μ_full
σ_full_full = σ_full
end
# Storing outcomes of cols on test...
metric_scores["mda"][colids_ontest] .= metric_mda
metric_scores["sobol"][colids_ontest] .= metric_sobol
metric_scores["mda_sd"][colids_ontest] .= mda_sd
metric_scores["sobol_sd"][colids_ontest] .= sobol_sd
# Sorting outcomes of cols on test...
if metric == "mda"
sorted_colids_ontest = colids_ontest[sortperm(metric_mda)] # from the lower loss to the bigger one
elseif metric == "sobol"
sorted_colids_ontest = colids_ontest[sortperm(metric_sobol)]
else
@error "Unknown ranking metric."
end
if recursive
ranks[ia] = sorted_colids_ontest[1]
# If this is the last round we also need to add the most important col
if ia == length(nsamples)
ranks[ia+1] = sorted_colids_ontest[2]
end
else
ranks = sorted_colids_ontest
end
end
m.par = FeatureR_lp(ranks)
m.fitted = true
if cache
m.cres = cache ? ranks : nothing
end
m.info["fitted_records"] = nR
m.info["xndims"] = nC
m.info["loss_by_col"] = metric_scores["mda"]
m.info["loss_by_col_sd"] = metric_scores["mda_sd"]
m.info["sobol_by_col"] = metric_scores["sobol"]
m.info["sobol_by_col_sd"] = metric_scores["sobol_sd"]
m.info["loss_all_cols"] = μ_full_full
m.info["loss_all_cols_sd"] = σ_full_full
m.info["ntrials_per_metric"] = nsplits*nrepeats
m.fitted = true
return cache ? m.cres : nothing
end
function compute_cols_losses(m,X,y,ia,ns,cols_ids,ohm)
rec_ids = StatsBase.sample(1:size(X,1), ns; replace=false)
X = X[rec_ids,:]
y = (ndims(y) ==1 ) ? y[rec_ids] : y[rec_ids,:]
nR,nC = size(X)
partial_predict_supported = hasmethod(m.hpar.predict_function,Tuple{typeof(m.hpar.model),Array},(Symbol(m.hpar.ignore_dims_keyword),))
refit = m.hpar.refit
# random shuffle all cols already removed
for idcol in cols_ids
@views shuffle!(X[:,idcol])
end
# determine cols to test for removal
cols_totest = setdiff(1:nC,cols_ids)
n_cols_totest = length(cols_totest)
sampler = KFold(nsplits=m.hpar.nsplits,nrepeats=m.hpar.nrepeats,rng=m.opt.rng)
# run cross validation and on each split check the full column and each col shuffled
batch = 1
repetition = 1
(metrics_μ,metrics_σ) = cross_validation([X,y],sampler, return_statistics=true) do trainData,valData,rng
m.opt.verbosity > HIGH && @info "- processing batch $batch ..."
repetition = Int(ceil(batch/m.hpar.nsplits))
(xtrain,ytrain) = trainData; (xval,yval) = valData
m.hpar.fit_function(m.hpar.model,xtrain,ytrain)
ŷval = m.hpar.predict_function(m.hpar.model,xval)
if (eltype(ŷval) <: Dict)
yval = predict(ohm,yval)
ŷval = predict(ohm,ŷval)
end
ϵ = norm(yval-ŷval)/size(yval,1)
#partial_predict_supported || reset!(m.hpar.model)
#reset!(model)
metric_by_cols_mda = fill(0.0, n_cols_totest) # this will be the output
metric_by_cols_sobol = fill(0.0, n_cols_totest) # this will be the output
for (i,col_totest) in enumerate(cols_totest)
m.opt.verbosity > HIGH && @info "- testing col id $col_totest ..."
xtraind = hcat(xtrain[:,1:col_totest-1],shuffle(rng, xtrain[:,col_totest]),xtrain[:,col_totest+1:end])
xvald = hcat(xval[:,1:col_totest-1],shuffle(rng, xval[:,col_totest]),xval[:,col_totest+1:end])
if ! partial_predict_supported
if refit
m.hpar.fit_function(m.hpar.model,xtraind,ytrain)
ŷval_i = m.hpar.predict_function(m.hpar.model,xvald)
reset!(m.hpar.model)
else
ŷval_i = m.hpar.predict_function(m.hpar.model,xvald)
end
else
ŷval_i = m.hpar.predict_function(m.hpar.model,xvald;Symbol(m.hpar.ignore_dims_keyword)=>[col_totest])
#ŷval_i = predict(m.hpar.model,xval;ignore_dims=[col_totest])
end
if (eltype(ŷval_i) <: Dict)
ŷval_i = predict(ohm,ŷval_i)
end
metric_by_cols_mda[i] = norm(yval-ŷval_i)/size(yval,1)
metric_by_cols_sobol[i] = sobol_index(ŷval,ŷval_i)
end
reset!(m.hpar.model)
batch += 1
# cross validation stats work for scalars and vector results but not vectors of vectors, so we need to concatenate them
return vcat(ϵ, metric_by_cols_mda, metric_by_cols_sobol)
end
fullloss_μ = metrics_μ[1]
mda_μ = metrics_μ[2:n_cols_totest+1]
sobol_μ = metrics_μ[n_cols_totest+2:end]
fullloss_σ = metrics_σ[1]
mda_σ = metrics_σ[2:n_cols_totest+1]
sobol_σ = metrics_σ[n_cols_totest+2:end]
return ((fullloss_μ,fullloss_σ),(mda_μ,sobol_μ), (mda_σ,sobol_σ))
end
"""
$(TYPEDSIGNATURES)
"""
function predict(m::FeatureRanker,X)
m.opt.verbosity >= STD && @warn "FeatureRanker doesn't extend to new data. X is ignored. Use `predict(m::FeatureRanker)` to avoid this warning."
return m.par.ranks
end
function show(io::IO, ::MIME"text/plain", m::FeatureRanker)
if m.fitted == false
print(io,"FeatureRanker - A meta-model to extract variable importance of an arbitrary regressor/classifier (unfitted)")
else
print(io,"FeatureRanker - A meta-model to extract variable importance of an arbitrary regressor/classifier (fitted)")
end
end
function show(io::IO, m::FeatureRanker)
m.opt.descr != "" && println(io,m.opt.descr)
if m.fitted == false
print(io,"FeatureRanker - A meta-model to extract variable importance of an arbitrary regressor/classifier (unfitted)")
else
print(io,"FeatureRanker - A meta-model to extract variable importance of an arbitrary regressor/classifier (fitted)")
println(io,m.info)
end
end
# ------------------------------------------------------------------------------
"""
$(TYPEDSIGNATURES)
Compute the variance-analysis based (total) Sobol index.
Provided the first input is the model output with all the variables (dimensions) considered and the second input is the model output with the variable _j_ removed, the Sobol index returns the reduction in output explained variance when the jth variable is removed, i.e. higher values highlight a more important variable.
# Example
```julia
julia> ŷ = [1.0, 2.4, 1.5, 1.8];
julia> ŷ₋₁ = [0.8, 2.5, 1.5, 1.7];
julia> ŷ₋₂ = [0.7,2.6,1.4,1.6];
julia> sobol_index(ŷ,ŷ₋₁)
0.03892944038929439
julia> sobol_index(ŷ,ŷ₋₂)
0.15571776155717756
```
"""
# https://towardsdatascience.com/variable-importance-in-random-forests-20c6690e44e0
# https://en.wikipedia.org/wiki/Variance-based_sensitivity_analysis
# https://arxiv.org/pdf/2102.13347
# https://arxiv.org/pdf/2401.00800
# https://towardsdatascience.com/sobol-indices-to-measure-feature-importance-54cedc3281bc
# https://artowen.su.domains/reports/sobolshapley.pdf
# https://hal.science/hal-03741384/document
# https://www.sciencedirect.com/science/article/abs/pii/S0167473023000504
# https://onlinelibrary.wiley.com/doi/full/10.1002/qre.3398
# https://scanr.enseignementsup-recherche.gouv.fr/publication/doi10.1007%25252f978-3-031-12402-0_3
# https://erwanscornet.github.io/talks/mdi.pdf
# https://towardsdatascience.com/random-forests-in-2023-modern-extensions-of-a-powerful-method-b62debaf1d62
function sobol_index(ŷ::Array{T,1},ŷ₋ⱼ::Array{T,1}) where {T}
mean_prediction = mean(ŷ)
return sum((ŷ .- ŷ₋ⱼ).^2)/sum((ŷ .- mean_prediction).^2)
end
# Input is nrecords (rows) by D cols (PMF or OHE)
function sobol_index(ŷ::Array{T,2},ŷ₋ⱼ::Array{T,2}) where {T}
ymean = mean(ŷ,dims=1)
N = size(ŷ,1)
return sum( kl_divergence(ŷ[i,:],ŷ₋ⱼ[i,:])^2 for i in 1:N)/sum(kl_divergence(ŷ[i,:],ymean)^2 for i in 1:N)
end
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Part of submodule Utils of BetaML _ the Beta Machine Learning Toolkit
# Miscellaneous functions / types
using Base.Threads
using Base.Threads: threadid, threading_run
@static if VERSION ≥ v"1.9-rc1"
# This makes the threadsif macro work
using Base.Threads: threadpoolsize
end
"""
$(TYPEDSIGNATURES)
Conditionally apply multi-threading to `for` loops.
This is a variation on `Base.Threads.@threads` that adds a run-time boolean flag to enable or disable threading.
# Example:
```julia
function optimize(objectives; use_threads=true)
@threadsif use_threads for k = 1:length(objectives)
# ...
end
end
```
# Notes:
- Borrowed from https://github.com/JuliaQuantumControl/QuantumControlBase.jl/blob/master/src/conditionalthreads.jl
"""
macro threadsif(cond, loop)
if !(isa(loop, Expr) && loop.head === :for)
throw(ArgumentError("@threadsif requires a `for` loop expression"))
end
if !(loop.args[1] isa Expr && loop.args[1].head === :(=))
throw(ArgumentError("nested outer loops are not currently supported by @threadsif"))
end
quote
if $(esc(cond))
$(Threads._threadsfor(loop.args[1], loop.args[2], :static))
else
$(esc(loop))
end
end
end
# Attention, it uses Julia internals!
get_parametric_types(obj) = typeof(obj).parameters
isinteger_bml(_) = false
isinteger_bml(_::Integer) = true
isinteger_bml(_::Nothing) = error("Trying to run isinteger() over a `Nothing` value")
isinteger_bml(_::Missing) = missing
isinteger_bml(x::AbstractFloat) = isinteger(x)
"""
online_mean(new;mean=0.0,n=0)
Update the mean with new values.
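# Example (a minimal sketch of incremental updating):
```julia
julia> m = online_mean(10)            # mean of [10]
10.0
julia> m = online_mean(20,mean=m,n=1) # mean of [10,20]
15.0
```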
"""
online_mean(new;mean=0.0,n=0) = ((mean*n)+new)/(n+1)
"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Part of submodule Utils of BetaML - The Beta Machine Learning Toolkit
# Functions typically used for processing (manipulating) data, typically preprocessing data before running a ML model
import StatsBase: countmap
# ------------------------------------------------------------------------------
# Various reshaping functions
import Base.reshape
""" reshape(myNumber, dims..) - Reshape a number as a n dimensional Array """
reshape(x::T, dims...) where {T <: Number} = (x = [x]; reshape(x,dims) )
makecolvector(x::T) where {T} = [x]
makecolvector(x::T) where {T <: AbstractArray} = reshape(x,length(x))
makerowvector(x::T) where {T <: Number} = return [x]'
makerowvector(x::T) where {T <: AbstractArray} = reshape(x,1,length(x))
"""Transform an Array{T,1} in an Array{T,2} and leave unchanged Array{T,2}."""
makematrix(x::AbstractVector) = reshape(x, (size(x)...,1))
makematrix(x::AbstractMatrix) = x
"""Return wheather an array is sortable, i.e. has methos issort defined"""
issortable(::AbstractArray{T,N}) where {T,N} = hasmethod(isless, Tuple{nonmissingtype(T),nonmissingtype(T)})
allowmissing(x::AbstractArray{T,N}) where {T,N} = convert(Union{Array{T,N},Missing},x)
disallowmissing(x::AbstractArray{T,N}) where {T,N} = convert(Array{nonmissingtype(T),N},x)
"""
getpermutations(v::AbstractArray{T,1};keepStructure=false)
Return a vector of either (a) all possible permutations (uncollected) or (b) just those based on the unique values of the vector
Useful to measure accuracy where you don't care about the actual name of the labels, like in unsupervised classifications (e.g. clustering)
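# Example (illustrative; with `keepStructure=true` only the class labels are permuted):
```julia
julia> getpermutations([1,2,2],keepStructure=true)
2-element Vector{Vector{Int64}}:
 [1, 2, 2]
 [2, 1, 1]
```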
"""
function getpermutations(v::AbstractArray{T,1};keepStructure=false) where {T}
if !keepStructure
return Combinatorics.permutations(v)
else
classes = unique(v)
nCl = length(classes)
N = size(v,1)
pSet = Combinatorics.permutations(1:nCl)
nP = length(pSet)
vPermutations = fill(similar(v),nP)
vOrigIdx = [findfirst(x -> x == v[i] , classes) for i in 1:N]
for (pIdx,perm) in enumerate(pSet)
vPermutations[pIdx] = classes[perm[vOrigIdx]] # permuted specific version
end
return vPermutations
end
end
""" singleunique(x) Return the unique values of x whether x is an array of arrays, an array or a scalar"""
function singleunique(x::Union{T,AbstractArray{T}}) where {T <: Union{Any,AbstractArray{T2}} where T2 <: Any }
if typeof(x) <: AbstractArray{T2} where {T2 <: AbstractArray}
return unique(vcat(unique.(x)...))
elseif typeof(x) <: AbstractArray{T2} where {T2}
return unique(x)
else
return [x]
end
end
# API V2 for encoders
"""
$(TYPEDEF)
Hyperparameters for both [`OneHotEncoder`](@ref) and [`OrdinalEncoder`](@ref)
# Parameters:
$(FIELDS)
"""
Base.@kwdef mutable struct OneHotE_hp <: BetaMLHyperParametersSet
"The categories to represent as columns. [def: `nothing`, i.e. unique training values or range for integers]. Do not include `missing` in this list."
categories::Union{Vector,Nothing} = nothing
"How to handle categories not seen in training or not present in the provided `categories` array? \"error\" (default) rises an error, \"missing\" labels the whole output with missing values, \"infrequent\" adds a specific column for these categories in one-hot encoding or a single new category for ordinal one."
handle_unknown::String = "error"
"Which value during inverse transformation to assign to the \"other\" category (i.e. categories not seen on training or not present in the provided `categories` array? [def: ` nothing`, i.e. typemax(Int64) for integer vectors and \"other\" for other types]. This setting is active only if `handle_unknown=\"infrequent\"` and in that case it MUST be specified if the vector to one-hot encode is neither integer or strings"
other_categories_name = nothing
end
Base.@kwdef mutable struct OneHotEncoder_lp <: BetaMLLearnableParametersSet
categories::Vector = []
original_vector_eltype::Union{Type,Nothing} = nothing
end
"""
$(TYPEDEF)
Encode a vector of categorical values as one-hot columns.
The algorithm distinguishes between _missing_ values, for which it returns a one-hot encoded row of missing values, and _other_ categories not in the provided list or not seen during training that are handled according to the `handle_unknown` parameter.
For the parameters see [`OneHotE_hp`](@ref) and [`BML_options`](@ref). This model supports `inverse_predict`.
# Example:
```julia
julia> using BetaML
julia> x = ["a","d","e","c","d"];
julia> mod = OneHotEncoder(handle_unknown="infrequent",other_categories_name="zz")
A OneHotEncoder BetaMLModel (unfitted)
julia> x_oh = fit!(mod,x) # last col is for the "infrequent" category
5×5 Matrix{Bool}:
1 0 0 0 0
0 1 0 0 0
0 0 1 0 0
0 0 0 1 0
0 1 0 0 0
julia> x2 = ["a","b","c"];
julia> x2_oh = predict(mod,x2)
3×5 Matrix{Bool}:
1 0 0 0 0
0 0 0 0 1
0 0 0 1 0
julia> x2_back = inverse_predict(mod,x2_oh)
3-element Vector{String}:
"a"
"zz"
"c"
```
The model works on a single column. To one-hot encode a matrix you can use a loop, like:
```julia
julia> m = [1 2; 2 1; 1 1; 2 2; 2 3; 1 3]; # 2 categories in the first col, 3 in the second one
julia> m_oh = hcat([fit!(OneHotEncoder(),c) for c in eachcol(m)]...)
6×5 Matrix{Bool}:
1 0 0 1 0
0 1 1 0 0
1 0 1 0 0
0 1 0 1 0
0 1 0 0 1
1 0 0 0 1
```
"""
mutable struct OneHotEncoder <: BetaMLUnsupervisedModel
hpar::OneHotE_hp
opt::BML_options
par::Union{Nothing,OneHotEncoder_lp}
cres::Union{Nothing,Matrix{Bool},Matrix{Union{Bool,Missing}}}
fitted::Bool
info::Dict{String,Any}
end
"""
$(TYPEDEF)
Encode a vector of categorical values as integers.
The algorithm distinguishes between _missing_ values, for which it propagates the missing value, and _other_ categories not in the provided list or not seen during training that are handled according to the `handle_unknown` parameter.
For the parameters see [`OneHotE_hp`](@ref) and [`BML_options`](@ref). This model supports `inverse_predict`.
# Example:
```julia
julia> using BetaML
julia> x = ["a","d","e","c","d"];
julia> mod = OrdinalEncoder(handle_unknown="infrequent",other_categories_name="zz")
A OrdinalEncoder BetaMLModel (unfitted)
julia> x_int = fit!(mod,x)
5-element Vector{Int64}:
1
2
3
4
2
julia> x2 = ["a","b","c","g"];
julia> x2_int = predict(mod,x2) # 5 is for the "infrequent" category
4-element Vector{Int64}:
1
5
4
5
julia> x2_back = inverse_predict(mod,x2_int)
4-element Vector{String}:
"a"
"zz"
"c"
"zz"
```
"""
mutable struct OrdinalEncoder <: BetaMLUnsupervisedModel
hpar::OneHotE_hp
opt::BML_options
par::Union{Nothing,OneHotEncoder_lp}
cres::Union{Nothing,Vector{Int64},Vector{Union{Int64,Missing}}}
fitted::Bool
info::Dict{String,Any}
end
function OneHotEncoder(;kwargs...)
m = OneHotEncoder(OneHotE_hp(),BML_options(),OneHotEncoder_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
function OrdinalEncoder(;kwargs...)
m = OrdinalEncoder(OneHotE_hp(),BML_options(),OneHotEncoder_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
function _fit!(m::Union{OneHotEncoder,OrdinalEncoder},x,enctype::Symbol)
x = makecolvector(x)
N = size(x,1)
vtype = eltype(x) # nonmissingtype(eltype(x))
# Parameter aliases
categories = m.hpar.categories
handle_unknown = m.hpar.handle_unknown
other_categories_name = m.hpar.other_categories_name
if isnothing(other_categories_name)
if nonmissingtype(vtype) <: Integer
other_categories_name = typemax(Int64)
else
other_categories_name = "other"
end
end
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if nonmissingtype(vtype) <: Number && !(nonmissingtype(vtype) <: Integer)
# continuous column: we just apply identity
m.par = OneHotEncoder_lp([],vtype)
return cache ? nothing : x
end
if isnothing(categories)
if nonmissingtype(vtype) <: Integer
minx = minimum(x)
maxx = maximum(x)
categories = collect(minx:maxx)
else
categories = collect(skipmissing(unique(x)))
end
else
categories = deepcopy(categories)
end
handle_unknown == "infrequent" && push!(categories,other_categories_name)
m.par = OneHotEncoder_lp(categories,vtype)
if cache
if enctype == :onehot
K = length(categories)
outx = fill(false,N,K)
else
K = 1
outx = zeros(Int64,N,K)
end
for n in 1:N
if ismissing(x[n])
outx = (enctype == :onehot) ? convert(Matrix{Union{Missing,Bool}},outx) : convert(Matrix{Union{Missing,Int64}},outx)
outx[n,:] = fill(missing,K)
continue
end
kidx = findfirst(y -> isequal(y,x[n]),categories)
if isnothing(kidx)
if handle_unknown == "error"
error("Found a category ($(x[n])) not present in the list and the `handle_unknown` is set to `error`. Perhaps you want to swith it to either `missing` or `infrequent`.")
elseif handle_unknown == "missing"
outx = (enctype == :onehot) ? convert(Matrix{Union{Missing,Bool}},outx) : convert(Matrix{Union{Missing,Int64}},outx)
outx[n,:] = fill(missing,K);
continue
elseif handle_unknown == "infrequent"
outx[n,K] = (enctype == :onehot) ? true : length(categories)
continue
else
error("I don't know how to process `handle_unknown == $(handle_unknown)`")
end
end
enctype == :onehot ? (outx[n,kidx] = true) : outx[n,1] = kidx
end
m.cres = (enctype == :onehot) ? outx : collect(dropdims(outx,dims=2))
end
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["n_categories"] = length(categories)
m.fitted = true
return cache ? m.cres : nothing
end
fit!(m::OneHotEncoder,x) = _fit!(m,x,:onehot)
fit!(m::OrdinalEncoder,x) = _fit!(m,x,:ordinal)
function _predict(m::Union{OneHotEncoder,OrdinalEncoder},x,enctype::Symbol)
x = makecolvector(x)
N = size(x,1)
vtype = eltype(x) # nonmissingtype(eltype(x))
# Parameter aliases
handle_unknown = m.hpar.handle_unknown
categories = m.par.categories
if enctype == :onehot
K = length(categories)
outx = fill(false,N,K)
else
K = 1
outx = zeros(Int64,N,K)
end
for n in 1:N
if ismissing(x[n])
outx = (enctype == :onehot) ? convert(Matrix{Union{Missing,Bool}},outx) : convert(Matrix{Union{Missing,Int64}},outx)
outx[n,:] = fill(missing,K)
continue
end
kidx = findfirst(y -> isequal(y,x[n]),categories)
if isnothing(kidx)
if handle_unknown == "error"
error("Found a category ($(x[n])) not present in the list and the `handle_unknown` is set to `error`. Perhaps you want to swith it to either `missing` or `infrequent`.")
continue
elseif handle_unknown == "missing"
outx = (enctype == :onehot) ? convert(Matrix{Union{Missing,Bool}},outx) : convert(Matrix{Union{Missing,Int64}},outx)
outx[n,:] = fill(missing,K);
continue
elseif handle_unknown == "infrequent"
outx[n,K] = (enctype == :onehot) ? true : length(categories)
continue
else
error("I don't know how to process `handle_unknown == $(handle_unknown)`")
end
else
enctype == :onehot ? (outx[n,kidx] = true) : outx[n,1] = kidx
end
end
return (enctype == :onehot) ? outx : dropdims(outx,dims=2)
end
# Case where X is a vector of dictionaries
function _predict(m::Union{OneHotEncoder,OrdinalEncoder},x::Vector{<:Dict},enctype::Symbol)
N = size(x,1)
# Parameter aliases
handle_unknown = m.hpar.handle_unknown
categories = m.par.categories
if enctype == :onehot
K = length(categories)
outx = fill(0.0,N,K)
else
error("Predictions of a Ordinal Encoded with a vector of dictionary is not supported")
end
for n in 1:N
for (k,v) in x[n]
kidx = findfirst(y -> isequal(y,k),categories)
if isnothing(kidx)
if handle_unknown == "error"
error("Found a category ($(k)) not present in the list and the `handle_unknown` is set to `error`. Perhaps you want to swith it to either `missing` or `infrequent`.")
continue
elseif handle_unknown == "missing"
outx[n,:] = fill(missing,K);
continue
elseif handle_unknown == "infrequent"
outx[n,K] = v
continue
else
error("I don't know how to process `handle_unknown == $(handle_unknown)`")
end
else
outx[n,kidx] = v
end
end
end
return outx
end
predict(m::OneHotEncoder,x) = _predict(m,x,:onehot)
predict(m::OrdinalEncoder,x) = _predict(m,x,:ordinal)
function _inverse_predict(m,x,enctype::Symbol)
# Parameter aliases
handle_unknown = m.hpar.handle_unknown
categories = m.par.categories
original_vector_eltype = m.par.original_vector_eltype
other_categories_name = m.hpar.other_categories_name
if isnothing(other_categories_name)
if nonmissingtype(original_vector_eltype ) <: Integer
other_categories_name = typemax(Int64)
else
other_categories_name = "other"
end
end
N,D = size(x,1),size(x,2)
outx = Array{original_vector_eltype,1}(undef,N)
for n in 1:N
if enctype == :onehot
if any(ismissing.(x[n,:]))
outx[n] = missing
continue
elseif handle_unknown == "infrequent" && findfirst(c->c==true,x[n,:]) == D
outx[n] = other_categories_name
continue
end
outx[n] = categories[findfirst(c->c==true,x[n,:])]
else
if ismissing(x[n])
outx[n] = missing
continue
elseif handle_unknown == "infrequent" && x[n] == length(categories)
outx[n] = other_categories_name
continue
end
outx[n] = categories[x[n]]
end
end
return outx
end
inverse_predict(m::OneHotEncoder,x::AbstractMatrix{<:Union{Int64,Bool,Missing}}) = _inverse_predict(m,x,:onehot)
function inverse_predict(m::OneHotEncoder,x::AbstractMatrix{<:Float64})
x2 = fit!(OneHotEncoder(categories=1:size(x,2)),mode(x))
return inverse_predict(m,x2)
end
inverse_predict(m::OrdinalEncoder,x) = _inverse_predict(m,x,:ordinal)
"""
partition(data,parts;shuffle,dims,rng)
Partition (by rows) one or more matrices according to the shares in `parts`.
# Parameters
* `data`: A matrix/vector or a vector of matrices/vectors
* `parts`: A vector of the required shares (must sum to 1)
* `shuffle`: Whether to randomly shuffle the matrices (preserving the relative order between matrices)
* `dims`: The dimension for which to partition [def: `1`]
* `copy`: Whether to _copy_ the actual data or only create a reference [def: `true`]
* `rng`: Random Number Generator (see [`FIXEDSEED`](@ref)) [default: `Random.GLOBAL_RNG`]
# Notes:
* The sum of parts must be equal to 1
* The number of elements in the specified dimension must be the same for all the arrays in `data`
# Example:
```julia
julia> x = [1:10 11:20]
julia> y = collect(31:40)
julia> ((xtrain,xtest),(ytrain,ytest)) = partition([x,y],[0.7,0.3])
```
"""
function partition(data::AbstractArray{T,1},parts::AbstractArray{Float64,1};shuffle=true,dims=1,copy=true,rng = Random.GLOBAL_RNG) where T <: AbstractArray
# the sets of vector/matrices
N = size(data[1],dims)
all(size.(data,dims) .== N) || @error "All matrices passed to `partition` must have the same number of elements for the required dimension"
ridx = shuffle ? Random.shuffle(rng,1:N) : collect(1:N)
return partition.(data,Ref(parts);shuffle=shuffle,dims=dims,fixed_ridx = ridx,copy=copy,rng=rng)
end
function partition(data::AbstractArray{T,Ndims}, parts::AbstractArray{Float64,1};shuffle=true,dims=1,fixed_ridx=Int64[],copy=true,rng = Random.GLOBAL_RNG) where {T,Ndims}
# the individual vector/matrix
N = size(data,dims)
nParts = size(parts,1)
toReturn = Array{AbstractArray{T,Ndims},1}(undef,nParts)
if !(sum(parts) ≈ 1)
@error "The sum of `parts` in `partition` should total to 1."
end
ridx = fixed_ridx
if (isempty(ridx))
ridx = shuffle ? Random.shuffle(rng, 1:N) : collect(1:N)
end
allDimIdx = convert(Vector{Union{UnitRange{Int64},Vector{Int64}}},[1:i for i in size(data)])
current = 1
cumPart = 0.0
for (i,p) in enumerate(parts)
cumPart += parts[i]
final = i == nParts ? N : Int64(round(cumPart*N))
allDimIdx[dims] = ridx[current:final]
toReturn[i] = copy ? data[allDimIdx...] : @views data[allDimIdx...]
current = (final +=1)
end
return toReturn
end
# API V2 for Scale
abstract type AbstractScaler end
abstract type AbstractScalerLearnableParameter end
"""
$(TYPEDEF)
Scale the data to a given (def: unit) hypercube
# Parameters:
$(FIELDS)
# Example:
```julia
julia> using BetaML
julia> x = [[4000,1000,2000,3000] ["a", "categorical", "variable", "not to scale"] [4,1,2,3] [0.4, 0.1, 0.2, 0.3]]
4×4 Matrix{Any}:
4000 "a" 4 0.4
1000 "categorical" 1 0.1
2000 "variable" 2 0.2
3000 "not to scale" 3 0.3
julia> mod = Scaler(MinMaxScaler(outputRange=(0,10)), skip=[2])
A Scaler BetaMLModel (unfitted)
julia> xscaled = fit!(mod,x)
4×4 Matrix{Any}:
10.0 "a" 10.0 10.0
0.0 "categorical" 0.0 0.0
3.33333 "variable" 3.33333 3.33333
6.66667 "not to scale" 6.66667 6.66667
julia> xback = inverse_predict(mod, xscaled)
4×4 Matrix{Any}:
4000.0 "a" 4.0 0.4
1000.0 "categorical" 1.0 0.1
2000.0 "variable" 2.0 0.2
3000.0 "not to scale" 3.0 0.3
```
"""
Base.@kwdef mutable struct MinMaxScaler <: AbstractScaler
"The range of the input. [def: (minimum,maximum)]. Both ranges are functions of the data. You can consider other relative of absolute ranges using e.g. `inputRange=(x->minimum(x)*0.8,x->100)`"
inputRange::Tuple{Function,Function} = (minimum,maximum)
"The range of the scaled output [def: (0,1)]"
outputRange::Tuple{Real,Real} = (0,1)
end
Base.@kwdef mutable struct MinMaxScaler_lp <: AbstractScalerLearnableParameter
inputRangeApplied::Vector{Tuple{Float64,Float64}} = [(-Inf,+Inf)]
end
"""
$(TYPEDEF)
Standardise the input to zero mean and unit standard deviation, aka "Z-score".
Note that missing values are skipped.
# Parameters:
$(FIELDS)
# Example:
```julia
julia> using BetaML, Statistics
julia> x = [[4000,1000,2000,3000] [400,100,200,300] [4,1,2,3] [0.4, 0.1, 0.2, 0.3]]
4×4 Matrix{Float64}:
4000.0 400.0 4.0 0.4
1000.0 100.0 1.0 0.1
2000.0 200.0 2.0 0.2
3000.0 300.0 3.0 0.3
julia> mod = Scaler() # equiv to `Scaler(StandardScaler(scale=true, center=true))`
A Scaler BetaMLModel (unfitted)
julia> xscaled = fit!(mod,x)
4×4 Matrix{Float64}:
1.34164 1.34164 1.34164 1.34164
-1.34164 -1.34164 -1.34164 -1.34164
-0.447214 -0.447214 -0.447214 -0.447214
0.447214 0.447214 0.447214 0.447214
julia> col_means = mean(xscaled, dims=1)
1×4 Matrix{Float64}:
0.0 0.0 0.0 5.55112e-17
julia> col_var = var(xscaled, dims=1, corrected=false)
1×4 Matrix{Float64}:
1.0 1.0 1.0 1.0
julia> xback = inverse_predict(mod, xscaled)
4×4 Matrix{Float64}:
4000.0 400.0 4.0 0.4
1000.0 100.0 1.0 0.1
2000.0 200.0 2.0 0.2
3000.0 300.0 3.0 0.3
```
"""
Base.@kwdef mutable struct StandardScaler <: AbstractScaler
"Scale to unit variance [def: true]"
scale::Bool=true
"Center to zero mean [def: true]"
center::Bool=true
end
Base.@kwdef mutable struct StandardScaler_lp <: AbstractScalerLearnableParameter
sfμ::Vector{Float64} = Float64[] # scale factor of mean
sfσ::Vector{Float64} = Float64[] # scale vector of st.dev.
end
function _fit(m::MinMaxScaler,skip,X,cache)
actualRanges = Tuple{Float64,Float64}[]
if cache
Tout = Union{eltype(X),Float64}
X_scaled = Array{Tout,ndims(X)}(undef,size(X))
X_scaled .= X
else
X_scaled = nothing
end
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
imin,imax = (m.inputRange[1](skipmissing(c)), m.inputRange[2](skipmissing(c)) )
if cache
omin, omax = m.outputRange[1], m.outputRange[2]
X_scaled[:,ic] = (c .- imin) .* ((omax-omin)/(imax-imin)) .+ omin
end
push!(actualRanges,(imin,imax))
else
push!(actualRanges,(-Inf,+Inf))
end
end
return X_scaled, MinMaxScaler_lp(actualRanges)
end
function _fit(m::StandardScaler,skip,X::AbstractArray,cache)
nDims = ndims(X)
nR = size(X,1)
nD = (nDims == 1) ? 1 : size(X,2)
sfμ = zeros(nD)
sfσ = ones(nD)
if cache
Tout = Union{eltype(X),Float64}
X_scaled = Array{Tout,ndims(X)}(undef,size(X))
X_scaled .= X
else
X_scaled = nothing
end
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
μ = m.center ? mean(skipmissing(c)) : 0.0
σ² = m.scale ? var(skipmissing(c),corrected=false) : 1.0
sfμ[ic] = - μ
sfσ[ic] = 1 ./ sqrt.(σ²)
if cache
X_scaled[:,ic] = (c .+ sfμ[ic]) .* sfσ[ic]
end
end
end
return X_scaled, StandardScaler_lp(sfμ,sfσ)
end
function _predict(m::MinMaxScaler,pars::MinMaxScaler_lp,skip,X;inverse=false)
if !inverse
Tout = Union{eltype(X),Float64}
xnew = Array{Tout,ndims(X)}(undef,size(X))
xnew .= X
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
imin,imax = pars.inputRangeApplied[ic]
omin,omax = m.outputRange
xnew[:,ic] = (c .- imin) .* ((omax-omin)/(imax-imin)) .+ omin
end
end
return xnew
else
xorig = deepcopy(X)
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
imin,imax = pars.inputRangeApplied[ic]
omin,omax = m.outputRange
xorig[:,ic] = (c .- omin) .* ((imax-imin)/(omax-omin)) .+ imin
end
end
return xorig
end
end
function _predict(m::StandardScaler,pars::StandardScaler_lp,skip,X;inverse=false)
if !inverse
Tout = Union{eltype(X),Float64}
xnew = Array{Tout,ndims(X)}(undef,size(X))
xnew .= X
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
xnew[:,ic] = (c .+ pars.sfμ[ic]) .* pars.sfσ[ic]
end
end
return xnew
else
xorig = deepcopy(X)
for (ic,c) in enumerate(eachcol(X))
if !(ic in skip)
xorig[:,ic] = (c ./ pars.sfσ[ic] .- pars.sfμ[ic])
end
end
return xorig
end
end
"""
$(TYPEDEF)
Hyperparameters for the Scaler transformer
## Parameters
$(FIELDS)
"""
Base.@kwdef mutable struct Scaler_hp <: BetaMLHyperParametersSet
"The specific scaler method to employ with its own parameters. See [`StandardScaler`](@ref) [def] or [`MinMaxScaler`](@ref)."
method::AbstractScaler = StandardScaler()
"The positional ids of the columns to skip scaling (eg. categorical columns, dummies,...) [def: `[]`]"
skip::Vector{Int64} = Int64[]
end
Base.@kwdef mutable struct Scaler_lp <: BetaMLLearnableParametersSet
scalerpars::AbstractScalerLearnableParameter = StandardScaler_lp()
end
"""
$(TYPEDEF)
Scale the data according to the specific chosen method (def: `StandardScaler`)
For the parameters see [`Scaler_hp`](@ref) and [`BML_options`](@ref)
# Examples:
- Standard scaler (default)...
```julia
julia> using BetaML, Statistics
julia> x = [[4000,1000,2000,3000] [400,100,200,300] [4,1,2,3] [0.4, 0.1, 0.2, 0.3]]
4×4 Matrix{Float64}:
4000.0 400.0 4.0 0.4
1000.0 100.0 1.0 0.1
2000.0 200.0 2.0 0.2
3000.0 300.0 3.0 0.3
julia> mod = Scaler() # equiv to `Scaler(StandardScaler(scale=true, center=true))`
A Scaler BetaMLModel (unfitted)
julia> xscaled = fit!(mod,x)
4×4 Matrix{Float64}:
1.34164 1.34164 1.34164 1.34164
-1.34164 -1.34164 -1.34164 -1.34164
-0.447214 -0.447214 -0.447214 -0.447214
0.447214 0.447214 0.447214 0.447214
julia> col_means = mean(xscaled, dims=1)
1×4 Matrix{Float64}:
0.0 0.0 0.0 5.55112e-17
julia> col_var = var(xscaled, dims=1, corrected=false)
1×4 Matrix{Float64}:
1.0 1.0 1.0 1.0
julia> xback = inverse_predict(mod, xscaled)
4×4 Matrix{Float64}:
4000.0 400.0 4.0 0.4
1000.0 100.0 1.0 0.1
2000.0 200.0 2.0 0.2
3000.0 300.0 3.0 0.3
```
- Min-max scaler...
```julia
julia> using BetaML
julia> x = [[4000,1000,2000,3000] ["a", "categorical", "variable", "not to scale"] [4,1,2,3] [0.4, 0.1, 0.2, 0.3]]
4×4 Matrix{Any}:
4000 "a" 4 0.4
1000 "categorical" 1 0.1
2000 "variable" 2 0.2
3000 "not to scale" 3 0.3
julia> mod = Scaler(MinMaxScaler(outputRange=(0,10)),skip=[2])
A Scaler BetaMLModel (unfitted)
julia> xscaled = fit!(mod,x)
4×4 Matrix{Any}:
10.0 "a" 10.0 10.0
0.0 "categorical" 0.0 0.0
3.33333 "variable" 3.33333 3.33333
6.66667 "not to scale" 6.66667 6.66667
julia> xback = inverse_predict(mod,xscaled)
4×4 Matrix{Any}:
4000.0 "a" 4.0 0.4
1000.0 "categorical" 1.0 0.1
2000.0 "variable" 2.0 0.2
3000.0 "not to scale" 3.0 0.3
```
"""
mutable struct Scaler <: BetaMLUnsupervisedModel
hpar::Scaler_hp
opt::BML_options
par::Union{Nothing,Scaler_lp}
cres::Union{Nothing,Array}
fitted::Bool
info::Dict{String,Any}
end
function Scaler(;kwargs...)
m = Scaler(Scaler_hp(),BML_options(),Scaler_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
function Scaler(method;kwargs...)
m = Scaler(Scaler_hp(method=method),BML_options(),Scaler_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
function fit!(m::Scaler,x)
# Parameter alias..
scaler = m.hpar.method
skip = m.hpar.skip
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
verbosity >= STD && @warn "This model doesn't support online training. Training will be performed based on new data only."
end
m.cres,m.par.scalerpars = _fit(scaler,skip,x,cache)
m.info["fitted_records"] = get(m.info,"fitted_records",0) + size(x,1)
m.info["xndims"] = size(x,2)
m.fitted = true
return cache ? m.cres : nothing
end
function predict(m::Scaler,x)
return _predict(m.hpar.method,m.par.scalerpars,m.hpar.skip,x;inverse=false)
end
function inverse_predict(m::Scaler,x)
return _predict(m.hpar.method,m.par.scalerpars,m.hpar.skip,x;inverse=true)
end
"""
$(TYPEDEF)
Hyperparameters for the PCAEncoder transformer
## Parameters
$(FIELDS)
"""
Base.@kwdef mutable struct PCAE_hp <: BetaMLHyperParametersSet
"The size, that is the number of dimensions, to maintain (with `encoded_size <= size(X,2)` ) [def: `nothing`, i.e. the number of output dimensions is determined from the parameter `max_unexplained_var`]"
encoded_size::Union{Nothing,Int64} = nothing
"The maximum proportion of variance that we are willing to accept when reducing the number of dimensions in our data [def: 0.05]. It doesn't have any effect when the output number of dimensions is explicitly chosen with the parameter `encoded_size`"
max_unexplained_var::Float64 = 0.05
end
Base.@kwdef mutable struct PCA_lp <: BetaMLLearnableParametersSet
eigen_out::Union{Eigen,Nothing} =nothing
encoded_size_actual::Union{Int64,Nothing}=nothing
end
"""
$(TYPEDEF)
Perform a Principal Component Analysis, a dimensionality reduction technique employing a linear transformation of the original matrix by the eigenvectors of the covariance matrix.
PCAEncoder returns the matrix reprojected among the dimensions of maximum variance.
For the parameters see [`PCAE_hp`](@ref) and [`BML_options`](@ref)
# Notes:
- PCAEncoder doesn't automatically scale the data. It is suggested to apply the [`Scaler`](@ref) model before running it.
- Missing data are not supported. Impute them first, see the [`Imputation`](@ref) module.
- If one doesn't know _a priori_ the maximum unexplained variance that one is willing to accept, nor the desired number of dimensions, one can run the model with all the dimensions in output (i.e. with `encoded_size=size(X,2)`), analyse the proportions of explained cumulative variance by dimension in `info(mod)["explained_var_by_dim"]`, choose the number of dimensions K according to one's needs and finally pick from the reprojected matrix only the number of dimensions required, i.e. `out.X[:,1:K]`.
# Example:
```julia
julia> using BetaML
julia> xtrain = [1 10 100; 1.1 15 120; 0.95 23 90; 0.99 17 120; 1.05 8 90; 1.1 12 95];
julia> mod = PCAEncoder(max_unexplained_var=0.05)
A PCAEncoder BetaMLModel (unfitted)
julia> xtrain_reproj = fit!(mod,xtrain)
6×2 Matrix{Float64}:
100.449 3.1783
120.743 6.80764
91.3551 16.8275
120.878 8.80372
90.3363 1.86179
95.5965 5.51254
julia> info(mod)
Dict{String, Any} with 5 entries:
"explained_var_by_dim" => [0.873992, 0.999989, 1.0]
"fitted_records" => 6
"prop_explained_var" => 0.999989
"retained_dims" => 2
"xndims" => 3
julia> xtest = [2 20 200];
julia> xtest_reproj = predict(mod,xtest)
1×2 Matrix{Float64}:
200.898 6.3566
```
"""
mutable struct PCAEncoder <: BetaMLUnsupervisedModel
hpar::PCAE_hp
opt::BML_options
par::Union{Nothing,PCA_lp}
cres::Union{Nothing,Matrix}
fitted::Bool
info::Dict{String,Any}
end
function PCAEncoder(;kwargs...)
m = PCAEncoder(PCAE_hp(),BML_options(),PCA_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
# Correction for releasing without breaking.. to remove on v0.12 onward...
# found || error("Keyword \"$kw\" is not part of this model.")
if !found
if kw == :outdims
setproperty!(m.hpar,:encoded_size,kwv)
found = true
else
error("Keyword \"$kw\" is not part of this model.")
end
end
end
return m
end
function fit!(m::PCAEncoder,X)
# Parameter alias..
encoded_size = m.hpar.encoded_size
max_unexplained_var = m.hpar.max_unexplained_var
cache = m.opt.cache
verbosity = m.opt.verbosity
rng = m.opt.rng
if m.fitted
verbosity >= STD && @warn "This model doesn't support online training. Training will be performed based on new data only."
end
(N,D) = size(X)
if !isnothing(encoded_size) && encoded_size > D
@error("The parameter `encoded_size` must be ≤ of the number of dimensions of the input data matrix")
end
Σ = (1/N) * X'*(I-(1/N)*ones(N)*ones(N)')*X
E = eigen(Σ) # eigenvalues are ordered from the smallest to the largest
# Finding encoded_size_actual
totvar = sum(E.values)
explained_var_by_dim = cumsum(reverse(E.values)) ./ totvar
encoded_size_actual = isnothing(encoded_size) ? findfirst(x -> x >= (1-max_unexplained_var), explained_var_by_dim) : encoded_size
m.par.eigen_out = E
m.par.encoded_size_actual = encoded_size_actual
if cache
P = E.vectors[:,end:-1:D-encoded_size_actual+1]
m.cres = X*P
end
m.info["fitted_records"] = get(m.info,"fitted_records",0) + N
m.info["xndims"] = D
m.info["explained_var_by_dim"] = explained_var_by_dim
m.info["prop_explained_var"] = explained_var_by_dim[encoded_size_actual]
m.info["retained_dims"] = encoded_size_actual
m.fitted=true
return cache ? m.cres : nothing
end
function predict(m::PCAEncoder,X)
D = size(m.par.eigen_out.vectors,2)
P = m.par.eigen_out.vectors[:,end:-1:D-m.par.encoded_size_actual+1]
return X*P
end
"""
cols_with_missing(x)
Return an array with the ids of the columns where there is at least a missing value.
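# Example (illustrative):
```julia
julia> cols_with_missing([1 2 missing; 4 5 6])
1-element Vector{Int64}:
 3
```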
"""
function cols_with_missing(x)
cols_with_missing = Int64[]
(N,D) = size(x)
for d in 1:D
for n in 1:N
if ismissing(x[n,d])
push!(cols_with_missing,d)
break
end
end
end
return cols_with_missing
end
"""
$(TYPEDSIGNATURES)
Perform cross_validation according to `sampler` rule by calling the function f and collecting its output
# Parameters
- `f`: The user-defined function that consumes the specific train and validation data and returns something (often the associated validation error). See later
- `data`: A single n-dimensional array or a vector of them (e.g. X,Y), depending on the tasks required by `f`.
- `sampler`: An instance of an `AbstractDataSampler`, defining the "rules" for sampling at each iteration. [def: `KFold(nsplits=5,nrepeats=1,shuffle=true,rng=Random.GLOBAL_RNG)` ]. Note that the RNG passed to the `f` function is the RNG passed to the sampler
- `dims`: The dimension over which to perform the cross_validation, i.e. the dimension containing the observations [def: `1`]
- `verbosity`: The verbosity to print information during each iteration (this can also be printed in the `f` function) [def: `STD`]
- `return_statistics`: Whether cross_validation should return the statistics of the output of `f` (mean and standard deviation) or the whole outputs [def: `true`].
# Notes
cross_validation works by calling the function `f`, defined by the user, passing to it the tuples `trainData`, `valData` and `rng` and collecting the result of the function `f`. The specific way in which `trainData` and `valData` are selected at each iteration depends on the specific `sampler`, with a 5-fold rule being the default.
This approach is very flexible because the specific model to employ or the metric to use is left within the user-provided function. The only thing that cross_validation does is provide the model defined in the function `f` with the opportune data (and the random number generator).
**Input of the user-provided function**
`trainData` and `valData` are both themselves tuples. In supervised models, cross_validation's `data` should be a tuple of (X,Y) and `trainData` and `valData` will be equivalent to (xtrain, ytrain) and (xval, yval). In unsupervised models `data` is a single array, but the training and validation data still need to be accessed as `trainData[1]` and `valData[1]`.
**Output of the user-provided function**
The user-defined function can return anything. However, if `return_statistics` is left to its default `true` value, the user-defined function must return a single scalar (e.g. some error measure) so that the mean and the standard deviation can be computed and returned.
Note that `cross_validation` can conveniently be employed using the `do` syntax, as Julia automatically rewrites `cross_validation(data,...) do trainData,valData,rng ...user defined body... end` into `cross_validation(f, data,...)`, where `f` is the anonymous function defined by the `do` block.
# Example
```julia
julia> X = [11:19 21:29 31:39 41:49 51:59 61:69];
julia> Y = [1:9;];
julia> sampler = KFold(nsplits=3);
julia> (μ,σ) = cross_validation([X,Y],sampler) do trainData,valData,rng
(xtrain,ytrain) = trainData; (xval,yval) = valData
model = RandomForestEstimator(n_trees=30,rng=rng)
fit!(model,xtrain,ytrain)
ŷval = predict(model,xval)
ϵ = relative_mean_error(yval,ŷval)
return ϵ
end
(0.3202242202242202, 0.04307662219315022)
```
"""
function cross_validation(f,data,sampler=KFold(nsplits=5,nrepeats=1,shuffle=true,rng=Random.GLOBAL_RNG);dims=1,verbosity=STD, return_statistics=true)
iterResults = []
for (i,iterData) in enumerate(SamplerWithData(sampler,data,dims))
iterResult = f(iterData[1],iterData[2],sampler.rng)
push!(iterResults,iterResult)
verbosity > HIGH && println("Done iteration $i. This iteration output: $iterResult")
end
if return_statistics return (mean(iterResults),std(iterResults)) else return iterResults end
end
"""
$(TYPEDEF)
Simple grid method for hyper-parameters validation of supervised models.
All parameters are tested using cross-validation and then the "best" combination is used.
# Notes:
- the default loss is suitable for 1-dimensional output supervised models
## Parameters:
$(TYPEDFIELDS)
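# Example:
A minimal sketch of a possible use. The tuned model and its hyperparameter names (here `RandomForestEstimator` and `n_trees`, as in the `cross_validation` example) are only illustrative; any supervised model exposing the `autotune` option can be used.
```julia
using BetaML
x = rand(100,3); y = 2 .* x[:,1] .+ rand(100)
m = RandomForestEstimator(autotune   = true,
                          tunemethod = GridSearch(hpranges=Dict("n_trees"=>[10,30,50]), res_share=0.2))
fit!(m,x,y) # the whole grid is evaluated (by default with a cross-validated l2 loss) and the best combination is retained
```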
"""
Base.@kwdef mutable struct GridSearch <: AutoTuneMethod
"Loss function to use. [def: [`l2loss_by_cv`](@ref)`]. Any function that takes a model, data (a vector of arrays, even if we work only with X) and (using the `rng` keyword) a RNG and return a scalar loss."
loss::Function = l2loss_by_cv
"Share of the (data) resources to use for the autotuning [def: 0.1]. With `res_share=1` all the dataset is used for autotuning, it can be very time consuming!"
res_share::Float64 = 0.1
"Dictionary of parameter names (String) and associated vector of values to test. Note that you can easily sample these values from a distribution with rand(distr_object,n_values). The number of points you provide for a given parameter can be interpreted as proportional to the prior you have on the importance of that parameter for the algorithm quality."
hpranges::Dict{String,Any} = Dict{String,Any}()
"Use multithreads in the search for the best hyperparameters [def: `false`]"
multithreads::Bool = false
end
"""
$(TYPEDEF)
Hyper-parameter validation of supervised models that searches the parameter space through successive halving.
All parameters are tested on a small sub-sample, then the "best" combinations are kept for a second round that uses more samples, and so on until only one hyperparameter combination is left.
# Notes:
- the default loss is suitable for 1-dimensional output supervised models, and applies cross-validation itself. Any function that accepts a model, some data and returns a scalar loss can be used
- the rate at which the pool of candidate hyperparameter combinations shrinks is controlled by the number of data shares defined in `res_shares` (i.e. the epochs): the more epochs are chosen, the lower the "shrink" coefficient
## Parameters:
$(TYPEDFIELDS)
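# Example:
A minimal sketch, with the same illustrative caveats as for `GridSearch`: the tuned model and the hyperparameter names are only an example.
```julia
using BetaML
x = rand(100,3); y = 2 .* x[:,1] .+ rand(100)
m = RandomForestEstimator(autotune   = true,
                          tunemethod = SuccessiveHalvingSearch(
                              hpranges   = Dict("n_trees"=>[10,30,50,80]),
                              res_shares = [0.1, 0.3, 0.6]))
fit!(m,x,y) # the candidate pool shrinks at each resource share until a single combination remains
```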
"""
Base.@kwdef mutable struct SuccessiveHalvingSearch <: AutoTuneMethod
"Loss function to use. [def: [`l2loss_by_cv`](@ref)`]. Any function that takes a model, data (a vector of arrays, even if we work only with X) and (using the `rng` keyword) a RNG and return a scalar loss."
loss::Function = l2loss_by_cv
"""Shares of the (data) resources to use for the autotuning in the successive iterations [def: `[0.05, 0.2, 0.3]`]. With `res_share=1` all the dataset is used for autotuning, it can be very time consuming!
The number of models is reduced of the same share in order to arrive with a single model. Increase the number of `res_shares` in order to increase the number of models kept at each iteration.
"""
res_shares::Vector{Float64} = [0.08, 0.1, 0.13, 0.15, 0.2, 0.3, 0.4]
"Dictionary of parameter names (String) and associated vector of values to test. Note that you can easily sample these values from a distribution with rand(distr_object,n_values). The number of points you provide for a given parameter can be interpreted as proportional to the prior you have on the importance of that parameter for the algorithm quality."
hpranges::Dict{String,Any} = Dict{String,Any}()
"Use multiple threads in the search for the best hyperparameters [def: `false`]"
multithreads::Bool = false
end
"Transform a Dict(parameters => possible range) in a vector of Dict(parameters=>parvalues)"
function _hpranges_2_candidates(hpranges)
parLengths = Int64[]
for (k,v) in hpranges
push!(parLengths,length(v))
end
candidates = Dict{String,Any}[]
for ij in CartesianIndices(Tuple(parLengths))
thishpars = Dict{String,Any}()
i = 1
for (k,v) in hpranges
thishpars[k] = hpranges[k][Tuple(ij)[i]]
i += 1
end
#thishpars = NamedTuple{Tuple(keys(thishpars))}(values(thishpars)) # dict to namedtouple, also ntuple = (; dict...)
push!(candidates,thishpars)
end
return candidates
end
"""
$(TYPEDSIGNATURES)
Hyperparameter autotuning using the [`GridSearch`](@ref) method.
"""
function tune!(m,method::GridSearch,data)
options(m).verbosity >= STD && println("Starting hyper-parameters autotuning (this could take a while..)")
options(m).verbosity >= HIGH && println(method)
hpranges = method.hpranges
candidates = _hpranges_2_candidates(hpranges)
rng = options(m).rng
multithreads = method.multithreads && Threads.nthreads() > 1
compLock = ReentrantLock()
best_candidate = Dict()
lowest_loss = Inf
n_orig = size(data[1],1)
res_share = method.res_share
if n_orig * res_share < 10
res_share = 10 / n_orig # trick to avoid training on 1-sample, where some models have problems
end
subs = partition([data...],[res_share,1-res_share],rng=rng, copy=true)
sampleddata = (collect([subs[i][1] for i in 1:length(subs)])...,)
masterSeed = rand(rng,100:typemax(Int64))
rngs = generate_parallel_rngs(rng,Threads.nthreads())
n_candidates = length(candidates)
@threadsif multithreads for c in 1:n_candidates
candidate = candidates[c]
tsrng = rngs[Threads.threadid()]
Random.seed!(tsrng,masterSeed+c*10)
options(m).verbosity == FULL && println("Testing candidate $candidate")
mc = deepcopy(m)
mc.opt.autotune = false
mc.opt.verbosity = NONE
sethp!(mc,candidate)
μ = method.loss(mc,sampleddata;rng=tsrng)
options(m).verbosity == FULL && println(" -- predicted loss: $μ")
if multithreads
lock(compLock) ## This step can't be run in parallel...
end
try
if μ < lowest_loss
lowest_loss = μ
best_candidate = candidate
end
finally
if multithreads
unlock(compLock)
end
end
end
sethp!(m,best_candidate)
end
"""
$(TYPEDSIGNATURES)
Hyperparameter autotuning using the [`SuccessiveHalvingSearch`](@ref) method.
"""
function tune!(m,method::SuccessiveHalvingSearch,data)
options(m).verbosity >= STD && println("Starting hyper-parameters autotuning (this could take a while..)")
options(m).verbosity >= HIGH && println(method)
hpranges = method.hpranges
res_shares = method.res_shares
rng = options(m).rng
multithreads = method.multithreads && Threads.nthreads() > 1
compLock = ReentrantLock()
epochs = length(res_shares)
candidates = _hpranges_2_candidates(hpranges)
ncandidates = length(candidates)
shrinkfactor = ncandidates^(1/epochs)
n_orig = size(data[1],1)
for e in 1:epochs
res_share = res_shares[e]
if n_orig * res_share < 10
res_share = 10 / n_orig # trick to avoid training on 1-sample, where some models have problems
end
esubs = partition([data...],[res_share,1-res_share],copy=false,rng=rng)
epochdata = (collect([esubs[i][1] for i in 1:length(esubs)])...,)
ncandidates_thisepoch = Int(round(ncandidates/shrinkfactor^(e-1)))
ncandidates_tokeep = Int(round(ncandidates/shrinkfactor^e))
options(m).verbosity >= STD && println("(e $e / $epochs) N data / n candidates / n candidates to retain : $(n_orig * res_share) \t $ncandidates_thisepoch $ncandidates_tokeep")
scores = Vector{Tuple{Float64,Dict}}(undef,ncandidates_thisepoch)
masterSeed = rand(rng,100:typemax(Int64))
rngs = generate_parallel_rngs(rng,Threads.nthreads())
n_candidates = length(candidates)
ncandidates_thisepoch == n_candidates || error("Problem with number of candidates!")
@threadsif multithreads for c in 1:n_candidates
candidate=candidates[c]
tsrng = rngs[Threads.threadid()]
Random.seed!(tsrng,masterSeed+c*10)
options(m).verbosity == FULL && println("(e $e) Testing candidate $candidate")
mc = deepcopy(m)
mc.opt.autotune = false
mc.opt.verbosity = NONE
sethp!(mc,candidate)
μ = method.loss(mc,epochdata;rng=tsrng)
options(m).verbosity == FULL && println(" -- predicted loss: $μ")
scores[c] = (μ,candidate)
end
sort!(scores,by=first)
options(m).verbosity == FULL && println("(e $e) Scores: \n $scores")
candidates = [scores[i][2] for i in 1:ncandidates_tokeep]
end
length(candidates) == 1 || error("Here we should have a single candidate remaining!")
sethp!(m,candidates[1])
end
"""
$(TYPEDSIGNATURES)
Hyperparameter autotuning.
"""
function autotune!(m,data) # or autotune!(m,data) ???
if !(options(m).autotune)
return m
end
# let's make sure data is always a tuple of arrays, even for unsupervised models
if !(eltype(data) <: AbstractArray) # data is a single array
data = (data,)
end
n = size(data[1],1)
n >= 10 || error("Too few records to autotune the model. At the very least I need 10 records ($n provided)")
tune!(m,hyperparameters(m).tunemethod,data)
return nothing
end
"""
class_counts_with_labels(x)
Return a dictionary that counts the number of each unique item (rows) in a dataset.
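# Example:
A couple of illustrative calls (the expected content of the returned dictionaries is shown as comments):
```julia
class_counts_with_labels(["a","b","a"])    # Dict("a"=>2, "b"=>1)
class_counts_with_labels([1 2; 1 2; 3 4])  # Dict([1,2]=>2, [3,4]=>1)
```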
"""
function class_counts_with_labels(x;classes=nothing)
dims = ndims(x)
if dims == 1
if classes == nothing
return countmap(x)
else
cWithLabels = countmap(x)
return [get(cWithLabels,k,0) for k in classes]
end
end
# nd is more than 1...
T = Array{eltype(x),1}
if classes != nothing
counts = Dict([u=>0 for u in classes])
else
counts = Dict{T,Int64}() # a dictionary of label -> count.
end
for i in 1:size(x,1)
label = x[i,:]
if !(label in keys(counts))
counts[label] = 1
else
counts[label] += 1
end
end
return counts
end
"""
class_counts(x;classes=nothing)
Return a (unsorted) vector with the counts of each unique item (element or rows) in a dataset.
If order is important or not all classes are present in the data, a preset vector of classes can be given in the parameter `classes`
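# Example:
A small illustrative call (expected output as a comment):
```julia
class_counts(["a","b","a","c","a"], classes=["a","b","c","d"])  # [3, 1, 1, 0]
```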
"""
function class_counts(x; classes=nothing)
nd = ndims(x)
if classes == nothing # order doesn't matter
return (nd == 1) ? values(countmap(x)) : values(class_counts_with_labels(x;classes=classes))
else
cWithLabels = (nd == 1) ? countmap(x) : class_counts_with_labels(x;classes=classes)
return [get(cWithLabels,k,0) for k in classes]
end
end
"""
mode(dict::Dict{T,Float64};rng)
Return the key with the highest value (using rand in case of ties between multiple modal values)
"""
function mode(dict::Dict{T,T2};rng = Random.GLOBAL_RNG) where {T, T2 <: Number}
mks = [k for (k,v) in dict if v==maximum(values(dict))]
if length(mks) == 1
return mks[1]
else
return mks[rand(rng,1:length(mks))]
end
end
"""
mode(v::AbstractVector{T};rng)
Return the position with the highest value in an array, interpreted as mode (using rand in case of multimodal values)
"""
function mode(v::AbstractVector{T};rng = Random.GLOBAL_RNG) where {T <: Number}
mpos = findall(x -> x == maximum(v),v)
if length(mpos) == 1
return mpos[1]
else
return mpos[rand(rng,1:length(mpos))]
end
end
"""
mode(elements,rng)
Given a vector of dictionaries whose values are numerical (e.g. probabilities), a vector of vectors or a matrix, it returns the mode of each element (dictionary, vector or row) in terms of the key or the position.
Use it to return a unique value from a multiclass classifier returning probabilities.
# Note:
- If multiple classes have the highest mode, one is returned at random (use the parameter `rng` to fix the stochasticity)
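# Example:
A small illustrative call on a matrix of probabilities (expected output as a comment):
```julia
mode([0.1 0.3 0.6; 0.8 0.1 0.1])  # [3, 1], i.e. the position of the highest value of each row
```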
"""
function mode(dicts::AbstractArray{AbstractDict{T,<: Number}};rng = Random.GLOBAL_RNG) where {T}
return mode.(dicts;rng=rng)
end
function mode(vals::AbstractArray{T,1};rng = Random.GLOBAL_RNG) where {T <: AbstractArray{T2,1} where T2 <: Number}
return mode.(vals;rng=rng)
end
function mode(vals::AbstractArray{T,1};rng = Random.GLOBAL_RNG) where {T <: AbstractDict{T2,<: Number} where T2 }
return mode.(vals;rng=rng)
end
function mode(vals::AbstractArray{T,2};rng = Random.GLOBAL_RNG) where {T <: Number}
return [mode(r;rng=rng) for r in eachrow(vals)]
end
function mode(vals::AbstractArray{T,1};rng = Random.GLOBAL_RNG) where {T}
return mode(class_counts_with_labels(vals);rng=rng)
end
"""
mean_dicts(dicts)
Compute the mean of the values of an array of dictionaries.
Given `dicts` an array of dictionaries, `mean_dicts` first compute the union of the keys and then average the values.
If the original values are probabilities (non-negative items summing to 1), the result is also a probability distribution.
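# Example:
A small illustrative call (expected output as a comment):
```julia
mean_dicts([Dict("a"=>0.2,"b"=>0.8), Dict("a"=>0.6,"b"=>0.4)])  # Dict("a"=>0.4, "b"=>0.6)
```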
"""
function mean_dicts(dicts; weights=ones(length(dicts)))
if length(dicts) == 1
return dicts[1]
end
T = eltype(keys(dicts[1]))
allkeys = union([keys(i) for i in dicts]...)
outDict = Dict{T,Float64}()
ndicts = length(dicts)
totWeights = sum(weights)
for k in allkeys
v = 0
for (i,d) in enumerate(dicts)
if k in keys(d)
v += (d[k])*(weights[i]/totWeights)
end
end
outDict[k] = v
end
return outDict
end
# ------------------------------------------------------------------------------
# Other mathematical/computational functions
""" LogSumExp for efficiently computing log(sum(exp.(x))) """
lse(x) = maximum(x)+log(sum(exp.(x .- maximum(x))))
""" Sterling number: number of partitions of a set of n elements in k sets """
sterling(n::BigInt,k::BigInt) = (1/factorial(k)) * sum((-1)^i * binomial(k,i)* (k-i)^n for i in 0:k)
sterling(n::Int64,k::Int64) = sterling(BigInt(n),BigInt(k))
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 4736 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
import Base.iterate
abstract type AbstractDataSampler end
"""
SamplerWithData{Tsampler}
Associate an instance of an AbstractDataSampler with the actual data to sample.
"""
mutable struct SamplerWithData{Ts <: AbstractDataSampler, Td <: AbstractArray}
sampler::Ts
data::Td
dims::Int64
end
# To implement a new sampler:
# - create a new structure child of AbstractDataSampler
# - override iterate(iter::SamplerWithData{yoursampler} and iterate(iter::SamplerWithData{yoursampler},state) considering that
#=
for i in iter # or "for i = iter"
# body
end
# --> is rewritten to :
next = iterate(iter)
while next !== nothing
(i, state) = next
# body
next = iterate(iter, state)
end
=#
"""
KFold(nsplits=5,nrepeats=1,shuffle=true,rng=Random.GLOBAL_RNG)
Iterator for k-fold cross_validation strategy.
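# Example:
A sketch of the typical usage through [`cross_validation`](@ref) (the model inside the do-block is only illustrative):
```julia
sampler = KFold(nsplits=5,nrepeats=1)
(μ,σ)   = cross_validation([x,y],sampler) do trainData,valData,rng
    (xtrain,ytrain) = trainData; (xval,yval) = valData
    m = RandomForestEstimator(rng=rng)
    fit!(m,xtrain,ytrain)
    return relative_mean_error(yval,predict(m,xval))
end
```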
"""
mutable struct KFold <: AbstractDataSampler
nsplits::Int64
nrepeats::Int64
shuffle::Bool
rng::AbstractRNG
function KFold(;nsplits=5,nrepeats=1,shuffle=true,rng=Random.GLOBAL_RNG)
return new(nsplits,nrepeats,shuffle,rng)
end
end
# Implementation of the Julia iteration API for SamplerWithData{KFold}
function iterate(iter::SamplerWithData{KFold})
# First iteration, I need to create the subsamples
K = iter.sampler.nsplits
D = iter.dims
if eltype(iter.data) <: AbstractArray # data has multiple arrays, like X,Y
subs = collect(zip(partition(iter.data,fill(1/K,K),shuffle=iter.sampler.shuffle,dims=D,rng=iter.sampler.rng,copy=false)...))
else # data is a single matrix/tensor
subs = collect(zip(partition(iter.data,fill(1/K,K),shuffle=iter.sampler.shuffle,dims=D,rng=iter.sampler.rng,copy=false)))
end
i = (cat.(subs[2:K]...,dims=D),subs[1])
next = (subs,2)
return (i,next)
end
function iterate(iter::SamplerWithData{KFold},state)
# Further iterations: I need to re-create the subsamples only at the beginning of a new round
K = iter.sampler.nsplits
D = iter.dims
nRep = iter.sampler.nrepeats
subs = state[1]
counter = state[2]
counter <= (K * nRep) || return nothing # If we have done all the splits for all the repetitions, we are done
kpart = counter % K
if kpart == 1 # new round, we repartition in k parts
if eltype(iter.data) <: AbstractArray # data has multiple arrays, like X,Y
subs = collect(zip(partition(iter.data,fill(1/K,K),shuffle=iter.sampler.shuffle,dims=D,rng=iter.sampler.rng,copy=false)...))
else # data is a single matrix
subs = collect(zip(partition(iter.data,fill(1/K,K),shuffle=iter.sampler.shuffle,dims=D,rng=iter.sampler.rng,copy=false)))
end
i = (cat.(subs[2:end]...,dims=D),subs[1])
next = (subs,counter+1)
return (i,next)
else
if kpart == 0 # the modulo returns the last element as zero instead as K
i = (cat.(subs[1:K-1]...,dims=D),subs[end])
else
i = (cat.(subs[1:kpart-1]...,subs[kpart+1:end]...,dims=D),subs[kpart])
end
next = (subs,counter+1)
return (i,next)
end
end
# ------------------------------------------------------------------------------
# Used for NN
"""
batch(n,bsize;sequential=false,rng)
Return a vector of vectors, each of `bsize` indices from `1` to `n`.
The indices are assigned randomly, unless the optional parameter `sequential` is set to `true`.
# Example:
```julia
julia> Utils.batch(6,2,sequential=true)
3-element Array{Array{Int64,1},1}:
[1, 2]
[3, 4]
[5, 6]
```
"""
function batch(n::Integer,bsize::Integer;sequential=false,rng = Random.GLOBAL_RNG)
ridx = sequential ? collect(1:n) : shuffle(rng,1:n)
if bsize > n
return [ridx]
end
n_batches = Int64(floor(n/bsize))
batches = Array{Int64,1}[]
for b in 1:n_batches
push!(batches,ridx[b*bsize-bsize+1:b*bsize])
end
return batches
end
"""
$(TYPEDSIGNATURES)
Perform a Xavier initialisation of the weights
# Parameters:
- `previous_npar`: number of parameters of the previous layer
- `this_npar`: number of parameters of this layer
- `outsize`: tuple with the size of the weights [def: `(this_npar,previous_npar)`]
- `rng` : random number generator [def: `Random.GLOBAL_RNG`]
- `eltype`: eltype of the weight array [def: `Float64`]
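# Example:
An illustrative sketch (output shapes noted as comments):
```julia
w = xavier_init(3,2)       # a (2,3) weight matrix for a layer with 3 inputs and 2 neurons
b = xavier_init(3,2,(2,))  # a length-2 vector initialised with the same scaling
```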
"""
function xavier_init(previous_npar,this_npar,outsize=(this_npar,previous_npar);rng=Random.GLOBAL_RNG,eltype=Float64)
out = rand(rng, Uniform(-sqrt(6)/sqrt(previous_npar+this_npar),sqrt(6)/sqrt(previous_npar+this_npar)),outsize)
if eltype != Float64
return convert(Array{eltype,length(outsize)},out)
else
return out
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3297 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Part of submodule Utils of BetaML - the Beta Machine Learning Toolkit
# Various helper/ utility functions concerning stochastiticy management
#StableRNG(FIXEDSEED) Random.default_rng() #MersenneTwister(FIXEDSEED)
#const FIXEDRNG = MersenneTwister(FIXEDSEED) #StableRNG(FIXEDSEED) Random.default_rng()
"""
generate_parallel_rngs(rng::AbstractRNG, n::Integer;reSeed=false)
For multi-threaded models, return n independent random number generators (one per thread) to be used in threaded computations.
Note that each returned RNG is a _copy_ of the original RNG. This means that code that _uses_ these RNGs will not change the original RNG state.
Use it with `rngs = generate_parallel_rngs(rng,Threads.nthreads())` to have a separate rng per thread.
By default the function doesn't re-seed the RNGs, as you may want to have a loop-index based re-seeding strategy rather than a threadid-based one (to guarantee the same result independently of the number of threads).
If you prefer, you can instead re-seed the RNGs here (using the parameter `reSeed=true`), such that each thread has a different seed. Be aware however that the stream of numbers generated will then depend on the number of threads at run time.
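# Example:
A sketch of the loop-index based re-seeding pattern used elsewhere in this package:
```julia
rng        = copy(FIXEDRNG)
rngs       = generate_parallel_rngs(rng,Threads.nthreads())
masterseed = rand(rng,100:typemax(Int64))
Threads.@threads for i in 1:100
    tsrng = rngs[Threads.threadid()]     # the RNG dedicated to this thread..
    Random.seed!(tsrng,masterseed+i*10)  # ..re-seeded by loop index, so results don't depend on the number of threads
    # ... stochastic computation using tsrng ...
end
```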
"""
function generate_parallel_rngs(rng::AbstractRNG, n::Integer;reSeed=false)
if reSeed
seeds = [rand(rng,100:18446744073709551615) for i in 1:n] # some RNGs have issues with too small seed
rngs = [deepcopy(rng) for i in 1:n]
return Random.seed!.(rngs,seeds)
else
return [deepcopy(rng) for i in 1:n]
end
end
"""
consistent_shuffle(data;dims,rng)
Shuffle a vector of n-dimensional arrays across dimension `dims` keeping the same order between the arrays
# Parameters
- `data`: The vector of arrays to shuffle
- `dims`: The dimension over to apply the shuffle [def: `1`]
- `rng`: An `AbstractRNG` to apply for the shuffle
# Notes
- All the arrays must have the same size for the dimension to shuffle
# Example
```
julia> a = [1 2 30; 10 20 30]; b = [100 200 300];
julia> (aShuffled, bShuffled) = consistent_shuffle([a,b],dims=2)
2-element Vector{Matrix{Int64}}:
[1 30 2; 10 30 20]
[100 300 200]
```
"""
function consistent_shuffle(data::AbstractArray{T,1};dims=1,rng=Random.GLOBAL_RNG) where T <: Any
#= old code, fast for small data, slow for big element to shuffle
Ns = [size(m,dims) for m in data]
length(Set(Ns)) == 1 || @error "In `consistent_shuffle(arrays)` all individual arrays need to have the same size on the dimension specified"
N = Ns[1]
ridx = Random.shuffle(rng, 1:N)
out = similar(data)
for (i,a) in enumerate(data)
aidx = convert(Vector{Union{UnitRange{Int64},Vector{Int64}}},[1:i for i in size(a)])
aidx[dims] = ridx
out[i] = a[aidx...]
end
return out
=#
Ns = [size(m,dims) for m in data]
length(Set(Ns)) == 1 || @error "In `consistent_shuffle(arrays)` all individual arrays need to have the same size on the dimension specified"
ix = randperm(rng,size(data[1],dims))
return mapslices.(x->x[ix], data, dims=dims)
end
consistent_shuffle(rng::AbstractRNG,data::AbstractArray{T,1};dims=1) where T <: Any = consistent_shuffle(data;dims=dims,rng=rng) | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 8463 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
# Function of a single argument (including scalars and vectors), like activation functions but also gini, entropy,...)
# ------------------------------------------------------------------------------
# Various neural network activation functions as well their derivatives
#identity(x) = x already in Julia base
didentity(x) = one(x)
""" relu(x) \n\n Rectified Linear Unit \n\n https://www.cs.toronto.edu/~hinton/absps/reluICML.pdf"""
relu(x) = max(zero(x), x)
""" drelu(x) \n\n Rectified Linear Unit \n\n https://www.cs.toronto.edu/~hinton/absps/reluICML.pdf"""
drelu(x) = x <= zero(x) ? zero(x) : one(x)
"""elu(x; α=1) with α > 0 \n\n https://arxiv.org/pdf/1511.07289.pdf"""
elu(x; α=one(x)) = x > zero(x) ? x : α *(exp(x) - one(x))
"""delu(x; α=1) with α > 0 \n\n https://arxiv.org/pdf/1511.07289.pdf"""
delu(x; α=one(x)) = x > zero(x) ? one(x) : elu(x, α=α) + α
"""celu(x; α=1) \n\n https://arxiv.org/pdf/1704.07483.pdf"""
celu(x; α=one(x)) = max(zero(x),x)+ min(zero(x), α *(exp(x / α) - one(x) ))
#celu(x; α=one(x)) = if x >= zero(x) x/α else exp(x/α)-one(x) end
"""dcelu(x; α=1) \n\n https://arxiv.org/pdf/1704.07483.pdf"""
dcelu(x; α=one(x)) = x >= zero(x) ? one(x) : exp(x/α)
"""plu(x;α=0.1,c=1) \n\n Piecewise Linear Unit \n\n https://arxiv.org/pdf/1809.09534.pdf"""
plu(x;α=0.1,c=one(x)) = max(α*(x+c)-c,min(α*(x-c)+c,x)) # convert(eltype(x), α)
"""dplu(x;α=0.1,c=1) \n\n Piecewise Linear Unit derivative \n\n https://arxiv.org/pdf/1809.09534.pdf"""
dplu(x;α=0.1,c=one(x)) = ( ( x >= (α*(x+c)-c) && x <= (α*(x+c)+c) ) ? one(x) : α ) # convert(eltype(x), α)
"""
pool1d(x,poolsize=3;f=mean)
Apply function `f` to a rolling window of `poolsize` contiguous (in 1d) neurons.
Applicable to `VectorFunctionLayer`, e.g. `layer2 = VectorFunctionLayer(nₗ,f=(x->pool1d(x,4,f=mean)))`
**Attention**: to apply this function as activation function in a neural network you will need Julia version >= 1.6, otherwise you may experience a segmentation fault (see [this bug report](https://github.com/FluxML/Zygote.jl/issues/943))
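# Example:
Small illustrative calls (expected output as comments):
```julia
pool1d([1.0,2.0,3.0,4.0,5.0],2,f=mean)     # [1.5, 2.5, 3.5, 4.5]
pool1d([1.0,2.0,3.0,4.0,5.0],3,f=maximum)  # [3.0, 4.0, 5.0]
```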
"""
pool1d(x,poolsize=3;f=mean) = [f(x[i:i+poolsize-1]) for i in 1:length(x)-poolsize+1] # we may try to use CartesianIndices/LinearIndices for a n-dimensional generalisation
#tanh(x) already in Julia base
"""dtanh(x)"""
dtanh(x) = sech(x)^2 # = 1-tanh(x)^2
"""sigmoid(x)"""
sigmoid(x) = one(x)/(one(x)+exp(-x))
"""dsigmoid(x)"""
dsigmoid(x) = exp(-x)*sigmoid(x)^2
"""softmax (x; β=1) \n\n The input x is a vector. Return a PMF"""
softmax(x; β=one.(x)) = exp.((β .* x) .- lse(β .* x)) # efficient implementation of softmax(x) = exp.(x) ./ sum(exp.(x))
softmax(x, β) = softmax(x, β=β)
""" dsoftmax(x; β=1) \n\n Derivative of the softmax function \n\n https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/"""
function dsoftmax(x; β=one(x[1]))
x = makecolvector(x)
d = length(x)
out = zeros(d,d)
y = softmax(x,β=β)
for i in 1:d
smi = y[i]
for j in 1:d
if j == i
out[i,j] = β*(smi-smi^2)
else
out[i,j] = - β*y[j]*smi
end
end
end
return out
end
"""softplus(x) \n\n https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Softplus"""
softplus(x) = log(one(x) + exp(x))
"""dsoftplus(x) \n\n https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Softplus"""
dsoftplus(x) = 1/(1+exp(-x))
""" mish(x) \n\n https://arxiv.org/pdf/1908.08681v1.pdf"""
mish(x) = x*tanh(softplus(x))
""" dmish(x) \n\n https://arxiv.org/pdf/1908.08681v1.pdf"""
dmish(x) = x*(1 - tanh(log(exp(x) + 1))^2)*exp(x)/(exp(x) + 1) + tanh(log(exp(x) + 1))
""" dmaximum(x) \n\n Multidimensional verison of the derivative of `maximum`"""
function dmaximum(x)
dy_dx = zeros(size(x))
dy_dx[argmax(x)] = 1.0
return dy_dx
end
dmean(x) = ones(size(x)) ./ length(x)
"""
autojacobian(f,x;nY)
Evaluate the Jacobian using AD in the form of a (nY,nX) matrix of first derivatives
# Parameters:
- `f`: The function to compute the Jacobian
- `x`: The input to the function where the jacobian has to be computed
- `nY`: The number of outputs of the function `f` [def: `length(f(x))`]
# Return values:
- An `Array{Float64,2}` of the locally evaluated Jacobian
# Notes:
- The `nY` parameter is optional. If provided it avoids having to compute `f(x)`
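# Example:
An illustrative call on a small R² → R² function (expected output as a comment):
```julia
autojacobian(x -> [x[1]^2, 3*x[1]*x[2]], [1.0,2.0])  # [2.0 0.0; 6.0 3.0]
```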
"""
function autojacobian(f,x;nY=length(f(x)))
x = convert(Array{Float64,1},x)
#j = Array{Float64, 2}(undef, size(x,1), nY)
#for i in 1:nY
# j[:, i] .= gradient(x -> f(x)[i], x)[1]
#end
#return j'
j = Array{Float64, 2}(undef, nY, size(x,1))
for i in 1:nY
j[i,:] = gradient(x -> f(x)[i], x)[1]'
end
return j
end
"generalisation of autojacobian to ndimensional in/out
TODO experimantal
Given ndims_x input dimensions and ndims_y dimensions the output will have ndims-x + ndims_y output dimensions
"
function ndderivative(f,x;y=f(x),ysize=size(y))
    xsize   = size(x)
    outsize = (xsize...,ysize...)
    out     = zeros(outsize)
    for yi in CartesianIndices(ysize)
        # gradient of the yi-th output component with respect to all the inputs..
        g = gradient(x -> f(x)[yi], x)[1]
        for xi in CartesianIndices(xsize)
            out[Tuple(xi)...,Tuple(yi)...] = g[xi]
        end
    end
    return out
end
function match_known_derivatives(f)
map = Dict(
# Scalar input functions..
identity => didentity,
relu => drelu,
elu => delu,
celu => dcelu,
plu => dplu,
tanh => dtanh,
sigmoid => dsigmoid,
softplus => dsoftplus,
mish => dmish,
# Vector input functions..
softmax => dsoftmax,
maximum => dmaximum,
mean => dmean,
squared_cost => dsquared_cost,
crossentropy => dcrossentropy,
)
return get(map,f,nothing)
end
# ------------------------------------------------------------------------------
# Partition tasks..
"""
gini(x)
Calculate the Gini Impurity for a list of items (or rows).
See: https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
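# Example:
A small illustrative call (expected output as a comment):
```julia
gini(["a","a","b","b"])  # 0.5
```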
"""
function gini(x)
counts = class_counts(x)
N = size(x,1)
impurity = 1.0
for c in counts
probₖ = c / N
impurity -= probₖ^2
end
return impurity
#=
counts = class_counts_with_labels(x)
N = size(x,1)
impurity = 1.0
for k in keys(counts)
probₖ = counts[k] / N
impurity -= probₖ^2
end
return impurity
=#
end
"""
entropy(x)
Calculate the entropy for a list of items (or rows) using logarithms in base 2.
See: https://en.wikipedia.org/wiki/Decision_tree_learning#Information_gain
Note that this function input is the list of items. This list is converted to a PMF and then the entropy is computed over the PMF.
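# Example:
A small illustrative call (expected output as a comment):
```julia
entropy(["a","a","b","b"])  # 1.0
```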
"""
function entropy(x)
counts = class_counts(x)
N = size(x,1)
entr = 0.0
for c in counts
probₖ = c / N
entr -= probₖ * log2(probₖ)
end
return entr
end
"""variance(x) - population variance"""
variance(x) = var(x,corrected=false)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
"""bic(lL,k,n) - Bayesian information criterion (lower is better)"""
bic(lL,k,n) = k*log(n)-2*lL
"""aic(lL,k) - Akaike information criterion (lower is better)"""
aic(lL,k) = 2*k-2*lL
# ------------------------------------------------------------------------------
# Various kernel functions (e.g. for Perceptron)
"""Radial Kernel (aka _RBF kernel_) parametrised with γ=1/2. For other gammas γᵢ use
`K = (x,y) -> radial_kernel(x,y,γ=γᵢ)` as kernel function in the supporting algorithms"""
radial_kernel(x,y;γ=1/2) = exp(-γ*norm(x-y)^2)
"""Polynomial kernel parametrised with `constant=0` and `degree=2` (i.e. a quadratic kernel).
For other `cᵢ` and `dᵢ` use `K = (x,y) -> polynomial_kernel(x,y,c=cᵢ,d=dᵢ)` as
kernel function in the supporting algorithms"""
polynomial_kernel(x,y;constant=0,degree=2) = (dot(x,y)+constant)^degree
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 4690 | "Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."
"""
Utils module
Provide shared utility functions and/or models for various machine learning algorithms.
For the complete list of functions provided see below. The main ones are:
## Helper functions for logging
- Most BetaML functions accept a parameter `verbosity` (choose between `NONE`, `LOW`, `STD`, `HIGH` or `FULL`)
- Writing complex code and need to find where something is executed ? Use the macro [`@codelocation`](@ref)
## Stochasticity management
- Utils provide [`FIXEDSEED`], [`FIXEDRNG`] and [`generate_parallel_rngs`](@ref). All stochastic functions and models accept a `rng` parameter. See the "Getting started" section in the tutorial for details.
## Data processing
- Various small and large utilities to help processing the data, especially before running a ML algorithm
- Includes [`getpermutations`](@ref), [`OneHotEncoder`](@ref), [`OrdinalEncoder`](@ref), [`partition`](@ref), [`Scaler`](@ref), [`PCAEncoder`](@ref), [`AutoEncoder`](@ref), [`cross_validation`](@ref).
- Auto-tuning of hyperparameters is implemented in the supported models by specifying `autotune=true` and optionally overriding the `tunemethod` parameters (e.g. for different hyperparameters ranges or different resources available for the tuning). Autotuning is then implemented in the (first) `fit!` call. Provided autotuning methods: [`SuccessiveHalvingSearch`](@ref) (default), [`GridSearch`](@ref)
## Samplers
- Utilities to sample from data (e.g. for neural network training or for cross-validation)
- Include the "generic" type [`SamplerWithData`](@ref), together with the sampler implementation [`KFold`](@ref) and the function [`batch`](@ref)
## Transformers
- Funtions that "transform" a single input (that can be also a vector or a matrix)
- Includes varios NN "activation" functions ([`relu`](@ref), [`celu`](@ref), [`sigmoid`](@ref), [`softmax`](@ref), [`pool1d`](@ref)) and their derivatives (`d[FunctionName]`), but also [`gini`](@ref), [`entropy`](@ref), [`variance`](@ref), [`BIC`](@ref bic), [`AIC`](@ref aic)
## Measures
- Several functions of a pair of parameters (often `y` and `ŷ`) to measure the goodness of `ŷ`, the distance between the two elements of the pair, ...
- Includes "classical" distance functions ([`l1_distance`](@ref), [`l2_distance`](@ref), [`l2squared_distance`](@ref) [`cosine_distance`](@ref)), "cost" functions for continuous variables ([`squared_cost`](@ref), [`relative_mean_error`](@ref)) and comparision functions for multi-class variables ([`crossentropy`](@ref), [`accuracy`](@ref), [`ConfusionMatrix`](@ref), [`silhouette`](@ref))
- Distances can be used to compute a pairwise distance matrix using the function [`pairwise`](@ref)
"""
module Utils
using LinearAlgebra, Printf, Random, Statistics, Combinatorics, Zygote, CategoricalArrays, LoopVectorization, DocStringExtensions
using Distributions: Uniform
using ForceImport
@force using ..Api
using ..Api
export @codelocation, generate_parallel_rngs,
reshape, makecolvector, makerowvector, makematrix, issortable, getpermutations,
cols_with_missing,
batch, partition, consistent_shuffle,
xavier_init,
didentity, relu, drelu, elu, delu, celu, dcelu, plu, dplu, #identity and rectify units
dtanh, sigmoid, dsigmoid, softmax, dsoftmax, dmaximum, dmean, pool1d, softplus, dsoftplus, mish, dmish, # exp/trig based functions
bic, aic,
autojacobian, match_known_derivatives,
squared_cost, dsquared_cost, mse, crossentropy, dcrossentropy, class_counts, kl_divergence, class_counts_with_labels, mean_dicts, mode, gini, entropy, variance, sobol_index,
error, accuracy, relative_mean_error,
ConfusionMatrix, ConfusionMatrix_hp, silhouette,
cross_validation,
AbstractDataSampler, SamplerWithData, KFold,
autotune!, GridSearch, SuccessiveHalvingSearch, l2loss_by_cv,
l1_distance,l2_distance, l2squared_distance, cosine_distance, pairwise, lse, sterling,
radial_kernel, polynomial_kernel,
Scaler, MinMaxScaler, StandardScaler,
Scaler_hp, MinMaxScaler,StandardScaler,
PCAEncoder, PCAE_hp,
OneHotEncoder, OrdinalEncoder, OneHotE_hp,
@threadsif,
get_parametric_types, isinteger_bml,
FeatureRanker, FeatureR_hp,
online_mean
# Various functions that we add a method to
import Base.print, Base.println, Base.error, Random.shuffle, Base.show
include("Miscellaneous.jl")
include("Logging_utils.jl")
include("Processing.jl")
include("Stochasticity.jl")
include("Samplers.jl")
include("Transformers.jl")
include("Measures.jl")
end # end module
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 12181 |
@eval Utils begin
export AutoEncoder, AutoE_hp
@force using ..Nn
import ..Nn: AbstractLayer, ADAM, SGD, NeuralNetworkEstimator, OptimisationAlgorithm, DenseLayer, NN
import ..Imputation
"""
$(TYPEDEF)
Hyperparameters for the AutoEncoder transformer
## Parameters
$(FIELDS)
"""
Base.@kwdef mutable struct AutoE_hp <: BetaMLHyperParametersSet
"The desired size of the encoded data, that is the number of dimensions in output or the size of the latent space. This is the number of neurons of the layer sitting between the econding and decoding layers. If the value is a float it is considered a percentual (to be rounded) of the dimensionality of the data [def: `0.33`]"
encoded_size::Union{Float64,Int64} = 0.333
"Inner layers dimension (i.e. number of neurons). If the value is a float it is considered a percentual (to be rounded) of the dimensionality of the data [def: `nothing` that applies a specific heuristic]. Consider that the underlying neural network is trying to predict multiple values at the same times. Normally this requires many more neurons than a scalar prediction. If `e_layers` or `d_layers` are specified, this parameter is ignored for the respective part."
layers_size::Union{Int64,Float64,Nothing} = nothing
"The layers (vector of `AbstractLayer`s) responsable of the encoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `layers_size`]"
e_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"The layers (vector of `AbstractLayer`s) responsable of the decoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `layers_size`]"
d_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"""Loss (cost) function [def: `squared_cost`]
It must always assume y and ŷ as (n x d) matrices, eventually using `dropdims` inside.
"""
loss::Union{Nothing,Function} = squared_cost
"Derivative of the loss function [def: `dsquared_cost` if `loss==squared_cost`, `nothing` otherwise, i.e. use the derivative of the squared cost or autodiff]"
dloss::Union{Function,Nothing} = nothing
"Number of epochs, i.e. passages trough the whole training sample [def: `200`]"
epochs::Int64 = 200
"Size of each individual batch [def: `8`]"
batch_size::Int64 = 8
"The optimisation algorithm to update the gradient at each batch [def: `ADAM()`]"
opt_alg::OptimisationAlgorithm = ADAM()
"Whether to randomly shuffle the data at each iteration (epoch) [def: `true`]"
shuffle::Bool = true
"""
The method - and its parameters - to employ for hyperparameters autotuning.
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To implement automatic hyperparameter tuning during the (first) `fit!` call simply set `autotune=true` and eventually change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges = Dict("epochs"=>[100,200,400],"batch_size"=>[8,16],"encoded_size"=>[0.2,0.3,0.5],"layers_size"=>[1.3,2.0,5.0,10.0,nothing]),multithreads=true)
end
Base.@kwdef mutable struct AutoEncoder_lp <: BetaMLLearnableParametersSet
encoded_size_actual::Union{Int64,Nothing} = nothing
fullnn::Union{NeuralNetworkEstimator,Nothing} = nothing
n_el::Union{Nothing,Int64} = nothing
n_dl::Union{Nothing,Int64} = nothing
end
"""
$(TYPEDEF)
Perform a (possibly-non linear) transformation ("encoding") of the data into a different space, e.g. for dimensionality reduction using neural network trained to replicate the input data.
A neural network is trained to first transform the data (often "compressing" it) to a subspace (the output of an inner layer) and then re-transform it (through subsequent layers) back to the original data.
`predict(mod::AutoEncoder,x)` returns the encoded data, `inverse_predict(mod::AutoEncoder,xtransformed)` performs the decoding.
For the parameters see [`AutoE_hp`](@ref) and [`BML_options`](@ref)
# Notes:
- AutoEncoder doesn't automatically scale the data. It is suggested to apply the [`Scaler`](@ref) model before running it.
- Missing data are not supported. Impute them first, see the [`Imputation`](@ref) module.
- Decoding layers can be optionally chosen (parameter `d_layers`) in order to suit the kind of data, e.g. a `relu` activation function for non-negative data
# Example:
```julia
julia> using BetaML
julia> x = [0.12 0.31 0.29 3.21 0.21;
0.22 0.61 0.58 6.43 0.42;
0.51 1.47 1.46 16.12 0.99;
0.35 0.93 0.91 10.04 0.71;
0.44 1.21 1.18 13.54 0.85];
julia> m = AutoEncoder(encoded_size=1,epochs=400)
A AutoEncoder BetaMLModel (unfitted)
julia> x_reduced = fit!(m,x)
***
*** Training for 400 epochs with algorithm ADAM.
Training.. avg loss on epoch 1 (1): 60.27802763757111
Training.. avg loss on epoch 200 (200): 0.08970099870421573
Training.. avg loss on epoch 400 (400): 0.013138484118673664
Training of 400 epoch completed. Final epoch error: 0.013138484118673664.
5×1 Matrix{Float64}:
-3.5483740608901186
-6.90396890458868
-17.06296512222304
-10.688936344498398
-14.35734756603212
julia> x̂ = inverse_predict(m,x_reduced)
5×5 Matrix{Float64}:
0.0982406 0.110294 0.264047 3.35501 0.327228
0.205628 0.470884 0.558655 6.51042 0.487416
0.529785 1.56431 1.45762 16.067 0.971123
0.3264 0.878264 0.893584 10.0709 0.667632
0.443453 1.2731 1.2182 13.5218 0.842298
julia> info(m)["rme"]
0.020858783340281222
julia> hcat(x,x̂)
5×10 Matrix{Float64}:
0.12 0.31 0.29 3.21 0.21 0.0982406 0.110294 0.264047 3.35501 0.327228
0.22 0.61 0.58 6.43 0.42 0.205628 0.470884 0.558655 6.51042 0.487416
0.51 1.47 1.46 16.12 0.99 0.529785 1.56431 1.45762 16.067 0.971123
0.35 0.93 0.91 10.04 0.71 0.3264 0.878264 0.893584 10.0709 0.667632
0.44 1.21 1.18 13.54 0.85 0.443453 1.2731 1.2182 13.5218 0.842298
```
"""
mutable struct AutoEncoder <: BetaMLUnsupervisedModel
hpar::AutoE_hp
opt::BML_options
par::Union{Nothing,AutoEncoder_lp}
cres::Union{Nothing,Matrix}
fitted::Bool
info::Dict{String,Any}
end
function AutoEncoder(;kwargs...)
m = AutoEncoder(AutoE_hp(),BML_options(),AutoEncoder_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
# Correction for releasing without breaking.. to remove on v0.12 onward...
# found || error("Keyword \"$kw\" is not part of this model.")
if !found
if kw == :outdims
setproperty!(m.hpar,:encoded_size,kwv)
found = true
elseif kw == :innerdims
setproperty!(m.hpar,:layers_size,kwv)
found = true
else
error("Keyword \"$kw\" is not part of this model.")
end
end
end
return m
end
function fit!(m::AutoEncoder,X)
(m.fitted) || autotune!(m,(X,))
# Parameter alias..
e_layers = m.hpar.e_layers
d_layers = m.hpar.d_layers
encoded_size = m.hpar.encoded_size
layers_size = m.hpar.layers_size
loss = m.hpar.loss
dloss = m.hpar.dloss
epochs = m.hpar.epochs
batch_size = m.hpar.batch_size
opt_alg = m.hpar.opt_alg
shuffle = m.hpar.shuffle
cache = m.opt.cache
descr = m.opt.descr
verbosity = m.opt.verbosity
#cb = m.opt.cb
rng = m.opt.rng
fitted = m.fitted
(N,D) = size(X)
if fitted
size(m.par.fullnn.par.nnstruct.layers[1])[1][1] == D || @error "The data used to re-fit the model have different dimensionality than the original data. [`reset!`](@ref) the model first."
verbosity >= HIGH && @info "Re-fitting of the model on new data"
encoded_size_actual = m.par.encoded_size_actual
fullnn = m.par.fullnn
n_el = m.par.n_el
n_dl = m.par.n_dl
else
typeof(encoded_size) <: Integer ? encoded_size_actual = encoded_size : encoded_size_actual = max(1,Int(round(D * encoded_size)))
if isnothing(layers_size)
if D == 1
innerSize = 3
elseif D < 5
innerSize = max(1,Int(round(D*D)))
elseif D < 10
innerSize = max(1,Int(round(D*1.3*D/3)))
else
innerSize = max(1,Int(round(D*1.3*log(2,D))))
end
elseif typeof(layers_size) <: Integer
innerSize = layers_size
else
innerSize = max(1,Int(round(D*layers_size)) )
end
if isnothing(e_layers)
l1 = DenseLayer(D,innerSize, f=relu, df=drelu, rng=rng)
l2 = DenseLayer(innerSize,innerSize, f=relu, df=drelu, rng=rng)
l3 = DenseLayer(innerSize, encoded_size_actual, f=identity, df=didentity, rng=rng)
e_layers_actual = [l1,l2,l3]
else
e_layers_actual = copy(e_layers)
end
if isnothing(d_layers)
l1d = DenseLayer(encoded_size_actual,innerSize, f=relu, df=drelu, rng=rng)
l2d = DenseLayer(innerSize,innerSize, f=relu, df=drelu, rng=rng)
l3d = DenseLayer(innerSize, D, f=identity, df=didentity, rng=rng)
d_layers_actual = [l1d,l2d,l3d]
else
d_layers_actual = copy(d_layers)
end
fullnn = NeuralNetworkEstimator(layers=[e_layers_actual...,d_layers_actual...],loss=loss,dloss=dloss,epochs=epochs,batch_size=batch_size,opt_alg=opt_alg,shuffle=shuffle,cache=cache,descr=descr,verbosity=verbosity,rng=rng )
n_el = length(e_layers_actual)
n_dl = length(d_layers_actual)
end
x̂ = fit!(fullnn,X,X)
par = AutoEncoder_lp()
par.encoded_size_actual = encoded_size_actual
par.fullnn = fullnn
par.n_el = n_el
par.n_dl = n_dl
m.par = par
m.fitted=true
rme = cache ? relative_mean_error(X,x̂) : missing
m.info["nepochs_ran"] = info(fullnn)["nepochs_ran"]
m.info["loss_per_epoch"] = info(fullnn)["loss_per_epoch"]
m.info["final_loss"] = verbosity >= STD ? info(fullnn)["loss_per_epoch"][end] : missing
m.info["rme"] = rme
m.info["par_per_epoch"] = info(fullnn)["par_per_epoch"]
m.info["xndims"] = info(fullnn)["xndims"]
m.info["fitted_records"] = info(fullnn)["fitted_records"]
m.info["nepochs_ran"] = info(fullnn)["nepochs_ran"]
m.info["nLayers"] = info(fullnn)["nLayers"]
m.info["nELayers"] = m.par.n_el
m.info["nDLayers"] = m.par.n_dl
m.info["nPar"] = info(fullnn)["nPar"]
if cache
xtemp = copy(X)
for el in fullnn.par.nnstruct.layers[1:m.par.n_el]
xtemp = vcat([forward(el,r) for r in eachrow(xtemp)]'...) # TODO: for some layers the data is not a vector!
end
m.cres = xtemp|> makematrix
end
m.fitted=true
verbosity >= HIGH && println("Relative mean error of the encoded vs original data: $rme")
return cache ? m.cres : nothing
end
function predict(m::AutoEncoder,X)
xtemp = copy(X)
(N,D) = size(X)
#for r in eachrow(X)
for el in m.par.fullnn.par.nnstruct.layers[1:m.par.n_el]
xtemp = vcat([forward(el,r) for r in eachrow(xtemp)]'...) # TODO: for some layers the data is not a vector!
end
return xtemp|> makematrix
end
function inverse_predict(m::AutoEncoder,X)
xtemp = copy(X)
for el in m.par.fullnn.par.nnstruct.layers[m.par.n_el+1:end]
xtemp = vcat([forward(el,r) for r in eachrow(xtemp)]'...)
end
return xtemp|> makematrix
end
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 4567 | using Test, DelimitedFiles
using BetaML
import BetaML.Clustering: init_representatives, kmeans, kmedoids
TESTRNG = FIXEDRNG # This could change...
println("*** Testing Clustering...")
# ==================================
# New test
# ==================================
println("Testing initRepreserntative...")
initial_representatives = init_representatives([1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.6 38],2,initialisation_strategy="given",initial_representatives=[1.7 15; 3.6 40])
@test isapprox(initial_representatives,[1.7 15.0; 3.6 40.0])
# ==================================
# New test
# ==================================
println("Testing kmeans...")
X = [1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.3 38; 5.1 -2.3; 5.2 -2.4]
(clIdxKMeans,Z) = kmeans(X,3,initialisation_strategy="grid",rng=copy(TESTRNG))
@test clIdxKMeans == [2, 2, 2, 2, 3, 3, 3, 1, 1]
#@test (clIdx,Z) .== ([2, 2, 2, 2, 3, 3, 3, 1, 1], [5.15 -2.3499999999999996; 1.5 11.075; 3.366666666666667 36.666666666666664])
m = KMeansClusterer(n_classes=3,verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG), descr="First test k-means model")
fit_ex(m,X)
classes = predict(m)
@test clIdxKMeans == classes
X2 = [1.5 11; 3 40; 3 40; 5 -2]
classes2 = predict(m,X2)
@test classes2 == [2,3,3,1]
fit!(m,X2)
classes3 = predict(m)
@test classes3 == [2,3,3,1]
reset_ex(m)
fit!(m,X)
classes = predict(m)
@test clIdxKMeans == classes
@test info(m)["fitted_records"] == 9
@test sprint(print, m) == "First test k-means model\nKMeansClusterer - A 2-dimensions 3-classes K-Means Model (fitted on 9 records)\nDict{String, Any}(\"fitted_records\" => 9, \"av_distance_last_fit\" => 1.9492325925652934, \"xndims\" => 2)\nRepresentatives:\n[5.15 -2.3499999999999996; 1.5 11.075; 3.366666666666667 36.666666666666664]\n"
# ==================================
# New test
# ==================================
println("Testing kmedoids...")
(clIdxKMedoids,Z) = kmedoids([1 10.5;1.5 10.8; 1.8 8; 1.7 15; 3.2 40; 3.6 32; 3.3 38; 5.1 -2.3; 5.2 -2.4],3,initialisation_strategy="shuffle",rng=copy(TESTRNG))
@test clIdxKMedoids == [2, 2, 2, 1, 3, 3, 3, 2, 2]
m = KMedoidsClusterer(n_classes=3,verbosity=NONE, initialisation_strategy="shuffle",rng=copy(TESTRNG))
fit!(m,X)
classes = predict(m)
@test clIdxKMedoids == classes
X2 = [1.5 11; 3 40; 3 40; 5 -2]
classes2 = predict(m,X2)
#@test classes2 == [1,2,2,3]
fit!(m,X2)
classes3 = predict(m)
#@test classes3 == [1,2,2,3]
@test info(m)["fitted_records"] == 13
reset!(m)
@test sprint(print, m) == "KMedoidsClusterer - A 3-classes K-Medoids Model (unfitted)"
# Testing on iris
println("Testing hard clustering on the sepal database...")
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
pd = pairwise(x)
yi = fit!(OrdinalEncoder(),y)
m = KMeansClusterer(n_classes=3, rng=copy(TESTRNG))
ŷ = fit!(m,x)
acc = accuracy(yi,ŷ,ignorelabels=true)
s = mean(silhouette(pd,ŷ))
@test s > 0.55
m = KMedoidsClusterer(n_classes=3, rng=copy(TESTRNG))
ŷ = fit!(m,x)
acc = accuracy(yi,ŷ,ignorelabels=true)
@test acc > 0.8
s = mean(silhouette(pd,ŷ))
@test s > 0.52
# ==================================
# NEW TEST
println("Testing MLJ interface for Clustering models....")
import MLJBase, MLJTestInterface
const Mlj = MLJBase
const mljti = MLJTestInterface
X, y = Mlj.@load_iris
model = BetaML.Bmlj.KMeansClusterer(rng=copy(TESTRNG))
modelMachine = Mlj.machine(model, X)
(fitResults, cache, report) = Mlj.fit(model, 0, X)
distances = Mlj.transform(model,fitResults,X)
yhat = Mlj.predict(model, fitResults, X)
acc = BetaML.accuracy(Mlj.levelcode.(yhat),Mlj.levelcode.(y),ignorelabels=true)
@test acc > 0.8
model = BetaML.Bmlj.KMedoidsClusterer(rng=copy(TESTRNG))
modelMachine = Mlj.machine(model, X)
(fitResults, cache, report) = Mlj.fit(model, 0, X)
distances = Mlj.transform(model,fitResults,X)
yhat = Mlj.predict(model, fitResults, X)
acc = BetaML.accuracy(Mlj.levelcode.(yhat),Mlj.levelcode.(y),ignorelabels=true)
@test acc > 0.8
@testset "generic mlj interface test" begin
f, s = mljti.test(
[BetaML.Bmlj.KMeansClusterer,],
mljti.make_regression()[1];
mod=@__MODULE__,
verbosity=0, # bump to debug
throw=true, # set to true to debug (`false` in CI)
)
@test isempty(f)
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 327 | using Test
using DelimitedFiles, LinearAlgebra
import MLJBase
const Mlj = MLJBase
using StableRNGs
rng = StableRNG(123)
using BetaML.Clustering
println("*** Additional testing for the Clustering algorithms...")
#println("Testing MLJ interface for Clustering models....")
# evaluate seem not supported for unsupervised models
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1324 | using Test
# https://discourse.julialang.org/t/getting-data-directly-from-a-website/12241/6
fNames = ["train-images-idx3-ubyte","train-labels-idx1-ubyte","t10k-images-idx3-ubyte","t10k-labels-idx1"]
origPath = ["http://yann.lecun.com/exdb/mnist/"]
destPath = joinpath(dirname(Base.find_package("BetaML")),"..","test","data","mnist")
iris = readdlm(joinpath(dirname(Base.find_package("BetaML")),"..","test","data","iris.csv"),',',skipstart=1)
http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
r = HTTP.get(origPath*fNames[1]*".gz", cookies=true);
using HTTP, GZip, IDX # https://github.com/jlegare/IDX.git
r = HTTP.get("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", cookies=true);
destPath = joinpath(dirname(Base.find_package("BetaML")),"..","test","data","mnist")
zippedFile = joinpath(destPath,"test.gz")
unZippedFile = joinpath(destPath,"test.idx3")
open(zippedFile,"w") do f
write(f,String(r.body))
end
fh = GZip.open(zippedFile)
open(unZippedFile,"w") do f
write(f,read(fh))
end
train_set = load(unZippedFile)
img1 = train_set[3][:,:,1]
train_set = load(read(fh))
a = "aaa"
b = "bb"
c = a*b
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 8261 | using Test
import Distributions
using BetaML
import BetaML.GMM: gmm, initVariances!, updateVariances!
TESTRNG = FIXEDRNG # This could change...
println("*** Testing GMM...")
# ==================================
# New test
# ==================================
println("Testing mixture initialisation and log-pdf...")
m1 = SphericalGaussian()
m2 = SphericalGaussian([1.1,2,3])
m3 = SphericalGaussian(nothing,10.2)
mixtures = [m1,m2,m3]
X = [1 10 20; 1.2 12 missing; 3.1 21 41; 2.9 18 39; 1.5 15 25]
init_mixtures!(mixtures,X,minimum_variance=0.25,rng=copy(TESTRNG))
@test sum([sum(m.μ) for m in mixtures]) ≈ 102.2
@test sum([sum(m.σ²) for m in mixtures]) ≈ 19.651086419753085
mask = [true, true, false]
@test lpdf(m1,X[2,:][mask],mask) ≈ -3.461552516784797
m1 = DiagonalGaussian()
m2 = DiagonalGaussian([1.1,2,3])
m3 = DiagonalGaussian(nothing,[0.1,11,25.0])
mixtures = [m1,m2,m3]
init_mixtures!(mixtures,X,minimum_variance=0.25,rng=copy(TESTRNG))
@test sum([sum(m.σ²) for m in mixtures]) ≈ 291.27933333333334
@test lpdf(m1,X[2,:][mask],mask) ≈ -3.383055441795939
m1 = FullGaussian()
m2 = FullGaussian([1.1,2,3])
m3 = FullGaussian(nothing,[0.1 0.2 0.5; 0 2 0.8; 1 0 5])
mixtures = [m1,m2,m3]
init_mixtures!(mixtures,X,minimum_variance=0.25,rng=copy(TESTRNG))
@test sum([sum(m.σ²) for m in mixtures]) ≈ 264.77933333333334
@test lpdf(m1,X[2,:][mask],mask) ≈ -3.383055441795939
# ==================================
# New test
# ==================================
println("Testing gmm...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
clusters = gmm(X,3,verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
@test isapprox(clusters.BIC,114.1492467835965)
println("Testing GaussianMixtureClusterer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
m = GaussianMixtureClusterer(n_classes=3, mixtures=[DiagonalGaussian() for i in 1:3], verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
m2 = GaussianMixtureClusterer(n_classes=3, mixtures=DiagonalGaussian, verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
m3 = GaussianMixtureClusterer(n_classes=3, verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
ŷ1 = fit!(m,X)
ŷ2 = fit!(m2,X)
ŷ3 = fit!(m3,X)
@test ŷ1 == ŷ2 == ŷ3
probs = predict(m)
gmmOut = gmm(X,3,verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
@test gmmOut.pₙₖ == probs
μ_x1alone = hcat([m.par.mixtures[i].μ for i in 1:3]...)
pk_x1alone = m.par.initial_probmixtures
X2 = [2.0 12; 3 20; 4 15; 1.5 11]
m2 = GaussianMixtureClusterer(n_classes=3,verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
fit!(m2,X2)
#μ_x2alone = hcat([m.par.mixtures[i].μ for i in 1:3]...)
probsx2alone = predict(m2)
@test probsx2alone[1,1] < 0.999
probX2onX1model = predict(m,X2)
@test probX2onX1model[1,1] ≈ 0.5214795038476924
fit!(m,X2) # this greatly reduces mixture variance
#μ_x1x2 = hcat([m.par.mixtures[i].μ for i in 1:3]...)
probsx2 = predict(m)
@test probsx2[1,1] > 0.999 # it feels more certain as it uses the info of the first training
reset!(m)
@test sprint(print,m) == "GaussianMixtureClusterer - A 3-classes Generative Mixture Model (unfitted)"
m = GaussianMixtureClusterer(mixtures=[SphericalGaussian() for i in 1:2])
x_full = fit!(m,X)
m = GaussianMixtureClusterer(mixtures=SphericalGaussian,n_classes=2)
x_full2 = fit!(m,X)
@test x_full == x_full2
m = GaussianMixtureClusterer(mixtures=SphericalGaussian)
x_full = fit!(m,X)
@test hyperparameters(m).n_classes == 3
@test length(hyperparameters(m).mixtures) == 3
m = GaussianMixtureClusterer(n_classes=2)
x_full = fit!(m,X)
@test length(hyperparameters(m).mixtures) == 2
# Testing GMM Regressor 1
ϵtrain = [1.023,1.08,0.961,0.919,0.933,0.993,1.011,0.923,1.084,1.037,1.012]
ϵtest = [1.056,0.902,0.998,0.977]
xtrain = [0.1 0.2; 0.3 0.5; 0.4 0.1; 0.5 0.4; 0.7 0.9; 0.2 0.1; 0.4 0.2; 0.3 0.3; 0.6 0.9; 0.3 0.4; 0.9 0.8]
ytrain = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtrain[i] for (i,x) in enumerate(eachrow(xtrain))]
ytrain2d = hcat(ytrain,ytrain .+ 0.1)
xtest = [0.5 0.6; 0.14 0.2; 0.3 0.7; 20.0 40.0;]
ytest = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtest[i] for (i,x) in enumerate(eachrow(xtest))]
m = GaussianMixtureRegressor2(n_classes=2,rng=copy(TESTRNG), verbosity=NONE)
fit!(m,xtrain,ytrain)
ŷtrain = predict(m, xtrain)
ŷtrain2 = predict(m)
@test isapprox(ŷtrain,ŷtrain2,atol=0.00001) # not the same, as predict(m,xtrain) goes through a further e-step
ŷtest = predict(m, xtest)
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.08
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest <= 0.35
# testing it with multidimensional Y
reset!(m)
fit!(m,xtrain,ytrain2d)
ŷtrain2d = predict(m, xtrain)
ŷtrain2db = predict(m)
@test isapprox(ŷtrain2d,ŷtrain2db,atol=0.00001) # not the same, as predict(m,xtrain) goes through a further e-step
mreTrain2d = relative_mean_error(ytrain2d,ŷtrain2d,normrec=true)
@test mreTrain2d <= 0.08
m = GaussianMixtureRegressor2(n_classes=2,rng=copy(TESTRNG), verbosity=NONE, mixtures= SphericalGaussian)
est = fit!(m,xtrain,ytrain2d)
@test typeof(est) == Matrix{Float64}
# Testing GMM Regressor 2
m = GaussianMixtureRegressor(n_classes=2,rng=copy(TESTRNG), verbosity=NONE)
fit!(m,xtrain,ytrain)
ŷtrain = predict(m, xtrain)
ŷtrain2 = predict(m)
@test isapprox(ŷtrain,ŷtrain2,atol=0.01) # not the same, as predict(m,xtrain) goes through a further e-step
ŷtest = predict(m, xtest)
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.08
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest <= 0.35
# testing it with multidimensional Y
reset!(m)
fit!(m,xtrain,ytrain2d)
ŷtrain2d = predict(m, xtrain)
ŷtrain2db = predict(m)
@test isapprox(ŷtrain2d,ŷtrain2db,atol=0.01) # not the same, as predict(m,xtrain) goes through a further e-step
mreTrain2d = relative_mean_error(ytrain2d,ŷtrain2d,normrec=true)
@test mreTrain2d <= 0.08
fit!(m,xtrain,ytrain2d) # re-fit
ŷtrain2d = predict(m, xtrain)
mreTrain2d = relative_mean_error(ytrain2d,ŷtrain2d,normrec=true)
@test mreTrain2d <= 0.08
# testing with different mixtures definition
m = GaussianMixtureRegressor(rng=copy(TESTRNG),verbosity=NONE)
fit!(m,xtrain,ytrain)
m = GaussianMixtureRegressor(mixtures=[DiagonalGaussian(),DiagonalGaussian(),DiagonalGaussian()],rng=copy(TESTRNG), verbosity=NONE)
fit!(m,xtrain,ytrain)
m = GaussianMixtureRegressor(n_classes=2,mixtures=SphericalGaussian,rng=copy(TESTRNG), verbosity=NONE)
fit!(m,xtrain,ytrain)
#m = GaussianMixtureRegressor(autotune=true,rng=copy(TESTRNG), verbosity=NONE)
#fit!(m,xtrain,ytrain) # don't work on githug ci
# ==================================
# NEW TEST
println("Testing MLJ interface for GMM models....")
import MLJBase
const Mlj = MLJBase
X, y = Mlj.@load_iris
model = BetaML.Bmlj.GaussianMixtureClusterer(mixtures=[DiagonalGaussian() for i in 1:3],rng=copy(TESTRNG))
modelMachine = Mlj.machine(model, X) # DimensionMismatch
(fitResults, cache, report) = Mlj.fit(model, 0, X)
yhat_prob = Mlj.predict(model, fitResults, X) # Mlj.transform(model,fitResults,X)
# how to get this ??? Mlj.predict_mode(yhat_prob)
@test Distributions.pdf(yhat_prob[end],2) ≈ 0.5937443601647852
println("Testing MLJ interface for GMMRegressor models....")
X, y = Mlj.@load_boston
model_gmmr = BetaML.Bmlj.GaussianMixtureRegressor(n_classes=20,rng=copy(TESTRNG))
regressor_gmmr = Mlj.machine(model_gmmr, X, y)
(fitresult_gmmr, cache, report) = Mlj.fit(model_gmmr, 0, X, y)
yhat_gmmr = Mlj.predict(model_gmmr, fitresult_gmmr, X)
@test relative_mean_error(y,yhat_gmmr,normrec=true) < 0.3
ydouble = hcat(y,y)
model_gmmr2 = BetaML.Bmlj.MultitargetGaussianMixtureRegressor(n_classes=20,rng=copy(TESTRNG))
regressor_gmmr2 = Mlj.machine(model_gmmr2, X, ydouble)
(fitresult_gmmr2, cache, report) = Mlj.fit(model_gmmr2, 0, X, ydouble)
yhat_gmmr2 = Mlj.predict(model_gmmr2, fitresult_gmmr2, X)
@test relative_mean_error(ydouble,yhat_gmmr2,normrec=true) < 0.3
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 14167 | using Test
#using Pipe
using Statistics, Random
using BetaML
import DecisionTree
TESTRNG = FIXEDRNG # This could change...
println("*** Testing Imputations...")
# ------------------------------------------------------------------------------
# Old API predictMissing
# ==================================
# New test
# ==================================
#println("Testing predictMissing...")
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#out = predictMissing(X,3,mixtures=[SphericalGaussian() for i in 1:3],verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
#@test isapprox(out.X̂[2,2],14.155186593170251)
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#out2 = predictMissing(X,3,mixtures=[DiagonalGaussian() for i in 1:3],verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
#@test out2.X̂[2,2] ≈ 14.588514438886131
#X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
#out3 = predictMissing(X,3,mixtures=[FullGaussian() for i in 1:3],verbosity=NONE, initialisation_strategy="grid",rng=copy(TESTRNG))
#@test out3.X̂[2,2] ≈ 11.166652292936876
# ------------------------------------------------------------------------------
println("Testing SimpleImputer...")
X = [2 missing 10; 20 40 100]
mod = SimpleImputer()
fit!(mod,X)
x̂ = predict(mod)
@test x̂[1,2] == 40
@test typeof(x̂) == Matrix{Float64}
@test info(mod) == Dict{String,Any}("n_imputed_values" => 1)
X2 = [2 4 missing; 20 40 100]
x̂2 = predict(mod,X2)
@test x̂2[1,3] == 55.0
reset!(mod)
X = [2.0 missing 10; 20 40 100]
mod = SimpleImputer(norm=1)
fit!(mod,X)
x̂ = predict(mod)
@test isapprox(x̂[1,2],4.044943820224719)
@test typeof(x̂) == Matrix{Float64}
# ------------------------------------------------------------------------------
println("Testing GaussianMixtureImputer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
mod = GaussianMixtureImputer(mixtures=[SphericalGaussian() for i in 1:3],verbosity=NONE,initialisation_strategy="grid",rng=copy(TESTRNG))
x̂ = predict(mod)
@test x̂ == nothing
fit!(mod,X)
x̂ = predict(mod)
@test isapprox(x̂[2,2],14.155186593170251)
mod = GaussianMixtureImputer(mixtures=[DiagonalGaussian() for i in 1:3],verbosity=NONE,initialisation_strategy="grid",rng=copy(TESTRNG))
fit!(mod,X)
x̂ = predict(mod)
@test isapprox(x̂[2,2],14.588514438886131)
mod = GaussianMixtureImputer(mixtures=[FullGaussian() for i in 1:3],verbosity=NONE,initialisation_strategy="grid",rng=copy(TESTRNG))
fit!(mod,X)
x̂ = predict(mod)
@test x̂[2,2] ≈ 11.166652292936876
X = [2 missing 10; 2000 4000 10000; 2000 4000 10000; 3 5 12; 4 8 20; 2000 4000 8000; 1 5 8 ]
mod = GaussianMixtureImputer(n_classes=2,rng=copy(TESTRNG),verbosity=NONE, initialisation_strategy="kmeans")
fit!(mod,X)
x̂ = predict(mod)
@test x̂[1,2] ≈ 6.0
infos = info(mod)
@test infos["n_imputed_values"] == 1 && infos["lL"] ≈ -163.12896063447343 && infos["BIC"] ≈ 351.5547532066659 && infos["AIC"] ≈ 352.25792126894686
X2 = [3 6 9; 2000 missing 10000; 1 2 5; 1500 3000 9000; 1.5 3 6]
fit!(mod,X2)
X̂2 = predict(mod)
@test X̂2[1,1] == 3
@test X̂2[2,2] == 4000
X3 = [1 2 missing; 2 4 6]
X̂3 = predict(mod,X3)
@test X̂3[1,3] ≈ 6.666666666717062
reset!(mod)
#predict(mod,X3)
mod = GaussianMixtureImputer(mixtures=DiagonalGaussian)
X2 = [3 6 9; 2000 missing 10000; 1 2 5; 1500 3000 9000; 1.5 3 6]
fit!(mod,X2)
X̂2 = predict(mod)
@test typeof(X̂2) == Matrix{Float64}
# ------------------------------------------------------------------------------
println("Testing RFFImputer...")
X = [2 missing 10 "aaa" missing; 20 40 100 "gggg" missing; 200 400 1000 "zzzz" 1000]
mod = RandomForestImputer(n_trees=30,forced_categorical_cols=[5],recursive_passages=3,multiple_imputations=10, rng=copy(TESTRNG),verbosity=NONE)
Xs_full = fit!(mod,X)
@test Xs_full[2][1,2] == 220
@test length(Xs_full) == 10
X = [2 missing 10; 2000 4000 1000; 2000 4000 10000; 3 5 12 ; 4 8 20; 1 2 5]
mod = RandomForestImputer(multiple_imputations=10, rng=copy(TESTRNG),oob=true, verbosity=NONE)
fit!(mod,X)
vals = predict(mod)
nR,nC = size(vals[1])
medianValues = [median([v[r,c] for v in vals]) for r in 1:nR, c in 1:nC]
@test medianValues[1,2] == 4.0
infos = info(mod)
@test infos["n_imputed_values"] == 1
@test all(isequal.(infos["oob_errors"][1],[missing, 0.47355452303986306, missing]))
X = [2 4 10 "aaa" 10; 20 40 100 "gggg" missing; 200 400 1000 "zzzz" 1000]
mod = RandomForestImputer(rng=copy(TESTRNG),verbosity=NONE)
fit!(mod,X)
X̂1 = predict(mod)
X̂1b = predict(mod,X)
@test X̂1 == X̂1b
mod = RandomForestImputer(rng=copy(TESTRNG),verbosity=NONE,cols_to_impute="all")
fit!(mod,X)
X2 = [2 4 10 missing 10; 20 40 100 "gggg" 100; 200 400 1000 "zzzz" 1000]
X̂2 = predict(mod,X2)
@test X̂2[1,4] == "aaa"
# ------------------------------------------------------------------------------
println("Testing GeneralImputer...")
X = [2 missing 10; 2000 4000 1000; 2000 4000 10000; 3 5 12 ; 4 8 20; 1 2 5]
trng = copy(TESTRNG)
mod = GeneralImputer(estimator=[GaussianMixtureRegressor2(rng=trng,verbosity=NONE),RandomForestEstimator(rng=trng,verbosity=NONE),RandomForestEstimator(rng=trng,verbosity=NONE)], multiple_imputations=10, recursive_passages=3, rng=copy(TESTRNG),verbosity=NONE,cols_to_impute="all")
fit!(mod,X)
vals = predict(mod)
nR,nC = size(vals[1])
meanValues = [mean([v[r,c] for v in vals]) for r in 1:nR, c in 1:nC]
@test meanValues[1,2] == 3.0
@test vals[1] == vals[10]
model_save("test.jld2"; mod)
modj = model_load("test.jld2","mod")
valsj = predict(modj)
@test isequal(vals,valsj)
X = [2 missing 10; 2000 4000 1000; 2000 4000 10000; 3 5 12 ; 4 8 20; 1 2 5]
mod = GeneralImputer(multiple_imputations=10, recursive_passages=3, rng=copy(TESTRNG), verbosity=NONE)
fit!(mod,X)
vals = predict(mod)
nR,nC = size(vals[1])
meanValues = [mean([v[r,c] for v in vals]) for r in 1:nR, c in 1:nC]
@test meanValues[1,2] == 70.3
X = [2 4 10 "aaa" 10; 20 40 100 "gggg" missing; 200 400 1000 "zzzz" 1000]
trng = copy(TESTRNG)
#Random.seed!(trng,123)
mod = GeneralImputer(estimator=[DecisionTreeEstimator(rng=trng,verbosity=NONE),RandomForestEstimator(n_trees=1,rng=trng,verbosity=NONE),RandomForestEstimator(n_trees=1,rng=trng,verbosity=NONE),RandomForestEstimator(n_trees=1,rng=trng,verbosity=NONE),DecisionTreeEstimator(rng=trng,verbosity=NONE)],rng=trng,verbosity=NONE,cols_to_impute="all")
fit!(mod,X)
Random.seed!(trng,123)
X̂1 = predict(mod)
@test X̂1 == Any[2 4 10 "aaa" 10; 20 40 100 "gggg" 505; 200 400 1000 "zzzz" 1000]
Random.seed!(trng,123)
X̂1b = predict(mod,X)
@test X̂1b == Any[2 4 10 "aaa" 10; 20 40 100 "gggg" 505; 200 400 1000 "zzzz" 1000]
@test X̂1 == X̂1b
X2 = [2 4 10 missing 10; 20 40 100 "gggg" 100; 200 400 1000 "zzzz" 1000]
X̂2 = predict(mod,X2)
@test X̂2[1,4] == "aaa"
# ------------------------------
X = [1.0 2 missing 100; 3 missing missing 200; 4 5 6 300; missing 7 8 400; 9 10 11 missing; 12 13 missing missing; 14 15 missing 700; 16 missing missing 800;]
mod = GeneralImputer(estimator=DecisionTree.DecisionTreeRegressor(),rng=copy(TESTRNG),fit_function=DecisionTree.fit!,predict_function=DecisionTree.predict,recursive_passages=10)
Xfull = BetaML.fit!(mod,X)
@test size(Xfull) == (8,4) && typeof(Xfull) == Matrix{Float64}
mod = GeneralImputer(estimator=BetaML.DecisionTreeEstimator(),rng=copy(TESTRNG),recursive_passages=10)
Xfull2 = BetaML.fit!(mod,X)
@test size(Xfull2) == (8,4) && typeof(Xfull2) == Matrix{Float64}
mod = GeneralImputer(estimator=BetaML.DecisionTreeEstimator(),rng=copy(TESTRNG),missing_supported=true,recursive_passages=10)
Xfull3 = BetaML.fit!(mod,X)
@test size(Xfull3) == (8,4) && typeof(Xfull3) == Matrix{Float64}
X = [ 12 0.3 5 11;
21 0.1 1 18;
8 missing 9 9;
missing 0.6 5 4;
missing 0.4 missing 6;
18 missing 1 missing;
5 0.8 missing 15;
10 0.7 8 11;]
mod = GeneralImputer(estimator=DecisionTree.DecisionTreeRegressor(),rng=copy(TESTRNG),fit_function=DecisionTree.fit!,predict_function=DecisionTree.predict,recursive_passages=10)
mod2 = GeneralImputer(rng=copy(TESTRNG),recursive_passages=10)
Xfull = BetaML.fit!(mod,X)
Xfull2 = BetaML.fit!(mod2,X)
@test Xfull[4,1] > 1
@test Xfull[3,2] < 1
@test Xfull[5,3] > 1
@test Xfull[6,4] > 10
@test Xfull2[4,1] < Xfull2[5,1]
@test Xfull2[3,2] > Xfull2[6,2]
@test Xfull2[5,3] < Xfull2[7,3]
@test Xfull2[6,4] > 10
# this would error, as multiple passages are used
# predict(mod2,X)
rng2 = deepcopy(TESTRNG)
X = (vcat([[s*2-rand(rng2)/10 s*0.5*(1+rand(rng2)/10) exp(s)] for s in rand(rng2,600)]...))
X = convert(Matrix{Union{Float64,Missing}},X)
orig = [X[1,1], X[2,3], X[3,2]]
X[1,1] = missing #
X[2,3] = missing #
X[3,2] = missing #
rng2 = deepcopy(TESTRNG)
m = GeneralImputer(estimator=NeuralNetworkEstimator(rng=rng2, batch_size=256, epochs=300, verbosity=NONE), rng=rng2)
x_full = fit!(m,X)
imputed = [x_full[1,1], x_full[2,3], x_full[3,2]]
rme = relative_mean_error(orig,imputed)
@test rme < 0.5
# ------------------------------------------------------------------------------
println("Testing MLJ Interfaces...")
import MLJBase
const Mlj = MLJBase
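# All the MLJ wrappers below follow the same workflow; an illustrative sketch (not run here,
# mirroring the calls used in the tests that follow):
#=
Xt = Mlj.table(X)                                     # wrap the matrix in an MLJ table
model = BetaML.Bmlj.SimpleImputer()                   # any of the Bmlj imputer wrappers
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)   # low-level fit, as used in these tests
X̂ = Mlj.matrix(Mlj.transform(model, fitResults, Xt))  # impute and convert back to a matrix
=#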
println("Testing MLJ Interface for SimpleImputer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
Xt = Mlj.table(X)
model = BetaML.Bmlj.SimpleImputer(norm=1)
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
XM = Mlj.transform(model,fitResults,Xt)
x̂ = Mlj.matrix(XM)
@test isapprox(x̂[2,2],0.29546633468202105)
# Use the previously learned structure to impute missings..
Xnew_withMissing = Mlj.table([1.5 missing; missing missing; missing -2.3; 5.1 -2.3; 1 2; 1 2; 1 2; 1 2; 1 2])
XDNew = Mlj.transform(model,fitResults,Xnew_withMissing)
XDMNew = Mlj.matrix(XDNew)
@test isapprox(XDMNew[2,2],x̂[2,2]) # position only matters
println("Testing MLJ Interface for GaussianMixtureImputer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
Xt = Mlj.table(X)
model = BetaML.Bmlj.GaussianMixtureImputer(initialisation_strategy="grid",rng=copy(TESTRNG))
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
XM = Mlj.transform(model,fitResults,Xt)
x̂ = Mlj.matrix(XM)
@test isapprox(x̂[2,2],14.736620020139028)
# Use the previously learned structure to impute missings..
Xnew_withMissing = Mlj.table([1.5 missing; missing 38; missing -2.3; 5.1 -2.3])
XDNew = Mlj.transform(model,fitResults,Xnew_withMissing)
XDMNew = Mlj.matrix(XDNew)
@test isapprox(XDMNew[1,2],x̂[2,2])
model = BetaML.Bmlj.GaussianMixtureImputer(initialisation_strategy="grid",rng=copy(TESTRNG), mixtures=BetaML.SphericalGaussian)
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
@test report["AIC"] < 100000
println("Testing MLJ Interface for RandomForestImputer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
Xt = Mlj.table(X)
model = BetaML.Bmlj.RandomForestImputer(n_trees=40,rng=copy(TESTRNG))
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
XM = Mlj.transform(model,fitResults,Xt)
x̂ = Mlj.matrix(XM)
@test isapprox(x̂[2,2],10.144666666666666)
# Use the previously learned structure to impute missings..
Xnew_withMissing = Mlj.table([1.5 missing; missing 38; missing -2.3; 5.1 -2.3])
XDNew = Mlj.transform(model,fitResults,Xnew_withMissing)
XDMNew = Mlj.matrix(XDNew)
@test isapprox(XDMNew[1,2],x̂[2,2])
println("Testing MLJ Interface for GeneralImputer...")
X = [1 10.5;1.5 missing; 1.8 8; 1.7 15; 3.2 40; missing missing; 3.3 38; missing -2.3; 5.2 -2.4]
Xt = Mlj.table(X)
trng = copy(TESTRNG)
model = BetaML.Bmlj.GeneralImputer(estimator=[GaussianMixtureRegressor2(rng=copy(TESTRNG),verbosity=NONE),RandomForestEstimator(n_trees=40,rng=copy(TESTRNG),verbosity=NONE)],recursive_passages=2, missing_supported=true, rng = copy(TESTRNG))
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
XM = Mlj.transform(model,fitResults,Xt)
x̂ = Mlj.matrix(XM)
@test isapprox(x̂[2,2],11.8) # not the same as RF because the other columns are imputed too
# Use the previously learned structure to impute missings..
Xnew_withMissing = Mlj.table([1.5 missing; missing 38; missing -2.3; 5.1 -2.3])
XDNew = Mlj.transform(model,fitResults,Xnew_withMissing)
XDMNew = Mlj.matrix(XDNew)
@test isapprox(XDMNew[1,2],x̂[2,2])
X = [ 12 0.3 5 11;
21 0.1 1 18;
8 missing 9 9;
missing 0.6 5 4;
missing 0.4 missing 6;
18 missing 1 missing;
5 0.8 missing 15;
10 0.7 8 11;]
Xt = Mlj.table(X)
trng = copy(TESTRNG)
model = BetaML.Bmlj.GeneralImputer(estimator=DecisionTree.DecisionTreeRegressor(), fit_function=DecisionTree.fit!,predict_function=DecisionTree.predict,recursive_passages=10, rng = copy(TESTRNG))
modelMachine = Mlj.machine(model,Xt)
(fitResults, cache, report) = Mlj.fit(model, 0, Xt)
XM = Mlj.transform(model,fitResults,Xt)
x̂ = Mlj.matrix(XM)
@test x̂[4,1] > 1
@test x̂[3,2] < 1
@test x̂[5,3] > 1
@test x̂[6,4] > 10
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
["MIT"] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 24449 |
using Test
using DelimitedFiles, LinearAlgebra, Statistics #, MLDatasets
#using StableRNGs
#rng = StableRNG(123)
using BetaML
import BetaML.Nn: buildNetwork, forward, loss, backward, train!, get_nparams, _get_n_layers_weights
import BetaML.Nn: ConvLayer, ReshaperLayer # TODO: export these and remove this import when completed
TESTRNG = FIXEDRNG # This could change...
#TESTRNG = StableRNG(123)
println("*** Testing Neural Network...")
# ==================================
# New test
# ==================================
println("Testing Learnable structure...")
L1a = Learnable(([1.0 2; 3 4.0], [1.1,2.2], Float64[], [1.1,2.2], [1.1]))
L1b = Learnable(([1.0 2; 3 4.0], [1.1,2.2], Float64[], [1.1,2.2], [1.1]))
L1c = Learnable(([1.0 2; 3 4.0], [1.1,2.2], Float64[], [1.1,2.2], [1.1]))
foo = (((((sum(L1a,L1b,L1c) - (L1a + L1b + L1c)) + 10) - 4) * 2 ) / 3) *
(((((sum(L1a,L1b,L1c) - (L1a + L1b + L1c)) + 10) - 4) * 2 ) / 3)
@test foo.data[1][2] == 16.0 && foo.data[5][1] == 16.00
@test (L1a -1).data[1] ≈ (-1 * (1 - L1a)).data[1]
@test (2 / (L1a / 2)).data[2] ≈ (4/L1a).data[2]
@test sqrt(L1a).data[1][2,2] == 2.0
# ==================================
# New test
# ==================================
println("Testing basic NN behaviour...")
x = [0.1,1]
y = [1,0]
l1 = DenseNoBiasLayer(2,2,w=[2 1;1 1],f=identity,rng=copy(TESTRNG))
l2 = VectorFunctionLayer(2,f=softmax)
mynn = buildNetwork([l1,l2],squared_cost,name="Simple Multinomial logistic regression")
o1 = forward(l1,x)
@test o1 == [1.2,1.1]
#@code_warntype forward(l1,x)
o2 = forward(l2,o1)
@test o2 ≈ [0.5249791874789399, 0.47502081252106] ≈ softmax([1.2,1.1])
orig = predict(mynn,x')[1,:]
@test orig == o2
#@code_warntype Nn.predict(mynn,x')[1,:]
ϵ = squared_cost(y,o2)
#@code_warntype squared_cost(o2,y)
lossOrig = loss(mynn,x',y')
@test ϵ == lossOrig
#@code_warntype loss(mynn,x',y')
dϵ_do2 = dsquared_cost(y,o2)
@test dϵ_do2 == [-0.4750208125210601,0.47502081252106]
#@code_warntype dsquared_cost(o2,y)
dϵ_do1 = backward(l2,o1,dϵ_do2) # this takes long as it needs Zygote (the VectorFunctionLayer's dfw still uses Zygote)
@test dϵ_do1 ≈ [-0.23691761847142412, 0.23691761847142412]
#@code_warntype backward(l2,o1,dϵ_do2)
dϵ_dX = backward(l1,x,dϵ_do1)
@test dϵ_dX ≈ [-0.23691761847142412, 0.0]
#@code_warntype backward(l1,x,dϵ_do1)
l1w = get_params(l1)
#@code_warntype
l2w = get_params(l2)
#@code_warntype
w = get_params(mynn)
#@code_warntype get_params(mynn)
#typeof(w) # 2-element Vector{Float64}
origW = deepcopy(w)
l2dw = get_gradient(l2,o1,dϵ_do2)
@test length(l2dw.data) == 0
#@code_warntype get_gradient(l2,o1,dϵ_do2)
l1dw = get_gradient(l1,x,dϵ_do1)
@test l1dw.data[1] ≈ [-0.023691761847142414 -0.23691761847142412; 0.023691761847142414 0.23691761847142412]
#@code_warntype
dw = get_gradient(mynn,x,y)
#@code_warntype get_gradient(mynn,x,y)
y_deltax1 = predict(mynn,[x[1]+0.001 x[2]])[1,:]
#@code_warntype Nn.predict(mynn,[x[1]+0.001 x[2]])[1,:]
lossDeltax1 = loss(mynn,[x[1]+0.001 x[2]],y')
#@code_warntype
deltaloss = dot(dϵ_dX,[0.001,0])
#@code_warntype
@test isapprox(lossDeltax1-lossOrig,deltaloss,atol=0.0000001)
l1wNew = l1w
l1wNew.data[1][1,1] += 0.001
set_params!(l1,l1wNew)
lossDelPar = loss(mynn,x',y')
#@code_warntype
deltaLossPar = 0.001*l1dw.data[1][1,1]
lossDelPar - lossOrig
@test isapprox(lossDelPar - lossOrig,deltaLossPar,atol=0.00000001)
η = 0.01
#w = gradientDescentSingleUpdate(w,dw,η)
#w = w - dw * η
w = w - dw * η
#@code_warntype gradSub.(w, gradMul.(dw,η))
#@code_warntype
set_params!(mynn,w)
loss2 = loss(mynn,x',y')
#@code_warntype
@test loss2 < lossOrig
for i in 1:10000
local w = get_params(mynn)
local dw = get_gradient(mynn,x,y)
w = w - dw * η
set_params!(mynn,w)
end
lossFinal = loss(mynn,x',y')
@test predict(mynn,x')[1,1]>0.96
set_params!(mynn,origW)
train!(mynn,x',y',epochs=10000,batch_size=1,sequential=true,verbosity=NONE,opt_alg=SGD(η=t->η,λ=1),rng=copy(TESTRNG))
#@code_warntype train!(mynn,x',y',epochs=10000,batch_size=1,sequential=true,verbosity=NONE,opt_alg=SGD(η=t->η,λ=1))
lossTraining = loss(mynn,x',y')
#@code_warntype
@test isapprox(lossFinal,lossTraining,atol=0.00001)
li = DenseLayer(2,2,w=[2 1;1 1],f=identity,rng=copy(TESTRNG))
@test get_nparams(li) == 6
# Testing ScalarFunctionLayer with no weights and identity function (aka "Replicator") layer
l1 = DenseNoBiasLayer(2,2,w=[2 1;1 1],f=identity,rng=copy(TESTRNG))
l1b = ReplicatorLayer(2)
l2 = VectorFunctionLayer(2,f=softmax)
mynn2 = buildNetwork([l1,l1b,l2],squared_cost,name="Simple Multinomial logistic regression")
train!(mynn2,x',y',epochs=10000,batch_size=1,sequential=true,verbosity=NONE,opt_alg=SGD(η=t->η,λ=1),rng=copy(TESTRNG))
lossTraining2 = loss(mynn2,x',y')
#@code_warntype
@test isapprox(lossTraining,lossTraining2,atol=0.00001)
# ==================================
# NEW Test
println("Testing regression if it just works with manual derivatives...")
xtrain = [0.1 0.2; 0.3 0.5; 0.4 0.1; 0.5 0.4; 0.7 0.9; 0.2 0.1]
ytrain = [0.3; 0.8; 0.5; 0.9; 1.6; 0.3]
xtest = [0.5 0.6; 0.14 0.2; 0.3 0.7; 2.0 4.0]
ytest = [1.1; 0.36; 1.0; 6.0]
l1 = DenseLayer(2,3,w=[1 1; 1 1; 1 1], wb=[0,0,0], f=tanh, df=dtanh,rng=copy(TESTRNG))
l2 = DenseNoBiasLayer(3,2, w=[1 1 1; 1 1 1], f=relu, df=drelu,rng=copy(TESTRNG))
l3 = DenseLayer(2,1, w=[1 1], wb=[0], f=identity,df=didentity,rng=copy(TESTRNG))
mynn = buildNetwork(deepcopy([l1,l2,l3]),squared_cost,name="Feed-forward Neural Network Model 1",dcf=dsquared_cost)
train!(mynn,xtrain,ytrain, opt_alg=SGD(η=t -> 1/(1+t),λ=1), batch_size=1,sequential=true,epochs=100,verbosity=NONE,rng=copy(TESTRNG)) #
#@benchmark train!(mynn,xtrain,ytrain,batch_size=1,sequential=true,epochs=100,verbosity=NONE,opt_alg=SGD(η=t -> 1/(1+t),λ=1))
avgLoss = loss(mynn,xtest,ytest)
@test avgLoss ≈ 1.599729991966362
expectedŷtest= [0.7360644412052633, 0.7360644412052633, 0.7360644412052633, 2.47093434438514]
ŷtrain = dropdims(predict(mynn,xtrain),dims=2)
ŷtest = dropdims(predict(mynn,xtest),dims=2)
@test any(isapprox(expectedŷtest,ŷtest))
m = NeuralNetworkEstimator(layers=[l1,l2,l3],loss=squared_cost,dloss=dsquared_cost,batch_size=1,shuffle=false,epochs=100,verbosity=NONE,opt_alg=SGD(η=t -> 1/(1+t),λ=1),rng=copy(TESTRNG),descr="First test")
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
ŷtrain3 = predict(m,xtrain)
@test ŷtrain ≈ ŷtrain2 ≈ ŷtrain3
ŷtest2 = predict(m,xtest)
@test ŷtest ≈ ŷtest2
# With the ADAM optimizer...
l1 = DenseLayer(2,3,w=[1 1; 1 1; 1 1], wb=[0,0,0], f=tanh, df=dtanh,rng=copy(TESTRNG))
l2 = DenseNoBiasLayer(3,2, w=[1 1 1; 1 1 1], f=relu, df=drelu,rng=copy(TESTRNG))
l3 = DenseLayer(2,1, w=[1 1], wb=[0], f=identity,df=didentity,rng=copy(TESTRNG))
mynn = buildNetwork([l1,l2,l3],squared_cost,name="Feed-forward Neural Network with ADAM",dcf=dsquared_cost)
train!(mynn,xtrain,ytrain,batch_size=1,sequential=true,epochs=100,verbosity=NONE,opt_alg=ADAM(),rng=copy(TESTRNG))
avgLoss = loss(mynn,xtest,ytest)
@test avgLoss ≈ 0.9497779759064725
expectedOutput = [1.7020525792404175, -0.1074729043392682, 1.4998367847079956, 3.3985794704732717]
predicted = dropdims(predict(mynn,xtest),dims=2)
@test any(isapprox(expectedOutput,predicted))
# ==================================
# NEW TEST
# ==================================
println("Testing using AD...")
ϵtrain = [1.023,1.08,0.961,0.919,0.933,0.993,1.011,0.923,1.084,1.037,1.012]
ϵtest = [1.056,0.902,0.998,0.977]
xtrain = [0.1 0.2; 0.3 0.5; 0.4 0.1; 0.5 0.4; 0.7 0.9; 0.2 0.1; 0.4 0.2; 0.3 0.3; 0.6 0.9; 0.3 0.4; 0.9 0.8]
ytrain = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtrain[i] for (i,x) in enumerate(eachrow(xtrain))]
xtest = [0.5 0.6; 0.14 0.2; 0.3 0.7; 20.0 40.0;]
ytest = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtest[i] for (i,x) in enumerate(eachrow(xtest))]
l1 = DenseLayer(2,3,w=ones(3,2), wb=zeros(3),rng=copy(TESTRNG))
l2 = DenseLayer(3,1, w=ones(1,3), wb=zeros(1),rng=copy(TESTRNG))
mynn = buildNetwork([l1,l2],squared_cost,name="Feed-forward Neural Network Model 1")
train!(mynn,xtrain,ytrain,epochs=1000,sequential=true,batch_size=1,verbosity=NONE,opt_alg=SGD(η=t->0.01,λ=1),rng=copy(TESTRNG))
avgLoss = loss(mynn,xtest,ytest)
@test avgLoss ≈ 0.0032018998005211886
ŷtestExpected = [0.4676699631752518,0.3448383593117405,0.4500863419692639,9.908883999376018]
ŷtrain = dropdims(predict(mynn,xtrain),dims=2)
ŷtest = dropdims(predict(mynn,xtest),dims=2)
@test any(isapprox(ŷtest,ŷtestExpected))
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.06
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest <= 0.05
m = NeuralNetworkEstimator(rng=copy(TESTRNG),verbosity=NONE)
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.06
#predicted = dropdims(Nn.predict(mynn,xtrain),dims=2)
#ytrain
# ==================================
# NEW TEST
# ==================================
println("Going through Multinomial logistic regression (using softmax)...")
#=
using RDatasets
using Random
using DataFrames: DataFrame
using CSV
Random.seed!(123);
iris = dataset("datasets", "iris")
iris = iris[shuffle(axes(iris, 1)), :]
CSV.write(joinpath(@__DIR__,"data","iris_shuffled.csv"),iris)
=#
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = map(x->Dict("setosa" => 1, "versicolor" => 2, "virginica" =>3)[x],iris[:, 5])
y_oh = fit!(OneHotEncoder(),y)
ntrain = Int64(round(size(x,1)*0.8))
xtrain = x[1:ntrain,:]
ytrain = y[1:ntrain]
ytrain_oh = y_oh[1:ntrain,:]
xtest = x[ntrain+1:end,:]
ytest = y[ntrain+1:end]
xtrain = fit!(Scaler(),xtrain)
#pcaOut = pca(xtrain,error=0.01)
#(xtrain,P) = pcaOut.X, pcaOut.P
xtest = fit!(Scaler(),xtest)
#xtest = xtest*P
l1 = DenseLayer(4,10, w=ones(10,4), wb=zeros(10),f=celu,rng=copy(TESTRNG))
l2 = DenseLayer(10,3, w=ones(3,10), wb=zeros(3),rng=copy(TESTRNG))
l3 = VectorFunctionLayer(3,f=softmax)
mynn = buildNetwork([l1,l2,l3],squared_cost,name="Multinomial logistic regression Model Sepal")
train!(mynn,xtrain,ytrain_oh,epochs=254,batch_size=8,sequential=true,verbosity=NONE,opt_alg=SGD(η=t->0.001,λ=1),rng=copy(TESTRNG))
ŷtrain = predict(mynn,xtrain)
ŷtest = predict(mynn,xtest)
trainAccuracy = accuracy(ytrain,ŷtrain,tol=1)
testAccuracy = accuracy(ytest,ŷtest,tol=1)
@test testAccuracy >= 0.8 # switch to random initialisation/training for much better accuracy
# With ADAM
l1 = DenseLayer(4,10, w=ones(10,4), wb=zeros(10),f=celu,rng=copy(TESTRNG))
l2 = DenseLayer(10,3, w=ones(3,10), wb=zeros(3),rng=copy(TESTRNG),df=nothing) # testing AD
l3 = VectorFunctionLayer(3,f=softmax)
mynn = buildNetwork(deepcopy([l1,l2,l3]),squared_cost,name="Multinomial logistic regression Model Sepal",dcf=nothing)
train!(mynn,xtrain,ytrain_oh,epochs=10,batch_size=8,sequential=true,verbosity=NONE,opt_alg=ADAM(η=t -> 1/(1+t), λ=0.5),rng=copy(TESTRNG))
ŷtrain = predict(mynn,xtrain)
ŷtest = predict(mynn,xtest)
trainAccuracy = accuracy(ytrain,ŷtrain,tol=1)
testAccuracy = accuracy(ytest,ŷtest,tol=1)
@test testAccuracy >= 1
m = NeuralNetworkEstimator(layers=[l1,l2,l3],loss=squared_cost,dloss=nothing,batch_size=8,shuffle=false,epochs=10,verbosity=NONE,opt_alg=ADAM(η=t -> 1/(1+t), λ=0.5),rng=copy(TESTRNG),descr="Iris classification")
fit!(m,xtrain,ytrain_oh)
ŷtrain2 = predict(m)
@test ŷtrain ≈ ŷtrain2
reset!(m)
fit!(m,xtrain,ytrain_oh)
ŷtrain3 = predict(m)
@test ŷtrain ≈ ŷtrain2 ≈ ŷtrain3
reset!(m)
m.hpar.epochs = 5
fit!(m,xtrain,ytrain_oh)
#fit!(m,xtrain,ytrain_oh)
ŷtrain4 = predict(m)
acc = accuracy(ytrain,ŷtrain4,tol=1)
@test acc >= 0.95
m = NeuralNetworkEstimator(rng=copy(TESTRNG),verbosity=NONE)
fit!(m,xtrain,ytrain_oh)
ŷtrain5 = predict(m)
acc = accuracy(ytrain,ŷtrain5,tol=1, rng=copy(TESTRNG))
@test acc >= 0.78 # 0.9
# ------------------------------------------------------------------------------
# Testing GroupedLayer
println("Testing GroupedLayer and ReplicatorLayer...")
X = Float64.(collect(transpose(reshape(1:3*7,7,3))))
Y = Float64.(collect(1:3))
l1_1 = DenseLayer(2,3, rng=copy(TESTRNG),f=identity,w=ones(3,2),wb=[10,10,10])
l1_2 = DenseNoBiasLayer(3,3, rng=copy(TESTRNG),f=identity, w=ones(3,3))
l1_3 = ReplicatorLayer(2)
l1 = GroupedLayer([l1_1,l1_2,l1_3])
l1bis = GroupedLayer([l1_3,l1_1,l1_2])
l2 = DenseLayer(8,1,f=identity,w=ones(1,8),rng=copy(TESTRNG))
o1 = forward(l1,X[1,:])
o1bis = forward(l1bis,X[1,:])
@test o1 == [13,13,13,12,12,12,6,7]
@test o1bis == [1,2,17,17,17,18,18,18]
o1_1 = forward(l1_1,X[1,1:2])
@test typeof(o1) == typeof(o1_1)
dϵ_dI = backward(l1,X[1,:], o1 ./ 10)
dϵ_dIbis = backward(l1bis,X[1,:], o1bis ./ 10)
@test dϵ_dI ≈ [3.9, 3.9,3.6, 3.6, 3.6,0.6, 0.7]
@test dϵ_dIbis ≈ [0.1,0.2,5.1,5.1,5.4,5.4,5.4]
dϵ_dI_1_1 = backward(l1_1,X[1,1:2], o1_1 ./ 10)
@test typeof(dϵ_dI) == typeof(dϵ_dI_1_1)
g1 = get_gradient(l1,X[1,:], o1 ./ 10)
g1bis = get_gradient(l1bis,X[1,:], o1bis ./ 10)
g1_1 = get_gradient(l1_1,X[1,1:2], o1_1 ./ 10)
@test all(g1.data .≈ ([1.3 2.6; 1.3 2.6; 1.3 2.6], [1.3, 1.3, 1.3], [3.6 4.8 6.0; 3.6 4.8 6.0; 3.6 4.8 6.0]))
@test all(g1bis.data .≈ ([5.1 6.8; 5.1 6.8; 5.1 6.8], [1.7, 1.7, 1.7], [9.0 10.8 12.6; 9.0 10.8 12.6; 9.0 10.8 12.6]))
@test typeof(g1.data[1:2]) == typeof(g1_1.data)
p = get_params(l1)
pbis = get_params(l1bis)
set_params!(l1,p)
set_params!(l1bis,pbis)
o1_after = forward(l1,X[1,:])
o1bis_after = forward(l1bis,X[1,:])
@test o1_after == o1
@test o1bis_after == o1bis
@test size(l1) == ((7,),(8,))
treenn = buildNetwork([l1,l2],squared_cost)
predict(treenn,X)
loss(treenn,X,Y)
get_gradient(treenn,X[1,:],Y[1,:])
train!(treenn,X,Y,epochs=5000,rng=copy(TESTRNG))
Ŷ = predict(treenn,X)
rme = relative_mean_error(Y,Ŷ)
@test rme <= 0.3
#=
if "all" in ARGS
# ==================================
# NEW TEST
# ==================================
println("Testing colvolution layer with MINST data...")
train_x, train_y = MNIST.traindata()
test_x, test_y = MNIST.testdata()
test = train_x[:,:,1]
eltype(test)
test .+ 1
end
=#
# ==================================
# NEW Test
if VERSION >= v"1.6"
println("Testing VectorFunctionLayer with pool1d function...")
println("Attention: this test requires at least Julia 1.6")
x = rand(copy(TESTRNG),300,5)
y = [norm(r[1:3])+2*norm(r[4:5],2) for r in eachrow(x) ]
(N,D) = size(x)
l1 = DenseLayer(D,8, f=relu,rng=copy(TESTRNG))
l2 = VectorFunctionLayer(size(l1)[2][1],f=(x->pool1d(x,2,f=mean)))
l3 = DenseLayer(size(l2)[2][1],1,f=relu, rng=copy(TESTRNG))
mynn = buildNetwork([l1,l2,l3],squared_cost,name="Regression with a pooled layer")
train!(mynn,x,y,epochs=50,verbosity=NONE,rng=copy(TESTRNG))
ŷ = predict(mynn,x)
rmeTrain = relative_mean_error(y,ŷ,normrec=false)
@test rmeTrain < 0.14
end
# ==================================
# NEW TEST
println("Testing ConvLayer....")
d2convl = ConvLayer((14,8),(6,3),3,2,stride=(6,3))
@test d2convl.padding_start == [2,1]
@test d2convl.padding_end == [2,0]
@test size(d2convl) == ((14,8,3),(3,3,2))
d2convl = ConvLayer((14,8),(6,3),3,2,stride=3)
@test d2convl.padding_start == [2,1]
@test d2convl.padding_end == [2,0]
@test size(d2convl) == ((14, 8, 3), (5, 3, 2))
d2convl = ConvLayer((14,8),(6,3),3,2,stride=3, padding=((2,1),(1,0)))
@test size(d2convl) == ((14, 8, 3), (4, 3, 2))
d2convl = ConvLayer((13,8),(6,3),3,2,stride=3)
@test d2convl.padding_start == [3,1]
@test d2convl.padding_end == [2,0]
@test size(d2convl) == ((13, 8, 3), (5, 3, 2))
d2convl = ConvLayer((7,5),(4,3),3,2,stride=2)
@test d2convl.input_size == [7,5,3]
@test d2convl.ndims == 2
@test size(d2convl.weight) == (4,3,3,2)
@test d2convl.stride == [2,2]
d2conv = ConvLayer((4,4),(2,2),3,2,kernel_init=reshape(1:24,(2,2,3,2)),bias_init=[1,1])
x = ones(4,4,3)
preprocess!(d2conv)
y = forward(d2conv,x)
# The hard-coded tensor syntax used here requires Julia >= 1.7
if VERSION >= v"1.7"
@test y[1,1,1] == dot([0 0; 0 1;;; 0 0; 0 1;;; 0 0; 0 1 ],selectdim(d2conv.weight,4,1)) + d2conv.bias[1] == 25
@test y[2,3,1] == dot([1 1; 1 1;;; 1 1; 1 1;;; 1 1; 1 1 ],selectdim(d2conv.weight,4,1)) + d2conv.bias[1] == 79
end
de_dy = y ./100
de_dw = get_gradient(d2conv,x,de_dy)
de_dx = backward(d2conv,x,de_dy)
d1conv = ConvLayer(8,3,1,1,stride=3,kernel_init=reshape(1:3,(3,1,1)),bias_init=[10,])
d1conv2 = ConvLayer(8,3,1,1,stride=2,kernel_init=reshape(1:3,(3,1,1)),bias_init=[10,])
x = collect(1:8)
preprocess!(d1conv)
preprocess!(d1conv2)
#@btime preprocess!(d1conv)
y = forward(d1conv,x)
@test y[1,1] == dot([0,1,2],[1,2,3]) + 10
@test y[3,1] == dot([6,7,8],[1,2,3]) + 10
# The hard-coded tensor syntax used here requires Julia >= 1.7
if VERSION >= v"1.7"
de_dy = [1.0; 2.0; 3.0;;]
de_dw = get_gradient(d1conv,x,de_dy)
de_dx = backward(d1conv,x,de_dy)
@test de_dw.data[1] == [24.0; 30.0; 36.0;;;]
@test de_dw.data[2] == [6]
@test de_dx == [2.0; 3; 2; 4; 6; 3; 6; 9;;]
# using d1conv2 (some overlapping filters for the same x)
de_dy = [1.0; 2.0; 3.0; 4.0;;]
de_dw = get_gradient(d1conv2,x,de_dy)
de_dx = backward(d1conv2,x,de_dy)
@test de_dw.data[1] == [40.0; 50.0; 60.0;;;]
@test de_dw.data[2] == [10.0]
@test de_dx == [2.0;5;4;9;6;13;8;12;;]
end
#=
x = reshape(1:12*12*3,12,12,3)
l = ConvLayer((12,12,3),(4,4),5)
preprocess!(l)
@btime preprocess!(l)
y = forward(l,x)
@btime forward($l,$x)
de_dy = y ./ 100
de_dw = get_gradient(l,x,de_dy)
@btime get_gradient($l,$x,$de_dy)
de_dx = backward(l,x,de_dy)
@btime backward($l,$x,$de_dy)
@profile get_gradient(l,x,de_dy)
=#
x = collect(1:12)
l1 = ReshaperLayer((12,1),(3,2,2))
l2 = ConvLayer((3,2),(2,2),2,1,kernel_init=ones(2,2,2,1),bias_init=[1])
l3 = ConvLayer(size(l2)[2],(2,2),1,kernel_init=ones(2,2,1,1),bias_init=[1]) # alternative constructor
l4 = ReshaperLayer((3,2,1))
preprocess!.([l2,l3])
l1y = forward(l1,x)
l2y = forward(l2,l1y)
l3y = forward(l3,l2y)
l4y = forward(l4,l3y)
truey = [8.0, 31.0, 43.0, 33.0, 101.0, 149.0]
mynn = buildNetwork([l1,l2,l3,l4],squared_cost)
ŷ = predict(mynn,x')
e = loss(mynn,x',truey')
@test e ≈ 4
#x = rand(copy(TESTRNG),100,3*3*2)
x = convert(Matrix{Float32},reshape(1:100*3*3*2,100,3*3*2) ./ 100)
y = convert(Vector{Float32},[norm(r[1:9])+2*norm(r[10:18],2) for r in eachrow(x) ])
(N,D) = size(x)
l1 = ReshaperLayer((D,1),(3,3,2))
l2 = ConvLayer((3,3),(2,2),2,3,rng=copy(TESTRNG),kernel_eltype=Float32)
l3 = ConvLayer(size(l2)[2],(2,2),8,rng=copy(TESTRNG),kernel_eltype=Float32)
l4 = ReshaperLayer(size(l3)[2])
l5 = DenseLayer(size(l4)[2][1],1,f=relu, w_eltype=Float32, rng=copy(TESTRNG))
layers = [l1,l2,l3,l4,l5]
mynn = buildNetwork(layers,squared_cost,name="Regression with a convolutional layer")
preprocess!(mynn)
x1_hat = predict(mynn,x[1,:]')
@test typeof(x1_hat) == Matrix{Float32}
train!(mynn,x,y,epochs=60,verbosity=NONE,rng=copy(TESTRNG))
ŷ = predict(mynn,x)
rmeTrain = relative_mean_error(y,ŷ,normrec=false)
@test rmeTrain < 0.01
#using BenchmarkTools
#@btime train!($mynn,$x,$y,epochs=60,verbosity=NONE,rng=copy($TESTRNG))
#240.604 ms (1056544 allocations: 107.66 MiB)
#314.504 ms (774762 allocations: 99.39 MiB)
# ==================================
# NEW TEST
println("Testing PoolingLayer....")
d2pooll = PoolingLayer((14,8),(6,3),3)
@test d2pooll.padding_start == [2,1]
@test d2pooll.padding_end == [2,0]
@test size(d2pooll) == ((14, 8, 3), (3, 3, 3))
d2pooll = PoolingLayer((14,8),(6,3),3,stride=3, padding=((2,1),(1,0)))
@test size(d2pooll) == ((14, 8, 3), (4, 3, 3))
d2pooll = PoolingLayer((13,8),(6,3),3,stride=3)
@test d2pooll.padding_start == [3,1]
@test d2pooll.padding_end == [2,0]
@test size(d2pooll) == ((13, 8, 3), (5, 3, 3))
d2pooll = PoolingLayer((7,5),(4,3),3,stride=2)
@test d2pooll.input_size == [7,5,3]
@test d2pooll.ndims == 2
@test d2pooll.kernel_size == [4,3,3,3]
@test d2pooll.stride == [2,2]
d2pool = PoolingLayer((4,4),(2,2),3,f=mean)
x = reshape(1:(4*4*3),4,4,3)
preprocess!(d2pool)
@test d2pool.y_to_x_ids[2,2,2] == [(3,3,2),(4,3,2),(3,4,2),(4,4,2)]
y = forward(d2pool,x)
@test y[1,2,1] == 11.5
de_dy = y ./10
de_dw = get_gradient(d2pool,x,de_dy)
de_dx = backward(d2pool,x,de_dy)
@test de_dx[2,3,3] == (de_dy ./ 4)[1,2,3]
# full example - x as classical tabular data
x = reshape(1:100*6*6*2,100,6*6*2) ./ 100
y = [norm(r[1:36])+2*norm(r[37:72],2) for r in eachrow(x) ]
(N,D) = size(x)
l1 = ReshaperLayer((D,1),(6,6,2))
l2 = ConvLayer((6,6),(2,2),2,4,rng=copy(TESTRNG))
l3 = PoolingLayer((6,6,4),(2,2))
l4 = ConvLayer(size(l3)[2],(2,2),8,rng=copy(TESTRNG))
l5 = ReshaperLayer(size(l4)[2])
l6 = DenseLayer(size(l5)[2][1],1,f=identity, rng=copy(TESTRNG))
layers = [l1,l2,l3,l4,l5,l6]
mynn = buildNetwork(layers,squared_cost,name="Regression with a convolutional layer")
preprocess!(mynn)
dummyx = x[1,:]
nnout = BetaML.predict(mynn,dummyx')
l1y = forward(l1,dummyx)
l2y = forward(l2,l1y)
l3y = forward(l3,l2y)
l4y = forward(l4,l3y)
l5y = forward(l5,l4y)
l6y = forward(l6,l5y)
@test l6y[1] == nnout[1,1]
train!(mynn,x,y,epochs=40,verbosity=NONE,rng=copy(TESTRNG))
ŷ = BetaML.predict(mynn,x)
rmeTrain = relative_mean_error(y,ŷ,normrec=false)
@test rmeTrain < 0.1
a = 1
#l1 = ReshaperLayer((D,1),(6,6,2))
#l2 = ConvLayer((6,6),(2,2),2,4,rng=copy(TESTRNG))
#l3 = PoolingLayer((6,6,4),(2,2))
#l4 = ConvLayer(size(l3)[2],(2,2),8,rng=copy(TESTRNG))
#l5 = ReshaperLayer(size(l4)[2])
#l6 = DenseLayer(size(l5)[2][1],1,f=identity, rng=copy(TESTRNG))
#layers = [l1,l2,l3,l4,l5,l6]
#mynn = buildNetwork(layers,squared_cost,name="Regression with a convolutional layer")
#@btime train!(mynn,x,y,epochs=5,verbosity=NONE,rng=copy(TESTRNG))
#ŷ = BetaML.predict(mynn,x)
#rmeTrain = relative_mean_error(y,ŷ,normrec=false)
#=
# x organised as multidimensional array TODO
x = reshape(1:100*6*6*2,100,6,6,2) ./ 100
y = collect(1:100)
(N,D) = size(x)
l2 = ConvLayer((6,6),(2,2),2,4,rng=copy(TESTRNG))
l3 = PoolingLayer((6,6,4),(2,2))
l4 = ConvLayer(size(l3)[2],(2,2),8,rng=copy(TESTRNG))
l5 = ReshaperLayer(size(l4)[2])
l6 = DenseLayer(size(l5)[2][1],1,f=identity, rng=copy(TESTRNG))
layers = [l2,l3,l4,l5,l6]
mynn = buildNetwork(layers,squared_cost,name="Regression with a convolutional layer")
preprocess!(mynn)
dummyx = selectdim(x,1,1)
nnout = predict(mynn,dummyx)
l1y = forward(l1,dummyx)
l2y = forward(l2,l1y)
l3y = forward(l3,l2y)
l4y = forward(l4,l3y)
l5y = forward(l5,l4y)
l6y = forward(l6,l5y)
@test l6y == nnout
train!(mynn,x,y,epochs=10,verbosity=FULL,rng=copy(TESTRNG))
ŷ = predict(mynn,x)
rmeTrain = relative_mean_error(y,ŷ,normrec=false)
@test rmeTrain < 0.01
=#
# ==================================
# NEW TEST
println("Testing MLJ interface for FeedfordwarNN....")
import MLJBase
const Mlj = MLJBase
import StatisticalMeasures
X, y = Mlj.@load_boston
model = BetaML.Bmlj.NeuralNetworkRegressor(rng=copy(TESTRNG))
regressor = Mlj.machine(model, X, y)
(fitresult, cache, report) = Mlj.fit(model, -1, X, y)
yhat = Mlj.predict(model, fitresult, X)
@test relative_mean_error(y,yhat,normrec=true) < 0.2
X, y = Mlj.@load_boston
y2d = [y y]
model = BetaML.Bmlj.MultitargetNeuralNetworkRegressor(rng=copy(TESTRNG))
regressor = Mlj.machine(model, X, y2d)
(fitresult, cache, report) = Mlj.fit(model, -1, X, y2d)
yhat = Mlj.predict(model, fitresult, X)
@test relative_mean_error(y2d,yhat,normrec=true) < 0.2
X, y = Mlj.@load_iris
model = BetaML.Bmlj.NeuralNetworkClassifier(rng=copy(TESTRNG),epochs=500,batch_size=64)
regressor = Mlj.machine(model, X, y)
(fitresult, cache, report) = Mlj.fit(model, -1, X, y)
yhat = Mlj.predict(model, fitresult, X)
#@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat, y)) < 0.25
@test sum(Mlj.mode.(yhat) .== y)/length(y) >= 0.98
#=
x = Mlj.matrix(X)
sm = Scaler()
xs = fit!(sm,x)
m = AutoEncoder()
x2 = fit!(m,xs)
x̂s = inverse_predict(m,x2)
x̂ = inverse_predict(sm,x̂s)
rme = relative_mean_error(x,x̂)
=#
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
["MIT"] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 11207 |
using Statistics
using Test
using DelimitedFiles
import MLJBase
const Mlj = MLJBase
import StatisticalMeasures
using StableRNGs
using BetaML
import BetaML.Perceptron: perceptron, perceptronBinary, kernel_perceptron_classifier, kernel_perceptron_classifier_binary, pegasos, pegasosBinary
#TESTRNG = FIXEDRNG # This could change...
TESTRNG = StableRNG(123)
println("*** Testing Perceptron algorithms...")
# ==================================
# TEST 1: Normal perceptron
println("Going through Test1 (normal Perceptron)...")
perceptronData = readdlm(joinpath(@__DIR__,"data/binary2DData.csv"),'\t')
x = copy(perceptronData[:,[2,3]])
y = convert(Array{Int64,1},copy(perceptronData[:,1]))
ntrain = Int64(round(length(y)*0.8))
xtrain = x[1:ntrain,:]
ytrain = y[1:ntrain]
xtest = x[ntrain+1:end,:]
ytest = y[ntrain+1:end]
classes = unique(y)
out = perceptron(xtrain, ytrain, shuffle=false,nMsgs=0)
ŷtrain = predict(xtrain,out.θ,out.θ₀,out.classes)
ϵtrain = error(ytrain, mode(ŷtrain))
ŷtest = predict(xtest,out.θ,out.θ₀,classes)
outTest = perceptron(xtrain, ytrain, shuffle=false,nMsgs=0,return_mean_hyperplane=true)
ŷavgtest = predict(xtest,outTest.θ,outTest.θ₀,outTest.classes)
ϵ = error(ytest, mode(ŷtest))
ϵavg = error(ytest, mode(ŷavgtest))
@test ϵ < 0.03
@test ϵavg < 0.2
m = PerceptronClassifier(shuffle=false, verbosity=NONE, rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
ŷtrain3 = predict(m,xtrain)
ϵtrain = error(ytrain, mode(ŷtrain3))
@test ŷtrain == ŷtrain2 == ŷtrain3
# Test save/load
model_save("test.jld2"; mtemp=m)
model_save("test.jld2", true; m, m2=m) # should overwrite the file...
model_save("test.jld2"; m3=m)
models = model_load("test.jld2")
@test collect(keys(models)) == ["m","m2","m3"]
ŷtrain4 = predict(models["m2"])
mb = model_load("test.jld2","m")
(mc, md) = model_load("test.jld2","m", "m2")
ŷtrain5 = predict(mb)
ŷtrain6 = predict(mc)
ŷtrain7 = predict(md)
@test ŷtrain == ŷtrain4 == ŷtrain5 == ŷtrain6 == ŷtrain7
pars = parameters(m)
pars.weigths[1,1] = 10
pars.weigths[2,1] = -10
ŷtrain8 = predict(m,xtrain)
@test ŷtrain8 != ŷtrain
hpars = hyperparameters(m)
hpars.epochs = 10
@test m.hpar.epochs == 10
opt = options(m)
opt.descr="Hello"
@test m.opt.descr == "Hello"
println("Testing multiple classes...")
#xtrain = [3 4 5; 5 3 5; 3 7 2; 8 5 3; 4 2 3; 3 2 1; 8 3 4; 3 5 1; 1 9 3; 4 2 1]
xtrain = rand(TESTRNG,100,3)
ytt = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtrain))]
ytrain = [i > median(ytt)*1.1 ? "big" : i > median(ytt)*0.9 ? "avg" : "small" for i in ytt]
#xtest = [2 2 3; 3 2 2; 4 1 2; 4 3 2; 3 7 2]
xtest = rand(TESTRNG,20,3)
ytt2 = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtest))]
ytest = [i > median(ytt2)*1.1 ? "big" : i > median(ytt2)*0.9 ? "avg" : "small" for i in ytt2]
out = perceptron(xtrain, ytrain, shuffle=false,nMsgs=0)
out2 = perceptron(xtrain,ytrain,θ₀=[0.0, 0.0, 0.0],θ=[[0.0, 0.0, 0.0], [0.0,0.0,0.0], [0.0,0.0,0.0]])
@test out == out2
ŷtrain = predict(xtrain,out.θ,out.θ₀,out.classes)
ŷtest = predict(xtest,out.θ,out.θ₀,out.classes)
ϵtrain = error(ytrain, mode(ŷtrain))
ϵtest = error(ytest, mode(ŷtest))
@test ϵtrain < 0.4
@test ϵtest < 0.4
m = PerceptronClassifier(shuffle=false, verbosity=NONE, rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
ŷtrain3 = predict(m,xtrain)
@test all([ŷtrain[r][k] ≈ ŷtrain2[r][k] ≈ ŷtrain3[r][k] for k in keys(ŷtrain[1]) for r in 1:length(ŷtrain)])
# ==================================
# Test 2: Kernel Perceptron
# ==================================
println("Going through Test2 (Kernel Perceptron)...")
xtrain = [3 4 5; 5 3 5; 3 7 2; 8 5 3; 4 2 3; 3 2 1; 8 3 4; 3 5 1; 1 9 3; 4 2 1]
ytt = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtrain))]
ytrain = [i > median(ytt) ? 1 : -1 for i in ytt]
xtest = [ 3 7 2; 2 2 3; 3 2 2; 4 1 2; 4 3 2;]
#xtest = xtrain
ytt2 = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtest))]
ytest = [i > median(ytt2) ? 1 : -1 for i in ytt2]
#out = KernelPerceptronClassifier(xtrain, ytrain, K=polynomial_kernel,rShuffle=true,nMsgs=100)
#ŷtest = predict(xtest,out[1][1],out[1][2],out[1][3], K=polynomial_kernel)
out = kernel_perceptron_classifier_binary(xtrain, ytrain, K=radial_kernel,shuffle=false,nMsgs=0,α=ones(Int64,length(ytrain)))
# the same: out = KernelPerceptronClassifierBinary(xtrain, ytrain, K=radial_kernel,shuffle=false,nMsgs=0)
ŷtest = predict(xtest,out.x,out.y,out.α, K=out.K)
ϵ = error(ytest, ŷtest)
ŷtestExpected = [1,-1,-1,-1,-1]
@test ϵ ≈ 0.2
#@test any(isapprox(ŷtestExpected,ŷtest))
@test any(ŷtestExpected == ŷtest )
# Multiclass..
outMultiClass = kernel_perceptron_classifier(xtrain, ytrain, K=radial_kernel,shuffle=false,nMsgs=0)
ŷtest = predict(xtest,outMultiClass.x,outMultiClass.y,outMultiClass.α, outMultiClass.classes,K=outMultiClass.K)
ϵ = error(ytest, mode(ŷtest))
ŷtestExpected = [1,-1,-1,-1,-1]
@test ϵ ≈ 0.2
#@test any(isapprox(ŷtestExpected,ŷtest))
@test any(ŷtestExpected == mode(ŷtest) )
xtrain = rand(TESTRNG,100,3)
ytt = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtrain))]
ytrain = [i > median(ytt)*1.1 ? "big" : i > median(ytt)*0.9 ? "avg" : "small" for i in ytt]
#xtest = [2 2 3; 3 2 2; 4 1 2; 4 3 2; 3 7 2]
xtest = rand(TESTRNG,20,3)
ytt2 = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtest))]
ytest = [i > median(ytt2)*1.1 ? "big" : i > median(ytt2)*0.9 ? "avg" : "small" for i in ytt2]
out = kernel_perceptron_classifier(xtrain, ytrain, shuffle=false,nMsgs=0,T=1000)
ŷtrain = predict(xtrain,out.x,out.y,out.α, out.classes,K=out.K)
ŷtest = predict(xtest,out.x,out.y,out.α, out.classes,K=out.K)
ϵtrain = error(ytrain, mode(ŷtrain))
ϵtest = error(ytest, mode(ŷtest))
@test ϵtrain < 0.1
@test ϵtest < 0.8
m = KernelPerceptronClassifier(shuffle=false,verbosity=NONE, rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
ŷtrain3 = predict(m,xtrain)
@test all([ŷtrain[r][k] ≈ ŷtrain2[r][k] ≈ ŷtrain3[r][k] for k in keys(ŷtrain[1]) for r in 1:length(ŷtrain)])
ŷtest2 = predict(m,xtest)
@test all([ŷtest[r][k] ≈ ŷtest2[r][k] for k in keys(ŷtest[1]) for r in 1:length(ŷtest)])
# Testing autotune for classification...
m = KernelPerceptronClassifier(verbosity=NONE, rng=copy(TESTRNG),autotune=true, tunemethod=SuccessiveHalvingSearch(multithreads=true, hpranges=Dict("epochs" =>[50,100,1000,10000])))
m = KernelPerceptronClassifier(verbosity=NONE, rng=copy(TESTRNG),autotune=true)
ŷtrain = fit!(m,xtrain,ytrain)
optpar = hyperparameters(m)
ŷtest = predict(m,xtest)
ϵtrain = error(ytrain, mode(ŷtrain))
ϵtest = error(ytest, mode(ŷtest))
@test ϵtrain < 0.1
@test ϵtest < 0.8
# ==================================
# Test 3: PegasosClassifier
# ==================================
println("Going through Test3 (PegasosClassifier)...")
perceptronData = readdlm(joinpath(@__DIR__,"data/binary2DData.csv"),'\t')
x = copy(perceptronData[:,[2,3]])
y = convert(Array{Int64,1},copy(perceptronData[:,1]))
xtrain = x[1:160,:]
ytrain = y[1:160]
xtest = x[161:end,:]
ytest = y[161:end]
out = pegasos(xtrain, ytrain, shuffle=false, nMsgs=0)
ŷtest = predict(xtest,out.θ,out.θ₀,out.classes)
outAvg = pegasos(xtrain, ytrain, shuffle=false, nMsgs=0, return_mean_hyperplane=true)
ŷavgtest = predict(xtest,outAvg.θ,outAvg.θ₀,outAvg.classes)
ϵ = error(ytest, mode(ŷtest))
ϵavg = error(ytest, mode(ŷavgtest))
@test ϵ ≈ 0.025
@test ϵavg ≈ 0.1
println("Testing pegasos with multiple classes...")
#xtrain = [3 4 5; 5 3 5; 3 7 2; 8 5 3; 4 2 3; 3 2 1; 8 3 4; 3 5 1; 1 9 3; 4 2 1]
xtrain = rand(TESTRNG,100,3)
ytt = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtrain))]
ytrain = [i > median(ytt)*1.1 ? "big" : i > median(ytt)*0.9 ? "avg" : "small" for i in ytt]
#xtest = [2 2 3; 3 2 2; 4 1 2; 4 3 2; 3 7 2]
xtest = rand(TESTRNG,20,3)
ytt2 = [(0.5*x[1]+0.2*x[2]^2+0.3*x[3]+1) for (i,x) in enumerate(eachrow(xtest))]
ytest = [i > median(ytt2)*1.1 ? "big" : i > median(ytt2)*0.9 ? "avg" : "small" for i in ytt2]
out = pegasos(xtrain, ytrain, shuffle=false,nMsgs=0)
ŷtrain = predict(xtrain,out.θ,out.θ₀,out.classes)
ŷtest = predict(xtest,out.θ,out.θ₀,out.classes)
ϵtrain = error(ytrain, mode(ŷtrain))
ϵtest = error(ytest, mode(ŷtest))
@test ϵtrain <= 0.8 # the relation is not linear, so a relatively large error is expected
@test ϵtest <= 0.8
m = PegasosClassifier(shuffle=false,verbosity=NONE, rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
ŷtrain2 = predict(m)
ŷtrain3 = predict(m,xtrain)
@test all([ŷtrain[r][k] ≈ ŷtrain2[r][k] ≈ ŷtrain3[r][k] for k in keys(ŷtrain[1]) for r in 1:length(ŷtrain)])
ŷtest2 = predict(m,xtest)
@test all([ŷtest[r][k] ≈ ŷtest2[r][k] for k in keys(ŷtest[1]) for r in 1:length(ŷtest)])
# ==================================
# NEW TEST
println("Testing classification of the sepal database using perceptron algorithms...")
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
ntrain = Int64(round(size(x,1)*0.8))
xtrain = x[1:ntrain,:]
ytrain = y[1:ntrain]
xtest = x[ntrain+1:end,:]
ytest = y[ntrain+1:end]
model = perceptron(xtrain,ytrain)
ŷtrain = predict(xtrain,model.θ,model.θ₀,model.classes)
@test accuracy(ytrain,mode(ŷtrain)) >= 0.79
ŷtest = predict(xtest,model.θ,model.θ₀,model.classes)
@test accuracy(ytest,mode(ŷtest)) >= 0.9
model = kernel_perceptron_classifier(xtrain,ytrain)
ŷtrain = predict(xtrain,model.x,model.y,model.α,model.classes)
@test accuracy(ytrain,mode(ŷtrain)) >= 0.9
ŷtest = predict(xtest,model.x,model.y,model.α,model.classes)
@test accuracy(ytest,mode(ŷtest)) >= 0.9
model = pegasos(xtrain,ytrain)
ŷtrain = predict(xtrain,model.θ,model.θ₀,model.classes)
@test accuracy(ytrain,mode(ŷtrain)) >= 0.64
ŷtest = predict(xtest,model.θ,model.θ₀,model.classes)
@test accuracy(ytest,mode(ŷtest)) >= 0.76
# ==================================
# NEW TEST
println("Testing MLJ interface for Perceptron models....")
X, y = Mlj.@load_iris
model = BetaML.Bmlj.PerceptronClassifier(rng=copy(TESTRNG))
regressor = Mlj.machine(model, X, y)
(fitresult, cache, report) = Mlj.fit(model, 0, X, y)
yhat = Mlj.predict(model, fitresult, X)
@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat, y)) < 3.1
model = BetaML.Bmlj.KernelPerceptronClassifier(rng=copy(TESTRNG))
regressor = Mlj.machine(model, X, y)
(fitresult, cache, report) = Mlj.fit(model, 0, X, y)
yhat = Mlj.predict(model, fitresult, X)
@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat, y)) < 0.5
model = BetaML.Bmlj.PegasosClassifier(rng=copy(TESTRNG))
regressor = Mlj.machine(model, X, y)
(fitresult, cache, report) = Mlj.fit(model, 0, X, y)
yhat = Mlj.predict(model, fitresult, X)
@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat, y)) < 1.3
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
["MIT"] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 916 |
using Test
using DelimitedFiles, LinearAlgebra
import MLJBase
const Mlj = MLJBase
import StatisticalMeasures
using StableRNGs
rng = StableRNG(123)
using BetaML.Perceptron
println("*** Additional testing for the Perceptron algorithms...")
println("Testing MLJ interface for Perceptron models....")
X, y = Mlj.@load_iris
model = PerceptronClassifier()
regressor = Mlj.machine(model, X, y)
Mlj.evaluate!(regressor, resampling=Mlj.CV(), measure=StatisticalMeasures.LogLoss())
model = KernelPerceptronClassifier()
regressor = Mlj.machine(model, X, y)
Mlj.evaluate!(regressor, resampling=Mlj.CV(), measure=StatisticalMeasures.LogLoss())
model = PegasosClassifier()
regressor = Mlj.machine(model, X, y)
Mlj.evaluate!(regressor, resampling=Mlj.CV(), measure=StatisticalMeasures.LogLoss())
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
["MIT"] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3094 |
using Test
using StableRNGs
import Distributions: Uniform, Gamma, Normal
using BetaML
#TESTRNG = FIXEDRNG # This could change...
TESTRNG = StableRNG(123)
println("*** Testing the Stats module...")
# ==================================
# NEW TEST
println("Testing welchSatterthwaite ...")
d = welchSatterthwaite(2,2,20,20)
@test d == 38
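# Sanity check of the expected value (my arithmetic, not from the library): the
# Welch–Satterthwaite approximation
# ν = (s₁²/n₁ + s₂²/n₂)² / [ (s₁²/n₁)²/(n₁-1) + (s₂²/n₂)²/(n₂-1) ]
# collapses to ν = 2(n-1) when the two groups share the same variance and sample size,
# hence 2*(20-1) = 38 here.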
obs = rand(copy(TESTRNG),Gamma(2,2),50000)
candidates = 0:0.01:maximum(obs)
medianWithAbs = mEstimationBruteForce(obs,candidates)
# @test medianWithAbs ≈ 3.35 # This doesn't work on GitHub CI since 22.09.2022 TODO
medianWithHuberLoss = mEstimationBruteForce(obs,candidates,x->huberLoss(x,0.0000000001))
@test medianWithAbs ≈ 3.35
meanWithHuberLoss = mEstimationBruteForce(obs,candidates,x->huberLoss(x,1000))
@test meanWithHuberLoss ≈ 3.98
# ----------------------------------------
lB = 100; uB = 200
obs = rand(copy(TESTRNG),Uniform(lB,uB),10000)
q0 = findQuantile(obs,0.2)
q0_2 = sort(obs)[Int(round(length(obs)*0.2))]
@test isapprox(q0,q0_2,atol=0.01)
# ----------------------------------------
out = goodnessOfFitDiscrete([205,26,25,19],[0.72,0.07,0.12,0.09],α=0.05)
@test out.testValue ≈ 5.889610389610388
@test out.p_value ≈ 0.1171061913085063
# ----------------------------------------
data = ["a","y","y","b","b","a","b"]
support = ["a","b","y"]
@test computeDensity(data,support) == [2,3,2]
# ----------------------------------------
support = [0,1,2,3]
data = [339,455,180,26]
θhat = sum(data .* support)/(sum(data)*3)
out = goodnessOfFitDiscrete(data,support,Binomial(3,θhat),compressedData=true,α=0.05,d=1)
@test out.testValue ≈ 0.8828551921498722
@test out.p_value ≈ 0.643117653187048
#----------------------------------------
f₀ = Uniform(0,1)
data = [0.8,0.7,0.4,0.7,0.2]
out = ksTest(data,f₀;α=0.05)
@test out.testValue ≈ 0.6708203932499368
@test out.p_value ≈ 0.009598291426747618
# --------------------------------------
#f₀ = Exponential(10)
#f₀ = Normal(0,1)
#f₀ = Uniform(0,10)
f₀ = Normal(0,1)
repetitions = 1000
outs = fill(false,repetitions)
for rep in 1:repetitions
local data = rand(f₀ ,31)
local out = ksTest(data,f₀;α=0.05)
outs[rep] = out.rejectedH₀
end
@test isapprox(sum(outs)/repetitions,0.05,atol=0.05)
#=
# -------------------------
function computeKSTableValue(f₀,N,α,repetitions=1000)
Ts = Array{Float64,1}(undef,repetitions)
for rep in 1:repetitions
data = sort(rand(f₀,N))
N = length(data)
cdfhat = collect(0:N) ./ N
maxDist = 0.0
for (n,x) in enumerate(data)
dist = max(abs(cdfhat[n]-cdf(f₀,x)), abs(cdfhat[n+1]-cdf(f₀,x)))
if dist > maxDist
maxDist = dist
end
end
T = sqrt(N) * maxDist
Ts[rep] = T
end
Ts = sort(Ts)
return Ts[Int(ceil((1-α)*repetitions))]/sqrt(N)
end
(N,α,f₀) = 7,0.05,Normal(20,20)
computeKSTableValue(f₀,N,α,1000000) * sqrt(N)
quantile(Kolmogorov(),1-α)
Distributions.quantile_bisect(KSDist(N),1-α) *sqrt(N)
=#
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
["MIT"] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 16058 |
using Test
using DelimitedFiles, LinearAlgebra
import MLJBase
const Mlj = MLJBase
import StatisticalMeasures
using StableRNGs
#rng = StableRNG(123)
using BetaML
import BetaML.Trees: predictSingle, updateTreesWeights!
import BetaML.Trees: buildTree, buildForest
import BetaML.Trees: findbestgain_sortedvector
import AbstractTrees: printnode
import AbstractTrees: print_tree
import BetaML.Trees: predictSingle
#TESTRNG = FIXEDRNG # This could change...
TESTRNG = StableRNG(123)
println("*** Testing Decision trees/Random Forest algorithms...")
# ==================================
# NEW TEST
# ==================================
println("Testing basic classification of decision trees...")
# ---------------------
xtrain = [
"Green" 3.0;
"Yellow" 3.0;
"Red" 1.0;
"Red" 1.0;
"Yellow" 3.0;
]
ytrain = ["Apple", "Apple", "Grape", "Grape", "Lemon"]
myTree = buildTree(xtrain,ytrain,rng=copy(TESTRNG))
# col 2 == 3.0 ? (col 1 == "Yellow" ? Dict("Lemon"=>0.5,"Apple"=>0.5) : Dict("Apple" => 1.0)) : Dict("Grape" => 1.0)
m = DecisionTreeEstimator(rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
ŷtrain = predict(myTree, xtrain,rng=copy(TESTRNG))
@test predictSingle(myTree, xtrain[5,:], ignore_dims=[2]) == (Dict("Grape" => 0.5, "Lemon" => 0.25, "Apple" => 0.25), 4)
@test predictSingle(myTree, xtrain[5,:], ignore_dims=[1]) == (Dict("Lemon" => 0.3333333333333333, "Apple" => 0.6666666666666666), 3)
ŷtrain2 = predict(m,xtrain)
ŷtrain3 = predict(m) # using cached elements
ŷtrain_partial = predict(myTree, xtrain, ignore_dims=[1])
@test accuracy(ytrain,ŷtrain,rng=copy(TESTRNG)) >= 0.8
@test accuracy(ytrain,ŷtrain_partial,rng=copy(TESTRNG)) < accuracy(ytrain,ŷtrain,rng=copy(TESTRNG))
@test ŷtrain == ŷtrain2 == ŷtrain3
ytrainI = fit!(OrdinalEncoder(),ytrain)
mi = DecisionTreeEstimator(rng=copy(TESTRNG),force_classification=true)
ŷtrainI = fit!(mi,xtrain,ytrainI)
predict(mi,xtrain)
@test accuracy(ytrainI,ŷtrainI, rng=copy(TESTRNG)) == 1
mi = RandomForestEstimator(rng=copy(TESTRNG),force_classification=true)
ŷtrainI = fit!(mi,xtrain,ytrainI)
predict(mi,xtrain)
@test accuracy(ytrainI,ŷtrainI, rng=copy(TESTRNG)) >= 0.8
x = [1 0.1; 2 0.2; 3 0.3; 4 0.4; 5 0.5]
y = ["a","a","b","b","b"]
@test findbestgain_sortedvector(x,y,2,x[:,2];mCols=[],currentUncertainty=gini(y),splitting_criterion=gini,rng=copy(TESTRNG)) == 0.3
wrappedNode = BetaML.wrapdn(myTree)
print("Node printing: ")
printnode(stdout,wrappedNode)
println("")
xtest = [
"Green" 3;
"Yellow" 4;
"Red" 2;
"Red" 1;
"Yellow" 3
]
ytest = ["Apple","Apple","Grape","Grape","Lemon"]
ŷtest = predict(myTree, xtest,rng=copy(TESTRNG))
ŷtest2 = predict(m, xtest)
@test accuracy(ytest,ŷtest,rng=copy(TESTRNG)) >= 0.8
@test ŷtest == ŷtest2
@test info(m) == Dict{String,Any}("job_is_regression" => 0,"fitted_records" => 5,"xndims" => 2,"avg_depth" => 2.6666666666666665, "max_reached_depth" => 3)
# Testing that predictions with ignore_dims do not depend on the values of the ignored dimension
xtrain = rand(100,3)
ytrain = [r[1] * 2 - r[2]*r[1]*1.5-r[2]*5+10 for r in eachrow(xtrain)]
xtrain2 = deepcopy(xtrain)
m = DecisionTreeEstimator(rng=copy(TESTRNG))
fit!(m,xtrain2,ytrain)
y1 = predict(m, xtrain2, ignore_dims=[1])
xtrain3 = hcat(shuffle(xtrain2[:,1]),xtrain2[:,2:end])
y1bis = predict(m, xtrain3, ignore_dims=[1])
@test y1 ≈ y1bis # This must be exactly the same
m2 = DecisionTreeEstimator(rng=copy(TESTRNG))
fit!(m2,xtrain3,ytrain)
y1ter = predict(m2, xtrain3, ignore_dims=[1])
# @test y1ter == y1bis # This is not true in general
@test relative_mean_error(y1bis,y1ter) <= 0.05
# Testing print_tree
X = [1.8 2.5; 0.5 20.5; 0.6 18; 0.7 22.8; 0.4 31; 1.7 3.7];
y = 2 .* X[:,1] .- X[:,2] .+ 3;
mod = DecisionTreeEstimator(max_depth=2)
ŷ = fit!(mod,X,y);
wmod = wrapdn(mod,featurenames=["dim1","dim2"])
print_tree(wmod)
y2 = ["a","b","b","c","b","a"]
mod2 = DecisionTreeEstimator(max_depth=2)
ŷ2 = fit!(mod2,X,y2);
wmod2 = wrapdn(mod2,featurenames=["dim1","dim2"])
print_tree(wmod2)
#print(myTree)
# --------------------------------------------------------------
X = [2 4 10 "aaa" 10; 20 40 100 "gggg" missing; 200 400 1000 "zzzz" 1000]
xtrain = [2 4 10 "aaa"; 200 400 1000 "zzzz"]
ytrain = [10,1000]
xtest = [20 40 100 "gggg"]
m = DecisionTreeEstimator(rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
@test predict(m,xtest)[1] == 505
# ==================================
# NEW TEST
println("Testing classification of the sepal database using decision trees...")
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
ntrain = Int64(round(size(x,1)*0.8))
xtrain = x[1:ntrain,:]
ytrain = y[1:ntrain]
xtest = x[ntrain+1:end,:]
ytest = y[ntrain+1:end]
myTree = buildTree(xtrain,ytrain, splitting_criterion=entropy,rng=copy(TESTRNG))
ŷtrain = predict(myTree, xtrain,rng=copy(TESTRNG))
@test accuracy(ytrain,ŷtrain,rng=copy(TESTRNG)) >= 0.98
ŷtest = predict(myTree, xtest,rng=copy(TESTRNG))
@test accuracy(ytest,ŷtest,rng=copy(TESTRNG)) >= 0.95
# ==================================
# NEW TEST
println("Testing decision trees regression...")
ϵtrain = [1.023,1.08,0.961,0.919,0.933,0.993,1.011,0.923,1.084,1.037,1.012]
ϵtest = [1.056,0.902,0.998,0.977]
xtrain = [0.1 0.2; 0.3 0.5; 0.4 0.1; 0.5 0.4; 0.7 0.9; 0.2 0.1; 0.4 0.2; 0.3 0.3; 0.6 0.9; 0.3 0.4; 0.9 0.8]
ytrain = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtrain[i] for (i,x) in enumerate(eachrow(xtrain))]
xtest = [0.5 0.6; 0.14 0.2; 0.3 0.7; 20.0 40.0;]
ytest = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtest[i] for (i,x) in enumerate(eachrow(xtest))]
myTree = buildTree(xtrain,ytrain, min_gain=0.001, min_records=2, max_depth=3,rng=copy(TESTRNG))
ŷtrain = predict(myTree, xtrain,rng=copy(TESTRNG))
ŷtest = predict(myTree, xtest,rng=copy(TESTRNG))
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.06
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest <= 0.3
m = DecisionTreeEstimator(min_gain=0.001,min_records=2,max_depth=3,rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
@test predict(m,xtrain) == ŷtrain
reset!(m)
@test m.par == nothing
# ==================================
# NEW TEST
println("Testing classification of the sepal database using random forests...")
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
ntrain = Int64(round(size(x,1)*0.8))
xtrain = x[1:ntrain,:]
ytrain = y[1:ntrain]
xtest = x[ntrain+1:end,:]
ytest = y[ntrain+1:end]
myForest = buildForest(xtrain,ytrain,β=0,max_depth=20,oob=true,rng=copy(TESTRNG), fast_algorithm=false)
trees = myForest.trees
treesWeights = myForest.weights
ooberror = myForest.ooberror
ŷtrain = predict(myForest, xtrain,rng=copy(TESTRNG))
@test accuracy(ytrain,ŷtrain,rng=copy(TESTRNG)) >= 0.96
ŷtest = predict(myForest, xtest,rng=copy(TESTRNG))
@test accuracy(ytest,ŷtest,rng=copy(TESTRNG)) >= 0.96
updateTreesWeights!(myForest,xtrain,ytrain;β=1,rng=copy(TESTRNG))
ŷtrain2 = predict(myForest, xtrain,rng=copy(TESTRNG))
@test accuracy(ytrain,ŷtrain2,rng=copy(TESTRNG)) >= 0.98
ŷtest2 = predict(myForest, xtest,rng=copy(TESTRNG))
@test accuracy(ytest,ŷtest2,rng=copy(TESTRNG)) >= 0.96
@test ooberror <= 0.1
m = RandomForestEstimator(max_depth=20,oob=true,beta=0,rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
m.opt.rng=copy(TESTRNG)
ŷtrainNew = predict(m,xtrain)
ŷtrainCached = predict(m)
@test ŷtrainNew == ŷtrain == ŷtrainCached
m.opt.rng=copy(TESTRNG)
ŷtestNew = predict(m,xtest)
@test ŷtestNew == ŷtest
#=
m.options.rng=copy(TESTRNG)
m.learnableparameters.weights = updateTreesWeights!(myForest,xtrain,ytrain;β=1, rng=copy(TESTRNG))
m.learnableparameters.weights == myForest.weights
m.options.rng=copy(TESTRNG)
ŷtrain2New = predict(m,xtrain)
ŷtrain2New == ŷtrain2
m.learnableparameters.trees == trees
m.options.rng=copy(TESTRNG)
ŷtrain3 = predict(m, xtrain)
m.options.rng=copy(TESTRNG)
ŷtest3 = predict(m, xtest)
ŷtrain2 == ŷtrain3
@test accuracy(ŷtest2,ytest,rng=copy(TESTRNG)) ≈ accuracy(ŷtest3,ytest,rng=copy(TESTRNG))
@test info(m)["oob_errors"] ≈ ooberror
=#
predictionsByTree = [] # don't use weights...
for i in 1:30
old = trees[i]
new = m.par.forest.trees[i]
pold = predict(old,xtrain, rng=copy(TESTRNG))
    pnew = predict(new,xtrain, rng=copy(TESTRNG))
push!(predictionsByTree,pold == pnew)
end
@test sum(predictionsByTree) == 30
# ==================================
# NEW TEST
println("Testing random forest regression...")
ϵtrain = [1.023,1.08,0.961,0.919,0.933,0.993,1.011,0.923,1.084,1.037,1.012]
ϵtest = [1.056,0.902,0.998,0.977]
xtrain = [0.1 0.2; 0.3 0.5; 0.4 0.1; 0.5 0.4; 0.7 0.9; 0.2 0.1; 0.4 0.2; 0.3 0.3; 0.6 0.9; 0.3 0.4; 0.9 0.8]
ytrain = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtrain[i] for (i,x) in enumerate(eachrow(xtrain))]
xtest = [0.5 0.6; 0.14 0.2; 0.3 0.7; 20.0 40.0;]
ytest = [(0.1*x[1]+0.2*x[2]+0.3)*ϵtest[i] for (i,x) in enumerate(eachrow(xtest))]
myForest = buildForest(xtrain,ytrain, min_gain=0.001, min_records=2, max_depth=3,rng=copy(TESTRNG), fast_algorithm=true)
trees = myForest.trees
treesWeights = myForest.weights
ŷtrain = predict(myForest, xtrain,rng=copy(TESTRNG))
ŷtest = predict(myForest, xtest,rng=copy(TESTRNG))
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
@test mreTrain <= 0.08
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest <= 0.4
updateTreesWeights!(myForest,xtrain,ytrain;β=50)
ŷtrain2 = predict(myForest, xtrain,rng=copy(TESTRNG))
ŷtest2 = predict(myForest, xtest,rng=copy(TESTRNG))
mreTrain = relative_mean_error(ytrain,ŷtrain2,normrec=true)
@test mreTrain <= 0.08
mreTest = relative_mean_error(ytest,ŷtest2,normrec=true)
@test mreTest <= 0.4
# ==================================
# NEW TEST
println("Testing all possible combinations...")
xtrain = [1 "pippo" 1.5; 3 "topolino" 2.5; 1 "amanda" 5.2; 5 "zzz" 1.2; 7 "pippo" 2.2; 1 "zzz" 1.5; 3 "topolino" 2.1]
ytrain = [x[2][1] <= 'q' ? 5*x[1]-2*x[3] : -5*x[1]+2*x[3] for x in eachrow(xtrain)]
xtest = [2 "pippo" 3.4; 1 "amanda" 1.5; 4 "amanda" 0.5; 2 "topolino" 2.2; 7 "zzz" 3.2]
ytest = [x[2][1] <= 'q' ? 5*x[1]-2*x[3] : -5*x[1]+2*x[3] for x in eachrow(xtest)]
ytrainInt = Int64.(round.(ytrain))
myTree1 = buildTree(xtrain,ytrain,rng=copy(TESTRNG))
myForest = buildForest(xtrain,ytrain,oob=true,rng=copy(TESTRNG)) # TODO (solved 2020-11-30): adding β=1 here caused no problem, but local testing gave a strange error
ooberror = myForest.ooberror
ŷtrain = predict(myForest,xtrain,rng=copy(TESTRNG))
ŷtest = predict(myForest,xtest,rng=copy(TESTRNG))
mreTrain = relative_mean_error(ytrain,ŷtrain,normrec=true)
mreTest = relative_mean_error(ytest,ŷtest,normrec=true)
xtrain[3,3] = missing
xtest[3,2] = missing
myForest = buildForest(xtrain,ytrain,oob=true,β=1,rng=copy(TESTRNG))
ooberror = myForest.ooberror
ŷtrain = predict(myForest,xtrain,rng=copy(TESTRNG))
ŷtest = predict(myForest,xtest,rng=copy(TESTRNG))
mreTrain2 = relative_mean_error(ytrain,ŷtrain,normrec=true)
mreTest2 = relative_mean_error(ytest,ŷtest,normrec=true)
@test mreTest2 <= mreTest * 1.5
m = RandomForestEstimator(oob=true,beta=1,rng=copy(TESTRNG))
fit!(m,xtrain,ytrain)
m.opt.rng=copy(TESTRNG) # the model RNG is consumed at each operation
ŷtest2 = predict(m,xtest)
@test relative_mean_error(ytest,ŷtest,normdim=false,normrec=false) ≈ relative_mean_error(ytest,ŷtest2,normdim=false,normrec=false)
myTree2 = buildTree(xtrain,ytrainInt,rng=copy(TESTRNG))
myTree3 = buildTree(xtrain,ytrainInt, force_classification=true,rng=copy(TESTRNG))
@test typeof(myTree1) <: Trees.DecisionNode && typeof(myTree2) <: Trees.DecisionNode && typeof(myTree3) <: Trees.DecisionNode
reset!(m)
# ==================================
# NEW TEST
println("Testing trees with unsortable and missing X values...")
abstract type AType end
mutable struct SortableType<:AType
x::Int64
y::Int64
end
mutable struct UnsortableType<:AType
x::Int64
y::Int64
end
isless(x::SortableType,y::SortableType) = x.x < y.x
SortableVector = [SortableType(2,4),SortableType(1,5),SortableType(1,8),SortableType(12,5),
SortableType(6,2),SortableType(2,2),SortableType(2,2),SortableType(2,4),
SortableType(6,2),SortableType(1,5),missing,SortableType(2,4),
SortableType(1,8),SortableType(12,5)]
UnSortableVector = [UnsortableType(2,5),UnsortableType(1,3),UnsortableType(1,8),UnsortableType(2,6),
UnsortableType(6,3),UnsortableType(7,9),UnsortableType(2,5),UnsortableType(2,6),
missing,UnsortableType(3,2),UnsortableType(6,3),UnsortableType(2,5),
UnsortableType(7,9),UnsortableType(7,9)]
data = Union{Missing,Float64, String,AType}[
0.9 0.6 "black" "monitor" 10.1
0.3 missing "white" "paper sheet" 2.3
4.0 2.2 missing "monitor" 12.5
0.6 0.5 "white" "monitor" 12.5
3.8 2.1 "gray" "car" 54.2
0.3 0.2 "red" "paper sheet" 2.6
0.1 0.1 "white" "paper sheet" 2.5
0.3 0.2 "black" "monitor" 11.3
0.1 0.2 "black" "monitor" 9.8
0.31 0.2 "white" "paper sheet" 3.7
3.2 1.9 "gray" "car" 64.3
0.4 0.25 "white" "paper" 2.7
0.9 0.4 "black" "monitor" 12.5
4.1 2.1 "gray" "monitor" 13.2
]
X = hcat(data[:,[1,2,3]],UnSortableVector)
y = convert(Vector{String}, data[:,4])
((xtrain,xtest),(ytrain,ytest)) = Utils.partition([X,y],[0.7,0.3],shuffle=false,rng=copy(TESTRNG))
modelβ = buildForest(xtrain,ytrain,5,rng=copy(TESTRNG))
ŷtestβ = predict(modelβ,xtest,rng=copy(TESTRNG))
accβ = accuracy(ytest,ŷtestβ,rng=copy(TESTRNG))
@test accβ >= 0.25
# ==================================
# NEW TEST
println("Testing MLJ interface for Trees models....")
X, y = Mlj.@load_boston
model_dtr = BetaML.Bmlj.DecisionTreeRegressor(rng=copy(TESTRNG))
regressor_dtr = Mlj.machine(model_dtr, X, y)
(fitresult_dtr, cache, report) = Mlj.fit(model_dtr, 0, X, y)
yhat_dtr = Mlj.predict(model_dtr, fitresult_dtr, X)
@test relative_mean_error(y,yhat_dtr,normrec=true) < 0.02
model_rfr = BetaML.Bmlj.RandomForestRegressor(rng=copy(TESTRNG))
regressor_rfr = Mlj.machine(model_rfr, X, y)
(fitresult_rfr, cache, report) = Mlj.fit(model_rfr, 0, X, y)
yhat_rfr = Mlj.predict(model_rfr, fitresult_rfr, X)
@test relative_mean_error(y,yhat_rfr,normrec=true) < 0.06
X, y = Mlj.@load_iris
model_dtc = BetaML.Bmlj.DecisionTreeClassifier(rng=copy(TESTRNG))
regressor_dtc = Mlj.machine(model_dtc, X, y)
(fitresult_dtc, cache, report) = Mlj.fit(model_dtc, 0, X, y)
yhat_dtc = Mlj.predict(model_dtc, fitresult_dtc, X)
@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat_dtc, y)) < 0.0002
@test sum(Mlj.mode.(yhat_dtc) .== y)/length(y) == 1
model_rfc = BetaML.Bmlj.RandomForestClassifier(max_features=3,rng=copy(TESTRNG))
regressor_rfc = Mlj.machine(model_rfc, X, y)
(fitresult_rfc, cache, report) = Mlj.fit(model_rfc, 0, X, y)
yhat_rfc = Mlj.predict(model_rfc, fitresult_rfc, X)
@test Mlj.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat_rfc, y)) < 0.04
sum(Mlj.mode.(yhat_rfc) .== y)/length(y) == 1
# Other MLJ classifier models
#=
import MLJ
X, y = Mlj.@load_iris
MLJ.models(MLJ.matching(X,y))
Model = MLJ.@load XGBoostClassifier # DecisionTreeClassifier # XGBoostClassifier
model = Model()
regressor = MLJ.machine(model, X, y)
(fitresult, cache, report) = MLJ.fit(model, 0, X, y)
yhat = MLJ.predict(model, fitresult, X)
MLJ.mean(StatisticalMeasures.LogLoss(tol=1e-4)(yhat, y))
MLJ.evaluate!(regressor, measure=StatisticalMeasures.LogLoss())
#XGBoostClassifier:
#- fit: https://github.com/alan-turing-institute/MLJModels.jl/blob/3687491b132be8493b6f7a322aedf66008caaab1/src/XGBoost.jl#L600
#- predict :
=#
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 2380 | using Test
using DelimitedFiles, LinearAlgebra
import MLJBase
const Mlj = MLJBase
import StatisticalMeasures
using StableRNGs
#rng = StableRNG(123)
using BetaML
println("*** Additional testing for the Testing Decision trees/Random Forest algorithms...")
println("Testing MLJ interface for Trees models....")
X, y = Mlj.@load_boston
model_dtr = DecisionTreeRegressor()
regressor_dtr = Mlj.machine(model_dtr, X, y)
Mlj.evaluate!(regressor_dtr, resampling=Mlj.CV(), measure=Mlj.rms, verbosity=0)
model_rfr = RandomForestRegressor()
regressor_rfr = Mlj.machine(model_rfr, X, y)
Mlj.evaluate!(regressor_rfr, resampling=Mlj.CV(), measure=Mlj.rms, verbosity=0)
X, y = Mlj.@load_iris
model_dtc = DecisionTreeClassifier()
regressor_dtc = Mlj.machine(model_dtc, X, y)
Mlj.evaluate!(regressor_dtc, resampling=Mlj.CV(), measure=StatisticalMeasures.LogLoss())
model_rfc = RandomForestClassifier(max_features=3)
regressor_rfc = Mlj.machine(model_rfc, X, y)
Mlj.evaluate!(regressor_rfc, resampling=Mlj.CV(), measure=StatisticalMeasures.LogLoss())
#=
using MLJ
X, y = Mlj.@load_boston
MLJ.models(MLJ.matching(X,y))
Tree = @load DecisionTreeRegressor pkg=BetaML
tree = Tree()
=#
@testset "PlotTree" begin
println("Testing 'TreeRecipe' for plotting of Trees models....")
using Plots
using TreeRecipe
println("--> train (and build) a decision tree")
xtrain = [
"Green" 3.0;
"Yellow" 3.0;
"Red" 1.0;
"Red" 1.0;
"Yellow" 3.0;
]
ytrain = ["Apple", "Apple", "Grape", "Grape", "Lemon"]
model = DecisionTreeEstimator()
yhat_train = Trees.fit!(model, xtrain, ytrain)
println("--> add information about feature names")
featurenames = ["Color", "Size"]
wrapped_tree = wrapdn(model, featurenames = featurenames)
println("--> plot the tree using the `TreeRecipe`")
plt = plot(wrapped_tree) # this calls automatically the `TreeRecipe`
display(plt) # show the plot in a window (in VS Code a tab will be opened)
# plot & display will plot the tree `wrapped_tree`.
# Whether the plot is correct has to be checked visually.
end
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 34111 | using Test, Statistics, LinearAlgebra, CategoricalArrays, Random, StableRNGs, DelimitedFiles
import Distributions: Normal
#using StableRNGs
#rng = StableRNG(123)
#using BetaML
#import BetaML.Utils
TESTRNG = FIXEDRNG # This could change...
#TESTRNG = StableRNG(123)
println("*** Testing individual utility functions (module `Utils`)...")
# ==================================
# TEST 1: onehotencoder
@test BetaML.Utils.singleunique([[1,2,3],[7,8],[5]]) == [1,2,3,7,8,5]
@test BetaML.Utils.singleunique([1,4,5,4]) == [1,4,5]
@test BetaML.Utils.singleunique("aaa") == ["aaa"]
m = OneHotEncoder()
x = [3,6,3,4]
ŷ = fit!(m,x)
@test ŷ == [ 1 0 0 0
0 0 0 1
1 0 0 0
0 1 0 0
]
x2 = inverse_predict(m,ŷ)
@test x2 == x
x = [3,6,missing,3,4]
m = OneHotEncoder(categories=[3,4,7],handle_unknown="infrequent",other_categories_name=99)
ŷ = fit!(m,x)
@test isequal(ŷ, [ true false false false
false false false true
missing missing missing missing
true false false false
false true false false])
ŷ2 = predict(m)
ŷ3 = predict(m,x)
@test isequal(ŷ,ŷ2)
@test isequal(ŷ,ŷ3)
x2 = inverse_predict(m,ŷ)
@test isequal(x2,[3,99,missing,3,4])
# Testing OneHotEncoder with vector of dictionaries
y = ["e","g","b"]
ŷ = [Dict("a"=>0.2,"e"=>0.8),Dict("h"=>0.1,"e"=>0.2,"g"=>0.7),Dict("b"=>0.4,"e"=>0.6)]
m = OneHotEncoder(handle_unknown="infrequent")
yoh = fit!(m,y)
ŷoh = predict(m,ŷ)
@test ŷoh == [0.8 0.0 0.0 0.2
0.2 0.7 0.0 0.1
0.6 0.0 0.4 0.0]
# Testing ordinal encoder...
x = ["3","6",missing,"3","4"]
m = OrdinalEncoder(categories=["3","7","4"],handle_unknown="infrequent",other_categories_name="99")
ŷ = fit!(m,x)
@test isequal(ŷ, [1,4,missing,1,3])
ŷ2 = predict(m)
ŷ3 = predict(m,x)
@test isequal(ŷ,ŷ2)
@test isequal(ŷ,ŷ3)
x2 = inverse_predict(m,ŷ)
@test isequal(x2,["3","99",missing,"3","4"])
x = ["1","2","3"]
m = OrdinalEncoder(handle_unknown="missing")
x̂ = fit!(m,x)
x̂m = collect(predict(m,["1","4","3"]))
@test x̂ == [1,2,3] && typeof(x̂) == Vector{Int64}
@test isequal(x̂m,[1,missing,3])
x = [2,1,3]
m = OrdinalEncoder()
x̂ = fit!(m,x)
@test x̂ == x
# ==================================
# TEST 2: softmax
println("** Going through Test2 (softmax and other activation functions)...")
@test isapprox(softmax([2,3,4],β=0.1),[0.3006096053557272,0.3322249935333472,0.36716540111092544])
@test didentity(-1) == 1 && relu(-1) == 0 && drelu(-1) == 0 && celu(-1,α=0.1) == -0.09999546000702375 && dcelu(-1) == 0.36787944117144233 &&
elu(-1,α=0.1) == -0.06321205588285576 && delu(-1) == 0.36787944117144233 && plu(-1) == -1 && dplu(-1) == 1
@test didentity(1) == 1 && relu(1) == 1 && drelu(1) == 1 && celu(1) == 1 && dcelu(1) == 1 &&
elu(1) == 1 && delu(1) == 1 && plu(1) == 1 && dplu(1) == 1
@test dtanh(1) == 0.41997434161402614 && sigmoid(1) == 0.7310585786300049 == dsoftplus(1) && dsigmoid(1) == 0.19661193324148188 &&
softplus(1) == 1.3132616875182228 && mish(1) == 0.8650983882673103 && dmish(1) == 1.0490362200997922
# ==================================
# TEST 3: autojacobian
println("** Going through Test3 (autojacobian)...")
@test isapprox(softmax([2,3,4],β=0.1),[0.3006096053557272,0.3322249935333472,0.36716540111092544])
#import BetaML.Utils: autojacobian
@test autojacobian(x -> (x[1]*2,x[2]*x[3]),[1,2,3]) == [2.0 0.0 0.0; 0.0 3.0 2.0]
b = softmax([2,3,4],β=1/2)
c = softmax([2,3.0000001,4],β=1/2)
# Skipping this test as gives problems in CI for Julia < 1.6
#autoGrad = autojacobian(x->softmax(x,β=1/2),[2,3,4])
realG2 = [(c[1]-b[1])*10000000,(c[2]-b[2])*10000000,(c[3]-b[3])*10000000]
#@test isapprox(autoGrad[:,2],realG2,atol=0.000001)
manualGrad = dsoftmax([2,3,4],β=1/2)
@test isapprox(manualGrad[:,2],realG2,atol=0.000001)
# Manual way is hundred of times faster
#@benchmark autojacobian(softmax2,[2,3,4])
#@benchmark dSoftMax([2,3,4],β=1/2)
# ==================================
# New test
println("** Testing cross-entropy...")
or = crossentropy([1.0,0,0],[0.8,0.001,0.001],weight = [2,1,1])
@test or ≈ 0.4462871026284194
d = dcrossentropy([1.0,0,0],[0.8,0.001,0.001],weight = [2,1,1])
δ = 0.001
dest = crossentropy([1.0,0,0],[0.8+δ,0.101,0.001],weight = [2,1,1])
@test isapprox(dest-or, d[1]*δ,atol=0.0001)
# ==================================
# New test
println("** Testing permutations...")
y = ["a","a","a","b","b","c","c","c"]
yp = getpermutations(y,keepStructure=true)
ypExpected = [
["a", "a", "a", "b", "b", "c", "c", "c"],
["a", "a", "a", "c", "c", "b", "b", "b"],
["b", "b", "b", "a", "a", "c", "c", "c"],
["b", "b", "b", "c", "c", "a", "a", "a"],
["c", "c", "c", "a", "a", "b", "b", "b"],
["c", "c", "c", "b", "b", "a", "a", "a"],
]
@test yp == ypExpected
# ==================================
# New test
println("** Going through testing accuracy...")
y = ["a","a","a","b","b","c","c","c"]
ŷ = ["b","b","a","c","c","a","a","c"]
accuracyConsideringClassLabels = accuracy(y,ŷ) # 2 out of 8
accuracyConsideringAnyClassLabel = accuracy(y,ŷ,ignorelabels=true) # 6 out of 8
@test accuracyConsideringClassLabels == 2/8
@test accuracyConsideringAnyClassLabel == 6/8
# with categorical arrays..
y = CategoricalArray(y)
ŷ = CategoricalArray(ŷ)
accuracyConsideringClassLabels = accuracy(y,ŷ) # 2 out of 8
accuracyConsideringAnyClassLabel = accuracy(y,ŷ,ignorelabels=true) # 6 out of 8
@test accuracyConsideringClassLabels == 2/8
@test accuracyConsideringAnyClassLabel == 6/8
x = [0.01 0.02 0.1 0.05 0.2 0.1 0.05 0.27 0.2;
0.05 0.01 0.2 0.02 0.1 0.27 0.1 0.05 0.2]
y = [3,3]
@test [accuracy(y,x,tol=i) for i in 1:10] == [0.0, 0.5, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
x = [0.3 0.2 0.5; 0.5 0.25 0.25; 0.1 0.1 0.9]
y = [1, 3, 1]
@test accuracy(y,x) == 0.0
@test accuracy(y,x, ignorelabels=true) == 1.0
yest = [0 0.12; 0 0.9]
y = [1,2]
accuracy(y,yest)
yest = [0.0 0.0 0.0;0.0 0.0 0.0;0.0 0.0 0.706138]
y = [2,2,1]
accuracy(y,yest)
# ==================================
# New test
println("** Going through testing scaling...")
x = [1.1 4.1 8.1 missing 8; 2 4 9 7 2; 7 2 9 3 1]
m = Scaler(method=MinMaxScaler(),skip=[1,5])
ŷfit = fit!(m,x)
ŷ = predict(m)
@test all(isequal.(ŷ, [ 1.1 1.0 0.0 missing 8.0
2.0 0.9523809523809526 1.0 1.0 2.0
7.0 0.0 1.0 0.0 1.0]))
ŷ1 = predict(m,x)
@test collect(skipmissing(ŷfit)) == collect(skipmissing(ŷ)) == collect(skipmissing(ŷ1))
x1 = inverse_predict(m,ŷ)
@test collect(skipmissing(x)) == collect(skipmissing(x1))
m2 = Scaler(MinMaxScaler(),skip=[1,5])
fit!(m2,x)
ŷ2 = predict(m2)
@test all(isequal.(ŷ, ŷ2))
m3 = Scaler(skip=[1,5])
fit!(m3,x)
ŷ3 = predict(m3)
@test ŷ3[2,2] == 0.6547832409557988
means = [mean(skipmissing(v)) for v in eachcol(ŷ3)]
vars = [var(skipmissing(v),corrected=false) for v in eachcol(ŷ3)]
@test all(isapprox.(means[2:4],0.0, atol=0.00000000001))
@test all(isapprox.(vars[2:4],1.0, atol=0.00000000001))
m4 = Scaler(StandardScaler(center=false,scale=false))
fit!(m4,x)
ŷ4 =predict(m4,x)
@test all(isequal.(ŷ4,x))
# Check it works with integer matrices..
X = [1 10 100; 2 20 200; 3 30 300]
m = Scaler()
Xs1 = fit!(m,X)
Xs2 = predict(m,X)
@test Xs1 == Xs2 ≈ [-1.224744871391589 -1.224744871391589 -1.224744871391589; 0.0 0.0 0.0; 1.224744871391589 1.224744871391589 1.224744871391589]
@test inverse_predict(m,Xs1) == float.(X)
m2 = Scaler(MinMaxScaler())
Xs1b = fit!(m2,X)
Xs2b = predict(m2,X)
@test Xs1b == Xs2b ≈ [0.0 0.0 0.0; 0.5 0.5 0.5; 1.0 1.0 1.0]
@test inverse_predict(m2,Xs1b) == float.(X)
# Check it works with arrays and can get back to matrix or vector
X = [1,2,3]
m = Scaler()
Xs1c = fit!(m,X)
Xs2c = predict(m,X)
@test Xs1c == Xs2c == Xs1[:,1]
inverse_predict(m,Xs1c) == X
m2 = Scaler(MinMaxScaler())
Xs1d = fit!(m2,X)
Xs2e = predict(m2,X)
@test Xs1d == Xs2e == Xs1b[:,1]
@test inverse_predict(m2,Xs1d) == float.(X)
X2 = makematrix([1,2,3])
m = Scaler()
X2s1c = fit!(m,X2)
inverse_predict(m,X2s1c) == X2
# Test for issue #73
X = [[4000,1000,2000,3000] ["a", "categorical", "variable", "not to scale"] [4,1,2,3] [0.4, 0.1, 0.2, 0.3]]
m1 = Scaler(MinMaxScaler(),skip=[2])
xs1 = fit!(m1,X)
predict(m1,X)
inverse_predict(m1,xs1)
m2 = Scaler(skip=[2])
xs2 = fit!(m2,X)
predict(m2,X)
inverse_predict(m2,xs2)
# ==================================
# New test
println("** Testing batch()...")
@test size.(batch(10,3,rng=copy(TESTRNG)),1) == [3,3,3]
@test size.(batch(10,12,rng=copy(TESTRNG)),1) == [10]
# ==================================
# New test
println("** Testing relative_mean_error()...")
ŷ = [22 142 328; 3 9 31; 5 10 32; 3 10 36]
y = [20 140 330; 1 11 33; 3 8 30; 5 12 38]
p=2
(n,d) = size(y)
# case 1 - average of the relative error (records and dimensions normalised)
avgϵRel = sum(abs.((ŷ-y)./ y).^p)^(1/p)/(n*d)
#avgϵRel = (norm((ŷ-y)./ y,p)/(n*d))
relative_mean_error(y,ŷ,normdim=true,normrec=true,p=p) == avgϵRel
# case 2 - normalised by dimensions (i.e. all dimensions play the same)
avgϵRel_byDim = (sum(abs.(ŷ-y) .^ (1/p),dims=1).^(1/p) ./ n) ./ (sum(abs.(y) .^ (1/p) ,dims=1) ./n)
avgϵRel = mean(avgϵRel_byDim)
@test relative_mean_error(y,ŷ,normdim=true,normrec=false,p=p) == avgϵRel
# case 3
avgϵRel_byRec = (sum(abs.(ŷ-y) .^ (1/p),dims=2).^(1/p) ./ d) ./ (sum(abs.(y) .^ (1/p) ,dims=2) ./d)
avgϵRel = mean(avgϵRel_byRec)
@test relative_mean_error(y,ŷ,normdim=false,normrec=true,p=p) == avgϵRel
# case 4 - average error relativized
avgϵRel = (sum(abs.(ŷ-y).^p)^(1/p) / (n*d)) / (sum( abs.(y) .^p)^(1/p) / (n*d))
#avgϵRel = (norm((ŷ-y),p)/(n*d)) / (norm(y,p) / (n*d))
@test relative_mean_error(y,ŷ,normdim=false,normrec=false,p=p) == avgϵRel
# ==================================
# New test
println("** Testing pca()...")
X = [1 8; 4.5 5.5; 9.5 0.5]
expectedX = [-4.58465 6.63182;-0.308999 7.09961; 6.75092 6.70262]
m = PCAEncoder(encoded_size=2)
fit!(m,X)
ŷ = predict(m)
@test isapprox(ŷ,expectedX,atol=0.00001) || isapprox(ŷ, (.- expectedX),atol=0.00001)
X = [1 10 100; 1.1 15 120; 0.95 23 90; 0.99 17 120; 1.05 8 90; 1.1 12 95]
m = PCAEncoder(max_unexplained_var=0.05)
fit!(m,X)
ŷ = predict(m)
@test 1-m.info["prop_explained_var"] ≈ 1.0556269747774571e-5
@test sum(ŷ) ≈ 662.3492034128955
ŷ2 = predict(m,X)
@test ŷ ≈ ŷ2
# ==================================
# New test
println("** Testing AutoEncoder...")
iris = readdlm(joinpath(@__DIR__,"data","iris_shuffled.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
tuning_method = SuccessiveHalvingSearch(
hpranges = Dict(
"layers_size"=>[2.0,5.0,nothing]
),
res_shares = [0.2, 0.3],
multithreads = true
)
m = AutoEncoder(epochs=400,encoded_size=2,autotune=true,
tunemethod=tuning_method, verbosity=NONE, rng=copy(TESTRNG) )
#m = AutoEncoder(encoded_size=2,rng=copy(TESTRNG))
x2 = fit!(m,x)
x2b = predict(m)
x2c = predict(m,x)
x2d = fit!(m,x)
@test x2 == x2b == x2c
x̂ = inverse_predict(m,x2d)
@test relative_mean_error(x,x̂) < 0.04
((xtrain,xtest),(x2train,x2test),(ytrain,ytest)) = partition([x,x2d,y],[0.8,0.2],rng=copy(TESTRNG))
mc1 = RandomForestEstimator(rng=copy(TESTRNG))
mc2 = RandomForestEstimator(rng=copy(TESTRNG))
fit!(mc1,xtrain,ytrain)
fit!(mc2,x2train,ytrain)
ŷ1 = mode(predict(mc1,xtest))
ŷ2 = mode(predict(mc2,x2test))
em1 = accuracy(ytest,ŷ1)
em2 = accuracy(ytest,ŷ2)
@test em2 > 0.85
x = [0.12 0.31 0.29 3.21 0.21;
0.44 1.21 1.18 13.54 0.85
0.22 0.61 0.58 6.43 0.42;
0.35 0.93 0.91 10.04 0.71;
0.51 1.47 1.46 16.12 0.99;
0.35 0.93 0.91 10.04 0.71;
0.51 1.47 1.46 16.12 0.99;
0.22 0.61 0.58 6.43 0.42;
0.12 0.31 0.29 3.21 0.21;
0.44 1.21 1.18 13.54 0.85];
m = AutoEncoder(encoded_size=2,layers_size=15,epochs=400,autotune=false)
x_reduced = fit!(m,x)
x̂ = inverse_predict(m,x_reduced)
rme = info(m)["rme"]
@test size(x_reduced) == (10,2)
@test size(x̂ ) == (10,5)
@test rme < 0.1
# ==================================
# New test
println("** Testing accuracy() on probs given in a dictionary...")
ŷ = Dict("Lemon" => 0.33, "Apple" => 0.2, "Grape" => 0.47)
y = "Lemon"
@test accuracy(y,ŷ) == 0
@test accuracy(y,ŷ,tol=2) == 1
y = "Grape"
@test accuracy(y,ŷ) == 1
y = "Something else"
@test accuracy(y,ŷ) == 0
ŷ1 = Dict("Lemon" => 0.6, "Apple" => 0.4)
ŷ2 = Dict("Lemon" => 0.33, "Apple" => 0.2, "Grape" => 0.47)
ŷ3 = Dict("Lemon" => 0.2, "Apple" => 0.5, "Grape" => 0.3)
ŷ4 = Dict("Apple" => 0.2, "Grape" => 0.8)
ŷ = [ŷ1,ŷ2,ŷ3,ŷ4]
y = ["Lemon","Lemon","Apple","Lemon"]
@test accuracy(y,ŷ) == 0.5
@test accuracy(y,ŷ,tol=2) == 0.75
# ==================================
# New test
println("** Testing mode(dicts)...")
ŷ1 = Dict("Lemon" => 0.6, "Apple" => 0.4)
ŷ2 = Dict("Lemon" => 0.33, "Apple" => 0.2, "Grape" => 0.47)
ŷ3 = Dict("Lemon" => 0.2, "Apple" => 0.5, "Grape" => 0.3)
ŷ4 = Dict("Apple" => 0.2, "Grape" => 0.8)
ŷ5 = Dict("Lemon" => 0.4, "Grape" => 0.4, "Apple" => 0.2)
ŷ = [ŷ1,ŷ2,ŷ3,ŷ4,ŷ5]
@test mode(ŷ,rng=copy(TESTRNG)) == ["Lemon","Grape","Apple","Grape","Lemon"]
y1 = [1,4,2,5]
y2 = [2,6,6,4]
y = [y1,y2]
@test mode(y,rng=copy(TESTRNG)) == [4,3]
y = vcat(y1',y2')
mode(y,rng=copy(TESTRNG)) == [4,3]
x = ["a","b","b","a","d","b"]
@test mode(x,rng=copy(TESTRNG)) == "b"
# ==================================
# New test
println("** Testing ConfusionMatrix()...")
y = ["Lemon","Lemon","Apple","Grape","Lemon"]
ŷ = ["Lemon","Grape","Apple","Grape","Lemon"]
cm = ConfusionMatrix()
scores1 = fit!(cm,y,ŷ)
scores2 = predict(cm)
res = info(cm)
@test res["scores"] == [2 0 1; 0 1 0; 0 0 1]
@test res["normalised_scores"] == scores1 == scores2 ≈ [ 0.6666666666666666 0.0 0.3333333333333333; 0.0 1.0 0.0; 0.0 0.0 1.0]
@test res["tp"] == [2,1,1] && res["tn"] == [2,4,3] && res["fp"] == [0,0,1] && res["fn"] == [1, 0, 0]
parameters(cm)
# Checking multiple training equal to just training on full data
scores2 = fit!(cm,y,ŷ)
res2 = info(cm)
y3 = vcat(y,y)
ŷ3 = vcat(ŷ,ŷ)
cm3 = ConfusionMatrix()
scores3 = fit!(cm3,y3,ŷ3)
res3 = info(cm3)
@test res2 == res3
# Checking infrequent setting
cm = ConfusionMatrix(categories=["Lemon","Grape"],handle_unknown="infrequent")
scores = fit!(cm,y,ŷ)
res = info(cm)
# Example from https://scikit-learn.org/stable/modules/model_evaluation.html#classification-report
y = [0,1,2,2,0]
ŷ = [0,0,2,1,0]
cm = ConfusionMatrix()
fit!(cm,y,ŷ)
res = info(cm)
@test res["precision"] ≈ [0.6666666666666666, 0.0, 1.0]
@test res["recall"] ≈ [1.0, 0.0, 0.5]
@test res["specificity"] ≈ [0.6666666666666666, 0.75, 1.0]
@test res["f1score"] ≈ [0.8, 0.0, 0.6666666666666666]
@test res["mean_precision"] == (0.5555555555555555, 0.6666666666666666)
@test res["mean_recall"] == (0.5, 0.6)
@test res["mean_specificity"] == (0.8055555555555555, 0.8166666666666667)
@test res["mean_f1score"] == (0.48888888888888893, 0.5866666666666667)
@test res["accuracy"] == 0.6
@test res["misclassification"] == 0.4
#original_stdout = stdout
#(rd, wr) = redirect_stdout()
#redirect_stdout(original_stdout)
#close(wr)
cm = ConfusionMatrix(categories_names = Dict(0=>"0",1=>"1",2=>"2"))
fit!(cm,y,ŷ)
res = info(cm)
@test res["categories"] == ["0","1","2"]
# ==================================
# New test
println("** Testing class_counts()...")
@test class_counts_with_labels(["a","b","a","c","d"]) == Dict("a"=>2,"b"=>1,"c"=>1,"d"=>1)
@test class_counts_with_labels(['a' 'b'; 'a' 'c';'a' 'b']) == Dict(['a', 'b'] => 2,['a', 'c'] => 1)
@test class_counts(["a","b","a","c","d"],classes=["a","b","c","d","e"]) == [2,1,1,1,0]
@test collect(class_counts(['a' 'b'; 'a' 'c';'a' 'b'])) in [[2,1],[1,2]] # Order doesn't matter
# ==================================
# New test
println("** Testing gini()...")
@test gini(['a','b','c','c']) == 0.625 # (1/4) * (3/4) + (2/4) * (2/4) + (1/4)*(3/4)
@test gini([1 10; 2 20; 3 30; 2 20]) == 0.625
#a = -0.01*log2(0.01)
#b = -0.99*log2(0.99)
#c = a+b
#A = -0.49*log2(0.49)
#B = -0.51*log2(0.51)
#C = A+B
# ==================================
# New test
println("** Testing entropy()...")
@test isapprox(entropy([1,2,3]), 1.584962500721156) #-(1/3)*log2(1/3)-(1/3)*log2(1/3)-(1/3)*log2(1/3)
@test isapprox(entropy([1 10; 2 20; 3 30]), 1.584962500721156)
#par = entropy([1,1,1,1,1,0,0,0,0,0,0,0,0,0])
#k1 = entropy([1,1,1,0,0,0])
#k2 = entropy([1,1,0,0,0,0,0,0])
#kidsEntropy = k1 *(6/14) + k2*(8/14)
#gain = par - kidsEntropy
#entropy([0,1,2,3,4,5,6,7])
# ==================================
# New test
println("** Testing mean_dicts()...")
a = Dict('a'=> 0.2,'b'=>0.3,'c'=>0.5)
b = Dict('a'=> 0.3,'b'=>0.1,'d'=>0.6)
c = Dict('b'=>0.6,'e'=>0.4)
d = Dict('a'=>1)
dicts = [a,b,c,d]
@test mean_dicts(dicts) == Dict('a' => 0.375,'c' => 0.125,'d' => 0.15,'e' => 0.1,'b' => 0.25)
@test mean_dicts(dicts,weights=[1,1,97,1]) == Dict('a' => 0.015, 'b' => 0.586, 'c' => 0.005, 'd' =>0.006, 'e' =>0.388)
a = Dict(1=> 0.1,2=>0.4,3=>0.5)
b = Dict(4=> 0.3,1=>0.1,2=>0.6)
c = Dict(5=>0.6,4=>0.4)
d = Dict(2=>1)
dicts = [a,b,c,d]
@test mean_dicts(dicts) == Dict(4 => 0.175, 2 => 0.5, 3 => 0.125, 5 => 0.15, 1 => 0.05)
# ==================================
# New test
println("** Testing partition()...")
m1 = [1:10 11:20 31:40]
m2 = convert(Array{Float64,2},[41:50 51:60])
m3 = makematrix(collect(61:70))
m4 = collect(71:80)
parts = [0.33,0.27,0.4]
out = partition([m1,m2,m3,m4],parts,shuffle=true,rng=copy(TESTRNG))
@test size(out,1) == 4 && size(out[1][3]) == (4,3)
x = [1:10 11:20]
y = collect(31:40)
((xtrain,xtest),(ytrain,ytest)) = partition([x,y],[0.7,0.3], shuffle=false,rng=copy(TESTRNG))
@test xtest[2,2] == 19 && ytest[2] == 39
m1 = [1:2 11:12 31:32 41:42 51:52 61:62]
out = partition(m1,[0.7,0.3],dims=2,rng=copy(TESTRNG))
@test out == [[31 1 51 61; 32 2 52 62],[11 41; 12 42]]
# Testing not numeric matrices
ms = [[11:16 string.([21:26;])],[31:36;]]
out = partition(ms,[0.7,0.3],dims=1,rng=copy(TESTRNG))
@test out[1][2] == [12 "22"; 14 "24"] && out[2][2]== [32,34]
# ==================================
# New test
println("** Testing KFold sampler with a single X matrix...")
data = [11:13 21:23 31:33 41:43 51:53 61:63]
sampleIterator = SamplerWithData(KFold(nsplits=3,nrepeats=1,shuffle=true,rng=copy(TESTRNG)),data,2)
for (i,d) in enumerate(sampleIterator)
if i == 1
@test d[1][1] == [51 61 21 41; 52 62 22 42; 53 63 23 43] && d[2][1] == [31 11; 32 12; 33 13]
elseif i == 2
@test d[1][1] == [31 11 21 41; 32 12 22 42; 33 13 23 43] && d[2][1] == [51 61; 52 62; 53 63]
elseif i ==3
@test d[1][1] == [31 11 51 61; 32 12 52 62; 33 13 53 63] && d[2][1] == [21 41; 22 42; 23 43]
else
@error "There shoudn't be more than 3 iterations for this iterator!"
end
end
println("** Testing KFold sampler with multiple matrices...")
data = [[11:16 string.([21:26;])],[31:36;]]
sampleIterator = SamplerWithData(KFold(nsplits=3,nrepeats=2,shuffle=false,rng=copy(TESTRNG)),data,1)
for (i,d) in enumerate(sampleIterator)
local xtrain, ytrain, xval, yval
(xtrain,ytrain),(xval,yval) = d
if i == 1
@test xtrain == [13 "23"; 14 "24"; 15 "25"; 16 "26"] && ytrain == [33, 34, 35, 36] && xval == [11 "21"; 12 "22"] && yval == [31, 32]
elseif i == 5
@test xtrain == [11 "21"; 12 "22"; 15 "25"; 16 "26"] && ytrain == [31, 32, 35, 36] && xval == [13 "23"; 14 "24"] && yval == [33, 34]
elseif i > 6
@error "There shoudn't be more than 6 iterations for this iterator!"
end
end
println("** Testing cross_validation...")
X = [11:19 21:29 31:39 41:49 51:59 61:69]
Y = [1:9;]
sampler = KFold(nsplits=3,nrepeats=1,shuffle=true,rng=copy(TESTRNG))
(μ,σ) = cross_validation([X,Y],sampler) do trainData,valData,rng
(xtrain,ytrain) = trainData; (xval,yval) = valData
rfmod = RandomForestEstimator(n_trees=30,rng=rng)
fit!(rfmod,xtrain,ytrain)
predictions = predict(rfmod,xval)
ϵ = relative_mean_error(yval,predictions,normrec=false)
return ϵ
end
@test (μ,σ) == (0.3202242202242202, 0.04307662219315022)
# test on stats on multiple outputs...
sampler = KFold(nsplits=3,nrepeats=1,shuffle=true,rng=copy(TESTRNG))
((μ1,μ2),(σ1,σ2)) = cross_validation([X,Y],sampler,verbosity=FULL) do trainData,valData,rng
(xtrain,ytrain) = trainData; (xval,yval) = valData
rfmod = RandomForestEstimator(n_trees=30,rng=rng)
fit!(rfmod,xtrain,ytrain)
predictions = predict(rfmod,xval)
ϵ = relative_mean_error(yval,predictions,normrec=false)
return [ϵ, ϵ .+ 0.1]
end
@test μ1 == μ
@test μ2 ≈ (μ + 0.1)
@test σ1 ≈ σ ≈ σ2
println("** Testing autotuning...")
X = [11:99 99:-1:11]
y = collect(111:199)
tunemethod = GridSearch(hpranges=Dict("max_depth" =>[5,10,nothing], "min_gain"=>[0.0, 0.1, 0.5], "min_records"=>[2,3,5],"max_features"=>[nothing,5,10,30]),multithreads=true)
m = DecisionTreeEstimator(verbosity=NONE,rng=copy(TESTRNG),autotune=true,tunemethod=tunemethod)
hyperparameters(m).tunemethod.res_share=0.8
fit!(m,X,y)
opthp = hyperparameters(m)
#println("Test...")
#println(opthp)
#dump(opthp)
#@test ((opthp.max_depth == 10) && (opthp.min_gain==0.0) && (opthp.min_records==2) && (opthp.max_features==5))
ŷ = predict(m,X)
relative_mean_error(y,ŷ,normrec=false)
@test relative_mean_error(y,ŷ,normrec=false) <= 0.005 # ≈ 0.0023196810438564698
X = [11:99 99:-1:11]
y = collect(111:199)
tunemethod = SuccessiveHalvingSearch(hpranges=Dict("n_trees"=>[5,10,20,30],"max_depth" =>[5,10,nothing], "min_gain"=>[0.0, 0.1, 0.5], "min_records"=>[2,3,5],"max_features"=>[nothing,5,10,30]),multithreads=false)
m = RandomForestEstimator(verbosity=NONE,rng=copy(TESTRNG),autotune=false,tunemethod=tunemethod)
hyperparameters(m).tunemethod.res_shares=[0.3,0.6,0.8]
fit!(m,X,y)
opthp = hyperparameters(m)
#println("Test...")
#println(opthp)
#dump(opthp)
#@test ((opthp.max_depth == 10) && (opthp.min_gain==0.5) && (opthp.min_records==2) && (opthp.max_features==nothing))
ŷ = predict(m,X)
@test relative_mean_error(y,ŷ,normrec=false) <= 0.002 # ≈ 0.0023196810438564698
# ==================================
# New test
println("** Testing consistent_shuffle()...")
a = [1 2 3; 10 20 30; 100 200 300; 1000 2000 3000; 10000 20000 30000]; b = [4,40,400,4000,40000]
out = consistent_shuffle([a,b],rng=copy(FIXEDRNG))
@test out[1] == [1000 2000 3000; 10000 20000 30000; 10 20 30; 1 2 3; 100 200 300] && out[2] == [4000, 40000, 40, 4, 400]
out2 = consistent_shuffle(copy(FIXEDRNG),[a,b])
@test out2 == out
a = [1 2 3 4 5; 10 20 30 40 50]; b = [100 200 300 400 500]
out = consistent_shuffle([a,b],rng=copy(FIXEDRNG),dims=2)
@test out[1] == [4 5 2 1 3; 40 50 20 10 30] && out[2] == [400 500 200 100 300]
# ==================================
# New test
println("** Testing generate_parallel_rngs()...")
x = rand(copy(TESTRNG),100)
function innerFunction(bootstrappedx; rng=Random.GLOBAL_RNG)
sum(bootstrappedx .* rand(rng) ./ 0.5)
end
function outerFunction(x;rng = Random.GLOBAL_RNG)
masterSeed = rand(rng,100:typemax(Int64)) # important: with some RNGs this must be done before generate_parallel_rngs to guarantee independence from the number of threads
rngs = generate_parallel_rngs(rng,Threads.nthreads()) # make new copy instances
results = Array{Float64,1}(undef,30)
Threads.@threads for i in 1:30
tsrng = rngs[Threads.threadid()] # Thread safe random number generator: one RNG per thread
Random.seed!(tsrng,masterSeed+i*10) # But the seeding depends on the loop index i, not on the thread: we get the same results independently of the number of threads
toSample = rand(tsrng, 1:100,100)
bootstrappedx = x[toSample]
innerResult = innerFunction(bootstrappedx, rng=tsrng)
results[i] = innerResult
end
overallResult = mean(results)
return overallResult
end
# Different sequences..
@test outerFunction(x) != outerFunction(x)
# Different values, but same sequence
mainRng = copy(TESTRNG)
a = outerFunction(x, rng=mainRng)
b = outerFunction(x, rng=mainRng)
mainRng = copy(TESTRNG)
A = outerFunction(x, rng=mainRng)
B = outerFunction(x, rng=mainRng)
@test a != b && a == A && b == B
# Same value at each call
a = outerFunction(x,rng=copy(TESTRNG))
b = outerFunction(x,rng=copy(TESTRNG))
@test a == b
# ==================================
# New test
println("** Testing pool1d()...")
x = [1,2,3,4,5,6]
poolsize = 3
out = pool1d(x,poolsize)
@test out == [2.0,3.0,4.0,5.0]
out = pool1d(x,poolsize;f=maximum)
@test out == [3,4,5,6]
# ==================================
# New test
println("** Testing get_parametric_types() (this uses Julia internals!)...")
o = [1,2,3]
T = get_parametric_types(o)[1]
@test T == Int64
println("Testing pairwise and silhouette...")
x = [1 2 3 3; 1.2 3 3.1 3.2; 2 4 6 6.2; 2.1 3.5 5.9 6.3]
pd = pairwise(x)
s1 = silhouette(pd,[1,2,2,2])
@test s1 == [0.0, -0.7590778795827623, 0.5030093571833065, 0.4936350560759424]
s2 = silhouette(pd,[1,1,2,2])
@test s2 == [0.7846062151896173, 0.7590778795827623, 0.8860577617518799, 0.8833580446365146]
# ==================================
# New test
println("** Testing xavier_init....")
previous_npar = 12
this_npar = 32
w1 = xavier_init(previous_npar,this_npar,(4,1,2),eltype=Float32)
@test size(w1) == (4,1,2)
@test eltype(w1) == Float32
w2 = xavier_init(previous_npar,this_npar,4)
@test size(w2) == (4,)
@test eltype(w2) == Float64
# ==================================
# New test
println("** Testing cosine distance....")
x = [0,1]; y = [1,0]
@test cosine_distance(x,y) == 1
# ==================================
# New test
println("** Testing Sobol Index....")
ŷ = [1.0, 2.4, 1.5, 1.8];
ŷ₋₁ = [0.8, 2.5, 1.5, 1.7];
ŷ₋₂ = [0.7,2.6,1.4,1.6];
@test sobol_index(ŷ,ŷ₋₁) ≈ 0.058394160583941625
@test sobol_index(ŷ,ŷ₋₂) ≈ 0.1751824817518249
@test sobol_index(ŷ,ŷ) ≈ 0.0
y = ["a", "c", "c", "b", "c", "a", "c", "b"];
y1 = ["a", "c", "b", "b", "c", "a", "c", "c"];
y2 = ["c", "c", "b", "b", "c", "a", "b", "c"];
yoh = fit!(OneHotEncoder(),y)
y1oh = fit!(OneHotEncoder(),y1)
y2oh = fit!(OneHotEncoder(),y2)
@test sobol_index(yoh,y1oh) ≈ 24.829214102104547
@test sobol_index(yoh,y2oh) ≈ 62.07303525526137
@test sobol_index(yoh,yoh) ≈ 0.0
y = [0.4 0.3 0.3
0.1 0.4 0.5
0.0 0.0 1
0.3 0.2 0.5
]
y1 = [0.5 0.3 0.2
0.1 0.3 0.6
0.0 1.0 0.0
0.4 0.2 0.4
]
@test sobol_index(y,y1) ≈ 82.24215978182593
@test sobol_index(y,y) ≈ 0.0
# Testing kl_divergence
y = [0.4,0.3,0.3]
y1 = [0.5,0.3,0.2]
@test kl_divergence(y,y1) ≈ 0.0467175122614015
@test kl_divergence(y,y) ≈ 0.0
# Testing l2loss_by_cv
x = vcat(rand(copy(TESTRNG),0:0.001:0.6,60,5), rand(copy(TESTRNG),0.4:0.001:1,60,5))
y = [2 * r[1] ^2 - 3 * (r[3] + rand(copy(TESTRNG),0:0.001:0.3)) + 12 + 2 * r[5] - 0.3 * r[1] * r[2] for r in eachrow(x) ]
ycat = [(i < 10) ? "c" : ( (i < 13) ? "a" : "b") for i in y]
losses = ones(7)
losses[1] = l2loss_by_cv(RandomForestEstimator(rng=copy(TESTRNG), verbosity=NONE),(x,y),rng=copy(TESTRNG))
losses[2] = l2loss_by_cv(RandomForestEstimator(rng=copy(TESTRNG), verbosity=NONE),(x,y),rng=copy(TESTRNG))
losses[3] = l2loss_by_cv(RandomForestEstimator(rng=copy(TESTRNG), verbosity=NONE),(x,ycat),rng=copy(TESTRNG))
losses[4] = l2loss_by_cv(NeuralNetworkEstimator(rng=copy(TESTRNG), verbosity=NONE),(x,y),rng=copy(TESTRNG))
losses[5] = l2loss_by_cv(NeuralNetworkEstimator(rng=copy(TESTRNG), verbosity=NONE),(x,fit!(OneHotEncoder(),ycat)),rng=copy(TESTRNG))
losses[6] = l2loss_by_cv(PerceptronClassifier(rng=copy(TESTRNG), verbosity=NONE),(x,ycat),rng=copy(TESTRNG))
losses[7] = l2loss_by_cv(AutoEncoder(rng=copy(TESTRNG), verbosity=NONE),(x,),rng=copy(TESTRNG))
@test losses[1] ≈ losses[2]
println(losses)
@test all(losses .< 0.50)
@test all(losses[[1:3;5:end]] .< 0.26)
# ------------------------------------------------------------------------------
# Feature importance
println("** Testing Feature Importance....")
# Data generation
TEMPRNG = copy(TESTRNG)
N = 1000
xa = rand(TEMPRNG,N,3)
xb = xa[:,1] .* rand.(TEMPRNG,Normal(1,0.5))
x = hcat(xa,xb)
y = [10*r[1]^2-5 for r in eachrow(x)]
ysort = sort(y)
ycat = [(i < ysort[Int(round(N/3))]) ? "c" : ( (i < ysort[Int(round(2*N/3))]) ? "a" : "b") for i in y]
yoh = fit!(OneHotEncoder(),ycat)
((xtrain,xtest),(ytrain,ytest),(ycattrain,ycattest),(yohtrain,yohtest)) = partition([x,y,ycat,yoh],[0.8,0.2],rng=TEMPRNG)
# Several combinations...
fr = FeatureRanker(model=RandomForestEstimator(verbosity=NONE,rng=TEMPRNG),nsplits=5,nrepeats=1,recursive=false,metric="mda",ignore_dims_keyword="ignore_dims",verbosity=NONE,refit=false)
rank = fit!(fr,x,y)
rank2 = predict(fr)
rank3 = predict(fr,x)
@test rank == rank2 == rank3
@test rank[end] == 1
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
loss_by_col_sd = info(fr)["loss_by_col_sd"]
sobol_by_col_sd = info(fr)["sobol_by_col_sd"]
loss_fullmodel = info(fr)["loss_all_cols"]
loss_fullmodel_sd = info(fr)["loss_all_cols_sd"]
ntrials_per_metric = info(fr)["ntrials_per_metric"]
@test size(loss_by_col) == size(sobol_by_col) == size(loss_by_col_sd) == size(loss_by_col_sd) == (4,)
@test sortperm(loss_by_col) == rank
# -
fr = FeatureRanker(model=RandomForestEstimator(verbosity=NONE,rng=TEMPRNG),nsplits=3,nrepeats=2,recursive=true,metric="sobol",ignore_dims_keyword="ignore_dims",verbosity=NONE,refit=false)
rank = fit!(fr,x,ycat)
@test rank[end] == 1
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
ntrials_per_metric = info(fr)["ntrials_per_metric"]
@test ntrials_per_metric == 6
# This is not necessarily true in recursive models because by omitting less important variables we may by chance improve the loss of the next important variables.
# The ordering to retain is the one given by `rank`.
# @test sortperm(sobol_by_col) == rank
#bar(string.(sortperm(sobol_by_col)),sobol_by_col[sortperm(sobol_by_col)],label="sobol by col", yerror=quantile(Normal(1,0),0.975) .* (sobol_by_col_sd[sortperm(sobol_by_col)]./sqrt(3)))
# bar(string.(sortperm(sobol_by_col)),sobol_by_col[sortperm(sobol_by_col)],label="sobol by col")
#bar(string.(rank),sobol_by_col[rank],label="sobol by col following rank")
# -
fr = FeatureRanker(model=NeuralNetworkEstimator(verbosity=NONE,rng=TEMPRNG),nsplits=3,nrepeats=1,recursive=false,metric="sobol",verbosity=NONE,refit=false)
rank = fit!(fr,x,yoh)
@test rank[end] == 1
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
loss_by_col_sd = info(fr)["loss_by_col_sd"]
sobol_by_col_sd = info(fr)["sobol_by_col_sd"]
@test sortperm(sobol_by_col) == rank
#bar(string.(sortperm(loss_by_col)),loss_by_col[sortperm(loss_by_col)],label="loss by col", yerror=quantile(Normal(1,0),0.975) .* (loss_by_col_sd[sortperm(loss_by_col)]./sqrt(3)))
#bar(string.(sortperm(sobol_by_col)),sobol_by_col[sortperm(sobol_by_col)],label="sobol by col", yerror=quantile(Normal(1,0),0.975) .* (sobol_by_col_sd[sortperm(sobol_by_col)]./sqrt(3)))
# -
fr = FeatureRanker(model=NeuralNetworkEstimator(verbosity=NONE,rng=TEMPRNG),nsplits=3,nrepeats=1,recursive=false,metric="sobol",verbosity=NONE,refit=true)
rank = fit!(fr,x,y) # TODO
@test rank[end] == 1
loss_by_col = info(fr)["loss_by_col"]
sobol_by_col = info(fr)["sobol_by_col"]
@test sortperm(sobol_by_col) == rank
# MLJ Tests
# ==================================
# NEW TEST
println("Testing MLJ interface for Utils....")
import MLJBase
const MLJ = MLJBase
X, y = MLJ.@load_iris
model = BetaML.Bmlj.AutoEncoder(encoded_size=2,rng=copy(TESTRNG))
ae = MLJ.machine(model, X)
MLJ.fit!(ae)
X_latent = MLJ.transform(ae, X)
X_recovered = MLJ.inverse_transform(ae,X_latent)
@test relative_mean_error(MLJ.matrix(X),X_recovered) < 0.05
import MLJTestInterface
@testset "generic mlj interface test" begin
fails, summary = MLJTestInterface.test(
[BetaML.Bmlj.AutoEncoder,],
MLJTestInterface.make_regression()[1];
mod=@__MODULE__,
verbosity=0, # bump to debug
throw=false, # set to true to debug
)
@test isempty(fails)
end
#=
using Random, StableRNGs
rDiff(rngFunction,seedBase,seedDiff,repetitions) = norm(rand(rngFunction(seedBase),repetitions) .- rand(rngFunction(seedBase+seedDiff),repetitions))/repetitions
# Seed base 1000: ok
rDiff(StableRNG,1000,1,100000) # 0.00129
rDiff(StableRNG,1000,10,100000) # 0.00129
rDiff(StableRNG,1000,1000,100000) # 0.00129
rDiff(MersenneTwister,1000,1,100000) # 0.00129
rDiff(MersenneTwister,1000,10,100000) # 0.00129
rDiff(MersenneTwister,1000,1000,100000) # 0.00129
# Seed base 10: Still ok
rDiff(StableRNG,10,1,100000) # 0.00129
rDiff(StableRNG,10,10,100000) # 0.00129
rDiff(StableRNG,10,1000,100000) # 0.00129
rDiff(MersenneTwister,10,1,100000) # 0.00129
rDiff(MersenneTwister,10,10,100000) # 0.00129
rDiff(MersenneTwister,10,1000,100000) # 0.00129
# Seed base 1: We start seeing problems for StableRNG..
rDiff(StableRNG,1,1,100000) # 0.00125 <--
rDiff(StableRNG,1,10,100000) # 0.00129
rDiff(StableRNG,1,1000,100000) # 0.00129
rDiff(MersenneTwister,1,1,100000) # 0.00129
rDiff(MersenneTwister,1,10,100000) # 0.00129
rDiff(MersenneTwister,1,1000,100000) # 0.00129
# Seed base 0: Unexpected results for for StableRNG..
rDiff(StableRNG,0,1,100000) # 0.00105 <----------
rDiff(StableRNG,0,2,100000) # 0.00116 <-----
rDiff(StableRNG,0,5,100000) # 0.00123 <---
rDiff(StableRNG,0,10,100000) # 0.00126 <--
rDiff(StableRNG,0,1000,100000) # 0.00129
rDiff(MersenneTwister,0,1,100000) # 0.00130 <-
rDiff(MersenneTwister,0,2,100000) # 0.00129
rDiff(MersenneTwister,0,5,100000) # 0.00129
rDiff(MersenneTwister,0,10,100000) # 0.00129
rDiff(MersenneTwister,0,1000,100000) # 0.00129
=#
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 1408 | using Test
using BetaML
#using Pkg # seems Julia bug: can't find pkg `Pkg` !!
#Pkg.activate(@__DIR__)
# choose what to test with `Pkg.test("BetaML", test_args=["Trees","Clustering","all"])`
# or `$ julia runtests.jl Trees Clustering all`
nArgs = length(ARGS)
if "all" in ARGS
println("Running ALL tests available")
else
println("Running normal testing")
end
# just to reset the file used to save models in several modules
rm("test.jld2", force=true)
if "all" in ARGS || "Utils" in ARGS || nArgs == 0
include("Utils_tests.jl")
end
if "all" in ARGS || "Trees" in ARGS || nArgs == 0
include("Trees_tests.jl")
end
if "all" in ARGS || "Nn" in ARGS || nArgs == 0
include("Nn_tests.jl")
end
if "all" in ARGS || "Perceptron" in ARGS || nArgs == 0
include("Perceptron_tests.jl")
end
if "all" in ARGS || "Clustering" in ARGS || nArgs == 0
include("Clustering_tests.jl")
end
if "all" in ARGS || "GMM" in ARGS || nArgs == 0
include("GMM_tests.jl")
end
if "all" in ARGS || "Stats" in ARGS || nArgs == 0
#include("Stats_tests.jl") strange errors on github only !!
end
if "all" in ARGS || "Imputation" in ARGS || nArgs == 0
include("Imputation_tests.jl")
end
if "all" in ARGS
# run optional long tests
include("Perceptron_tests_additional.jl")
include("Trees_tests_additional.jl")
include("Clustering_tests_additional.jl")
end
rm("test.jld2", force=true)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | code | 3246 | using PyCall
using Flux
using BetaML.Nn
using BenchmarkTools
using Distributions
import LinearAlgebra.BLAS
BLAS.set_num_threads(1)
torch = pyimport("torch")
torch.set_num_threads(1)
NN = torch.nn.Sequential(
torch.nn.Linear(8, 64),
torch.nn.Tanh(),
torch.nn.Linear(64, 32),
torch.nn.Tanh(),
torch.nn.Linear(32, 2),
torch.nn.Tanh()
)
torch_nn(in) = NN(in)
Flux_nn = Chain(Dense(8,64,tanh),
Dense(64,32,tanh),
Dense(32,2,tanh))
BetaML_nn = buildNetwork([
DenseLayer(8,64,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(8+64),sqrt(6)/sqrt(8+64)),64,8),wb=rand(Uniform(-sqrt(6)/sqrt(8+64),sqrt(6)/sqrt(8+64)),64)),
DenseLayer(64,32,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(64+32),sqrt(6)/sqrt(64+32)),32,64),wb=rand(Uniform(-sqrt(6)/sqrt(64+32),sqrt(6)/sqrt(64+32)),32)),
DenseLayer(32,2,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(32+2),sqrt(6)/sqrt(32+2)),2,32),wb=rand(Uniform(-sqrt(6)/sqrt(32+2),sqrt(6)/sqrt(32+2)),2))],
squared_cost,name="Bike sharing regression model")
BetaML_nn = buildNetwork([
DenseLayer(8,64,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(8+64),sqrt(6)/sqrt(8+64)),64,8),wb=zeros(64)),
DenseLayer(64,32,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(64+32),sqrt(6)/sqrt(64+32)),32,64),wb=zeros(32)),
DenseLayer(32,2,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(32+2),sqrt(6)/sqrt(32+2)),2,32),wb=zeros(2))],
squared_cost,name="Bike sharing regression model")
BetaML_nn2 = buildNetwork([
DenseLayer(8,64,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(8+64),sqrt(6)/sqrt(8+64)),64,8),wb=zeros(64), df=dtanh),
DenseLayer(64,32,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(64+32),sqrt(6)/sqrt(64+32)),32,64),wb=zeros(32),df=dtanh),
DenseLayer(32,2,f=tanh,w=rand(Uniform(-sqrt(6)/sqrt(32+2),sqrt(6)/sqrt(32+2)),2,32),wb=zeros(2),df=dtanh)],
squared_cost,name="Bike sharing regression model",dcf=dsquared_cost)
for i in [1, 10, 100, 1000]
println("Batch size: $i")
torch_in = torch.rand(i,8)
flux_in = rand(Float32,8,i)
betaml_in = rand(Float32,i,8)
print("pytorch:")
@btime torch_nn($torch_in)
print("flux :")
@btime Flux_nn($flux_in)
print("betaml :")
@btime predict($BetaML_nn,$betaml_in)
print("betaml2 :")
@btime predict($BetaML_nn2,$betaml_in)
end
#= Output:
Batch size: 1
pytorch: 89.920 μs (6 allocations: 192 bytes)
flux : 3.426 μs (6 allocations: 1.25 KiB)
betaml : 3.046 μs (19 allocations: 3.55 KiB)
betaml2 : 3.089 μs (19 allocations: 3.55 KiB)
Batch size: 10
pytorch: 100.737 μs (6 allocations: 192 bytes)
flux : 19.743 μs (6 allocations: 8.22 KiB)
betaml : 33.137 μs (181 allocations: 34.77 KiB)
betaml2 : 32.259 μs (181 allocations: 34.77 KiB)
Batch size: 100
pytorch: 132.689 μs (6 allocations: 192 bytes)
flux : 184.807 μs (8 allocations: 77.16 KiB)
betaml : 306.326 μs (1801 allocations: 347.08 KiB)
betaml2 : 310.554 μs (1801 allocations: 347.08 KiB)
Batch size: 1000
pytorch: 392.295 μs (6 allocations: 192 bytes)
flux : 1.838 ms (10 allocations: 766.19 KiB)
betaml : 3.172 ms (18490 allocations: 3.40 MiB)
betaml2 : 3.116 ms (18490 allocations: 3.40 MiB)
=#
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 17837 | # Beta Machine Learning Toolkit
_Machine Learning made simple :-)_
<!-- _BetaML: makes simple machine learning tasks easy, and complex machine learning tasks possible._ -->
<img src="assets/BetaML_logo.png" width="300" valign="middle"/> <img src="assets/microExample_white.png" width="500" valign="middle"/>
The **Beta Machine Learning Toolkit** is a package including many algorithms and utilities to implement machine learning workflows in Julia, [Python](https://sylvaticus.github.io/BetaML.jl/stable/tutorials/Betaml_tutorial_getting_started.html#Use-BetaML-in-Python), [R](https://sylvaticus.github.io/BetaML.jl/stable/tutorials/Betaml_tutorial_getting_started.html#Use-BetaML-in-R) and any other language with a Julia binding.
[](https://sylvaticus.github.io/BetaML.jl/stable)
[](https://sylvaticus.github.io/BetaML.jl/dev)
[](https://doi.org/10.21105/joss.02849)
[](https://github.com/sylvaticus/BetaML.jl/actions)
[](http://codecov.io/github/sylvaticus/BetaML.jl?branch=master)
Currently the following models are available:
| BetaML name | MLJ Interface | Category |
| ----------- | ------------- | -------- |
| [`PerceptronClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/Perceptron.html#BetaML.Perceptron.PerceptronClassifier) | [`PerceptronClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.PerceptronClassifier) | _Supervised classifier_ |
| [`KernelPerceptronClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/Perceptron.html#BetaML.Perceptron.KernelPerceptronClassifier) | [`KernelPerceptronClassifier`](https://sylvaticus.github.io/BetaML.jl/MLJ_interface.html#BetaML.Bmlj.KernelPerceptronClassifier) | _Supervised classifier_ |
| [`PegasosClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/Perceptron.html#BetaML.Perceptron.PegasosClassifier) | [`PegasosClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.PegasosClassifier) | _Supervised classifier_ |
| [`DecisionTreeEstimator`](https://sylvaticus.github.io/BetaML.jl/stable/Trees.html#BetaML.Trees.DecisionTreeEstimator) | [`DecisionTreeClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.DecisionTreeClassifier), [`DecisionTreeRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.DecisionTreeRegressor) | _Supervised regressor and classifier_ |
| [`RandomForestEstimator`](https://sylvaticus.github.io/BetaML.jl/stable/Trees.html#BetaML.Trees.RandomForestEstimator) | [`RandomForestClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.RandomForestClassifier), [`RandomForestRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.RandomForestRegressor) | _Supervised regressor and classifier_ |
| [`NeuralNetworkEstimator`](https://sylvaticus.github.io/BetaML.jl/stable/Nn.html#BetaML.Nn.NeuralNetworkEstimator) | [`NeuralNetworkRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.NeuralNetworkRegressor), [`MultitargetNeuralNetworkRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.MultitargetNeuralNetworkRegressor), [`NeuralNetworkClassifier`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.NeuralNetworkClassifier) | _Supervised regressor and classifier_ |
| [`GaussianMixtureRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/GMM.html#BetaML.GMM.GaussianMixtureRegressor) | [`GaussianMixtureRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.GaussianMixtureRegressor), [`MultitargetGaussianMixtureRegressor`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.MultitargetGaussianMixtureRegressor) | _Supervised regressor_ |
| [`GaussianMixtureRegressor2`](https://sylvaticus.github.io/BetaML.jl/stable/GMM.html#BetaML.GMM.GaussianMixtureRegressor2) | | _Supervised regressor_ |
| [`KMeansClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/Clustering.html#BetaML.Clustering.KMeansClusterer) | [`KMeansClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.KMeansClusterer) | _Unsupervised hard clusterer_ |
| [`KMedoidsClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/Clustering.html#BetaML.Clustering.KMedoidsClusterer) | [`KMedoidsClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.KMedoidsClusterer) | _Unsupervised hard clusterer_ |
| [`GaussianMixtureClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/GMM.html#BetaML.GMM.GaussianMixtureClusterer) | [`GaussianMixtureClusterer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.GaussianMixtureClusterer) | _Unsupervised soft clusterer_ |
| [`SimpleImputer`](https://sylvaticus.github.io/BetaML.jl/stable/Imputation.html#BetaML.Imputation.SimpleImputer)| [`SimpleImputer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.SimpleImputer) | _Unsupervised missing data imputer_ |
| [`GaussianMixtureImputer`](https://sylvaticus.github.io/BetaML.jl/stable/Imputation.html#BetaML.Imputation.GaussianMixtureImputer) | [`GaussianMixtureImputer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.GaussianMixtureImputer) | _Unsupervised missing data imputer_ |
| [`RandomForestImputer`](https://sylvaticus.github.io/BetaML.jl/stable/Imputation.html#BetaML.Imputation.RandomForestImputer) | [`RandomForestImputer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.RandomForestImputer) | _Unsupervised missing data imputer_ |
| [`GeneralImputer`](https://sylvaticus.github.io/BetaML.jl/stable/Imputation.html#BetaML.Imputation.GeneralImputer) | [`GeneralImputer`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.GeneralImputer) | _Unsupervised missing data imputer_ |
| [`MinMaxScaler`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.MinMaxScaler) | | _Data transformer_ |
| [`StandardScaler`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.StandardScaler) | | _Data transformer_ |
| [`Scaler`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.Scaler) | | _Data transformer_ |
| [`PCAEncoder`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.PCAEncoder) | | _Unsupervised dimensionality reduction transformer_ |
| [`AutoEncoder`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.AutoEncoder) | [`AutoEncoderMLJ`](https://sylvaticus.github.io/BetaML.jl/stable/MLJ_interface.html#BetaML.Bmlj.AutoEncoderMLJ) | _Unsupervised non-linear dimensionality reduction_ |
| [`OneHotEncoder`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.OneHotEncoder) | | _Data transformer_ |
| [`OrdinalEncoder`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.OrdinalEncoder) | | _Data transformer_ |
| [`ConfusionMatrix`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.ConfusionMatrix) | | _Predictions analysis_ |
| [`FeatureRanker`](https://sylvaticus.github.io/BetaML.jl/stable/Utils.html#BetaML.Utils.FeatureRanker) | | _Predictions analysis_ |
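All the models above share the same basic workflow: construct the model with its hyper-parameters, `fit!` it to the data and then `predict` (plus `inverse_predict` and `info` where applicable). The snippet below is a minimal sketch of that workflow using `RandomForestEstimator`; the data values are made up purely for illustration.
```julia
using BetaML
# Tiny made-up dataset: 8 records, 2 features, a numerical target
x = [1.0 2.5; 1.2 2.3; 3.1 0.5; 3.3 0.4; 0.9 2.8; 3.0 0.7; 1.1 2.6; 3.2 0.6]
y = [10.1, 10.3, 4.9, 4.8, 10.4, 5.0, 10.2, 4.7]
m = RandomForestEstimator(n_trees=30) # 1. construct the model, passing its hyper-parameters
fit!(m, x, y)                         # 2. fit it to the data
ŷ = predict(m, x)                     # 3. obtain predictions
relative_mean_error(y, ŷ)             # evaluate them (here on the training data itself)
```
Unsupervised models (clusterers, imputers, transformers) follow the same pattern, with `fit!` taking only the features.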
Theoretical notes describing many of these algorithms are at the companion repository https://github.com/sylvaticus/MITx_6.86x.
All models are implemented entirely in Julia and are hosted in the repository itself (i.e. they are not wrappers of third-party models).
If your favorite option or model is missing, you can try to implement it yourself and [open a pull request](https://github.com/sylvaticus/BetaML.jl/pulls) to share it (see the section [Contribute](#contribute) below) or request its implementation ([open an issue](https://github.com/sylvaticus/BetaML.jl/issues)). Thanks to its JIT compiler, Julia is indeed in the sweet spot where we can easily write models in a high-level language and still have them run efficiently.
## Documentation
Please refer to the [package documentation](https://sylvaticus.github.io/BetaML.jl/stable) or use the Julia inline help system (just press the question mark `?` and then, on the special help prompt `help?>`, type the module or function name). The package documentation is made of two distinct parts: the first one is an extensively commented tutorial that covers most of the library, while the second one is the reference manual covering the library's API.
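For example, a minimal inline-help session for one of the models listed above could look like this (output omitted):
```text
julia> using BetaML
help?> RandomForestEstimator
```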
If you are looking for introductory material on Julia, have a look at the book "[Julia Quick Syntax Reference](https://www.julia-book.com/)" (Apress, 2019) or the online course "[Scientific Programming and Machine Learning in Julia](https://sylvaticus.github.io/SPMLJ/stable/)".
While implemented in Julia, this package can be easily used in R or Python employing [JuliaCall](https://github.com/Non-Contradiction/JuliaCall) or [PyJulia](https://github.com/JuliaPy/pyjulia) respectively; see [the relevant section](https://sylvaticus.github.io/BetaML.jl/stable/tutorials/Betaml_tutorial_getting_started.html#using_betaml_from_other_languages) in the documentation.
### Examples
- **Using an Artificial Neural Network for multinomial categorisation**
In this example we see how to train a neural network model to predict the species name (5th column) given the floral sepal and petal measures (first 4 columns) in the famous [iris flower dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set).
```julia
# Load Modules
using DelimitedFiles, Random
using Pipe, Plots, BetaML # Load BetaML and other auxiliary modules
Random.seed!(123); # Fix the random seed (to obtain reproducible results).
# Load the data
iris = readdlm(joinpath(dirname(Base.find_package("BetaML")),"..","test","data","iris.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
# Encode the categories (levels) of y using a separate column per each category (aka "one-hot" encoding)
ohmod = OneHotEncoder()
y_oh = fit!(ohmod,y)
# Split the data in training/testing sets
((xtrain,xtest),(ytrain,ytest),(ytrain_oh,ytest_oh)) = partition([x,y,y_oh],[0.8,0.2])
(ntrain, ntest) = size.([xtrain,xtest],1)
# Define the Artificial Neural Network model
l1 = DenseLayer(4,10,f=relu) # The activation function is `ReLU`
l2 = DenseLayer(10,3) # The activation function is `identity` by default
l3 = VectorFunctionLayer(3,f=softmax) # Add a (parameterless) layer whose activation function (`softmax` in this case) is applied to all its nodes at once
mynn = NeuralNetworkEstimator(layers=[l1,l2,l3],loss=crossentropy,descr="Multinomial logistic regression Model Sepal", batch_size=2, epochs=200) # Build the NN and use the cross-entropy as the error function. Switch to auto-tuning with `autotune=true`
# Train the model (using the ADAM optimizer by default)
res = fit!(mynn,fit!(Scaler(),xtrain),ytrain_oh) # Fit the model to the (scaled) data
# Obtain predictions and test them against the ground true observations
ŷtrain = @pipe predict(mynn,fit!(Scaler(),xtrain)) |> inverse_predict(ohmod,_) # Note the scaling and reverse one-hot encoding functions
ŷtest = @pipe predict(mynn,fit!(Scaler(),xtest)) |> inverse_predict(ohmod,_)
train_accuracy = accuracy(ytrain,ŷtrain) # 0.975
test_accuracy = accuracy(ytest,ŷtest) # 0.96
# Analyse model performances
cm = ConfusionMatrix()
fit!(cm,ytest,ŷtest)
print(cm)
```
```text
A ConfusionMatrix BetaMLModel (fitted)
-----------------------------------------------------------------
*** CONFUSION MATRIX ***
Scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "virginica" "versicolor" "setosa"
"virginica" 8 1 0
"versicolor" 0 14 0
"setosa" 0 0 7
Normalised scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "virginica" "versicolor" "setosa"
"virginica" 0.888889 0.111111 0.0
"versicolor" 0.0 1.0 0.0
"setosa" 0.0 0.0 1.0
*** CONFUSION REPORT ***
- Accuracy: 0.9666666666666667
- Misclassification rate: 0.033333333333333326
- Number of classes: 3
N Class precision recall specificity f1score actual_count predicted_count
TPR TNR support
1 virginica 1.000 0.889 1.000 0.941 9 8
2 versicolor 0.933 1.000 0.938 0.966 14 15
3 setosa 1.000 1.000 1.000 1.000 7 7
- Simple avg. 0.978 0.963 0.979 0.969
- Weigthed avg. 0.969 0.967 0.971 0.966
```
```julia
ϵ = info(mynn)["loss_per_epoch"]
plot(1:length(ϵ),ϵ, xlabel="epochs",ylabel="error",legend=nothing,title="Avg. error per epoch on the Sepal dataset")
heatmap(info(cm)["categories"],info(cm)["categories"],info(cm)["normalised_scores"],c=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix")
```
<img src="assets/sepal_errorsPerEpoch.png" width="400"/> <img src="assets/sepal_confusionMatrix.png" width="400"/>
- **Other examples**
Further examples, with more models and more advanced techniques in order to improve predictions, are provided in the documentation tutorial.
Basic examples in Python and R are given [here](https://sylvaticus.github.io/BetaML.jl/stable/tutorials/Betaml_tutorial_getting_started.html#using_betaml_from_other_languages).
Very "micro" examples of usage of the various functions can also be studied in the unit-tests available in the [`test`](https://github.com/sylvaticus/BetaML.jl/tree/master/test) folder.
## Limitations and alternative packages
The focus of the library is skewed toward user-friendliness rather than computational efficiency. While the code is (relatively) easy to read, it is not heavily optimised, and currently all models operate on the CPU and only with data that fits in the computer's memory.
For very large data we suggest specialised packages. See the list below:
Category | Packages
-----------------|-----------------
ML toolkits/pipelines | [ScikitLearn.jl](https://github.com/cstjean/ScikitLearn.jl), [AutoMLPipeline.jl](https://github.com/IBM/AutoMLPipeline.jl), [MLJ.jl](https://joss.theoj.org/papers/10.21105/joss.02704)
Neural Networks | [Flux.jl](https://fluxml.ai/), [Knet](https://github.com/denizyuret/Knet.jl)
Decision Trees | [DecisionTree.jl](https://github.com/bensadeghi/DecisionTree.jl)
Clustering | [Clustering.jl](https://github.com/JuliaStats/Clustering.jl), [GaussianMixtures.jl](https://github.com/davidavdav/GaussianMixtures.jl)
Missing imputation | [Impute.jl](https://github.com/invenia/Impute.jl), [Mice.jl](https://github.com/tom-metherell/Mice.jl)
Variable importance | [ShapML.jl](https://github.com/nredell/ShapML.jl)
## TODO
### Short term
- Implement autotuning of `GaussianMixtureClusterer` using `BIC` or `AIC`
- <del>Add Silhouette method to check cluster validity</del>
- Implement PAM and/or variants for kmedoids
### Mid/Long term
- Add RNN support and improve convolutional layers speed
- Reinforcement learning (Markov decision processes)
- Standardize data sampling in training
- Add GPU
## Contribute
Contributions to the library are welcome. We are particularly interested in the areas covered in the "TODO" list above, but we are open to other areas as well.
Please however consider that the focus is mostly didactic/research, so clear, easy-to-read (and well-documented) code and a simple API with reasonable defaults are more important than highly optimised algorithms. For the same reason, it is fine to use verbose names.
Please open an issue to discuss your ideas or make directly a well-documented pull request to the repository.
While not required by any means, if you are customising BetaML and writing for example your own neural network layer type (by subclassing `AbstractLayer`), your own sampler (by subclassing `AbstractDataSampler`) or your own mixture component (by subclassing `AbstractMixture`), please consider giving it back to the community by opening a pull request to integrate it in BetaML.
## Citations
If you use `BetaML` please cite it as:
- Lobianco, A., (2021). BetaML: The Beta Machine Learning Toolkit, a self-contained repository of Machine Learning algorithms in Julia. Journal of Open Source Software, 6(60), 2849, https://doi.org/10.21105/joss.02849
```Bibtex
@article{Lobianco2021,
doi = {10.21105/joss.02849},
url = {https://doi.org/10.21105/joss.02849},
year = {2021},
publisher = {The Open Journal},
volume = {6},
number = {60},
pages = {2849},
author = {Antonello Lobianco},
title = {BetaML: The Beta Machine Learning Toolkit, a self-contained repository of Machine Learning algorithms in Julia},
journal = {Journal of Open Source Software}
}
```
## Acknowledgements
The development of this package at the _Bureau d'Economie Théorique et Appliquée_ (BETA, Nancy) was supported by the French National Research Agency through the [Laboratory of Excellence ARBRE](http://mycor.nancy.inra.fr/ARBRE/), a part of the “Investissements d'Avenir” Program (ANR 11 – LABX-0002-01).
[](hhttp://www.beta-umr7522.fr/)
---
title: 'BetaML: The Beta Machine Learning Toolkit, a self-contained repository of Machine Learning algorithms in Julia'
tags:
- Julia
- machine learning
- neural networks
- deep learning
- clustering
- decision trees
- random forest
- perceptron
- data science
authors:
- name: Antonello Lobianco^[Corresponding author.]
orcid: 0000-0002-1534-8697
affiliation: "1, 2, 3, 4, 5, 6" # (Multiple affiliations must be quoted)
affiliations:
- name: Université de Lorraine
index: 1
- name: Université de Strasbourg
index: 2
- name: Institut des sciences et industries du vivant et de l'environnement (AgroParisTech)
index: 3
- name: Centre national de la recherche scientifique (CNRS)
index: 4
- name: Institut national de recherche pour l’agriculture, l’alimentation et l’environnement (INRAE)
index: 5
- name: Bureau d'économie théorique et appliquée (BETA)
index: 6
date: 19 April 2021
bibliography: docs/paper/paper.bib
---
<!-- Test it with:
`pandoc --filter pandoc-citeproc --bibliography docs/paper/paper.bib paper.md -o paper.pdf`
-->
# Summary
A series of _machine learning_ algorithms has been implemented and bundled together with several "utility" functions in a single package for the Julia programming language.
Currently, algorithms are available in the areas of classification (perceptron, kernel perceptron, pegasos), neural networks (feed-forward), clustering (kmeans, kmedoids, gmm, missing values imputation) and decision trees/random forests. Development of these algorithms started following the theoretical notes of the MOOC class "Machine Learning with Python: from Linear Models to Deep Learning" from MITx/edX.
This paper presents the motivations and the general approach of the package and gives an overview of its organisation. We refer the reader to the [package documentation](https://sylvaticus.github.io/BetaML.jl/stable) for instructions on how to use the various algorithms provided or to the MOOC notes available on GitHub [@Lobianco:2020] for their mathematical backgrounds.
# Motivations and objectives
`BetaML` provides one of the simplest ways to run ML algorithms in Julia. While many packages already implement specific ML algorithms in Julia, these are fragmented across different packages and often value performance more than usability. For example the popular Deep Learning library Flux [@Innes:2018], while extremely performant and flexible, adopts some design choices that to a beginner could appear odd, like decoupling the neural network object from the training process, or requiring all parameters to be explicitly defined.
In `BetaML` we made the choice to allow the user to experiment with the hyper-parameters of the algorithms, learning them one step at a time. Hence for most functions we provide reasonable default parameters that can be overridden when needed. For example, modelling, training and collecting predictions from a feed-forward artificial neural network with one hidden layer can be as simple as:
```julia
using BetaML.Nn
mynn = buildNetwork([DenseLayer(nIn,nHidden),
DenseLayer(nHidden,nOut)],
squared_cost)
train!(mynn,xtrain,ytrain)
ytrain_est = predict(mynn,xtrain)
ytest_est = predict(mynn,xtest)
```
While much better results can be obtained (in general) by scaling the variables and/or tuning their activation functions, the training parameters or the optimisation algorithm, this code snippet already runs the model using common practices like random mini-batches.
Still BetaML offers a fair level of flexibility. As we didn't aim for heavy optimisation, we were able to keep the API (Application Programming Interface) both beginner-friendly and flexible.
While a great level of flexibility can already be achieved by just employing the full set of model parameters, the greatest flexibility is obtained by customising BetaML and writing, for example, one's own neural network layer type (by subclassing `AbstractLayer`), one's own sampler (by subclassing `AbstractDataSampler`) or one's own mixture component (by subclassing `AbstractMixture`).
While the library is designed for Julia users, the documentation provides examples for using the package from R or Python (thanks to JuliaCall [@Li:2019] and PyJulia [@Arakaki:2020] respectively).
A few packages try to provide a common Julia framework for the various ML algorithms available in Julia, like ScikitLearn.jl [@St-Jean:2020], AutoMLPipeline.jl [@Paulito:2020] or MLJ.jl [@Blaom:2019]. They build upon existing Julia (and/or Python) ML specialised packages. While avoiding the problem of "reinventing the wheel", the wrapping level may unintentionally introduce complications for the end-user, like the need to load the models and learn framework-specific concepts such as _model_ or _machine_ in MLJ or `@pipeline` and `fit_transform!` in AutoMLPipeline.
We chose instead to bundle the main ML algorithms directly within the package. This offers a complementary approach that we feel is more beginner-friendly.
We believe that the BetaML flexibility and simplicity, together with the efficiency and usability of a just-in-time compiled language like Julia and the convenience of having several ML algorithms and data-science utilities in the same package,
will support the needs of that community of students and researchers who, contrary to industrial practitioners or computer science specialists, don't necessarily need to work with very large datasets that don't fit in memory or algorithms that require distributed computation.
# Package organisation
The BetaML toolkit is currently composed of 5 modules: `Utils` provides common data-science utility functions to be used in the other modules, `Perceptron` supplies linear and non-linear classifiers based on the classical Perceptron algorithm, `Nn` allows implementing and training artificial neural networks, `Clustering` includes several clustering algorithms and missing value imputation / collaborative filtering algorithms based on clustering, and finally `Trees` implements decision tree classifiers/regressors together with their most common ensemble method, random forests.
All sub-module functionalities are re-exported at the root level, so the user doesn't need to deal with the sub-modules, but just load the main `BetaML` module.
## The `Utils` module
The `Utils` module is intended to provide functionalities that are either: (a) used in other modules but are not strictly part of that specific module's logic (for example activation functions would be most likely used in neural networks, but could be of more general usage); (b) general methods that are used alongside the ML algorithms, e.g. to improve their predictions capabilities; or (c) general methods to assess the goodness of fits of ML algorithms.
Concerning the first category, `Utils` provides "classical" activation functions (and their respective derivatives) like `relu`, `sigmoid`, `softmax`, but also more recent implementations like `elu` [@Clevert:2015], `celu` [@Barron:2017], `plu` [@Nicolae:2018], `softplus` [@Glorot:2011] and `mish` [@Misra:2019]. Kernel functions (`radial_kernel` - aka "RBF", `polynomial_kernel`), distance metrics (`l1_distance` - aka "Manhattan", `l2_distance`, `l2squared_distance`, `cosine_distance`), and functions typically used to improve numerical stability (`lse`) are also provided with the intention to be available in the different ML algorithms.
Often ML algorithms work better if the data is normalised or dimensions are reduced to those explaining the greatest extent of data variability. This is the purpose of the functions `scale` and `pca` respectively. `scale` scales the data to $\mu=0$ and $\sigma=1$, optionally skipping dimensions that don't need to be normalised (like categorical ones). The related function `get_scalefactors` saves the scaling factors so that inverse scaling (typically for the predictions of the ML algorithm) can be applied. `pca` performs Principal Component Analysis, where the user can specify either the number of dimensions to retain or the maximum approximation error that she/he is willing to accept, either _ex-ante_ or _ex-post_, after having analysed the distribution of the explained variance by number of dimensions. Other "general support" functions provided are `onehotencoder`, `batch`, `partition` and `cross_validation`.
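As an illustration of the data-preparation utilities (this snippet is not part of the original paper and uses made-up data), `partition` splits several matrices or vectors consistently in a single call:
```julia
using BetaML
X = rand(150, 4)     # toy feature matrix: 150 records × 4 dimensions
y = rand(150)        # toy labels, one per record
# Split both X and y at once in a 70% training / 30% testing set (records are shuffled by default)
((xtrain, xtest), (ytrain, ytest)) = partition([X, y], [0.7, 0.3])
size(xtrain)         # (105, 4)
```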
Concerning the last category, several functions are provided to assess the goodness of fit of a single datapoint or of the whole dataset, whether the output of the ML algorithm is in $R^n$ or categorical. Notably, `accuracy` provides categorical accuracy given a probabilistic prediction (as PMF) of a datapoint and `ConfMatrix` allows a detailed analysis of categorical predictions.
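For example, with a probabilistic prediction expressed as a row-wise PMF over the classes, `accuracy` can optionally count a record as correct when the true class is within the `tol` most probable ones. The following is only a sketch: the accepted input formats and the keyword name `tol` are assumptions to be checked against the `accuracy` docstring.
```julia
using BetaML
y     = [1, 2, 2]                                  # ground-truth classes (as integer indices)
ŷprob = [0.6 0.4 0.0; 0.5 0.3 0.2; 0.3 0.7 0.0]    # predicted PMF of each record over the 3 classes
accuracy(y, ŷprob)          # strict: only the most probable class counts
accuracy(y, ŷprob, tol=2)   # looser: correct if the true class is within the 2 most probable ones
```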
Finally, the Bayesian Information Criterion `bic` and Akaike Information Criterion `aic` functions can be used for regularisation.
## The `Perceptron` module
It provides the classical Perceptron linear classifier, a _kernelised_ version of it and "PegasosClassifier" [@Shalev-Shwartz:2011], a gradient-descent based implementation.
The basic Perceptron classifier is implemented in the `perceptron` function, where the user can provide the initial weights and retrieve both the final and the average parameters of the classifier. In `KernelPerceptronClassifier` the user can either pass one of the kernels implemented in `Utils` or implement their own kernel function. `pegasos` performs the classification using a basic stochastic descent method^[We plan to generalise the PegasosClassifier algorithm to use the optimisation algorithms implemented for neural networks.]. Finally `predict` predicts the binary label given the feature vector and the linear coefficients or the error distribution as obtained by the kernel Perceptron algorithm.
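A minimal usage sketch, following the function-based API described in the paper, is reported below. The returned field names (`θ`, `θ₀`, `classes`) mirror the pattern of the package docstrings and are assumptions to verify; in recent versions of the package these low-level functions are superseded by model wrappers such as `PegasosClassifier`.
```julia
using BetaML.Perceptron
xtrain = [1.8 3.5; 7.5 0.8; 0.5 0.5; 4.5 2.5]     # 4 records × 2 features
ytrain = ["a","b","a","b"]                        # labels can be in any format
out = perceptron(xtrain, ytrain)                  # returns the trained θ, θ₀ and the classes
ŷ   = predict([2.0 2.5; 6.0 1.0], out.θ, out.θ₀, out.classes)
```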
## The `Nn` module
Artificial neural networks can be implemented using the functions provided by the `Nn` module.
Currently only feed-forward networks for regression or classification tasks are fully provided, but more complex layers (convolutional, pooling, recursive, ...) can be eventually defined and implemented directly by the user.
The instantiation of the layers required by the network can be done either using one of the layers provided (`DenseLayer`, `DenseNoBiasLayer` or `VectorFunctionLayer`, the latter being a parameterless layer whose activation function, like `softmax` or `pool1d`, is applied to the ensemble of the neurons rather than individually on each of them) or by creating a user-defined layer by subclassing the `AbstractLayer` type and implementing the functions `forward`, `backward`, `get_params`, `get_gradient`, `set_params` and `size`.
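To make the extension mechanism concrete, a skeleton of a user-defined (identity) layer might look as follows. This is only an illustrative sketch: the exact method signatures, and whether each of these functions can be imported from `BetaML.Nn` as written, are assumptions to verify against the `AbstractLayer` documentation.
```julia
using BetaML
import BetaML.Nn: AbstractLayer, forward, backward, get_params, get_gradient, set_params
import Base: size

# A "do nothing" layer with n nodes, returning its input unchanged
struct IdentityLayer <: AbstractLayer
    n::Int
end
forward(l::IdentityLayer, x)                 = x           # output of the layer given the input x
backward(l::IdentityLayer, x, next_grad)     = next_grad   # gradient of the loss w.r.t. the layer input
get_params(l::IdentityLayer)                 = ()          # no learnable parameters...
get_gradient(l::IdentityLayer, x, next_grad) = ()          # ...hence no parameter gradients
set_params(l::IdentityLayer, w)              = nothing     # nothing to update
size(l::IdentityLayer)                       = (l.n, l.n)  # (input size, output size)
```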
While in the provided layers the computation of the derivatives for `backward` and `get_gradient` is coded manually^[For the derivatives of the activation function the user can (a) provide one of the derivative functions defined in `Utils`, (b) implement it by themselves, or (c) just let the library use automatic differentiation (using Zygote) to compute it.], for complex user-defined layers the two functions can benefit from automatic differentiation packages like `Zygote` [@Innes:2018b], eventually wrapped in the function `autojacobian` defined in `Utils`.
Once the layers are defined, the neural network is modelled by setting the layers in an array, giving the network a cost function and a name. The `show` function can be employed to print the structure of the network.
The training of the model is done with the highly parametrisable `train!` function. In a similar way as for the definition of the layers, one can use for training one of the "standard" optimisation algorithms provided (`SGD` and `ADAM`, @Kingma:2014), either using their default values or by fine-tuning their parameters, or define the optimisation algorithm by subclassing the `AbstractOptimisationAlgorithm` type and implementing the `single_update!` and eventually `init_optalg!` methods. Note that the `single_update!` function provides the algorithm with quite a large set of information from the training process, allowing a wide class of optimisation algorithms to be implemented.
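For instance, referring to the network `mynn` built in the snippet above, a training call with an explicitly chosen optimiser could look like the following sketch, where the keyword names (`epochs`, `batch_size`, `opt_alg`) are assumptions to be checked against the `train!` docstring:
```julia
# Train with the ADAM optimiser, 100 epochs and mini-batches of 32 records
train!(mynn, xtrain, ytrain, epochs=100, batch_size=32, opt_alg=ADAM())
```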
## The `Clustering` module
Both the classical `kmeans` and `kmedoids` algorithms are provided (with the difference being that the cluster "representatives" can be any $R^n$ point in `kmeans`, while they are restricted to be one of the data points in `kmedoids`), where different distance metrics can be provided (either those defined in `Utils` or user-provided ones) as well as different initialisation strategies (`random`, `grid`, `shuffle` or `given`).
Alongside these "hard clustering" algorithms, the `Clustering` module provides `gmm`, an implementation of the Expectation-Maximisation algorithm to estimate a generative mixture model, with variance-free and variance-constrained Gaussian mixture components already provided (and again, one can write their own mixture component by subclassing `Mixture` and implementing `init_mixtures!`, `lpdf`, `update_parameters!` and `npar`).
Notably the `gmm` function works also with missing input data, either in one or all dimensions (and in the former case the parameter estimation is based only on the available dimensions).
This, together with the probabilistic assignment nature of the EM algorithm, allows it to be used as a basis for missing value imputation, or even collaborative filtering/recommendation systems, in the `predictMissing` function.
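A sketch of this usage is given below; the number of mixture components is arbitrary and the name of the output field holding the imputed matrix is an assumption (in recent versions of the package this functionality is provided by the dedicated `Imputation` module).
```julia
using BetaML
X = [1.0 10.5; 1.5 missing; 1.8 8.0; 1.7 15.0; 3.2 40.0; missing 38.0; 3.6 40.0]
out = predictMissing(X, 3)   # fit a 3-components GMM and impute the missing values
out.X̂                        # the data matrix with the missing values replaced by their expected values
```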
## The `Trees` module
Like for the other modules, the two algorithms provided by the `Trees` module (decision trees and random forests) have an API that tries to maximise flexibility and user-friendliness: users can train a tree (forest) by just using `buildTree(xtrain,ytrain)` (`buildForest(xtrain,ytrain)`) and then obtain the predictions with `predict([trained tree or forest object],xtrain)`.
The nature of the task (classification or regression) is automatically determined by the numerical nature of the training labels but it can be overridden by the user, together with many other parameters. Support for missing data and the direct usage of mixed categorical and numerical dimensions in the data (without the need to encode the categories) make the algorithms of the `Trees` module very convenient to use.
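A minimal sketch of this workflow on made-up data is reported below (note that the most recent versions of the package also wrap these functions in the `DecisionTreeEstimator` and `RandomForestEstimator` models):
```julia
using BetaML.Trees
xtrain = [1.0 "blue"; 1.5 "red"; 3.2 "blue"; 3.6 "red"]  # mixed numerical/categorical features are supported
ytrain = [1.1, 1.5, 3.1, 3.8]                            # numerical labels → a regression task is inferred
mytree   = buildTree(xtrain, ytrain)
myforest = buildForest(xtrain, ytrain)
ŷ = predict(mytree, xtrain)
```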
# Acknowledgements
This work was supported by a grant overseen by the French National Research Agency (ANR) as part of the "Investissements d'Avenir" program (ANR-11-LABX-0002-01, Lab of Excellence ARBRE).
# References
Okay, here's an **update** of my review from the [pre-review thread](https://github.com/openjournals/joss-reviews/issues/2512)
## What the package provides
The package under review provides pure-julia implementations of two
tree-based models, three clustering models, a perceptron model (with 3
variations) and a basic neural network model. In passing, it should be
noted that all or almost all of these algorithms have existing julia
implementations (e.g., DecisionTree.jl, Clustering.jl, Flux.jl). The package
is used in a course on Machine Learning but integration between the
package and the course is quite loose, as far as I could ascertain
(more on this below).
~~Apart from a library of loss functions, the package provides no
other tools.~~ In addition to the models the package provides a number
of loss functions, as well as activation functions for the neural
network models, and some tools to rescale data. I did not see tools to
automate resampling (such as cross-validation), hyper parameter
optimization, and no model composition (pipelining). The quality of
the model implementations looks good to me, although the author warns
us that "the code is not heavily optimized and GPU [for neural
networks] is not supported "
## Existing machine learning toolboxes in Julia
For context, consider the following multi-paradigm ML
toolboxes written in Julia which are relatively mature, by Julia standards:
package | number of models | resampling | hyper-parameter optimization | composition
-----------------|------------------|-------------|------------------------------|-------------
[ScikitLearn.jl](https://github.com/cstjean/ScikitLearn.jl) | > 150 | yes | yes | basic
[AutoMLPipeline.jl](https://github.com/IBM/AutoMLPipeline.jl)| > 100 | no | no | medium
[MLJ.jl](https://joss.theoj.org/papers/10.21105/joss.02704) | 151 | yes | yes | advanced
In addition to these are several excellent and mature packages
dedicated to neural-networks, the most popular being the AD-driven
Flux.jl package. So far, these provide limited meta-functionality,
although MLJ now provides an interface to certain classes of Flux
models ([MLJFlux](https://github.com/alan-turing-institute/MLJFlux.jl)) and
ScikitLearn.jl provides interfaces to python neural network models
sufficient for small datasets and pedagogical use.
Disclaimer: I am a designer/contributor to MLJ.
**According to the [JOSS requirements](https://joss.theoj.org/about),
Submissions should "Have an obvious research application."** In its
current state of maturity, BetaML is not a serious competitor to the
frameworks above, for contributing directly to research. However, the
author argues that it has pedagogical advantages over existing tools.
## Value as pedagogical tool
I don't think there are many rigorous machine learning courses or
texts closely integrated with models and tools implemented in julia
and it would be useful to have more of these. ~~The degree of
integration in this case was difficult for me to ascertain because I
couldn't see how to access the course notes without formally
registering for the course (which is, however, free).~~ I was also
disappointed to find only one link from doc-strings to course
materials; from this "back door" to the course notes I could find no
reference back to the package, however. Perhaps there is better
integration in course exercises? I couldn't figure this out.
**edit** Okay, I see that I missed the link to the course notes, as
opposed to the course itself. However the notes make only references
to python code and so do not appear to be directly integrated with the
package BetaML.
The remaining argument for BetaML's pedagogical value rests on a
number of perceived drawbacks of existing toolboxes, for the
beginner. Quoting from the JOSS manuscript:
1. "For example the popular Deep Learning library Flux (Mike Innes,
2018), while extremely performant and flexible, adopts some
designing choices that for a beginner could appear odd, for example
avoiding the neural network object from the training process, or
requiring all parameters to be explicitly defined. In BetaML we
made the choice to allow the user to experiment with the
hyperparameters of the algorithms learning them one step at the
time. Hence for most functions we provide reasonable default
parameters that can be overridden when needed."
2. "To help beginners, many parameters and functions have pretty
longer but more explicit names than usual. For example the Dense
layer is a DenseLayer, the RBF kernel is radial_kernel, etc."
3. "While avoiding the problem of “reinventing the wheel”, the
   wrapping level unintentionally introduces some complications for
the end-user, like the need to load the models and learn
MLJ-specific concepts as model or machine. We chose instead to
bundle the main ML algorithms directly within the package. This
offers a complementary approach that we feel is more
beginner-friendly."
Let me respond to these:
1. These criticisms only apply to dedicated neural network
packages, such as Flux.jl; all of the toolboxes listed
above provide default hyper parameters for every model. In the case
of neural networks, user-friendly interaction close to the kind
sought here is available either by using the MLJFlux.jl models
(available directly through MLJ) or by using the python models
provided through ScikitLearn.jl.
2. Yes, shorter names are obstacles for the beginner but hardly
insurmountable. For example, one could provide a cheat sheet
summarizing the models and other functionality needed for the
machine learning course (and omitting all the rest).
3. Yes, not needing to load in model code is slightly more
friendly. On the other hand, in MLJ for example, one can load and
instantiate a model with a single macro. So the main complication
is having to ensure relevant libraries are in your environment. But
this could be solved easily with a `BeginnerPackage` which curates
all the necessary dependencies. I am not convinced beginners should
find the idea of separating hyper-parameters and learned parameters
(the "machines" in MLJ) that daunting. I suggest the author's
criticism may have more to do with their lack of familiarity than a
difficulty for newcomers, who do not have the same preconceptions
from using other frameworks. In any case, the point is moot, as one
can interact with MLJ models directly via a "model" interface and
ignore machines. To see this, I have
[translated](https://github.com/ablaom/ForBetaMLReview) part of a
BetaML notebook into MLJ syntax. There's hardly any difference - if
anything the presentation is simpler (less hassle when splitting
data horizontally and vertically).
In summary, while existing toolboxes might present a course instructor
with a few challenges, these are hardly game-changers. The advantages of
introducing a student to a powerful, mature, professional toolbox *ab initio* far outweigh any drawbacks, in my view.
## Conclusions
To meet the requirements of JOSS, I think either: (i) The BetaML
package needs to demonstrate tighter integration with ~~easily
accessible~~ course materials; or (ii) BetaML needs very substantial
enhancements to make it competitive with existing toolboxes.
Frankly, I believe a greater service to the Julia open-source software
community would be to integrate the author's course materials with one
of the mature ML toolboxes. In the case of MLJ, I would be more than
happy to provide guidance for such a project.
---
## Sundry comments
I didn't have too much trouble installing the package or running the
demos, except when running a notebook on top of an existing Julia
environment (see comment below).
- **added** The repository states quite clearly that the primary
purpose of the package is didactic (for teaching purposes). If this
is true, the paper should state this clearly in the "Summary" (not
just that it was developed in response to the course).
- **added** The authors should reference for comparison the toolboxes
ScikitLearn.jl and AutoMLPipeline.jl
- The README.md should provide links to the toolboxes listed in
the table above, for the student who "graduates" from BetaML.
- Some or most intended users will be new to Julia, so I suggest
including with the installation instructions something about how to
set up a julia environment that includes BetaML. Something like
[this](https://alan-turing-institute.github.io/MLJ.jl/dev/#Installation-1), for example.
- I found it weird that the front-facing demo is an *unsupervised*
model. A more "Hello World" example might be to train a Decision
Tree.
- The way users load the built-in datasets seems pretty awkward. Maybe
just define some functions to do this? E.g.,
`load_bike_sharing()`. Might be instructive to have examples where
data is pulled in using `RDatasets`, `UrlDownload` or similar?
- A cheat-sheet summarizing the model fitting functions and the loss
functions would be helpful. Or you could have functions `models()` and
`loss_functions()` that list these.
- I found it pretty annoying to split data by hand the way this is
done in the notebooks and even beginners might find this
annoying. One utility function here would go a long way to making
life easier here (something like the `partition` function in the
MLJ, which you are welcome to lift).
- The notebooks are not portable as they do not come with a
Manifest.toml. One suggestion on how to handle this is
[here](https://github.com/ablaom/ForBetaMLReview/blob/main/bike_sharing.ipynb)
but you should add a comment in the notebook explaining that the
notebook is only valid if it is accompanied by the Manifest.toml. I
think an even better solution is provided by InstantiateFromUrl.jl
but I haven't tried this yet.
- The name `em` for the expectation-maximization clustering algorithm
is very terse, and likely to conflict with a user variable. I admit, I had
to dig up the doc-string to find out what it was.
_Originally posted by @ablaom in https://github.com/openjournals/joss-reviews/issues/2849#issuecomment-730694698_
# Author's response to @ablaom review 1
Above all, I would like to thank the reviewer for having taken the time to provide the review and the useful suggestions he brings. I have implemented most of them, as they helped improve the software.
My detailed response is below.
```
Okay, here's an **update** of my review from the [pre-review thread](https://github.com/openjournals/joss-reviews/issues/2512)
## What the package provides
The package under review provides pure-julia implementations of two
tree-based models, three clustering models, a perceptron model (with 3
variations) and a basic neural network model. In passing, it should be
noted that all or almost all of these algorithms have existing julia
implementations (e.g., DecisionTree.jl, Clustering.jl, Flux.jl).
```
While "most" of the functionality is indeed already present, from the user point of view it is not necessarily accessed in the same way, and for some functionality, like missing value imputation using GMM models, I am not aware of other implementations in Julia. Also the kind of output is often different from current implementations. For example most classifiers in BetaML report the whole PMF of the various items rather than the mode. Together with the fact that the function `accuracy` has an extra optional parameter for selecting the range of items within which to consider the estimate correct, one can train a classifier that is best at returning a correct value, for example, within the 2 most probable results (rather than the single most probable one). This can be useful in some applications where the second-best is also an acceptable value.
```
The package
is used in a course on Machine Learning but integration between the
package and the course is quite loose, as far as I could ascertain
(more on this below).
```
I am sorry for the misunderstanding here. I am not affiliated with that course. The course referenced uses Python to teach the algorithms, while I believe a Julia approach, when dealing with the internals of the algorithms (as opposed to "just" using some API), is more appropriate; this is why I translated, and generalised, the code in Julia.
```
~~Apart from a library of loss functions, the package provides no
other tools.~~ In addition to the models the package provides a number
of loss functions, as well as activation functions for the neural
network models, and some tools to rescale data. I did not see tools to
automate resampling (such as cross-validation), hyper parameter
optimization, and no model composition (pipelining). The quality of
the model implementations looks good to me, although the author warns
us that "the code is not heavily optimized and GPU [for neural
networks] is not supported "
```
While tools for automatic sampling and cross-validation may be in scope for BetaML, I believe that the added value of pipelining in a language like Julia is not as strong as it is for other programming languages.
In R and Python for example loops are slow, and it definitely helps to have a fast library implementing, for example, hyper-parameter tuning.
Julia is instead highly expressive and has fast loops at the same time. The computational and convenience benefits of using a specific framework to build a chain of models or tune the hyper-parameters must be balanced against the flexibility and ease of using just the "core" Julia functionalities to do the same, so that the advantage is partially offset and depends on the situation.
```
## Existing machine learning toolboxes in Julia
For context, consider the following multi-paradigm ML
toolboxes written in Julia which are relatively mature, by Julia standards:
package | number of models | resampling | hyper-parameter optimization | composition
-----------------|------------------|-------------|------------------------------|-------------
[ScikitLearn.jl](https://github.com/cstjean/ScikitLearn.jl) | > 150 | yes | yes | basic
[AutoMLPipeline.jl](https://github.com/IBM/AutoMLPipeline.jl)| > 100 | no | no | medium
[MLJ.jl](https://joss.theoj.org/papers/10.21105/joss.02704) | 151 | yes | yes | advanced
In addition to these are several excellent and mature packages
dedicated to neural-networks, the most popular being the AD-driven
Flux.jl package. So far, these provide limited meta-functionality,
although MLJ now provides an interface to certain classes of Flux
models ([MLJFlux](https://github.com/alan-turing-institute/MLJFlux.jl)) and
ScikitLearn.jl provides interfaces to python neural network models
sufficient for small datasets and pedagogical use.
Disclaimer: I am a designer/contributor to MLJ.
**According to the [JOSS requirements](https://joss.theoj.org/about),
Submissions should "Have an obvious research application."** In its
current state of maturity, BetaML is not a serious competitor to the
frameworks above, for contributing directly to research. However, the
author argues that it has pedagogical advantages over existing tools.
## Value as pedagogical tool
I don't think there are many rigorous machine learning courses or
texts closely integrated with models and tools implemented in julia
and it would be useful to have more of these. ~~The degree of
integration in this case was difficult for me to ascertain because I
couldn't see how to access the course notes without formally
registering for the course (which is, however, free).~~ I was also
disappointed to find only one link from doc-strings to course
materials; from this "back door" to the course notes I could find no
reference back to the package, however. Perhaps there is better
integration in course exercises? I couldn't figure this out.
**edit** Okay, I see that I missed the link to the course notes, as
opposed to the course itself. However the notes make only references
to python code and so do not appear to be directly integrated with the
package BetaML.
The remaining argument for BetaML's pedagogical value rests on a
number of perceived drawbacks of existing toolboxes, for the
beginner. Quoting from the JOSS manuscript:
1. "For example the popular Deep Learning library Flux (Mike Innes,
2018), while extremely performant and flexible, adopts some
designing choices that for a beginner could appear odd, for example
avoiding the neural network object from the training process, or
requiring all parameters to be explicitly defined. In BetaML we
made the choice to allow the user to experiment with the
hyperparameters of the algorithms learning them one step at the
time. Hence for most functions we provide reasonable default
parameters that can be overridden when needed."
2. "To help beginners, many parameters and functions have pretty
longer but more explicit names than usual. For example the Dense
layer is a DenseLayer, the RBF kernel is radial_kernel, etc."
3. "While avoiding the problem of “reinventing the wheel”, the
wrapping level unin- tentionally introduces some complications for
the end-user, like the need to load the models and learn
MLJ-specific concepts as model or machine. We chose instead to
bundle the main ML algorithms directly within the package. This
offers a complementary approach that we feel is more
beginner-friendly."
Let me respond to these:
1. These cricitism only apply to dedicated neural network
packages, such as Flux.jl; all of the toolboxes listed
above provide default hyper parameters for every model. In the case
of neural networks, user-friendly interaction close to the kind
sought here is available either by using the MLJFlux.jl models
(available directly through MLJ) or by using the python models
provided through ScikitLearn.jl.
2. Yes, shorter names are obstacles for the beginner but hardly
insurmountable. For example, one could provide a cheat sheet
summarizing the models and other functionality needed for the
machine learning course (and omitting all the rest).
3. Yes, not needing to load in model code is slightly more
friendly. On the other hand, in MLJ for example, one can load and
instantiate a model with a single macro. So the main complication
is having to ensure relevant libraries are in your environment. But
this could be solved easily with a `BeginnerPackage` which curates
all the necessary dependencies. I am not convinced beginners should
find the idea of separating hyper-parameters and learned parameters
(the "machines" in MLJ) that daunting. I suggest the author's
criticism may have more to do with their lack of familiarity than a
difficulty for newcomers, who do not have the same preconceptions
from using other frameworks. In any case, the point is moot, as one
can interact with MLJ models directly via a "model" interface and
ignore machines. To see this, I have
[translated](https://github.com/ablaom/ForBetaMLReview) part of a
BetaML notebook into MLJ syntax. There's hardly any difference - if
anything the presentation is simpler (less hassle when splitting
data horizontally and vertically).
In summary, while existing toolboxes might present a course instructor
with a few challenges, these are hardly game-changers. The advantages of
introducing a student to a powerful, mature, professional toolbox *ab*
*initio* far outweigh any drawbacks, in my view.
```
I rephrased the readme.md of the package, as the project evolved from being a mere "rewriting" of algorithms in Julia.
The focus of the package is on accessibility for people from different backgrounds, and consequently different interests, than researchers or practitioners in computer science.
The current ML ecosystem in Julia is out of reach for some kinds of PhD students and researchers, for example many in my lab.
They have different research interests and don't have the time to dive deeply into ML, "just" applying it (often to small datasets) to their concrete problems. So the way the algorithms are accessed is particularly important. This is why, for example, both the decision tree / GMM algorithms in BetaML accept data with missing values, and it is not necessary to specify in the decision tree algorithm the kind of job (regression/classification), as this is automatically inferred from the type of the labels (this is also true for DecisionTree.jl, but using two different APIs, `DecisionTreeRegressor`/`DecisionTreeClassifier` on one side and `build_tree` on the other). This is an example where we explicitly traded efficiency for simplicity, as adding support for missing data directly in the algorithms considerably reduces their performance (and this is the reason, I assume, the leading packages don't implement it).
```
## Conclusions
To meet the requirements of JOSS, I think either: (i) The BetaML
package needs to demonstrate tighter integration with ~~easily
accessible~~ course materials; or (ii) BetaML needs very substantial
enhancements to make it competitive with existing toolboxes.
Frankly, a believe a greater service to the Julia open-source software
community would be to integrate the author's course materials with one
of the mature ML toolboxes. In the case of MLJ, I would be more than
happy to provide guidance for such a project.
```
I do appreciate both the Reviewer's comments and MLJ as a mature, state-of-the-art framework; I just believe that there is space for a different approach with different use cases.
---
```
## Sundry comments
I didn't have too much trouble installing the package or running the
demos, except when running a notebook on top of an existing Julia
environment (see commment below).
- **added** The repository states quite clearly that the primary
purpose of the package is dilectic (for teaching purposes). If this
is true, the paper should state this clearly in the "Summary" (not
just that it was developed in response to the course).
```
As specified in a previous comment, the focus is on usability, whether this is important for didactic or applied research purposes.
```
- **added** The authors should reference for comparison the toolboxes
ScitkitLearn.jl and AutoMLPipeline.jl
- The README.md should provide links to the toolboxes listed in
the table above, for the student who "graduates" from BetaML.
```
I added an "Alternative packages" section that lists the most relevant and mature Julia packages in the topics covered by BetaML.
```
- Some or most intended users will be new to Julia, so I suggest
including with the installation instructions something about how to
set up a julia environment that includes BetaML. Something like
[this](https://alan-turing-institute.github.io/MLJ.jl/dev/#Installation-1), for example.
- A cheat-sheet summarizing the model fitting functions and the loss
functions would be helpful. Or you could have functions `models()` and
`loss_functions()` that list these.
```
As BetaML is a much smaller package than MLJ, I believe the "Installation" and "Loading the module(s)" sections (for the first point) and the "Usage" section (for the second one) in the documentation suffice.
```
- I found it weird that the front-facing demo is an *unsupervised*
model. A more "Hello World" example might be to train a Decision
Tree.
```
I added a basic Random Forest example in the Readme.md so as to provide readers with an overview of different techniques to analyse the same dataset (iris).
```
- The way users load the built-in datasets seems pretty awkward. Maybe
just define some functions to do this? E.g.,
`load_bike_sharing()`. Might be instructive to have examples where
data is pulled in using `RDatasets`, `UrlDownload` or similar?
```
I now load the data using a path relative to the package base path. In this way the script should load the correct data whatever the current directory from which the user calls it.
```
- I found it pretty annoying to split data by hand the way this is
done in the notebooks and even beginners might find this
annoying. One utility function here would go a long way to making
life easier here (something like the `partition` function in the
MLJ, which you are welcome to lift).
```
Thank you. I did indeed add a simple partition function to allow partitioning multiple matrices in one line, e.g.
`((xtrain,xtest),(ytrain,ytest)) = partition([x,y],[0.7,0.3])`.
Note that a release of the software including the new `partition` function has still to be made.
```
- The notebooks are not portable as they do not come with a
Manifest.toml. One suggestion on how to handle this is
[here](https://github.com/ablaom/ForBetaMLReview/blob/main/bike_sharing.ipynb)
but you should add a comment in the notebook explaining that the
notebook is only valid if it is accompanied by the Manifest.toml. I
think an even better solution is provided by InstantiateFromUrl.jl
but I haven't tried this yet.
```
Having a manifest means that I need to keep it updated and that the user understands what it is.
Instead the notebooks all have a section at the beginning where the required packages are loaded. In this way, even if the user just copies and pastes the code into his/her own IDE, it will likely work.
A related issue is to guarantee that the notebooks are kept in sync with the code. I noticed that the reviewer uses Literate.jl; I may consider it, as it helps keep the examples under testing control.
```
- The name `em` for the expectation-maximization clustering algorithm
is very terse, and likely to conflict with a user variable. I admit, I had
to dig up the doc-string to find out what it was.
```
I agree and changed the name to `gmm`.
# [The BetaML.Api Module](@id api_module)
```@docs
Api
```
## Module Index
```@index
Modules = [Api]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Api]
Private = false
```
# [Api v2 - developer documentation (API implementation)](@id api_implementation)
Each model is a child of either `BetaMLSupervisedModel` or `BetaMLUnsupervisedModel`, both in turn children of `BetaMLModel`:
```
BetaMLSupervisedModel <: BetaMLModel
BetaMLUnsupervisedModel <: BetaMLModel
RandomForestEstimator <: BetaMLSupervisedModel
```
The model struct is composed of the following elements:
```
mutable struct DecisionTreeEstimator <: BetaMLSupervisedModel
    hpar::DecisionTreeE_hp # Hyper-parameters
opt::BML_options # Option sets, default or a specific one for the model
par::DT_lp # Model learnable parameters (needed for predictions)
cres::T # Cached results
trained::Bool # Trained flag
info # Complementary information, but not needed to make predictions
end
```
Each specific model's hyperparameter set and learnable parameter set are children of `BetaMLHyperParametersSet` and `BetaMLLearnedParametersSet` and, if a specific model option set is used, this would be a child of `BetaMLOptionsSet`.
While hyperparameters are elements that control the learning process, i.e. they influence the model training and prediction, the options have a more general meaning and do not directly affect the training (though they can do so indirectly, like the rng). The default option set is implemented as:
```
Base.@kwdef mutable struct BML_options
"Cache the results of the fitting stage, as to allow predict(mod) [default: `true`]. Set it to `false` to save memory for large data."
cache::Bool = true
"An optional title and/or description for this model"
descr::String = ""
"The verbosity level to be used in training or prediction (see [`Verbosity`](@ref)) [deafult: `STD`]
"
verbosity::Verbosity = STD
"Random Number Generator (see [`FIXEDSEED`](@ref)) [deafult: `Random.GLOBAL_RNG`]
"
rng::AbstractRNG = Random.GLOBAL_RNG
end
```
Note that the user doesn't generally need to distinguish between a hyperparameter and an option, as both are provided as keyword arguments to the model constructor thanks to a model constructor like the following one:
```
function KMedoidsClusterer(;kwargs...)
m = KMedoidsClusterer(KMeansMedoidsHyperParametersSet(),BML_options(),KMeansMedoids_lp(),nothing,false,Dict{Symbol,Any}())
thisobjfields = fieldnames(nonmissingtype(typeof(m)))
for (kw,kwv) in kwargs
found = false
for f in thisobjfields
fobj = getproperty(m,f)
if kw in fieldnames(typeof(fobj))
setproperty!(fobj,kw,kwv)
found = true
end
end
found || error("Keyword \"$kw\" is not part of this model.")
end
return m
end
```
So, in order to implement a new model we need to (a minimal sketch is given after the list):
- implement its struct and constructor
- implement the corresponding `ModelHyperParametersSet`, `ModelLearnedParametersSet` and eventually `ModelOptionsSet`.
- define `fit!(model, X, [y])`, `predict(model,X)` and eventually `inverse_predict(model,X)`.
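A minimal sketch of these steps for a hypothetical `MeanEstimator` model (which simply learns the column means of the training data) is given below. All names are illustrative: the constructor is simplified with respect to the keyword-looping `KMedoidsClusterer` example above, and the exact abstract type names (and whether they are exported) should be double-checked against an existing model.
```
using Statistics, BetaML
import BetaML: fit!, predict

# Hyper-parameters (only a placeholder one here)
Base.@kwdef mutable struct MeanEstimator_hp <: BetaMLHyperParametersSet
    "An unused example hyper-parameter"
    dummy::Float64 = 0.0
end
# Learnable parameters: the column means
Base.@kwdef mutable struct MeanEstimator_lp <: BetaMLLearnedParametersSet
    μ::Union{Nothing,Vector{Float64}} = nothing
end
mutable struct MeanEstimator <: BetaMLUnsupervisedModel
    hpar::MeanEstimator_hp               # Hyper-parameters
    opt::BML_options                     # Option set
    par::Union{Nothing,MeanEstimator_lp} # Learnable parameters
    cres                                 # Cached results
    trained::Bool                        # Trained flag
    info                                 # Complementary information
end
# Simplified constructor (a real model would loop over the keyword arguments as shown above)
MeanEstimator(;dummy=0.0, opt=BML_options()) =
    MeanEstimator(MeanEstimator_hp(dummy=dummy), opt, nothing, nothing, false, Dict{Symbol,Any}())
function fit!(m::MeanEstimator, X)
    m.par     = MeanEstimator_lp(vec(mean(X, dims=1)))
    m.cres    = m.opt.cache ? m.par.μ : nothing
    m.trained = true
    return m.cres
end
predict(m::MeanEstimator, X) = m.par.μ   # here the prediction doesn't depend on the new X
```
Once defined in this way, the model follows the common `ModelConstructor()` / `fit!` / `predict` workflow described in the user documentation.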
# [BetaML Api v2](@id api_usage)
!!! note
The API described below is the default one starting from BetaML v0.8.
The following API is designed to further simplify the usage of the various ML models provided by BetaML by introducing a common workflow. This is the _user_ documentation. Refer to the [developer documentation](@ref api_implementation) to learn how the API is implemented.
## Supervised, unsupervised and transformer models
_Supervised_ refers to models designed to _learn_ a relation between some _features_ (often noted with X) and some _labels_ (often noted with Y) in order to predict the label of new data given the observed features alone. Perceptron, decision trees or neural networks are common examples.
_Unsupervised_ and _transformer_ models relate to models that learn a "structure" from the data itself (without any label attached from which to learn) and report either some new information using this learned structure (e.g. a cluster class) or directly process a transformation of the data itself, like `PCAEncoder` or missing imputers.
There is no difference in BetaML between these kinds of models, aside from the fact that the fitting (aka _training_) function for the former takes both the features and the labels. In particular there isn't a separate `transform` function as in other frameworks: any information we need to learn using the model, whether a label or some transformation of the original data, is provided by the `predict` function.
### Model constructor
The first step is to build the model constructor by passing (using keyword arguments) the algorithm hyperparameters and various options (cache results flag, debug levels, random number generators, ...):
```
mod = ModelName(par1=X,par2=Y,...)
```
Sometimes a parameter is itself another model, in such case we would have:
```
mod = ModelName(par1=OtherModel(a_par_of_OtherModel=X,...),par2=Y,...)
```
### Training of the model
The second step is to _fit_ (aka _train_) the model:
```
fit!(mod,X,[Y])
```
where `Y` is present only for supervised models.
For online algorithms, i.e. models that support updating of the learned parameters with new data, `fit!` can be repeated as new data arrive, although not all algorithms guarantee that training one record at a time is equivalent to training all the records at once. In some algorithms the "old training" could be used as initial conditions, regardless of whether it was achieved with hundreds or millions of records, and the new data we use for training become much more important than the old ones for the determination of the learned parameters.
### Prediction
Fitted models can be used to predict `y` (whether the label, some desired new information or a transformation) given new `X`:
```
ŷ = predict(mod,X)
```
As a convenience, if the model has been trained while having the `cache` option set to `true` (the default), the `ŷ` of the last training is retained in the model object and it can be retrieved simply with `predict(mod)`. Also in such case the `fit!` function returns `ŷ` instead of `nothing`, effectively making it behave like a _fit-and-transform_ function.
The 3 expressions below are hence equivalent:
```
ŷ = fit!(mod,xtrain) # only with `cache=true` in the model constructor (default)
ŷ1 = predict(mod) # only with `cache=true` in the model constructor (default)
ŷ2 = predict(mod,xtrain)
```
### Other functions
Models can be reset to discard the learned information with `reset!(mod)`, and training information (other than the algorithm's learned parameters, see below) can be retrieved with `info(mod)`.
Hyperparameters, options and learned parameters can be retrieved with the functions `hyperparameters`, `parameters` and `options` respectively. Note that they can also be used to set new values in the model, as they return a reference to the required objects.
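For example (a small sketch, assuming a generic fitted model `mod`; the available fields depend on the specific model):
```
hyperparameters(mod)            # (a reference to) the hyper-parameter struct of the model
options(mod).verbosity = HIGH   # as a reference is returned, fields can be modified in place
```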
!!! note
    What is the difference between the output of `info`, `parameters` and the `predict` function?
    The `predict` function (and, when cache is used, the `fit!` one too) returns the main information required from the model: the predicted label for supervised models, the class assignment for clustering models or the reprojected data for PCA. `info` returns complementary information like the number of dimensions of the data or the number of records employed for training. It doesn't include information that is necessary for the training itself, like the centroids in cluster analysis. These can be retrieved instead using `parameters`, which includes all and only the information required to compute `predict`.
Some models allow an inverse transformation that, using the parameters learned at training time (e.g. the scale factors), performs an inverse transformation of new data to the space of the training data (e.g. the unscaled space). Use `inverse_predict(mod,xnew)`.
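As a small end-to-end sketch of this workflow, using the `OneHotEncoder` transformer on made-up labels:
```
y    = ["a","b","b","a","c"]
mod  = OneHotEncoder()             # 1. model constructor
y_oh = fit!(mod, y)                # 2. fit: with cache=true (the default) the encoded data is returned directly
y2   = inverse_predict(mod, y_oh)  # 3. inverse transformation back to the original labels
y2 == y                            # true
```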
# [The BetaML.Clustering Module](@id clustering_module)
```@docs
Clustering
```
## Module Index
```@index
Modules = [Clustering]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Clustering]
Private = false
```
# Examples
## Supervised learning
### Regression
#### Estimating the bike sharing demand
The task is to estimate the influence of several variables (like the weather, the season, the day of the week...) on the demand for shared bicycles, so that the authority in charge of the service can organise the service in the best way.
Data origin:
- original full dataset (by hour, not used here): https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
- simplified dataset (by day, with some simple scaling): https://www.hds.utc.fr/~tdenoeux/dokuwiki/en/aec
- description: https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/exam_2019_ace_.pdf
- data: https://www.hds.utc.fr/~tdenoeux/dokuwiki/_media/en/bike_sharing_day.csv.zip
Note that even if we are estimating a time series, we are not using here a recurrent neural network as we assume the temporal dependence to be negligible (i.e. $Y_t = f(X_t)$ alone).
### Classification
## Unsupervised learning
# Notebooks
The following notebooks provide runnable examples of the package functionality:
- Pegasos classifiers: [[Static notebook](https://github.com/sylvaticus/BetaML.jl/blob/master/notebooks/Perceptron.ipynb)] - [[myBinder](https://mybinder.org/v2/gh/sylvaticus/BetaML.jl/master?filepath=notebooks%2FPerceptron.ipynb)]
- Decision Trees and Random Forest regression on Bike sharing demand forecast (daily data): [[Static notebook](https://github.com/sylvaticus/BetaML.jl/blob/master/notebooks/DecisionTrees%20-%20Bike%20sharing%20demand%20forecast%20(daily%20db).ipynb)] - [[myBinder](https://mybinder.org/v2/gh/sylvaticus/BetaML.jl/master?filepath=notebooks%2FDecisionTrees%20-%20Bike%20sharing%20demand%20forecast%20(daily%20db).ipynb)]
- Neural Networks: [[Static notebook](https://github.com/sylvaticus/BetaML.jl/blob/master/notebooks/Nn.ipynb)] - [[myBinder](https://mybinder.org/v2/gh/sylvaticus/BetaML.jl/master?filepath=notebooks%2FNn.ipynb)]
- Bike sharing demand forecast (daily data): [[Static notebook](https://github.com/sylvaticus/BetaML.jl/blob/master/notebooks/NN%20-%20Bike%20sharing%20demand%20forecast%20(daily%20db).ipynb)] - [[myBinder](https://mybinder.org/v2/gh/sylvaticus/BetaML.jl/master?filepath=notebooks%2FNN%20-%20Bike%20sharing%20demand%20forecast%20(daily%20db).ipynb)]
- Clustering: [[Static notebook](https://github.com/sylvaticus/BetaML.jl/blob/master/notebooks/Clustering.ipynb)] - [[myBinder](https://mybinder.org/v2/gh/sylvaticus/BetaML.jl/master?filepath=notebooks%2FClustering.ipynb)]
Note: the live, runnable computational environment is a temporary new copy made at each connection. The first time after a commit is done on this repository, a new environment has to be set up (instead of just being copied), and the server may take several minutes.
This happens only if you are the unlucky user triggering the rebuild of the environment after the commit.
# [The BetaML.GMM Module](@id gmm_module)
```@docs
GMM
```
## Module Index
```@index
Modules = [GMM]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [GMM]
Private = false
```
# [The BetaML.Imputation Module](@id imputation_module)
```@docs
Imputation
```
## Module Index
```@index
Modules = [Imputation]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Imputation]
Private = false
```
# [The MLJ interface to BetaML Models](@id bmlj_module)
```@docs
Bmlj
```
## Models available through MLJ
```@index
Modules = [Bmlj]
Order = [:function, :constant, :type, :macro]
```
## Detailed models documentation
```@autodocs
Modules = [Bmlj]
Private = true
``` | BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 218 | # [The BetaML.Nn Module](@id nn_module)
```@docs
Nn
```
## Module Index
```@index
Modules = [Nn]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Nn]
Private = false
```
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 258 | # [The BetaML.Perceptron Module](@id perceptron_module)
```@docs
Perceptron
```
## Module Index
```@index
Modules = [Perceptron]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Perceptron]
Private = false
```
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 2595 | # Style guide and template for BetaML developers
## Master Style guide
The code in BetaML should follow the official [Julia Style Guide](https://docs.julialang.org/en/v1/manual/style-guide/).
## Names style
- Each file name should start with a capital letter, no spaces allowed (and each file content should start with: `"Part of [BetaML](https://github.com/sylvaticus/BetaML.jl). Licence is MIT."`)
- Type names use the so-called "CamelCase" convention, where the words are separated by a capital letter rather than `_`, while function names use lower-case letters only, with words optionally separated (but only when really needed for readability) by an `_`;
- In the code and documentation we denote by `N` the number of observations/records, by `D` the number of dimensions and by `K` the number of classes/categories;
- Error/accuracy/loss functions expect first `y` and then `ŷ`
- In API exposed to users, strings are preferred to symbols
## Docstrings
Please apply the following templates when writing a docstring for BetaML:
- Functions (add `@docs` if the function is not on the root module level, like for inner constructors, i.e. `@docs """ foo(x) ...."""`; an illustrative example is given after these templates):
```
"""
$(TYPEDSIGNATURES)
One line description
[Further description]
# Parameters:
# Returns:
- Elements the function needs
# Notes:
- notes
# Example:
` ` `julia
julia> [code]
[output]
` ` `
"""
```
- Structs
```
"""
$(TYPEDEF)
One line description
[Further description]
# Fields: (if relevant)
$(TYPEDFIELDS)
# Notes:
# Example:
` ` `julia
julia> [code]
[output]
` ` `
"""
```
- Enums:
```
"""
$(TYPEDEF)
One line description
[Further description]
# Notes:
"""
```
- Constants
```
"""
[4 spaces] [Constant name]
One line description
[Further description]
# Notes:
"""
```
- Modules
```
"""
[4 spaces] [Module name]
One line description
Detailed description on the module objectives, content and organisation
"""
```
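For instance, a docstring for a hypothetical `half(x)` utility following the function template above could look as below (it assumes `DocStringExtensions` is loaded, as it is in the BetaML modules; the spaced backticks stand for a nested code fence, as in the templates):
```julia
"""
$(TYPEDSIGNATURES)
Return half of the input value.
[This docstring is a purely illustrative example.]
# Parameters:
- `x`: the number to halve
# Returns:
- `x/2`
# Notes:
- purely illustrative
# Example:
` ` `julia
julia> half(4)
2.0
` ` `
"""
half(x) = x/2
```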
## Internal links
To refer to a documented object: ```[`NAME`](@ref)``` or ```[`NAME`](@ref manual_id)```.
In particular for internal links use ```[`?NAME`](@ref ?NAME)```
To create an id manually: ```[Title](@id manual_id)```
## Data organisation
- While some functions provide a `dims` parameter, most BetaML algorithms expect the input data layout with observations organised by rows and fields/features by columns (see the small example after this list).
- While some algorithms accept DataFrames as input, the usage of standard arrays is encouraged (if the data is passed to the function as a DataFrame, it may be converted to standard arrays somewhere inside inner loops, leading to large inefficiencies).
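A small, purely illustrative example of this layout:
```julia
# N=3 observations (rows) and D=2 features (columns) -- values are illustrative only
X = [1.0 10.0;
     2.0 20.0;
     3.0 30.0]
y = [1, 2, 3]   # one label per observation (row)
```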
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 232 | # [The BetaML.Trees Module](@id trees_module)
```@docs
Trees
```
## Module Index
```@index
Modules = [Trees]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Trees]
Private = false
```
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 232 | # [The BetaML.Utils Module](@id utils_module)
```@docs
Utils
```
## Module Index
```@index
Modules = [Utils]
Order = [:function, :constant, :type, :macro]
```
## Detailed API
```@autodocs
Modules = [Utils]
Private = false
```
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 14458 | #  BetaML.jl Documentation
Welcome to the documentation of the [_Beta Machine Learning toolkit_](https://github.com/sylvaticus/BetaML.jl).
## About
The `BetaML` toolkit provides machine learning algorithms written in the Julia programming language.
Aside from the algorithms themselves, `BetaML` provides many "utility" functions. Because the algorithms are all self-contained in the library itself (you are invited to explore their source code by typing `@edit functionOfInterest(par1,par2,...)`), the utility functions have APIs that are coordinated with the algorithms, facilitating the "preparation" of the data for the analysis, the choice of the hyper-parameters and the evaluation of the models.
Most models have [`an interface`](@ref BetaML.Bmlj) for the [`MLJ`](https://github.com/alan-turing-institute/MLJ.jl) framework.
Aside from Julia, BetaML can be accessed from R or Python using respectively [JuliaCall](https://github.com/Non-Contradiction/JuliaCall) and [PyJulia](https://github.com/JuliaPy/pyjulia). See [the tutorial](@ref using_betaml_from_other_languages) for details.
## Installation
The BetaML package is included in the standard Julia registry; install it with:
* `] add BetaML`
## Available modules
While `BetaML` is split into several (sub)modules, all of them are re-exported at the root module level. This means that you can access their functionality by simply typing `using BetaML`:
```julia
using BetaML
myLayer = DenseLayer(2,3) # DenseLayer is defined in the Nn submodule
res = KernelPerceptronClassifier() # KernelPerceptronClassifier is defined in the Perceptron module
@edit DenseLayer(2,3) # Open a text editor at the relevant source code
```
Each module is documented on the links below (you can also use the inline Julia help system: just press the question mark `?` and then, on the special help prompt `help?>`, type the function name):
- [**`BetaML.Perceptron`**](@ref BetaML.Perceptron): The Perceptron, Kernel Perceptron and Pegasos classification algorithms;
- [**`BetaML.Trees`**](@ref BetaML.Trees): The Decision Trees and Random Forests algorithms for classification or regression (with missing values supported);
- [**`BetaML.Nn`**](@ref BetaML.Nn): Implementation of Artificial Neural Networks;
- [**`BetaML.Clustering`**](@ref BetaML.Clustering): (hard) Clustering algorithms (K-Means, K-Medoids)
- [**`BetaML.GMM`**](@ref BetaML.GMM): Various algorithms (clustering, regression, missing imputation / collaborative filtering / recommendation systems) based on a Generative (Gaussian) Mixture Model (probabilistic) fitter, fitted using an EM algorithm;
- [**`BetaML.Imputation`**](@ref BetaML.Imputation): Imputation algorithms;
- [**`BetaML.Utils`**](@ref BetaML.Utils): Various utility functions (scale, one-hot, distances, kernels, pca, autoencoder, predictions analysis, feature importance..).
## [Available models](@id models_list)
Currently BetaML provides the following models:
| `BetaML` name | Hp | [`MLJ`](https://github.com/alan-turing-institute/MLJ.jl) Interface | Category* |
| ----------- | ------------- |------------- | -------- |
| [`PerceptronClassifier`](@ref) | [☒](@ref PerceptronC_hp) | [`PerceptronClassifier`](@ref Bmlj.PerceptronClassifier) | _Supervised classifier_ |
| [`KernelPerceptronClassifier`](@ref) | [☒](@ref KernelPerceptronC_hp) | [`KernelPerceptronClassifier`](@ref Bmlj.KernelPerceptronClassifier) | _Supervised classifier_ |
| [`PegasosClassifier`](@ref) | [☒](@ref PegasosC_hp) | [`PegasosClassifier`](@ref Bmlj.PegasosClassifier) | _Supervised classifier_ |
| [`DecisionTreeEstimator`](@ref) | [☒](@ref DecisionTreeE_hp) | [`DecisionTreeClassifier`](@ref Bmlj.DecisionTreeClassifier), [`DecisionTreeRegressor`](@ref Bmlj.DecisionTreeRegressor) | _Supervised regressor and classifier_ |
| [`RandomForestEstimator`](@ref) | [☒](@ref RandomForestE_hp) | [`RandomForestClassifier`](@ref Bmlj.RandomForestClassifier), [`RandomForestRegressor`](@ref Bmlj.RandomForestRegressor) | _Supervised regressor and classifier_ |
| [`NeuralNetworkEstimator`](@ref) | [☒](@ref NeuralNetworkE_hp) | [`NeuralNetworkRegressor`](@ref Bmlj.NeuralNetworkRegressor), [`MultitargetNeuralNetworkRegressor`](@ref Bmlj.MultitargetNeuralNetworkRegressor), [`NeuralNetworkClassifier`](@ref Bmlj.NeuralNetworkClassifier) | _Supervised regressor and classifier_ |
| [`GaussianMixtureRegressor`](@ref) | [☒](@ref GaussianMixture_hp) | [`GaussianMixtureRegressor`](@ref Bmlj.GaussianMixtureRegressor), [`MultitargetGaussianMixtureRegressor`](@ref Bmlj.MultitargetGaussianMixtureRegressor) | _Supervised regressor_ |
| [`GaussianMixtureRegressor2`](@ref) | [☒](@ref GaussianMixture_hp) | | _Supervised regressor_ |
| [`KMeansClusterer`](@ref) | [☒](@ref KMeansC_hp) | [`KMeansClusterer`](@ref Bmlj.KMeansClusterer) | _Unsupervised hard clusterer_ |
| [`KMedoidsClusterer`](@ref) | [☒](@ref KMedoidsC_hp) | [`KMedoidsClusterer`](@ref Bmlj.KMedoidsClusterer) | _Unsupervised hard clusterer_ |
| [`GaussianMixtureClusterer`](@ref) | [☒](@ref GaussianMixture_hp) | [`GaussianMixtureClusterer`](@ref Bmlj.GaussianMixtureClusterer)| _Unsupervised soft clusterer_ |
| [`SimpleImputer`](@ref) | [☒](@ref SimpleI_hp) | [`SimpleImputer`](@ref Bmlj.SimpleImputer) | _Unsupervised missing data imputer_ |
| [`GaussianMixtureImputer`](@ref) | [☒](@ref GaussianMixture_hp) | [`GaussianMixtureImputer`](@ref Bmlj.GaussianMixtureImputer) | _Unsupervised missing data imputer_ |
| [`RandomForestImputer`](@ref) | [☒](@ref RandomForestI_hp), [☒](@ref RandomForestE_hp) | [`RandomForestImputer`](@ref Bmlj.RandomForestImputer) | _Unsupervised missing data imputer_ |
| [`GeneralImputer`](@ref) | [☒](@ref GeneralI_hp) | [`GeneralImputer`](@ref Bmlj.GeneralImputer) | _Unsupervised missing data imputer_ |
| [`MinMaxScaler`](@ref) | | | _Data transformer_ |
| [`StandardScaler`](@ref) | | | _Data transformer_ |
| [`Scaler`](@ref) | [☒](@ref Scaler_hp) | | _Data transformer_ |
| [`PCAEncoder`](@ref) | [☒](@ref PCAE_hp) | | _Unsupervised dimensionality reduction_ |
| [`AutoEncoder`](@ref) | [☒](@ref AutoE_hp) | [`AutoEncoder`](@ref Bmlj.AutoEncoder) | _Unsupervised non-linear dimensionality reduction_ |
| [`OneHotEncoder`](@ref) | [☒](@ref OneHotE_hp) | | _Data transformer_ |
| [`OrdinalEncoder`](@ref) | [☒](@ref OneHotE_hp) | | _Data transformer_ |
| [`ConfusionMatrix`](@ref) | [☒](@ref ConfusionMatrix_hp) | | _Predictions analysis_ |
| [`FeatureRanker`](@ref) | [☒](@ref FeatureR_hp) | | _Predictions analysis_ |
\* There is no formal distinction in `BetaML` between a transformer (or a model to assess predictions) and an unsupervised model. They are all treated as unsupervised models that, given some data, learn how to return some useful information, whether a class grouping, a specific transformation or a quality evaluation.
## Usage
New to BetaML or even to Julia / Machine Learning altogether? [Start from the tutorial](@ref getting_started)!
All models support the (a) model **construction** (where hyperparameters and options are chosen), (b) **fitting** and (c) **prediction** paradigm. A few models support `inverse_predict`, for example to go back from the one-hot encoded columns to the original categorical variable (factor).
This paradigm is described in detail in the [`API V2`](@ref api_usage) page.
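For example, here is a minimal sketch of the three steps using the `OneHotEncoder` transformer on some illustrative data:
```julia
using BetaML
y    = ["a","b","b","a","c"]  # illustrative categorical data
m    = OneHotEncoder()        # (a) construction: hyper-parameters and options are chosen here
y_oh = fit!(m,y)              # (b) fitting (for transformers this also returns the transformed data)
ŷ_oh = predict(m,["c","a"])   # (c) prediction on new data
inverse_predict(m,y_oh)       # back to the original categories
```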
## Quick examples
_(see the_ [tutorial](@ref getting_started) _for a more step-by-step guide to the examples below and to other examples)_
- **Using an Artificial Neural Network for multinomial categorisation**
In this example we see how to train a neural network model to predict the species name (5th column) given floral sepal and petal measures (first 4 columns) in the famous [iris flower dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set).
```julia
# Load Modules
using DelimitedFiles, Random
using Pipe, Plots, BetaML # Load BetaML and other auxiliary modules
Random.seed!(123); # Fix the random seed (to obtain reproducible results).
# Load the data
iris = readdlm(joinpath(dirname(Base.find_package("BetaML")),"..","test","data","iris.csv"),',',skipstart=1)
x = convert(Array{Float64,2}, iris[:,1:4])
y = convert(Array{String,1}, iris[:,5])
# Encode the categories (levels) of y using a separate column per each category (aka "one-hot" encoding)
ohmod = OneHotEncoder()
y_oh = fit!(ohmod,y)
# Split the data in training/testing sets
((xtrain,xtest),(ytrain,ytest),(ytrain_oh,ytest_oh)) = partition([x,y,y_oh],[0.8,0.2])
(ntrain, ntest) = size.([xtrain,xtest],1)
# Define the Artificial Neural Network model
l1 = DenseLayer(4,10,f=relu) # The activation function is `ReLU`
l2 = DenseLayer(10,3) # The activation function is `identity` by default
l3 = VectorFunctionLayer(3,f=softmax) # Add a (parameterless) layer whose activation function (`softmax` in this case) is applied to all its nodes at once
mynn = NeuralNetworkEstimator(layers=[l1,l2,l3],loss=crossentropy,descr="Multinomial logistic regression Model Sepal", batch_size=2, epochs=200) # Build the NN and use the cross-entropy as error function.
# Alternatively, switch to hyperparameter auto-tuning with `autotune=true` instead of specifying `batch_size` and `epochs` manually
# Train the model (using the ADAM optimizer by default)
res = fit!(mynn,fit!(Scaler(),xtrain),ytrain_oh) # Fit the model to the (scaled) data
# Obtain predictions and test them against the ground true observations
ŷtrain = @pipe predict(mynn,fit!(Scaler(),xtrain)) |> inverse_predict(ohmod,_) # Note the scaling and reverse one-hot encoding functions
ŷtest = @pipe predict(mynn,fit!(Scaler(),xtest)) |> inverse_predict(ohmod,_)
train_accuracy = accuracy(ŷtrain,ytrain) # 0.975
test_accuracy = accuracy(ŷtest,ytest) # 0.96
# Analyse model performances
cm = ConfusionMatrix()
fit!(cm,ytest,ŷtest)
print(cm)
```
```text
A ConfusionMatrix BetaMLModel (fitted)
-----------------------------------------------------------------
*** CONFUSION MATRIX ***
Scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "virginica" "versicolor" "setosa"
"virginica" 8 1 0
"versicolor" 0 14 0
"setosa" 0 0 7
Normalised scores actual (rows) vs predicted (columns):
4×4 Matrix{Any}:
"Labels" "virginica" "versicolor" "setosa"
"virginica" 0.888889 0.111111 0.0
"versicolor" 0.0 1.0 0.0
"setosa" 0.0 0.0 1.0
*** CONFUSION REPORT ***
- Accuracy: 0.9666666666666667
- Misclassification rate: 0.033333333333333326
- Number of classes: 3
N Class precision recall specificity f1score actual_count predicted_count
TPR TNR support
1 virginica 1.000 0.889 1.000 0.941 9 8
2 versicolor 0.933 1.000 0.938 0.966 14 15
3 setosa 1.000 1.000 1.000 1.000 7 7
- Simple avg. 0.978 0.963 0.979 0.969
- Weigthed avg. 0.969 0.967 0.971 0.966
```
```julia
ϵ = info(mynn)["loss_per_epoch"]
plot(1:length(ϵ),ϵ, xlabel="epochs",ylabel="error",legend=nothing,title="Avg. error per epoch on the Sepal dataset")
heatmap(info(cm)["categories"],info(cm)["categories"],info(cm)["normalised_scores"],c=cgrad([:white,:blue]),xlabel="Predicted",ylabel="Actual", title="Confusion Matrix")
```
 
- **Using Random forests for regression**
In this example we predict, using [another classical ML dataset](https://archive-beta.ics.uci.edu/ml/datasets/auto+mpg), the miles per gallon of various car models.
Note in particular:
- (a) how easy it is in Julia to import remote data, even cleaning them without ever saving a local file on disk;
- (b) how Random Forest models can directly work on data with missing values, categorical ones and non-numerical ones in general, without any preprocessing
```julia
# Load modules
using Random, HTTP, CSV, DataFrames, BetaML, Plots
import Pipe: @pipe
Random.seed!(123)
# Load data
urlData = "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data"
data = @pipe HTTP.get(urlData).body |>
replace!(_, UInt8('\t') => UInt8(' ')) |>
CSV.File(_, delim=' ', missingstring="?", ignorerepeated=true, header=false) |>
DataFrame;
# Preprocess data
X = Matrix(data[:,2:8]) # cylinders, displacement, horsepower, weight, acceleration, model year, origin
y = data[:,1] # miles per gallon
(xtrain,xtest),(ytrain,ytest) = partition([X,y],[0.8,0.2])
# Model definition, hyper-parameters auto-tuning, training and prediction
m = RandomForestEstimator(autotune=true)
ŷtrain = fit!(m,xtrain,ytrain) # shortcut for `fit!(m,xtrain,ytrain); ŷtrain = predict(m,xtrain)`
ŷtest = predict(m,xtest)
# Prediction assessment
relative_mean_error_train = relative_mean_error(ytrain,ŷtrain) # 0.039
relative_mean_error_test = relative_mean_error(ytest,ŷtest) # 0.076
scatter(ytest,ŷtest,xlabel="Actual",ylabel="Estimated",label=nothing,title="Est vs. obs MPG (test set)")
```

- **Further examples**
Finally, you may want to give a look at the ["test" folder](https://github.com/sylvaticus/BetaML.jl/tree/master/test). While the primary objective of the scripts under the "test" folder is to provide automatic testing of the BetaML toolkit, they can also be used to see how functions should be called, as virtually all functions provided by BetaML are tested there.
## Benchmarks
A page summarising some basic benchmarks for BetaML and other leading Julia ML libraries is available [here](@ref benchmarks).
## Acknowledgements
The development of this package at the _Bureau d'Economie Théorique et Appliquée_ (BETA, Nancy) was supported by the French National Research Agency through the [Laboratory of Excellence ARBRE](http://mycor.nancy.inra.fr/ARBRE/), a part of the “Investissements d'Avenir” Program (ANR 11 – LABX-0002-01).
[](http://www.beta-umr7522.fr/)
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"MIT"
] | 0.12.1 | 8333d4c6ba83914d4204fa9fcf2c0a5bc73fe99c | docs | 20553 | # [Getting started](@id getting_started)
## Introduction
This "tutorial" part of the documentation presents a step-by-step guide to the main algorithms and utility functions provided by BetaML and comparisons with the leading packages in each field.
Aside from this page, the tutorial is divided into the following sections:
- [Classification tutorial](@ref classification_tutorial) - Topics: _Decision trees and random forests, neural networks (softmax), dealing with stochasticity, loading data from internet_
- [Regression tutorial](@ref regression_tutorial) - Topics: _Decision trees, Random forests, neural networks, hyper-parameters autotuning, one-hot encoding, continuous error measures_
- [Clustering tutorial](@ref clustering_tutorial) - Topics: _k-means, kmedoids, generative (gaussian) mixture models (gmm), cross-validation, ordinal encoding_
- [Multi-branch neural network](@ref multibranch_nn_tutorial) - Topics: _neural networks regression, multi-branch neural network_
- [Feature importance](@ref variable_importance_tutorial) - Topics: _feature importance, Sobol indices, mean decrease accuracy (mda), Shapley values_
Detailed usage instructions on each algorithm can be found on each model struct (listed [here](@ref models_list)), while theoretical notes describing most of them can be found at the companion repository [https://github.com/sylvaticus/MITx_6.86x](https://github.com/sylvaticus/MITx_6.86x).
The overall "philosophy" of BetaML is to support simple machine learning tasks easily and make complex tasks possible. An the most basic level, the majority of algorithms have default parameters suitable for a basic analysis. A great level of flexibility can be already achieved by just employing the full set of model parameters, for example changing the distance function in `KMedoidsClusterer` to `l1_distance` (aka "Manhattan distance").
Finally, the greatest flexibility can be obtained by customising BetaML and writing, for example, your own neural network layer type (by subclassing `AbstractLayer`), your own sampler (by subclassing `AbstractDataSampler`) or your own mixture component (by subclassing `AbstractMixture`).
In such cases, while not required by any means, please consider giving it back to the community and opening a pull request to integrate your work in BetaML.
If you are looking for an introductory book on Julia, you could consider "[Julia Quick Syntax Reference](https://www.julia-book.com/)" (Apress,2019) or the online course "[Introduction to Scientific Programming and Machine Learning with Julia](https://sylvaticus.github.io/SPMLJ/stable/)".
A few conventions applied across the library:
- Type names use the so-called "CamelCase" convention, where the words are separated by a capital letter rather than `_`, while function names use lower-case letters only, with words optionally separated (but only when really needed for readability) by an `_`;
- While some functions provide a `dims` parameter, most BetaML algorithms expect the input data layout with observations organised by rows and fields/features by columns. Almost everywhere in the code and documentation we denote by `N` the number of observations/records, by `D` the number of dimensions and by `K` the number of classes/categories;
- While some algorithms accept DataFrames as input, the usage of standard arrays is encouraged (if the data is passed to the function as a DataFrame, it may be converted to standard arrays somewhere inside inner loops, leading to large inefficiencies)
- The accuracy/error/loss measures expect the ground truth `y` first and then the estimated `ŷ` (in this order), as in the short example below
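For example (with purely illustrative numbers):
```julia
using BetaML
y = [1.0, 2.0, 3.0]       # ground truth (illustrative)
ŷ = [1.1, 1.9, 3.2]       # model estimates
relative_mean_error(y, ŷ) # ground truth first, then the estimate
```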
## [Using BetaML from other programming languages](@id using_betaml_from_other_languages)
In this section we provide two examples of using `BetaML` directly in Python or R (with automatic object conversion). Click `Details` for a more extended explanation of these examples.
While I have no direct experience with them, the same approach can be used to access `BetaML` from any language with a binding to Julia, like Matlab or Javascript.
### Use BetaML in Python
```
$ python3 -m pip install --user juliacall
```
```python
>>> from juliacall import Main as jl
>>> import numpy as np
>>> from sklearn import datasets
>>> jl.seval('using Pkg; Pkg.add("BetaML")') # Only once
>>> jl.seval("using BetaML")
>>> bml = jl.BetaML
>>> iris = datasets.load_iris()
>>> X = iris.data[:, :4]
>>> y = iris.target + 1 # Julia arrays start from 1 not 0
>>> (Xs,ys) = bml.consistent_shuffle([X,y])
>>> m = bml.KMeansClusterer(n_classes=3)
>>> yhat = bml.fit_ex(m,Xs) # Python doesn't allow exclamation marks in function names, so we use `fit_ex(⋅)` instead of `fit!(⋅)` (the original function name)
>>> m._jl_display() # force a "Julian" way of displaying of Julia objects
>>> acc = bml.accuracy(ys,yhat,ignorelabels=True)
>>> acc
0.8933333333333333
```
```@raw html
<details><summary>Details</summary>
```
We show for Python two separate "Julia from Python" interfaces, [PyJulia](https://github.com/JuliaPy/pyjulia) and [JuliaCall](https://github.com/cjdoris/PythonCall.jl) with the second one being the most recent one.
#### With the classical `pyjulia` package
[PyJulia](https://github.com/JuliaPy/pyjulia) is a relatively old method to use Julia code and libraries in Python. It works great but it requires that you already have a working Julia installation on your PC, so we first need to download and install the Julia binaries for our operating system from [JuliaLang.org](https://julialang.org/). Be sure that Julia is working by opening the Julia terminal and e.g. typing `println("hello world")`.
Install `PyJulia` with:
```
$ python3 -m pip install --user julia # the name of the package in `pip` is `julia`, not `PyJulia`
```
For the sake of this tutorial, let's also install in Python a package that contains the dataset that we will use:
```
$ python3 -m pip install --user sklearn # only for retrieving the dataset in the python way
```
We can now open a Python terminal and, to obtain an interface to Julia, just run:
```python
>>> import julia
>>> julia.install() # Only once to set-up in julia the julia packages required by PyJulia
>>> jl = julia.Julia(compiled_modules=False)
```
If we have multiple Julia versions, we can specify the one to use in Python passing `julia="/path/to/julia/binary/executable"` (e.g. `julia = "/home/myUser/lib/julia-1.8.0/bin/julia"`) to the `install()` function.
The `compiled_modules=False` in the Julia constructor is a workaround to the common situation when the Python interpreter is statically linked to `libpython`, but it will slow down the interactive experience, as it will disable Julia packages pre-compilation, and every time we use a module for the first time, it will need to be compiled first.
Other, more efficient but also more complicated, workarounds are given in the package documentation, under the [Troubleshooting section](https://pyjulia.readthedocs.io/en/stable/troubleshooting.html).
Let's now add to Julia the BetaML package. We can surely do it from within Julia, but we can also do it while remaining in Python:
```python
>>> jl.eval('using Pkg; Pkg.add("BetaML")') # Only once to install BetaML
```
While `jl.eval('some Julia code')` evaluates any arbitrary Julia code (see below), most of the time we can use Julia in a more direct way. Let's start by importing the BetaML Julia package as a submodule of the Python Julia module:
```python
>>> from julia import BetaML
>>> jl.eval('using BetaML')
```
As you can see, it is no different than importing any other Python module.
For the data, let's load it "Python side":
```python
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data[:, :4]
>>> y = iris.target + 1 # Julia arrays start from 1 not 0
```
Note that `X` and `y` are Numpy arrays.
We can now call BetaML functions as we would do for any other Python library functions. In particular, we can pass to the functions (and retrieve) complex data types without worrying too much about the conversion between Python and Julia types, as these are converted automatically:
```python
>>> (Xs,ys) = BetaML.consistent_shuffle([X,y]) # X and y are first converted to julia arrays and then the returned julia arrays are converted back to python Numpy arrays
>>> m = BetaML.KMeansClusterer(n_classes=3)
>>> yhat = BetaML.fit_ex(m,Xs) # Python doesn't allow exclamation marks in function names, so we use `fit_ex(⋅)` instead of `fit!(⋅)`
>>> acc = BetaML.accuracy(ys,yhat,ignorelabels=True)
>>> acc
0.8933333333333333
```
Note: If we are using the `jl.eval()` interface, the objects we use must be already known to julia. To pass objects from Python to Julia, import the julia `Main` module (the root module in julia) and assign the needed variables, e.g.
```python
>>> X_python = [1,2,3,2,4]
>>> from julia import Main
>>> Main.X_julia = X_python
>>> jl.eval('BetaML.gini(X_julia)')
0.7199999999999999
```
Another alternative is to "eval" only the function name and pass the (python) objects in the function call:
```python
>>> jl.eval('BetaML.gini')(X_python)
0.7199999999999999
```
#### With the newer `JuliaCall` python package
[JuliaCall](https://github.com/cjdoris/PythonCall.jl) is a newer way to use Julia in Python that doesn't require separate installation of Julia.
Install it in Python using `pip` as well:
```
$ python3 -m pip install --user juliacall
```
We can now open a Python terminal and, to obtain an interface to Julia, just run:
```python
>>> from juliacall import Main as jl
```
If you have `julia` on PATH, it will use that version, otherwise it will automatically download and install a private version for `JuliaCall`.
If we have multiple Julia versions, we can specify the one to use through the `JuliaCall` configuration options described in its documentation.
To add `BetaML` to the JuliaCall private version we evaluate the julia package manager `add` function:
```python
>>> jl.seval('using Pkg; Pkg.add("BetaML")')# Only once to install BetaML
```
As with `PyJulia` we can evaluate arbitrary Julia code either using `jl.seval('some Julia code')` or by direct call, but let's first import `BetaML`:
```python
>>> jl.seval("using BetaML")
>>> bml = jl.BetaML
```
For the data, we reuse the `X` and `y` Numpy arrays we loaded earlier.
We can now call BetaML functions as we would do for any other Python library functions. In particular, we can pass to the functions (and retrieve) complex data types without worrying too much about the conversion between Python and Julia types, as these are converted automatically:
```python
>>> (Xs,ys) = bml.consistent_shuffle([X,y])
>>> m = bml.KMeansClusterer(n_classes=3)
>>> yhat = bml.fit_ex(m,Xs)
>>> m._jl_display() # force a "Julian" way of displaying of Julia objects
>>> acc = bml.accuracy(ys,yhat,ignorelabels=True)
>>> acc
0.8933333333333333
```
Note: If we are using the `jl.seval()` interface, the objects we use must be already known to julia. To pass objects from Python to Julia, we can write a small Julia helper (an anonymous function using the `@eval` macro):
```python
>>> X_python = [1,2,3,2,4]
>>> jlstore = jl.seval("(k, v) -> (@eval $(Symbol(k)) = $v; return)")
>>> jlstore("X_julia",X_python)
>>> jl.seval("BetaML.gini(X_julia)")
0.7199999999999999
```
Another alternative is to "eval" only the function name and pass the (python) objects in the function call:
```python
>>> X_python = [1,2,3,2,4]
>>> jl.seval('BetaML.gini')(X_python)
0.7199999999999999
```
#### Conclusions about using BetaML in Python
Using either the direct call or the `eval`/`seval` functions, whether in `PyJulia` or `JuliaCall`, we should be able to use all the BetaML functionalities directly from Python. If you run into problems using BetaML from Python, [open an issue](https://github.com/sylvaticus/BetaML.jl/issues/new) specifying your set-up.
```@raw html
</details>
```
### Use BetaML in R
```{r}
> install.packages("JuliaCall") # only once
> library(JuliaCall)
> library(datasets)
> julia_setup(installJulia = TRUE) # use installJulia = TRUE to let R download and install a private copy of julia, FALSE to use an existing Julia local installation
> julia_eval('using Pkg; Pkg.add("BetaML")') # only once
> julia_eval("using BetaML")
> X <- as.matrix(sapply(iris[,1:4], as.numeric))
> y <- sapply(iris[,5], as.integer)
> xsize <- dim(X)
> shuffled <- julia_call("consistent_shuffle",list(X,y))
> Xs <- matrix(sapply(shuffled[1],as.numeric), nrow=xsize[1])
> ys <- as.vector(sapply(shuffled[2], as.integer))
> m <- julia_eval('KMeansClusterer(n_classes=3)')
> yhat <- julia_call("fit_ex",m,Xs)
> acc <- julia_call("accuracy",yhat,ys,ignorelabels=TRUE)
> acc
[1] 0.8933333
```
```@raw html
<details><summary>Details</summary>
```
For R, we show how to access `BetaML` functionalities using the [JuliaCall](https://github.com/Non-Contradiction/JuliaCall) R package (no relations with the homonymous Python package).
Let's start by installing [`JuliaCall`](https://cran.r-project.org/web/packages/JuliaCall/index.html) in R:
```{r}
> install.packages("JuliaCall")
> library(JuliaCall)
> julia_setup(installJulia = TRUE) # use installJulia = TRUE to let R download and install a private copy of julia, FALSE to use an existing Julia local installation
```
Note that, differently from `PyJulia`, the "setup" function needs to be called every time we start a new R session, not just when we install the `JuliaCall` package.
If we don't have `julia` in the path of our system, or if we have multiple versions and we want to specify the one to work with, we can pass the `JULIA_HOME = "/path/to/julia/binary/executable/directory"` (e.g. `JULIA_HOME = "/home/myUser/lib/julia-1.1.0/bin"`) parameter to the `julia_setup` call. Or just let `JuliaCall` automatically download and install a private copy of julia.
`JuliaCall` depends for some things (like object conversion between Julia and R) on the Julia `RCall` package. If we don't already have it installed in Julia, it will try to install it automatically.
As in Python, let's start from the data loaded from R and do some work with them in Julia:
```{r}
> library(datasets)
> X <- as.matrix(sapply(iris[,1:4], as.numeric))
> y <- sapply(iris[,5], as.integer)
> xsize <- dim(X)
```
Let's install BetaML. As we did in Python, we can install a Julia package from Julia itself or from within R:
```{r}
> julia_eval('using Pkg; Pkg.add("BetaML")')
```
We can now "import" the BetaML julia package (in julia a "Package" is basically a module plus some metadata that facilitate its discovery and integration with other packages, like the reuired set) and call its functions with the `julia_call("juliaFunction",args)` R function:
```{r}
> julia_eval("using BetaML")
> shuffled <- julia_call("consistent_shuffle",list(X,y))
> Xs <- matrix(sapply(shuffled[1],as.numeric), nrow=xsize[1])
> ys <- as.vector(sapply(shuffled[2], as.integer))
> m <- julia_eval('KMeansClusterer(n_classes=3)')
> yhat <- julia_call("fit_ex",m,Xs)
> acc <- julia_call("accuracy",yhat,ys,ignorelabels=TRUE)
> acc
[1] 0.8933333
```
As an alternative, we can embed Julia code directly in R using the `julia_eval()` function:
```{r}
kMeansR <- julia_eval('
function accFromKmeans(x,k,y)
m = KMeansClusterer(n_classes=Int(k))
yhat = fit!(m,x)
acc = accuracy(yhat,y,ignorelabels=true)
return acc
end
')
```
We can then call the above function in R in one of the following three ways:
1. `kMeansR(Xs,3,ys)`
2. `julia_assign("Xs_julia", Xs); julia_assign("ys_julia", ys); julia_eval("accFromKmeans(Xs_julia,3,ys_julia)")`
3. `julia_call("accFromKmeans",Xs,3,ys)`
While other "convenience" functions are provided by the package, using `julia_call`, or `julia_assign` followed by `julia_eval`, should suffix to use `BetaML` from R. If you run into problems using BetaML from R, [open an issue](https://github.com/sylvaticus/BetaML.jl/issues/new) specifying your set-up.
```@raw html
</details>
```
## [Dealing with stochasticity and reproducibility](@id stochasticity_reproducibility)
Machine Learning workflows include stochastic components at several steps: in the data sampling, in the model initialisation and often in the models' own algorithms (and sometimes also in the prediction step).
All BetaML models with a stochastic component support a `rng` parameter, standing for _Random Number Generator_. An RNG is a "machine" that streams a flow of random numbers. The flow itself, however, is deterministically determined by the "seed" (an integer number) that the RNG has been told to use.
Normally this seed changes at each running of the script/model, so that stochastic models are indeed stochastic and their output differs at each run.
If we want to obtain reproducible results we can fix the seed at the very beginning of our model with `Random.seed!([AnInteger])`. Now our model or script will pick up a specific flow of random numbers, but this flow will always be the same, so that its results will always be the same.
However the default Julia RNG guarantees the same flow of random numbers, conditional on the seed, only within minor versions of Julia. If we want to "guarantee" reproducibility of the results across different versions of Julia, or "fix" only some parts of our script, we can call the individual functions passing [`FIXEDRNG`](@ref), an instance of `StableRNG(FIXEDSEED)` provided by `BetaML`, to the `rng` parameter. Use it with:
- `MyModel(;rng=FIXEDRNG)` : always produce the same sequence of results on each run of the script ("pulling" from the same rng object on different calls)
- `MyModel(;rng=StableRNG(SOMEINTEGER))` : always produce the same result (new identical rng object on each call)
This is very convenient especially during model development, as a model that uses `(...,rng=StableRNG(an_integer))` will provide stochastic results that are isolated (i.e. they don't depend on the consumption of the random stream by other parts of the model).
In particular, use `rng=StableRNG(FIXEDSEED)` or `rng=copy(FIXEDRNG)` with [`FIXEDSEED`](@ref) to retrieve the exact output as in the documentation or in the unit tests.
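For example (a minimal sketch on illustrative data):
```julia
using BetaML, StableRNGs # StableRNGs provides StableRNG; FIXEDRNG and FIXEDSEED come from BetaML
X = [1.0 1.0; 1.2 0.9; 10.0 10.0; 10.5 9.8; 5.0 5.2]  # illustrative data
m1 = KMeansClusterer(n_classes=2, rng=StableRNG(123)) # always the same result (new identical rng object at each call)
m2 = KMeansClusterer(n_classes=2, rng=copy(FIXEDRNG)) # reproduces the random stream used in the documentation and unit tests
fit!(m1,X)
fit!(m2,X)
```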
Most of the stochasticity appears in _training_ a model. However in a few cases (e.g. decision trees with missing values) some stochasticity appears also in _predicting_ new data using a trained model. In such cases the model doesn't restrict the random seed, so that you can choose at _predict_ time to use a fixed or a variable random seed.
Finally, if you plan to use multiple threads and want to provide the same stochastic output independent to the number of threads used, have a look at [`generate_parallel_rngs`](@ref).
"Reproducible stochasticity" is only one of the elements needed for a reproductible output. The other two are (a) the inputs the workflow uses and (b) the code that is evaluated.
Concerning the second point, Julia has a very modern package system that guarantees reproducible code evaluation (with a few exceptions linked to the use of external libraries, but BetaML models are all implemented in Julia itself). Without going into detail, you can use a pattern like this at the beginning of your machine learning workflows:
```
using Pkg
cd(@__DIR__)
Pkg.activate(".") # Activate a "local" environment, specific to this folder
Pkg.instantiate() # Download and install the required packages if not already available
```
This will tell Julia to load the exact versions of the dependent packages, and recursively of their dependencies, from a `Manifest.toml` file that is automatically created in the script's folder, and automatically updated, when you add or update a package in your workflow.
Note that these local "environments" are very "cheap" (packages are not actually copied to each environment on your system, only referenced) and the environment doesn't need to be in the same folder as the script as in this example; it can be any folder you want to "activate".
## Saving and loading trained models
Trained models can be saved on disk using the [`model_save`](@ref) function, and retrieved with [`model_load`](@ref).
The advantage over the serialization functionality in Julia core is that the two functions are actually wrappers around equivalent [JLD2](https://juliaio.github.io/JLD2.jl/stable/) package functions, and should maintain compatibility across different Julia versions.
| BetaML | https://github.com/sylvaticus/BetaML.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 3383 | #!/usr/bin/env julia
# Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using GLNS
"""
Optional Flags -- values are given in square brackets []
-max_time=[Int] (default set by mode)
-trials=[Int] (default set by mode)
-restarts=[Int] (default set by mode)
-mode=[default, fast, slow] (default is default)
-verbose=[0, 1, 2, 3] (default is 3. 0 is no output, 3 is most verbose)
-output=[filename] (default is output.tour)
-epsilon=[Float in [0,1]] (default is 0.5)
-reopt=[Float in [0,1]] (default is set by mode)
"""
function parse_cmd(ARGS)
if isempty(ARGS)
println("no input instance given")
exit(0)
end
if ARGS[1] == "-help" || ARGS[1] == "--help"
println("Usage: GTSPcmd.jl [filename] [optional flags]\n")
println("Optional flags (vales are give in square brackets) :\n")
println("-mode=[default, fast, slow] (default is default)")
println("-max_time=[Int] (default set by mode)")
println("-trials=[Int] (default set by mode)")
println("-restarts=[Int] (default set by mode)")
println("-noise=[None, Both, Subset, Add] (default is Both)")
println("-num_iterations=[Int] (default set by mode. Number multiplied by # of sets)")
println("-verbose=[0, 1, 2, 3] (default is 3. 0 is no output, 3 is most.)")
println("-output=[filename] (default is None)")
println("-epsilon=[Float in [0,1]] (default is 0.5)")
println("-reopt=[Float in [0,1]] (default is 1.0)")
println("-budget=[Int] (default has no budget)")
exit(0)
end
int_flags = ["-max_time", "-trials", "-restarts", "-verbose", "-budget", "-num_iterations"]
float_flags = ["-epsilon", "-reopt"]
string_flags = ["-mode", "-output", "-noise", "-devel"]
filename = ""
optional_args = Dict{Symbol, Any}()
for arg in ARGS
temp = split(arg, "=")
if length(temp) == 1 && filename == ""
filename = temp[1]
elseif length(temp) == 2
flag = temp[1]
value = temp[2]
if flag in int_flags
key = Symbol(flag[2:end])
optional_args[key] = parse(Int64, value)
elseif flag in float_flags
key = Symbol(flag[2:end])
optional_args[key] = parse(Float64, value)
elseif flag in string_flags
key = Symbol(flag[2:end])
optional_args[key] = value
else
println("WARNING: skipping unknown flag ", flag, " in command line arguments")
end
else
error("argument ", arg, " not in proper format")
end
end
if !isfile(filename)
println("the problem instance ", filename, " does not exist")
exit(0)
end
return filename, optional_args
end
# running the code on the problem instance passed in via command line args.
problem_instance, optional_args = parse_cmd(ARGS)
GLNS.solver(problem_instance; optional_args...)
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 4665 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module GLNS
export solver
using Random
include("utilities.jl")
include("parse_print.jl")
include("tour_optimizations.jl")
include("adaptive_powers.jl")
include("insertion_deletion.jl")
include("parameter_defaults.jl")
"""
Main GTSP solver, which takes as input a problem instance and
some optional arguments
"""
function solver(problem_instance; args...)
###### Read problem data and solver settings ########
num_vertices, num_sets, sets, dist, membership = read_file(problem_instance)
param = parameter_settings(num_vertices, num_sets, sets, problem_instance, args)
#####################################################
init_time = time()
count = Dict(:latest_improvement => 1,
:first_improvement => false,
:warm_trial => 0,
:cold_trial => 1,
:total_iter => 0,
:print_time => init_time)
lowest = Tour(Int64[], typemax(Int64))
start_time = time_ns()
# compute set distances which will be helpful
setdist = set_vertex_dist(dist, num_sets, membership)
powers = initialize_powers(param)
while count[:cold_trial] <= param[:cold_trials]
# build tour from scratch on a cold restart
best = initial_tour!(lowest, dist, sets, setdist, count[:cold_trial], param)
# print_cold_trial(count, param, best)
phase = :early
if count[:cold_trial] == 1
powers = initialize_powers(param)
else
power_update!(powers, param)
end
while count[:warm_trial] <= param[:warm_trials]
iter_count = 1
current = Tour(copy(best.tour), best.cost)
temperature = 1.442 * param[:accept_percentage] * best.cost
# accept a solution with 50% higher cost with 0.05% chance after num_iterations.
cooling_rate = ((0.0005 * lowest.cost)/(param[:accept_percentage] *
current.cost))^(1/param[:num_iterations])
if count[:warm_trial] > 0 # if warm restart, then use lower temperature
temperature *= cooling_rate^(param[:num_iterations]/2)
phase = :late
end
while count[:latest_improvement] <= (count[:first_improvement] ?
param[:latest_improvement] : param[:first_improvement])
if iter_count > param[:num_iterations]/2 && phase == :early
phase = :mid # move to mid phase after half iterations
end
trial = remove_insert(current, best, dist, membership, setdist, sets, powers, param, phase)
# decide whether or not to accept trial
if accepttrial_noparam(trial.cost, current.cost, param[:prob_accept]) ||
accepttrial(trial.cost, current.cost, temperature)
param[:mode] == "slow" && opt_cycle!(current, dist, sets, membership, param, setdist, "full")
current = trial
end
if current.cost < best.cost
count[:latest_improvement] = 1
count[:first_improvement] = true
if count[:cold_trial] > 1 && count[:warm_trial] > 1
count[:warm_trial] = 1
end
opt_cycle!(current, dist, sets, membership, param, setdist, "full")
best = current
else
count[:latest_improvement] += 1
end
# if we've come in under budget, or we're out of time, then exit
if best.cost <= param[:budget] || time() - init_time > param[:max_time]
param[:timeout] = (time() - init_time > param[:max_time])
param[:budget_met] = (best.cost <= param[:budget])
timer = (time_ns() - start_time)/1.0e9
lowest.cost > best.cost && (lowest = best)
print_best(count, param, best, lowest, init_time)
print_summary(lowest, timer, membership, param)
return
end
temperature *= cooling_rate # cool the temperature
iter_count += 1
count[:total_iter] += 1
print_best(count, param, best, lowest, init_time)
end
print_warm_trial(count, param, best, iter_count)
# on the first cold trial, we are just determining
count[:warm_trial] += 1
count[:latest_improvement] = 1
count[:first_improvement] = false
end
lowest.cost > best.cost && (lowest = best)
count[:warm_trial] = 0
count[:cold_trial] += 1
# print_powers(powers)
end
timer = (time_ns() - start_time)/1.0e9
print_summary(lowest, timer, membership, param)
end
end
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 5706 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## The items needed to adapt the powers in pdf_insert and pdf_remove
"""
A struct to store each insertion/deletion method using its power,
its weight, and its score on the last segment
"""
mutable struct Power
name::String
value::Float64
weight::Dict{Symbol, Float64}
scores::Dict{Symbol, Float64}
count::Dict{Symbol, Int64}
end
""" initialize four noise values, none, low, med, and high """
function initialize_noise(weights, scores, count, noise)
none = Power("additive", 0.0, Dict(:early => 1.0, :mid => 1.0, :late => 1.0),
copy(scores), copy(count))
low = Power("additive", 0.25, Dict(:early => 1.0, :mid => 1.0, :late => 1.0),
copy(scores), copy(count))
high = Power("additive", 0.75, Dict(:early => 1.0, :mid => 1.0, :late => 1.0),
copy(scores), copy(count))
sublow = Power("subset", 0.5, Dict(:early => 1.0, :mid => 1.0, :late => 1.0),
copy(scores), copy(count))
subhigh = Power("subset", 0.25, Dict(:early => 1.0, :mid => 1.0, :late => 1.0),
copy(scores), copy(count))
if noise == "None"
noises = [none]
elseif noise == "Add"
noises = [none, low, high]
elseif noise == "Subset"
noises = [none, sublow, subhigh]
else
noises = [none, low, high, sublow, subhigh]
end
return noises
end
"""
initialize the insertion methods. Use powers between -10 and 10.
The spacing between weights is chosen so that when you increase the weight,
the probability of selecting something that is a given distance farther
is cut in half.
"""
function initialize_powers(param)
weights = Dict(:early => 1.0, :mid => 1.0, :late => 1.0)
scores = Dict(:early => 0, :mid => 0, :late => 0)
count = Dict(:early => 0, :mid => 0, :late => 0)
insertionpowers = Power[]
for insertion in param[:insertions]
if insertion == "cheapest"
push!(insertionpowers, Power(insertion, 0.0, copy(weights), copy(scores),
copy(count)))
else
for value in param[:insertion_powers]
push!(insertionpowers, Power(insertion, value, copy(weights), copy(scores),
copy(count)))
end
end
end
removalpowers = Power[]
# use only positive powers for randworst -- -ve corresponds to "best removal"
for removal in param[:removals]
if removal == "segment"
push!(removalpowers, Power(removal, 0.0, copy(weights), copy(scores),
copy(count)))
else
for value in param[:removal_powers]
if removal == "distance"
value == 0.0 && continue # equivalent to randworst with 0.0
value *= -1.0 # for distance, we want to find closest vertices
end
push!(removalpowers, Power(removal, value, copy(weights), copy(scores),
copy(count)))
end
end
end
noises = initialize_noise(weights, scores, count, param[:noise])
# store the sum of insertion and deletion powers for roulette selection
powers = Dict("insertions" => insertionpowers,
"removals" => removalpowers,
"noise" => noises,
"insertion_total" => total_power_weight(insertionpowers),
"removal_total" => total_power_weight(removalpowers),
"noise_total" => total_power_weight(noises),
)
return powers
end
"""
sums the weights for all the powers (i.e., the insertion or deletion methods)
"""
function total_power_weight(powers::Array{Power, 1})
total_weight = Dict(:early => 0.0, :mid => 0.0, :late => 0.0)
for phase in keys(total_weight)
for i = 1:length(powers)
total_weight[phase] += powers[i].weight[phase]
end
end
return total_weight
end
"""
function takes in a set of bins and a weights array of the same length
and selects a bin with probability proportional to its weight
"""
function power_select(powers, total_weight, phase::Symbol)
selection = rand()*total_weight[phase]
for i = 1:length(powers)
if selection < powers[i].weight[phase]
return powers[i]
end
selection -= powers[i].weight[phase]
end
return powers[1] # should never hit this case, but if you do, return first bin?
end
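# Illustrative example (hypothetical numbers): with three powers whose weights in the
# current phase are 2.0, 1.0 and 1.0 (total weight 4.0), the first power is selected
# with probability 2/4 and each of the other two with probability 1/4.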
"""
Update both insertion and deletion powers along with the total weights
if we are a multiple of param[:adaptive_iter] iterations in trial
"""
function power_update!(powers, param::Dict{Symbol,Any})
for phase in [:early, :mid, :late]
power_weight_update!(powers["insertions"], param, phase)
power_weight_update!(powers["removals"], param, phase)
power_weight_update!(powers["noise"], param, phase)
end
powers["insertion_total"] = total_power_weight(powers["insertions"])
powers["removal_total"] = total_power_weight(powers["removals"])
powers["noise_total"] = total_power_weight(powers["noise"])
end
"""
Update only at the end of each trial -- update based on average success
over the trial
"""
function power_weight_update!(powers::Array{Power, 1}, param::Dict{Symbol,Any},
phase::Symbol)
for power in powers
if power.count[phase] > 0 && param[:cold_trials] > 0 # average after 2nd trial
power.weight[phase] = param[:epsilon] * power.scores[phase]/power.count[phase] +
(1 - param[:epsilon]) * power.weight[phase]
elseif power.count[phase] > 0
power.weight[phase] = power.scores[phase]/power.count[phase]
end
power.scores[phase] = 0
power.count[phase] = 0
end
end
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 12743 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Select a removal and an insertion method using powers, and then perform
removal followed by insertion on tour. Operation done in place.
"""
function remove_insert(current::Tour, best::Tour, dist::Array{Int64,2}, member::Array{Int64,1},
setdist::Distsv, sets::Array{Any,1},
powers, param::Dict{Symbol,Any}, phase::Symbol)
# make a new tour to perform the insertion and deletion on
trial = Tour(copy(current.tour), current.cost)
pivot_tour!(trial.tour)
num_removals = rand(param[:min_removals]:param[:max_removals])
removal = power_select(powers["removals"], powers["removal_total"], phase)
if removal.name == "distance"
sets_to_insert = distance_removal!(trial.tour, dist, num_removals,
member, removal.value)
elseif removal.name == "worst"
sets_to_insert = worst_removal!(trial.tour, dist, num_removals,
member, removal.value)
else
sets_to_insert = segment_removal!(trial.tour, num_removals, member)
end
randomize_sets!(sets, sets_to_insert)
# then perform insertion
insertion = power_select(powers["insertions"], powers["insertion_total"], phase)
noise = power_select(powers["noise"], powers["noise_total"], phase)
if insertion.name == "cheapest"
cheapest_insertion!(trial.tour, sets_to_insert, dist, setdist, sets)
else
randpdf_insertion!(trial.tour, sets_to_insert, dist, setdist, sets,
insertion.value, noise)
end
rand() < param[:prob_reopt] && opt_cycle!(trial, dist, sets, member, param, setdist, "partial")
# update power scores for remove and insert
score = 100 * max(current.cost - trial.cost, 0)/current.cost
insertion.scores[phase] += score
insertion.count[phase] += 1
removal.scores[phase] += score
removal.count[phase] += 1
noise.scores[phase] += score
noise.count[phase] += 1
return trial
end
"""
Select an integer between 1 and num according to
an exponential distribution with lambda = power
# goes from left of array if power is positive
# and right of array if it is negative
"""
function select_k(num::Int64, power::Float64)
base = (1/2)^abs(power)
# (1 - base^num)/(1 - base) is sum of geometric series
rand_select = (1 - base^num)/(1 - base) * rand()
bin = 1
@inbounds for k = 1:num
if rand_select < bin
return (power >= 0 ? (num - k + 1) : k)
end
rand_select -= bin
bin *= base
end
return (power >=0 ? num : 1)
end
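# Worked example (illustrative): with num = 3 and power = 1.0, base = 1/2 and the bins
# have sizes 1, 1/2 and 1/4 (total 7/4), so k = 1, 2, 3 are drawn with probabilities
# 4/7, 2/7 and 1/7 respectively; since power > 0 the function returns num - k + 1,
# i.e. the largest value (num) is returned with probability 4/7.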
"""
selecting a random k in 1 to length(weights) according to power
and then selecting the kth smallest element in weights
"""
function pdf_select(weights::Array{Int64,1}, power::Float64)
power == 0.0 && return rand(1:length(weights))
power > 9.0 && return rand_select(weights, maximum(weights))
power < - 9.0 && return rand_select(weights, minimum(weights))
# select kth smallest. If 1 or length(weights), simply return
k = select_k(length(weights), power)
k == 1 && return rand_select(weights, minimum(weights))
k == length(weights) && return rand_select(weights, maximum(weights))
val = partialsort(weights, k)
return rand_select(weights, val)
end
""" choose set with pdf_select, and then insert in best place with noise """
function randpdf_insertion!(tour::Array{Int64,1}, sets_to_insert::Array{Int64,1},
dist::Array{Int64, 2}, setdist::Distsv,
sets::Array{Any, 1}, power::Float64, noise::Power)
mindist = [typemax(Int64) for i=1:length(sets_to_insert)]
@inbounds for i = 1:length(sets_to_insert)
set = sets_to_insert[i]
for vertex in tour
if setdist.min_sv[set, vertex] < mindist[i]
mindist[i] = setdist.min_sv[set, vertex]
end
end
end
new_vertex_in_tour = 0
@inbounds while length(sets_to_insert) > 0
if new_vertex_in_tour != 0
for i = 1:length(sets_to_insert)
set = sets_to_insert[i]
if setdist.min_sv[set, new_vertex_in_tour] < mindist[i]
mindist[i] = setdist.min_sv[set, new_vertex_in_tour]
end
end
end
set_index = pdf_select(mindist, power) # select set to insert from pdf
# find the closest vertex and the best insertion in that vertex
nearest_set = sets_to_insert[set_index]
if noise.name == "subset"
bestv, bestpos = insert_subset_lb(tour, dist, sets[nearest_set], nearest_set,
setdist, noise.value)
else
bestv, bestpos =
insert_lb(tour, dist, sets[nearest_set], nearest_set, setdist, noise.value)
end
insert!(tour, bestpos, bestv) # perform the insertion
new_vertex_in_tour = bestv
# remove the inserted set from data structures
splice!(sets_to_insert, set_index)
splice!(mindist, set_index)
end
end
"""
choose vertex that can be inserted most cheaply, and insert it in that position
"""
function cheapest_insertion!(tour::Array{Int64,1}, sets_to_insert::Array{Int64,1},
dist::Array{Int64, 2}, setdist::Distsv, sets::Array{Any, 1})
while length(sets_to_insert) > 0
best_cost = typemax(Int64)
best_v = 0
best_pos = 0
best_set = 0
for i = 1:length(sets_to_insert)
set_ind = sets_to_insert[i]
# find the best place to insert the vertex
best_v, best_pos, cost = insert_cost_lb(tour, dist, sets[set_ind], set_ind, setdist,
best_v, best_pos, best_cost)
if cost < best_cost
best_set = i
best_cost = cost
end
end
# now, perform the insertion
insert!(tour, best_pos, best_v)
# remove the inserted set from data structures
splice!(sets_to_insert, best_set)
end
end
"""
Given a tour and a set, this function finds the vertex in the set with minimum
insertion cost, along with the position of this insertion in the tour. If
best_position is i, then vertex should be inserted between tour[i-1] and tour[i].
"""
@inline function insert_lb(tour::Array{Int64,1}, dist::Array{Int64,2}, set::Array{Int64, 1},
setind::Int, setdist::Distsv, noise::Float64)
best_cost = typemax(Int64)
bestv = 0
bestpos = 0
@inbounds for i = 1:length(tour)
v1 = prev_tour(tour, i)
lb = setdist.vert_set[v1, setind] + setdist.set_vert[setind, tour[i]] - dist[v1, tour[i]]
lb > best_cost && continue
for v in set
insert_cost = dist[v1, v] + dist[v, tour[i]] - dist[v1, tour[i]]
noise > 0.0 && (insert_cost += round(Int64, noise * rand() * abs(insert_cost)))
if insert_cost < best_cost
best_cost = insert_cost
bestv = v
bestpos = i
end
end
end
return bestv, bestpos
end
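# Note on usage (added for clarity): the returned bestpos is intended for
# insert!(tour, bestpos, bestv), which places bestv immediately before the old
# tour[bestpos]; when bestpos == 1 the vertex ends up between tour[end] and tour[1].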
@inline function insert_subset_lb(tour::Array{Int64,1}, dist::Array{Int64,2}, set::Array{Int64, 1},
setind::Int, setdist::Distsv, noise::Float64)
best_cost = typemax(Int64)
bestv = 0
bestpos = 0
tour_inds = collect(1:length(tour))
@inbounds for i = 1:ceil(Int64, length(tour) * noise)
i = incremental_shuffle!(tour_inds, i)
v1 = prev_tour(tour, i)
lb = setdist.vert_set[v1, setind] + setdist.set_vert[setind, tour[i]] - dist[v1, tour[i]]
lb > best_cost && continue
for v in set
insert_cost = dist[v1, v] + dist[v, tour[i]] - dist[v1, tour[i]]
if insert_cost < best_cost
best_cost = insert_cost
bestv = v
bestpos = i
end
end
end
return bestv, bestpos
end
############ Initial Tour Construction ##########################
"""build tour from scratch on a cold restart"""
function initial_tour!(lowest::Tour, dist::Array{Int64, 2}, sets::Array{Any, 1},
setdist::Distsv, trial_num::Int64, param::Dict{Symbol,Any})
sets_to_insert = collect(1:param[:num_sets])
best = Tour(Int64[], typemax(Int64))
# compute random initial tour only if past first trial
# in this case, randomly choose between random and insertion tour.
if param[:init_tour] == "rand" && (trial_num > 1) && (rand() < 0.5)
random_initial_tour!(best.tour, sets_to_insert, dist, sets)
else
random_insertion!(best.tour, sets_to_insert, dist, sets, setdist)
end
best.cost = tour_cost(best.tour, dist)
lowest.cost > best.cost && (lowest = best)
return best
end
"""
Randomly shuffle the sets, and then insert the best vertex from each set back into
the tour where sets are considered in shuffled order.
"""
function random_insertion!(tour::Array{Int64,1}, sets_to_insert::Array{Int64,1},
dist::Array{Int64, 2}, sets::Array{Any, 1}, setdist::Distsv)
shuffle!(sets_to_insert) # randomly permute the sets
for set in sets_to_insert
# only have to compute the insert cost for the changed portion of the tour
if isempty(tour)
best_vertex = rand(sets[set])
best_position = 1
else
best_vertex, best_position = insert_lb(tour, dist, sets[set], set, setdist, 0.75)
end
# now, perform the insertion
insert!(tour, best_position, best_vertex)
end
end
"""
Randomly shuffle the sets, and then add a randomly chosen vertex from each set to
the tour, with sets considered in shuffled order.
"""
function random_initial_tour!(tour::Array{Int64,1}, sets_to_insert::Array{Int64,1},
dist::Array{Int64, 2}, sets::Array{Any, 1})
shuffle!(sets_to_insert)
for set in sets_to_insert
push!(tour, rand(sets[set]))
end
end
######################### Removals ################################
"""
Remove the vertices randomly, but biased towards those that add the most length to the
tour. Bias is based on the power input. Vertices are then selected via pdf select.
"""
function worst_removal!(tour::Array{Int64,1}, dist::Array{Int64, 2},
num_to_remove::Int64, member::Array{Int64,1}, power::Float64)
deleted_sets = Array{Int}(undef, 0)
while length(deleted_sets) < num_to_remove
removal_costs = worst_vertices(tour, dist)
ind = pdf_select(removal_costs, power)
set_to_delete = member[tour[ind]]
# perform the deletion
push!(deleted_sets, set_to_delete)
splice!(tour, ind)
end
return deleted_sets
end
""" removing a single continuos segment of the tour of size num_remove """
function segment_removal!(tour::Array{Int64, 1}, num_to_remove::Int64, member::Array{Int64,1})
i = rand(1:length(tour))
deleted_sets = Array{Int}(undef, 0)
while length(deleted_sets) < num_to_remove
i > length(tour) && (i = 1)
push!(deleted_sets, member[tour[i]])
splice!(tour, i)
end
return deleted_sets
end
""" pick a random vertex, and delete its closest neighbors """
function distance_removal!(tour::Array{Int64,1}, dist::Array{Int64, 2},
num_to_remove::Int64, member::Array{Int64,1}, power::Float64)
deleted_sets = Array{Int}(undef, 0)
deleted_vertices = Array{Int}(undef, 0)
seed_index = rand(1:length(tour))
push!(deleted_sets, member[tour[seed_index]])
push!(deleted_vertices, tour[seed_index])
splice!(tour, seed_index)
while length(deleted_sets) < num_to_remove
# pick a random vertex from the set of deleted vertices
seed_vertex = rand(deleted_vertices)
# find closest vertex to the seed vertex that's still in the tour
mindist = zeros(Int64, length(tour))
for i = 1:length(tour)
mindist[i] = min(dist[seed_vertex, tour[i]], dist[tour[i], seed_vertex])
end
del_index = pdf_select(mindist, power)
push!(deleted_sets, member[tour[del_index]])
push!(deleted_vertices, tour[del_index])
splice!(tour, del_index)
end
return deleted_sets
end
"""
determine the cost of removing each vertex from the tour, given that all others remain.
"""
function worst_vertices(tour::Array{Int64, 1}, dist::Array{Int64, 2})
removal_cost = zeros(Int64, length(tour))
@inbounds for i = 1:length(tour)
if i == 1
removal_cost[i] = dist[tour[end], tour[i]] +
dist[tour[i], tour[i+1]] - dist[tour[end], tour[i+1]]
elseif i == length(tour)
removal_cost[i] = dist[tour[i-1], tour[i]] +
dist[tour[i], tour[1]] - dist[tour[i-1], tour[1]]
else
removal_cost[i] = dist[tour[i-1], tour[i]] +
dist[tour[i], tour[i+1]] - dist[tour[i-1], tour[i+1]]
end
end
return removal_cost
end
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 4425 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Three different parameter settings for the GTSP solver
default -- moderate runtime. ~ 10-20 seconds for 100 sets
fast -- very quick, lower quality solution
slow -- slower, but potentially higher quality solution
"""
function parameter_settings(num_vertices, num_sets, sets, problem_instance, args)
args = Dict(args)
mode = get(args, :mode, "default")
############ default setting #####################
if mode == "default"
num_iterations = get(args, :num_iterations, 60) * num_sets
param = Dict(
:cold_trials => get(args, :trials, 5),
:warm_trials => get(args, :restarts, 3),
:max_time => get(args, :max_time, 360),
:init_tour => get(args, :init_tour, "rand"),
:prob_reopt => get(args, :reopt, 1.0),
:accept_percentage => 0.05,
:prob_accept => 10.0/num_iterations,
:num_iterations => num_iterations,
:latest_improvement => num_iterations/2,
:first_improvement => num_iterations/4,
:max_removals => min(100, max(round(Int64, 0.3*num_sets), 1)),
:insertions => ["randpdf", "cheapest"],
)
################## fast ##########################
elseif mode == "fast"
num_iterations = get(args, :num_iterations, 60) * num_sets
param = Dict(
:cold_trials => get(args, :trials, 3),
:warm_trials => get(args, :restarts, 2),
:max_time => get(args, :max_time, 300),
:init_tour => get(args, :init_tour, "insertion"),
:prob_reopt => get(args, :reopt, 0.2),
:accept_percentage => 0.05,
:prob_accept => 10.0/num_iterations,
:num_iterations => num_iterations,
:latest_improvement => num_iterations/4,
:first_improvement => num_iterations/6,
:max_removals => min(20, max(round(Int64, 0.1*num_sets), 1)),
:insertions => ["randpdf"],
)
################## attempt slow search ##########################
elseif mode == "slow"
num_iterations = get(args, :num_iterations, 150) * num_sets
param = Dict(
:cold_trials => get(args, :trials, 10),
:warm_trials => get(args, :restarts, 5),
:max_time => get(args, :max_time, 1200),
:init_tour => get(args, :init_tour, "rand"),
:prob_reopt => get(args, :reopt, 1.0),
:accept_percentage => 0.05,
:prob_accept => 10.0/num_iterations,
:num_iterations => num_iterations,
:latest_improvement => num_iterations/3,
:first_improvement => num_iterations/6,
:max_removals => max(round(Int64, 0.4*num_sets), 1),
:insertions => ["randpdf", "cheapest"],
)
else
error("mode not recognized. Use default, fast, or slow")
end
# insertion algs
if get(args, :insertion_algs, "default") == "cheapest"
param[:insertions] = ["randpdf", "cheapest"]
elseif get(args, :insertion_algs, "default") == "randpdf"
param[:insertions] = ["randpdf"]
end
# insertion powers
if get(args, :insertion_algs, "default") == "classic"
param[:insertion_powers] = [-10.0, 0.0, 10.0]
else
param[:insertion_powers] = [-10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0]
end
# removal algs and powers
if get(args, :removal_algs, "default") == "classic"
param[:removals] = ["worst"]
param[:removal_powers] = [0.0, 10.0]
else
param[:removals] = ["distance", "worst", "segment"]
param[:removal_powers] = [-0.5, 0.0, 0.5, 1.0, 10.0]
end
# parameters that are common for all modes
param[:mode] = mode
param[:problem_instance] = split(problem_instance, "/")[end]
param[:num_sets] = num_sets
param[:num_vertices] = num_vertices
param[:output_file] = get(args, :output, "None")
param[:print_output] = get(args, :verbose, 3)
param[:epsilon] = get(args, :epsilon, 0.5)
param[:noise] = get(args, :noise, "Add")
param[:adaptive_iter] = 1
param[:print_time] = 5
param[:budget] = get(args, :budget, typemin(Int64))
param[:timeout] = false
param[:budget_met] = false
param[:min_set] = min_set(sets)
param[:min_removals] = (param[:max_removals] > 1 ? 2 : 1)
print_params(param)
return param
end | GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 20037 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######### Input Parsing ############################
"""
parse the problem instance file given as a text file with the following format:
N: int
M: int
Symmetric: true/false
Triangle: true/false
1 5
5 1 8 9 10 11
8 2 3 4 6 7 12 13 14
DISTANCE_MATRIX
Note: the line 5 1 8 9 10 11 shows that the second set contains 5 vertices:
1,8,9,10,11.
TSPLIB Parser defined by:
http://comopt.ifi.uni-heidelberg.de/software/TSPLIB95/TSPFAQ.html
"""
function read_file(filename)
if !isfile(filename)
error("the problem instance ", filename, " does not exist")
end
# Setup
INF = 9999
RE_INT = r"-?\d+"
RE_DEC = r"-?\d+\.\d+"
RE_NUMBER = r"-?\d+\.?\d*"
parse_state = "UNKNOWN_FORMAT"
data_type = ""
data_format = ""
dist = zeros(Int64, 0, 0)
sets = Any[]
vid00 = vid01 = 1
coords = Any[]
set_data = Int64[]
num_vertices = -1
num_sets = -1
# parse
s = open(filename, "r")
for line in readlines(s)
line = strip(line)
# debug
# println(line)
# println(parse_state)
# auto format select
if parse_state == "UNKNOWN_FORMAT"
if occursin(r"^\s*NAME\s*:\s*\w+", uppercase(line))
parse_state = "TSPLIB_HEADER"
elseif occursin(r"^\s*N\s*:\s*\w+", uppercase(line))
parse_state = "SIMPLE_HEADER"
end
end
# Parse Setup
if parse_state == "TSPLIB_HEADER"
value = strip(split(line,":")[end])
if occursin(r"^\s*NAME\s*:\s*\w+", uppercase(line))
elseif occursin(r"^\s*TYPE\s*:\s*\w+\s*$", uppercase(line))
elseif occursin(r"^\s*DIMENSION\s*:\s*\d+\s*$", uppercase(line))
num_vertices = parse(Int64, value)
dist = zeros(Int64, num_vertices, num_vertices)
elseif occursin(r"^\s*GTSP_SETS\s*:\s*\d+\s*$", uppercase(line))
num_sets = parse(Int64, value)
elseif occursin(r"^\s*EDGE_WEIGHT_TYPE\s*:\s*\w+\s*$", uppercase(line))
data_type = value
data_format = value
elseif occursin(r"^\s*EDGE_WEIGHT_FORMAT\s*:\s*\w+\s*$", uppercase(line))
if data_type == "EXPLICIT"
data_format = value
end
elseif occursin(r"^\s*EDGE_WEIGHT_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_MATRIX_DATA"
elseif occursin(r"^\s*NODE_COORD_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_COORD_DATA"
end
# Parse matrix data
elseif parse_state == "TSPLIB_MATRIX_DATA"
if occursin(r"^[\d\se+-\.]+$", line)
for x in split(line)
cost = parse(Int64, x)
# tested
if data_format == "FULL_MATRIX"
dist[vid00, vid01] = cost
vid01 += 1
if vid01 > num_vertices
vid00 += 1
vid01 = 1
end
# tested
elseif data_format == "LOWER_DIAG_ROW"
dist[vid00, vid01] = cost
dist[vid01, vid00] = cost
vid01 += 1
if vid01 > vid00
vid00 += 1
vid01 = 1
end
# not tested
elseif data_format == "LOWER_ROW"
println("WARNING: Not tested")
if vid00 == 0 && vid01 == 0
vid00 = 1
end
dist[vid00, vid01] = cost
dist[vid01, vid00] = cost
vid01 += 1
if vid01 >= vid00
vid00 += 1
vid01 = 0
end
# tested
elseif data_format == "UPPER_DIAG_ROW"
dist[vid00, vid01] = cost
dist[vid01, vid00] = cost
vid01 += 1
if vid01 > num_vertices
vid00 += 1
vid01 = vid00
end
# tested
elseif data_format == "UPPER_ROW"
if vid00 == 1 && vid01 == 1
vid01 = 2
end
dist[vid00, vid01] = cost
dist[vid01, vid00] = cost
vid01 += 1
if vid01 > num_vertices
vid00 += 1
vid01 = vid00+1
end
end
end
elseif occursin(r"^\s*DISPLAY_DATA_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_DISPLAY_DATA"
elseif occursin(r"^\s*GTSP_SET_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_SET_DATA"
end
# Parse display data
elseif parse_state == "TSPLIB_DISPLAY_DATA"
if occursin(r"^\s*GTSP_SET_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_SET_DATA"
end
# Parse coord data
elseif parse_state == "TSPLIB_COORD_DATA"
if occursin(r"\s*\d+\s*", uppercase(line))
coord = [parse(Float64, x) for x = split(line)[2:end]]
push!(coords, coord)
elseif occursin(r"^\s*GTSP_SET_SECTION\s*:?\s*$", uppercase(line))
parse_state = "TSPLIB_SET_DATA"
end
# Parse set data
elseif parse_state == "TSPLIB_SET_DATA"
if occursin(r"\d+", uppercase(line))
for x = split(line)
push!(set_data, parse(Int64,x))
end
elseif occursin(r"^\s*EOF\s*$", uppercase(line))
parse_state = "TSPLIB"
end
# Parse header (simple)
elseif parse_state == "SIMPLE_HEADER"
if occursin(r"^\s*N\s*:\s*\w+", uppercase(line))
value = strip(split(strip(line),":")[end])
num_vertices = parse(Int64, value)
dist = zeros(Int64, num_vertices, num_vertices)
elseif occursin(r"^\s*M\s*:\s*\d+\s*$", uppercase(line))
value = strip(split(strip(line),":")[end])
num_sets = parse(Int64, value)
parse_state = "SIMPLE_SETS"
end
# Parse set data (simple)
elseif parse_state == "SIMPLE_SETS"
if occursin(r"^[\d\se+-\.]+$", line)
sid = parse(Int64, split(line)[1])
set = [parse(Int64, x) for x in split(line)[2:end]]
push!(sets, set)
if sid == num_sets
parse_state = "SIMPLE_MATRIX"
end
end
# Parse set data (simple)
elseif parse_state == "SIMPLE_MATRIX"
if occursin(r"^[\d\se+-\.]+$", line)
for x in split(line)
cost = parse(Int64, x)
dist[vid00, vid01] = cost
vid01 += 1
if vid01 > num_vertices
vid00 += 1
vid01 = 1
end
end
else
parse_state = "SIMPLE"
end
end
end
close(s)
if occursin(r"TSPLIB", parse_state)
parse_state = "TSPLIB"
end
# Convert coordinate data to matrix data
if parse_state == "TSPLIB" && data_type != "EXPLICIT"
# tested
if data_format == "EUC_2D"
for vid00 in 1:num_vertices
for vid01 in 1:num_vertices
if vid00 == vid01
dist[vid00, vid01] = INF
else
dx = coords[vid00][1] - coords[vid01][1]
dy = coords[vid00][2] - coords[vid01][2]
cost = sqrt(dx^2 + dy^2)
dist[vid00, vid01] = nint(cost)
end
end
end
# not tested
elseif data_format == "MAN_2D"
println("Warning: MAN_2D not tested")
for vid00 in 1:num_vertices
for vid01 in 1:num_vertices
if vid00 == vid01
dist[vid00, vid01] = INF
else
dx = coords[vid00][1] - coords[vid01][1]
dy = coords[vid00][2] - coords[vid01][2]
cost = abs(dx) + abs(dy)
dist[vid00, vid01] = nint(cost)
end
end
end
# not working...
elseif data_format == "GEO"
RRR = 6378.388
PI = 3.141592
DEBUG01 = false
TSPLIB_GEO = true
for vid00 in 1:num_vertices
d, m = degree_minutes(coords[vid00][1])
lat00 = PI * (d + 5.0 * m / 3.0) / 180.0
d, m = degree_minutes(coords[vid00][2])
long00 = PI * (d + 5.0 * m / 3.0) / 180.0
for vid01 in 1:num_vertices
d, m = degree_minutes(coords[vid01][1])
lat01 = PI * (d + 5.0 * m / 3.0) / 180.0
d, m = degree_minutes(coords[vid01][2])
long01 = PI * (d + 5.0 * m / 3.0) / 180.0
if vid00 == vid01
dist[vid00, vid01] = INF
else
if TSPLIB_GEO
q1 = cos(long00 - long01)
q2 = cos(lat00 - lat01)
q3 = cos(lat00 + lat01)
cost = RRR * acos(0.5 * ((1.0 + q1) * q2 - (1.0 - q1) * q3)) + 1.0
dist[vid00, vid01] = floor(Int64, cost)
else
# http://andrew.hedges.name/experiments/haversine/
R = 6373
dlat = abs(coords[vid01][1] - coords[vid00][1])
dlong = abs(coords[vid01][2] - coords[vid00][2])
a = (sind(dlat/2))^2 + cosd(coords[vid00][1])*cosd(coords[vid01][1])*(sind(dlong/2))^2
c = 2*atan(sqrt(a), sqrt(1-a))
cost = R * c
dist[vid00, vid01] = floor(Int64, cost)
end
if DEBUG01
println("lat00 = ", coords[vid00][1], ", long00 = ", coords[vid00][2], ", lat01 = ", coords[vid01][1], ", long01 = ", coords[vid01][2], ", dist = ", floor(Int64, cost))
DEBUG01 = false
end
end
end
end
# tested
elseif data_format == "ATT"
for vid00 in 1:num_vertices
for vid01 in 1:num_vertices
if vid00 == vid01
dist[vid00, vid01] = INF
else
dx = coords[vid00][1] - coords[vid01][1]
dy = coords[vid00][2] - coords[vid01][2]
r = sqrt((dx^2 + dy^2)/10.0)
cost = ceil(r)
dist[vid00, vid01] = nint(cost)
end
end
end
# tested: not sure if working or not
elseif data_format == "CEIL_2D"
for vid00 in 1:num_vertices
for vid01 in 1:num_vertices
if vid00 == vid01
dist[vid00, vid01] = INF
else
dx = coords[vid00][1] - coords[vid01][1]
dy = coords[vid00][2] - coords[vid01][2]
cost = sqrt(dx^2 + dy^2)
dist[vid00, vid01] = ceil(cost)
end
end
end
else
error("coordinate type $data_format not supported")
end
end
# construct sets
if parse_state == "TSPLIB"
i = 1
sid00 = 1
set = Int64[]
set_data = set_data[2:end]
while i <= length(set_data)
x = set_data[i]
if x == -1
push!(sets, set)
set = Int64[]
i += 1 # skip set id
else
push!(set, x)
end
i += 1
end
if num_sets != length(sets)
error("number of sets doesn't match set size")
end
end
if length(sets) <= 1
error("must have more than 1 set")
end
membership = findmember(num_vertices, sets)
return num_vertices, num_sets, sets, dist, membership
end
""" Computing degrees and minutes for GEO instances """
function degree_minutes(num)
if num > 0
deg = floor(Int64, num)
return deg, num - deg
else
deg = ceil(Int64, num)
return deg, num - deg
end
end
#####################################################
#####################################################
######### Output Printing ###########################
""" print the main parameter settings """
function print_params(param::Dict{Symbol,Any})
if param[:print_output] > 0
println("\n", "--------- Problem Data ------------")
println("Instance Name : ", param[:problem_instance])
println("Number of Vertices : ", param[:num_vertices])
println("Number of Sets : ", param[:num_sets])
println("Initial Tour : ", (param[:init_tour] == "rand" ?
"Random" : "Random Insertion"))
println("Maximum Removals : ", param[:max_removals])
println("Trials : ", param[:cold_trials])
println("Restart Attempts : ", param[:warm_trials])
println("Rate of Adaptation : ", param[:epsilon])
println("Prob of Reopt : ", param[:prob_reopt])
println("Maximum Time : ", param[:max_time])
println("Tour Budget : ", (param[:budget] == typemin(Int64) ?
"None" : param[:budget]))
println("-----------------------------------\n")
end
end
"""
Print the powers in an easy-to-read format for debugging the adaptive weights
"""
function print_powers(powers)
println("-- Printing powers -- ")
println("Insertions:")
for power in powers["insertions"]
println(power.name, " ", power.value, ": ", power.weight)
end
println("\n Removals:")
for power in powers["removals"]
println(power.name, " ", power.value, ": ", power.weight)
end
print("\n")
println("\n Noises:")
for power in powers["noise"]
println(power.name, " ", power.value, ": ", power.weight)
end
print("\n")
end
"""print statement at the beginning of a cold trial"""
function print_cold_trial(count::Dict{Symbol,Real}, param::Dict{Symbol,Any}, best::Tour)
if param[:print_output] == 2
println("\n||--- trial ", count[:cold_trial],
" --- initial cost ", best.cost, " ---||")
end
end
"""print details at the end of each warm trial"""
function print_warm_trial(count::Dict{Symbol,Real}, param::Dict{Symbol,Any},
best::Tour, iter_count::Int)
if param[:print_output] == 2
println("-- ", count[:cold_trial], ".", count[:warm_trial],
" - iterations ", iter_count, ": ", "cost ", best.cost)
end
end
""" print best cost so far """
function print_best(count::Dict{Symbol,Real}, param::Dict{Symbol,Any},
best::Tour, lowest::Tour, init_time::Float64)
if param[:print_output] == 1 && time() - count[:print_time] > param[:print_time]
count[:print_time] = time()
println("-- trial ", count[:cold_trial], ".", count[:warm_trial], ":",
" Cost = ", min(best.cost, lowest.cost),
" Time = ", round(count[:print_time] - init_time, digits=1), " sec")
elseif (param[:print_output] == 3 && time() - count[:print_time] > 0.5) ||
param[:budget_met] || param[:timeout]
count[:print_time] = time()
if param[:warm_trials] > 0
progress = (count[:cold_trial] - 1)/param[:cold_trials] +
(count[:warm_trial])/param[:warm_trials]/param[:cold_trials]
else
progress = (count[:cold_trial] - 1)/param[:cold_trials]
end
tcurr = round(count[:print_time] - init_time, digits=1)
cost = min(best.cost, lowest.cost)
progress_bar(param[:cold_trials], progress, cost, tcurr)
end
end
""" a string representing the progress bar """
function progress_bar(trials, progress, cost, time)
ticks, trials_per_bar, total_length = 6, 5, 31
progress == 1.0 && (progress -= 0.0001)
n = floor(Int64, progress * trials/trials_per_bar)
start_number = n * trials_per_bar
trials_in_bar = min(trials_per_bar, trials - start_number)
progress_in_bar = (progress * trials - start_number)/trials_in_bar
bar_length = min(total_length - 1, (trials - start_number) * ticks)
progress_bar = "|"
for i=1:total_length
if i == bar_length + 1
progress_bar *= "|"
elseif i > bar_length + 1
progress_bar *= " "
elseif i % ticks == 1
progress_bar *= string(start_number + ceil(Int64, i / ticks))
elseif i <= ceil(Int64, bar_length * progress_in_bar)
progress_bar *= "="
else
progress_bar *= " "
end
end
print(" ", progress_bar, " Cost = ", cost, " Time = ", time, " sec \r")
end
"""print tour summary at end of execution"""
function print_summary(lowest::Tour, timer::Float64, member::Array{Int64,1},
param::Dict{Symbol,Any})
if param[:print_output] == 3 && !param[:timeout] && !param[:budget_met]
progress_bar(param[:cold_trials], 1.0, lowest.cost, round(timer, digits=1))
end
if param[:print_output] > -1
if (param[:print_output] > 0 || param[:output_file] == "None")
println("\n\n", "--------- Tour Summary ------------")
println("Cost : ", lowest.cost)
println("Total Time : ", round(timer, digits=2), " sec")
println("Solver Timeout? : ", param[:timeout])
println("Tour is Feasible? : ", tour_feasibility(lowest.tour, member,
param[:num_sets]))
order_to_print = (param[:output_file] == "None" ?
lowest.tour : "printed to " * param[:output_file])
println("Output File : ", param[:output_file])
println("Tour Ordering : ", order_to_print)
println("-----------------------------------")
end
if param[:output_file] != "None"
s = open(param[:output_file], "w")
write(s, "Problem Instance : ", param[:problem_instance], "\n")
write(s, "Vertices : ", string(param[:num_vertices]), "\n")
write(s, "Sets : ", string(param[:num_sets]), "\n")
write(s, "Comment : To avoid ~0.5sec startup time, use the Julia REPL\n")
write(s, "Host Computer : ", gethostname(), "\n")
write(s, "Solver Time : ", string(round(timer, digits=3)), " sec\n")
write(s, "Tour Cost : ", string(lowest.cost), "\n")
write(s, "Tour : ", string(lowest.tour))
close(s)
end
end
end
"""
init function defined by TSPLIB
http://comopt.ifi.uni-heidelberg.de/software/TSPLIB95/TSPFAQ.html
"""
function nint(x::Float64)
return floor(Int64, x + 0.5)
end
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 7296 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequentially moves each vertex to its best point on the tour.
Repeats until no improving move is found or the move limit is reached
"""
function moveopt!(tour::Array{Int64, 1}, dist::Array{Int64, 2}, sets::Array{Any, 1},
member::Array{Int64,1}, setdist::Distsv)
improvement_found = true
number_of_moves = 0
start_position = 1
@inbounds while improvement_found && number_of_moves < 10
improvement_found = false
for i = start_position:length(tour)
select_vertex = tour[i]
delete_cost = removal_cost(tour, dist, i)
set_ind = member[select_vertex]
splice!(tour, i) # remove vertex from tour
# find the best place to insert the vertex
v, pos, cost = insert_cost_lb(tour, dist, sets[set_ind], set_ind, setdist,
select_vertex, i, delete_cost)
insert!(tour, pos, v)
# check if we found a better position for vertex i
if cost < delete_cost
improvement_found = true
number_of_moves += 1
start_position = min(pos, i) # start looking for swaps where tour change began
break
end
end
end
end
function moveopt_rand!(tour::Array{Int64, 1}, dist::Array{Int64, 2}, sets::Array{Any, 1},
member::Array{Int64,1}, iters::Int, setdist::Distsv)
tour_inds = collect(1:length(tour))
@inbounds for i = 1:iters # i = rand(1:length(tour), iters)
i = incremental_shuffle!(tour_inds, i)
select_vertex = tour[i]
# first check if this vertex should be moved
delete_cost = removal_cost(tour, dist, i)
set_ind = member[select_vertex]
splice!(tour, i) # remove vertex from tour
v, pos, cost = insert_cost_lb(tour, dist, sets[set_ind], set_ind, setdist,
select_vertex, i, delete_cost)
insert!(tour, pos, v)
end
end
"""
compute the cost of inserting vertex v into position i of tour
"""
@inline function insert_cost_lb(tour::Array{Int64,1}, dist::Array{Int64,2}, set::Array{Int64, 1}, setind::Int,
setdist::Distsv, bestv::Int, bestpos::Int, best_cost::Int)
@inbounds for i = 1:length(tour)
v1 = prev_tour(tour, i) # first check lower bound
lb = setdist.vert_set[v1, setind] + setdist.set_vert[setind, tour[i]] - dist[v1, tour[i]]
lb > best_cost && continue
for v in set
insert_cost = dist[v1, v] + dist[v, tour[i]] - dist[v1, tour[i]]
if insert_cost < best_cost
best_cost = insert_cost
bestv = v
bestpos = i
end
end
end
return bestv, bestpos, best_cost
end
"""
determine the cost of removing the vertex at position i in the tour
"""
@inline function removal_cost(tour::Array{Int64, 1}, dist::Array{Int64, 2}, i::Int64)
if i == 1
return dist[tour[end], tour[i]] + dist[tour[i], tour[i+1]] - dist[tour[end], tour[i+1]]
elseif i == length(tour)
return dist[tour[i-1], tour[i]] + dist[tour[i], tour[1]] - dist[tour[i-1], tour[1]]
else
return dist[tour[i-1], tour[i]] + dist[tour[i], tour[i+1]] - dist[tour[i-1], tour[i+1]]
end
end
""" repeatedly perform moveopt and reopt_tour until there is no improvement """
function opt_cycle!(current::Tour, dist::Array{Int64,2}, sets::Array{Any, 1},
member::Array{Int64,1}, param::Dict{Symbol, Any}, setdist::Distsv, use)
current.cost = tour_cost(current.tour, dist)
prev_cost = current.cost
for i=1:5
if i % 2 == 1
current.tour = reopt_tour(current.tour, dist, sets, member, param)
elseif param[:mode] == "fast" || use == "partial"
moveopt_rand!(current.tour, dist, sets, member, param[:max_removals], setdist)
else
moveopt!(current.tour, dist, sets, member, setdist)
end
current.cost = tour_cost(current.tour, dist)
if i > 1 && (current.cost >= prev_cost || use == "partial")
return
end
prev_cost = current.cost
end
end
"""
Given an ordering of the sets, this alg performs BFS to find the
optimal vertex in each set
"""
function reopt_tour(tour::Array{Int64,1}, dist::Array{Int64,2}, sets::Array{Any, 1},
member::Array{Int64,1}, param::Dict{Symbol, Any})
best_tour_cost = tour_cost(tour, dist)
new_tour = copy(tour)
min_index = min_setv(tour, sets, member, param)
tour = [tour[min_index:end]; tour[1:min_index-1]]
prev = zeros(Int64, param[:num_vertices]) # initialize cost_to_come
cost_to_come = zeros(Int64, param[:num_vertices])
@inbounds for start_vertex in sets[member[tour[1]]]
relax_in!(cost_to_come, dist, prev, Int64[start_vertex], sets[member[tour[2]]])
for i = 3:length(tour) # cost to get to ith set on path through (i-1)th set
relax_in!(cost_to_come, dist, prev, sets[member[tour[i-1]]], sets[member[tour[i]]])
end
# find the cost back to the start vertex.
tour_cost, start_prev = relax(cost_to_come, dist, sets[member[tour[end]]], start_vertex)
if tour_cost < best_tour_cost # reconstruct the path
best_tour_cost = tour_cost
new_tour = extract_tour(prev, start_vertex, start_prev)
end
end
return new_tour
end
""" Find the set with the smallest number of vertices """
function min_setv(tour::Array{Int64, 1}, sets::Array{Any, 1}, member::Array{Int64, 1},
param::Dict{Symbol, Any})
min_set = param[:min_set]
@inbounds for i = 1:length(tour)
member[tour[i]] == min_set && return i
end
return 1
end
"""
extracting a tour from the prev pointers.
"""
function extract_tour(prev::Array{Int64,1}, start_vertex::Int64, start_prev::Int64)
tour = [start_vertex]
vertex_step = start_prev
while prev[vertex_step] != 0
push!(tour, vertex_step)
vertex_step = prev[vertex_step]
end
return reverse(tour)
end
"""
outputs the new cost and prev pointer for vertex v2 after relaxing;
it does not actually update the cost array
"""
@inline function relax(cost::Array{Int64, 1}, dist::Array{Int64, 2}, set1::Array{Int64, 1}, v2::Int64)
v1 = set1[1]
min_cost = cost[v1] + dist[v1, v2]
min_prev = v1
@inbounds for i = 2:length(set1)
v1 = set1[i]
newcost = cost[v1] + dist[v1, v2]
if min_cost > newcost
min_cost, min_prev = newcost, v1
end
end
return min_cost, min_prev
end
"""
relaxes the cost of each vertex in the set set2 in-place.
"""
@inline function relax_in!(cost::Array{Int64, 1}, dist::Array{Int64, 2}, prev::Array{Int64, 1},
set1::Array{Int64, 1}, set2::Array{Int64, 1})
@inbounds for v2 in set2
v1 = set1[1]
cost[v2] = cost[v1] + dist[v1, v2]
prev[v2] = v1
for i = 2:length(set1)
v1 = set1[i]
newcost = cost[v1] + dist[v1, v2]
if cost[v2] > newcost
cost[v2], prev[v2] = newcost, v1
end
end
end
end | GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 6152 | # Copyright 2017 Stephen L. Smith and Frank Imeson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################
######### GTSP Utilities ###########################
""" tour type that stores the order array and the length of the tour
"""
mutable struct Tour
tour::Array{Int64,1}
cost::Int64
end
""" return the vertex before tour[i] on tour """
@inline function prev_tour(tour, i)
i != 1 && return tour[i - 1]
return tour[length(tour)]
end
######################################################
############# Randomizing tour before insertions ####
""" some insertions break tie by taking first minimizer -- this
randomization helps avoid getting stuck choosing same minimizer """
function pivot_tour!(tour::Array{Int64,1})
pivot = rand(1:length(tour))
tour .= [tour[pivot:end]; tour[1:pivot-1]] # in-place update so the caller's tour is actually pivoted
end
function randomize_sets!(sets::Array{Any, 1}, sets_to_insert::Array{Int64, 1})
for i in sets_to_insert
shuffle!(sets[i])
end
end
function findmember(num_vertices::Int64, sets::Array{Any, 1})
""" create an array containing the set number for each vertex """
member = zeros(Int64, num_vertices)
num_verts = 0
for i = 1:length(sets)
set = sets[i]
num_verts += length(set)
for vertex in set
if member[vertex] != 0
error("vertex ", vertex, " belongs to more than one set")
else
member[vertex] = i
end
end
end
return member
end
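# Example (for illustration): findmember(5, Any[[1, 3], [2, 4, 5]]) == [1, 2, 1, 2, 2],
# i.e. vertices 1 and 3 belong to set 1 and vertices 2, 4, 5 belong to set 2.
# The Distsv struct below caches the set-vertex distances computed by set_vertex_dist:
# set_vert[s, v] is the minimum distance from any vertex of set s to vertex v,
# vert_set[v, s] is the minimum distance from vertex v to set s, and min_sv[s, v]
# is the minimum over both directions.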
struct Distsv
set_vert::Array{Int64, 2}
vert_set::Array{Int64,2}
min_sv::Array{Int64, 2}
end
function set_vertex_dist(dist::Array{Int64, 2}, num_sets::Int, member::Array{Int64,1})
"""
Computes the minimum distance between each set and each vertex
Also compute the minimum distance from a set to a vertex, ignoring direction
This is used in insertion to choose the next set.
"""
numv = size(dist, 1)
dist_set_vert = typemax(Int64) * ones(Int64, num_sets, numv)
mindist = typemax(Int64) * ones(Int64, num_sets, numv)
dist_vert_set = typemax(Int64) * ones(Int64, numv, num_sets)
for i = 1:numv
for j = 1:numv
set = member[j]
if dist[j,i] < dist_set_vert[set, i]
dist_set_vert[set, i] = dist[j,i]
end
if dist[j,i] < mindist[set, i] # dist from set containing j to vertex i
mindist[set, i] = dist[j, i]
end
set = member[i]
if dist[j,i] < dist_vert_set[j, set] # dist from j to set containing i
dist_vert_set[j, set] = dist[j,i]
end
if dist[j,i] < mindist[set,j] # record as distance from set containing i to j
mindist[set,j] = dist[j,i]
end
end
end
return Distsv(dist_set_vert, dist_vert_set, mindist)
end
function set_vertex_distance(dist::Array{Int64, 2}, sets::Array{Any, 1})
"""
Computes the minimum distance between each set and each vertex
"""
numv = size(dist, 1)
nums = length(sets)
dist_set_vert = typemax(Int64) * ones(Int64, nums, numv)
# dist_vert_set = typemax(Int64) * ones(Int64, numv, nums)
for i = 1:nums
for j = 1:numv
for k in sets[i]
newdist = min(dist[k, j], dist[j, k])
dist_set_vert[i,j] > newdist && (dist_set_vert[i,j] = newdist)
end
end
end
return dist_set_vert
end
""" Find the set with the smallest number of vertices """
function min_set(sets::Array{Any, 1})
min_size = length(sets[1])
min_index = 1
for i = 2:length(sets)
set_size = length(sets[i])
if set_size < min_size
min_size = set_size
min_index = i
end
end
return min_index
end
############################################################
############ Trial acceptance criteria #####################
"""
decide whether or not to accept a trial based on simulated annealing criteria
"""
function accepttrial(trial_cost::Int64, current_cost::Int64, temperature::Float64)
if trial_cost <= current_cost
accept_prob = 2.0
else
accept_prob = exp((current_cost - trial_cost)/temperature)
end
return (rand() < accept_prob ? true : false)
end
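# Worked example (for illustration): accepttrial(105, 100, 10.0) accepts with
# probability exp((100 - 105)/10) = exp(-0.5) ~ 0.61, while any trial with
# trial_cost <= current_cost is always accepted.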
"""
decide whether or not to accept a trial based on simple probability
"""
function accepttrial_noparam(trial_cost::Int64, current_cost::Int64, prob_accept::Float64)
if trial_cost <= current_cost
return true
end
return (rand() < prob_accept ? true : false)
end
###################################################
################ Tour checks ######################
""" Compute the length of a tour """
@inline function tour_cost(tour::Array{Int64,1}, dist::Array{Int64,2})
tour_length = dist[tour[end], tour[1]]
@inbounds for i in 1:length(tour)-1
tour_length += dist[tour[i], tour[i+1]]
end
return tour_length
end
"""
Checks if a tour is feasible in that it visits each set exactly once.
"""
function tour_feasibility(tour::Array{Int64,1}, membership::Array{Int64,1},
num_sets::Int64)
length(tour) != num_sets && return false
set_test = falses(num_sets)
for v in tour
set_v = membership[v]
if set_test[set_v]
return false # a set is visited twice in the tour
end
set_test[set_v] = true
end
for visited_set in set_test
!visited_set && return false
end
return true
end
#####################################################
############# Incremental Shuffle ##################
@inline function incremental_shuffle!(a::AbstractVector, i::Int)
j = i + floor(Int, rand() * (length(a) + 1 - i))
a[j], a[i] = a[i], a[j]
return a[i]
end
""" rand_select for randomize over all minimizers """
@inline function rand_select(a::Array{Int64, 1}, val::Int)
inds = Int[]
@inbounds for i=1:length(a)
a[i] == val && (push!(inds, i))
end
return rand(inds)
end
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | code | 106 | using GLNS
datafile = joinpath( @__DIR__, "..", "examples", "test.gtsp" )
@time GLNS.solver( datafile )
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"Apache-2.0"
] | 1.0.0 | 8adcc1093357f0a1b8aa72ed7b484b389a7bad71 | docs | 4063 | # GLNS
A Generalized Traveling Salesman Problem (GTSP) Solver.
This solver is implemented in Julia (<http://julialang.org/>). It is now **compatible with Julia v1.0. Update Julia to v1.0 before running GLNS.**
More information on the solver is given at <https://ece.uwaterloo.ca/~sl2smith/GLNS/>
## Citing this work
The solver and its settings are described in the following paper
[[DOI]](https://doi.org/10.1016/j.cor.2017.05.010) [[PDF]](https://ece.uwaterloo.ca/~sl2smith/papers/2017COR-GLNS.pdf):
@Article{Smith2017GLNS,
author = {S. L. Smith and F. Imeson},
title = {{GLNS}: An Effective Large Neighborhood Search Heuristic
for the Generalized Traveling Salesman Problem},
journal = {Computers \& Operations Research},
volume = 87,
pages = {1-19},
year = 2017,
}
Please cite this paper when using GLNS.
## Using the solver
GLNS has three default settings: slow, default, and fast.
It also has several flags that can be used to give the solver a
timeout, or to have it quit when a solution cost threshold is met.
The solver can be run from the command line or from the Julia REPL.
The input to the solver is a text file in
[GTSPLIB format](http://www.cs.rhul.ac.uk/home/zvero/GTSPLIB/), which is an extension of the
[TSPLIB format](https://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/).
### Running from the command line
Julia has a startup time of approximately 0.5 seconds, which gives this
option a delay compared to option two below. Download the command-line solver **GLNScmd.jl** from this repository and place it in a convenient location.
The syntax is:
```bash
$ <path_to_script>/GLNScmd.jl <path_to_instance> -options
```
The following are a few examples:
```bash
$ <path_to_script>/GLNScmd.jl test/39rat195.gtsp
$ <path_to_script>/GLNScmd.jl test/39rat195.gtsp -mode=fast -output=tour.txt
# GLNS can also be set to run "persistently" for a given amount of time.
# The following example will run for 60 seconds before terminating.
$ <path_to_script>/GLNScmd.jl test/39rat195.gtsp -max_time=60 -trials=100000
```
### Running from the Julia REPL
For this method you should launch Julia, include the GLNS module, and then
call the solver. This is done as follows:
```julia
$ julia
julia> include("GLNS.jl")
julia> GLNS.solver("<path_to_instance>", options)
```
Here are a few more examples. The first uses the default settings. The
last example is a persistent solver that will run for at most 60 seconds,
but will quit if it finds a tour of cost 13,505 or less (the best known solution
for this instance is 13,502):
```julia
julia> GLNS.solver("test/39rat195.gtsp")
julia> GLNS.solver("test/39rat195.gtsp", mode="slow")
julia> GLNS.solver("test/107si535.gtsp", max_time=60, budget=13505, trials=100000)
```
## Index of files
The GLNS solver package is arranged as follows.
- GLNScmd.jl -- Command line solver
- examples/ -- contains sample GTSP instances for testing and as example inputs
- src/ -- contains
- GLNS.jl --- Main Julia solver
- adaptive_powers.jl
- insertion_deletion.jl
- parameter_defaults.jl
- parse_print.jl
- tour_optimizations.jl
- utilities.jl
- test/ -- test scripts for installation verification
## License
Copyright 2018 Stephen L. Smith and Frank Imeson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
## Contact information
Prof. Stephen L. Smith
Department of Electrical and Computer Engineering
University of Waterloo
Waterloo, ON Canada
web: <https://ece.uwaterloo.ca/~sl2smith/>
email: <[email protected]>
| GLNS | https://github.com/stephenlsmith/GLNS.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 412 | using Documenter, LombScargle
# # Generate all images
# include(joinpath(@__DIR__, "src", "images.jl"))
cp(joinpath(@__DIR__, "..", "perf", "benchmarks.png"),
joinpath(@__DIR__, "src", "benchmarks.png"))
makedocs(
modules = [LombScargle],
sitename = "LombScargle",
)
deploydocs(
repo = "github.com/JuliaAstro/LombScargle.jl.git",
target = "build",
deps = nothing,
make = nothing,
)
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 1461 | #!/usr/bin/env julia
### images.jl
#
# Copyright (C) 2016 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Commentary:
#
# This file produces images used in documentation.
#
### Code:
using StableRNGs
rng = StableRNG(42)
using Measurements, LombScargle, Plots
sz = (1200, 800)
# First periodograms
ntimes = 1001
t = range(0.01, stop=10pi, length=ntimes)
t += step(t)*rand(rng, ntimes)
s = sinpi.(t) .+ 1.5 .* cospi.(2t) .+ rand(rng, ntimes) # per-point noise
p = lombscargle(t, s)
plot(freqpower(p)..., size = sz, xlim = (0.0, 2.0), xlabel = "Frequency",
ylabel = "Lomb–Scargle power")
savefig(joinpath(@__DIR__, "freq-periodogram.png"))
plot(periodpower(p)..., size = sz, xlim = (0.5, 2.5), xlabel = "Period",
ylabel = "Lomb–Scargle power")
savefig(joinpath(@__DIR__, "period-periodogram.png"))
# signal with uncertainties
ntimes = 1001
t = range(0.01, stop=10pi, length=ntimes)
s = @. sinpi(2t)
errors = rand(rng, 0.1:1e-3:4.0, ntimes)
p = lombscargle(t, s, errors, maximum_frequency=1.5)
plot(freqpower(p)..., size = sz, xlim = (0.25, 1.5), xlabel = "Frequency",
ylabel = "Lomb–Scargle power")
savefig(joinpath(@__DIR__, "freq-uncertainties.png"))
plot(periodpower(p)..., size = sz, xlim = (0.75, 2.0), xlabel = "Period",
ylabel = "Lomb–Scargle power")
savefig(joinpath(@__DIR__, "period-uncertainties.png"))
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 771 | #!/usr/bin/env julia
using LombScargle, BenchmarkTools, FFTW
N = round.(Int, exp10.(range(1, stop=5, length=15)))
for nthreads in (1, 4)
FFTW.set_num_threads(nthreads)
@info(string(nthreads) * " FFTW thread(s)")
open(joinpath(@__DIR__, "julia_times-" * string(nthreads) * ".dat"), "w") do file
for (j, n) in enumerate(N)
println("Iteration ", j, " (", n, " datapoints)")
t = collect(range(0.01, stop=10, length=n))
s = sin.(t) .+ 1.5*cospi.(4t) .+ 3
plan = LombScargle.plan(t, s, fast = true, flags = FFTW.MEASURE)
time = minimum(@benchmark(lombscargle($plan)).times)/1e9
println(" Julia: ", time, " seconds")
println(file, n, "\t", time)
end
end
end
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 684 | #!/usr/bin/env julia
using Plots, DelimitedFiles
pyplot()
julia1 = readdlm("julia_times-1.dat")
julia4 = readdlm("julia_times-4.dat")
python = readdlm("python_times.dat")
plot(xaxis = (:log,), yaxis = (:log,),
xlabel = "Datapoints", ylabel = "Time (seconds)",
size=(900, 600))
plot!(julia1[:,1], julia1[:,2], linewidth = 2, marker = (:auto,), color = :blue, lab = "LombScargle.jl - single thread")
plot!(julia4[:,1], julia4[:,2], linewidth = 2, marker = (:auto,), color = :orange, lab = "LombScargle.jl - 4 threads")
plot!(python[:,1], python[:,2], linewidth = 2, marker = (:auto,), color = :green, lab = "Astropy")
savefig("benchmarks.svg")
savefig("benchmarks.png")
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 3423 | ### LombScargle.jl --- Perform Lomb–Scargle periodogram
#
# Copyright (C) 2016, 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Code:
__precompile__()
module LombScargle
using FFTW, Measurements, LinearAlgebra, Statistics, SpecialFunctions
export lombscargle
# This is similar to Periodogram type of DSP.Periodograms module, but for
# unevenly spaced frequencies.
struct Periodogram{P<:AbstractFloat, F<:AbstractVector, T<:AbstractVector}
power::Vector{P}
freq::F
# XXX: the `times' vector is only in the `M' function (see utils.jl), but
# only maximum(times) and minimum(times) are used. We could consider the
# possibility to keep in this type only the extrema of t, instead of the
# whole array.
times::T
norm::Symbol
end
abstract type PeriodogramPlan end
include("utils.jl")
include("bootstrap.jl")
include("townsend.jl")
include("gls.jl")
include("press-rybicki.jl")
include("planning.jl")
function normalize!(P::AbstractVector{<:AbstractFloat},
signal::AbstractVector{<:Real},
psdfactor::Real,
N::Integer,
noise_level::Real,
normalization::Symbol)
if normalization == :standard
return P
elseif normalization == :model
return P ./= (1 .- P)
elseif normalization == :log
return P .= -log.(1 .- P)
elseif normalization == :psd
return P .*= psdfactor ./ 2
elseif normalization == :Scargle
return P ./= noise_level
elseif normalization == :HorneBaliunas
return P .*= (N .- 1) ./ 2
elseif normalization == :Cumming
M = maximum(P)
return P .*= (N .- 3) ./ (1 .- M) ./ 2
else
error("normalization \"", string(normalization), "\" not supported")
end
end
normalize!(P::AbstractVector{<:AbstractFloat}, p::PeriodogramPlan) =
normalize!(P, p.signal, p.YY * p.sumw, length(p.signal), p.noise, p.norm)
lombscargle(p::PeriodogramPlan) =
Periodogram(normalize!(_periodogram!(p), p), p.freq, p.times, p.norm)
lombscargle(args...; kwargs...) = lombscargle(plan(args...; kwargs...))
"""
lombscargle(times::AbstractVector{<:Real}, signal::AbstractVector{<:Real},
[errors::AbstractVector{<:Real}]; keywords...)
Compute the Lomb–Scargle periodogram of the `signal` vector, observed at
`times`. You can also specify the uncertainties for each signal point with
`errors` argument. All these vectors must have the same length.
All optional keywords are described in the docstring of
[`LombScargle.plan`](@ref).
If the signal has uncertainties, the `signal` vector can also be a vector of
`Measurement` objects (from
[`Measurements.jl`](https://github.com/giordano/Measurements.jl) package), in
which case you don’t need to pass a separate `errors` vector for the
uncertainties of the signal.
"""
lombscargle(::AbstractVector{<:Real}, rest...)
"""
lombscargle(plan::PeriodogramPlan)
Compute the Lomb–Scargle periodogram for the given `plan`. This method has no
other arguments. See documentation of [`LombScargle.plan`](@ref) for how to
plan a Lomb–Scargle periodogram.
"""
lombscargle(::PeriodogramPlan)
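# Minimal usage sketch (illustrative only; keywords are documented in `LombScargle.plan`):
# t = range(0.01, stop = 10pi, length = 1001)
# s = sinpi.(t) .+ 1.5 .* cospi.(2t)
# pgram = lombscargle(t, s, maximum_frequency = 1.5)
# pgram_err = lombscargle(t, s, fill(0.1, length(t)), maximum_frequency = 1.5) # with uncertainties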
_periodogram!(p::PeriodogramPlan) = _periodogram!(p.P, p.times, p)
end # module
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 2869 | ### bootstrap.jl
#
# Copyright (C) 2016, 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle, bootstrapping
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Commentary
#
# This file contains facilities to perform bootstrapping and to calculate
# false-alarm probability and its inverse.
#
### Code:
using Random
struct Bootstrap{T<:AbstractFloat}
p::Vector{T} # Vector of highest peaks
end
const _default_rng = if VERSION < v"1.3"
() -> Random.GLOBAL_RNG
else
Random.default_rng
end
function bootstrap(rng::AbstractRNG, N::Integer, p::PeriodogramPlan)
P = similar(p.P)
Bootstrap(sort!([maximum(normalize!(_periodogram!(P, shuffle(rng, p.times), p), p)) for _ in 1:N], rev = true))
end
bootstrap(N::Integer, p::PeriodogramPlan) =
bootstrap(_default_rng(), N, p)
bootstrap(rng::AbstractRNG, N::Integer, t::AbstractVector{<:Real}, rest...; kwargs...) =
bootstrap(rng, N, plan(t, rest...; kwargs...))
bootstrap(N::Integer, t::AbstractVector{<:Real}, rest...; kwargs...) =
bootstrap(_default_rng(), N, plan(t, rest...; kwargs...))
"""
LombScargle.bootstrap(N::Integer,
times::AbstractVector{Real},
signal::AbstractVector{Real},
errors::AbstractVector{Real}=ones(signal); ...)
Create `N` bootstrap samples, perform the Lomb–Scargle analysis on them, and
store all the highest peaks for each one in a `LombScargle.Bootstrap` object.
All the arguments after `N` are passed around to [`lombscargle`](@ref).
"""
bootstrap(::Integer, ::AbstractVector{<:Real})
"""
LombScargle.bootstrap(N::Integer, plan::PeriodogramPlan)
Create `N` bootstrap samples, perform the Lomb–Scargle analysis on them for the given
`plan`, and store all the highest peaks for each one in a `LombScargle.Bootstrap` object.
See documentation of [`LombScargle.plan`](@ref) for how to plan a Lomb–Scargle periodogram.
"""
bootstrap(::Integer, ::PeriodogramPlan)
"""
fap(b::Bootstrap, power::Real)
Return the false-alarm probability for `power` in the bootstrap sample `b`.
Its inverse is the [`fapinv`](@ref) function.
"""
fap(b::Bootstrap{<:AbstractFloat}, power::Real) =
length(findall(x -> x >= power, b.p))/length(b.p)
"""
fapinv(b::Bootstrap, prob::Real)
Return the power value whose false-alarm probability is `prob` in the bootstrap
sample `b`.
It returns `NaN` if the requested probability is too low and the power cannot be
determined with the bootstrap sample `b`. In this case, you should enlarge your
bootstrap sample so that `N*fap` can be rounded to an integer larger than or
equal to 1.
This is the inverse of [`fap`](@ref) function.
"""
fapinv(b::Bootstrap{<:AbstractFloat}, prob::Real) =
get(b.p, round(Int, length(b.p)*prob), NaN)
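# Minimal usage sketch (illustrative only): given `t` and `s` as in `lombscargle`,
# p = LombScargle.plan(t, s)
# b = LombScargle.bootstrap(1000, p) # 1000 bootstrap resamplings
# fap(b, 0.2) # false-alarm probability of a peak with power 0.2
# fapinv(b, 0.01) # power level whose false-alarm probability is 1%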
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 2808 | ### extirpolation.jl
#
# Copyright (C) 2016 Astropy Developers
# Copyright (C) 2016, 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle, extirpolation
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Commentary:
#
# Utility functions used for implementing the fast algorithm proposed here:
# * Press, W. H., Rybicki, G. B. 1989, ApJ, 338, 277 (URL:
# http://dx.doi.org/10.1086/167197,
# Bibcode: http://adsabs.harvard.edu/abs/1989ApJ...338..277P)
# The actual implementation is adapted from Astropy project. See
# stats/lombscargle/implementations/utils.py in Astropy source code.
#
### Code:
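# add_at!(arr, ind, vals) accumulates vals[i] into arr[ind[i]] one element at a time;
# unlike `arr[ind] .+= vals`, repeated indices in `ind` are summed correctly.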
function add_at!(arr::AbstractVector{T1}, ind::AbstractVector{<:Integer},
vals::AbstractVector{T2}) where {T1,T2}
@inbounds for i in eachindex(ind)
arr[ind[i]] += vals[i]
end
end
function extirpolate!(result, x::AbstractVector{<:Real}, y::AbstractVector{NU},
N::Integer, M::Integer=4) where {NU<:Number}
# Now use legendre polynomial weights to populate the results array; This is
# an efficient recursive implementation (See Press et al. 1989)
fill!(result, zero(NU))
# first take care of the easy cases where x is an integer
integers = findall(isinteger, x)
add_at!(result, mod.(trunc.(Int, x[integers]), N) .+ 1, y[integers])
deleteat!(x, integers)
deleteat!(y, integers)
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center, adjusted so that
# the limits are within the range 0...N
ilo = clamp.(trunc.(Int, x .- div(M, 2)), 0, N - M)
v = collect(0:(M - 1))
numerator = [y[j] * prod(x[j] - ilo[j] .- v) for j in eachindex(x)]
denominator = gamma(M)
ilo .+= M .+ 1
@inbounds for j in v
if j > 0
denominator *= j/(j - M)
end
ilo .-= 1
add_at!(result, ilo, numerator ./ (denominator .* (x .- ilo .+ 1)))
end
return result
end
function trig_sum!(grid, fftgrid, bfft_vec, bfft_plan, t::AbstractVector{<:Real},
h::AbstractVector{<:Real}, df::Real, N::Integer,
Nfft::Integer, t0::Real, f0::Real=0.0,
freq_factor::Integer=1, Mfft::Integer=4)
df *= freq_factor
f0 *= freq_factor
if f0 > 0
H = h .* cis.(f0 .* (t .- t0) .* 2 .* pi)
else
H = complex(h)
end
tnorm = mod.(((t .- t0) .* Nfft .* df), Nfft)
extirpolate!(grid, tnorm, H, Nfft, Mfft)
mul!(bfft_vec, bfft_plan, grid)
fftgrid .= @view(bfft_vec[1:N])
if t0 != 0
fftgrid .*= cis.(t0 .* (f0 .+ df .* (0:(N - 1))) .* 2 .* pi)
end
return real(fftgrid), imag(fftgrid)
end
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 3947 | ### gls.jl --- Perform Generalised Lomb–Scargle periodogram
#
# Copyright (C) 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Code:
struct GLSPlan{T,A,B<:AbstractVector{T},C,D,E,F,G} <: PeriodogramPlan
times::A
signal::B
freq::C
sumw::D
w::E
y::B
YY::T
noise::F
norm::Symbol
P::G
end
struct GLSPlan_fit_mean{T,A,B<:AbstractVector{T},C,D,E,F,G,H} <: PeriodogramPlan
times::A
signal::B
freq::C
sumw::D
w::E
y::B
Y::F
YY::T
noise::G
norm::Symbol
P::H
end
# Generalised Lomb–Scargle algorithm: this takes into account uncertainties and
# fit the mean of the signal. This is implemented following the recipe by
# * Zechmeister, M., Kürster, M. 2009, A&A, 496, 577 (URL:
# http://dx.doi.org/10.1051/0004-6361:200811296,
# Bibcode: http://adsabs.harvard.edu/abs/2009A%26A...496..577Z)
# In addition, some tricks suggested by
# * Press, W. H., Rybicki, G. B. 1989, ApJ, 338, 277 (URL:
# http://dx.doi.org/10.1086/167197,
# Bibcode: http://adsabs.harvard.edu/abs/1989ApJ...338..277P)
# to make computation faster are adopted.
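# In the notation used below (cf. Zechmeister & Kürster 2009), the power is
# P(ω) = [YC_τ^2 / CC_τ + YS_τ^2 / SS_τ] / YY,
# with the sums evaluated at the phase shift τ defined by tan(2ωτ) = 2CS / (CC - SS).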
function _generalised_lombscargle!(P, freqs, times, y, w, Y, YY, nil)
@inbounds Threads.@threads for n in eachindex(freqs)
ω = freqs[n] * 2 * pi
# Find τ for current angular frequency
C = S = CS = CC = nil
@inbounds for i in eachindex(times)
W = w[i]
s, c = sincos(ω * times[i])
CS += W*c*s
CC += W*c*c
C += W*c
S += W*s
end
CS -= C*S
SS = 1 - CC - S*S
CC -= C*C
ωτ = atan(2CS, CC - SS) / 2
# Now we can compute the power
YC_τ = YS_τ = CC_τ = nil
@inbounds for i in eachindex(times)
W = w[i]
s, c = sincos(ω*times[i] - ωτ)
YC_τ += W*y[i]*c
YS_τ += W*y[i]*s
CC_τ += W*c*c
end
# "C_τ" and "S_τ" are computed following equation (7) of Press &
# Rybicki, this formula simply comes from angle difference trigonometric
# identities.
sin_ωτ, cos_ωτ = sincos(ωτ)
C_τ = C*cos_ωτ + S*sin_ωτ
S_τ = S*cos_ωτ - C*sin_ωτ
YC_τ -= Y*C_τ
YS_τ -= Y*S_τ
SS_τ = 1 - CC_τ - S_τ*S_τ
CC_τ -= C_τ*C_τ
P[n] = (abs2(YC_τ)/CC_τ + abs2(YS_τ)/SS_τ)/YY
end
return P
end
function _generalised_lombscargle!(P, freqs, times, y, w, YY, nil)
@inbounds Threads.@threads for n in eachindex(freqs)
ω = freqs[n] * 2 * pi
# Find τ for current angular frequency
C = S = CS = CC = nil
@inbounds for i in eachindex(times)
W = w[i]
s, c = sincos(ω * times[i])
CS += W*c*s
CC += W*c*c
end
ωτ = atan(2CS, 2CC - 1) / 2
# Now we can compute the power
YC_τ = YS_τ = CC_τ = nil
@inbounds for i in eachindex(times)
W = w[i]
s, c = sincos(ω*times[i] - ωτ)
YC_τ += W*y[i]*c
YS_τ += W*y[i]*s
CC_τ += W*c*c
end
# P[n] should be (abs2(YC_τ)/CC_τ + abs2(YS_τ)/SS_τ)/YY, but we guard the values of
# CC_τ and SS_τ in case they're zeros (note that SS_τ = 1 - CC_τ).
frac_C = ifelse(iszero(CC_τ), nil, abs2(YC_τ) / CC_τ)
frac_S = ifelse(CC_τ == 1, nil, abs2(YS_τ) / (1 - CC_τ))
P[n] = (frac_C + frac_S)/YY
end
return P
end
_periodogram!(P::AbstractVector, times, p::GLSPlan_fit_mean) =
_generalised_lombscargle!(P, p.freq, times, p.y, p.w, p.Y, p.YY, zero(eltype(p.P)))
_periodogram!(P::AbstractVector, times, p::GLSPlan) =
_generalised_lombscargle!(P, p.freq, times, p.y, p.w, p.YY, zero(eltype(p.P)))
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 11142 | ### press-rybicki.jl --- Plan the Lomb–Scargle periodogram
#
# Copyright (C) 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Code:
# Switches to select the appropriate algorithm to compute the periodogram.
function _plan_no_fast(times::AbstractVector{R1}, signal::AbstractVector{R2}, sumw::Real,
w::AbstractVector{R3}, frequencies::AbstractVector{R4},
with_errors::Bool, center_data::Bool,
fit_mean::Bool, noise::Real,
normalization::Symbol) where {R1<:Real,R2<:Real,R3<:Real,R4<:Real}
P_type = promote_type(float(R1), float(R2), float(R3), float(R4))
P = Vector{P_type}(undef, length(frequencies))
if fit_mean || with_errors
# If "center_data" or "fit_mean" keywords are true,
# subtract the weighted mean from each point.
if center_data || fit_mean
y = signal .- (w ⋅ signal)
else
y = signal
end
YY = w ⋅ (y .^ 2)
if fit_mean
Y = w ⋅ y
YY -= Y * Y
return GLSPlan_fit_mean(times, signal, frequencies, sumw, w, y, Y,
YY, noise, normalization, P)
else
return GLSPlan(times, signal, frequencies, sumw, w, y,
YY, noise, normalization, P)
end
else
if center_data
X = signal .- mean(signal)
else
X = signal
end
return LSPlan(times, signal, frequencies, sumw, w, X, X ⋅ X, noise, normalization, P)
end
end
# There are two "_plan" methods, the only different argument being "frequencies". When it
# is a `Range' object (first method) it is possible to use the fast method, provided
# that fast == true, otherwise we can only use the non-fast methods.
function _plan(times::AbstractVector{<:Real}, signal::AbstractVector{R1}, sumw::Real,
w::AbstractVector{R2}, frequencies::AbstractRange{<:Real}, fast::Bool,
with_errors::Bool, center_data::Bool, fit_mean::Bool, flags::Integer,
timelimit::Real, padding_factors::Union{NTuple{N,<:Integer} where {N},Vector{<:Integer}},
oversampling::Integer, Mfft::Integer, noise::Real, normalization::Symbol) where {R1<:Real,R2<:Real}
if fast
@assert Mfft > 0
@assert step(frequencies) > 0
if center_data || fit_mean
y = signal .- w⋅signal
else
y = signal
end
YY = w ⋅ (y .^ 2)
N = length(frequencies)
Nfft = nextprod(padding_factors, N * oversampling)
T = promote_type(float(R1), float(R2))
bfft_vect = Vector{Complex{T}}(undef, Nfft)
bfft_grid = Vector{Complex{T}}(undef, Nfft)
fftgrid = Vector{Complex{T}}(undef, N)
bfft_plan = FFTW.plan_bfft(bfft_vect, flags = flags, timelimit = timelimit)
if fit_mean
return FastGLSPlan_fit_mean(times, signal, frequencies, sumw, w, y, YY,
fftgrid, bfft_vect, bfft_grid, bfft_plan, Mfft,
noise, normalization, Vector{T}(undef, N))
else
return FastGLSPlan(times, signal, frequencies, sumw, w, y, YY,
fftgrid, bfft_vect, bfft_grid, bfft_plan, Mfft,
noise, normalization, Vector{T}(undef, N))
end
else
return _plan_no_fast(times, signal, sumw, w, frequencies, with_errors,
center_data, fit_mean, noise, normalization)
end
end
_plan(times::AbstractVector{<:Real}, signal::AbstractVector{<:Real}, sumw::Real,
w::AbstractVector{<:Real}, frequencies::AbstractVector{<:Real},
fast::Bool, with_errors::Bool, center_data::Bool, fit_mean::Bool, flags::Integer,
timelimit::Real, padding_factors::Union{NTuple{N,<:Integer} where {N},Vector{<:Integer}},
oversampling::Integer, Mfft::Integer, noise::Real, normalization::Symbol) =
_plan_no_fast(times, signal, sumw, w, frequencies,
with_errors, center_data, fit_mean, noise, normalization)
function _plan(times::AbstractVector{<:Real},
signal::AbstractVector{<:Real},
with_errors::Bool,
sumw::Real=length(signal),
w::AbstractVector{<:Real}=fill!(similar(signal), one(eltype(signal)))/sumw;
normalization::Symbol=:standard,
noise_level::Real=1,
center_data::Bool=true,
fit_mean::Bool=true,
flags::Integer=FFTW.ESTIMATE,
timelimit::Real=Inf,
oversampling::Integer=5,
padding_factors::Union{NTuple{N,<:Integer} where {N},Vector{<:Integer}}=[2,3,5,7],
Mfft::Integer=4,
samples_per_peak::Integer=5,
nyquist_factor::Integer=5,
minimum_frequency::Real=NaN,
maximum_frequency::Real=NaN,
frequencies::AbstractVector{<:Real}=
autofrequency(times,
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency),
fast::Bool=(length(frequencies) > 200))
@assert length(times) == length(signal) == length(w)
return _plan(times, signal, sumw, w, frequencies, fast, with_errors, center_data,
fit_mean, flags, timelimit, padding_factors, oversampling, Mfft, noise_level, normalization)
end
### Main interface functions
# No uncertainties
plan(times::AbstractVector{<:Real}, signal::AbstractVector{<:Real}; kwargs...) =
_plan(times, signal, false; kwargs...)
# Uncertainties provided
function plan(times::AbstractVector{<:Real}, signal::AbstractVector{<:Real},
errors::AbstractVector{<:Real}; kwargs...)
# Compute weights vector
w = 1 ./ errors .^ 2
sumw = sum(w)
w ./= sumw
return _plan(times, signal, true, sumw, w; kwargs...)
end
# Uncertainties provided via Measurement type
plan(times::AbstractVector{<:Real}, signal::AbstractVector{<:Measurement}; kwargs...) =
plan(times, Measurements.value.(signal), Measurements.uncertainty.(signal); kwargs...)
"""
LombScargle.plan(times::AbstractVector{<:Real}, signal::AbstractVector{<:Real},
[errors::AbstractVector{<:Real}];
normalization::Symbol=:standard,
noise_level::Real=1,
center_data::Bool=true,
fit_mean::Bool=true,
fast::Bool=true,
flags::Integer=FFTW.ESTIMATE,
timelimit::Real=Inf,
oversampling::Integer=5,
padding_factors::Vector{Int}=[2],
Mfft::Integer=4,
samples_per_peak::Integer=5,
nyquist_factor::Integer=5,
minimum_frequency::Real=NaN,
maximum_frequency::Real=NaN,
frequencies::AbstractVector{Real}=
autofrequency(times,
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency))
Pre-plan the Lomb–Scargle periodogram of the `signal` vector, observed at
`times`. The periodogram can then be computed by passing the result of this
function to `lombscargle`.
You can also specify the uncertainties for each signal point with `errors`
argument. All these vectors must have the same length.
Optional keywords arguments are:
* `normalization`: how to normalize the periodogram. Valid choices are:
`:standard`, `:model`, `:log`, `:psd`, `:Scargle`, `:HorneBaliunas`,
`:Cumming`
* `noise_level`: the noise level used to normalize the periodogram when
`normalization` is set to `:Scargle`
* `fit_mean`: if `true`, fit for the mean of the signal using the Generalised
Lomb–Scargle algorithm (see Zechmeister & Kürster paper below). If this is
`false` and no uncertainty on the signal is provided, the original algorithm
by Lomb and Scargle will be employed (see Townsend paper below)
* `center_data`: if `true`, subtract the weighted mean of `signal` from `signal`
itself before performing the periodogram. This is especially important if
`fit_mean` is `false`
* `frequencies`: the frequency grid (not angular frequencies) at which the
periodogram will be computed, as a vector. If not provided, it is an evenly
spaced grid of type `Range`, automatically determined with
`LombScargle.autofrequency` function, which see. See below for other
available keywords that can be used to affect the frequency grid without
directly setting `frequencies`
You can explicitly require whether or not to use the fast method by Press & Rybicki,
overriding the default choice, by setting the `fast` keyword. In any case,
`frequencies` must be a `Range` object (this is the default) in order to
actually use this method. A few other keywords are available to adjust the
settings of the periodogram when the fast method is used (otherwise they are
ignored):
* `fast`: whether to use the fast method.
* `flags`: this integer keyword is a bitwise-or of FFTW planner flags,
defaulting to `FFTW.ESTIMATE`. Passing `FFTW.MEASURE` or `FFTW.PATIENT` will
instead spend several seconds (or more) benchmarking different possible FFT
algorithms and picking the fastest one; see the FFTW manual for more
information on planner flags.
* `timelimit`: specifies a rough upper bound on the allowed planning time, in seconds.
* `oversampling`: the oversampling factor for the frequency grid used in the
approximation; roughly the number of time samples across the highest-frequency
sinusoid. This parameter controls the tradeoff between accuracy and speed.
* `padding_factors`: the FFT is performed on a vector with length equal to the
smallest number larger than or equal to `N * oversampling` which is a product
of all numbers in this vector. E.g., use `padding_factors=[2]` to perform the
FFT on a vector padded to a power of 2, or `padding_factors=[2, 3, 5, 7]` for
the optimal size for the FFTW library.
* `Mfft`: the number of adjacent points to use in the FFT approximation.
In addition, you can use all optional keyword arguments of
[`LombScargle.autofrequency`](@ref) function in order to tune the `frequencies`.
If the signal has uncertainties, the `signal` vector can also be a vector of
`Measurement` objects (from
[`Measurements.jl`](https://github.com/giordano/Measurements.jl) package), in
which case you don’t need to pass a separate `errors` vector for the
uncertainties of the signal.
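For example, a minimal sketch (the time and signal vectors below are arbitrary
illustrative data):
```julia
julia> t = sort(rand(300)) .* 10;

julia> s = sinpi.(2 .* t);

julia> p = LombScargle.plan(t, s, maximum_frequency = 2);

julia> pgram = lombscargle(p);
```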
"""
plan
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 4818 | ### press-rybicki.jl --- Perform fast but approximate Lomb–Scargle periodogram
#
# Copyright (C) 2016 Astropy Developers
# Copyright (C) 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Code:
struct FastGLSPlan{T,A,B<:AbstractVector{T},C,D,E,F,G,H,I,J,K,L} <: PeriodogramPlan
times::A
signal::B
freq::C
sumw::D
w::E
y::B
YY::T
fftgrid::F
bfft_vect::G
bfft_grid::H
bfft_plan::I
Mfft::J
noise::K
norm::Symbol
P::L
end
struct FastGLSPlan_fit_mean{T,A,B<:AbstractVector{T},C,D,E,F,G,H,I,J,K,L} <: PeriodogramPlan
times::A
signal::B
freq::C
sumw::D
w::E
y::B
YY::T
fftgrid::F
bfft_vect::G
bfft_grid::H
bfft_plan::I
Mfft::J
noise::K
norm::Symbol
P::L
end
include("extirpolation.jl")
# Fast, but approximate, method to compute the Lomb–Scargle periodogram for
# evenly spaced frequency grid. See
# * Press, W. H., Rybicki, G. B. 1989, ApJ, 338, 277 (URL:
# http://dx.doi.org/10.1086/167197,
# Bibcode: http://adsabs.harvard.edu/abs/1989ApJ...338..277P)
# This is adapted from Astropy implementation of the method. See
# `lombscargle_fast' function.
function _press_rybicki!(P, times::AbstractVector{<:Real}, y::AbstractVector{<:Real},
w::AbstractVector{<:Real}, t0, df, N , f0, YY::Real,
fftgrid, bfft_vec::AbstractVector{Complex{T}},
grid, plan, Nfft::Integer, Mfft::Integer) where {T<:AbstractFloat}
#---------------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Ch, Sh = trig_sum!(grid, fftgrid, bfft_vec, plan, times, w .* y, df, N, Nfft, t0, f0, 1, Mfft)
C2, S2 = trig_sum!(grid, fftgrid, bfft_vec, plan, times, w, df, N, Nfft, t0, f0, 2, Mfft)
#---------------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
tan_2ωτ = S2 ./ C2
C2w = 1 ./ (sqrt.(1 .+ tan_2ωτ .* tan_2ωτ)) # = cos(2 * ωτ)
S2w = tan_2ωτ .* C2w # = sin(2 * ωτ)
Cw = sqrt.((1 .+ C2w) ./ 2) # = cos(ωτ)
Sw = sign.(S2w) .* sqrt.((1 .- C2w) ./ 2) # = sin(ωτ)
return P .= ((Ch .* Cw .+ Sh .* Sw) .^ 2 ./ ((1 .+ C2 .* C2w .+ S2 .* S2w) ./ 2) .+
(Sh .* Cw .- Ch .* Sw) .^ 2 ./ ((1 .- C2 .* C2w .- S2 .* S2w) ./ 2)) ./ YY
end
function _press_rybicki_fit_mean!(P, times::AbstractVector{<:Real},
y::AbstractVector{<:Real}, w::AbstractVector{<:Real},
t0, df, N , f0, YY::Real, fftgrid,
bfft_vec::AbstractVector{Complex{T}}, grid, plan,
Nfft::Integer, Mfft::Integer) where {T<:AbstractFloat}
#---------------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Ch, Sh = trig_sum!(grid, fftgrid, bfft_vec, plan, times, w .* y, df, N, Nfft, t0, f0, 1, Mfft)
C2, S2 = trig_sum!(grid, fftgrid, bfft_vec, plan, times, w, df, N, Nfft, t0, f0, 2, Mfft)
C, S = trig_sum!(grid, fftgrid, bfft_vec, plan, times, w, df, N, Nfft, t0, f0, 1, Mfft)
#---------------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
tan_2ωτ = (S2 .- 2 .* S .* C) ./ (C2 .- (C .* C .- S .* S))
C2w = 1 ./ (sqrt.(1 .+ tan_2ωτ .* tan_2ωτ)) # = cos(2 * ωτ)
S2w = tan_2ωτ .* C2w # = sin(2 * ωτ)
Cw = sqrt.((1 .+ C2w) ./ 2) # = cos(ωτ)
Sw = sign.(S2w) .* sqrt.((1 .- C2w) ./ 2) # = sin(ωτ)
return P .= ((Ch .* Cw .+ Sh .* Sw) .^ 2 ./
((1 .+ C2 .* C2w .+ S2 .* S2w) ./ 2 .- (C .* Cw .+ S .* Sw) .^ 2) .+
(Sh .* Cw .- Ch .* Sw) .^ 2 ./
((1 .- C2 .* C2w .- S2 .* S2w) ./ 2 .- (S .* Cw .- C .* Sw) .^ 2)) ./ YY
end
_periodogram!(P::AbstractVector, times, p::FastGLSPlan) =
_press_rybicki!(P, times, p.y, p.w, minimum(p.times), step(p.freq),
length(p.freq), minimum(p.freq), p.YY, p.fftgrid, p.bfft_vect,
p.bfft_grid, p.bfft_plan, length(p.bfft_vect), p.Mfft)
_periodogram!(P::AbstractVector, times, p::FastGLSPlan_fit_mean) =
_press_rybicki_fit_mean!(P, times, p.y, p.w, minimum(p.times), step(p.freq),
length(p.freq), minimum(p.freq), p.YY, p.fftgrid, p.bfft_vect,
p.bfft_grid, p.bfft_plan, length(p.bfft_vect), p.Mfft)
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 1801 | ### townsend.jl --- Perform original Lomb–Scargle periodogram
#
# Copyright (C) 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Code:
struct LSPlan{T,A,B<:AbstractVector{T},C,D,E,F,G} <: PeriodogramPlan
times::A
signal::B
freq::C
sumw::D
w::E
X::B
YY::T
noise::F
norm::Symbol
P::G
end
# Original algorithm that doesn't take into account uncertainties and doesn't
# fit the mean of the signal. This is implemented following the recipe by
# * Townsend, R. H. D. 2010, ApJS, 191, 247 (URL:
# http://dx.doi.org/10.1088/0067-0049/191/2/247,
# Bibcode: http://adsabs.harvard.edu/abs/2010ApJS..191..247T)
function _lombscargle_orig!(P::AbstractVector{T}, times::AbstractVector{<:Real},
X::AbstractVector{<:Real}, freqs::AbstractVector{<:Real},
XX::Real) where {T<:Real}
N = length(X)
@inbounds Threads.@threads for n in eachindex(freqs)
ω = freqs[n] * 2 * pi
XC = XS = CC = CS = zero(T)
@inbounds for j in eachindex(times)
S, C = sincos(ω * times[j])
XC += X[j]*C
XS += X[j]*S
CC += C*C
CS += C*S
end
SS = N - CC
s_τ, c_τ = sincos(atan(CS, CC - N / 2) / 2)
c_τ2 = c_τ*c_τ
s_τ2 = s_τ*s_τ
cs_τ_CS = 2c_τ*s_τ*CS
P[n] = (abs2(c_τ*XC + s_τ*XS)/(c_τ2*CC + cs_τ_CS + s_τ2*SS) +
abs2(c_τ*XS - s_τ*XC)/(c_τ2*SS - cs_τ_CS + s_τ2*CC))/XX
end
return P
end
_periodogram!(P::AbstractVector, times, p::LSPlan) = _lombscargle_orig!(P, times, p.X, p.freq, p.YY)
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 10485 | ### utils.jl
#
# Copyright (C) 2016, 2017 Mosè Giordano.
#
# Maintainer: Mosè Giordano <mose AT gnu DOT org>
# Keywords: periodogram, lomb scargle
#
# This file is a part of LombScargle.jl.
#
# License is BSD 3-clause "New" or "Revised".
#
### Commentary
#
# This file contains some utilities for the LombScargle.jl package.
#
### Code:
export power, freq, freqpower, findmaxpower, findmaxfreq, period, periodpower,
findmaxperiod, prob, probinv, fap, fapinv
"""
power(p::Periodogram)
Return the power vector of Lomb–Scargle periodogram `p`.
"""
power(p::Periodogram) = p.power
"""
freq(p::Periodogram)
Return the frequency vector of Lomb–Scargle periodogram `p`.
"""
freq(p::Periodogram) = p.freq
"""
freqpower(p::Periodogram)
Return the 2-tuple `(freq(p), power(p))`, where `freq(p)` and `power(p)` are the
frequency vector and the power vector of Lomb–Scargle periodogram `p`
respectively.
"""
freqpower(p::Periodogram) = (freq(p), power(p))
"""
findmaxpower(p::Periodogram)
Return the highest power of the periodogram `p`.
"""
findmaxpower(p::Periodogram) = maximum(power(p))
_findmaxfreq(freq::AbstractVector{<:Real}, power::AbstractVector{<:Real}, threshold::Real) =
freq[findall(x -> x >= threshold, power)]
"""
findmaxfreq(p::Periodogram, [interval::AbstractVector{Real}], threshold::Real=findmaxpower(p))
Return the array of frequencies with the highest power in the periodogram `p`.
If a scalar real argument `threshold` is provided, return the frequencies with
power larger than or equal to `threshold`. If you want to limit the search to a
narrower frequency range, pass as second argument a vector with the extrema of
the interval.
"""
findmaxfreq(p::Periodogram, threshold::Real=findmaxpower(p)) =
_findmaxfreq(freqpower(p)..., threshold)
function findmaxfreq(p::Periodogram, interval::AbstractVector{<:Real}, threshold::Real=NaN)
f = freq(p)
lo, hi = extrema(interval)
indices = findall(x -> lo <= x <= hi, f)
pow = power(p)[indices]
if isnan(threshold)
threshold = maximum(pow)
end
return _findmaxfreq(f[indices], pow, threshold)
end
"""
period(p::Periodogram)
Return the period vector of Lomb–Scargle periodogram `p`. It is equal to `1 ./ freq(p)`.
"""
period(p::Periodogram) = 1 ./ freq(p)
"""
periodpower(p::Periodogram)
Return the 2-tuple `(period(p), power(p))`, where `period(p)` and `power(p)` are
the period vector and the power vector of Lomb–Scargle periodogram `p`
respectively.
"""
periodpower(p::Periodogram) = (period(p), power(p))
"""
findmaxperiod(p::Periodogram, [interval::AbstractVector{Real}], threshold::Real=findmaxpower(p))
Return the array of periods with the highest power in the periodogram `p`. If a
scalar real argument `threshold` is provided, return the periods with power
larger than or equal to `threshold`. If you want to limit the search to a
narrower period range, pass as second argument a vector with the extrema of the
interval.
"""
findmaxperiod(p::Periodogram, threshold::Real=findmaxpower(p)) =
1 ./ findmaxfreq(p, threshold)
findmaxperiod(p::Periodogram, interval::AbstractVector{<:Real},
threshold::Real=NaN) =
1 ./ findmaxfreq(p, 1 ./ interval, threshold)
"""
autofrequency(times::AbstractVector{Real};
samples_per_peak::Integer=5,
nyquist_factor::Integer=5,
minimum_frequency::Real=NaN,
maximum_frequency::Real=NaN)
Determine a suitable frequency grid for the given vector of `times`.
Optional keyword arguments are:
* `samples_per_peak`: the approximate number of desired samples across the
typical peak
* `nyquist_factor`: the multiple of the average Nyquist frequency used to choose
the maximum frequency if `maximum_frequency` is not provided
* `minimum_frequency`: if specified, then use this minimum frequency rather than
one chosen based on the size of the baseline
* `maximum_frequency`: if specified, then use this maximum frequency rather than
one chosen based on the average Nyquist frequency
This is based on prescription given at
<https://jakevdp.github.io/blog/2015/06/13/lomb-scargle-in-python/> and uses the
same keyword names adopted in Astropy.
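For example, a rough sketch with arbitrary illustrative data:
```julia
julia> t = range(0.01, stop = 10, length = 101);

julia> freqs = LombScargle.autofrequency(t, maximum_frequency = 5);
```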
"""
function autofrequency(times::AbstractVector{<:Real};
samples_per_peak::Integer=5,
nyquist_factor::Integer=5,
minimum_frequency::Real=NaN,
maximum_frequency::Real=NaN)
T = maximum(times) - minimum(times)
δf = inv(samples_per_peak * T)
f_min = isfinite(minimum_frequency) ? minimum_frequency : (δf / 2)
if isfinite(maximum_frequency)
return f_min:δf:maximum_frequency
else
return f_min:δf:(nyquist_factor * length(times) / 2T)
end
end
"""
prob(P::Periodogram, pow::Real)
Return the probability that the periodogram power can exceed the value `pow`.
Its inverse is the `probinv` function.
"""
function prob(P::Periodogram, pow::Real)
N = length(P.times)
normalization = P.norm
if normalization == :standard
return (1 - pow)^((N - 3) / 2)
elseif normalization == :Scargle
return exp(-pow)
elseif normalization == :HorneBaliunas
return (1 - 2*pow/(N - 1))^((N - 3) / 2)
elseif normalization == :Cumming
return (1 + 2*pow/(N - 3))^((3 - N) / 2)
else
error("normalization \"", string(normalization), "\" not supported")
end
end
"""
probinv(P::Periodogram, prob::Real)
Return the power value of the periodogram whose probability is `prob`.
This is the inverse of `prob` function.
"""
function probinv(P::Periodogram, prob::Real)
N = length(P.times)
normalization = P.norm
if normalization == :standard
return 1 - prob^(2/(N - 3))
elseif normalization == :Scargle
return -log(prob)
elseif normalization == :HorneBaliunas
return (N - 1) * (1 - prob ^ (2 / (N - 3))) / 2
elseif normalization == :Cumming
return (N - 3) * (prob ^ (2 / (3 - N)) - 1) / 2
else
error("normalization \"", string(normalization), "\" not supported")
end
end
"""
LombScargle.M(P::Periodogram)
Estimates the number of independent frequencies in the periodogram `P`.
"""
function M(P::Periodogram)
tmin, tmax = extrema(P.times)
fmin, fmax = extrema(P.freq)
return (tmax - tmin)*(fmax - fmin)
end
"""
fap(P::Periodogram, pow::Real)
Return the false-alarm probability for periodogram `P` and power value `pow`.
Its inverse is the [`fapinv`](@ref) function.
"""
fap(P::Periodogram, pow::Real) = 1 - (1 - prob(P, pow))^M(P)
"""
fapinv(P::Periodogram, prob::Real)
Return the power value of the periodogram whose false-alarm probability is
`prob`.
This is the inverse of [`fap`](@ref) function.
"""
fapinv(P::Periodogram, prob::Real) = probinv(P, 1 - (1 - prob)^(inv(M(P))))
"""
LombScargle.model(times::AbstractVector{Real},
signal::AbstractVector{R2},
[errors::AbstractVector{R3},]
frequency::Real,
[times_fit::AbstractVector{R4}];
center_data::Bool=true,
fit_mean::Bool=true)
Return the best fitting Lomb–Scargle model for the given signal at the given
frequency.
Mandatory arguments are:
* `times`: the observation times
* `signal`: the signal, sampled at `times` (must have the same length as
`times`)
* `frequency`: the frequency at which to calculate the model
Optional arguments are:
* `errors`: the vector of uncertainties of the signal. If provided, it must
have the same length as `signal` and `times`, and be the third argument. Like
for [`lombscargle`](@ref), if the signal has uncertainties, the `signal`
vector can also be a vector of `Measurement` objects, and this argument should
be omitted
* `times_fit`: the vector of times at which the model will be calculated. It
defaults to `times`. If provided, it must come after `frequency`
Optional keyword arguments `center_data` and `fit_mean` have the same meaning as
in [`lombscargle`](@ref):
- `fit_mean`: whether to fit for the mean. If this is `false`, like in the
original Lomb--Scargle periodogram, ``\\mathbf{A}`` does not have the third
column of ones, ``c_f`` is set to ``0`` and the unknown vector to be determined
becomes ``x = [a_f, b_f]^\\text{T}``
- `center_data`: whether the data should be pre-centered before solving the
linear system. This is particularly important if `fit_mean=false`
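For example, a minimal sketch with arbitrary illustrative data (a sinusoid of
frequency `1/3`):
```julia
julia> t = range(0.01, stop = 10, length = 201);

julia> s = sinpi.(2 .* t ./ 3);

julia> m = LombScargle.model(t, s, 1/3);
```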
"""
function model(t::AbstractVector{<:Real}, s::AbstractVector{T},
errors::AbstractVector{<:Real}, f::Real,
t_fit::AbstractVector{<:Real}=t;
center_data::Bool=true, fit_mean::Bool=true) where {T<:Real}
@assert length(t) == length(s) == length(errors)
if center_data
# Compute weights vector
w = 1 ./ (errors .^ 2)
m = (w⋅s)/sum(w)
y = (s .- m) ./ errors
else
# We don't want to center the data: the mean is 0 and the signal is left
# unchanged
m = zero(T)
y = s./errors
end
# The Lomb–Scargle periodogram looks for the best fitting sinusoidal
# function
# a·cos(ωt) + b·sin(ωt) + c
# In order to find the coefficients a, b, c for the given frequency we can
# solve the linear system A·x = y, where A is the matrix with rows:
# [cos(ωt) sin(ωt) 1]; x is the column vector [a, b, c], and y is the
# column vector of the signal
ω = 2 * f
if fit_mean
a, b, c = [cospi.(ω .* t) sinpi.(ω .* t) fill(1, size(t))] ./ errors \ y
return a .* cospi.(ω .* t_fit) .+ b .* sinpi.(ω .* t_fit) .+ (c + m)
else
# If fit_mean==false, the model to be fitted is a·cos(ωt) + b·sin(ωt)
a, b = [cospi.(ω .* t) sinpi.(ω .* t)] ./ errors \ y
return a .* cospi.(ω .* t_fit) .+ b .* sinpi.(ω .* t_fit) .+ m
end
end
# No uncertainties: errors=ones(s)
model(t::AbstractVector{<:Real}, s::AbstractVector{<:Real},
f::Real, t_fit::AbstractVector{<:Real}=t; kwargs...) =
model(t, s, fill(1, size(s)), f, t_fit; kwargs...)
# Uncertainties provided via Measurement type
model(t::AbstractVector{<:Real}, s::AbstractVector{<:Measurement},
f::Real, t_fit::AbstractVector{<:Real}=t; kwargs...) =
model(t, Measurements.value.(s), Measurements.uncertainty.(s),
f, t_fit; kwargs...)
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 4867 | ### Compare LombScargle.jl with Astropy
using LombScargle, Test, PyCall, Random
ast = pyimport("astropy.timeseries")
Random.seed!(1)
ntimes = 401
@testset "Un-evenly spaced data" begin
t = range(0.01, stop=10pi, length=ntimes)
t += step(t)*rand(ntimes)
for f in (x -> sinpi(x), x -> sin(x) + 1.5*cospi(4*x) + 3)
s = f.(t)
# "psd" normalization in LombScargle slightly differs from that of
# Astropy in a few points and the test would fail if we included it.
@testset "$fitmean, $nrm, $fast, $center" for fitmean in (true, false),
nrm in ("standard", "model", "log"), fast in ((true, "fast"), (false, "cython")),
center in (true, false)
f_jl, p_jl = freqpower(lombscargle(t, s, fit_mean = fitmean,
center_data = center,
normalization=Symbol(nrm),
padding_factors=[2],
maximum_frequency=20, fast = fast[1]))
f_py, p_py =
ast.LombScargle(t, s,
fit_mean = fitmean,
center_data = center).autopower(method=fast[2],
normalization=nrm,
maximum_frequency=20)
@test f_jl ≈ f_py
@test p_jl ≈ p_py
end
end
end
t = range(0.01, stop=10pi, length=ntimes)
errors = rand(0.1:1e-3:4.0, ntimes)
@testset "Evenly spaced data" begin # Use both heteroskedastic and homoskedastic uncertainties.
for f in (x -> sinpi(x), x -> sin(x) + 1.5*cospi(4*x) + 3), err in (ones(ntimes), errors)
s = f.(t)
@testset "$fitmean, $nrm, $fast, $center" for fitmean in (true, false),
nrm in ("standard", "model", "log", "psd"), fast in ((true, "fast"), (false, "cython")),
center in (true, false)
f_jl, p_jl = freqpower(lombscargle(t, s, err,
fast = fast[1],
fit_mean = fitmean,
center_data = center,
normalization=Symbol(nrm),
padding_factors=[2],
maximum_frequency=10,
samples_per_peak=10))
f_py, p_py =
ast.LombScargle(t, s, dy = err,
fit_mean = fitmean,
center_data = center).autopower(method=fast[2],
normalization=nrm,
maximum_frequency=10,
samples_per_peak=10)
@test f_jl ≈ f_py
@test p_jl ≈ p_py
end
end
end
@testset "heteroskedastic uncertainties" begin # with non-fast method
for f in (x -> sinpi(x), x -> sin(x) + 1.5*cospi(4*x) + 3)
s = f.(t)
@testset "$fitmean, $nrm, $fast, $center" for fitmean in (true, false),
nrm in ("standard", "model", "psd"), fast in ((true, "fast"), (false, "cython")),
center in (true, false)
f_jl, p_jl = freqpower(lombscargle(t, s, errors,
fit_mean = fitmean,
center_data = center,
fast = fast[1],
normalization = Symbol(nrm),
padding_factors=[2],
maximum_frequency=20))
f_py, p_py =
ast.LombScargle(t, s, dy = errors,
fit_mean = fitmean,
center_data = center).autopower(method=fast[2],
normalization = nrm,
maximum_frequency=20)
@test f_jl ≈ f_py
@test p_jl ≈ p_py
end
end
end
@testset "Model functions" begin
for f in (x -> sinpi(x), x -> sin(x) + 1.5*cospi(4*x) + 3)
s = f.(t)
@testset "$fm, $cd" for fm in (true, false), cd in (true, false)
m_jl = LombScargle.model(t, s, 1/2pi, fit_mean=fm, center_data=cd)
m_py = ast.LombScargle(t, s, center_data=cd, fit_mean=fm).model(t, 1/2pi)
@test m_jl ≈ m_py
end
end
end
@info("LombScargle.jl is consistent with Astropy")
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | code | 14266 | using LombScargle
using Measurements, FFTW, StableRNGs
using Test
Threads.nthreads() > 1 && (FFTW.set_num_threads(2); @info("Multi-threading enabled"))
ntimes = 1001
# Observation times
t = range(0.01, stop=10pi, length=ntimes)
# Signal
s = sin.(t)
pgram1 = @inferred(lombscargle(LombScargle.plan(t, s, fast = false, fit_mean=false)))
pgram2 = @inferred(lombscargle(LombScargle.plan(t, s, fast = false, fit_mean=true)))
pgram3 = @inferred(lombscargle(LombScargle.plan(t, s, fast = false, center_data=false, fit_mean=false)))
pgram4 = @inferred(lombscargle(LombScargle.plan(t, s, fast = false, center_data=false, fit_mean=true)))
@testset "Periodograms" begin
@testset "Random stuff" begin
rng = StableRNG(1)
# Randomized times
trandom = t .+ step(t) .* rand(rng, ntimes)
# Randomized signal
srandom = sinpi.(trandom) .+ cospi.(2trandom) .+ rand(rng, ntimes)
# Frequency grid
nfreqs = 10000
freqs = range(0.01, stop=3, length=nfreqs)
# Randomize frequency grid
freqs += step(freqs) * rand(rng, nfreqs)
# Use "freqpower" and "periodpower" just to call that function and increase code
# coverage. "autofrequency" function is tested below.
prandom1 = lombscargle(trandom, srandom, frequencies=freqs, fit_mean=false)
prandom2 = lombscargle(trandom, srandom, frequencies=freqs, fit_mean=true)
@test freqpower(pgram1)[2] ≈ periodpower(pgram2)[2] atol = 6e-7
@testset "Infinities" begin
# Make sure there are no infinities in `pgram1'. It seems to work only on
# 64-bit systems.
Sys.WORD_SIZE == 64 && @test(!(Inf in power(prandom1)) && !(Inf in power(prandom2)))
end
end
@testset "fit_mean and center_data" begin
@test power(pgram1) ≈ power(pgram2) atol = 5e-7
@test power(pgram3) ≈ power(pgram4) atol = 5e-7
end
# Test the values in order to prevent wrong results in both algorithms
@testset "Test values" begin
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, fit_mean=true)) ≈ [0.029886871262324886,0.0005456198989410226,1.912507742056023e-5, 4.54258409531214e-6, 1.0238342782430832e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, fit_mean=false)) ≈ [0.02988686776042212, 0.0005456197937194695,1.9125076826683257e-5,4.542583863304549e-6,1.0238340733199874e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, center_data=false, fit_mean=true)) ≈ [0.029886871262325004,0.0005456198989536703,1.9125077421448458e-5,4.5425840956285145e-6,1.023834278337881e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, center_data=false, fit_mean=false)) ≈ [0.029886868328967767,0.0005456198924872134,1.9125084251687147e-5,4.542588504467314e-6,1.0238354525870936e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:model)) ≈ [0.030807614469885718,0.0005459177625354441,1.9125443196143085e-5,4.54260473047638e-6,1.0238447607164715e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:log)) ≈ [0.030342586720560734,0.0005457688036440774,1.9125260307148152e-5,4.542594412890309e-6,1.0238395194654036e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:psd)) ≈ [7.474096700871138,0.1364484040771917,0.004782791641128195,0.0011360075968541799,0.002560400630125523]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:Scargle)) ≈ [0.029886871262324904,0.0005456198989410194,1.912507742056126e-5,4.54258409531238e-6,1.0238342782428552e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:HorneBaliunas)) ≈ [14.943435631162451,0.2728099494705097,0.009562538710280628,0.00227129204765619,0.005119171391214276]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:Cumming)) ≈ [15.372999620472974,0.2806521440709115,0.009837423440787873,0.0023365826071340815,0.005266327088140394]
@test_throws ErrorException lombscargle(t, s, frequencies=0.2:0.2:1, normalization=:foo)
end
err = collect(range(0.5, stop=1.5, length=ntimes))
@testset "Signal with uncertainties" begin
@test power(lombscargle(t, s, err, frequencies=0.1:0.1:1, fit_mean=true)) ≈ [0.06659683848818691,0.09230959166317589,0.006625919314669043,0.0015664010997692612,0.0005085442118408477,0.00019704659245878378,9.658452525613897e-5,6.331573873913433e-5,4.903871967643573e-5,3.7948448825374025e-5]
@test power(lombscargle(t, s, err, frequencies=0.1:0.1:1, fit_mean=false)) ≈ [0.0664002483305464,0.09219168665786254,0.006625915010614472,0.0015663421089042564,0.0005085109569237008,0.00019703233981948823,9.6577091433651e-5,6.33101344670203e-5,4.9033581990442793e-5,3.7944076990210425e-5]
@test power(lombscargle(t, s, err, frequencies=0.1:0.1:1, fit_mean=false, center_data=false)) ≈ [0.06920814049261209,0.09360344864985352,0.006634919960009565,0.0015362072871144769,0.0004858250831632676,0.00018179850370583626,8.543727416919218e-5,5.379994730581837e-5,4.0107232867763e-5,2.9784059487535237e-5]
@test power(lombscargle(t, s, err)) ==
power(lombscargle(t, measurement.(s, err)))
@test power(lombscargle(t, s, err, frequencies = 0.1:0.1:1, fast = false, normalization = :psd)) ≈ [21.851224476672318,30.287888352835566,2.1740438975167593,0.5139550589572747,0.16685947834022155,0.06465335925734642,0.031690545531213095,0.020774656147387098,0.01609019430987704,0.012451342926314715]
end
pgram5 = lombscargle(t, s, maximum_frequency=30, fast=true)
pgram6 = lombscargle(t, s, maximum_frequency=30, fast=false)
@testset "Fast method" begin
@test power(pgram5) ≈ power(pgram6) atol = 3e-4
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, fast=true, fit_mean=true, flags = FFTW.MEASURE, timelimit = 5)) ≈
[0.029886871262325053, 0.0005447325913220627, 2.1246300058375996e-5, 4.1259517049745417e-7, 5.04610747916143e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, fast=true, fit_mean=false, flags = FFTW.MEASURE, timelimit = 5)) ≈
[0.0298868677604221, 0.0005447324642349108, 2.1246261449681898e-5, 4.125675324400738e-7, 5.042481545188997e-5]
@test power(lombscargle(t, s, frequencies=0.2:0.2:1, fast=true, center_data=false, fit_mean=false, flags = FFTW.MEASURE, timelimit = 5)) ≈
[0.029886868328967798, 0.0005447325727792022, 2.1246201600761156e-5, 4.1251689656274853e-7, 5.0422982073668846e-5]
@test power(lombscargle(t, s, err, frequencies=0.2:0.2:1, fast=true, fit_mean=true)) ≈ [0.09230959166317658, 0.0015636510410697796, 0.00019210902295493893, 8.221668511639927e-5, 0.00021747947231895047]
@test power(lombscargle(t, s, err, frequencies=0.2:0.2:1, fast=true, fit_mean=false)) ≈ [0.09219168665786256, 0.0015635926899499144, 0.00019209076832480172, 8.2184576575774e-5, 0.00021723818031507325]
@test power(lombscargle(t, s, err, frequencies=0.2:0.2:1, fast=true, center_data=false, fit_mean=false)) ≈ [0.09360344864985316, 0.001533862558849818, 0.00017415674032714024, 5.1945371781883936e-5, 0.00011977935090727627]
@test power(lombscargle(t, s, err)) ==
power(lombscargle(t, measurement.(s, err)))
end
# Compare result of uncertainties with both methods (fast and non-fast).
@testset "Fast and non-fast methods" begin
rng = StableRNG(1)
errors = rand(rng, 0.1:1e-3:4.0, ntimes)
@test power(lombscargle(t, s, errors)) ≈ power(lombscargle(t, s, errors, fast = false)) atol = 0.3
@test power(lombscargle(t, s, errors, fit_mean = false)) ≈ power(lombscargle(t, s, errors, fit_mean = false, fast = false)) atol = 0.2
end
@testset "Float32" begin
@test typeof(period(lombscargle(Vector{Float32}(t), Vector{Float32}(s), fast = false))) == Vector{Float32}
end
end
@testset "Utilities" begin
@testset "findmaxpower, findmaxfreq, findmaxperiod" begin
@test findmaxfreq(pgram1) ≈ [31.997145470342]
@test findmaxfreq(pgram1, 0.965) ≈ [0.15602150741832602,31.685102455505348,31.997145470342,63.52622641842902,63.838269433265665]
@test findmaxperiod(pgram1) ≈ 1 ./ findmaxfreq(pgram1)
@test findmaxperiod(pgram1, 0.965) ≈ 1 ./ findmaxfreq(pgram1, 0.965)
global s = sinpi.(2t) .+ cospi.(4t)
p = lombscargle(t, s, maximum_frequency=4)
@test findmaxfreq(p, [0.9, 1.1]) ≈ [1.0029954048320957]
@test findmaxfreq(p, [1.9, 2.1]) ≈ [2.002806697267899]
@test findmaxperiod(p, [1/0.9, 1/1.1]) ≈ 1 ./ findmaxfreq(p, [0.9, 1.1])
@test findmaxperiod(p, [1/1.9, 1/2.1]) ≈ 1 ./ findmaxfreq(p, [1.9, 2.1])
@test findmaxpower(pgram1) ≈ 0.9695017551608017
end
@testset "LombScargle.autofrequency" begin
@test LombScargle.autofrequency(t) ≈ 0.003184112396292367:0.006368224792584734:79.6824127172165
@test LombScargle.autofrequency(t, minimum_frequency=0) ≈ 0.0:0.006368224792584734:79.6792286048202
@test LombScargle.autofrequency(t, maximum_frequency=10) ≈ 0.003184112396292367:0.006368224792584734:9.99492881196174
# This last test also makes sure that `freq` and `power` fields of a Periodogram
# object can have different type.
if Threads.nthreads() > 1 && Sys.isapple()
# This would make Julia crash, skip it. See
# https://github.com/JuliaLang/julia/issues/35702
@test_skip freq(lombscargle(1:11, big.(sin.(1:11)))) ≈ 0.01:0.02:2.75
else
@test freq(lombscargle(1:11, big.(sin.(1:11)))) ≈ 0.01:0.02:2.75
end
end
@testset "Probabilities and FAP" begin
global t = collect(range(0.01, stop = 10pi, length = 101))
global s = sin.(t)
for norm in (:standard, :Scargle, :HorneBaliunas, :Cumming)
P = lombscargle(t, s, normalization = norm)
for z_0 in (0.1, 0.5, 0.9)
@test prob(P, probinv(P, z_0)) ≈ z_0
@test fap(P, fapinv(P, z_0)) ≈ z_0
end
end
P = lombscargle(t, s, normalization = :log)
@test_throws ErrorException prob(P, 0.5)
@test_throws ErrorException probinv(P, 0.5)
end
@testset "LombScargle.model" begin
@test s ≈ LombScargle.model(t, s, 1/2pi, center_data=false, fit_mean=false)
global s = sinpi.(t) .+ pi .* cospi.(t) .+ ℯ
@test s ≈ LombScargle.model(t, measurement.(s, fill(1, size(s))), 0.5)
end
@testset "LombScargle.add_at!" begin
a = ones(Int, 3)
LombScargle.add_at!(a, [3, 1, 3, 1, 2], 1:5)
@test a ≈ [7, 6, 5]
end
@testset "LombScargle.extirpolate!" begin
x = collect(range(0, stop = 10, length = 50))
y = sin.(x)
vec13 = Vector{Complex{Float64}}(undef, 13)
LombScargle.extirpolate!(vec13, x, y, 13)
@test vec13 ≈ [0.39537718210649553,3.979484140636793,4.833090108345013,0.506805556164743,-3.828112427525919,-4.748341359084166,-1.3022050566901917,3.3367666084342256,5.070478111668922,1.291245296032218,-0.8264466821981216,0.0,0.0]
x = collect(range(0, stop = 10, length = 50))
y = sin.(x)
@test LombScargle.extirpolate!(Vector{Complex{Float64}}(undef, 11), x, y, 11) ≈ vec13[1:11]
end
x = collect(range(0, stop = 10, length = 50))
y = sin.(x)
@testset "LombScargle.trig_sum!" begin
N = 10
Nfft = nextpow(2, 5N)
fftgrid = Vector{Complex{Float64}}(undef, N)
bfft_vec = Vector{Complex{Float64}}(undef, Nfft)
p = plan_bfft(bfft_vec)
grid = similar(bfft_vec)
C, S = LombScargle.trig_sum!(grid, fftgrid, bfft_vec, p, x, y, 1, N, Nfft, minimum(x))
@test S ≈ [0.0,0.3753570125888358,0.08163980192703546,-0.10139634351774979,-0.4334223744905633,-2.7843373311769777,0.32405810159838055,0.05729663600471602,-0.13191736591325876,-0.5295781583202946]
@test C ≈ [8.708141477890015,-0.5402668064176129,-0.37460815054027985,-0.3793457539084364,-0.5972351546196192,14.612204307982497,-0.5020253140297526,-0.37724493022381034,-0.394096831764578,-0.6828241623474718]
end
@testset "Bootstrap" begin
rng = StableRNG(1)
plan = LombScargle.plan(x, y)
# Fill the periodogram in the plan with random numbers, to remove
# possible NaNs, which would make the check below harder. Zeroing the
# vector would be uninteresting.
plan.P .= rand.()
P = copy(plan.P)
@test LombScargle.bootstrap(rng, 5, plan).p ≈
[0.2583163570869385, 0.24972129609003385, 0.23092196417031927, 0.18502993723773883, 0.1789661587851332]
# Make sure the periodogram in the plan didn't change.
@test plan.P == P
rng = StableRNG(1)
plan = LombScargle.plan(x, y; fit_mean = false)
plan.P .= rand.()
P = copy(plan.P)
@test LombScargle.bootstrap(rng, 5, plan).p ≈
[0.2580212594813987, 0.24897444742007394, 0.23090538831280463, 0.18492853793841382, 0.17895144706266247]
@test plan.P == P
err = collect(range(0.5, stop = 1.5, length = 50))
rng = StableRNG(1)
plan = LombScargle.plan(x, measurement.(y, err); fast = true)
plan.P .= rand.()
P = copy(plan.P)
b = LombScargle.bootstrap(rng, 50, plan)
@test fap(b, fapinv(b, 0.02)) ≈ 0.02
@test plan.P == P
rng = StableRNG(1)
plan = LombScargle.plan(x, measurement.(y, err); fast = false)
plan.P .= rand.()
P = copy(plan.P)
b = LombScargle.bootstrap(rng, 50, plan)
@test fap(b, fapinv(b, 0.02)) ≈ 0.02
@test plan.P == P
rng = StableRNG(1)
plan = LombScargle.plan(x, measurement.(y, err), fast = false, fit_mean = false)
plan.P .= rand.()
P = copy(plan.P)
@test fapinv(LombScargle.bootstrap(rng, 50, plan),
0.2) ≈ 0.23917691901908134
@test plan.P == P
rng = StableRNG(1)
plan = LombScargle.plan(x, y; fast=false, fit_mean=false)
plan.P .= rand.()
P = copy(plan.P)
@test fapinv(LombScargle.bootstrap(rng, 1000, plan), 0.2) ≈
0.2157617143004672
@test plan.P == P
end
end
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | docs | 6629 | # History of LombScargle.jl
## v1.0.3 (2021-11-23)
### Bug Fixes
* Removed some redundant internal methods.
* Fixed a typo in PSD normalisation formula in documentation
([#28](https://github.com/JuliaAstro/LombScargle.jl/issues/28),
[#29](https://github.com/JuliaAstro/LombScargle.jl/pull/29)).
## v1.0.2 (2021-08-19)
### Bug Fixes
* Make sure `bootstrap([::AbstractRNG,] ::Integer, ::PeriodogramPlan)` methods
don't modify the periodogram of the input plan.
## v1.0.1 (2021-08-17)
### Bug Fixes
* Restore `bootstrap(::Integer, ::PeriodogramPlan)` method that was accidentally
removed in v0.5.1.
## v1.0.0 (2020-11-22)
### New Features
* Previously, when using the fast method, the vector of the signal was padded to
a length which is a power of 2, but now you can choose the factors of the
length of the padded vector with the new keyword argument for
`LombScargle.lombscargle` and `LombScargle.plan` functions:
`padding_factors::Union{NTuple{N,<:Integer} where {N},Vector{<:Integer}}`.
This defaults to `[2,3,5,7]`, which is the optimal choice for FFTW and allows
for smaller vectors compared to powers of 2. To reproduce the same results as
with the previous default setting you need to use `padding_factors=[2]`.
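For example (an illustrative call, where `t` and `s` are any time and signal
vectors):
```julia
lombscargle(t, s)                         # new default: factors [2, 3, 5, 7]
lombscargle(t, s, padding_factors = [2])  # pad to a power of 2, as before v1.0.0
```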
## v0.5.1 (2020-05-15)
### Bug Fixes
* Minor reorganisation of the bootstraping code.
## v0.5.0 (2019-12-08)
### Breaking Changes
* Support for Julia 0.7 was dropped, now the minimum version required is Julia
v1.0.
## v0.4.0 (2018-08-23)
### Breaking Changes
* Support for Julia 0.6 was dropped.
## v0.3.1 (2018-07-28)
* Minor performance improvements
* New documentation at https://juliaastro.github.io/LombScargle.jl/stable/
## v0.3.0 (2017-04-29)
### New Features
* You can pre-plan a periodogram before actually performing it using
`LombScargle.plan` function. This computes some quantities needed afterwards
and pre-allocate the memory for the actual computation of the periodogram.
The speed-up is particularly relevant for the fast method.
* Add `flags` and `timelimit` optional keywords to `lombscargle` function, to
set the FFTW planner flags and the time limit.
* Package license changed to BSD 3-clause "New" or "Revised".
### Breaking Changes
* Support for Julia 0.4 and 0.5 was dropped.
* The `normalization` keyword of `lombscargle` function now must be a `Symbol`
(instead of `AbstractString`), the default being `:standard` (instead of
`"standard"`). The same normalizations as before are supported, the names
kept the same capitalization.
### Improvements
This version faced several performance improvements, in particular to
`lombscargle` and `LombScargle.model` functions, besides the pre-planning
feature.
* The fast method of `lombscargle` is now faster; the larger the size of the input
array, the larger the improvement. In addition, the fast Fourier transform
computed internally with the FFTW library can take advantage of multi-threading
(call `FFTW.set_num_threads(n)` to use `n` threads) in order to speed up
computation. However, please note that the running time will not scale as `n`
because computation of the FFT is only a part of the algorithm. The memory
footprint of this function is also considerably lower. To give you an idea of
the improvement, for an input of 100000 datapoints, a pre-planned periodogram
is 70% faster than a (non-planned) periodogram in the previous version and
requires almost 90% less memory. With 4 FFTW threads the speed-up is 80%.
* The two non-fast methods are now about 20%-30% faster, thanks to the use of
the `sincos` function from the math library. These methods now also support Julia’s
native
[multi-threading](http://docs.julialang.org/en/stable/manual/parallel-computing/#multi-threading-experimental).
Run Julia with `n` threads (e.g., `JULIA_NUM_THREADS=4 julia` for 4 threads)
in order to gain an `n`-fold scaling. These functions also use considerably
less memory: if the periodogram is pre-planned, all operations are then
performed in-place, so the only memory allocated is that for the periodogram
itself, which is independent of the input size.
* The `LombScargle.model` function is now a bit faster and less memory-greedy
than before.
### Bug Fixes
* PSD normalization with heteroskedastic errors has been fixed.
## v0.2.0 (2016-12-07)
### Breaking Changes
* The fast method is now used when the frequency grid is evenly spaced (a
`Range` object), no matter what the `times` vector is. The previous behavior
was due to a wrong interpretation of the applicability of the method.
### Bug Fixes
* `Periodogram` type now has 4 parameters, one for the type of each field. Now
`power`, `freq`, and `times` fields need not to have all the same floating
point type.
* In the non-fast variant of the Generalised Lomb–Scargle method, when
`fit_mean` and/or `center_data` are `true`, pre-center the data by subtracting
from the signal the weighted average of the signal itself, instead of the
arithmetic mean.
## v0.1.2 (2016-10-17)
### New Features
* New function for performing bootstrap resampling: `LombScargle.bootstrap`.
The `fap` and `fapinv` functions have now new methods to estimate false-alarm
probability and its inverse from a bootstrap sample.
* New utilities: `period`, `periodpower`, `findmaxperiod`.
### Bug Fixes
* Fix power in the standard (i.e., `fast = false` variant) generalised
Lomb–Scargle algorithm with `fit_mean = true`. You will find different
results than before, but for the better, previous results were slightly wrong.
## v0.1.1 (2016-08-20)
### New Features
* New function: `LombScargle.model`. It gives the best-fitting Lomb–Scargle
model at a given frequency.
* `findmaxfreq` function now can take an optional argument (`interval`) to limit
the search for the maximum frequency to a certain frequency range.
## v0.1.0 (2016-08-18)
### New Features
* The fast, but approximate, method by Press & Rybicki (1989) has been
implemented. This has complexity O[N log(N)], to be compared with the O[N^2]
of the true Lomb–Scargle periodogram. The implementation in this package is
based on the one in Astropy. Related to this method, three new keywords for
`lombscargle` function has been added: `fast`, `oversampling`, `Mfft`.
* The generalised Lomb–Scargle algorithm by Zechmeister & Kürster is used also
with `fit_mean=false`, when the user provides uncertainties.
## v0.0.2 (2016-08-05)
### New Features
* New functions: `findmaxpower`, `prob`, `probinv`, `fap`, `fapinv`.
* New optional keyword for `lombscargle` function: `noise_level`.
## v0.0.1 (2016-07-16)
Initial release.
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | docs | 9846 | # LombScargle.jl
| **Documentation** | **Build Status** | **Code Coverage** |
|:---------------------------------------:|:-----------------------------------:|:-------------------------------:|
| [![][docs-stable-img]][docs-stable-url] | [![Build Status][gha-img]][gha-url] | [![][coveral-img]][coveral-url] |
| [![][docs-latest-img]][docs-latest-url] | | [![][codecov-img]][codecov-url] |
Introduction
------------
`LombScargle.jl` is a [Julia](http://julialang.org/) package for a fast
multi-threaded estimation of
the [frequency spectrum](https://en.wikipedia.org/wiki/Frequency_spectrum) of a
periodic signal
with
[the Lomb–Scargle periodogram](https://en.wikipedia.org/wiki/The_Lomb–Scargle_periodogram).
Another Julia package that provides tools to perform spectral analysis of
signals is [`DSP.jl`](https://github.com/JuliaDSP/DSP.jl), but its methods
require that the signal has been sampled at equally spaced times. Instead, the
Lomb–Scargle periodogram enables you to analyze unevenly sampled data as well,
which is a fairly common case in astronomy, a field where this periodogram is
widely used.
The algorithms used in this package are reported in the following papers:
* Press, W. H., Rybicki, G. B. 1989, ApJ, 338, 277 (URL:
http://dx.doi.org/10.1086/167197, Bibcode:
http://adsabs.harvard.edu/abs/1989ApJ...338..277P)
* Townsend, R. H. D. 2010, ApJS, 191, 247 (URL:
http://dx.doi.org/10.1088/0067-0049/191/2/247, Bibcode:
http://adsabs.harvard.edu/abs/2010ApJS..191..247T)
* Zechmeister, M., Kürster, M. 2009, A&A, 496, 577 (URL:
http://dx.doi.org/10.1051/0004-6361:200811296, Bibcode:
http://adsabs.harvard.edu/abs/2009A%26A...496..577Z)
The package provides facilities to:
* compute the periodogram using different methods (with different speeds) and
different normalizations. This is one of the fastest implementations of these
methods available as free software. If Julia is run with more than
one
[thread](http://docs.julialang.org/en/stable/manual/parallel-computing/#multi-threading-experimental),
computation is automatically multi-threaded, further speeding up calculations;
* access the frequency and period grid of the resulting periodogram, together
with the power spectrum;
* find the maximum power in the periodogram and the frequency and period
corresponding to the peak. All these queries can be restricted to a specified
region, in order to search a local maximum, instead of the global one;
* calculate the probability that a peak arises from noise only (false-alarm
probability) using analytic formulas, in order to assess the significance of
the peak;
* perform bootstrap resamplings in order to compute the false-alarm probability
with a statistical method;
* determine the best-fitting Lomb–Scargle model for the given data set at the
given frequency.
All these features are thoroughly described in the full documentation, see
below. Here we only give basic information.
### Documentation
The complete manual of `LombScargle.jl` is available
[here](https://juliaastro.github.io/LombScargle.jl/stable/). It has detailed explanation of all
functions provided by the package and more examples than what you will find
here, also with some plots.
Installation
------------
The latest version of `LombScargle.jl` is available for Julia 1.0 and later
versions, and can be installed with [Julia built-in package
manager](https://julialang.github.io/Pkg.jl/stable/). In a Julia session, after
entering the package manager mode with `]`, run the command
```julia
pkg> add LombScargle
```
Older versions are also available for Julia 0.4-0.7.
Usage
-----
After installing the package, you can start using it with
```julia
julia> using LombScargle
```
The module defines a new `LombScargle.Periodogram` data type, which, however, is
not exported because you will most probably not need to directly manipulate such
objects. This data type holds both the frequency and the power vectors of the
periodogram.
The main function provided by the package is `lombscargle`:
```julia
lombscargle(times, signal[, errors])
```
which returns a `LombScargle.Periodogram`. The only mandatory arguments are:
* `times`: the vector of observation times
* `signal`: the vector of observations associated with `times`
All these vectors must have the same length. The only optional argument is:
* `errors`: the uncertainties associated to each `signal` point. This vector
must have the same length as `times` and `signal`.
Besides the two arguments introduced above, `lombscargle` has a number of other
optional keywords in order to choose the right algorithm to use and tweak the
periodogram. For the description of all these arguments see the complete
manual.
If the signal has uncertainties, the `signal` vector can also be a vector of
`Measurement` objects (from
[`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl) package), in
which case you need not to pass a separate `errors` vector for the uncertainties
of the signal. You can create arrays of `Measurement` objects with the
`measurement` function, see `Measurements.jl` manual at
https://juliaphysics.github.io/Measurements.jl/latest/ for more details.
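For example, here is a minimal sketch (the data below are arbitrary illustrative
values):
```julia
using Measurements, LombScargle

t = range(0.01, stop=10pi, length=301)
s = sinpi.(2t)
errors = fill(0.1, length(s))
# Signal and uncertainties combined into a vector of Measurement objects
pgram = lombscargle(t, measurement.(s, errors))
```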
With the `LombScargle.plan` function you can pre-plan a periodogram and save
time and memory for the actual computation of the periodogram. See the
[manual](https://juliaastro.github.io/LombScargle.jl/stable/#Planning-the-Periodogram-1)
for details.
Examples
--------
Here is an example of a noisy periodic signal (`sin(π*t) + 1.5*cos(2π*t)`)
sampled at unevenly spaced times.
```julia
julia> using LombScargle
julia> ntimes = 1001
1001
# Observation times
julia> t = range(0.01, stop=10pi, length=ntimes)
0.01:0.03140592653589793:31.41592653589793
# Randomize times
julia> t += step(t)*rand(ntimes);
# The signal
julia> s = sinpi.(t) .+ 1.5 .* cospi.(2t) .+ rand(ntimes);
# Pre-plan the periodogram (see the documentation)
julia> plan = LombScargle.plan(t, s);
# Compute the periodogram
julia> pgram = lombscargle(plan)
```
You can plot the result, for example with
[`Plots`](https://github.com/tbreloff/Plots.jl) package. Use `freqpower`
function to get the frequency grid and the power of the periodogram as a
2-tuple.
```julia
using Plots
plot(freqpower(pgram)...)
```
### Signal with Uncertainties
The generalised Lomb–Scargle periodogram (used when the `fit_mean` optional
keyword is `true`) is able to handle a signal with uncertainties, and they will
be used as weights in the algorithm. The uncertainties can be passed either as
the third optional argument `errors` to `lombscargle` or by providing this
function with a `signal` vector of type `Measurement` (from
[`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl) package).
```julia
using Measurements, Plots
ntimes = 1001
t = range(0.01, stop=10pi, length=ntimes)
s = sinpi.(2t)
errors = rand(0.1:1e-3:4.0, ntimes)
plot(freqpower(lombscargle(t, s, errors, maximum_frequency=1.5))...)
plot(freqpower(lombscargle(t, measurement(s, errors), maximum_frequency=1.5))...)
```
Performance
-----------
A pre-planned periodogram in `LombScargle.jl` computed in single thread mode
with the fast method is more than 2 times faster than the implementation of the
same algorithm provided by Astropy, and more than 4 times faster if 4 FFTW
threads are used (on machines with at least 4 physical CPUs).
The following plot shows a comparison between the times needed to compute a
periodogram for a signal with N datapoints using `LombScargle.jl`, with 1 or 4
FFTW threads (with `flags = FFTW.MEASURE` for better performance), and the
single-threaded Astropy implementation. (Julia version: 1.6.0; `LombScargle.jl`
version: 1.0.0; Python version: 3.8.6; Astropy version: 4.1. CPU: Intel(R)
Core(TM) i7-4870HQ CPU @ 2.50GHz.)

Note that this comparison is unfair, as Astropy doesn’t support pre-planning a
periodogram nor multi-threading, and it pads vectors for FFT to a length which
is a power of 2, while by default `LombScargle.jl` uses lengths which are
products of powers of 2, 3, 5, and 7. A non-planned periodogram in single thread mode in
`LombScargle.jl` is still twice as fast as Astropy.
Development
-----------
The package is developed at https://github.com/JuliaAstro/LombScargle.jl. There
you can submit bug reports, make suggestions, and propose pull requests.
### History
The ChangeLog of the package is available in
[NEWS.md](https://github.com/JuliaAstro/LombScargle.jl/blob/master/NEWS.md) file
in top directory.
License
-------
The `LombScargle.jl` package is licensed under the BSD 3-clause "New" or
"Revised" License. The original author is Mosè Giordano.
### Acknowledgements
This package adapts the implementation in Astropy of the fast Lomb–Scargle
method by Press & Rybicki (1989). We claim no endorsement nor promotion by the
Astropy Team.
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-latest-url]: https://juliaastro.github.io/LombScargle.jl/latest/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://juliaastro.github.io/LombScargle.jl/stable/
[gha-img]: https://github.com/JuliaAstro/LombScargle.jl/workflows/CI/badge.svg
[gha-url]: https://github.com/JuliaAstro/LombScargle.jl/actions?query=workflow%3ACI
[coveral-img]: https://coveralls.io/repos/github/JuliaAstro/LombScargle.jl/badge.svg?branch=master
[coveral-url]: https://coveralls.io/github/JuliaAstro/LombScargle.jl?branch=master
[codecov-img]: https://codecov.io/gh/JuliaAstro/LombScargle.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/JuliaAstro/LombScargle.jl
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"BSD-3-Clause"
] | 1.0.3 | d64a0ce7539181136a85fd8fe4f42626387f0f26 | docs | 34648 | LombScargle.jl
==============
```@meta
DocTestSetup = quote
using LombScargle
end
```
Introduction
------------
[`LombScargle.jl`](https://github.com/JuliaAstro/LombScargle.jl) is a package for
a fast multi-threaded estimation of the [frequency
spectrum](https://en.wikipedia.org/wiki/Frequency_spectrum) of a periodic signal
with [the Lomb--Scargle
periodogram](https://en.wikipedia.org/wiki/The_Lomb–Scargle_periodogram). This
is written in [Julia](http://julialang.org/), a modern high-level,
high-performance dynamic programming language designed for technical computing.
Another Julia package that provides tools to perform spectral analysis of
signals is [DSP.jl](https://github.com/JuliaDSP/DSP.jl), but its methods require
that the signal has been sampled at equally spaced times. Instead, the
Lomb--Scargle periodogram enables you to analyze unevenly sampled data as well,
which is a fairly common case in astronomy, a field where this periodogram is
widely used.
The algorithms used in this package are reported in the following papers:
* [PR89] Press, W. H., Rybicki, G. B. 1989, ApJ, 338, 277 (URL:
<http://dx.doi.org/10.1086/167197>, Bibcode:
<http://adsabs.harvard.edu/abs/1989ApJ...338..277P>)
* [TOW10] Townsend, R. H. D. 2010, ApJS, 191, 247 (URL:
<http://dx.doi.org/10.1088/0067-0049/191/2/247>, Bibcode:
<http://adsabs.harvard.edu/abs/2010ApJS..191..247T>)
* [ZK09] Zechmeister, M., Kürster, M. 2009, A&A, 496, 577 (URL:
<http://dx.doi.org/10.1051/0004-6361:200811296>, Bibcode:
<http://adsabs.harvard.edu/abs/2009A%26A...496..577Z>)
Other relevant papers are:
* [CMB99] Cumming, A., Marcy, G. W., & Butler, R. P. 1999, ApJ, 526, 890 (URL:
<http://dx.doi.org/10.1086/308020>, Bibcode:
<http://adsabs.harvard.edu/abs/1999ApJ...526..890C>)
* [CUM04] Cumming, A. 2004, MNRAS, 354, 1165 (URL:
<http://dx.doi.org/10.1111/j.1365-2966.2004.08275.x>, Bibcode:
<http://adsabs.harvard.edu/abs/2004MNRAS.354.1165C>)
* [HB86] Horne, J. H., & Baliunas, S. L. 1986, ApJ, 302, 757 (URL:
<http://dx.doi.org/10.1086/164037>, Bibcode:
<http://adsabs.harvard.edu/abs/1986ApJ...302..757H>)
* [LOM76] Lomb, N. R. 1976, Ap&SS, 39, 447 (URL:
<http://dx.doi.org/10.1007/BF00648343>, Bibcode:
<http://adsabs.harvard.edu/abs/1976Ap%26SS..39..447L>)
* [MHC93] Murdoch, K. A., Hearnshaw, J. B., & Clark, M. 1993, ApJ, 413, 349
(URL: <http://dx.doi.org/10.1086/173003>, Bibcode:
<http://adsabs.harvard.edu/abs/1993ApJ...413..349M>)
* [SCA82] Scargle, J. D. 1982, ApJ, 263, 835 (URL:
<http://dx.doi.org/10.1086/160554>, Bibcode:
<http://adsabs.harvard.edu/abs/1982ApJ...263..835S>)
* [SS10] Sturrock, P. A., & Scargle, J. D. 2010, ApJ, 718, 527 (URL:
<http://dx.doi.org/10.1088/0004-637X/718/1/527>, Bibcode:
<http://adsabs.harvard.edu/abs/2010ApJ...718..527S>)
The package provides facilities to:
- compute the periodogram using different methods (with different
speeds) and different normalizations. This is one of the fastest
implementations of these methods available as free software. If
Julia is run with more than one
[thread](http://docs.julialang.org/en/stable/manual/parallel-computing/#multi-threading-experimental),
computation is automatically multi-threaded, further speeding up
calculations;
- access the frequency and period grid of the resulting periodogram,
together with the power spectrum;
- find the maximum power in the periodogram and the frequency and
period corresponding to the peak. All these queries can be
restricted to a specified region, in order to search a local
maximum, instead of the global one;
- calculate the probability that a peak arises from noise only
(false-alarm probability) using analytic formulas, in order to
assess the significance of the peak;
- perform bootstrap resamplings in order to compute the false-alarm
probability with a statistical method;
- determine the best-fitting Lomb--Scargle model for the given data
set at the given frequency.
Installation
------------
`LombScargle.jl` is available for Julia 0.7 and later versions, and can
be installed with [Julia's built-in package
manager](http://docs.julialang.org/en/stable/manual/packages/). In a
Julia session run the commands
```julia
julia> using Pkg
julia> Pkg.update()
julia> Pkg.add("LombScargle")
```
Older versions are also available for Julia 0.4-0.6.
Usage
-----
After installing the package, you can start using it with
```julia
using LombScargle
```
The module defines a new `LombScargle.Periodogram` data type, which,
however, is not exported because you will most probably not need to
directly manipulate such objects. This data type holds both the
frequency and the power vectors of the periodogram.
The main function provided by the package is `lombscargle`:
```@docs
lombscargle(::AbstractVector{<:Real}, rest...)
```
`lombscargle` returns a `LombScargle.Periodogram`. The only two mandatory
arguments are:
- `times`: the vector of observation times
- `signal`: the vector of observations associated with `times`
The optional argument is:
- `errors`: the uncertainties associated to each `signal` point.
All these vectors must have the same length.
!!! tip
You can pre-plan a periodogram with [`LombScargle.plan`](@ref)
function, which has the same syntax as [`lombscargle`](@ref)
described in this section. In this way the actual computation of the
periodogram is faster and you will save memory. See the [Planning the
Periodogram](#Planning-the-Periodogram-1) section below.
!!! tip
`LombScargle.jl` exploits Julia's native
[multi-threading](http://docs.julialang.org/en/stable/manual/parallel-computing/#multi-threading-experimental)
for the non-fast methods (the methods used when you set the keyword
`fast=false`). Run Julia with ``n`` threads (e.g., `JULIA_NUM_THREADS=4 julia` for
4 threads, if your machine has 4 physical cores) in order to automatically gain
an ``n``-fold scaling.
Please note that multi-threading is still an experimental feature in Julia, so
you may encounter issues when running it with more than one thread. For example,
bug [#17395](https://github.com/JuliaLang/julia/issues/17395) (if still open)
may prevent the function, on some systems, from effectively scaling.
If the signal has uncertainties, the `signal` vector can also be a vector of
`Measurement` objects (from
[Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl) package), in
which case you need not pass a separate `errors` vector for the uncertainties
of the signal. You can create arrays of `Measurement` objects with the
`measurement` function, see `Measurements.jl` manual at
<https://juliaphysics.github.io/Measurements.jl/stable> for more details. The
generalised Lomb--Scargle periodogram by [ZK09] is always used when the signal
has uncertainties, because the original Lomb--Scargle algorithm cannot handle
them.
!!! tip
The uncertainties are only used in the generalised Lomb--Scargle algorithm to
build an
[inverse-variance](https://en.wikipedia.org/wiki/Inverse-variance_weighting)
weights vector (see [ZK09]), that gives more importance to
datapoints with lower uncertainties. The case where all measurements have the
same uncertainty (a condition known as
[homoskedasticity](https://en.wikipedia.org/wiki/Homoscedasticity)) results in a
constant weights vector, like if there are no uncertainties at all. If you have
homoskedastic errors, you do not need to provide them to
[`lombscargle`](@ref).
### Planning the Periodogram
In a manner similar to planning Fourier transforms with FFTW, it is possible to
speed-up computation of the Lomb--Scargle periodogram by pre-planning it with
[`LombScargle.plan`](@ref) function. It has the same syntax as
[`lombscargle`](@ref), which in the base case is:
```@docs
LombScargle.plan
LombScargle.autofrequency
```
`LombScargle.plan` takes all the same arguments as [`lombscargle`](@ref) shown
above and returns a `LombScargle.PeriodogramPlan` object after having
pre-computed certain quantities needed afterwards, and pre-allocated the memory
for the periodogram. It is highly suggested to plan a periodogram before
actually computing it, especially for the fast method. Once you plan a
periodogram, you can pass the `LombScargle.PeriodogramPlan` to
[`lombscargle`](@ref) as the only argument.
```@docs
lombscargle(::LombScargle.PeriodogramPlan)
```
Planning the periodogram has a twofold advantage. First of all, the planning
stage is
[type-unstable](https://docs.julialang.org/en/latest/manual/performance-tips.html),
because the type of the plan depends on the value of input parameters, and not
on their types. Thus, separating the planning (inherently inefficient) from the
actual computation of the periodogram (completely type-stable) makes overall
computation faster than directly calling [`lombscargle`](@ref). Secondly, the
`LombScargle.PeriodogramPlan` stores the time vector, but the quantities that are
pre-computed in the planning stage do not actually depend on it. This is
particularly useful if you want to calculate the [False-Alarm Probability](@ref)
via bootstrapping with [`LombScargle.bootstrap`](@ref): the time vector is
randomly shuffled, but the pre-computed quantities will remain the same, saving both
time and memory in each iteration. In addition, you ensure that you will use the
same options you used to compute the periodogram.
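As a concrete illustration, here is a minimal sketch (with purely synthetic,
illustrative data) of planning once and reusing the same plan both for the
periodogram and for bootstrapping, so that the two computations are guaranteed
to share the same options:
```julia
using LombScargle

t = sort(10 .* rand(300))                       # unevenly spaced observation times
s = sinpi.(2 .* t) .+ 0.2 .* randn(length(t))   # noisy periodic signal

# Plan once: pre-computes the needed quantities and pre-allocates memory.
plan = LombScargle.plan(t, s)

# The same plan is accepted by both `lombscargle` and `LombScargle.bootstrap`.
pgram = lombscargle(plan)
b     = LombScargle.bootstrap(500, plan)
```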
### Fast Algorithm
When the frequency grid is evenly spaced, you can compute an approximate
generalised Lomb--Scargle periodogram using a fast algorithm proposed by [PR89]
that greatly speeds up calculations, as it scales as ``O[N \log(M)]`` for ``N``
data points and ``M`` frequencies. For comparison, the true Lomb--Scargle
periodogram has complexity ``O[NM]``. The larger the number of datapoints, the
more accurate the approximation.
!!! note
This method internally performs a [Fast Fourier
Transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) (FFT) to
compute some quantities, but it is in no way equivalent to conventional Fourier
periodogram analysis.
`LombScargle.jl` uses [FFTW](http://fftw.org/) functions to compute the FFT. You
can speed-up this task by using multi-threading: call `FFTW.set_num_threads(n)`
to use ``n`` threads. However, please note that the running time will not scale as
``n`` because computation of the FFT is only a part of the algorithm.
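For example, assuming the `FFTW` package is available in your environment, a
sketch of enabling 4 FFT threads is:
```julia
using FFTW
FFTW.set_num_threads(4)   # let the FFTs used internally by the fast method run on 4 threads
```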
The only prerequisite in order to be able to employ this fast method is to
provide a `frequencies` vector as an `AbstractRange` object, which ensures that
the frequency grid is perfectly evenly spaced. This is the default, since
`LombScargle.autofrequency` returns an `AbstractRange` object.
!!! tip
In Julia, an `AbstractRange` object can be constructed for example with the
[`range`](https://docs.julialang.org/en/latest/base/math/#Base.range) function
(you specify the start of the range, and optionally the stop, the length and the
step of the vector) or with the syntax
[`start:[step:]stop`](https://docs.julialang.org/en/latest/base/math/#Base.::)
(you specify the start and the end of the range, and optionally the linear
step).
Since this fast method is accurate only for large datasets, it is enabled by
default only if the number of output frequencies is larger than 200. You can
override the default choice of using this method by setting the `fast` keyword
to `true` or `false`. We recall that in any case, the `frequencies` vector must
be an `AbstractRange` in order to use this method.
To summarize, provided that `frequencies` vector is an `AbstractRange` object,
you can use the fast method:
- by default if the length of the output frequency grid is larger than
200 points
- in any case with the `fast=true` keyword
Setting `fast=false` always ensures you that this method will not be used,
instead `fast=true` actually enables it only if `frequencies` is an
`AbstractRange`.
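For instance, the following sketch (with illustrative data) runs both the
approximate fast method and the exact method on the same evenly spaced
frequency grid:
```julia
using LombScargle

t = sort(10 .* rand(300))
s = sinpi.(2 .* t) .+ 0.1 .* randn(length(t))

freqs = 0.01:0.01:2.0   # an `AbstractRange`, so the fast method may be used
p_fast  = lombscargle(t, s, frequencies = freqs, fast = true)   # O[N log(M)] approximation
p_exact = lombscargle(t, s, frequencies = freqs, fast = false)  # exact O[NM] computation
```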
### Normalization
By default, the periodogram ``p(f)`` is normalized so that it has values in the
range ``0 \leq p(f) \leq 1``, with ``p = 0`` indicating no improvement of the
fit and ``p = 1`` a "perfect" fit (100% reduction of ``\chi^2`` or
``\chi^2 = 0``). This is the normalization suggested by [LOM76] and [ZK09], and
corresponds to the `:standard` normalization in [`lombscargle`](@ref)
function. [ZK09] wrote the formula for the power of the periodogram at frequency
``f`` as
```math
p(f) = \frac{1}{YY}\left[\frac{YC^2_{\tau}}{CC_{\tau}} +
\frac{YS^2_{\tau}}{SS_{\tau}}\right]
```
See the paper for details. The other normalizations for the periodogram
``P(f)`` are calculated from this one and are listed below; a short usage
sketch follows the list. In what follows, ``N`` is the number of observations.
- `:model`:
```math
P(f) = \frac{p(f)}{1 - p(f)}
```
- `:log`:
```math
P(f) = -\log(1 - p(f))
```
- `:psd`:
```math
P(f) = \frac{W}{2}\left[\frac{YC^2_{\tau}}{CC_{\tau}} +
\frac{YS^2_{\tau}}{SS_{\tau}}\right] = p(f) \frac{W \cdot YY}{2}
```
where ``W`` is the sum of the inverses of the individual errors, ``W = \sum \frac{1}{\sigma_{i}}``, as given in [ZK09].
- `:Scargle`:
```math
P(f) = \frac{p(f)}{\text{noise level}}
```
This normalization can be used when you know the noise level (expected from
the a priori known noise variance or population variance), but this isn't
usually the case. See [SCA82]
- `:HorneBaliunas`:
```math
P(f) = \frac{N - 1}{2} p(f)
```
This is like the `:Scargle` normalization, where the noise has been estimated
for Gaussian noise to be ``(N - 1)/2``. See [HB86]
- If the data contains a signal or if errors are under- or overestimated or if
intrinsic variability is present, then ``(N-1)/2`` may not be a good
uncorrelated estimator for the noise level. [CMB99] suggested estimating the
noise level a posteriori from the residuals of the best fit and normalising the
periodogram as:
```math
P(f) = \frac{N - 3}{2} \frac{p(f)}{1 - p(f_{\text{best}})}
```
This is the `:Cumming` normalization option
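As anticipated above, here is a short usage sketch of selecting one of these
normalizations with illustrative data; the keyword name `normalization` is
assumed here, so check the [`lombscargle`](@ref) docstring included above for
the authoritative list of options:
```julia
using LombScargle

t = sort(10 .* rand(200))
s = sinpi.(2 .* t) .+ 0.1 .* randn(length(t))

# `normalization` is assumed to be the keyword selecting among the options listed above.
p_standard = lombscargle(t, s)                        # default :standard, p(f) ∈ [0, 1]
p_psd      = lombscargle(t, s, normalization = :psd)  # power-spectral-density scaling
p_log      = lombscargle(t, s, normalization = :log)  # -log(1 - p(f))
```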
### Access Frequency Grid and Power Spectrum of the Periodogram
[`lombscargle`](@ref) returns a `LombScargle.Periodogram` object, but you most
probably want to use the frequency grid and the power spectrum. You can access
these vectors with the `freq` and `power` functions, just like in the `DSP.jl`
package. If you want to get the 2-tuple `(freq(p), power(p))`, use the
`freqpower` function.
```@docs
power
freq
freqpower
```
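For example, a minimal sketch with illustrative data:
```julia
using LombScargle

t = sort(10 .* rand(200))
s = sinpi.(2 .* t)
pgram = lombscargle(t, s)

f  = freq(pgram)             # frequency grid
pw = power(pgram)            # periodogram power at each frequency
f2, pw2 = freqpower(pgram)   # the same two vectors, returned as a 2-tuple
```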
### Access Period Grid
The following utilities are the analogs of [`freq`](@ref) and
[`freqpower`](@ref), but relative to the periods instead of the
frequencies. Thus `period(p)` returns the vector of periods in the periodogram,
that is `1./freq(p)`, and `periodpower(p)` gives you the 2-tuple `(period(p),
power(p))`.
```@docs
period
periodpower
```
### `findmaxpower`, `findmaxfreq`, and `findmaxperiod` Functions
Once you compute the periodogram, you usually want to know which are the
frequencies or periods with highest power. To do this, you can use the
[`findmaxfreq`](@ref) and [`findmaxperiod`](@ref) functions.
```@docs
findmaxpower
findmaxfreq
findmaxperiod
```
### False-Alarm Probability
Noise in the data produces fluctuations in the periodogram that will present
several local peaks, not all of which are related to real periodicities. The
significance of a peak can be tested by calculating the probability that its
power could arise purely from noise. The higher the value of the power, the
lower this probability.
!!! note
[CMB99] showed that the different normalizations result
in different probability functions. `LombScargle.jl` can calculate the
probability (and the false-alarm probability) only for the normalizations
reported by [ZK09], that are `:standard`, `:Scargle`,
`:HorneBaliunas`, and `:Cumming`.
The probability ``\text{Prob}(p > p_{0})`` that the periodogram power ``p`` can
exceed the value ``p_{0}`` can be calculated with the [`prob`](@ref) function,
whose first argument is the periodogram and the second one is the ``p_{0}``
value. The function [`probinv`](@ref) is its inverse: it takes the probability
as second argument and returns the corresponding ``p_{0}`` value.
```@docs
prob(::LombScargle.Periodogram, ::Real)
probinv(::LombScargle.Periodogram, ::Real)
LombScargle.M
fap(::LombScargle.Periodogram, ::Real)
fapinv(::LombScargle.Periodogram, ::Real)
```
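A small sketch of how these functions fit together is shown below; the data are
synthetic and the exact numbers depend on the realisation:
```julia
using LombScargle

t = sort(10 .* rand(300))
s = sinpi.(2 .* t) .+ 0.2 .* randn(length(t))
p = lombscargle(t, s)

p0 = 0.2
pr = prob(p, p0)      # probability that a single power value exceeds p0
p1 = probinv(p, pr)   # inverse mapping: recovers (approximately) p0
F  = fap(p, p0)       # false-alarm probability over the whole search band
p2 = fapinv(p, F)     # inverse of `fap`: recovers (approximately) p0
```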
Here are the probability functions for each normalization supported by
`LombScargle.jl` (a small numerical cross-check follows the list):
- `:standard` (``p \in [0, 1]``):
```math
\text{Prob}(p > p_{0}) = (1 - p_{0})^{(N - 3)/2}
```
- `:Scargle` (``p \in [0, \infty)``):
```math
\text{Prob}(p > p_{0}) = \exp(-p_{0})
```
- `:HorneBaliunas` (``p \in [0, (N - 1)/2]``):
```math
\text{Prob}(p > p_{0}) = \left(1 - \frac{2p_{0}}{N - 1}\right)^{(N - 3)/2}
```
- `:Cumming` (``p \in [0, \infty)``):
```math
\text{Prob}(p > p_{0}) = \left(1 + \frac{2p_{0}}{N - 3}\right)^{-(N - 3)/2}
```
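As a cross-check of the `:standard` expression above, one can compare
[`prob`](@ref) with a direct evaluation of ``(1 - p_0)^{(N - 3)/2}``. This is
only a sketch, assuming the default `:standard` normalization and illustrative
data:
```julia
using LombScargle

t = sort(10 .* rand(100))
s = sinpi.(2 .* t) .+ 0.3 .* randn(length(t))
p = lombscargle(t, s)                   # default :standard normalization

N  = length(s)
p0 = 0.15
prob_manual = (1 - p0)^((N - 3) / 2)    # the :standard formula from the list above
prob_lib    = prob(p, p0)               # should agree with prob_manual
```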
As explained by [SS10], «the term "false-alarm probability" denotes the
probability that at least one out of ``M`` independent power values in a
prescribed search band of a power spectrum computed from a white-noise time
series is expected to be as large as or larger than a given
value». `LombScargle.jl` provides the [`fap`](@ref) function to calculate the
false-alarm probability (FAP) of a given power in a periodogram. Its first
argument is the periodogram, the second one is the value ``p_{0}`` of the power
of which you want to calculate the FAP. The function [`fap`](@ref) uses the
formula
```math
\text{FAP} = 1 - (1 - \text{Prob}(p > p_{0}))^M
```
where ``M`` is the number of independent frequencies, estimated with ``M = T
\cdot \Delta f``, where ``T`` is the duration of the observations and ``\Delta f``
the width of the frequency range in which the periodogram has been calculated
(see [CUM04]). The function [`fapinv`](@ref) is the inverse of [`fap`](@ref): it
takes as second argument the value of the FAP and returns the corresponding
value ``p_{0}`` of the power.
The detection threshold ``p_{0}`` is the periodogram power corresponding to some
(small) value of ``\text{FAP}``, i.e. the value of ``p`` exceeded due to noise
alone in only a small fraction ``\text{FAP}`` of trials. An observed power
larger than ``p_{0}`` indicates that a signal is likely present (see [CUM04]).
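The relation between [`prob`](@ref), `LombScargle.M`, and [`fap`](@ref) can be
spelled out explicitly. The sketch below assumes that `LombScargle.M`
(documented above) accepts the periodogram object:
```julia
using LombScargle

t = sort(10 .* rand(200))
s = sinpi.(2 .* t) .+ 0.2 .* randn(length(t))
p = lombscargle(t, s)

p0 = 0.2
M  = LombScargle.M(p)                  # estimated number of independent frequencies
fap_manual = 1 - (1 - prob(p, p0))^M   # FAP = 1 - (1 - Prob(p > p0))^M
fap_lib    = fap(p, p0)                # should match fap_manual
```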
!!! warning
Some authors stressed that this method to calculate the false-alarm probability
is not completely reliable. A different approach to calculate the false-alarm
probability is to perform Monte Carlo or bootstrap simulations in order to
determine how often a certain power level ``p_{0}`` is exceeded just by chance
(see [CMB99], [CUM04], and [ZK09]). See the [Bootstrapping](@ref) section.
#### Bootstrapping
One of the simplest statistical methods that you can use to measure the
false-alarm probability and its inverse is
[bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29)
(see section 4.2.2 of [MHC93]).
!!! note
We emphasize that you can use this method only if you know your data points are
[independent and identically
distributed](https://en.wikipedia.org/wiki/Independent_and_identically_distributed_random_variables),
and they have [white uncorrelated
noise](https://en.wikipedia.org/wiki/White_noise).
The recipe of the bootstrap method is very simple to implement:
- repeat the Lomb--Scargle analysis a large number ``N`` of times on the original
data, but with the signal (and errors, if present) vector randomly
shuffled. As an alternative, shuffle only the time vector;
- out of all these simulations, store the powers of the highest peaks;
- in order to estimate the false-alarm probability of a given power, count how
many times the highest peak of the simulations exceeds that power, as a
fraction of ``N``. If you instead want to find the inverse of the false-alarm
probability ``\text{prob}``, look for the ``N\cdot\text{prob}``-th element of the
highest peaks vector sorted in descending order.
Remember to pass to [`lombscargle`](@ref) function the same options, if any, you
used to compute the Lomb--Scargle periodogram before.
`LombScargle.jl` provides simple methods to perform such analysis. The
[`LombScargle.bootstrap`](@ref) function allows you to create a bootstrap sample
with `N` permutations of the original data.
```@docs
LombScargle.bootstrap
```
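The recipe above can also be written out by hand, which makes clear what
[`LombScargle.bootstrap`](@ref) does conceptually. This is only a simplified
sketch with illustrative data, not the package's actual implementation:
```julia
using LombScargle, Random

t = sort(10 .* rand(200))
s = sinpi.(2 .* t) .+ 0.3 .* randn(length(t))

# Hand-rolled bootstrap: shuffle the signal, recompute the periodogram,
# and keep the power of the highest peak of each simulation.
nsim  = 1000
peaks = [findmaxpower(lombscargle(t, shuffle(s))) for _ in 1:nsim]

# False-alarm probability of a power level p0: the fraction of shuffled
# periodograms whose highest peak is at least p0.
p0 = 0.3
fap_manual = count(>=(p0), peaks) / nsim
```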
The false-alarm probability and its inverse can be calculated with [`fap`](@ref)
and [`fapinv`](@ref) functions respectively. Their syntax is the same as the
methods introduced above, but with a `LombScargle.Bootstrap` object as first
argument, instead of the `LombScargle.Periodogram` one.
```@docs
fap(::LombScargle.Bootstrap{<:AbstractFloat}, ::Real)
fapinv(::LombScargle.Bootstrap{<:AbstractFloat}, ::Real)
```
### `LombScargle.model` Function
For each frequency ``f`` (and hence for the corresponding angular frequency
``\omega = 2\pi f``) the Lomb--Scargle algorithm looks for the sinusoidal function
of the type
```math
a_f\cos(\omega t) + b_f\sin(\omega t) + c_f
```
that best fits the data. In the original Lomb--Scargle algorithm the offset
``c_f`` is zero (see [LOM76]). In order to find the best-fitting coefficients
``a_f``, ``b_f``, and ``c_f`` for the given frequency ``f``, without actually
performing the periodogram, you can solve the linear system ``\mathbf{A}x =
\mathbf{y}``, where ``\mathbf{A}`` is the matrix
```math
\begin{aligned}
\begin{bmatrix}
\cos(\omega t) & \sin(\omega t) & 1
\end{bmatrix} =
\begin{bmatrix}
\cos(\omega t_{1}) & \sin(\omega t_{1}) & 1 \\
\vdots & \vdots & \vdots \\
\cos(\omega t_{n}) & \sin(\omega t_{n}) & 1
\end{bmatrix}
\end{aligned}
```
``t = [t_1, \dots, t_n]^\text{T}`` is the column vector of observation
times, ``x`` is the column vector with the unknown coefficients
```math
\begin{aligned}
\begin{bmatrix}
a_f \\
b_f \\
c_f
\end{bmatrix}
\end{aligned}
```
and ``\mathbf{y}`` is the column vector of the signal. The solution of this
linear system gives the desired coefficients.
This is what the [`LombScargle.model`](@ref) function does in order to return
the best fitting Lomb--Scargle model for the given signal at the given
frequency.
```@docs
LombScargle.model
```
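The linear-system description above can be turned directly into code. The
sketch below builds the matrix ``\mathbf{A}`` for a single frequency and solves
for the coefficients with a least-squares solve; it is only an illustration of
the math, not the package's internal implementation, and the comparison with
`LombScargle.model` may differ slightly depending on options such as
`fit_mean`:
```julia
using LombScargle

t = range(0.01, stop = 10, length = 500)
s = sinpi.(2 .* t) .+ 0.1 .* randn(length(t))

f = 1.0        # frequency at which to fit the model
ω = 2π * f

# Build A = [cos.(ωt)  sin.(ωt)  1] and solve A x = y in the least-squares sense.
A = [cos.(ω .* t) sin.(ω .* t) ones(length(t))]
a_f, b_f, c_f = A \ s

# For comparison, the best-fitting model returned by the package at frequency f.
s_fit = LombScargle.model(t, s, f)
```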
Examples
--------
Here is an example of a noisy periodic signal (``\sin(\pi t) + 1.5\cos(2\pi t)``)
sampled at unevenly spaced times.
```julia
julia> using LombScargle
julia> ntimes = 1001
1001
julia> t = range(0.01, stop = 10pi, length = ntimes) # Observation times
0.01:0.03140592653589793:31.41592653589793
julia> t += step(t)*rand(ntimes) # Randomize times
julia> s = sinpi.(t) .+ 1.5cospi.(2t) .+ rand(ntimes) # The signal
julia> plan = LombScargle.plan(t, s); # Pre-plan the periodogram
julia> pgram = lombscargle(plan) # Compute the periodogram
LombScargle.Periodogram{Float64,StepRangeLen{Float64,Base.TwicePrecision{Float64},Base.TwicePrecision{Float64}},Array{Float64,1}}([0.000472346, 0.000461633, 0.000440906, 0.000412717, 0.000383552, 0.000355828, 0.000289723, 0.000154585, 3.44734e-5, 5.94437e-7 … 3.15125e-5, 0.000487391, 0.0018939, 0.00367003, 0.00484181, 0.00495189, 0.00453233, 0.00480968, 0.00619657, 0.0074052], 0.003185690706734265:0.00637138141346853:79.72190993602499, [0.0295785, 0.0540516, 0.0780093, 0.122759, 0.15685, 0.192366, 0.206601, 0.252829, 0.265771, 0.315443 … 31.1512, 31.1758, 31.2195, 31.2342, 31.2752, 31.293, 31.3517, 31.3761, 31.4148, 31.4199], :standard)
```
You can plot the result, for example with
[Plots](https://github.com/tbreloff/Plots.jl) package. Use [`freqpower`](@ref)
function to get the frequency grid and the power of the periodogram as a
2-tuple.
```julia
using Plots
plot(freqpower(pgram)...)
```

You can also plot the power vs the period, instead of the frequency, with
[`periodpower`](@ref):
```julia
using Plots
plot(periodpower(pgram)...)
```

!!! warning
If you do not fit for the mean of the signal (`fit_mean=false` keyword to the
[`lombscargle`](@ref) function) and do not center the data (`center_data=false`),
you can get inaccurate results. For example, spurious peaks at low frequencies
can appear and the real peaks lose power:
```julia
plot(freqpower(lombscargle(t, s, fit_mean=false, center_data=false))...)
```

!!! tip
You can tune the frequency grid with appropriate keywords to
[`lombscargle`](@ref) function. For example, in order to increase the sampling
increase `samples_per_peak`, and set `maximum_frequency` to lower values in
order to narrow the frequency range:
```julia
plot(freqpower(lombscargle(t, s, samples_per_peak=20, maximum_frequency=1.5))...)
```

If you simply want to use your own frequency grid, directly set the
`frequencies` keyword:
```julia
plot(freqpower(lombscargle(t, s, frequencies=0.001:1e-3:1.5))...)
```

### Signal with Uncertainties
The generalised Lomb--Scargle periodogram is able to handle a signal with
uncertainties, and they will be used as weights in the algorithm. The
uncertainties can be passed either as the third optional argument `errors` to
[`lombscargle`](@ref) or by providing this function with a `signal` vector of
type `Measurement` (from
[Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl) package).
```julia
using Measurements, Plots
ntimes = 1001
t = range(0.01, stop = 10pi, length = ntimes)
s = sinpi.(2t)
errors = rand(0.1:1e-3:4.0, ntimes)
# Run one of the two following equivalent commands
plot(freqpower(lombscargle(t, s, errors, maximum_frequency=1.5))...)
plot(freqpower(lombscargle(t, measurement(s, errors), maximum_frequency=1.5))...)
```

This is the plot of the power versus the period:
```julia
# Run one of the two following equivalent commands
plot(periodpower(lombscargle(t, s, errors, maximum_frequency=1.5))...)
plot(periodpower(lombscargle(t, measurement(s, errors), maximum_frequency=1.5))...)
```

We recall that, when the signal has no errors, the generalised Lomb--Scargle
algorithm is used only if the `fit_mean` optional keyword to
[`lombscargle`](@ref) is `true`; it is always used if the signal has uncertainties.
### Find Highest Power and Associated Frequencies and Periods
The [`findmaxfreq`](@ref) function tells you the frequencies with the highest power
in the periodogram (and you can get the period by taking its inverse):
```jldoctest
julia> t = range(0, stop = 10, length = 1001);
julia> s = sinpi.(t);
julia> plan = LombScargle.plan(t, s); # Plan the periodogram
julia> p = lombscargle(plan);
julia> findmaxperiod(p) # Period with highest power
1-element Array{Float64,1}:
0.004987779939149084
julia> findmaxfreq(p) # Frequency with the highest power
1-element Array{Float64,1}:
200.49
```
This peak is at high frequencies, very far from the expected value of the period
of 2. In order to find the real peak, you can either narrow the ranges in order
to exclude higher harmonics
```julia
julia> findmaxperiod(p, [1, 10]) # Limit the search to periods in [1, 10]
1-element Array{Float64,1}:
2.04082
julia> findmaxfreq(p, [0.1, 1]) # Limit the search to frequencies in [0.1, 1]
1-element Array{Float64,1}:
0.49
```
or pass the `threshold` argument to [`findmaxfreq`](@ref) or
[`findmaxperiod`](@ref). You can use [`findmaxpower`](@ref) to discover the
highest power in the periodogram:
```julia
julia> findmaxpower(p)
0.9958310178312316
julia> findmaxperiod(p, 0.95)
10-element Array{Float64,1}:
2.04082
1.96078
0.0100513
0.0100492
0.00995124
0.00994926
0.00501278
0.00501228
0.00498778
0.00498728
julia> findmaxfreq(p, 0.95)
10-element Array{Float64,1}:
0.49
0.51
99.49
99.51
100.49
100.51
199.49
199.51
200.49
200.51
```
The first peak is the real one; the other double peaks appear at higher
harmonics.
!!! tip
Usually, plotting the periodogram can give you a clue of what's going on.
### Significance of the Peaks
The significance of the peaks in the Lomb--Scargle periodogram can be assessed
by measuring the [False-Alarm Probability](@ref). Analytic
expressions of this quantity and its inverse can be obtained with the
[`fap`](@ref) and [`fapinv`](@ref) functions, respectively.
```julia
julia> t = range(0.01, stop = 20, length = 1000);
julia> s = sinpi.(ℯ .* t).^2 .- cos.(5t).^4;
julia> plan = LombScargle.plan(t, s, samples_per_peak = 10);
julia> p = lombscargle(plan)
# Find the false-alarm probability of a peak with power 0.3.
julia> fap(p, 0.3)
0.028198095962262748
```
Thus, a peak with power ``0.3`` has a probability of ``0.028`` of being due to
noise only. The inverse of the false-alarm probability is often used as well:
what is the minimum power whose false-alarm probability is
lower than the given probability? For example, if you want to know the minimum
power for which the false-alarm probability is at most ``0.01`` you can use:
```julia
julia> fapinv(p, 0.01)
0.3304696923786712
```
As we already noted, analytic expressions of the false-alarm probability and its
inverse may not be reliable if your data does not satisfy specific
assumptions. A better way to calculate this quantity is to use statistical
methods. One of these is bootstrapping. In `LombScargle.jl`, you can use the
function [`LombScargle.bootstrap`](@ref) to create a bootstrap sample and then
you can calculate the false-alarm probability and its inverse using this sample.
!!! tip
When applying the bootstrap method you should use the same options you used to
perform the periodogram on your data. Using the same periodogram plan you used
to compute the periodogram will ensure that you use the same options. However,
note that the fast method gives approximate results that for some frequencies
may not be reliable (they can go outside the range ``[0, 1]`` for the standard
normalization). More robust results can be obtained with the `fast = false`
option.
```julia
# Create a bootstrap sample with 10000
# resamplings of the original data, re-using the
# same periodogram plan. The larger the better.
# This may take some minutes.
julia> b = LombScargle.bootstrap(10000, plan)
# Calculate the false-alarm probability of a peak
# with power 0.3 using this bootstrap sample.
julia> fap(b, 0.3)
0.0209
# Calculate the lowest power that has probability
# less than 0.01 in this bootstrap sample.
julia> fapinv(b, 0.01)
0.3268290388848437
```
If you query [`fapinv`](@ref) with too low a probability, the corresponding
power cannot be determined and you will get `NaN` as a result.
```julia
julia> fapinv(b, 1e-5)
NaN
```
If you want to find the power corresponding to a false-alarm probability of
``\text{prob} = 10^{-5}``, you have to create a new bootstrap sample with ``N``
resamplings so that ``N\cdot\text{prob}`` can be rounded to an integer larger than
or equal to one (for example ``N = 10^{5}``).
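For example, to resolve a false-alarm probability of ``10^{-5}`` you need on
the order of ``10^{5}`` resamplings. A sketch, re-using the signal from the
significance example above (this may take a while to run):
```julia
using LombScargle

t = range(0.01, stop = 20, length = 1000)
s = sinpi.(ℯ .* t).^2 .- cos.(5t).^4
plan = LombScargle.plan(t, s)

# 10^5 resamplings, so that N * prob = 1 for prob = 1e-5.
b_large = LombScargle.bootstrap(100_000, plan)
fapinv(b_large, 1e-5)
```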
### Find the Best-Fitting Model
The [`LombScargle.model`](@ref) function can help you to test whether a certain
frequency fits well your data.
```julia
using Plots
t = range(0.01, stop = 10pi, length = 1000) # Observation times
s = sinpi.(t) .+ 1.2cospi.(t) .+ 0.3rand(length(t)) # The noisy signal
# Pick up the best frequency
f = findmaxfreq(lombscargle(t, s, maximum_frequency=10, samples_per_peak=20))[1]
t_fit = range(0, stop = 1, length = 50)
s_fit = LombScargle.model(t, s, f, t_fit/f) # Determine the model
scatter(mod.(t.*f, 1), s, lab="Phased data", title="Best Lomb-Scargle frequency: $f")
plot!(t_fit, s_fit, lab="Best-fitting model", linewidth=4)
```

!!! tip
If there are more than one dominant frequency you may need to consider more
models. This task may require some work and patience. Plot the periodogram in
order to find the best frequencies.
```julia
using Plots
t = range(0.01, stop = 5, length = 1000) # Observation times
s = sinpi.(2t) .+ 1.2cospi.(4t) .+ 0.3rand(length(t)) # Noisy signal
plan = LombScargle.plan(t, s, samples_per_peak=50)
p = lombscargle(plan)
# After plotting the periodogram, you discover
# that it has two prominent peaks around 1 and 2.
f1 = findmaxfreq(p, [0.8, 1.2])[1] # Get peak frequency around 1
f2 = findmaxfreq(p, [1.8, 2.2])[1] # Get peak frequency around 2
fit1 = LombScargle.model(t, s, f1) # Determine the first model
fit2 = LombScargle.model(t, s, f2) # Determine the second model
scatter(t, s, lab="Data", title="Best-fitting Lomb-Scargle model")
plot!(t, fit1 + fit2, lab="Best-fitting model", linewidth=4)
```

Performance
-----------
A pre-planned periodogram in `LombScargle.jl` computed in single thread mode
with the fast method is more than 2 times faster than the implementation of the
same algorithm provided by Astropy, and more than 4 times faster if 4 FFTW
threads are used (on machines with at least 4 physical CPUs).
The following plot shows a comparison between the times needed to compute a
periodogram for a signal with N datapoints using `LombScargle.jl`, with 1 or 4
FFTW threads (with `flags = FFTW.MEASURE` for better performance), and the
single-threaded Astropy implementation. (Julia version: 1.6.0; `LombScargle.jl`
version: 1.0.0; Python version: 3.8.6; Astropy version: 4.1. CPU: Intel(R)
Core(TM) i7-4870HQ CPU @ 2.50GHz.)

Note that this comparison is unfair, as Astropy doesn’t support pre-planning a
periodogram nor multi-threading, and it pads vectors for FFT to a length which
is a power of 2, while by default `LombScargle.jl` uses lengths which are
multiples of 2, 3, 5, 7. A non-planned periodogram in single thread mode in
`LombScargle.jl` is still twice as fast as Astropy.
Development
-----------
The package is developed at
<https://github.com/JuliaAstro/LombScargle.jl>. There you can submit bug
reports, make suggestions, and propose pull requests.
### History
The ChangeLog of the package is available in
[NEWS.md](https://github.com/JuliaAstro/LombScargle.jl/blob/master/NEWS.md) file
in top directory.
License
-------
The `LombScargle.jl` package is licensed under the BSD 3-clause "New" or
"Revised" License. The original author is Mosè Giordano.
### Acknowledgements
This package adapts the implementation in Astropy of the fast Lomb--Scargle
method by [PR89]. We claim no endorsement nor promotion by the Astropy Team.
| LombScargle | https://github.com/JuliaAstro/LombScargle.jl.git |
|
[
"MIT"
] | 3.5.0 | 79e2d29b216ef24a0f4f905532b900dcf529aa06 | code | 14639 | module TikzPictures
export TikzPicture, PDF, TEX, TIKZ, SVG, save, tikzDeleteIntermediate, tikzCommand, tikzUseTectonic, TikzDocument, push!
import Base: push!
import LaTeXStrings: LaTeXString, @L_str
import tectonic_jll: tectonic
export LaTeXString, @L_str
_tikzDeleteIntermediate = true
_tikzCommand = "lualatex"
_tikzUseTectonic = false
mutable struct TikzPicture
data::AbstractString
options::AbstractString
preamble::AbstractString
environment::AbstractString
width::AbstractString
height::AbstractString
keepAspectRatio::Bool
enableWrite18::Bool
TikzPicture(data::AbstractString; options="", preamble="", environment="tikzpicture", width="", height="", keepAspectRatio=true, enableWrite18=true) = new(data, options, preamble, environment, width, height, keepAspectRatio, enableWrite18)
end
mutable struct TikzDocument
pictures::Vector{TikzPicture}
captions::Vector{AbstractString}
end
TikzDocument() = TikzDocument(TikzPicture[], String[])
# svg handling
include("svg.jl")
__init__() = __init__svg()
tikzUsePoppler() = svgBackend() == PopplerBackend()
tikzUsePoppler(value::Bool) = svgBackend(value ? PopplerBackend() : DVIBackend())
# standalone workaround:
# see http://tex.stackexchange.com/questions/315025/lualatex-texlive-2016-standalone-undefined-control-sequence
_standaloneWorkaround = false
function standaloneWorkaround()
global _standaloneWorkaround
_standaloneWorkaround
end
function standaloneWorkaround(value::Bool)
global _standaloneWorkaround
_standaloneWorkaround = value
nothing
end
function tikzDeleteIntermediate(value::Bool)
global _tikzDeleteIntermediate
_tikzDeleteIntermediate = value
nothing
end
function tikzDeleteIntermediate()
global _tikzDeleteIntermediate
_tikzDeleteIntermediate
end
function tikzCommand(value::AbstractString)
global _tikzCommand
_tikzCommand = value
nothing
end
function tikzCommand()
global _tikzCommand
_tikzCommand
end
function tikzUseTectonic(value::Bool)
global _tikzUseTectonic
_tikzUseTectonic = value
nothing
end
function tikzUseTectonic()
global _tikzUseTectonic
_tikzUseTectonic
end
function push!(td::TikzDocument, tp::TikzPicture; caption="")
push!(td.pictures, tp)
push!(td.captions, caption)
end
function removeExtension(filename::AbstractString, extension::AbstractString)
if endswith(filename, extension) || endswith(filename, uppercase(extension))
return filename[1:(end - length(extension))]
else
return filename
end
end
function bool_success(cmd::Cmd)
successful = false
try
successful = success(cmd)
catch ex
if ex isa Base.IOError
successful = false
else
rethrow()
end
end
return successful
end
abstract type SaveType end
mutable struct PDF <: SaveType
filename::AbstractString
PDF(filename::AbstractString) = new(removeExtension(filename, ".pdf"))
end
mutable struct TEX <: SaveType
filename::AbstractString
limit_to::Symbol
TEX(filename::AbstractString; include_preamble::Bool=true, limit_to::Symbol=(include_preamble ? :all : :picture)) =
new(removeExtension(filename, ".tex"), limit_to)
end
mutable struct TIKZ <: SaveType
filename::AbstractString
limit_to::Symbol
TIKZ(filename::AbstractString; include_preamble::Bool=true, limit_to::Symbol=(include_preamble ? :all : :picture)) = new(removeExtension(filename, ".tikz"), limit_to)
end
mutable struct SVG <: SaveType
filename::AbstractString
SVG(filename::AbstractString) = new(removeExtension(filename, ".svg"))
end
extension(f::SaveType) = lowercase(split("$(typeof(f))",".")[end])
resize(tp::TikzPicture) = !isempty(tp.width) || !isempty(tp.height)
# from: https://discourse.julialang.org/t/collecting-all-output-from-shell-commands/15592
function execute(cmd::Cmd)
out = Pipe()
err = Pipe()
process = run(pipeline(ignorestatus(cmd), stdout=out, stderr=err))
close(out.in)
close(err.in)
(
stdout = String(read(out)),
stderr = String(read(err)),
code = process.exitcode
)
end
function write_adjustbox_options(tex::IO, tp::TikzPicture)
adjustbox_options = []
if !isempty(tp.width)
push!(adjustbox_options, "width=$(tp.width)")
end
if !isempty(tp.height)
push!(adjustbox_options, "height=$(tp.height)")
end
if tp.keepAspectRatio
push!(adjustbox_options, "keepaspectratio")
end
adjustbox_option_string = join(adjustbox_options, ',')
println(tex, "\\begin{adjustbox}{$adjustbox_option_string}")
end
Base.showable(::MIME"image/svg+xml", tp::TikzPicture) = true
function save(f::Union{TEX,TIKZ}, tp::TikzPicture)
if !in(f.limit_to, [:all, :picture, :data])
throw(ArgumentError("limit_to must be one of :all, :picture, and :data"))
end
filename = f.filename
ext = extension(f)
tex = open("$(filename).$(ext)", "w")
if f.limit_to in [:all, :picture]
if f.limit_to == :all
if standaloneWorkaround()
println(tex, "\\RequirePackage{luatex85}")
end
if resize(tp)
# [tikz] class option has to be moved to a \usepackage
# https://tex.stackexchange.com/questions/455546/can-i-resize-a-tikz-picture-to-have-certain-dimensions-width-height
println(tex, "\\documentclass{standalone}")
println(tex, "\\usepackage{tikz}")
println(tex, "\\usepackage{adjustbox}")
else
println(tex, "\\documentclass[tikz]{standalone}")
end
println(tex, tp.preamble)
println(tex, "\\begin{document}")
end
if resize(tp)
write_adjustbox_options(tex, tp)
end
print(tex, "\\begin{$(tp.environment)}[")
print(tex, tp.options)
println(tex, "]")
end
println(tex, tp.data)
if f.limit_to in [:all, :picture]
println(tex, "\\end{$(tp.environment)}")
if resize(tp)
println(tex, "\\end{adjustbox}")
else
print(tex, "\n")
end
if f.limit_to == :all
println(tex, "\\end{document}")
end
end
close(tex)
end
function save(f::TEX, td::TikzDocument)
if isempty(td.pictures)
throw(ArgumentError("TikzDocument does not contain pictures"))
elseif !in(f.limit_to, [:all, :picture])
throw(ArgumentError("limit_to must be either :all or :picture"))
end
filename = f.filename
tex = open("$(filename).tex", "w")
if f.limit_to == :all
println(tex, "\\documentclass{article}")
println(tex, "\\usepackage{caption}")
println(tex, "\\usepackage{tikz}")
println(tex, td.pictures[1].preamble)
println(tex, "\\begin{document}")
end
println(tex, "\\centering")
@assert length(td.pictures) == length(td.captions)
i = 1
for tp in td.pictures
println(tex, "\\centering")
if resize(tp)
write_adjustbox_options(tex, tp)
end
print(tex, "\\begin{$(tp.environment)}[")
print(tex, tp.options)
println(tex, "]")
println(tex, tp.data)
println(tex, "\\end{$(tp.environment)}")
if resize(tp)
println(tex, "\\end{adjustbox}")
end
print(tex, "\\captionof{figure}{")
print(tex, td.captions[i])
println(tex, "}")
println(tex, "\\vspace{5ex}")
println(tex)
i += 1
end
if f.limit_to == :all
println(tex, "\\end{document}")
end
close(tex)
end
function latexerrormsg(s)
if '?' in s || '!' in s
beginError = false
for l in split(s, '\n')
if beginError
if !isempty(l) && l[1] == '?'
return
else
println(l)
end
else
if !isempty(l) && l[1] == '!'
println(l)
beginError = true
end
end
end
else
println(s)
end
end
_joinpath(a, b) = "$a/$b"
function _run(tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString; dvi::Bool=false)
arg = String[tikzCommand()]
latexSuccess = false
texlog = ""
if tikzUseTectonic() || !bool_success(`$(tikzCommand()) -v`)
tectonic() do tectonic_bin
if dvi
error("Tectonic does not currently support dvi backend")
end
arg[1] = tectonic_bin
if tp.enableWrite18
push!(arg, "-Zshell-escape")
end
push!(arg, "-o$(temp_dir)")
result = execute(`$(arg) $(temp_filename*".tex")`)
latexSuccess = (result.code == 0)
texlog = result.stderr
end
else
if tp.enableWrite18
push!(arg, "--enable-write18")
end
if dvi
push!(arg, "--output-format=dvi")
end
push!(arg, "--output-directory=$(temp_dir)")
latexSuccess = bool_success(`$(arg) $(temp_filename*".tex")`)
try
texlog = read(temp_filename * ".log", String)
catch
texlog = read(_joinpath(temp_dir,"texput.log"), String)
end
end
return latexSuccess, texlog
end
function save(f::PDF, tp::TikzPicture)
# Isolate basename and foldername of file
basefilename = basename(f.filename)
working_dir = dirname(abspath(f.filename))
# Call anonymous function to do task and automatically return
cd(working_dir) do
temp_dir = mktempdir("./")
temp_filename = _joinpath(temp_dir,basefilename)
# Save the TEX file in tmp dir
save(TEX(temp_filename * ".tex"), tp)
latexSuccess, texlog = _run(tp, temp_dir, temp_filename)
if occursin("LaTeX Warning: Label(s)", texlog)
latexSuccess, texlog = _run(tp, temp_dir, temp_filename)
end
# Move PDF out of tmpdir regardless
# Give warning if PDF file already exists
if isfile("$(basefilename).pdf")
@warn "$(basefilename).pdf already exists, overwriting!"
end
if latexSuccess
mv("$(temp_filename).pdf", "$(basefilename).pdf",force=true)
end
try
# Shouldn't need to be try-catched anymore, but best to be safe
# This failing is NOT critical either, so just make it a warning
if tikzDeleteIntermediate()
# Delete tmp dir
rm(temp_dir, recursive=true)
end
catch
@warn "TikzPictures: Your intermediate files are not being deleted."
end
if !latexSuccess
# Remove failed attempt.
if !standaloneWorkaround() && occursin("\\sa@placebox ->\\newpage \\global \\pdfpagewidth", texlog)
standaloneWorkaround(true)
save(f, tp)
return
end
latexerrormsg(texlog)
error("LaTeX error")
end
end
end
function save(f::PDF, td::TikzDocument)
# Isolate basename and foldername of file
basefilename = basename(f.filename)
working_dir = dirname(abspath(f.filename))
# Call anonymous function to do task and automatically return
cd(working_dir) do
# Create tmp dir in working directory
temp_dir = mktempdir("./")
temp_filename = _joinpath(temp_dir,basefilename)
try
save(TEX(temp_filename * ".tex"), td)
latexSuccess, texlog = _run(td.pictures[1], temp_dir, temp_filename)
# Move PDF out of tmpdir regardless
if isfile("$(basefilename).pdf")
@warn "$(basefilename).pdf already exists, overwriting!"
end
mv("$(temp_filename).pdf", "$(basefilename).pdf",force=true)
try
# Shouldn't need to be try-catched anymore, but best to be safe
# This failing is NOT critical either, so just make it a warning
if tikzDeleteIntermediate()
# Delete tmp dir
rm(temp_dir, recursive=true)
end
catch
@warn "TikzPictures: Your intermediate files are not being deleted."
end
catch
@warn "Error saving as PDF."
rethrow()
end
end
end
function save(f::SVG, tp::TikzPicture)
basefilename = basename(f.filename)
working_dir = dirname(abspath(f.filename))
# Call anonymous function to do task and automatically return
cd(working_dir) do
# Create tmp dir in working directory
temp_dir = mktempdir("./")
temp_filename = _joinpath(temp_dir,basefilename)
# Save the TEX file in tmp dir, then convert to SVG
save(TEX(temp_filename * ".tex"), tp)
_mkTempSvg(tp, temp_dir, temp_filename)
# Move SVG out of tmpdir into working dir and give warning if overwriting
if isfile("$(basefilename).svg")
@warn "$(basefilename).svg already exists, overwriting!"
end
mv("$(temp_filename).svg", "$(basefilename).svg",force=true)
try
# Shouldn't need to be try-catched anymore, but best to be safe
# This failing is NOT critical either, so just make it a warning
if tikzDeleteIntermediate()
# Delete tmp dir
rm(temp_dir, recursive=true)
end
catch
@warn "TikzPictures: Your intermediate files are not being deleted."
end
end
end
# this is needed to work with multiple images in ijulia (kind of a hack)
global _tikzid = round(UInt64, time() * 1e6)
function Base.show(f::IO, ::MIME"image/svg+xml", tp::TikzPicture)
global _tikzid
filename = "tikzpicture"
save(SVG(filename), tp)
s = read("$filename.svg", String)
s = replace(s, "glyph" => "glyph-$(_tikzid)-")
s = replace(s, "\"clip" => "\"clip-$(_tikzid)-")
s = replace(s, "#clip" => "#clip-$(_tikzid)-")
s = replace(s, "\"image" => "\"image-$(_tikzid)-")
s = replace(s, "#image" => "#image-$(_tikzid)-")
s = replace(s, "linearGradient id=\"linear" => "linearGradient id=\"linear-$(_tikzid)-")
s = replace(s, "#linear" => "#linear-$(_tikzid)-")
s = replace(s, "image id=\"" => "image style=\"image-rendering: pixelated;\" id=\"")
_tikzid += 1
println(f, s)
if tikzDeleteIntermediate()
rm("$filename.svg")
end
end
end # module
| TikzPictures | https://github.com/JuliaTeX/TikzPictures.jl.git |
|
[
"MIT"
] | 3.5.0 | 79e2d29b216ef24a0f4f905532b900dcf529aa06 | code | 3721 | using Requires
export SVGBackend, PdfToSvgBackend, PopplerBackend, DVIBackend, svgBackend
# types of backends that convert PDFs to SVGs
abstract type SVGBackend end
struct PdfToSvgBackend <: SVGBackend end
struct PopplerBackend <: SVGBackend end
struct DVIBackend <: SVGBackend end
# the current backend with a getter and a setter
const _svgBackend = Ref{SVGBackend}()
svgBackend() = _svgBackend[]
function svgBackend(backend::SVGBackend)
_initialize(backend)
_svgBackend[] = backend
end
# call this function from __init__
function __init__svg()
# determine the backend to use
if Sys.which("pdf2svg") !== nothing
svgBackend(PdfToSvgBackend())
else
try
svgBackend(PopplerBackend())
catch cause
@warn "Failed to load PopplerBackend; falling back on DVIBackend" cause
svgBackend(DVIBackend())
end
end
# define a new implementation for PopplerBackend, but only after `import Poppler_jll`
@require Poppler_jll="9c32591e-4766-534b-9725-b71a8799265b" begin
function _mkTempSvg(backend::PopplerBackend, tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString)
_mkTempPdf(tp, temp_dir, temp_filename) # convert to PDF and then to SVG
Poppler_jll.pdftocairo() do exe
return success(`$exe -svg $(temp_filename).pdf $(temp_filename).svg`)
end # convert PDF file in tmpdir to SVG file in tmpdir
end
end
end
#
# This function is the one used by save(::SVG); produce an SVG file in the temporary directory
#
function _mkTempSvg(tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString)
backend = svgBackend()
if !_mkTempSvg(backend, tp, temp_dir, temp_filename)
if tikzDeleteIntermediate()
rm(temp_dir, force=true, recursive=true)
end
error("$backend failed. Consider using another backend.")
end # otherwise, everything is fine
end
# backend initialization
_initialize(backend::SVGBackend) = nothing # default
_initialize(backend::PopplerBackend) =
if !Requires.isprecompiling()
@eval TikzPictures begin
try
import Poppler_jll # will trigger @require in __init__svg
catch
error("Unable to import Poppler_jll") # should not happen as long as Poppler_jll is a dependency
end
end
end
# compile a temporary PDF file that can be converted to SVG
function _mkTempPdf(tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString; dvi::Bool=false)
latexSuccess, texlog = _run(tp, temp_dir, temp_filename; dvi=dvi)
if occursin("LaTeX Warning: Label(s)", texlog)
latexSuccess, texlog = _run(tp, temp_dir, temp_filename; dvi=dvi)
end # second run
if !latexSuccess
latexerrormsg(texlog)
error("LaTeX error")
end
end
# compile temporary SVGs with different backends
_mkTempSvg(backend::SVGBackend, tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString) =
return false # always fail
function _mkTempSvg(backend::PdfToSvgBackend, tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString)
_mkTempPdf(tp, temp_dir, temp_filename) # convert to PDF and then to SVG
return success(`pdf2svg $(temp_filename).pdf $(temp_filename).svg`)
end
function _mkTempSvg(backend::DVIBackend, tp::TikzPicture, temp_dir::AbstractString, temp_filename::AbstractString)
_mkTempPdf(tp, temp_dir, temp_filename; dvi=true) # convert to DVI and then to SVG
cd(temp_dir) do
return success(`dvisvgm --no-fonts --output=$(basename(temp_filename)*".svg") $(basename(temp_filename)*".dvi")`)
end
end
| TikzPictures | https://github.com/JuliaTeX/TikzPictures.jl.git |
|
[
"MIT"
] | 3.5.0 | 79e2d29b216ef24a0f4f905532b900dcf529aa06 | code | 4478 | using TikzPictures
using Test
svgBackends = [
"testPic.pdf2svg.svg" => PdfToSvgBackend(),
"testPic.poppler.svg" => PopplerBackend(),
"testPic.dvisvgm.svg" => DVIBackend()
]
if VERSION < v"1.3"
deleteat!(svgBackends, 2)
@warn "Not testing PopplerBackend on Julia versions < 1.3"
end
# Pre-test cleanup (for repeated tests)
for file in ["testPic.pdf", "testPic.svg", "testDoc.pdf", "testDoc.tex", first.(svgBackends)...]
if isfile(file)
rm(file)
end
end
# redefining bool_success so we don't have to export it from TikzPictures
function bool_success(cmd::Cmd)
successful = false
try
successful = success(cmd)
catch ex
if ex isa Base.IOError
successful = false
else
rethrow()
end
end
return successful
end
# Run tests
data = "\\draw (0,0) -- (10,10);\n\\draw (10,0) -- (0,10);\n\\node at (5,5) {tikz \$\\sqrt{\\pi}\$};"
tp = TikzPicture(data, options="scale=0.25", preamble="")
td = TikzDocument()
push!(td, tp, caption="hello")
save(TEX("testPic"), tp)
@test isfile("testPic.tex")
# check that the TEX file contains the desired environments
function has_environment(content::String, environment::String)
has_begin = occursin("\\begin{$environment}", content)
has_end = occursin("\\end{$environment}", content)
if has_begin && has_end
return true # has both
elseif !has_begin && !has_end
return false # has neither
else
error("\\begin{$environment} and \\end{$environment} do not match")
end
end
filecontent = join(readlines("testPic.tex", keep=true)) # read with line breaks
@test occursin(data, filecontent) # also check that the data is contained
@test has_environment(filecontent, "tikzpicture")
@test has_environment(filecontent, "document")
# same check for include_preamble=false and limit_to=:picture
save(TEX("testPic"; include_preamble=false), tp)
filecontent = join(readlines("testPic.tex", keep=true))
@test occursin(data, filecontent)
@test has_environment(filecontent, "tikzpicture") # must occur
@test !has_environment(filecontent, "document") # must not occur
# same check for limit_to=:data
save(TEX("testPic"; limit_to=:data), tp)
filecontent = join(readlines("testPic.tex", keep=true))
@test occursin(data, filecontent)
@test !has_environment(filecontent, "tikzpicture")
@test !has_environment(filecontent, "document")
save(TEX("testPic"), tp) # save again with limit_to=:all
if bool_success(`lualatex -v`)
save(PDF("testPic"), tp)
@test isfile("testPic.pdf")
save(SVG("testPic"), tp)
@test isfile("testPic.svg") # default SVG backend
@testset for (k, v) in svgBackends
svgBackend(v)
for usetectonic in [true, false]
if usetectonic && v == DVIBackend()
continue
end
tikzUseTectonic(usetectonic)
save(SVG(k), tp)
@test isfile(k)
rm(k)
end
end
save(PDF("testDoc"), td)
@test isfile("testDoc.pdf")
else
@warn "lualatex is missing; can not test compilation"
end
# Test tikz-cd
data = "A\\arrow{rd}\\arrow{r} & B \\\\& C"
tp = TikzPicture(data, options="scale=0.25", environment="tikzcd", preamble="\\usepackage{tikz-cd}")
td = TikzDocument()
push!(td, tp, caption="hello")
save(TEX("testCD"), tp)
@test isfile("testCD.tex")
filecontent = join(readlines("testCD.tex", keep=true)) # read with line breaks
@test occursin(data, filecontent) # also check that the data is contained
@test has_environment(filecontent, "tikzcd")
@test has_environment(filecontent, "document")
if bool_success(`lualatex -v`)
save(PDF("testCD"), tp)
@test isfile("testCD.pdf")
save(PDF("testCDDoc"), td)
@test isfile("testCDDoc.pdf")
else
@warn "lualatex is missing; can not test compilation"
end
# Test adjustbox width/height
adjustbox_width = TikzPicture(L"\node (sigmoid) [circle, draw=black, label=left:{$\sigma(z)$}] {};", width="2cm")
save(TEX("adjustbox_width"), adjustbox_width)
@test isfile("adjustbox_width.tex")
adjustbox_height = TikzPicture(L"\node (sigmoid) [circle, draw=black, label=left:{$\sigma(z)$}] {};", height="10cm")
save(TEX("adjustbox_height"), adjustbox_height)
@test isfile("adjustbox_height.tex")
adjustbox_aspect = TikzPicture(L"\node (sigmoid) [circle, draw=black, label=left:{$\sigma(z)$}] {};", height="10cm", keepAspectRatio=false)
save(TEX("adjustbox_aspect"), adjustbox_aspect)
@test isfile("adjustbox_aspect.tex")
| TikzPictures | https://github.com/JuliaTeX/TikzPictures.jl.git |
|
[
"MIT"
] | 3.5.0 | 79e2d29b216ef24a0f4f905532b900dcf529aa06 | docs | 2313 | # TikzPictures
[](https://github.com/JuliaTeX/PGFPlots.jl/actions)
[](https://codecov.io/gh/JuliaTeX/TikzPictures.jl)
This library allows one to create Tikz pictures and save them in various formats. It integrates with IJulia, outputting SVG images to the notebook.
This library will try to use the lualatex package already installed on the system. If the library cannot run lualatex, it will fall back to trying to use tectonic for the compilation.
Lualatex may be installed through the texlive and miktex distributions. You should have dvisvgm installed. On Ubuntu, you can get the required packages, if not already present, by running `sudo apt-get install pdf2svg texlive-latex-base texlive-binaries texlive-pictures texlive-latex-extra texlive-luatex`.
Note: this package will attempt to turn off interpolation in the generated SVG, but this currently only works in Chrome.
## Example
```julia
using TikzPictures
tp = TikzPicture("\\draw (0,0) -- (10,10);\n\\draw (10,0) -- (0,10);\n\\node at (5,5) {tikz \$\\sqrt{\\pi}\$};", options="scale=0.25", preamble="")
save(PDF("test"), tp)
save(SVG("test"), tp)
save(TEX("test"), tp)
save(TIKZ("test"), tp)
```
As you can see above, you have to escape backslashes and dollar signs in LaTeX. To simplify things, this package provides the LaTeXString type, which can be constructed via L"...." without escaping backslashes or dollar signs.
```julia
tp = TikzPicture(L"""
\draw (0,0) -- (10,10);
\draw (10,0) -- (0,10);
\node at (5,5) {tikz $\sqrt{\pi}$};"""
, options="scale=0.25", preamble="")
```
## Embedding TEX files in external documents
Compiling a standalone LaTeX file requires the Tikz code to be wrapped in a `tikzpicture` environment, which again is wrapped in a `document` environment. You can omit these wrappers if you intend to embed the output in a larger document, instead of compiling it as a standalone file.
```julia
save(TEX("test"; limit_to=:all), tp) # the default, save a complete file
save(TEX("test"; limit_to=:picture), tp) # only wrap in a tikzpicture environment
save(TEX("test"; limit_to=:data), tp) # do not wrap the Tikz code, at all
```
| TikzPictures | https://github.com/JuliaTeX/TikzPictures.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 3578 | module RockSample
using LinearAlgebra
using POMDPs
using POMDPTools
using StaticArrays
using Parameters
using Random
using Compose
using Combinatorics
using DiscreteValueIteration
using ParticleFilters # used in heuristics
export
RockSamplePOMDP,
RSPos,
RSState,
RSExit,
RSExitSolver,
RSMDPSolver,
RSQMDPSolver
const RSPos = SVector{2, Int}
"""
RSState{K}
Represents the state in a RockSamplePOMDP problem.
`K` is an integer representing the number of rocks
# Fields
- `pos::RSPos` position of the robot
- `rocks::SVector{K, Bool}` the status of the rocks (false=bad, true=good)
"""
struct RSState{K}
pos::RSPos
rocks::SVector{K, Bool}
end
@with_kw struct RockSamplePOMDP{K} <: POMDP{RSState{K}, Int, Int}
map_size::Tuple{Int, Int} = (5,5)
rocks_positions::SVector{K,RSPos} = @SVector([(1,1), (3,3), (4,4)])
init_pos::RSPos = (1,1)
sensor_efficiency::Float64 = 20.0
bad_rock_penalty::Float64 = -10
good_rock_reward::Float64 = 10.
step_penalty::Float64 = 0.
sensor_use_penalty::Float64 = 0.
exit_reward::Float64 = 10.
terminal_state::RSState{K} = RSState(RSPos(-1,-1),
SVector{length(rocks_positions),Bool}(falses(length(rocks_positions))))
# Some special indices for quickly retrieving the stateindex of any state
indices::Vector{Int} = cumprod([map_size[1], map_size[2], fill(2, length(rocks_positions))...][1:end-1])
discount_factor::Float64 = 0.95
end
# to handle the case where rocks_positions is not a StaticArray
function RockSamplePOMDP(map_size,
rocks_positions,
args...
)
k = length(rocks_positions)
return RockSamplePOMDP{k}(map_size,
SVector{k,RSPos}(rocks_positions),
args...
)
end
# Generate a random instance of RockSample(n, m) with an n×n square map and m rocks
RockSamplePOMDP(map_size::Int, rocknum::Int, rng::AbstractRNG=Random.GLOBAL_RNG) = RockSamplePOMDP((map_size,map_size), rocknum, rng)
# Generate a random instance of RockSample with an n×m map and `rocknum` rocks
function RockSamplePOMDP(map_size::Tuple{Int, Int}, rocknum::Int, rng::AbstractRNG=Random.GLOBAL_RNG)
possible_ps = [(i, j) for i in 1:map_size[1], j in 1:map_size[2]]
selected = unique(rand(rng, possible_ps, rocknum))
while length(selected) != rocknum
push!(selected, rand(rng, possible_ps))
selected = unique!(selected)
end
return RockSamplePOMDP(map_size=map_size, rocks_positions=selected)
end
# transform a Rocksample state to a vector
function POMDPs.convert_s(T::Type{<:AbstractArray}, s::RSState, m::RockSamplePOMDP)
return convert(T, vcat(s.pos, s.rocks))
end
# transform a vector to a RSState
function POMDPs.convert_s(T::Type{RSState}, v::AbstractArray, m::RockSamplePOMDP)
return RSState(RSPos(v[1], v[2]), SVector{length(v)-2,Bool}(v[i] for i = 3:length(v)))
end
# To handle the case where the `rocks_positions` is specified
RockSamplePOMDP(map_size::Tuple{Int, Int}, rocks_positions::AbstractVector) = RockSamplePOMDP(map_size=map_size, rocks_positions=rocks_positions)
POMDPs.isterminal(pomdp::RockSamplePOMDP, s::RSState) = s.pos == pomdp.terminal_state.pos
POMDPs.discount(pomdp::RockSamplePOMDP) = pomdp.discount_factor
include("states.jl")
include("actions.jl")
include("transition.jl")
include("observations.jl")
include("reward.jl")
include("visualization.jl")
include("heuristics.jl")
end # module
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 1042 | const N_BASIC_ACTIONS = 5
const BASIC_ACTIONS_DICT = Dict(:sample => 1,
:north => 2,
:east => 3,
:south => 4,
:west => 5)
const ACTION_DIRS = (RSPos(0,0),
RSPos(0,1),
RSPos(1,0),
RSPos(0,-1),
RSPos(-1,0))
POMDPs.actions(pomdp::RockSamplePOMDP{K}) where K = 1:N_BASIC_ACTIONS+K
POMDPs.actionindex(pomdp::RockSamplePOMDP, a::Int) = a
function POMDPs.actions(pomdp::RockSamplePOMDP{K}, s::RSState) where K
if in(s.pos, pomdp.rocks_positions) # slow? pomdp.rock_pos is a vec
return actions(pomdp)
else
# sample not available
return 2:N_BASIC_ACTIONS+K
end
end
function POMDPs.actions(pomdp::RockSamplePOMDP, b)
# All states in a belief should have the same position, which is what the valid action space depends on
state = rand(Random.GLOBAL_RNG, b)
return actions(pomdp, state)
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 3657 | # A fixed action policy which always takes the action `move east`.
struct RSExitSolver <: Solver end
struct RSExit <: Policy
exit_return::Vector{Float64}
end
POMDPs.solve(::RSExitSolver, m::RockSamplePOMDP) = RSExit([discount(m)^(m.map_size[1]-x) * m.exit_reward for x in 1:m.map_size[1]])
POMDPs.solve(solver::RSExitSolver, m::UnderlyingMDP{P}) where P <: RockSamplePOMDP = solve(solver, m.pomdp)
POMDPs.value(p::RSExit, s::RSState) = s.pos[1] == -1 ? 0.0 : p.exit_return[s.pos[1]]
function POMDPs.value(p::RSExit, b::AbstractParticleBelief)
utility = 0.0
for (i, s) in enumerate(particles(b))
if s.pos[1] != -1 # if s is not terminal
utility += weight(b, i) * p.exit_return[s.pos[1]]
end
end
return utility / weight_sum(b)
end
POMDPs.action(p::RSExit, b) = 2 # Move east
# Dedicated MDP solver for RockSample
struct RSMDPSolver <: Solver
include_Q::Bool
end
RSMDPSolver(;include_Q=false) = RSMDPSolver(include_Q)
POMDPs.solve(solver::RSMDPSolver, m::RockSamplePOMDP) = solve(solver, UnderlyingMDP(m))
function POMDPs.solve(solver::RSMDPSolver, m::UnderlyingMDP{P}) where P <: RockSamplePOMDP
util = rs_mdp_utility(m.pomdp)
if solver.include_Q
return solve(ValueIterationSolver(init_util=util, include_Q=true), m)
else
return ValueIterationPolicy(m, utility=util, include_Q=false)
end
end
# Dedicated QMDP solver for RockSample
struct RSQMDPSolver <: Solver end
function POMDPs.solve(::RSQMDPSolver, m::RockSamplePOMDP)
vi_policy = solve(RSMDPSolver(include_Q=true), m)
return AlphaVectorPolicy(m, vi_policy.qmat, vi_policy.action_map)
end
# Solve for the optimal utility of RockSample, assuming full observability.
function rs_mdp_utility(m::RockSamplePOMDP{K}) where K
util = zeros(length(states(m)))
discounts = discount(m) .^ (0:(m.map_size[1]+m.map_size[2]-2))
# Rewards for exiting.
exit_returns = [discounts[m.map_size[1] - x + 1] * m.exit_reward for x in 1:m.map_size[1]]
# Calculate the optimal utility for states having no good rocks, which is the exit return.
rocks = falses(K)
for x in 1:m.map_size[1]
for y in 1:m.map_size[2]
util[stateindex(m, RSState(RSPos(x,y), SVector{K,Bool}(rocks)))] = exit_returns[x]
end
end
    # The optimal utility of states having k good rocks can be derived from the utility of states having k-1 good rocks:
    # Utility_k = max(ExitReturn, max_{r∈GoodRocks}(γ^(Manhattan distance to r) * (GoodRockReward + γ * Utility_{k-1})))
for good_rock_num in 1:K
for good_rocks in combinations(1:K, good_rock_num)
rocks = falses(K)
for good_rock in good_rocks
rocks[good_rock] = true
end
for x in 1:m.map_size[1]
for y in 1:m.map_size[2]
best_return = exit_returns[x]
for good_rock in good_rocks
dist_to_good_rock = abs(x - m.rocks_positions[good_rock][1]) + abs(y - m.rocks_positions[good_rock][2])
rocks[good_rock] = false
sample_return = discounts[dist_to_good_rock+1] * (m.good_rock_reward + discounts[2] * util[stateindex(m, RSState(m.rocks_positions[good_rock], SVector{K,Bool}(rocks)))])
rocks[good_rock] = true
if sample_return > best_return
best_return = sample_return
end
end
util[stateindex(m, RSState(RSPos(x,y), SVector{K,Bool}(rocks)))] = best_return
end
end
end
end
return util
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 797 | const OBSERVATION_NAME = (:good, :bad, :none)
POMDPs.observations(pomdp::RockSamplePOMDP) = 1:3
POMDPs.obsindex(pomdp::RockSamplePOMDP, o::Int) = o
function POMDPs.observation(pomdp::RockSamplePOMDP, a::Int, s::RSState)
if a <= N_BASIC_ACTIONS
# no obs
return SparseCat((1,2,3), (0.0,0.0,1.0)) # for type stability
else
rock_ind = a - N_BASIC_ACTIONS
rock_pos = pomdp.rocks_positions[rock_ind]
dist = norm(rock_pos - s.pos)
efficiency = 0.5*(1.0 + exp(-dist*log(2)/pomdp.sensor_efficiency))
rock_state = s.rocks[rock_ind]
if rock_state
return SparseCat((1,2,3), (efficiency, 1.0 - efficiency, 0.0))
else
return SparseCat((1,2,3), (1.0 - efficiency, efficiency, 0.0))
end
end
end | RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 555 | function POMDPs.reward(pomdp::RockSamplePOMDP, s::RSState, a::Int)
r = pomdp.step_penalty
if next_position(s, a)[1] > pomdp.map_size[1]
r += pomdp.exit_reward
return r
end
if a == BASIC_ACTIONS_DICT[:sample] && in(s.pos, pomdp.rocks_positions) # sample
rock_ind = findfirst(isequal(s.pos), pomdp.rocks_positions) # slow ?
r += s.rocks[rock_ind] ? pomdp.good_rock_reward : pomdp.bad_rock_penalty
    elseif a > N_BASIC_ACTIONS # using sensor
r += pomdp.sensor_use_penalty
end
return r
end | RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 1357 | function POMDPs.stateindex(pomdp::RockSamplePOMDP{K}, s::RSState{K}) where K
if isterminal(pomdp, s)
return length(pomdp)
end
return s.pos[1] + pomdp.indices[1] * (s.pos[2]-1) + dot(view(pomdp.indices, 2:(K+1)), s.rocks)
end
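# Worked example: with the default 5×5 map and K = 3 rocks, indices == [5, 25, 50, 100],
# so the state at pos (2,3) with all rocks bad gets index 2 + 5*(3-1) = 12,
# and flipping rock 1 to good adds 25 (index 37).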
function state_from_index(pomdp::RockSamplePOMDP{K}, si::Int) where K
if si == length(pomdp)
return pomdp.terminal_state
end
rocks_dim = @SVector fill(2, K)
nx, ny = pomdp.map_size
s = CartesianIndices((nx, ny, rocks_dim...))[si]
pos = RSPos(s[1], s[2])
rocks = SVector{K, Bool}(s.I[3:(K+2)] .- 1)
return RSState{K}(pos, rocks)
end
# the state space is the pomdp itself
POMDPs.states(pomdp::RockSamplePOMDP) = pomdp
Base.length(pomdp::RockSamplePOMDP) = pomdp.map_size[1]*pomdp.map_size[2]*2^length(pomdp.rocks_positions) + 1
# we define an iterator over it
function Base.iterate(pomdp::RockSamplePOMDP, i::Int=1)
if i > length(pomdp)
return nothing
end
s = state_from_index(pomdp, i)
return (s, i+1)
end
function POMDPs.initialstate(pomdp::RockSamplePOMDP{K}) where K
probs = normalize!(ones(2^K), 1)
states = Vector{RSState{K}}(undef, 2^K)
for (i,rocks) in enumerate(Iterators.product(ntuple(x->[false, true], K)...))
states[i] = RSState{K}(pomdp.init_pos, SVector(rocks))
end
return SparseCat(states, probs)
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 1223 | function POMDPs.transition(pomdp::RockSamplePOMDP{K}, s::RSState{K}, a::Int) where K
if isterminal(pomdp, s)
return Deterministic(pomdp.terminal_state)
end
new_pos = next_position(s, a)
if a == BASIC_ACTIONS_DICT[:sample] && in(s.pos, pomdp.rocks_positions)
rock_ind = findfirst(isequal(s.pos), pomdp.rocks_positions) # slow ?
# set the new rock to bad
new_rocks = MVector{K, Bool}(undef)
for r=1:K
new_rocks[r] = r == rock_ind ? false : s.rocks[r]
end
new_rocks = SVector(new_rocks)
else
new_rocks = s.rocks
end
if new_pos[1] > pomdp.map_size[1]
# the robot reached the exit area
new_state = pomdp.terminal_state
else
new_pos = RSPos(clamp(new_pos[1], 1, pomdp.map_size[1]),
clamp(new_pos[2], 1, pomdp.map_size[2]))
new_state = RSState{K}(new_pos, new_rocks)
end
return Deterministic(new_state)
end
function next_position(s::RSState, a::Int)
if a > N_BASIC_ACTIONS || a == 1
        # the robot senses a rock or samples; its position does not change
return s.pos
elseif a <= N_BASIC_ACTIONS
# the robot moves
return s.pos + ACTION_DIRS[a]
end
end | RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 5155 | function POMDPTools.render(pomdp::RockSamplePOMDP, step;
viz_rock_state=true,
viz_belief=true,
pre_act_text=""
)
nx, ny = pomdp.map_size[1] + 1, pomdp.map_size[2] + 1
cells = []
for x in 1:nx-1, y in 1:ny-1
ctx = cell_ctx((x, y), (nx, ny))
cell = compose(ctx, rectangle(), fill("white"))
push!(cells, cell)
end
grid = compose(context(), linewidth(0.5mm), stroke("gray"), cells...)
outline = compose(context(), linewidth(1mm), rectangle())
rocks = []
for (i, (rx, ry)) in enumerate(pomdp.rocks_positions)
ctx = cell_ctx((rx, ry), (nx, ny))
clr = "black"
if viz_rock_state && get(step, :s, nothing) !== nothing
clr = step[:s].rocks[i] ? "green" : "red"
end
rock = compose(ctx, ngon(0.5, 0.5, 0.3, 6), stroke(clr), fill("gray"))
push!(rocks, rock)
end
rocks = compose(context(), rocks...)
exit_area = render_exit((nx, ny))
agent = nothing
action = nothing
if get(step, :s, nothing) !== nothing
agent_ctx = cell_ctx(step[:s].pos, (nx, ny))
agent = render_agent(agent_ctx)
if get(step, :a, nothing) !== nothing
action = render_action(pomdp, step)
end
end
action_text = render_action_text(pomdp, step, pre_act_text)
belief = nothing
if viz_belief && (get(step, :b, nothing) !== nothing)
belief = render_belief(pomdp, step)
end
sz = min(w, h)
return compose(context((w - sz) / 2, (h - sz) / 2, sz, sz), action, agent, belief,
exit_area, rocks, action_text, grid, outline)
end
function cell_ctx(xy, size)
nx, ny = size
x, y = xy
return context((x - 1) / nx, (ny - y - 1) / ny, 1 / nx, 1 / ny)
end
function render_belief(pomdp::RockSamplePOMDP, step)
rock_beliefs = get_rock_beliefs(pomdp, get(step, :b, nothing))
nx, ny = pomdp.map_size[1] + 1, pomdp.map_size[2] + 1
belief_outlines = []
belief_fills = []
for (i, (rx, ry)) in enumerate(pomdp.rocks_positions)
ctx = cell_ctx((rx, ry), (nx, ny))
clr = "black"
belief_outline = compose(ctx, rectangle(0.1, 0.87, 0.8, 0.07), stroke("gray31"), fill("gray31"))
belief_fill = compose(ctx, rectangle(0.1, 0.87, rock_beliefs[i] * 0.8, 0.07), stroke("lawngreen"), fill("lawngreen"))
push!(belief_outlines, belief_outline)
push!(belief_fills, belief_fill)
end
return compose(context(), belief_fills..., belief_outlines...)
end
function get_rock_beliefs(pomdp::RockSamplePOMDP{K}, b) where K
rock_beliefs = zeros(Float64, K)
for (sᵢ, bᵢ) in weighted_iterator(b)
rock_beliefs[sᵢ.rocks.==1] .+= bᵢ
end
return rock_beliefs
end
function render_exit(size)
nx, ny = size
x = nx
y = ny
ctx = context((x - 1) / nx, (ny - y) / ny, 1 / nx, 1)
rot = Rotation(pi / 2, 0.5, 0.5)
txt = compose(ctx, text(0.5, 0.5, "EXIT AREA", hcenter, vtop, rot),
stroke("black"),
fill("black"),
fontsize(20pt))
return compose(ctx, txt, rectangle(), fill("red"))
end
function render_agent(ctx)
center = compose(context(), circle(0.5, 0.5, 0.3), fill("orange"), stroke("black"))
lwheel = compose(context(), ellipse(0.2, 0.5, 0.1, 0.3), fill("orange"), stroke("black"))
rwheel = compose(context(), ellipse(0.8, 0.5, 0.1, 0.3), fill("orange"), stroke("black"))
return compose(ctx, center, lwheel, rwheel)
end
function render_action_text(pomdp::RockSamplePOMDP, step, pre_act_text)
actions = ["Sample", "North", "East", "South", "West"]
action_text = "Terminal"
if get(step, :a, nothing) !== nothing
if step.a <= N_BASIC_ACTIONS
action_text = actions[step.a]
else
action_text = "Sensing Rock $(step.a - N_BASIC_ACTIONS)"
end
end
action_text = pre_act_text * action_text
_, ny = pomdp.map_size
ny += 1
ctx = context(0, (ny - 1) / ny, 1, 1 / ny)
txt = compose(ctx, text(0.5, 0.5, action_text, hcenter),
stroke("black"),
fill("black"),
fontsize(20pt))
return compose(ctx, txt, rectangle(), fill("white"))
end
function render_action(pomdp::RockSamplePOMDP, step)
if step.a == BASIC_ACTIONS_DICT[:sample]
ctx = cell_ctx(step.s.pos, pomdp.map_size .+ (1, 1))
if in(step.s.pos, pomdp.rocks_positions)
rock_ind = findfirst(isequal(step.s.pos), pomdp.rocks_positions)
clr = step.s.rocks[rock_ind] ? "green" : "red"
else
clr = "black"
end
return compose(ctx, ngon(0.5, 0.5, 0.1, 6), stroke("gray"), fill(clr))
elseif step.a > N_BASIC_ACTIONS
rock_ind = step.a - N_BASIC_ACTIONS
rock_pos = pomdp.rocks_positions[rock_ind]
nx, ny = pomdp.map_size[1] + 1, pomdp.map_size[2] + 1
rock_pos = ((rock_pos[1] - 0.5) / nx, (ny - rock_pos[2] - 0.5) / ny)
rob_pos = ((step.s.pos[1] - 0.5) / nx, (ny - step.s.pos[2] - 0.5) / ny)
sz = min(w, h)
return compose(context((w - sz) / 2, (h - sz) / 2, sz, sz), line([rob_pos, rock_pos]), stroke("orange"), linewidth(0.01w))
end
return nothing
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 6862 | using Random
using RockSample
using POMDPs
using POMDPTools
using Test
using Compose
using ParticleFilters
function test_state_indexing(pomdp::RockSamplePOMDP{K}, ss::Vector{RSState{K}}) where K
for (i,s) in enumerate(states(pomdp))
if s != ss[i]
return false
end
end
return true
end
@testset "state space" begin
pomdp = RockSamplePOMDP{3}()
state_iterator = states(pomdp)
ss = ordered_states(pomdp)
@test length(ss) == length(pomdp)
@test test_state_indexing(pomdp, ss)
pomdp = RockSamplePOMDP{3}(map_size=(7, 10))
state_iterator = states(pomdp)
ss = ordered_states(pomdp)
@test length(ss) == length(pomdp)
@test test_state_indexing(pomdp, ss)
end
@testset "convert_s" begin
p = RockSamplePOMDP{3}()
b0 = initialstate(p)
s_test = rand(b0)
v_s_test = convert_s(Vector{Float64}, s_test, p)
s_back = convert_s(RSState, v_s_test, p)
@test s_back == s_test
end
@testset "action space" begin
rng = MersenneTwister(1)
pomdp = RockSamplePOMDP((5, 5), 3, rng)
acts = actions(pomdp)
b1 = ParticleCollection{RSState{3}}(
RSState{3}[
RSState{3}([1, 1], Bool[0, 1, 0]),
RSState{3}([1, 1], Bool[1, 1, 1])
], nothing)
b2 = ParticleCollection{RSState{3}}(
RSState{3}[
RSState{3}([3, 1], Bool[0, 1, 0]),
RSState{3}([3, 1], Bool[1, 1, 1])
], nothing)
@test acts == ordered_actions(pomdp)
@test length(acts) == length(actions(pomdp))
@test length(acts) == RockSample.N_BASIC_ACTIONS + 3
s = RSState{3}((3,1), (true, false, false))
@test actions(pomdp, s) == actions(pomdp)
s2 = RSState{3}((1,2), (true, false, false))
@test length(actions(pomdp, s2)) == length(actions(pomdp)) - 1
@test actionindex(pomdp, 1) == 1
@test length(actions(pomdp, b1)) == length(actions(pomdp)) - 1
@test actions(pomdp, b2) == actions(pomdp)
end
@testset "observation" begin
rng = MersenneTwister(1)
pomdp = RockSamplePOMDP{3}(init_pos=(1,1))
obs = observations(pomdp)
@test obs == ordered_observations(pomdp)
s0 = rand(rng, initialstate(pomdp))
od = observation(pomdp, 1, s0)
o = rand(rng, od)
@test o == 3
@inferred observation(pomdp, 6, s0)
@inferred observation(pomdp, 1, s0)
o = rand(rng, observation(pomdp, 6, s0))
@test o == 1
o = rand(rng, observation(pomdp, 7, s0))
@test o == 1
@test has_consistent_observation_distributions(pomdp)
end
@testset "reward" begin
pomdp = RockSamplePOMDP{3}(init_pos=(1,1))
rng = MersenneTwister(3)
s = rand(rng, initialstate(pomdp))
@test reward(pomdp, s, 1, s) == pomdp.bad_rock_penalty
@test reward(pomdp, s, 2, s) == pomdp.step_penalty
s = RSState(RSPos(3,3), s.rocks)
@test reward(pomdp, s, 1, s) == pomdp.good_rock_reward
@test reward(pomdp, s, 3, s) == pomdp.step_penalty
@test reward(pomdp, s, 6, s) == pomdp.sensor_use_penalty
@test reward(pomdp, s, 6, s) == 0.
@test reward(pomdp, s, 2, s) == 0.
s = RSState(RSPos(5,4), s.rocks)
sp = rand(rng, transition(pomdp, s, RockSample.BASIC_ACTIONS_DICT[:east]))
@test reward(pomdp, s, RockSample.BASIC_ACTIONS_DICT[:east], sp) == pomdp.exit_reward
pomdp = RockSamplePOMDP{3}(init_pos=(1,1), step_penalty=-1., sensor_use_penalty=-5.)
rng = MersenneTwister(3)
s = rand(rng, initialstate(pomdp))
@test reward(pomdp, s, 2, s) == -1.
@test reward(pomdp, s, 6, s) == -5. - 1.
@test reward(pomdp, s, 1, s) == pomdp.bad_rock_penalty - 1.
s = RSState(RSPos(3,3), s.rocks)
@test reward(pomdp, s, 1, s) == pomdp.good_rock_reward - 1.
end
@testset "simulation" begin
pomdp = RockSamplePOMDP{3}(init_pos=(1,1))
rng = MersenneTwister(3)
up = DiscreteUpdater(pomdp)
# go straight to the exit
policy = FunctionPolicy(s->RockSample.BASIC_ACTIONS_DICT[:east])
hr = HistoryRecorder(rng=rng)
b0 = initialstate(pomdp)
s0 = rand(b0)
rs_exit = solve(RSExitSolver(), pomdp)
hist = simulate(hr, pomdp, policy, up, b0, s0)
@test undiscounted_reward(hist) == pomdp.exit_reward
@test discounted_reward(hist) ≈ discount(pomdp)^(n_steps(hist) - 1) * pomdp.exit_reward
@test discounted_reward(hist) ≈ value(rs_exit, s0)
@test value(rs_exit, pomdp.terminal_state) == 0.0
# random policy
policy = RandomPolicy(pomdp, rng=rng)
hr = HistoryRecorder(rng=rng)
hist = simulate(hr, pomdp, policy, up)
@test n_steps(hist) > pomdp.map_size[1]
end
@testset "mdp/qmdp policy" begin
pomdp = RockSamplePOMDP(5,5)
@time solve(RSMDPSolver(), UnderlyingMDP(pomdp))
@time solve(RSMDPSolver(), pomdp)
@time solve(RSQMDPSolver(), pomdp)
end
@testset "rendering" begin
pomdp = RockSamplePOMDP{3}(init_pos=(1,1))
s0 = RSState{3}((1,1), [true, false, true])
render(pomdp, (s=s0, a=3))
b0 = initialstate(pomdp)
render(pomdp, (s=s0, a=3, b=b0))
end
@testset "constructor" begin
@test RockSamplePOMDP() isa RockSamplePOMDP
@test RockSamplePOMDP(rocks_positions=[(1,1),(2,2)]) isa RockSamplePOMDP{2}
@test RockSamplePOMDP(7,8) isa RockSamplePOMDP{8}
@test RockSamplePOMDP((13,14), 15) isa RockSamplePOMDP{15}
@test RockSamplePOMDP((11,5), [(1,2), (2,4), (11,5)]) isa RockSamplePOMDP{3}
end
@testset "visualization" begin
include("test_visualization.jl")
test_initial_state()
test_particle_collection()
end
@testset "transition" begin
rng = MersenneTwister(1)
pomdp = RockSamplePOMDP{3}(init_pos=(1,1))
s0 = rand(rng, initialstate(pomdp))
@test s0.pos == pomdp.init_pos
d = transition(pomdp, s0, 2) # move up
sp = rand(rng, d)
spp = rand(rng, d)
@test spp == sp
@test sp.pos == [1, 2]
@test sp.rocks == s0.rocks
s = RSState{3}((pomdp.map_size[1], 1), s0.rocks)
d = transition(pomdp, s, 3) # move right
sp = rand(rng, d)
@test isterminal(pomdp, sp)
@test sp == pomdp.terminal_state
@inferred transition(pomdp, s0, 3)
@inferred rand(rng, transition(pomdp, s0, 3))
@test has_consistent_transition_distributions(pomdp)
include("../src/transition.jl")
include("../src/actions.jl")
# test next_pose for each action
s = RSState{3}((1, 1), s0.rocks)
# sample
@test next_position(s, BASIC_ACTIONS_DICT[:sample]) == s.pos
# north
@test next_position(s, BASIC_ACTIONS_DICT[:north]) == [1, 2]
# east
@test next_position(s, BASIC_ACTIONS_DICT[:east]) == [2, 1]
# south
s = RSState{3}((1, 2), s0.rocks)
@test next_position(s, BASIC_ACTIONS_DICT[:south]) == [1, 1]
# west
s = RSState{3}((2, 1), s0.rocks)
@test next_position(s, BASIC_ACTIONS_DICT[:west]) == [1, 1]
# test sense
for i in 1:length(s.rocks)
@test next_position(s, N_BASIC_ACTIONS+i) == s.pos
end
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 506 | using Random
using POMDPs
using POMDPSimulators
using POMDPGifs
using RockSample
using Cairo
using SARSOP
rng = MersenneTwister(1)
pomdp = RockSamplePOMDP{3}(rocks_positions=[(2,3), (4,4), (4,2)],
sensor_efficiency=20.0,
discount_factor=0.95,
good_rock_reward = 20.0)
solver = SARSOPSolver(precision=1e-3)
policy = solve(solver, pomdp)
sim = GifSimulator(filename="test.gif", max_steps=30)
simulate(sim, pomdp, policy)
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | code | 1412 | pomdp = RockSamplePOMDP{3}()
function test_initial_state()
rng = MersenneTwister(2)
s0 = rand(rng, initialstate(pomdp))
c = render(pomdp, (s=s0, a=6))
c = render(pomdp, (s=s0, a=6, b=Deterministic(s0)))
c = render(pomdp, (s=s0, a=6, b=initialstate(pomdp)))
c |> SVG("rocksample.svg")
end
function test_particle_collection()
b0 = ParticleCollection{RSState{3}}(
RSState{3}[
RSState{3}([1, 1], Bool[1, 0, 0]), RSState{3}([1, 1], Bool[1, 1, 1]),
RSState{3}([1, 1], Bool[0, 0, 1]), RSState{3}([1, 1], Bool[1, 0, 1]),
RSState{3}([1, 1], Bool[1, 0, 0]), RSState{3}([1, 1], Bool[1, 1, 0]),
RSState{3}([1, 1], Bool[0, 1, 0]), RSState{3}([1, 1], Bool[1, 1, 0]),
RSState{3}([1, 1], Bool[1, 0, 1]), RSState{3}([1, 1], Bool[0, 1, 1]),
RSState{3}([1, 1], Bool[0, 0, 1]), RSState{3}([1, 1], Bool[1, 0, 0]),
RSState{3}([1, 1], Bool[1, 0, 1]), RSState{3}([1, 1], Bool[0, 1, 1]),
RSState{3}([1, 1], Bool[0, 1, 1]), RSState{3}([1, 1], Bool[1, 1, 0]),
RSState{3}([1, 1], Bool[1, 1, 1]), RSState{3}([1, 1], Bool[0, 0, 1]),
RSState{3}([1, 1], Bool[1, 1, 1]), RSState{3}([1, 1], Bool[1, 0, 1])
],
nothing
)
s0 = rand(b0)
c = render(pomdp, (s=s0, a=6, b=b0))
c |> SVG("rocksample2.svg")
end
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e865fbf1480668f32111facc113f897ccdc9eac | docs | 3783 | # RockSample.jl
[](https://github.com/JuliaPOMDP/RockSample.jl/actions/workflows/CI.yml)
[](https://codecov.io/github/JuliaPOMDP/RockSample.jl)
Implement the RockSample [1] problem with the [POMDPs.jl](https://github.com/JuliaPOMDP/POMDPs.jl) interface.
[1] T. Smith, R. Simmons, "Heuristic Search Value Iteration for POMDPs," in *Association for Uncertainty in Artificial Intelligence (UAI)*, 2004

## Installation
```julia
using Pkg
Pkg.add("RockSample")
```
## Problem description
- **States**: position of the robot and status of the rocks.
- **Actions**: There are 5 basic actions (moving up, down, left, and right, and sampling a rock) and $K$ sensing actions to check the state of a rock. When sampling or sensing, the robot does not move.
- **Transition model**: When taking a moving action, the robot moves deterministically to the desired cell. The robot can only exit the map through the exit area (on the right side of the grid). Actions that would cause the robot to go over the edge of the grid have no effect.
- **Observation model**: The robot can observe the status of a rock, with some noise, when executing a sensing action. The sensing accuracy decays exponentially with the distance to the rock; the decay rate is controlled by the parameter `sensor_efficiency` (see the sketch below). If the robot is moving or sampling, it does not receive an informative observation (it receives `:none`).
- **Reward model**: At each action, the robot receives a reward of `step_penalty` (negative number). The robot receives a positive reward of `exit_reward` for reaching the exit area. When sampling, the robot receives a reward of `good_rock_reward` if the sampled rock is good or `bad_rock_penalty` (negative number) if the rock is bad. When using the sensor, the robot receives a reward of `sensor_use_penalty` (negative number). The `step_penalty` is additive to the other rewards (e.g. when sampling a good rock, the robot would receive a reward of `step_penalty` + `good_rock_reward`).
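The sensor noise can be probed directly through `POMDPs.observation`; a minimal sketch (the rock layout below is just an illustrative assumption):

```julia
using POMDPs, RockSample
pomdp = RockSamplePOMDP(rocks_positions=[(2,3), (4,4), (4,2)])
s = rand(initialstate(pomdp))      # the robot starts at init_pos = (1,1)
# actions 1-5 are the basic actions; action 5 + i senses rock i
observation(pomdp, 6, s)           # SparseCat over observation indices 1:3 (:good, :bad, :none)
```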
### Example
```julia
using POMDPs
using RockSample
using SARSOP # load a POMDP Solver
using POMDPGifs # to make gifs
using Cairo # for making/saving the gif
pomdp = RockSamplePOMDP(rocks_positions=[(2,3), (4,4), (4,2)],
sensor_efficiency=20.0,
discount_factor=0.95,
good_rock_reward = 20.0)
solver = SARSOPSolver(precision=1e-3)
policy = solve(solver, pomdp)
sim = GifSimulator(filename="test.gif", max_steps=30)
simulate(sim, pomdp, policy)
```
**`RockSamplePOMDP` Parameters:**
- constructor: `RockSamplePOMDP{K}(kwargs...)` where `K` is an integer representing the number of rocks
- keyword arguments:
- `map_size::Tuple{Int,Int}` the size of the grid, default (5,5)
  - `rocks_positions::SVector{K,RSPos}`, the list of positions of the rocks, default `@SVector[(1,1), (3,3), (4,4)]`
- `init_pos::RSPos` the initial position of the robot, default (1, 1)
- `sensor_efficiency::Float64`, the decaying rate of the sensor performance, default 20.
- `bad_rock_penalty::Float64` default -10.
- `good_rock_reward::Float64` default 10.
- `step_penalty::Float64` default 0.
- `sensor_use_penalty::Float64` default 0.
- `exit_reward::Float64` default 10.
- `discount_factor::Float64` default 0.95
**Internal types:**
`RSPos` : represents a position in the grid as a static array of 2 integers.
`RSState`: represents the state of the POMDP. The field `pos::RSPos` is the position of the robot
and the field `rocks::SVector{K, Bool}` represents the status of the rocks (good or bad).
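A minimal sketch of constructing these types directly (purely illustrative):

```julia
using RockSample, StaticArrays
s = RSState{3}(RSPos(1, 1), SVector(true, false, true))  # robot at (1,1); rocks 1 and 3 are good
s.pos    # RSPos(1, 1)
s.rocks  # 3-element SVector{3, Bool}
```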
| RockSample | https://github.com/JuliaPOMDP/RockSample.jl.git |
|
[
"MIT"
] | 0.2.3 | dd82ddaf1f1bcbd250ca14e9ca090427aaba6b38 | code | 7141 | module ThreadedSparseArrays
export ThreadedSparseMatrixCSC
using LinearAlgebra
import LinearAlgebra: mul!
using SparseArrays
import SparseArrays: getcolptr, AbstractSparseMatrixCSC, DenseMatrixUnion
const AdjOrTransDenseMatrix = if VERSION < v"1.6.0-rc2"
SparseArrays.AdjOrTransStridedOrTriangularMatrix
else
Union{DenseMatrixUnion,Adjoint{<:Any,<:DenseMatrixUnion},Transpose{<:Any,<:DenseMatrixUnion}}
end
# * Threading utilities
struct RangeIterator
k::Int
d::Int
r::Int
end
"""
RangeIterator(n::Int,k::Int)
Returns an iterator splitting the range `1:n` into `min(k,n)` parts of (almost) equal size.
"""
RangeIterator(n::Int, k::Int) = RangeIterator(min(n,k),divrem(n,k)...)
Base.length(it::RangeIterator) = it.k
endpos(it::RangeIterator, i::Int) = i*it.d+min(i,it.r)
Base.iterate(it::RangeIterator, i::Int=1) = i>it.k ? nothing : (endpos(it,i-1)+1:endpos(it,i), i+1)
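# Usage sketch: collect(RangeIterator(10, 3)) == [1:4, 5:7, 8:10];
# when `k` does not divide `n`, the first `r` chunks receive one extra element.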
# * ThreadedSparseMatrixCSC
"""
ThreadedSparseMatrixCSC(A)
Thin container around `A::SparseMatrixCSC` that will enable certain
threaded multiplications of `A` with dense matrices.
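# Example
A sketch mirroring the README usage:
```julia
using SparseArrays, ThreadedSparseArrays
A = sprand(1000, 100, 0.05)
At = ThreadedSparseMatrixCSC(A)
B = At * randn(100, 10)    # threaded sparse–dense multiply
```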
"""
struct ThreadedSparseMatrixCSC{Tv,Ti,At} <: AbstractSparseMatrixCSC{Tv,Ti}
A::At
ThreadedSparseMatrixCSC(A::At) where {Tv,Ti,At<:AbstractSparseMatrixCSC{Tv,Ti}} =
new{Tv,Ti,At}(A)
end
Base.size(A::ThreadedSparseMatrixCSC, args...) = size(A.A, args...)
for f in [:rowvals, :nonzeros, :getcolptr]
@eval SparseArrays.$(f)(A::ThreadedSparseMatrixCSC) = SparseArrays.$(f)(A.A)
end
@static if v"1.7.0" <= VERSION < v"1.8.0-"
SparseArrays._goodbuffers(A::ThreadedSparseMatrixCSC) = SparseArrays._goodbuffers(A.A)
SparseArrays._checkbuffers(A::ThreadedSparseMatrixCSC) = SparseArrays._checkbuffers(A.A)
end
for (T,t) in ((ThreadedSparseMatrixCSC,identity), (Adjoint{<:Any,<:ThreadedSparseMatrixCSC},adjoint), (Transpose{<:Any,<:ThreadedSparseMatrixCSC},transpose))
@eval Base.copy(A::$T) = ThreadedSparseMatrixCSC(copy($t($t(A).A)))
@eval Base.permutedims(A::$T, (a,b)) = ThreadedSparseMatrixCSC(permutedims($t($t(A).A), (a,b)))
end
# sparse * sparse multiplications are not (currently) threaded, but we want to keep the return type
for (T1,t1) in ((ThreadedSparseMatrixCSC,identity), (Adjoint{<:Any,<:ThreadedSparseMatrixCSC},adjoint), (Transpose{<:Any,<:ThreadedSparseMatrixCSC},transpose))
for (T2,t2) in ((ThreadedSparseMatrixCSC,identity), (Adjoint{<:Any,<:ThreadedSparseMatrixCSC},adjoint), (Transpose{<:Any,<:ThreadedSparseMatrixCSC},transpose))
@eval Base.:(*)(A::$T1, B::$T2) = ThreadedSparseMatrixCSC($t1($t1(A).A)*$t2($t2(B).A))
end
end
function mul!(C::StridedVecOrMat, A::ThreadedSparseMatrixCSC, B::Union{StridedVector,AdjOrTransDenseMatrix}, α::Number, β::Number)
size(A, 2) == size(B, 1) || throw(DimensionMismatch())
size(A, 1) == size(C, 1) || throw(DimensionMismatch())
size(B, 2) == size(C, 2) || throw(DimensionMismatch())
nzv = nonzeros(A)
rv = rowvals(A)
if β != 1
β != 0 ? rmul!(C, β) : fill!(C, zero(eltype(C)))
end
@sync for r in RangeIterator(size(C,2), Threads.nthreads())
Threads.@spawn for k in r
@inbounds for col in 1:size(A, 2)
αxj = B[col,k] * α
for j in nzrange(A, col)
C[rv[j], k] += nzv[j]*αxj
end
end
end
end
C
end
for (T, t) in ((Adjoint, adjoint), (Transpose, transpose))
@eval function mul!(C::StridedVecOrMat, xA::$T{<:Any,<:ThreadedSparseMatrixCSC}, B::AdjOrTransDenseMatrix, α::Number, β::Number)
A = xA.parent
size(A, 2) == size(C, 1) || throw(DimensionMismatch())
size(A, 1) == size(B, 1) || throw(DimensionMismatch())
size(B, 2) == size(C, 2) || throw(DimensionMismatch())
nzv = nonzeros(A)
rv = rowvals(A)
if β != 1
β != 0 ? rmul!(C, β) : fill!(C, zero(eltype(C)))
end
@sync for r in RangeIterator(size(C,2), Threads.nthreads())
Threads.@spawn for k in r
@inbounds for col in 1:size(A, 2)
tmp = zero(eltype(C))
for j in nzrange(A, col)
tmp += $t(nzv[j])*B[rv[j],k]
end
C[col,k] += tmp * α
end
end
end
C
end
@eval function mul!(C::StridedVecOrMat, xA::$T{<:Any,<:ThreadedSparseMatrixCSC}, B::StridedVector, α::Number, β::Number)
A = xA.parent
size(A, 2) == size(C, 1) || throw(DimensionMismatch())
size(A, 1) == size(B, 1) || throw(DimensionMismatch())
size(B, 2) == size(C, 2) || throw(DimensionMismatch())
@assert size(B,2)==1
nzv = nonzeros(A)
rv = rowvals(A)
if β != 1
β != 0 ? rmul!(C, β) : fill!(C, zero(eltype(C)))
end
@sync for r in RangeIterator(size(A,2), Threads.nthreads())
Threads.@spawn @inbounds for col in r
tmp = zero(eltype(C))
for j in nzrange(A, col)
tmp += $t(nzv[j])*B[rv[j]]
end
C[col] += tmp * α
end
end
C
end
end
function mul!(C::StridedVecOrMat, X::AdjOrTransDenseMatrix, A::ThreadedSparseMatrixCSC, α::Number, β::Number)
mX, nX = size(X)
nX == size(A, 1) || throw(DimensionMismatch())
mX == size(C, 1) || throw(DimensionMismatch())
size(A, 2) == size(C, 2) || throw(DimensionMismatch())
rv = rowvals(A)
nzv = nonzeros(A)
if β != 1
β != 0 ? rmul!(C, β) : fill!(C, zero(eltype(C)))
end
# TODO: split in X isa DenseMatrixUnion and X isa Adjoint/Transpose so we can use @simd in the first case (see original code in SparseArrays)
@sync for r in RangeIterator(size(A,2), Threads.nthreads())
Threads.@spawn for col in r
@inbounds for k in nzrange(A, col)
Aiα = nzv[k] * α
rvk = rv[k]
for multivec_row in 1:mX
C[multivec_row, col] += X[multivec_row, rvk] * Aiα
end
end
end
end
C
end
for (T, t) in ((Adjoint, adjoint), (Transpose, transpose))
@eval function mul!(C::StridedVecOrMat, X::AdjOrTransDenseMatrix, xA::$T{<:Any,<:ThreadedSparseMatrixCSC}, α::Number, β::Number)
A = xA.parent
mX, nX = size(X)
nX == size(A, 2) || throw(DimensionMismatch())
mX == size(C, 1) || throw(DimensionMismatch())
size(A, 1) == size(C, 2) || throw(DimensionMismatch())
rv = rowvals(A)
nzv = nonzeros(A)
if β != 1
β != 0 ? rmul!(C, β) : fill!(C, zero(eltype(C)))
end
# transpose of Threaded * Dense algorithm above
@sync for r in RangeIterator(size(C,1), Threads.nthreads())
Threads.@spawn for k in r
@inbounds for col in 1:size(A, 2)
αxj = X[k,col] * α
for j in nzrange(A, col)
C[k, rv[j]] += $t(nzv[j])*αxj
end
end
end
end
C
end
end
end # module
| ThreadedSparseArrays | https://github.com/jagot/ThreadedSparseArrays.jl.git |
|
[
"MIT"
] | 0.2.3 | dd82ddaf1f1bcbd250ca14e9ca090427aaba6b38 | code | 5337 | using ThreadedSparseArrays
using SparseArrays
using LinearAlgebra
using Random
using StableRNGs
using Test
@static if VERSION < v"1.6.0"
parentmodule(x) = x.linfo.def.module
end
function match_exception(f, ::Type{T}=DimensionMismatch, func=r"^mul!$", m=ThreadedSparseArrays) where T
try
f()
catch ex
st = stacktrace(catch_backtrace())[1]
return ex isa T && match(func,string(st.func))!==nothing && parentmodule(st) === m
end
false
end
function rand_dense(rng,::Type{ComplexF64}, N, n)
M = max(N,n)
Matrix(sparse(randperm(rng,M),1:M,one(ComplexF64)))[1:N,1:n]
end
rand_dense(rng,::Type{ComplexF64}, N) = (x=zeros(ComplexF64,N); x[rand(rng,1:N)] = one(ComplexF64); x)
rand_dense(rng,::Type{Complex{Int}}, sz...) = rand(rng,0:5,sz...) .+ im*rand(rng,0:5,sz...)
rand_sparse(rng,::Type{ComplexF64}, N, n, p) = sprand(rng,ComplexF64,N,n,p)
rand_sparse(rng,::Type{Complex{Int}}, N, n, p) = sprand(N,n,p,x->Complex{Int}.(rand(rng,0:5,x),rand(rng,0:5,x)))
rand_scalar(rng,::Type{T}) where T<:Complex = T(rand(rng,2 .^ (1:5)) + im*rand(rng,2 .^ (1:5)))
@testset "ThreadedSparseArrays.jl" begin
# Test that all combinations of dense*sparse multiplication are threaded
@testset "Threading" begin
A = ThreadedSparseMatrixCSC(spzeros(2,3))
B = zeros(4,5)
@test match_exception(()->A*B)
@test match_exception(()->A'B)
@test match_exception(()->A*B')
@test match_exception(()->A'B')
@test match_exception(()->B*A)
@test match_exception(()->B'A)
@test match_exception(()->B*A')
@test match_exception(()->B'A')
end
# These test below are here to get the right fallback for sparse times sparse.
# The implementations are not (currently) threaded.
@testset "Fallbacks" begin
A = ThreadedSparseMatrixCSC(spzeros(2,3))
B = spzeros(4,5)
@test match_exception(()->A*B, DimensionMismatch, r"", SparseArrays)
@test match_exception(()->A'B, DimensionMismatch, r"", SparseArrays)
@test match_exception(()->A*B', DimensionMismatch, r"", SparseArrays)
@test match_exception(()->A'B', DimensionMismatch, r"", SparseArrays)
@test match_exception(()->B*A, DimensionMismatch, r"", SparseArrays)
@test match_exception(()->B'A, DimensionMismatch, r"", SparseArrays)
@test match_exception(()->B*A', DimensionMismatch, r"", SparseArrays)
@test match_exception(()->B'A', DimensionMismatch, r"", SparseArrays)
end
@testset "ReturnType_$(op1)_$(op2)" for op1 in [identity,adjoint,transpose], op2 in [identity,adjoint,transpose]
rng = StableRNG(1234)
A = rand_sparse(rng,Complex{Int64},10,10,0.4)
B = rand_sparse(rng,Complex{Int64},10,10,0.4)
ref = op1(A)*op2(B)
out = op1(ThreadedSparseMatrixCSC(A))*op2(ThreadedSparseMatrixCSC(B))
@test out isa ThreadedSparseMatrixCSC
@test out == ref
out = op1(ThreadedSparseMatrixCSC(A))*op2(B)
@test out isa SparseMatrixCSC
@test out == ref
out = op1(A)*op2(ThreadedSparseMatrixCSC(B))
@test out isa SparseMatrixCSC
@test out == ref
end
@testset "copy_$op" for op in [identity,adjoint,transpose]
rng = StableRNG(1234)
A = rand_sparse(rng,Complex{Int64},8,10,0.4)
out = copy(op(ThreadedSparseMatrixCSC(A)))
@test out isa ThreadedSparseMatrixCSC
@test out == op(A)
out = permutedims(op(ThreadedSparseMatrixCSC(A)))
@test out isa ThreadedSparseMatrixCSC
@test out == permutedims(op(A))
out = convert(Matrix,op(ThreadedSparseMatrixCSC(A)))
@test out isa Matrix
@test out == op(A)
end
N = 1000
n = 200
@testset "$T" for T in (ComplexF64, Complex{Int64})
rng = StableRNG(1234)
C = rand_sparse(rng,T,N,n,0.05)
@testset "(α,β)=$αβ" for αβ in ((), (rand_scalar(rng,T), zero(T)), (zero(T),rand_scalar(rng,T)), (rand_scalar(rng,T),rand_scalar(rng,T)))
@testset "R_$(op)" for op in [identity,adjoint,transpose]
Ct = op(ThreadedSparseMatrixCSC(C))
M = size(Ct,1)
X = rand_dense(rng,T,M,M)
out = zeros(T, size(Ct))
LinearAlgebra.mul!(out, X, Ct, αβ...)
ref = zeros(T, size(op(C)))
LinearAlgebra.mul!(ref, X, op(C), αβ...)
@test out == ref
end
@testset "L_$(op)" for op in [identity,adjoint,transpose]
Ct = op(ThreadedSparseMatrixCSC(C))
m = size(Ct,2)
X = rand_dense(rng,T,m,m)
out = zeros(T, size(Ct))
LinearAlgebra.mul!(out, Ct, X, αβ...)
ref = zeros(T, size(op(C)))
LinearAlgebra.mul!(ref, op(C), X, αβ...)
@test out == ref
end
x = rand_dense(rng,T,N)
@testset "L_$(op)_vec" for op in [adjoint,transpose]
Ct = op(ThreadedSparseMatrixCSC(C))
out = zeros(T, n)
LinearAlgebra.mul!(out, Ct, x, αβ...)
ref = zeros(T, n)
LinearAlgebra.mul!(ref, op(C), x, αβ...)
@test out == ref
end
end
end
end
| ThreadedSparseArrays | https://github.com/jagot/ThreadedSparseArrays.jl.git |
|
[
"MIT"
] | 0.2.3 | dd82ddaf1f1bcbd250ca14e9ca090427aaba6b38 | docs | 1667 | # ThreadedSparseArrays.jl
[](https://github.com/jagot/ThreadedSparseArrays.jl/actions)
[](https://codecov.io/gh/jagot/ThreadedSparseArrays.jl)
Simple package providing a wrapper type enabling threaded sparse
matrix–dense matrix multiplication. Based on [this
PR](https://github.com/JuliaLang/julia/pull/29525).
## Installation
Install with:
```
] add ThreadedSparseArrays
```
Note that you *must* enable threading in Julia for
ThreadedSparseArrays to work. You can do so by setting the
[JULIA_NUM_THREADS](https://docs.julialang.org/en/v1/manual/environment-variables/#JULIA_NUM_THREADS-1)
environment variable. To test that it is set properly, run
```julia
Threads.nthreads()
```
and make sure it returns the number of threads you wanted.
## Example usage
To use ThreadedSparseArrays, all you need to do is to wrap your sparse
matrix using the ThreadedSparseMatrixCSC type, like this:
```julia
using SparseArrays
using ThreadedSparseArrays
A = sprand(10000, 100, 0.05); # sparse
X1 = randn(100, 100); # dense
X2 = randn(10000, 100); # dense
At = ThreadedSparseMatrixCSC(A); # threaded version
# threaded sparse matrix–dense matrix multiplication
B1 = At*X1;
B2 = At'X2;
```
## Notes
* If the right-hand side `X` is a `Vector`, you need to use `At'X` to
  get threading; `At*X` will not work (see the sketch below).
* You might only get speedups for large matrices. Use `@btime` from
the [BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl)
package to check if your use case is improved.
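A minimal sketch of both notes (sizes and the benchmark are illustrative only):

```julia
using SparseArrays, ThreadedSparseArrays
using BenchmarkTools

A = sprand(10000, 100, 0.05)
At = ThreadedSparseMatrixCSC(A)
x = randn(10000)          # dense vector

y = At'x                  # threaded; At*x with a plain Vector is not
@btime $A' * $x           # single-threaded baseline
@btime $At' * $x          # threaded wrapper
```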
| ThreadedSparseArrays | https://github.com/jagot/ThreadedSparseArrays.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 950 | using Documenter, DocumenterTools
push!(LOAD_PATH,"../src/")
using NumNN
makedocs(
sitename = "NumNN.jl",
authors = "Mohammad Hizzani",
# doctest = false,
# clean = true,
# linkcheck = false,
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true"
),
modules = [NumNN],
pages = Any[
"Home" => "index.md",
"Docstrings" => "docstrings.md",
"Tutorials" => "tutorials.md"
],
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
#=deploydocs(
repo = "<repository url>"
)=#
deploydocs(
repo = "github.com/MohHizzani/NumNN.jl.git",
target = "build",
# push_preview = true,
branch = "gh-pages",
devurl = "dev",
versions = ["stable" => "v^", "v#.#.#", "dev" => "dev"],
# make = nothing,
# forcepush = true,
)
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 61 | module NumNN
include("includes.jl")
end # module NumNN
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 18070 |
"""
abstract type to include all layers
"""
abstract type Layer end
export Layer
### FCLayer
@doc raw"""
FCLayer(channels=0, actFun=:noAct, [layerInput = nothing; keepProb = 1.0])
Fully-connected layer (equivalent to Dense in TensorFlow etc.)
# Arguments
- `channels` := (`Integer`) is the number of nodes in the layer
- `actFun` := (`Symbol`) is the activation function of this layer
- `layerInput` := (`Layer` or `Array`) the input of this layer (optional; no need to assign it)
- `keepProb` := (`AbstractFloat`) the keep probability (1 - prob of the dropout rate)
---------
# Summary
mutable struct FCLayer <: Layer
-------
# Fields
- `channels::Integer` := is the number of nodes in the layer
- `actFun::Symbol` := the activation function of this layer
- `inputS::Tuple{Integer, Integer}` := input size of the layer, of the shape (channels of the previous layer, size of mini-batch)
- `outputS::Tuple{Integer, Integer}` := output size of the layer, of the shape (channels of this layer, size of mini-batch)
- `keepProb::AbstractFloat` := the keep probability (rate) of the drop-out operation `<1.0`
- `W::Array{T,2} where {T}` := the scaling parameters of this layer `W * X`, of the shape (channels of this layer, channels of the previous layer)
- `B::Array{T,2} where {T}` := the bias of this layer `W * X .+ B`, of the shape (channels of this layer, 1)
- `dW::Array{T,2} where {T}` := the derivative of the loss function to the W parameters $\frac{dJ}{dW}$
- `dB::Array{T,2} where {T}` := the derivative of the loss function to the B parameters $\frac{dJ}{dB}$
- `forwCount::Integer` := forward propagation counter
- `backCount::Integer` := backward propagation counter
- `updateCount::Integer` := update parameters counter
- `prevLayer::L where {L<:Union{Layer,Nothing}}` := the previous layer which is
the input of this layer
- `nextLayers::Array{Layer,1}` := An array of the next `layer`(s)
---------
# Supertype Hierarchy
FCLayer <: Layer <: Any
---------
# Examples
```julia
X_Input = Input(X_train)
X = FCLayer(20, :relu)(X_Input)
```
In the previous example the variable `X_Input` is a pointer to the `Input` layer, and `X` is a pointer to the `FCLayer(20, :relu)` layer.
**Note** that the layer instance can be used as a connecting function.
"""
mutable struct FCLayer <: Layer
channels::Integer
actFun::Symbol
inputS::Tuple{Integer, Integer}
outputS::Tuple{Integer, Integer}
"""
drop-out keeping node probability
"""
keepProb::AbstractFloat
W::Array{T,2} where {T}
B::Array{T,2} where {T}
dW::Array{T,2} where {T}
dB::Array{T,2} where {T}
# ### adding Z & A place holder for recursive calling
# ### and a counter for how many it was called
# Z::Array{T,2} where {T}
# dA::Array{T,2} where {T}
# A::Array{T,2} where {T}
V::Dict{Symbol,Array{T,2} where {T,N}}
S::Dict{Symbol,Array{T,2} where {T,N}}
forwCount::Integer
backCount::Integer
updateCount::Integer
"""
pointer to previous layer
"""
prevLayer::L where {L<:Union{Layer,Nothing}}
nextLayers::Array{Layer,1}
function FCLayer(channels=0, actFun=:noAct, layerInput = nothing; keepProb = 1.0)
# W, B
if isa(layerInput, Layer)
T = eltype(layerInput.W)
nl = channels
nl_1 = size(layerInput.W)[1]
prevLayer = layerInput
elseif isa(layerInput, Array)
T = eltype(layerInput)
nl = channels
nl_1 = size(layerInput)[1]
prevLayer = nothing
else
T = Any
nl = 0
nl_1 = 0
prevLayer = nothing
end
new(
channels,
actFun,
(0,0), #inputS
(0,0), #outputS
keepProb,
Matrix{T}(undef, 0, 0),
Matrix{T}(undef, 0, 0),
Matrix{T}(undef, 0, 0),
Matrix{T}(undef, 0, 0),
# Matrix{T}(undef, 0, 0),
# Matrix{T}(undef, 0, 0),
# Matrix{T}(undef, 0, 0),
Dict(:dw => Matrix{T}(undef, 0, 0), :db => Matrix{T}(undef, 0, 0)),
Dict(:dw => Matrix{T}(undef, 0, 0), :db => Matrix{T}(undef, 0, 0)),
0,
0,
0,
prevLayer,
Array{Layer,1}(undef,0)
)#
end #FCLayer
end #struct FCLayer
export FCLayer
### Multi-Input Layer MILayer
export MILayer
abstract type MILayer <: Layer end
### AddLayer
@doc raw"""
AddLayer(; [channels = 0])
Layer that performs an addition of multiple previous layers
# Arguments
- `channels` := (`Integer`) number of channels/nodes of this array which equals to the same of the previous layer(s)
---------
# Summary
mutable struct AddLayer <: MILayer
# Fields
- `channels::Integer` := is the number of nodes or `channels` in the layer
- `inputS::Tuple` := input size of the layer
- `outputS::Tuple` := output size of the layer
- `forwCount::Integer` := forward propagation counter
- `backCount::Integer` := backward propagation counter
- `updateCount::Integer` := update parameters counter
- `nextLayers::Array{Layer,1}` := An array of the next `layer`(s)
- `prevLayer::Array{Layer,1}` := An array of the previous `layer`(s) to be added
---------
# Supertype Hierarchy
AddLayer <: MILayer <: Layer <: Any
-------
# Examples
```julia
XIn1 = Input(X_train)
X1 = FCLayer(10, :relu)(XIn1)
XIn2 = Input(X_train)
X2 = FCLayer(10, :tanh)(XIn2)
Xa = AddLayer()([X1,X2])
```
"""
mutable struct AddLayer <: MILayer
channels::Integer
inputS::Tuple
outputS::Tuple
# A::Array{T,N} where {T,N}
# dA::Array{T,N} where {T,N}
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::Array{Layer,1}
function AddLayer(; channels = 0)
# channels = l1.channels
# T = eltype(l1.W)
new(
channels,
(0,), #inputS
(0,), #outputS
# Matrix{Nothing}(undef, 0, 0),
# Matrix{Nothing}(undef, 0, 0),
0,
0,
0,
Array{Layer,1}(undef,0), #nextLayers
Array{Layer,1}(undef,0), #prevLayer
)
end #function AddLayer
end
export AddLayer
### ConcatLayer
export ConcatLayer
@doc raw"""
ConcatLayer(; channels = 0)
Performs concatenation of a group of previous `Layer`s
# Summary
mutable struct ConcatLayer <: MILayer
# Fields
channels :: Integer
inputS :: Tuple
outputS :: Tuple
forwCount :: Integer
backCount :: Integer
updateCount :: Integer
nextLayers :: Array{Layer,1}
prevLayer :: Array{Layer,1}
# Supertype Hierarchy
ConcatLayer <: MILayer <: Layer <: Any
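# Examples
A minimal sketch, assuming the same connection syntax as `AddLayer`:
```julia
X_Input = Input(X_train)
X1 = FCLayer(10, :relu)(X_Input)
X2 = FCLayer(10, :tanh)(X_Input)
Xc = ConcatLayer()([X1, X2])
```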
"""
mutable struct ConcatLayer <: MILayer
channels::Integer
inputS::Tuple
outputS::Tuple
# A::Array{T,N} where {T,N}
# dA::Array{T,N} where {T,N}
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::Array{Layer,1}
LSlice::Dict{Layer,UnitRange{Integer}}
function ConcatLayer(; channels = 0)
# channels = l1.channels
# T = eltype(l1.W)
new(
channels,
(0,), #inputS
(0,), #outputS
# Matrix{Nothing}(undef, 0, 0),
# Matrix{Nothing}(undef, 0, 0),
0,
0,
0,
Array{Layer,1}(undef,0), #nextLayers
Array{Layer,1}(undef,0), #prevLayer
Dict{Layer,UnitRange{Integer}}(),
)
end #function ConcatLayer
end #mutable struct ConcatLayer
### Activation
@doc raw"""
Activation(actFun)
# Arguments
- `actFun::Symbol` := the activation function of this layer
---------
# Summary
mutable struct Activation <: Layer
--------
# Fields
- `actFun::Symbol` := the activation function of this layer
- `channels::Integer` := is the number of nodes or `channels` in the layer
- `inputS::Tuple` := input size of the layer
- `outputS::Tuple` := output size of the layer
- `forwCount::Integer` := forward propagation counter
- `backCount::Integer` := backward propagation counter
- `updateCount::Integer` := update parameters counter
- `nextLayers::Array{Layer,1}` := An array of the next `layer`(s)
- `prevLayer::Array{Layer,1}` := An array of the previous `layer`(s) to be added
---------
# Supertype Hierarchy
    Activation <: Layer <: Any
---------
# Examples
```julia
X_Input = Input(X_train)
X = FCLayer(10, :noAct)(X_Input)
X = Activation(:relu)(X)
```
"""
mutable struct Activation <: Layer
actFun::Symbol
channels::Integer
inputS::Tuple
outputS::Tuple
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::L where {L<:Union{Layer,Nothing}}
function Activation(actFun=:relu)
new(
actFun,
0, #channels
(0,), #inputS
(0,), #outputS
0,
0,
0,
Array{Layer,1}(undef,0),
nothing,
)
end #function Activation
end #mutable struct Activation
export Activation
### Input
@doc raw"""
Input(X_shape::Tuple)
`Input` `Layer` that is used as a pointer to the input array(s).
# Arguments
- `X_shape::Tuple` := shape of the input Array
-------
# Summary
mutable struct Input <: Layer
# Fields
- `channels::Integer` := is the number of nodes or `channels` in the layer
- `inputS::Tuple` := input size of the layer
- `outputS::Tuple` := output size of the layer
- `forwCount::Integer` := forward propagation counter
- `backCount::Integer` := backward propagation counter
- `updateCount::Integer` := update parameters counter
- `nextLayers::Array{Layer,1}` := An array of the next `layer`(s)
- `prevLayer::Array{Layer,1}` := An array of the previous `layer`(s) to be added
------
# Supertype Hierarchy
Input <: Layer <: Any
--------
# Examples
```julia
X_Input = Input(size(X_train))
X = FCLayer(10, :relu)(X_Input)
```
It is possible to use the Array instead of its size; `NumNN` will take care of the rest:
```julia
X_Input = Input(X_train)
X = FCLayer(10, :relu)(X_Input)
```
"""
mutable struct Input <: Layer
channels::Integer
inputS::Tuple
outputS::Tuple
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::L where {L<:Union{Layer,Nothing}}
function Input(X_shape::Tuple)
N = length(X_shape)
if N==2
channels = X_shape[1]
elseif N==3
channels = X_shape[2]
elseif N==4
channels = X_shape[3]
elseif N==5
channels = X_shape[4]
end
new(
channels,
X_shape, #inputS
X_shape, #outputS
0,
0,
0,
Array{Layer,1}(undef,0),
nothing,
)
end #function Layer
end #mutable struct Input
function Input(X::Array{T,N}) where {T,N}
X_shape = size(X)
Input(X_shape)
end #function Input(X::Array{T,N}) where {T,N}
export Input
### BatchNorm
@doc raw"""
BatchNorm(;dim=1, ϵ=1e-10)
Batch Normalization `Layer` that is used to normalize across the dimension specified by the argument `dim`.
# Arguments
- `dim::Integer` := is the dimension to normalize across
- `ϵ::AbstractFloat` := is a backup constant used to prevent division by zero when ``σ^2`` is zero
---------
# Summary
mutable struct BatchNorm <: Layer
-------
# Fields
- `channels::Integer` := is the number of nodes in the layer
- `inputS::Tuple{Integer, Integer}` := input size of the layer, of the shape (channels of the previous layer, size of mini-batch)
- `outputS::Tuple{Integer, Integer}` := output size of the layer, of the shape (channels of this layer, size of mini-batch)
- `dim::Integer` := the dimension to normalize across
- `ϵ::AbstractFloat` := backup constant to protect against division by zero when ``σ^2 = 0``
- `W::Array{T,2} where {T}` := the scaling parameters of this layer `W * X`, same shape of the mean `μ`
- `B::Array{T,2} where {T}` := the bias of this layer `W * X .+ B`, same shape of the variance ``σ^2``
- `dW::Array{T,2} where {T}` := the derivative of the loss function to the W parameters $\frac{dJ}{dW}$
- `dB::Array{T,2} where {T}` := the derivative of the loss function to the B parameters $\frac{dJ}{dB}$
- `forwCount::Integer` := forward propagation counter
- `backCount::Integer` := backward propagation counter
- `updateCount::Integer` := update parameters counter
- `prevLayer::L where {L<:Union{Layer,Nothing}}` := the previous layer which is
the input of this layer
- `nextLayers::Array{Layer,1}` := An array of the next `layer`(s)
---------
# Supertype Hierarchy
BatchNorm <: Layer <: Any
---------
# Examples
```julia
X_train = rand(14,14,3,32) #input of shape `14×14` with channels of `3` and mini-batch size `32`
X_Input = Input(X_train)
X = Conv2D(10, (3,3))(X_Input)
X = BatchNorm(dim=3)(X) #to normalize across the channels dimension
X = Activation(:relu)(X)
```
```julia
X_train = rand(128,5,32) #input of shape `128` with channels of `5` and mini-batch size `32`
X_Input = Input(X_train)
X = Conv1D(10, 5)(X_Input)
X = BatchNorm(dim=2)(X) #to normalize across the channels dimension
X = Activation(:relu)(X)
```
```julia
X_train = rand(64*64,32) #input of shape `64*64` and mini-batch size `32`
X_Input = Input(X_train)
X = FCLayer(10, :noAct)(X_Input)
X = BatchNorm(dim=1)(X) #to normalize across the features dimension
X = Activation(:relu)(X)
```
"""
mutable struct BatchNorm <: Layer
channels::Integer
inputS::Tuple
outputS::Tuple
dim::Integer
ϵ::AbstractFloat
W::Array{T, N} where {T,N}
dW::Array{T, N} where {T,N}
B::Array{T, N} where {T,N}
dB::Array{T, N} where {T,N}
V::Dict{Symbol,Array}
S::Dict{Symbol,Array}
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::L where {L<:Union{Layer,Nothing}}
function BatchNorm(;dim=1, ϵ=1e-10)
new(
0, #channels
(0,), #inputS
(0,), #outputS
dim,
ϵ,
Array{Any,1}(undef,0), #W
Array{Any,1}(undef,0), #dW
Array{Any,1}(undef,0), #B
Array{Any,1}(undef,0), #dB
Dict(:dw=>Array{Any,1}(undef,0),
:db=>Array{Any,1}(undef,0)), #V
Dict(:dw=>Array{Any,1}(undef,0),
:db=>Array{Any,1}(undef,0)), #S
0, #forwCount
0, #backCount
0, #updateCount
Array{Layer,1}(undef,0), #nextLayers
nothing, #prevLayer
)
end #function BatchNorm
end #mutable struct BatchNorm
export BatchNorm
### Flatten
export Flatten
@doc raw"""
Flatten()
Flatten the input into 2D `Array`
# Summary
mutable struct Flatten <: Layer
# Fields
channels :: Integer
inputS :: Tuple
outputS :: Tuple
forwCount :: Integer
backCount :: Integer
updateCount :: Integer
nextLayers :: Array{Layer,1}
prevLayer :: Union{Nothing, Layer}
# Supertype Hierarchy
Flatten <: Layer <: Any
"""
mutable struct Flatten <: Layer
channels::Integer
inputS::Tuple
outputS::Tuple
forwCount::Integer
backCount::Integer
updateCount::Integer
nextLayers::Array{Layer,1}
prevLayer::L where {L<:Union{Layer,Nothing}}
function Flatten()
return new(
0, #channels
(0,), #inputS
(0,), #outputS
0, #forwCount
0, #backCount
0, #updateCount
Array{Layer,1}(undef,0), #nextLayers
nothing,
)
end
end
### Model
@doc raw"""
function Model(
X,
Y,
inLayer::Layer,
outLayer::Layer,
α;
optimizer = :gds,
β1 = 0.9,
β2 = 0.999,
ϵAdam = 1e-8,
regulization = 0,
λ = 1.0,
lossFun = :categoricalCrossentropy,
paramsDtype::DataType = Float64,
)
# Summary
mutable struct Model <: Any
# Fields
inLayer :: Layer
outLayer :: Layer
lossFun :: Symbol
paramsDtype :: DataType
regulization :: Integer
λ :: AbstractFloat
α :: AbstractFloat
optimizer :: Symbol
ϵAdam :: AbstractFloat
β1 :: AbstractFloat
β2 :: AbstractFloat
"""
mutable struct Model
# layers::Array{Layer,1}
inLayer::Layer
outLayer::Layer
lossFun::Symbol
paramsDtype::DataType
"""
regulization type
0 : means no regulization
1 : L1 regulization
2 : L2 regulization
"""
regulization::Integer
"""
regulization constant
"""
λ::AbstractFloat
"""
learning rate
"""
α::AbstractFloat
optimizer::Symbol
ϵAdam::AbstractFloat
β1::AbstractFloat
β2::AbstractFloat
function Model(
X,
Y,
inLayer::Layer,
outLayer::Layer,
α;
optimizer = :gds,
β1 = 0.9,
β2 = 0.999,
ϵAdam = 1e-8,
regulization = 0,
λ = 1.0,
lossFun = :categoricalCrossentropy,
paramsDtype::DataType = Float64,
)
deepInitWB!(outLayer; dtype = paramsDtype)
resetCount!(outLayer, :forwCount)
deepInitVS!(outLayer, optimizer)
resetCount!(outLayer, :forwCount)
@assert regulization in [0, 1, 2]
return new(
inLayer,
outLayer,
lossFun,
paramsDtype,
regulization,
λ,
α,
optimizer,
ϵAdam,
β1,
β2,
)
end #inner-constructor
end #Model
export Model
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 4180 |
abstract type actFun end
export actFun
export probToValue
### sigmoid
abstract type σ <: actFun end
"""
return the Sigmoid output
inputs must be matices
"""
σ(X, W, B) = 1 ./ (1 .+ exp.(.-(W * X .+ B)))
σ(Z) = 1 ./ (1 .+ exp.(.-Z))
export σ
"""
return the derivative of Sigmoid function
"""
dσ(Z) = σ(Z) .* (1 .- σ(Z))
export dσ
@doc raw"""
function probToValue(
actFun::Type{σ},
probs::AbstractArray{T,N},
labels::Aa = nothing;
evalConst = 0.5,
) where {Aa<:Union{<:AbstractArray,Nothing},T,N}
Convert the probabilities returned by the sigmoid function into Bool values (i.e. 0/1) by comparing them against the threshold value `evalConst`
# Return
- `Ŷ_bool` := Boolean values of the probabilities
- `acc` := the accuracy, when `labels` are provided
"""
function probToValue(
actFun::Type{σ},
probs::AbstractArray{T,N},
labels::Aa = nothing;
evalConst = 0.5,
) where {Aa<:Union{<:AbstractArray,Nothing},T,N}
    s = size(probs)
Ŷ_bool = probs .> T(evalConst)
acc = nothing
if labels isa AbstractArray
acc = sum(Ŷ_bool .== labels) / (s[end-1] * s[end])
# println("Accuracy = $acc")
end
return Ŷ_bool, acc
end #function probToValue(actFun::Type{σ}, ...)
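# Hedged sketch (demo_probToValue_σ is a hypothetical name): thresholds fake
# sigmoid outputs at the default `evalConst = 0.5` and reports the accuracy
# against random binary labels.
function demo_probToValue_σ()
    probs  = rand(1, 10)            # fake sigmoid outputs, shape (1, m)
    labels = rand(Bool, 1, 10)      # fake binary labels of the same shape
    Ŷ_bool, acc = probToValue(σ, probs, labels)
    return Ŷ_bool, acc
end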
### relu
abstract type relu <: actFun end
"""
return the ReLU output
"""
function relu(Z::AbstractArray{T,N}) where {T,N}
max.(zero(T), Z)
end #function relu(Z::AbstractArray{T,N}) where {T,N}
export relu
"""
return the derivative of ReLU function
"""
function drelu(Z::AbstractArray{T,N}) where {T,N}
return T.(Z .> zero(T))
end #function drelu(z::AbstractArray{T,N}) where {T,N}
export drelu
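# Quick sanity sketch (demo_relu is a hypothetical name): `relu` zeroes the
# negative entries and `drelu` is the element-wise indicator of positive inputs.
function demo_relu()
    Z = [-1.0 0.5; 2.0 -3.0]
    return relu(Z), drelu(Z)        # ([0.0 0.5; 2.0 0.0], [0.0 1.0; 1.0 0.0])
end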
### softmax
abstract type softmaxFamily <: actFun end
abstract type softmax <: softmaxFamily end
"""
compute the softmax function
"""
function softmax(Ŷ::AbstractArray{T,N}) where {T,N}
Ŷ_exp = exp.(Ŷ)
sumofexp = sum(Ŷ_exp, dims = N - 1)
return Ŷ_exp ./ sumofexp
end #softmax
function dsoftmax(Ŷ, dim = 1)
sŶ = softmax(Ŷ)
T = eltype(Ŷ)
    softMat = Array{T,3}(undef, 0, 0, 0)    # accumulator for the per-example Jacobians
sSize = size(Ŷ)[dim]
for c in eachcol(sŶ)
tmpMat = zeros(T, sSize, sSize)
for i = 1:length(c)
for j = 1:length(c)
if i == j
tmpMat[i, j] = c[i] * (1 - c[j])
else
tmpMat[i, j] = -c[i] * c[j]
end
end
end
softMat = cat(softMat, tmpMat, dims = 3)
end
return softMat
end #dsoftmax
export softmax, dsoftmax
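# Hedged sketch (demo_softmax is a hypothetical name): `softmax` normalizes along
# dimension N-1 (the class dimension), so for a matrix every column sums to 1.
function demo_softmax()
    Z = randn(10, 4)                                      # 10 classes, 4 examples
    P = softmax(Z)
    return all(x -> isapprox(x, 1.0), sum(P, dims = 1))   # true
end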
@doc raw"""
function probToValue(
actFun::Type{S},
probs::AbstractArray{T,N};
labels = nothing,
) where {T,N,S<:softmaxFamily}
Convert the probabilities produced by `softmax` or softmax-like functions into `Bool` values, where the maximum value of each column gets `true` and the others get `false`
# Return
- `Ŷ_bool` := Boolean values of the probabilities
- `acc` := the accuracy, when `labels` are provided
"""
function probToValue(
actFun::Type{S},
probs::AbstractArray{T,N};
labels = nothing,
) where {T,N,S<:softmaxFamily}
maximums = maximum(probs, dims = N - 1)
Ŷ_bool = probs .== maximums
acc = nothing
if labels isa AbstractArray
acc = 0
bool_labels = Bool.(labels)
ax = axes(bool_labels)[1:end-1]
endax = axes(bool_labels)[end]
trueFalse = Array{Bool,1}(undef, length(endax))
@simd for i in endax
lab = view(bool_labels, ax..., i)
pred = view(Ŷ_bool, ax..., i)
trueFalse[i] = (lab == pred)
end
acc = mean(trueFalse)
# println("Accuracy = $acc")
end
return Ŷ_bool, acc
end #function probToValue(actFun::Type{S<:softmaxFamily}, ...)
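# Hedged sketch (demo_probToValue_softmax is a hypothetical name): the softmax
# variant marks the arg-max of each column as `true` and compares whole columns
# against the one-hot labels; here the labels are built to match the predictions,
# so the returned accuracy is 1.0 by construction.
function demo_probToValue_softmax()
    probs  = softmax(randn(10, 5))                   # 10 classes, 5 examples
    labels = probs .== maximum(probs, dims = 1)      # one-hot labels equal to the arg-max
    Ŷ_bool, acc = probToValue(softmax, probs; labels = labels)
    return Ŷ_bool, acc
end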
### tanh
abstract type tanh <: actFun end
Base.tanh(Z::AbstractArray{T,N}) where {T,N} = Base.tanh.(Z)
tanh(Z::AbstractArray{T,N}) where {T,N} = Base.tanh.(Z)
dtanh(Z::AbstractArray{T,N}) where {T,N} = 1 .- (Base.tanh.(Z)) .^ 2
export dtanh, tanh
### noAct
abstract type noAct <: actFun end
function noAct(Z)
return Z
end
function dnoAct(Z)
return ones(eltype(Z), size(Z)...)
end
export noAct, dnoAct
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 3195 |
@doc raw"""
oneHot(Y; classes = [], numC = 0)
Convert an array of integer classes into one-hot coding.
# Arguments
- `Y` := a vector of classes given as numbers
- `classes` := the classes to represent explicitly (in case not all classes are present in the given labels)
- `numC` := the number of classes, as an alternative to the `classes` argument
# Examples
```julia
Y = rand(0:9, 100); # 100 labels drawn from the classes 0–9
hotY = oneHot(Y)    # BitArray with one `true` per column
```
"""
function oneHot(Y; classes = [], numC = 0)
if numC > 0
c = numC
Cs = sort(unique(Y))
elseif length(classes) > 0
Cs = sort(classes)
c = length(Cs)
else
Cs = sort(unique(Y))
c = length(Cs)
end
hotY = BitArray{2}(undef, c, length(Y))
@simd for i=1:length(Y)
hotY[:,i] .= (Cs .== Y[i])
end
return hotY
end #oneHot
export oneHot
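# Hedged usage sketch (demo_oneHot is a hypothetical name): encodes integer class
# labels as a (classes × examples) BitArray with exactly one `true` per column.
function demo_oneHot()
    Y = rand(0:9, 100)                                   # 100 labels from classes 0–9
    hotY = oneHot(Y; classes = collect(0:9))             # 10×100 BitArray
    return size(hotY), all(sum(hotY, dims = 1) .== 1)    # ((10, 100), true)
end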
@doc raw"""
resetCount!(outLayer::Layer, cnt::Symbol)
to reset a counter in all layers under `outLayer`.
# Arguments
- `outLayer::Layer` := the layer from which to start resetting the counter
- `cnt::Symbol` := the counter to be reset
# Examples
```julia
X_train = rand(128, 100);
X_Input = Input(X_train);
X = FCLayer(50, :relu)(X_Input);
X_out = FCLayer(10, :softmax)(X);
FCache = chainForProp(X_train, X_Input);
# Now to reset the forwCount in all layers
resetCount!(X_out, :forwCount)
```
"""
function resetCount!(outLayer::Layer,
cnt::Symbol)
prevLayer = outLayer.prevLayer
if outLayer isa Input
# if outLayer.forwCount != 0
eval(:($outLayer.$cnt = 0))
# end #if outLayer.forwCount != 0
elseif isa(outLayer, MILayer) #if prevLayer == nothing
for prevLayer in outLayer.prevLayer
resetCount!(prevLayer, cnt)
end #for
eval(:($outLayer.$cnt = 0))
else #if prevLayer == nothing
resetCount!(prevLayer, cnt)
eval(:($outLayer.$cnt = 0))
end #if prevLayer == nothing
return nothing
end #function resetForwCount
export resetCount!
### to extend the getindex fun
@doc raw"""
getindex(it, key; default) = haskey(it, key) ? it[key] : default
# Examples
```julia
D = Dict(:A=>"A", :B=>"B")
A = getindex(D, :A)
## this will return an error
#C = getindex(D, :C)
#instead
C = getindex(D, :C; default="C")
#this will return the `String` "C"
```
"""
Base.getindex(it, key; default) = haskey(it, key) ? it[key] : default
#### getLayerSlice
export getLayerSlice
function getLayerSlice(cLayer::Layer, nextLayer::ConcatLayer, BCache::Dict{Layer, Dict{Symbol, AbstractArray}})
N = ndims(BCache[nextLayer][:dA])
fAx = axes(BCache[nextLayer][:dA])[1:end-2]
lAx = axes(BCache[nextLayer][:dA])[end]
LSlice = nextLayer.LSlice[cLayer]
return BCache[nextLayer][:dA][fAx...,LSlice,lAx]
end #function getLayerSlice(cLayer::Layer, nextLayer::ConcatLayer
"""
getLayerSlice(cLayer::Layer, nextLayer::Layer, BCache::Dict{Layer, Dict{Symbol, AbstractArray}})
Fall back method for `Layer`s other than `ConcatLayer`
"""
function getLayerSlice(cLayer::Layer, nextLayer::Layer, BCache::Dict{Layer, Dict{Symbol, AbstractArray}})
return BCache[nextLayer][:dA]
end #function getLayerSlice(cLayer::Layer, nextLayer::Layer
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |
|
[
"MIT"
] | 0.5.3 | a1f51078c01785207ec90a733bbceff8c2ecbe72 | code | 13678 |
using ProgressMeter
using Random
using LinearAlgebra
include("layerForProp.jl")
"""
    perform the chained forward propagation using recursive calls

    input:
        X      := input of the forward propagation
        cLayer := output layer
        cnt    := an internal counter marking the layers that have already been
                  processed, so they are not recomputed

    returns:
        nothing

    For internal use: it sets the values of Z and A in each layer, to be used
    later in back propagation, and increments the layer's forwCount each time
    it passes through the layer.
"""
function chainForProp!(X, cLayer::Layer, cnt::Integer=-1; kwargs...)
if cnt<0
cnt = cLayer.forwCount+1
end
if cLayer isa Input
if cLayer.forwCount < cnt
layerForProp!(cLayer, X; kwargs...)
end #if cLayer.forwCount < cnt
return nothing
elseif isa(cLayer, AddLayer) #if typeof(cLayer)==AddLayer
if cLayer.forwCount < cnt
for prevLayer in cLayer.prevLayer
chainForProp!(X, prevLayer, cnt; kwargs...)
end
layerForProp!(cLayer; kwargs...)
end #if cLayer.forwCount < cnt
return nothing
else #if cLayer.prevLayer==nothing
if cLayer.forwCount < cnt
chainForProp!(X, cLayer.prevLayer, cnt; kwargs...)
layerForProp!(cLayer; kwargs...)
end #if cLayer.forwCount < cnt
return nothing
end #if cLayer.prevLayer!=nothing
end #function chainForProp!
export chainForProp!
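# Hedged sketch (demo_chainForProp is a hypothetical name): one forward pass
# through a tiny network. A `Model` is constructed first because, as its
# constructor shows, it initializes the layer parameters; `Input`, `FCLayer` and
# `oneHot` are assumed from the other files of this package.
function demo_chainForProp()
    X = rand(16, 8)                                  # 16 features, 8 examples
    Y = oneHot(rand(0:2, 8); classes = collect(0:2))
    X_in  = Input(X)
    X_out = FCLayer(3, :softmax)(X_in)
    Model(X, Y, X_in, X_out, 0.01)                   # initializes W, B, V, S
    chainForProp!(X, X_out)
    return X_out.A                                   # cached (3, 8) activations
end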
"""
    predict the labels for the input X using the trained model, optionally
    computing the accuracy against the provided labels Y

    inputs:
        model := the trained model
        X     := the input matrix
        Y     := the labels to compare with (optional)

    output:
        a tuple of
        Ŷ_bool := the predicted labels as Bool values
        acc    := the accuracy of the predictions (`nothing` when Y is not given)
"""
function predictBatch(
model::Model,
X::Array,
Y=nothing,
)
chainForProp!(X, model.outLayer)
Ŷ = model.outLayer.A
T = eltype(Ŷ)
outLayer = model.outLayer
actFun = outLayer.actFun
# if isbool(Y)
return probToValue(eval(:($actFun)), Ŷ, labels=Y)
end #predict
function predict(
model::Model,
X_In::AbstractArray,
Y_In=nothing;
batchSize = 32,
printAcc = true,
useProgBar = false,
GCInt = 5,
noBool = false,
)
outLayer, lossFun, α = model.outLayer, model.lossFun, model.α
Costs = []
N = ndims(X_In)
T = eltype(X_In)
m = size(X_In)[end]
# c = size(Y_train)[end-1]
nB = m ÷ batchSize
N = ndims(X_In)
axX = axes(X_In)[1:end-1]
Y = nothing
if Y_In != nothing
axY = axes(Y_In)[1:end-1]
end
if useProgBar
p = Progress((m % batchSize != 0 ? nB+1 : nB), 0.1)
end
Ŷ_out = Array{T,N}(undef,repeat([0],N)...)
accuracy = []
@simd for j=1:nB
downInd = (j-1)*batchSize+1
upInd = j * batchSize
X = X_In[axX..., downInd:upInd]
if Y_In != nothing
Y = Y_In[axY..., downInd:upInd]
end
Ŷ, acc = predictBatch(model, X, Y)
# Ŷ_out = cat(Ŷ_out, Ŷ, dims=1:N)
if acc != nothing
push!(accuracy, acc)
end
if useProgBar
update!(p, j, showvalues=[("Instances $m", j*batchSize)])
end #if useProgBar
# X = Y = nothing
Ŷ = nothing
# if j%GCInt == 0
# Base.GC.gc()
# end
end
if m % batchSize != 0
downInd = (nB)*batchSize+1
X = X_In[axX..., downInd:end]
if Y_In != nothing
Y = Y_In[axY..., downInd:end]
end
        Ŷ, acc = predictBatch(model, X, Y)
Ŷ_out = cat(Ŷ_out, Ŷ, dims=1:N)
if acc != nothing
push!(accuracy, acc)
end
# X = Y = nothing
Ŷ = nothing
# Base.GC.gc()
        if useProgBar
            update!(p, nB+1, showvalues=[("Instances $m", m)])
        end
end
accuracyM = nothing
if !(isempty(accuracy))
accuracyM = mean(accuracy)
end
if noBool
Ŷ_out = nothing
end
return Ŷ_out, accuracyM
end #function predict(
# model::Model,
# X_In::AbstractArray,
# Y_In=nothing;
# batchSize = 32,
# printAcc = true,
# useProgBar = false,
# )
export predict
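# Hedged usage sketch (demo_predict is a hypothetical name): evaluates an already
# trained `model` on held-out data in mini-batches and returns the boolean
# predictions together with the mean accuracy.
function demo_predict(model::Model, X_test, Y_test)
    Ŷ_bool, acc = predict(model, X_test, Y_test; batchSize = 64)
    return Ŷ_bool, acc
end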
"""
return true if the array values are boolean (ones and zeros)
"""
function isbool(y::Array{T}) where {T}
return iszero(y[y .!= one(T)])
end
### back propagation
include("layerBackProp.jl")
"""
inputs:
        X      := a (nx, m) matrix
        Y      := a (c, m) matrix
        model  := the model on which to perform the back propagation
        cLayer := an internal variable holding the current layer
        cnt    := an internal counter tracking the current back-propagation step
output:
nothing
"""
function chainBackProp!(X,Y,
model::Model,
cLayer::L=nothing,
cnt = -1;
tMiniBatch::Integer=-1, #can be used to perform both back and update params
kwargs...,
) where {L<:Union{Layer,Nothing}}
if cnt < 0
cnt = model.outLayer.backCount+1
end
if cLayer==nothing
layerBackProp!(model.outLayer, model; labels=Y, kwargs...)
if tMiniBatch > 0
layerUpdateParams!(model, model.outLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
end
chainBackProp!(X,Y,model, model.outLayer.prevLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
elseif cLayer isa AddLayer
layerBackProp!(cLayer, model; kwargs...)
if tMiniBatch > 0
layerUpdateParams!(model, cLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
end
if cLayer.backCount >= cnt #in case layerBackProp did not do the back
                                       #prop because the next layers are not all
#done yet
for prevLayer in cLayer.prevLayer
chainBackProp!(X,Y,model, prevLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
end #for
end
else #if cLayer==nothing
layerBackProp!(cLayer, model; kwargs...)
if tMiniBatch > 0
layerUpdateParams!(model, cLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
end
if cLayer.backCount >= cnt #in case layerBackProp did not do the back
                                       #prop because the next layers are not all
#done yet
if !(cLayer isa Input)
chainBackProp!(X,Y,model, cLayer.prevLayer, cnt; tMiniBatch=tMiniBatch, kwargs...)
end #if cLayer.prevLayer == nothing
end
end #if cLayer==nothing
return nothing
end #backProp
export chainBackProp!
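# Hedged sketch (demo_trainStep is a hypothetical name): one manual training step,
# mirroring the `embedUpdate = false` path used in `train` below — forward pass,
# backward pass, then a parameter update for mini-batch number `t`.
function demo_trainStep(model::Model, X, Y, t::Integer)
    chainForProp!(X, model.outLayer)
    chainBackProp!(X, Y, model)                  # gradients only (tMiniBatch = -1)
    chainUpdateParams!(model; tMiniBatch = t)
    return nothing
end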
###update parameters
include("layerUpdateParams.jl")
function chainUpdateParams!(model::Model,
cLayer::L=nothing,
cnt = -1;
tMiniBatch::Integer = 1) where {L<:Union{Layer,Nothing}}
if cnt < 0
cnt = model.outLayer.updateCount + 1
end
if cLayer==nothing
layerUpdateParams!(model, model.outLayer, cnt, tMiniBatch=tMiniBatch)
chainUpdateParams!(model, model.outLayer.prevLayer, cnt, tMiniBatch=tMiniBatch)
elseif cLayer isa AddLayer
#update the AddLayer updateCounter
if cLayer.updateCount < cnt
cLayer.updateCount += 1
end
for prevLayer in cLayer.prevLayer
chainUpdateParams!(model, prevLayer, cnt, tMiniBatch=tMiniBatch)
end #for
else #if cLayer==nothing
layerUpdateParams!(model, cLayer, cnt, tMiniBatch=tMiniBatch)
if !(cLayer isa Input)
chainUpdateParams!(model, cLayer.prevLayer, cnt, tMiniBatch=tMiniBatch)
end #if cLayer.prevLayer == nothing
end #if cLayer==nothing
return nothing
end #function chainUpdateParams!
export chainUpdateParams!
"""
    Repeat the training (forward/backward propagation)

    inputs:
        X_train := the training input
        Y_train := the training labels
        model   := the model to train
        epochs  := the number of passes over the training data

    keyword arguments:
        batchSize          := the mini-batch size
        printCostsInterval := print the current cost every `printCostsInterval`
                              epochs (0 disables printing)
        useProgBar         := whether to display a progress bar
        embedUpdate        := whether to update the parameters inside the
                              back-propagation pass
        metrics            := the metrics to track during training
                              (e.g. [:accuracy, :cost])
"""
function train(
X_train,
Y_train,
model::Model,
epochs;
batchSize = 64,
printCostsInterval = 0,
useProgBar = false,
embedUpdate = true,
metrics::Array{Symbol,1} = [:accuracy, :cost],
)
outLayer, lossFun, α = model.outLayer, model.lossFun, model.α
Costs = []
m = size(X_train)[end]
c = size(Y_train)[end-1]
nB = m ÷ batchSize
shufInd = randperm(m)
N = ndims(X_train)
axX = axes(X_train)[1:end-1]
axY = axes(Y_train)[1:end-1]
if useProgBar
p = Progress(epochs, 1)
end
accuracy = []
avgCost = 0.0
avgAcc = 0.0
for i=1:epochs
minCosts = [] #the costs of all mini-batches
minAcc = []
for j=1:nB
downInd = (j-1)*batchSize+1
upInd = j * batchSize
batchInd = shufInd[downInd:upInd]
X = X_train[axX..., batchInd]
Y = Y_train[axY..., batchInd]
if :accuracy in metrics
ŷ, acc = predict(model, X, Y; noBool = true, batchSize = batchSize)
push!(minAcc, acc)
avgAcc = mean([avgAcc, acc])
else
chainForProp!(X,
model.outLayer)
end
a = model.outLayer.A
if :cost in metrics
minCost = sum(eval(:($lossFun($a, $Y))))/batchSize
if lossFun==:binaryCrossentropy
minCost /= c
end #if lossFun==:binaryCrossentropy
push!(minCosts, minCost)
avgCost = mean([avgCost, minCost])
end
if embedUpdate
chainBackProp!(X,Y,
model;
tMiniBatch = j)
else
chainBackProp!(X,Y,
model;
tMiniBatch = -1)
chainUpdateParams!(model; tMiniBatch = j)
end #if embedUpdate
if useProgBar
if :accuracy in metrics && :cost in metrics
update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", j*batchSize), (:Accuracy, avgAcc), (:Cost, avgCost)])
elseif :accuracy in metrics
update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", j*batchSize), (:Accuracy, avgAcc)])
elseif :cost in metrics
update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", j*batchSize), (:Cost, avgCost)])
else
update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", j*batchSize)])
end
end
# chainUpdateParams!(model; tMiniBatch = j)
end #for j=1:nB iterate over the mini batches
if m%batchSize != 0
downInd = (nB)*batchSize+1
batchInd = shufInd[downInd:end]
X = X_train[axX..., batchInd]
Y = Y_train[axY..., batchInd]
if :accuracy in metrics
ŷ, acc = predict(model, X, Y; noBool = true, batchSize = size(X)[end])
push!(minAcc, acc)
avgAcc = mean([avgAcc, acc])
else
chainForProp!(X,
model.outLayer)
end
if :cost in metrics
a = model.outLayer.A
minCost = sum(eval(:($lossFun($a, $Y))))/size(X)[end]
if lossFun==:binaryCrossentropy
minCost /= c
end #if lossFun==:binaryCrossentropy
push!(minCosts, minCost)
end
if embedUpdate
chainBackProp!(X,Y,
model;
tMiniBatch = nB+1)
else
chainBackProp!(X,Y,
model;
tMiniBatch = -1)
chainUpdateParams!(model; tMiniBatch = nB+1)
end #if embedUpdate
# chainUpdateParams!(model; tMiniBatch = nB+1)
end
        if !isempty(minAcc)
            push!(accuracy, mean(minAcc))
        end
        if !isempty(minCosts)
            push!(Costs, mean(minCosts))
        end
if printCostsInterval>0 && i%printCostsInterval==0
println("N = $i, Cost = $(Costs[end])")
end
        if useProgBar
            if :accuracy in metrics && :cost in metrics
                update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", m), (:Accuracy, avgAcc), (:Cost, avgCost)])
            elseif :accuracy in metrics
                update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", m), (:Accuracy, avgAcc)])
            elseif :cost in metrics
                update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", m), (:Cost, avgCost)])
            else
                update!(p, i; showvalues=[(:Epoch, i), ("Instances ($m)", m)])
            end
        end
end
# model.W, model.B = W, B
return Costs
end #train
export train
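# Hedged end-to-end sketch (demo_workflow is a hypothetical name): wires the pieces
# above together — build a model, train it for a few epochs, then evaluate it on
# the training data. `Input`, `FCLayer`, `Model` and `oneHot` are assumed from the
# other files of this package.
function demo_workflow()
    X_train = rand(Float64, 64, 256)                          # 64 features, 256 examples
    Y_train = oneHot(rand(0:4, 256); classes = collect(0:4))  # 5 classes, one-hot
    X_in  = Input(X_train)
    X_out = FCLayer(5, :softmax)(FCLayer(32, :relu)(X_in))
    model = Model(X_train, Y_train, X_in, X_out, 0.05)
    costs = train(X_train, Y_train, model, 5; batchSize = 32, printCostsInterval = 1)
    Ŷ, acc = predict(model, X_train, Y_train; batchSize = 32)
    return costs, acc
end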
| NumNN | https://github.com/MohHizzani/NumNN.jl.git |