# EasyML.jl source and test files (MIT license, v0.2.0)
# Repository: https://github.com/OML-NPA/EasyML.jl.git
"""
change(validation_options::ValidationOptions)
Opens a GUI in which `validation_options` can be changed.
"""
function Common.change(data::ValidationOptions)
@qmlfunction(
get_data,
get_options,
set_options,
save_options,
unit_test
)
path_qml = string(@__DIR__,"/gui/ValidationOptions.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir)
exec()
return nothing
end
function get_urls_validation_main2(url_inputs::String,url_labels::String,validation_data::ValidationData)
if !isdir(url_inputs)
@error string(url_inputs," does not exist.")
return nothing
end
if problem_type()==:classification || problem_type()==:segmentation
if !isdir(url_labels)
@error string(url_labels," does not exist.")
return nothing
end
else
if !isfile(url_labels)
@error string(url_labels," does not exist.")
return nothing
end
end
validation_urls = validation_data.Urls
validation_urls.url_inputs = url_inputs
validation_urls.url_labels = url_labels
validation_data.PlotData.use_labels = true
get_urls_validation_main(model_data,validation_urls,validation_data)
return nothing
end
"""
get_urls_validation(url_inputs::String,url_labels::String)
Gets URLs to all files present in both folders (or a folder and a file)
specified by `url_inputs` and `url_labels` for validation. URLs are automatically
saved to `EasyMLValidation.validation_data`.
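# Example
A hypothetical folder layout is assumed here:
```julia
using EasyML
get_urls_validation("validation/images","validation/labels")
```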
"""
get_urls_validation(url_inputs,url_labels) = get_urls_validation_main2(url_inputs,url_labels,validation_data)
function get_urls_validation_main2(url_inputs::String,validation_data)
if !isdir(url_inputs)
@error string(url_inputs," does not exist.")
return nothing
end
validation_urls = validation_data.Urls
validation_urls.url_inputs = url_inputs
validation_data.PlotData.use_labels = false
get_urls_validation_main(model_data,validation_urls,validation_data)
return nothing
end
"""
get_urls_validation(url_inputs::String)
Gets URLs to all files present in a folder specified by `url_inputs`
for validation. URLs are automatically saved to `EasyMLValidation.validation_data`.
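# Example
A hypothetical input folder is assumed here:
```julia
using EasyML
get_urls_validation("validation/images")
```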
"""
get_urls_validation(url_inputs) = get_urls_validation_main2(url_inputs,validation_data)
function get_urls_validation_main2(validation_data::ValidationData)
validation_urls = validation_data.Urls
validation_urls.url_inputs = ""
validation_urls.url_labels = ""
dir = pwd()
@info "Select a directory with input data."
path = get_folder(dir)
if !isempty(path)
validation_urls.url_inputs = path
@info string(validation_urls.url_inputs, " was selected.")
else
@error "Input data directory URL is empty. Aborted"
return nothing
end
if problem_type()==:classification
# no label selection is needed; classification labels come from the input folder structure
elseif problem_type()==:regression
@info "Select a file with label data if labels are available."
name_filters = ["*.csv","*.xlsx"]
path = get_file(dir,name_filters)
if !isempty(path)
validation_urls.url_labels = path
@info string(validation_urls.url_labels, " was selected.")
else
@warn "Label data URL is empty. Continuing without labels."
end
elseif problem_type()==:segmentation
@info "Select a directory with label data if labels are available."
path = get_folder(dir)
if !isempty(path)
validation_urls.url_labels = path
@info string(validation_urls.url_labels, " was selected.")
else
@warn "Label data directory URL is empty. Continuing without labels."
end
end
if validation_urls.url_labels!="" && problem_type()!=:classification
validation_data.PlotData.use_labels = true
else
validation_data.PlotData.use_labels = false
end
get_urls_validation_main(model_data,validation_urls,validation_data)
return nothing
end
"""
get_urls_validation()
Opens a dialog or dialogs for choosing a folder with inputs and, depending on
the problem type, a folder or a file with labels. The label dialog can be
skipped if no labels are available.
URLs are automatically saved to `EasyMLValidation.validation_data`.
"""
get_urls_validation() = get_urls_validation_main2(validation_data)
"""
validate()
Opens a GUI where validation progress and results can be observed.
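Returns the results once the GUI is closed. A typical call, assuming URLs were
already loaded with `get_urls_validation` (the folder path is hypothetical):
```julia
using EasyML
get_urls_validation("validation/images")
results = validate()
```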
"""
function validate()
if model_data.model isa Chain{Tuple{}}
@error "Model is empty."
return nothing
elseif isempty(model_data.classes)
@error "Classes are empty."
return nothing
end
if isempty(validation_data.Urls.input_urls)
@error "No input urls. Run 'get_urls_validation'."
return nothing
end
empty_channel(:validation_start)
empty_channel(:validation_progress)
empty_channel(:validation_modifiers)
t = validate_main2(model_data,validation_data,options,channels)
# Launches GUI
@qmlfunction(
# Handle classes
num_classes,
get_class_field,
# Data handling
get_problem_type,
get_input_type,
get_options,
get_progress,
put_channel,
get_image_size,
get_image_validation,
get_data,
# Other
yield,
unit_test
)
f1 = CxxWrap.@safe_cfunction(display_original_image_validation, Cvoid,(Array{UInt32,1}, Int32, Int32))
f2 = CxxWrap.@safe_cfunction(display_label_image_validation, Cvoid,(Array{UInt32,1}, Int32, Int32))
path_qml = string(@__DIR__,"/gui/ValidationPlot.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text),
gui_dir = gui_dir,
display_original_image_validation = f1,
display_label_image_validation = f2
)
exec()
state,error = check_task(t)
if state==:error
@warn string("Validation aborted due to the following error: ",error)
end
# Clean up
validation_data.PlotData.original_image = Array{RGB{N0f8},2}(undef,0,0)
validation_data.PlotData.label_image = Array{RGB{N0f8},2}(undef,0,0)
# Return results
if input_type()==:image
if problem_type()==:classification
return validation_image_classification_results
elseif problem_type()==:regression
return validation_image_regression_results
else # problem_type()==:segmentation
return validation_image_segmentation_results
end
end
end
"""
remove_validation_data()
Removes all validation data except for results.
"""
function remove_validation_data()
fields = fieldnames(ValidationUrls)
for field in fields
val = getproperty(validation_data.Urls, field)
if val isa Array
empty!(val)
elseif val isa String
setproperty!(validation_data.Urls, field, "")
end
end
end
"""
remove_validation_results()
Removes validation results.
"""
function remove_validation_results()
data = validation_data.ImageClassificationResults
fields = fieldnames(ValidationImageClassificationResults)
for field in fields
empty!(getfield(data, field))
end
data = validation_data.ImageRegressionResults
fields = fieldnames(ValidationImageRegressionResults)
for field in fields
empty!(getfield(data, field))
end
data = validation_data.ImageSegmentationResults
fields = fieldnames(ValidationImageSegmentationResults)
for field in fields
empty!(getfield(data, field))
end
end
#---Getting URLs------------------------------------------------------------
function get_urls_validation_main(model_data::ModelData,
validation_urls::ValidationUrls,validation_data::ValidationData)
url_inputs = validation_urls.url_inputs
url_labels = validation_urls.url_labels
if input_type()==:image
allowed_ext = ["png","jpg","jpeg"]
end
if problem_type()==:classification
input_urls,dirs = get_urls1(url_inputs,allowed_ext)
labels = map(class -> class.name,model_data.classes)
if issubset(dirs,labels)
validation_data.PlotData.use_labels = true
labels_int = map((label,l) ->
repeat([findfirst(label.==labels)],l),dirs,length.(input_urls))
validation_urls.labels_classification = reduce(vcat,labels_int)
end
elseif problem_type()==:regression
input_urls_raw,_,filenames_inputs_raw = get_urls1(url_inputs,allowed_ext)
input_urls = input_urls_raw[1]
filenames_inputs = filenames_inputs_raw[1]
if validation_data.PlotData.use_labels==true
input_urls_copy = copy(input_urls)
filenames_inputs_copy = copy(filenames_inputs)
filenames_labels,loaded_labels = load_regression_data(validation_urls.url_labels)
intersect_regression_data!(input_urls_copy,filenames_inputs_copy,
loaded_labels,filenames_labels)
if isempty(loaded_labels)
validation_data.PlotData.use_labels = false
@warn string("No file names in ",url_labels ," correspond to file names in ",
url_inputs," . Files were loaded without labels.")
else
validation_urls.labels_regression = loaded_labels
input_urls = input_urls_copy
end
end
else # problem_type()==:segmentation
if validation_data.PlotData.use_labels==true
input_urls,label_urls,_,_,_ = get_urls2(url_inputs,url_labels,allowed_ext)
validation_urls.label_urls = reduce(vcat,label_urls)
else
input_urls,_ = get_urls1(url_inputs,allowed_ext)
end
end
validation_urls.input_urls = reduce(vcat,input_urls)
return nothing
end
#---Data preparation------------------------------------------------------------
function prepare_validation_data(classes::Vector{ImageClassificationClass},
norm_func::Function,model_data::ModelData,ind::Int64,validation_data::ValidationData)
local data_input_raw
original_image = load_image(validation_data.Urls.input_urls[ind])
if size(original_image)!=model_data.input_size[1:2]
original_image = fix_image_size(model_data,original_image)
end
if :grayscale in model_data.input_properties
data_input_raw = image_to_gray_float(original_image)
else
data_input_raw = image_to_color_float(original_image)
end
norm_func(data_input_raw)
data_input = data_input_raw[:,:,:,:]
if validation_data.PlotData.use_labels
num = length(classes)
labels_temp = Vector{Float32}(undef,num)
fill!(labels_temp,0)
label_int = validation_data.Urls.labels_classification[ind]
labels_temp[label_int] = 1
labels = reshape(labels_temp,:,1)
else
labels = Array{Float32,2}(undef,0,0)
end
return data_input,labels,original_image
end
function prepare_validation_data(classes::Vector{ImageRegressionClass},
norm_func::Function,model_data::ModelData,ind::Int64,validation_data::ValidationData)
local data_input_raw
original_image = load_image(validation_data.Urls.input_urls[ind])
if size(original_image)!=model_data.input_size[1:2]
original_image = fix_image_size(model_data,original_image)
end
if :grayscale in model_data.input_properties
data_input_raw = image_to_gray_float(original_image)
else
data_input_raw = image_to_color_float(original_image)
end
norm_func(data_input_raw)
data_input = data_input_raw[:,:,:,:]
if validation_data.PlotData.use_labels
labels = reshape(validation_data.Urls.labels_regression[ind],:,1)
else
labels = Array{Float32,2}(undef,0,0)
end
return data_input,labels,original_image
end
function prepare_validation_data(classes::Vector{ImageSegmentationClass},
norm_func::Function,model_data::ModelData,ind::Int64,validation_data::ValidationData)
local data_input_raw
inds,labels_color,labels_incl,border,border_thickness = get_class_data(classes)
original_image = load_image(validation_data.Urls.input_urls[ind])
if :grayscale in model_data.input_properties
data_input_raw = image_to_gray_float(original_image)
else
data_input_raw = image_to_color_float(original_image)
end
norm_func(data_input_raw)
data_input = data_input_raw[:,:,:,:]
if validation_data.PlotData.use_labels
label = load_image(validation_data.Urls.label_urls[ind])
label_bool = label_to_bool(label,inds,labels_color,
labels_incl,border,border_thickness)
data_label = convert(Array{Float32,3},label_bool)[:,:,:,:]
else
data_label = Array{Float32,4}(undef,1,1,1,1)
end
return data_input,data_label,original_image
end
#---Validation output processing---------------------------------------------
function get_error_image(predicted_bool_feat::BitArray{2},truth::BitArray{2})
correct = predicted_bool_feat .& truth
false_pos = copy(predicted_bool_feat)
false_pos[truth] .= false
false_neg = copy(truth)
false_neg[predicted_bool_feat] .= false
s = (3,size(predicted_bool_feat)...)
error_bool = BitArray{3}(undef,s)
error_bool[1,:,:] .= false_pos
error_bool[2,:,:] .= false_pos
error_bool[1,:,:] = error_bool[1,:,:] .| false_neg
error_bool[2,:,:] = error_bool[2,:,:] .| correct
return error_bool
end
function compute(predicted_bool::BitArray{3},
label_bool::BitArray{3},labels_color::Vector{Vector{N0f8}},
num_feat::Int64,use_labels::Bool)
num = size(predicted_bool,3)
predicted_data = Vector{Tuple{BitArray{2},Vector{N0f8}}}(undef,num)
target_data = Vector{Tuple{BitArray{2},Vector{N0f8}}}(undef,num)
error_data = Vector{Tuple{BitArray{3},Vector{N0f8}}}(undef,num)
color_error = ones(N0f8,3)
for i = 1:num
color = labels_color[i]
predicted_bool_feat = predicted_bool[:,:,i]
predicted_data[i] = (predicted_bool_feat,color)
if use_labels
if i>num_feat
target_bool = label_bool[:,:,i-num_feat]
else
target_bool = label_bool[:,:,i]
end
target_data[i] = (target_bool,color)
error_bool = get_error_image(predicted_bool_feat,target_bool)
error_data[i] = (error_bool,color_error)
end
end
return predicted_data,target_data,error_data
end
function output_images(predicted_bool::BitArray{3},label_bool::BitArray{3},
classes::Vector{<:AbstractClass},use_labels::Bool)
class_inds,labels_color, _ ,border = get_class_data(classes)
labels_color = labels_color[class_inds]
labels_color_uint = convert(Vector{Vector{N0f8}},labels_color/255)
inds_border = findall(border)
border_colors = labels_color_uint[inds_border]
labels_color_uint = vcat(labels_color_uint,border_colors,border_colors)
array_size = size(predicted_bool)
num_feat = array_size[3]
num_border = sum(border)
if num_border>0
border_bool = apply_border_data(predicted_bool,classes)
predicted_bool = cat(predicted_bool,border_bool,dims=Val(3))
end
for i=1:num_border
min_area = classes[inds_border[i]].min_area
ind = num_feat + i
if min_area>1
temp_array = predicted_bool[:,:,ind]
areaopen!(temp_array,min_area)
predicted_bool[:,:,ind] .= temp_array
end
end
predicted_data,target_data,error_data = compute(predicted_bool,label_bool,
labels_color_uint,num_feat,use_labels)
return predicted_data,target_data,error_data
end
function process_output(predicted::AbstractArray{Float32,2},label::AbstractArray{Float32,2},
original_image::Array{RGB{N0f8},2},other_data::NTuple{2, Float32},classes::Vector{ImageClassificationClass},
validation_data::ValidationData,channels::Channels)
class_names = map(x-> x.name,classes)
predicted_vec = Iterators.flatten(predicted)
predicted_int = findfirst(predicted_vec .== maximum(predicted_vec))
predicted_string = class_names[predicted_int]
if validation_data.PlotData.use_labels
label_vec = Iterators.flatten(label)
label_int = findfirst(label_vec .== maximum(label_vec))
label_string = class_names[label_int]
else
label_string = ""
end
# Return data
validation_results = validation_data.ImageClassificationResults
push!(validation_results.original_images,original_image)
push!(validation_results.predicted_labels,predicted_string)
push!(validation_results.target_labels,label_string)
push!(validation_results.accuracy,other_data[1])
push!(validation_results.loss,other_data[2])
# Update progress
put!(channels.validation_progress,other_data)
return nothing
end
function process_output(predicted::AbstractArray{Float32,2},label::AbstractArray{Float32,2},
original_image::Array{RGB{N0f8},2},other_data::NTuple{2, Float32},classes::Vector{ImageRegressionClass},
validation_data::ValidationData,channels::Channels)
# Return data
validation_results = validation_data.ImageRegressionResults
push!(validation_results.original_images,original_image)
push!(validation_results.predicted_labels,predicted[:])
push!(validation_results.target_labels,label[:])
push!(validation_results.accuracy,other_data[1])
push!(validation_results.loss,other_data[2])
# Update progress
put!(channels.validation_progress,other_data)
return nothing
end
function process_output(predicted::AbstractArray{Float32,4},data_label::AbstractArray{Float32,4},
original_image::Array{RGB{N0f8},2},other_data::NTuple{2, Float32},classes::Vector{ImageSegmentationClass},
validation_data::ValidationData,channels::Channels)
predicted_bool = predicted[:,:,:].>0.5
label_bool = data_label[:,:,:].>0.5
# Get output data
predicted_data,target_data,error_data =
output_images(predicted_bool,label_bool,classes,validation_data.PlotData.use_labels)
# Return data
validation_results = validation_data.ImageSegmentationResults
push!(validation_results.original_images,original_image)
push!(validation_results.predicted_data,predicted_data)
push!(validation_results.target_data,target_data)
push!(validation_results.error_data,error_data)
push!(validation_results.accuracy,other_data[1])
push!(validation_results.loss,other_data[2])
# Update progress
put!(channels.validation_progress,other_data)
return nothing
end
#---Image handling for QML--------------------------------------------------------
function bitarray_to_image(array_bool::BitArray{2},color::Vector{Normed{UInt8,8}})
s = size(array_bool)
array = zeros(RGB{N0f8},s...)
colorRGB = colorview(RGB,permutedims(color[:,:,:],(1,2,3)))[1]
array[array_bool] .= colorRGB
return collect(array)
end
function bitarray_to_image(array_bool::BitArray{3},color::Vector{Normed{UInt8,8}})
s = size(array_bool)[2:3]
array_vec = Vector{Array{RGB{N0f8},2}}(undef,0)
for i in 1:3
array_temp = zeros(RGB{N0f8},s...)
color_temp = zeros(Normed{UInt8,8},3)
color_temp[i] = color[i]
colorRGB = colorview(RGB,permutedims(color_temp[:,:,:],(1,2,3)))[1]
array_temp[array_bool[i,:,:]] .= colorRGB
push!(array_vec,array_temp)
end
array = sum(array_vec)
return collect(array)
end
# Saves image to the main image storage and returns its size
function get_image_validation(fields,inds)
fields = fix_QML_types(fields)
inds = fix_QML_types(inds)
image_data = Common.get_data_main(validation_data,fields,inds)
if image_data isa Array{RGB{N0f8},2}
image = image_data
else
image = bitarray_to_image(image_data...)
end
final_field = fields[end]
if final_field=="original_images"
validation_data.PlotData.original_image = image
elseif any(final_field.==("predicted_data","target_data","error_data"))
validation_data.PlotData.label_image = image
end
return [size(image)...]
end
function get_image_size(fields,inds)
fields = fix_QML_types(fields)
inds = fix_QML_types(inds)
image_data = get_data(fields,inds)
if image_data isa Array{RGB{N0f8},2}
return [size(image_data)...]
else
return [size(image_data[1])...]
end
end
function display_original_image_validation(buffer::Array{UInt32, 1},width::Int32,height::Int32)
buffer = reshape(buffer, convert(Int64,width), convert(Int64,height))
buffer = reinterpret(ARGB32, buffer)
image = validation_data.PlotData.original_image
s = size(image)
if size(buffer)==reverse(size(image)) || (s[1]==s[2] && size(buffer)==size(image))
buffer .= transpose(image)
elseif size(buffer)==s
buffer .= image
end
return
end
function display_label_image_validation(buffer::Array{UInt32, 1},width::Int32,height::Int32)
buffer = reshape(buffer, convert(Int64,width), convert(Int64,height))
buffer = reinterpret(ARGB32, buffer)
image = validation_data.PlotData.label_image
if size(buffer)==reverse(size(image))
buffer .= transpose(image)
end
return
end
#----------------------------------------------------------------------------------
function get_weights(classes::Vector{<:AbstractClass},validation_options::ValidationOptions)
if validation_options.Accuracy.weight_accuracy
if problem_type()==:classification
return map(class -> class.weight,classes)
elseif problem_type()==:regression
return Vector{Float32}(undef,0)
else # problem_type()==:segmentation
true_classes_bool = (!).(map(class -> class.overlap, classes))
classes = classes[true_classes_bool]
weights = map(class -> class.weight,classes)
borders_bool = map(class -> class.BorderClass.enabled, classes)
border_weights = weights[borders_bool]
append!(weights,border_weights)
return weights
end
else
return Vector{Float32}(undef,0)
end
end
function check_abort_signal(channel::Channel)
if isready(channel)
value = fetch(channel)[1]
if value==0
return true
else
return false
end
else
return false
end
end
function validate_inner(model::AbstractModel,norm_func::Function,classes::Vector{<:AbstractClass},model_data::ModelData,
accuracy::Function,loss::Function,num::Int64,validation_data::ValidationData,num_slices_val::Int64,
offset_val::Int64,use_GPU::Bool,channels::Channels)
for i = 1:num
if check_abort_signal(channels.validation_modifiers)
#return nothing
end
input_data,label,original_image = prepare_validation_data(classes,norm_func,model_data,i,validation_data)
predicted = forward(model,input_data,num_slices=num_slices_val,offset=offset_val,use_GPU=use_GPU)
if validation_data.PlotData.use_labels
accuracy_val = accuracy(predicted,label)
loss_val = loss(predicted,label)
other_data = (accuracy_val,loss_val)
else
other_data = (0.f0,0.f0)
end
process_output(predicted,label,original_image,other_data,classes,validation_data,channels)
end
return nothing
end
# Main validation function
function validate_main(model_data::ModelData,validation_data::ValidationData,
options::Options,channels::Channels)
# Initialisation
remove_validation_results()
num = length(validation_data.Urls.input_urls)
put!(channels.validation_start,num)
classes = model_data.classes
model = model_data.model
loss = model_data.loss
ws = get_weights(classes,options.ValidationOptions)
accuracy = get_accuracy_func(ws,options.ValidationOptions)
use_GPU = false
if options.GlobalOptions.HardwareResources.allow_GPU
if has_cuda()
use_GPU = true
else
@warn "No CUDA capable device was detected. Using CPU instead."
end
end
normalization = model_data.normalization
norm_func(x) = normalization.f(x,normalization.args...)
if problem_type()==:segmentation
num_slices_val = options.GlobalOptions.HardwareResources.num_slices
offset_val = options.GlobalOptions.HardwareResources.offset
else
num_slices_val = 1
offset_val = 0
end
# Validation starts
validate_inner(model,norm_func,classes,model_data,accuracy,loss,num,validation_data,
num_slices_val,offset_val,use_GPU,channels)
return nothing
end
function validate_main2(model_data::ModelData,validation_data::ValidationData,
options::Options,channels::Channels)
t = Threads.@spawn validate_main(model_data,validation_data,options,channels)
push!(validation_data.tasks,t)
return t
end
using EasyML, Test
EasyML.Common.unit_test.state = true
examples_dir = joinpath(@__DIR__,"examples")
models_dir = joinpath(@__DIR__,"models")
#---Testing modules------------------------------------------
@info "Common"
include("modules/common/runtests.jl")
@info "Classes"
include("modules/classes/runtests.jl")
@info "Design"
include("modules/design/runtests.jl")
@info "Data preparation"
include("modules/datapreparation/runtests.jl")
@info "Training"
include("modules/training/runtests.jl")
@info "Validation"
include("modules/validation/runtests.jl")
@info "Application"
include("modules/application/runtests.jl")
#---Testing module glue------------------------------------------
@testset "Module glue" begin
cd(@__DIR__)
@test begin
training_options.Testing.data_preparation_mode = :auto
training_options.Testing.test_data_fraction = 0.2
load_model("models/classification.model")
get_urls_training("examples/classification/test")
get_urls_testing()
prepare_training_data()
prepare_testing_data()
true
end
@test begin
load_model("models/regression.model")
get_urls_training("examples/regression/test","examples/regression/test.csv")
get_urls_testing()
prepare_training_data()
prepare_testing_data()
true
end
@test begin
training_options.Testing.test_data_fraction = 0.5
training_options.Hyperparameters.batch_size = 4
load_model("models/segmentation.model")
get_urls_training("examples/segmentation/images", "examples/segmentation/labels")
get_urls_testing()
prepare_training_data()
prepare_testing_data()
true
end
@test begin
remove_training_data()
get_urls_testing()
load_model("models/classification.model")
push!(EasyML.unit_test.urls,"examples/classification/test")
get_urls_training()
get_urls_testing("examples/classification/test")
load_model("models/regression.model")
get_urls_testing("examples/regression/test","examples/regression/test.csv")
training_options.Testing.data_preparation_mode = :manual
push!(EasyML.unit_test.urls,"examples/regression/test")
push!(EasyML.unit_test.urls,"examples/regression/test.csv")
get_urls_testing()
model_data.input_size = (0,0,0)
prepare_training_data()
true
end
end
using EasyML.Application
cd(@__DIR__)
global_options.HardwareResources.num_slices = 1
#---Main functionality-----------------------------------------------
@info "Options"
change(application_options)
change_output_options()
@info "Classification"
load_model(joinpath(models_dir,"classification.model"))
change_output_options()
url_input = joinpath(examples_dir,"classification/test")
get_urls_application(url_input)
apply()
@info "Regression"
load_model(joinpath(models_dir,"regression.model"))
change_output_options()
url_input = joinpath(examples_dir,"regression/test")
get_urls_application(url_input)
apply()
@info "Segmentation"
load_model(joinpath(models_dir,"segmentation.model"))
change_output_options()
url_input = joinpath(examples_dir,"segmentation/images")
get_urls_application(url_input)
apply()
model_data.output_options = Application.ImageClassificationOutputOptions[]
change_output_options()
remove_application_data()
rm("Output data",recursive=true)
rm("options.bson")
#---Other QML-------------------------------------------------------
push!(EasyML.unit_test.urls,joinpath(examples_dir,"classification/test"))
get_urls_application()
using EasyML.Classes
import EasyML.Classes
cd(@__DIR__)
#---Main functionality------------------------------------------
@testset "Change classes" begin
@test begin change_classes(); true end
@test begin
load_model(joinpath(models_dir,"classification.model"))
change_classes()
true
end
@test begin
load_model(joinpath(models_dir,"regression.model"))
change_classes()
true
end
@test begin
load_model(joinpath(models_dir,"segmentation.model"))
change_classes()
true
end
end
#---Other QML-----------------------------------------------------
@testset "Other QML" begin
@test begin
Classes.set_problem_type(0)
Classes.get_problem_type()
Classes.set_problem_type(1)
Classes.get_problem_type()
Classes.set_problem_type(2)
Classes.get_problem_type()
Classes.get_input_type()
true
end
end
#---Other--------------------------------------------------------
@testset "Other" begin
@test begin
Classes.get_class_data(model_data.classes)
true
end
end
import EasyML.Common, QML
using EasyML.Common, Parameters, Flux, Test, DelimitedFiles, BSON
cd(@__DIR__)
#---Main functionality-----------------------------------------------------
@testset "Model loading/saving " begin
@test begin set_savepath("models/test.model"); true end
@test begin set_savepath("model"); true end
model_data.classes = repeat([ImageSegmentationClass()],2)
url = "models/test.model"
@test begin save_model(url); true end
@test begin load_model(url); true end
@test begin load_model("models/old_test.model"); true end
@test begin load_model("models/broken_property.model"); true end
rm("models/test.model")
@test begin
try
url = "models/test2.model"
load_model(url)
catch e
e isa ErrorException
end
end
Common.all_data_urls.model_name = ""
push!(Common.unit_test.urls,"models/test.model")
@test begin save_model(); true end
push!(Common.unit_test.urls,"models/test.model")
@test begin load_model(); true end
rm("models/test.model")
@test begin
try
push!(Common.unit_test.urls,"models/test2.model")
load_model()
catch e
e isa ErrorException
end
end
end
@testset "Options loading/saving" begin
mutable struct OptionsBusted
GlobalOptions::Bool
end
options_busted = OptionsBusted(true)
@test begin
save_options()
dict_busted = Dict()
BSON.@save("options.bson",dict_busted)
true
end
@test begin
load_options()
load_options()
rm("options.bson")
load_options()
rm("options.bson")
true
end
end
@testset verbose = true "QML interaction " begin
mutable struct Data2
a::Symbol
b::Vector{Symbol}
c::Vector{Vector{Symbol}}
end
mutable struct Data
Data2::Data2
end
data = Data(Data2(:a,[:b],[[:c]]))
@testset "Type conversion" begin
propmap = QML.QQmlPropertyMap()
propmap["string"] = "some string"
propmap["integer"] = zero(Int64)
propmap["float"] = zero(Float64)
propmap["list"] = [1,2,3,4]
@test fix_QML_types(propmap["string"])=="some string"
@test fix_QML_types(propmap["integer"])==zero(Int64)
@test fix_QML_types(propmap["float"])==zero(Float64)
@test fix_QML_types(propmap["list"])==[1,2,3,4]
@test fix_QML_types((1,2))==(1,2)
end
@testset "Get data" begin
import EasyML.Common.get_data_main
@test get_data_main(data,["Data2","a"],[])=="a"
@test get_data_main(data,["Data2","b"],[1])=="b"
@test get_data_main(data,["Data2","c"],[1,1])=="c"
@test get_data(["TrainingData","warnings"])==String[]
@test get_options(["GlobalOptions","Graphics","scaling_factor"])==1.0
@test get_options(["ApplicationOptions","image_type"])=="png"
end
@testset "Set data" begin
import EasyML.Common.set_data_main
@test begin
set_data_main(data,["Data2","a"],("c"))
data.Data2.a == :c
end
@test begin
set_data_main(data,["Data2","b"],([1],"d"))
data.Data2.b[1] == :d
end
@test begin
set_data_main(data,["Data2","c"],([1,1],"e"))
data.Data2.c[1][1] == :e
end
@test begin
set_data(["TrainingData","warnings"],[])
true
end
@test begin
set_options(["GlobalOptions","Graphics","scaling_factor"],1.0)
true
end
@test begin
set_options(["ApplicationOptions","image_type"],"PNG")
true
end
end
@testset "Get file/folder" begin
@test begin
push!(Common.unit_test.urls,"test")
out = get_folder()
out == "test"
end
@test begin
push!(Common.unit_test.urls,"test")
out = get_file()
out == "test"
end
end
@testset "Channels" begin
struct Channels
a::Channel
b::Channel
end
channels = Channels(Channel{Int64}(1),Channel{Tuple{Int64,Float64}}(1))
@test begin
import EasyML.Common.check_progress_main
check_progress_main(channels,"a")
put!(channels.a,1)
check_progress_main(channels,"a")
check_progress("data_preparation_progress")
true
end
@test begin
import EasyML.Common.get_progress_main
get_progress_main(channels,"a")
get_progress_main(channels,"a")
put!(channels.b,(1,1.0))
get_progress_main(channels,"b")
get_progress_main(channels,"b")
put!(channels.a,1)
get_progress_main(channels,:a)
get_progress_main(channels,:a)
put!(channels.b,(1,1.0))
get_progress_main(channels,:b)
get_progress_main(channels,:b)
get_progress(:data_preparation_progress)
true
end
@test begin
import EasyML.Common.empty_channel_main
put!(channels.a,1)
empty_channel_main(channels,"a")
put!(channels.a,1)
empty_channel_main(channels,:a)
empty_channel(:data_preparation_progress)
true
end
@test begin
import EasyML.Common.put_channel_main
put_channel_main(channels,"b",[0.0,1.0])
put_channel("data_preparation_progress",1)
true
end
end
end
@testset "Set property" begin
@test begin
model_data.input_properties = [:grayscale]
try
model_data.input_properties = [:a]
catch
true
end
try
model_data.problem_type = :a
catch
true
end
end
@test begin
obj = Common.Application.OutputVolume()
obj.binning = :auto
obj.normalization = :none
obj.value = 10
true
end
@test begin
obj = Common.application_options
obj.apply_by = :file
obj.data_type= :csv
obj.image_type = :png
obj.savepath = ""
true
end
@test begin
try
obj = Common.application_options
obj.apply_by = :esgsg
catch
true
end
end
end
@testset "Padding " begin
@test begin
if EasyML.has_cuda()
collect(EasyML.Application.pad(EasyML.CuArray(ones(Float32,5,5,1,1)),(2,2),EasyML.Application.same))==ones(Float32,7,7,1,1)
else
EasyML.Application.pad(ones(Float32,5,5,1,1),(2,2),EasyML.Application.same)==ones(Float32,7,7,1,1)
end
end
@test begin
EasyML.Application.pad(ones(Float32,5,5,1,1),(2,2),ones)==ones(Float32,7,7,1,1)
end
@test begin
EasyML.Application.pad(ones(Float32,5,5),(2,2),EasyML.Application.same)==ones(Float32,7,7)
end
@test begin
EasyML.Application.pad(ones(Float32,5,5),(2,2),ones)==ones(Float32,7,7)
end
end
@testset "Other " begin
@test begin
f1() = true
t1 = Task(f1)
check_task(t1)
schedule(t1)
sleep(2)
check_task(t1)
f2() = 1/[]
t2 = Task(f2)
schedule(t2)
sleep(2)
check_task(t2)
true
end
@test begin
writedlm("test.qml", ["","import", "import", "import","",""])
url = "test.qml"
Common.add_templates(url)
Common.add_templates(url)
rm("test.qml")
true
end
@test begin problem_type(); true end
@test begin input_type(); true end
@test begin
change(global_options)
rm("options.bson")
true
end
@test begin
Common.max_num_threads()
Common.num_threads()
true
end
@test begin EasyML.none([]); true end
@test begin
conn(8)
true
end
end
using EasyML.DataPreparation
import EasyML.DataPreparation
cd(@__DIR__)
#---Main functionality----------------------------------------------------
data_preparation_options.Images.BackgroundCropping.enabled = true
@testset "Opening options" begin
@test begin change(data_preparation_options); true end
rm("options.bson")
end
for i = 1:2
if i==1
model_data.input_properties = [:grayscale]
DataPreparation.image_preparation_options.mirroring = true
DataPreparation.image_preparation_options.num_angles = 2
else
model_data.input_properties = Symbol[]
DataPreparation.image_preparation_options.mirroring = false
DataPreparation.image_preparation_options.num_angles = 1
end
@testset "Classification" begin
@test begin
load_model(joinpath(models_dir,"classification.model"))
change_classes()
true
end
@test begin
url_input = joinpath(examples_dir,"classification/test")
get_urls(url_input)
true
end
@test begin results = prepare_data(); true end
end
@testset "Regression" begin
@test begin
load_model(joinpath(models_dir,"regression.model"))
change_classes()
true
end
@test begin
url_input = joinpath(examples_dir,"regression/test")
url_label = joinpath(examples_dir,"regression/test.csv")
get_urls(url_input,url_label)
true
end
@test begin results = prepare_data(); true end
end
@testset "Segmentatation" begin
@test begin
load_model(joinpath(models_dir,"segmentation.model"))
change_classes()
true
end
@test begin
url_input = joinpath(examples_dir,"segmentation/images")
url_label = joinpath(examples_dir,"segmentation/labels")
get_urls(url_input,url_label)
true
end
@test begin results = prepare_data(); true end
end
end
@info "Handling errors"
@testset "Handling errors" begin
@test begin
empty!(model_data.classes)
prepare_data()
true
end
@test begin
model_data.problem_type = :classification
DataPreparation.preparation_data.ClassificationData = DataPreparation.ClassificationData()
prepare_data()
load_model(joinpath(models_dir,"classification.model"))
url_input = string(examples_dir,"\\classification\\")
get_urls(url_input)
push!(DataPreparation.unit_test.urls,string(examples_dir,"/classification/test"))
get_urls()
true
end
@test begin
model_data.problem_type = :regression
DataPreparation.preparation_data.RegressionData = DataPreparation.RegressionData()
prepare_data()
load_model(joinpath(models_dir,"regression.model"))
url_input = joinpath(examples_dir,"regression/")
url_label = joinpath(examples_dir,"regression/test.csv")
get_urls(url_input,url_label)
push!(DataPreparation.unit_test.urls, joinpath(examples_dir,"regression/test"),joinpath(examples_dir,"regression/test.csv"))
get_urls()
true
end
@test begin
model_data.problem_type = :segmentation
DataPreparation.preparation_data.SegmentationData = DataPreparation.SegmentationData()
prepare_data()
load_model(joinpath(models_dir,"segmentation.model"))
url_input = joinpath(examples_dir,"segmentation/")
url_label = joinpath(examples_dir,"segmentation/labels")
get_urls(url_input,url_label)
push!(DataPreparation.unit_test.urls, joinpath(examples_dir,"segmentation/images"),joinpath(examples_dir,"segmentation/labels"))
get_urls()
true
end
@test begin
model_data.problem_type = :classification
url_input = string(examples_dir,"/")
get_urls(url_input)
url_input = "examples2/"
get_urls(url_input)
true
end
@test begin
model_data.problem_type = :segmentation
url_input = string(examples_dir,"/")
url_label = string(examples_dir,"/")
get_urls(url_input,url_label)
url_input = "examples2/"
url_label = "examples2/"
get_urls(url_input,url_label)
get_urls(url_input)
true
end
@test begin
push!(DataPreparation.unit_test.urls, "")
load_model()
true
end
end
#---Other QML----------------------------------------
@testset "Other QML" begin
@test begin
DataPreparation.set_options(["DataPreparationOptions","Images","num_angles"],1)
true
end
empty!(model_data.input_properties)
@test begin
DataPreparation.set_model_data("input_properties","grayscale")
DataPreparation.get_model_data("input_properties","grayscale")==true
end
@test begin
DataPreparation.rm_model_data("input_properties","grayscale")
DataPreparation.get_model_data("input_properties","grayscale")==false
end
end
using EasyML.Design
import EasyML.Design
cd(@__DIR__)
#---Main functionality----------------------------------------------------
@testset "Main functionality" begin
set_savepath("models/test.model")
# Empty model
@test begin design_model(); true end
# All layers test model
@test begin
load_model("models/all_test.model")
design_model()
true
end
# Flatten error model
@test begin
load_model("models/flatten_error_test.model")
design_model()
true
end
# No output error model
@test begin
load_model("models/no_output_error_test.model")
design_model()
true
end
# Losses
@test begin
load_model("models/minimal_test.model")
losses = 0:13
for i = 1:length(losses)
model_data.layers_info[end].loss = losses[i]
design_model()
end
true
end
end
#---Other QML----------------------------------------------------------
@testset "Other QML" begin
@test begin
Design.set_problem_type(0)
Design.get_problem_type()
Design.set_problem_type(1)
Design.get_problem_type()
Design.set_problem_type(2)
Design.get_problem_type()
Design.get_input_type()
true
end
@test begin
fields = ["DesignOptions","width"]
value = 340
Design.set_options(fields,value)
true
end
end
@info "Classification test"
#---Init--------------------------------------------------------------------
model_data.problem_type = :classification
classes = ImageClassificationClass[]
for i=1:10
push!(classes,ImageClassificationClass(string(i),1))
end
model_data.classes = classes
#---Training test-----------------------------------------------------------
@testset "Input: Vector | Testing: Auto | Weights: Auto | Accuracy: Weight" begin
@test begin
Training.training_options.Testing.data_preparation_mode = :auto
Training.training_options.Accuracy.accuracy_mode = :auto
Training.training_options.Accuracy.weight_accuracy = true
data_input = map(_ -> rand(Float32,25),1:200)
data_labels = map(_ -> Int32(rand(1:10)),1:200)
set_training_data(data_input,data_labels)
set_testing_data()
model_data.model = Flux.Chain(Flux.Dense(25, 10))
train()
true
end
end
@testset "Input: Array | Testing: Manual | Weights: Manual | Accuracy: Regular" begin
@test begin
Training.training_options.Testing.data_preparation_mode = :manual
Training.training_options.Accuracy.accuracy_mode = :manual
Training.training_options.Accuracy.weight_accuracy = false
data_input = map(_ -> rand(Float32,5,5,1),1:200)
data_labels = map(_ -> Int32(rand(1:10)),1:200)
set_training_data(data_input,data_labels)
data_input = map(_ -> rand(Float32,5,5,1),1:20)
data_labels = map(_ -> Int32(rand(1:10)),1:20)
set_testing_data(data_input,data_labels)
set_weights(ones(10))
model_data.model = Flux.Chain(x->Flux.flatten(x),Flux.Dense(25, 10))
train()
true
end
end
#---Clean up test-----------------------------------------------------------
@testset "Clean up" begin
@test begin
remove_training_data()
remove_testing_data()
remove_training_results()
true
end
end
@info "Regression test"
#---Init test--------------------------------------------------------------
model_data.problem_type = :regression
#---Training test-----------------------------------------------------------
function vector_vector(mode::Symbol)
Training.training_options.Testing.data_preparation_mode = mode
data_input = map(_ -> rand(Float32,25),1:200)
data_labels = map(_ -> rand(Float32,5),1:200)
set_training_data(data_input,data_labels)
if mode==:auto
set_testing_data()
else
data_input = map(_ -> rand(Float32,25),1:20)
data_labels = map(_ -> rand(Float32,5),1:20)
set_testing_data(data_input,data_labels)
end
return nothing
end
function array_vector(mode::Symbol)
data_input = map(_ -> rand(Float32,5,5,1),1:200)
data_labels = map(_ -> rand(Float32,5),1:200)
set_training_data(data_input,data_labels)
if mode==:auto
set_testing_data()
else
data_input = map(_ -> rand(Float32,5,5,1),1:20)
data_labels = map(_ -> rand(Float32,5),1:20)
set_testing_data(data_input,data_labels)
end
return nothing
end
function array_array(mode::Symbol)
data_input = map(_ -> rand(Float32,5,5,1),1:1000)
data_labels = map(_ -> rand(Float32,5,5,1),1:1000)
set_training_data(data_input,data_labels)
if mode==:auto
set_testing_data()
else
data_input = map(_ -> rand(Float32,5,5,1),1:10)
data_labels = map(_ -> rand(Float32,5,5,1),1:10)
set_testing_data(data_input,data_labels)
end
return nothing
end
Training.training_options.Accuracy.weight_accuracy = false
Training.training_options.Accuracy.accuracy_mode = :auto
@testset "Input: Vector | Output: Vector" begin
model_data.model = Flux.Chain(Flux.Dense(25, 5))
@test begin
vector_vector(:auto)
train()
true
end
@test begin
vector_vector(:manual)
train()
true
end
end
@testset "Input: Array | Output: Vector" begin
model_data.model = Flux.Chain(x->Flux.flatten(x),Flux.Dense(25, 5))
@test begin
array_vector(:auto)
train()
true
end
@test begin
array_vector(:manual)
train()
true
end
end
@testset "Input: Array | Output: Array" begin
model_data.model = Flux.Chain(Flux.Conv((1,1), 1 => 1))
@test begin
array_array(:auto)
train()
true
end
@test begin
array_array(:manual)
train()
true
end
end
#---Clean up test-----------------------------------------------------------
@testset "Clean up" begin
@test begin
remove_training_data()
remove_testing_data()
remove_training_results()
true
end
end
using EasyML.Training
import EasyML.Training
cd(@__DIR__)
training_options.Testing.test_data_fraction = 0.1
model_data.normalization.f = EasyML.none
model_data.normalization.args = ()
set_savepath("models/test.model")
change(training_options)
#---CPU-----------------------------------------------------------
@info "CPU tests started"
global_options.HardwareResources.allow_GPU = false
include("classification.jl")
include("regression.jl")
include("segmentation.jl")
#---GPU------------------------------------------------------------
@info "GPU tests started"
global_options.HardwareResources.allow_GPU = true
include("classification.jl")
include("regression.jl")
include("segmentation.jl")
#---Other---------------------------------------------------------
Training.get_weights(model_data,Training.training_data.RegressionData)
load_model(joinpath(models_dir,"segmentation.model"))
Training.training_data.SegmentationData.Data.data_labels = [BitArray(undef,10,10,3)]
Training.get_weights(model_data,Training.training_data.SegmentationData)
#-----------------------------------------------------------------
rm("models/",recursive=true) | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
@info "Segmentation test"
#---Init test--------------------------------------------------------------
model_data.problem_type = :segmentation
#---Training test-----------------------------------------------------------
function array_array(mode::Symbol)
data_input = map(_ -> rand(Float32,5,5,1),1:200)
data_labels = map(_ -> BitArray{3}(undef,5,5,3),1:200)
set_training_data(data_input,data_labels)
if mode==:auto
set_testing_data()
else
data_input = map(_ -> rand(Float32,5,5,1),1:20)
data_labels = map(_ -> BitArray{3}(undef,5,5,3),1:20)
set_testing_data(data_input,data_labels)
end
return nothing
end
@testset "Input: Array | Output: Array" begin
Training.training_options.Accuracy.weight_accuracy = false
Training.training_options.Accuracy.accuracy_mode = :auto
model_data.model = Flux.Chain(Flux.Conv((1,1), 1 => 3))
@test begin
array_array(:auto)
train()
true
end
Training.training_options.Accuracy.weight_accuracy = true
Training.training_options.Accuracy.accuracy_mode = :manual
set_weights(ones(3))
@test begin
array_array(:manual)
train()
true
end
end
#---Clean up test-----------------------------------------------------------
@testset "Clean up" begin
@test begin
remove_training_data()
remove_testing_data()
remove_training_results()
true
end
end
using EasyML.Validation
import EasyML.Validation
cd(@__DIR__)
global_options.HardwareResources.num_slices = 3
global_options.HardwareResources.offset = 80
#---Main functionality---------------------------------------------------------
@testset "Options" begin
@test begin change(global_options); true end
@test begin change(validation_options); true end
end
rm("options.bson")
validation_options.Accuracy.weight_accuracy = true
global_options.HardwareResources.allow_GPU = true
@testset "Classfication" begin
load_model(joinpath(models_dir,"classification.model"))
@test begin
change_classes()
true
end
@test begin
url_data = "examples/with labels/classification/test"
get_urls_validation(url_data)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
@testset "Regression" begin
load_model(joinpath(models_dir,"regression.model"))
@test begin
change_classes()
true
end
@test begin
url_input = "examples/with labels/regression/test"
url_labels = "examples/with labels/regression/test.csv"
get_urls_validation(url_input,url_labels)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
@testset "Segmentation" begin
load_model(joinpath(models_dir,"segmentation.model"))
@test begin
change_classes()
true
end
@test begin
url_input = "examples/with labels/segmentation/images"
url_labels = "examples/with labels/segmentation/labels"
get_urls_validation(url_input,url_labels)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
validation_options.Accuracy.weight_accuracy = false
global_options.HardwareResources.allow_GPU = false
@testset "Classfication" begin
load_model(joinpath(models_dir,"classification.model"))
@test begin
change_classes()
true
end
@test begin
url_data = "examples/without labels/classification/test"
get_urls_validation(url_data)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
@testset "Regression" begin
load_model(joinpath(models_dir,"regression.model"))
@test begin
change_classes()
true
end
@test begin
url_input = "examples/without labels/regression/test"
get_urls_validation(url_input)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
@testset "Segmentation" begin
load_model(joinpath(models_dir,"segmentation.model"))
@test begin
change_classes()
true
end
@test begin
url_input = "examples/without labels/segmentation/images"
get_urls_validation(url_input)
true
end
@test begin
results = validate()
remove_validation_data()
remove_validation_results()
true
end
end
#---Other QML---------------------------------------------------------
@testset "Other QML" begin
@test begin Validation.set_options(["GlobalOptions","Graphics","scaling_factor"],1); true end
@test begin
model_data.problem_type = :classification
push!(Validation.unit_test.urls, "examples/with labels/classification/test")
get_urls_validation()
model_data.problem_type = :regression
push!(Validation.unit_test.urls, "examples/with labels/regression/test","examples/with labels/regression/test.csv")
get_urls_validation()
model_data.problem_type = :segmentation
push!(Validation.unit_test.urls, "examples/with labels/segmentation/images","examples/with labels/segmentation/labels")
get_urls_validation()
true
end
end
#---Other------------------------------------------------------------
@testset "Other" begin
@test begin Validation.conn(8); true end
@test begin
remove_validation_data()
validate()
empty!(model_data.classes)
validate()
model_data.model = Flux.Chain()
validate()
true
end
end
<p align="center">
<img width=200px src=https://raw.githubusercontent.com/OML-NPA/EasyML.jl/main/docs/src/assets/logo.png></img>
</p>
<h1 align="center">EasyML.jl</h1>
[](https://oml-npa.github.io/EasyML.jl/stable/)
[](https://oml-npa.github.io/EasyML.jl/dev/)
[](https://github.com/OML-NPA/EasyM.jl/actions/workflows/CI-main.yml)
[](https://github.com/OML-NPA/EasyM.jl/actions/workflows/CI-dev.yml)
[](https://codecov.io/gh/OML-NPA/EasyML.jl)
This package allows you to use machine learning in Julia through a graphical user interface.
NB! This is a beta version. Bugs and breaking changes should be expected.
## Features
It is possible to:
- Design a neural network
- Train a neural network
- Validate a neural network
- Apply a neural network to new data
Classification, regression and segmentation on images are currently supported.
The [Flux.jl](https://github.com/FluxML/Flux.jl) machine learning library is used under the hood.
<img src="https://github.com/OML-NPA/EasyML.jl/blob/dev/docs/src/assets/images/design_model.png" height="190"> <img src="https://github.com/OML-NPA/EasyML.jl/blob/dev/docs/src/assets/images/train.png" height="190"> <img src="https://github.com/OML-NPA/EasyML.jl/blob/dev/docs/src/assets/images/validate2.png" height="190">
## Installation
Run `] add EasyML` in the REPL.
If fonts do not render correctly, install [this](https://github.com/OML-NPA/EasyML.jl/raw/main/src/fonts/font.otf) and [this](https://github.com/OML-NPA/EasyML.jl/raw/main/src/fonts/font_bold.otf) font.
## Quick guide
EasyML is easy enough to figure out by yourself! Just run the following lines.
### Add the package
```julia
using EasyML
```
### Set up
```julia
change(global_options)
```
### Design
```julia
change_classes()
design_model()
```
### Train
```julia
change(data_preparation_options)
change(training_options)
get_urls_training()
get_urls_testing()
prepare_training_data()
prepare_testing_data()
results = train()
remove_training_data()
remove_testing_data()
remove_training_results()
```
### Validate
```julia
change(validation_options)
get_urls_validation()
results = validate()
remove_validation_data()
remove_validation_results()
```
### Apply
```julia
change(application_options)
change_output_options()
get_urls_application()
apply()
remove_application_data()
```
### On reopening
```julia
load_model()
load_options()
```
## Development
A plan for the project can be seen [here](https://github.com/OML-NPA/EasyML.jl/projects/2).
## Model data
A struct named `model_data` is exported and holds all information about your model.
```julia
mutable struct ModelData
model::AbstractModel # any of the supported models (currently only Flux.jl model).
normalization::Normalization # a Normalization struct, which describes how to normalize input data
loss::Function # holds loss that is used during training and validation.
input_size::Union{NTuple{2,Int64},NTuple{3,Int64}} # model input size.
output_size::Union{NTuple{2,Int64},NTuple{3,Int64}} # model output size.
input_type::Symbol # type of input data (:image).
problem_type::Symbol # type of ML problem (:classification, :regression or :segmentation).
classes::Vector{<:AbstractClass} # holds information about classes that a neural network outputs and what should be done with them.
output_options::Vector{<:AbstractOutputOptions} # holds information about output options for each class for application of the model.
layers_info::Vector{AbstractLayerInfo} # contains information for visualisation of layers.
end
```
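For example, a minimal sketch of inspecting and setting a few `model_data` fields (the model file name is illustrative):
```julia
using EasyML
load_model("models/classification.model") # populates model_data from a saved model
model_data.problem_type                   # :classification, :regression or :segmentation
model_data.classes                        # classes that the network outputs
```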
The types of `classes` and `output_options` depend on the type of the problem.
```julia
mutable struct ImageClassificationClass<:AbstractClass
name::String # name of a class.
weight::Float32 # weight of a class used for weighted accuracy calculation.
end
mutable struct ImageRegressionClass<:AbstractClass
name::String # name of a class.
end
mutable struct ImageSegmentationClass<:AbstractClass
name::String # name of a class. It is just for your convenience.
weight::Float32 # weight of a class used for weighted accuracy calculation.
color::Vector{Float64} # RGB color of a class, which should correspond to its color on your images. Uses 0-255 range.
parents::Vector{String} # up to two parents can be specified by their name. Objects from a child are added to its parent.
overlap::Bool # specifies that a class is an overlap of two classes and should be just added to specified parents.
min_area::Int64 # minimum area of an object.
BorderClass::BorderClass # allows to train a neural network to recognize borders and, therefore, better separate objects during post-processing.
end
mutable struct BorderClass
enabled::Bool
thickness::Int64 # border thickness in pixels.
end
```
`ImageClassificationOutputOptions` and `ImageRegressionOutputOptions` are currently empty. New functionality can be added on request.
```julia
mutable struct ImageSegmentationOutputOptions<:AbstractOutputOptions
Mask::OutputMask # holds output mask options.
Area::OutputArea # holds area of objects options.
Volume::OutputVolume # holds volume of objects options.
end
mutable struct OutputMask
mask::Bool # exports a mask after applying all processing except for border data.
mask_border::Bool # exports a mask with class borders if a class has border detection enabled.
mask_applied_border::Bool # exports a mask processed using border data.
end
mutable struct OutputArea
area_distribution::Bool # exports area distribution of detected objects as a histogram.
obj_area::Bool # exports area of each detected object.
obj_area_sum::Bool # exports sum of all areas for each class.
binning::Symbol # specifies a binning method (:auto, :number_of_bins, :bin_width).
value::Float64 # number of bins or bin width depending on a previous settings.
normalisation::Symbol # normalisation type for a histogram (:none, :pdf, :density, :probability).
end
mutable struct OutputVolume
volume_distribution::Bool # exports volume distribution of detected objects as a histogram.
obj_volume::Bool # exports volume of each detected object.
obj_volume_sum::Bool # exports sum of all volumes for each class.
binning::Symbol # specifies a binning method (:auto, :number_of_bins, :bin_width).
value::Float64 # number of bins or bin width depending on a previous settings.
normalisation::Symbol # normalisation type for a histogram (:none, :pdf, :density, :probability).
end
```
Example code for a segmentation problem.
```julia
class1 = ImageSegmentationClass(name = "Cell", weight = 1, color = [0,255,0], min_area = 5, BorderClass=BorderClass(true,5))
class2 = ImageSegmentationClass(name = "Vacuole", weight = 1, color = [255,0,0], parents = ["Cell",""], min_area = 5)
class_output_options1 = ImageSegmentationOutputOptions()
class_output_options2 = ImageSegmentationOutputOptions()
model_data.problem_type = :segmentation
classes = [class1,class2]
output_options = [class_output_options1,class_output_options2]
model_data.classes = classes
model_data.output_options = output_options
```
## Options
All options are located in `EasyML.options`.
```julia
mutable struct Options
GlobalOptions::GlobalOptions
DesignOptions::DesignOptions
DataPreparationOptions::DataPreparationOptions
TrainingOptions::TrainingOptions
ValidationOptions::ValidationOptions
ApplicationOptions::ApplicationOptions
end
```
### Global options
```julia
mutable struct GlobalOptions
Graphics::Graphics
HardwareResources::HardwareResources
end
```
Can be accessed as `EasyML.global_options`.
```julia
mutable struct HardwareResources
allow_GPU::Bool # allows using a GPU if a compatible one is installed.
num_threads::Int64 # the number of CPU threads that will be used.
num_slices::Int64 # allows processing of images during validation and application that would otherwise cause an out-of-memory error, by slicing them into multiple parts. Used only for segmentation.
offset::Int64 # offsets each slice by a given number of pixels so that no seam appears between slices.
end
```
```julia
mutable struct Graphics
scaling_factor::Float64 # scales GUI by a given factor.
end
```
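These options can also be adjusted in code instead of through the GUI; a minimal sketch (the values are illustrative):
```julia
EasyML.global_options.Graphics.scaling_factor = 1.5
EasyML.global_options.HardwareResources.allow_GPU = true
EasyML.global_options.HardwareResources.num_threads = 4
```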
### Design options
```julia
mutable struct DesignOptions
width::Float64 # width of layers
height::Float64 # height of layers
min_dist_x::Float64 # minimum horizontal distance between layers
min_dist_y::Float64 # minimum vertical distance between layers
end
```
Can be accessed as `EasyML.design_options`.
### Training options
```julia
mutable struct TrainingOptions
Accuracy::AccuracyOptions
Testing::TestingOptions
Hyperparameters::HyperparametersOptions
end
```
Can be accessed as `EasyML.training_options`.
```julia
mutable struct AccuracyOptions
weight_accuracy::Bool # uses weighted accuracy where applicable.
accuracy_mode::Symbol # either :auto or :manual. :manual allows specifying weights manually for each class.
end
```
```julia
mutable struct TestingOptions
test_data_fraction::Float64 # the fraction of training data to be used for testing if the data preparation mode is set to :Auto.
num_tests::Float64 # the number of tests to be done each epoch at equal intervals.
data_preparation_mode::Symbol # either :Auto or :Manual. :Auto takes a specified fraction of the training data for testing; :Manual allows using separate data for testing.
end
```
```julia
mutable struct HyperparametersOptions
optimiser::Symbol # an optimiser that should be used during training. ADAM usually works well for all cases.
optimiser_params::Vector{Float64} # parameters specific for each optimiser. Default ones can be found in EasyML.training_options_data.
learning_rate::Float64 # specifies how fast a model should train. Lower values - more stable, but slower. Higher values - less stable, but faster. Should be decreased as training progresses.
epochs::Int64 # a number of rounds for which a model should be trained.
batch_size::Int64 # the number of samples batched together during training.
end
```
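As with the other option groups, the hyperparameters can be set programmatically; a minimal sketch using the fields above (the values are illustrative):
```julia
training_options.Hyperparameters.optimiser = :ADAM
training_options.Hyperparameters.learning_rate = 1e-3
training_options.Hyperparameters.epochs = 10
training_options.Hyperparameters.batch_size = 32
```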
### Data preparation options
```julia
struct DataPreparationOptions
Images::ImagePreparationOptions = image_preparation_options
end
```
Can be accessed as `data_preparation_options`.
```julia
@with_kw mutable struct ImagePreparationOptions
grayscale::Bool = false # converts images to grayscale for training, validation and application.
mirroring::Bool = false # augments data by producing horizontally mirrored images.
num_angles::Int64 = 1 # augments data by rotating images using a specified number of angles. 1 means no rotation, only an angle of 0.
min_fr_pix::Float64 = 0.0 # if supplied images are bigger than a model's input size, then an image is broken into chunks of the correct size. This option specifies the minimum fraction of labeled pixels required for these chunks to be kept.
BackgroundCropping::BackgroundCroppingOptions = background_cropping_options # crops images to remove a uniformly black background.
end
```
```julia
@with_kw mutable struct BackgroundCroppingOptions
enabled::Bool = false
threshold::Float64 = 0.3 # creates a mask from values less than threshold.
closing_value::Int64 = 1 # value for morphological closing which is used to smooth the mask.
end
```
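A minimal sketch of enabling these preprocessing steps in code (the values are illustrative):
```julia
data_preparation_options.Images.grayscale = true
data_preparation_options.Images.mirroring = true
data_preparation_options.Images.num_angles = 4
data_preparation_options.Images.BackgroundCropping.enabled = true
data_preparation_options.Images.BackgroundCropping.threshold = 0.3
```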
### Validation options
```julia
mutable struct ValidationOptions
Accuracy::AccuracyOptions = accuracy_options
end
```
Can be accessed as `validation_options`.
```julia
mutable struct AccuracyOptions
weight_accuracy::Bool # uses weighted accuracy where applicable.
accuracy_mode::Symbol # either :auto or :manual. :manual allows specifying weights manually for each class.
end
```
### Application options
```julia
mutable struct ApplicationOptions
savepath::String # specifies where results are saved.
apply_by::Symbol # either :file or :folder. If :folder is chosen, then results of files in the same folder are combined.
data_type::Symbol # output data type (:csv,:xlsx,:json,:bson).
image_type::Symbol # output image type (:png,:tiff,:json,:bson).
scaling::Float64 # multiplies results by a specified factor.
end
```
Can be accessed as `application_options`.
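A minimal sketch of a typical configuration (the values are illustrative):
```julia
application_options.savepath = "results"
application_options.apply_by = :folder
application_options.data_type = :csv
application_options.image_type = :png
application_options.scaling = 1.0
```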
## A custom loop
A custom loop can be written using `forward`.
Example code for a segmentation problem.
```julia
model = model_data.model
data = [ones(Float32,160,160,3,1)] # vector with your data goes here
results = Vector{BitArray{3}}(undef,0)
for i = 1:length(data)
output_raw = forward(model,data[i])
output_bool = output_raw[:,:,:].>0.5
output = apply_border_data(output_bool,model_data.classes) # can be removed if your model does not detect borders
push!(results,output)
end
```
```@docs
EasyML.forward
```
```@docs
EasyML.apply_border_data
```
## Custom training data
Example code for a classification problem.
```julia
model_data.problem_type = :classification # :regression, or :segmentation
model_data.model = your_model
set_training_data(your_input,your_labels)
set_testing_data(your_test_input,your_test_labels)
training_options.Accuracy.accuracy_mode = :manual
set_weights(your_weights)
train()
```
```@docs
EasyML.set_training_data
```
```@docs
EasyML.set_testing_data
```
```@docs
EasyML.set_weights
```
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | docs | 1092 |
## Setting up
```@docs
change(global_options::EasyML.GlobalOptions)
```
```@docs
load_options
```
```@docs
save_options
```
```@docs
save_model
```
```@docs
load_model
```
## Design
```@docs
change_classes()
```
```@docs
design_model()
```
## Training
```@docs
change(data_preparation_options::EasyML.DataPreparationOptions)
```
```@docs
change(training_options::EasyML.TrainingOptions)
```
```@docs
get_urls_training
```
```@docs
get_urls_testing
```
```@docs
prepare_training_data
```
```@docs
prepare_testing_data
```
```@docs
train
```
```@docs
remove_training_data
```
```@docs
remove_testing_data
```
```@docs
remove_training_results
```
## Validation
```@docs
change(validation_options::EasyML.ValidationOptions)
```
```@docs
get_urls_validation
```
```@docs
validate
```
```@docs
remove_validation_data
```
```@docs
remove_validation_results
```
## Application
```@docs
change(application_options::EasyML.ApplicationOptions)
```
```@docs
change_output_options()
```
```@docs
get_urls_application
```
```@docs
apply
```
```@docs
remove_application_data
``` | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | docs | 8301 |
## Global options
```@raw html
<img src="./assets/images/global_options1.png" width = 520em>
<p><par class="definition">GUI scaling</par> - scales GUI by a given factor.</p>
```
```@raw html
<img src="./assets/images/global_options2.png" width = 520em>
<p><par class="definition">Allow GPU</par> - allows to use a GPU if a compatible one is installed.</p>
<p><par class="definition">Number of threads</par> - a number of CPU threads that will be used.</p>
<p><par class="definition">Number of slices</par> - allows to process images during validation and application that otherwise cause an out of memory error by slicing them into multiple parts. Used only for segmentation.</p>
<p><par class="definition">Offset</par> - offsets each slice by a given number of pixels to allow for an absence of a seam. </p>
```
## Adding classes
```@raw html
<img src="./assets/images/change_classes.png" width = 450em>
```
```@raw html
<style>
.definition{
color: rgb(231,76, 60);
font-weight: bold;
}
</style>
<p><par class="definition">Name</par> - name of a class. It is just for your convenience.</p>
<p><par class="definition">Weight</par> - used for weight accuracy during training and validation.
Calculated automatically during data preparation based on the frequency of classes. Can be also specified manually.</p>
<p><par class="definition">Parent</par> - adds a class to the specified parent.</p>
<p><par class="definition">Parent 2</par> - appears if the first parent is specified. Adds a class to the specified parent.</p>
<p><par class="definition">Color (RGB)</par> - RGB color of a class, which should correspond to its color on your images. Uses 0-255 range.</p>
<p><par class="definition">Overlap of classes</par> - specifies that a class is an overlap of two classes and should be just added to specified parents.</p>
<p><par class="definition">Minimum area</par> - removes objects that have area smaller than specified.</p>
<p><par class="definition">Generate border class</par> - prepares labels with object borders during data preparation and uses them for training.</p>
<p><par class="definition">Border thickness</par> - specifies thickness of a border in pixels.</p>
```
## Output options
```@raw html
<img src="./assets/images/change_output_options1.png" width = 600em>
<p><par class="definition">Output mask</par> - exports a mask after applying all processing except for border data.</p>
<p><par class="definition">Border mask</par> - exports a mask with class borders if a class has border detection enabled.</p>
<p><par class="definition">Applied border mask</par> - exports a mask also processed using border data.</p>
```
```@raw html
<img src="./assets/images/change_output_options2.png" width = 640em>
<p><par class="definition">Area distribution</par> - exports area distribution of detected objects as a histogram.</p>
<p><par class="definition">Area of objects</par> - exports area of each detected object.</p>
<p><par class="definition">Sum of areas of objects</par> - exports sum of all areas for each class.</p>
<p><par class="definition">Binning method</par> - specifies a binning method: automatic, number of bins or bin width.</p>
<p><par class="definition">Value</par> - number of bins or bin width depending on previous settings.</p>
<p><par class="definition">Normalisation</par> - normalisation type for a histogram: pdf, density, probability or none.</p>
```
```@raw html
<img src="./assets/images/change_output_options3.png" width = 640em>
```
All options are the same as for area.
## Model design

```@raw html
<style>
.column1 {
float: left;
}
.column2 {
padding: 0.40em 0em 0em 2.8em;
}
.filler {
float: left;
width: 100%;
height: 100px;
margin-bottom: 10em;
}
.row::after{
content: "";
clear: both;
display: table;
}
</style>
<div class="row">
<div class="column1">
<div>
<img src="./assets/images/icons/saveIcon.png" width = 34em>
</div>
<div>
<img src="./assets/images/icons/optionsIcon.png" width = 34em>
</div>
<div>
<img src="./assets/images/icons/arrangeIcon.png" width = 34em>
</div>
</div class="column1">
<div class="column2">
<p>- saves your model</p>
<p>- opens options for changing visual aspects</p>
<p>- arranges layers according to made connections</p>
</div class="column2">
</div class="row">
<div class="filler"></div class="filler">
```
## Data preparation options
```@raw html
<img src="./assets/images/data_preparation_options.png" width = 520em>
<p><par class="definition">Convert to grayscale</par> - converts images to grayscale for training, validation and application.</p>
<p><par class="definition">Crop background</par> - crops uniformly dark areas. Finds the last column from left and right and row from bottom and top to be uniformly dark.</p>
<p><par class="definition">Threshold</par> - creates a mask from values less than threshold.</p>
<p><par class="definition">Morphological closing value</par> - value for morphological closing which is used to smooth the mask.</p>
<p><par class="definition">Minimum fraction of labeled pixels</par> - if supplied images are bigger than a model's input size, then an image is broken into chunks with a correct size. This option specifies the minimum number of labeled pixels for these chunks to be kept.</p>
<p><par class="definition">Mirroring</par> - augments data by producing horizontally mirrored images.</p>
<p><par class="definition">Rotation</par> - augments data by rotating images using a specified number of angles. 1 means no rotation, only an angle of 0.</p>
```
## Training options
```@raw html
<img src="./assets/images/training_options1.png" width = 520em>
<p><par class="definition">Weight accuracy</par> - uses weight accuracy where applicable.</p>
<p><par class="definition">Mode</par> - either auto or manual. manual allows to specify weights manually for each class.</p>
```
```@raw html
<img src="./assets/images/training_options2.png" width = 520em>
<p><par class="definition">Data preparation mode</par> - either auto or manual. auto takes a specified fraction of training data to be used for testing. Manual allows to use other data as testing data.</p>
<p><par class="definition">Test data fraction</par> - a fraction of data from training data to be used for testing if data preparation mode is Auto.</p>
<p><par class="definition">Number of test</par> - a number of tests to be done each epoch at equal intervals.</p>
```
```@raw html
<img src="./assets/images/training_options3.png" width = 520em>
<p><par class="definition">Optimiser</par> - an optimiser that should be used during training. ADAM usually works well for all cases.</p>
<p>Next are parameters specific for each optimiser.</p>
<p><par class="definition">Learning rate</par> - specifies how fast a model should train. Lower values - more stable, but slower. Higher values - less stable, but faster. Should be decreased as training progresses.</p>
<p><par class="definition">Batch size</par> - a number of images that should be batched together during training.</p>
<p><par class="definition">Number of epochs</par> - a number of rounds for which a model should be trained.</p>
```
## Validation options
```@raw html
<img src="./assets/images/validation_options.png" width = 520em>
<p><par class="definition">Weight accuracy</par> - uses weight accuracy where applicable.</p>
```
## Application options
```@raw html
<img src="./assets/images/application_options.png" width = 520em>
<p><par class="definition">Save path</par> - a folder where output data should be saved.</p>
<p><par class="definition">Analyse by</par> - either file or folder. Used for segmentation. Analysis by file treats every image independently. Analysis by folder combines data for images in the same folder.</p>
<p><par class="definition">Output data type</par> - a format in which data should be saved.</p>
<p><par class="definition">Output image type</par>Output image type - a format in which images should be saved.</p>
<p><par class="definition">Scaling</par> - used for segmentation. Converts pixels to a unit of measurement of your choice.</p>
``` | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | docs | 253 |
If you encounter any issues with training, validation or application, then first check
the corresponding
`EasyML.training_data.tasks`,
`EasyML.validation_data.tasks` and
`EasyML.application_data.tasks`.
Failed tasks and their errors will be there. | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
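A minimal sketch of such an inspection, assuming the entries are standard Julia `Task` objects:
```julia
for t in EasyML.training_data.tasks
    if istaskfailed(t)
        @show t.result # holds the exception that caused the failure
    end
end
```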
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | docs | 1331 |
## Package features
This package allows you to use machine learning in Julia through a graphical user interface.
It is possible to:
- Design a neural network
- Train a neural network
- Validate a neural network
- Apply a neural network to new data
Classification, regression and segmentation on images are currently supported.
The [Flux.jl](https://github.com/FluxML/Flux.jl) machine learning library is used under the hood.
```@raw html
<style>
.column1 {
float: left;
width: 34%;
padding: 0.25%;
}
.column2 {
float: left;
width: 32.5%;
padding: 0.25%;
}
.column3 {
float: left;
width: 32.75%;
padding: 0.25%;
}
.filler {
float: left;
width: 100%;
margin-bottom: 0.6em;
}
.row::after{
content: "";
clear: both;
display: table;
}
</style>
<div class="row">
<div class="column1">
<img src="./assets/images/design_model.png">
</div>
<div class="column2">
<img src="./assets/images/train.png">
</div>
<div class="column3">
<img src="./assets/images/validate1.png">
</div>
</div>
<div class="filler">
</div>
```
## Installation
Run `] add EasyML` in the REPL.
If fonts do not look correct, then install [this](https://github.com/OML-NPA/EasyML.jl/raw/main/src/fonts/font.otf) and [this](https://github.com/OML-NPA/EasyML.jl/raw/main/src/fonts/font_bold.otf) fonts.
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | docs | 824 |
EasyML is easy enough to figure out by yourself! Just run the following lines.
## Add the package
```julia
using EasyML
```
## Set up
```julia
change(global_options)
```
## Design
```julia
change_classes()
change_output_options()
design_model()
```
## Train
```julia
change(data_preparation_options)
change(training_options)
get_urls_training()
get_urls_testing()
prepare_training_data()
prepare_testing_data()
results = train()
remove_training_data()
remove_testing_data()
remove_training_results()
```
## Validate
```julia
change(validation_options)
get_urls_validation()
results = validate()
remove_validation_data()
remove_validation_results()
```
## Apply
```julia
change(application_options)
get_urls_application()
apply()
remove_application_data()
```
## On reopening
```julia
load_model()
load_options()
``` | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 655 | using Documenter, ManifoldLearning
makedocs(
modules = [ManifoldLearning],
doctest = false,
clean = true,
sitename = "ManifoldLearning.jl",
pages = [
"Home" => "index.md",
"Methods" => [
"Isomap" => "isomap.md",
"Locally Linear Embedding" => "lle.md",
"Hessian Eigenmaps" => "hlle.md",
"Laplacian Eigenmaps" => "lem.md",
"Local Tangent Space Alignment" => "ltsa.md",
"Diffusion maps" => "diffmap.md",
"t-SNE" => "tsne.md",
],
"Misc" => [
"Interface" => "interface.md",
# "Nearest Neighbors" => "knn.md",
"Datasets" => "datasets.md",
],
]
)
deploydocs(repo = "github.com/wildart/ManifoldLearning.jl.git")
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 737 | using ManifoldLearning
include("nearestneighbors.jl")
X, L = ManifoldLearning.swiss_roll(;segments=5)
# Use default distance matrix based method to find nearest neighbors
Y1 = predict(fit(Isomap, X, k=10))
# Use NearestNeighbors package to find nearest neighbors
Y2 = predict(fit(Isomap, X, nntype=KDTree, k=10))
# Use FLANN package to find nearest neighbors
Y3 = predict(fit(Isomap, X, nntype=FLANNTree, k=8))
using Plots
plot(
scatter3d(X[1,:], X[2,:], X[3,:], zcolor=L, m=2, leg=:none, camera=(10,10), title="Swiss Roll"),
scatter(Y1[1,:], Y1[2,:], c=L, m=2, title="Distance Matrix"),
scatter(Y2[1,:], Y2[2,:], c=L, m=2, title="NearestNeighbors"),
scatter(Y3[1,:], Y3[2,:], c=L, m=2, title="FLANN")
, leg=false)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 2856 | # Additional wrappers for calculations of nearest neighbors
using ManifoldLearning
using LinearAlgebra: norm
import Base: show, size
import StatsAPI: fit
import ManifoldLearning: knn, inrange
# Wrapper around NearestNeighbors functionality
using NearestNeighbors: NearestNeighbors
struct KDTree <: ManifoldLearning.AbstractNearestNeighbors
fitted::AbstractMatrix
tree::NearestNeighbors.KDTree
end
show(io::IO, NN::KDTree) = print(io, "KDTree")
size(NN::KDTree) = size(NN.fitted)
fit(::Type{KDTree}, X::AbstractMatrix{T}) where {T<:Real} =
KDTree(X, NearestNeighbors.KDTree(X))
function knn(NN::KDTree, X::AbstractVecOrMat{T}, k::Integer;
             self::Bool=false, weights::Bool=true, kwargs...) where {T<:Real}
    m, n = size(X)
    @assert n > k "Number of observations must be more than $(k)"
    # query one extra neighbor so the point itself can be dropped when `self` is false
    A, D = NearestNeighbors.knn(NN.tree, X, k + !self, true)
    if !self
        A = [a[2:end] for a in A]
        D = [d[2:end] for d in D]
    end
    return A, D
end
function inrange(NN::KDTree, X::AbstractVecOrMat{T}, r::Real;
weights::Bool=false, kwargs...) where {T<:Real}
m, n = size(X)
A = NearestNeighbors.inrange(NN.tree, X, r)
W = Vector{Vector{T}}(undef, (weights ? n : 0))
if weights
for (i, ii) in enumerate(A)
W[i] = T[]
if length(ii) > 0
for v in eachcol(NN.fitted[:, ii])
d = norm(X[:,i] - v)
push!(W[i], d)
end
end
end
end
return A, W
end
# Wrapper around FLANN functionality
using FLANN: FLANN
struct FLANNTree{T <: Real} <: ManifoldLearning.AbstractNearestNeighbors
d::Int
index::FLANN.FLANNIndex{T}
end
show(io::IO, NN::FLANNTree) = print(io, "FLANNTree")
size(NN::FLANNTree) = (NN.d, length(NN.index))
function fit(::Type{FLANNTree}, X::AbstractMatrix{T}) where {T<:Real}
params = FLANN.FLANNParameters()
idx = FLANN.flann(X, params)
FLANNTree(size(X,1), idx)
end
function knn(NN::FLANNTree, X::AbstractVecOrMat{T}, k::Integer;
self::Bool=false, weights::Bool=false, kwargs...) where {T<:Real}
m, n = size(X)
E, D = FLANN.knn(NN.index, X, k+1)
idxs = (1:k).+(!self)
A = Vector{Vector{Int}}(undef, n)
W = Vector{Vector{T}}(undef, (weights ? n : 0))
for (i,(es, ds)) in enumerate(zip(eachcol(E), eachcol(D)))
A[i] = es[idxs]
if weights
W[i] = sqrt.(ds[idxs])
end
end
return A, W
end
function inrange(NN::FLANNTree, X::AbstractVecOrMat{T}, r::Real;
weights::Bool=false, kwargs...) where {T<:Real}
m, n = size(X)
A = Vector{Vector{Int}}(undef, n)
W = Vector{Vector{T}}(undef, (weights ? n : 0))
for (i, x) in enumerate(eachcol(X))
E, D = FLANN.inrange(NN.index, x, r)
A[i] = E
if weights
W[i] = D
end
end
return A, W
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 2118 | module ManifoldLearning
using LinearAlgebra
using SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, spzeros, spdiagm,
findnz, dropzeros!, nonzeros, sparse
using StatsAPI: pairwise
using Statistics: mean
using MultivariateStats: NonlinearDimensionalityReduction, KernelPCA,
dmat2gram, gram2dmat, transform!, projection,
symmetrize!, PCA
using Graphs: nv, add_edge!, connected_components, dijkstra_shortest_paths,
induced_subgraph, SimpleGraph
using Random: AbstractRNG, default_rng
import StatsAPI: fit, predict, pairwise, pairwise!
import Base: show, summary, size
import LinearAlgebra: eigvals
import Graphs: vertices, neighbors
import SparseArrays: sparse
export
# Transformation types
Isomap, # Type: Isomap model
HLLE, # Type: Hessian Eigenmaps model
LLE, # Type: Locally Linear Embedding model
LTSA, # Type: Local Tangent Space Alignment model
LEM, # Type: Laplacian Eigenmaps model
DiffMap, # Type: Diffusion maps model
TSNE, # Type: t-Distributed Stochastic Neighborhood Embedding
## common interface
outdim, # the output dimension of the transformation
fit, # perform the manifold learning
predict, # transform the data using a given model
eigvals, # eigenvalues from the spectral analysis
    neighbors,  # the number of nearest neighbors used to approximate the local subspace
vertices # vertices of largest connected component
include("interface.jl")
include("utils.jl")
include("nearestneighbors.jl")
include("isomap.jl")
include("hlle.jl")
include("lle.jl")
include("ltsa.jl")
include("lem.jl")
include("diffmaps.jl")
include("tsne.jl")
# deprecated functions
@deprecate transform(m) predict(m)
@deprecate transform(m, x) predict(m, x)
@deprecate outdim(m) size(m)[2]
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 3554 | # Diffusion maps
# --------------
# Diffusion maps,
# Coifman, R. & Lafon, S., Applied and Computational Harmonic Analysis, Elsevier, 2006, 21, 5-30
"""
    DiffMap{T <: Real} <: NonlinearDimensionalityReduction
The `DiffMap` type represents diffusion maps model constructed for `T` type data.
"""
struct DiffMap{T <: Real} <: NonlinearDimensionalityReduction
d::Number
t::Int
α::Real
ɛ::Real
λ::AbstractVector{T}
K::AbstractMatrix{T}
proj::Projection{T}
end
## properties
size(R::DiffMap) = (R.d, size(R.proj, 1))
eigvals(R::DiffMap) = R.λ
## custom
"""Returns the kernel matrix of the diffusion maps model `R`"""
kernel(R::DiffMap) = R.K
## show
function summary(io::IO, R::DiffMap)
id, od = size(R)
print(io, "Diffusion Maps(indim = $id, outdim = $od, t = $(R.t), α = $(R.α), ɛ = $(R.ɛ))")
end
function show(io::IO, R::DiffMap)
summary(io, R)
io = IOContext(io, :limit=>true)
println(io)
println(io, "Kernel: ")
Base.print_matrix(io, R.K, "[", ",","]")
println(io)
println(io, "Embedding:")
Base.print_matrix(io, transform(R), "[", ",","]")
end
## interface functions
"""
fit(DiffMap, data; maxoutdim=2, t=1, α=0.0, ɛ=1.0)
Fit a diffusion map model to `data`.
# Arguments
* `data::Matrix`: a ``d \\times n`` matrix of observations. Each column of `data` is
an observation, `d` is a number of features, `n` is a number of observations.
# Keyword arguments
* `kernel::Union{Nothing, Function}`: the kernel function.
It maps two input vectors (observations) to a scalar (a metric of their similarity).
By default, a Gaussian kernel is used. If `kernel` is set to `nothing`, `data` is
assumed to be the ``n \\times n`` precomputed Gram matrix instead.
* `ɛ::Real=1.0`: the Gaussian kernel variance (the scale parameter). It's ignored if the custom `kernel` is passed.
* `maxoutdim::Int=2`: the dimension of the reduced space.
* `t::Int=1`: the number of transitions
* `α::Real=0.0`: a normalization parameter
# Examples
```julia
X = rand(3, 100) # toy data matrix, 100 observations
# default kernel
M = fit(DiffMap, X) # construct diffusion map model
R = transform(M) # perform dimensionality reduction
# custom kernel
kernel = (x, y) -> x' * y # linear kernel
M = fit(DiffMap, X, kernel=kernel)
# precomputed Gram matrix
kernel = (x, y) -> x' * y # linear kernel
K = ManifoldLearning.pairwise(kernel, eachcol(X), symmetric=true)
M = fit(DiffMap, K, kernel=nothing)
```
"""
function fit(::Type{DiffMap}, X::AbstractMatrix{T};
ɛ::Real=1.0,
kernel::Union{Nothing, Function}=(x, y) -> exp(-sum((x .- y) .^ 2) / convert(T, ɛ)),
maxoutdim::Int=2,
t::Int=1,
α::Real=0.0) where {T<:Real}
if isa(kernel, Function)
# compute Gram matrix
L = pairwise(kernel, eachcol(X), symmetric=true)
d = size(X,1)
else
# X is the pre-computed Gram matrix
L = deepcopy(X) # deep copy needed b/c of procedure for α > 0
d = NaN
@assert issymmetric(L)
end
# Calculate Laplacian & normalize it
if α > 0
normalize!(L, α=α, norm=:sym) # Lᵅ = D⁻ᵅ*L*D⁻ᵅ
normalize!(L, α=α, norm=:rw) # M =(Dᵅ)⁻¹*Lᵅ
end
# Eigendecomposition & reduction
λ, V = decompose(L, maxoutdim; rev=true, skipfirst=false)
Y = (λ .^ t) .* V'
return DiffMap{T}(d, t, α, ɛ, λ, L, Y)
end
"""
predict(R::DiffMap)
Transforms the data fitted to the diffusion map model `R` into a reduced space representation.
"""
predict(R::DiffMap) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 3106 | # Hessian Eigenmaps (HLLE)
# ---------------------------
# Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data,
# D. Donoho and C. Grimes, Proc Natl Acad Sci U S A. 2003 May 13; 100(10): 5591–5596
import Combinatorics: combinations
"""
HLLE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
The `HLLE` type represents a Hessian eigenmaps model constructed for `T` type data with the help of the `NN` nearest neighbor algorithm.
"""
struct HLLE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
d::Int
k::Real
λ::AbstractVector{T}
proj::Projection{T}
nearestneighbors::NN
component::AbstractVector{Int}
end
## properties
size(R::HLLE) = (R.d, size(R.proj, 1))
eigvals(R::HLLE) = R.λ
neighbors(R::HLLE) = R.k
vertices(R::HLLE) = R.component
## show
function summary(io::IO, R::HLLE)
id, od = size(R)
msg = isinteger(R.k) ? "neighbors" : "epsilon"
print(io, "HLLE{$(R.nearestneighbors)}(indim = $id, outdim = $od, $msg = $(R.k))")
end
## interface functions
"""
fit(HLLE, data; k=12, maxoutdim=2, nntype=BruteForce)
Fit a Hessian eigenmaps model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `k`: a number of nearest neighbors for construction of local subspace representation
* `maxoutdim`: a dimension of the reduced space.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
# Examples
```julia
M = fit(HLLE, rand(3,100)) # construct Hessian eigenmaps model
R = predict(M) # perform dimensionality reduction
```
"""
function fit(::Type{HLLE}, X::AbstractMatrix{T};
k::Real=12, maxoutdim::Int=2, nntype=BruteForce) where {T<:Real}
# Construct NN graph
d, n = size(X)
NN = fit(nntype, X)
A = adjacency_matrix(NN, X, k)
G, C = largest_component(SimpleGraph(A))
# Obtain tangent coordinates and develop Hessian estimator
hs = (maxoutdim*(maxoutdim+1)) >> 1
W = spzeros(T, hs*n, n)
for i=1:n
II, _ = findnz(A[:,C[i]])
# re-center points in neighborhood
VX = view(X, :, II)
μ = mean(VX, dims=2)
N = VX .- μ
# calculate tangent coordinates
tc = svd(N).V[:,1:maxoutdim]
# Develop Hessian estimator
l = length(II)
Yi = [ones(T, l) tc zeros(T, l, hs)]
for ii=1:maxoutdim
Yi[:,maxoutdim+ii+1] = tc[:,ii].^2
end
yi = 2*(1+maxoutdim)
for (ii,jj) in combinations(1:maxoutdim, 2)
Yi[:, yi] = tc[:, ii] .* tc[:, jj]
yi += 1
end
F = qr(Yi)
H = transpose(F.Q[:,(end-(hs-1)):end])
W[(1:hs).+(i-1)*hs, II] = H
end
# decomposition
λ, V = decompose(transpose(W)*W, maxoutdim)
return HLLE{nntype, T}(d, k, λ, transpose(V) .* convert(T, sqrt(n)), NN, C)
end
"""
predict(R::HLLE)
Transforms the data fitted to the Hessian eigenmaps model `R` into a reduced space representation.
"""
predict(R::HLLE) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 2020 | ## Interface
const Projection{T <: Real} = AbstractMatrix{T}
"""
vertices(R::NonlinearDimensionalityReduction)
Returns the vertices of the largest connected component in the model `R`.
"""
vertices(R::NonlinearDimensionalityReduction) = Int[]
"""
neighbors(R::NonlinearDimensionalityReduction)
Returns the number of nearest neighbors used to approximate the local subspace.
"""
neighbors(R::NonlinearDimensionalityReduction) = 0
"""
fit(NonlinearDimensionalityReduction, X)
Perform model fitting given the data `X`
"""
fit(::Type{NonlinearDimensionalityReduction}, X::AbstractMatrix; kwargs...) = throw("Model fitting is not implemented")
"""
predict(R::NonlinearDimensionalityReduction)
Returns a reduced space representation of the data given the model `R` in the form of the projection matrix (of size ``(d, n)``), where `d` is the dimension of the reduced space and `n` is the number of observations. Each column of the projection matrix corresponds to an observation in the projected reduced space.
"""
predict(R::NonlinearDimensionalityReduction) = throw("Data transformation is not implemented")
"""
size(R::NonlinearDimensionalityReduction)
Returns a tuple of the input and reduced space dimensions for the model `R`
"""
size(R::NonlinearDimensionalityReduction) = (0,0)
"""
eigvals(R::NonlinearDimensionalityReduction)
Returns eigenvalues of the reduced space representation for the model `R`
"""
eigvals(R::NonlinearDimensionalityReduction) = Float64[]
# Auxiliary functions
show(io::IO, ::MIME"text/plain", R::T) where {T<:NonlinearDimensionalityReduction} = summary(io, R)
function show(io::IO, R::T) where {T<:NonlinearDimensionalityReduction}
summary(io, R)
io = IOContext(io, :limit=>true)
println(io)
println(io, "connected component: ")
Base.show_vector(io, vertices(R))
println(io)
println(io, "eigenvalues: ")
Base.show_vector(io, eigvals(R))
println(io)
println(io, "projection:")
Base.print_matrix(io, transform(R), "[", ",","]")
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 2978 | # Isomap
# ------
# A Global Geometric Framework for Nonlinear Dimensionality Reduction,
# J. B. Tenenbaum, V. de Silva and J. C. Langford, Science 290 (5500): 2319-2323, 22 December 2000
"""
    Isomap{NN <: AbstractNearestNeighbors} <: NonlinearDimensionalityReduction
The `Isomap` type represents an isometric mapping model constructed with the help of the `NN` nearest neighbor algorithm.
"""
struct Isomap{NN <: AbstractNearestNeighbors} <: NonlinearDimensionalityReduction
d::Int
k::Real
model::KernelPCA
nearestneighbors::NN
component::AbstractVector{Int}
end
## properties
size(R::Isomap) = (R.d, size(R.model)[2])
eigvals(R::Isomap) = eigvals(R.model)
neighbors(R::Isomap) = R.k
vertices(R::Isomap) = R.component
## show
function summary(io::IO, R::Isomap)
id, od = size(R)
msg = isinteger(R.k) ? "neighbors" : "epsilon"
print(io, "Isomap{$(R.nearestneighbors)}(indim = $id, outdim = $od, $msg = $(R.k))")
end
## interface functions
"""
fit(Isomap, data; k=12, maxoutdim=2, nntype=BruteForce)
Fit an isometric mapping model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `k`: a number of nearest neighbors for construction of local subspace representation
* `maxoutdim`: a dimension of the reduced space.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
# Examples
```julia
M = fit(Isomap, rand(3,100)) # construct Isomap model
R = predict(M) # perform dimensionality reduction
```
"""
function fit(::Type{Isomap}, X::AbstractMatrix{T};
k::Real=12, maxoutdim::Int=2, nntype=BruteForce) where {T<:Real}
# Construct NN graph
d, n = size(X)
NN = fit(nntype, X)
A = adjacency_matrix(NN, X, k)
G, C = largest_component(SimpleGraph(A))
# Compute shortest path for every point
n = length(C)
DD = zeros(T, n, n)
for i in 1:n
dj = dijkstra_shortest_paths(G, i, A)
DD[i,:] .= dj.dists
end
broadcast!(x->-x*x/2, DD, DD)
#symmetrize!(DD) # error in MvStats
DD = (DD+DD')/2
M = fit(KernelPCA, DD, kernel=nothing, maxoutdim=maxoutdim)
return Isomap{nntype}(d, k, M, NN, C)
end
"""
predict(R::Isomap)
Transforms the data fitted to the Isomap model `R` into a reduced space representation.
"""
predict(R::Isomap) = predict(R.model)
"""
predict(R::Isomap, X::AbstractVecOrMat)
Returns a transformed out-of-sample data `X` given the Isomap model `R` into a reduced space representation.
"""
function predict(R::Isomap, X::AbstractVecOrMat{T}) where {T<:Real}
n = size(X,2)
E, W = adjacency_list(R.nearestneighbors, X, R.k, self = true, weights=true)
D = gram2dmat(R.model.X)
G = zeros(size(R.model.X,2), n)
for i in 1:n
G[:,i] = minimum(D[:,E[i]] .+ W[i]', dims=2)
end
broadcast!(x->-x*x/2, G, G)
transform!(R.model.center, G)
return projection(R.model)'*G
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 2983 | # Laplacian Eigenmaps
# -------------------
# Laplacian Eigenmaps for Dimensionality Reduction and Data Representation,
# M. Belkin, P. Niyogi, Neural Computation, June 2003; 15 (6):1373-1396
"""
LEM{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
The `LEM` type represents a Laplacian eigenmaps model constructed for `T` type data with the help of the `NN` nearest neighbor algorithm.
"""
struct LEM{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
d::Int
k::Real
λ::AbstractVector{T}
ɛ::T
proj::Projection{T}
nearestneighbors::NN
component::AbstractVector{Int}
end
## properties
size(R::LEM) = (R.d, size(R.proj, 1))
eigvals(R::LEM) = R.λ
neighbors(R::LEM) = R.k
vertices(R::LEM) = R.component
## show
function summary(io::IO, R::LEM)
id, od = size(R)
msg = isinteger(R.k) ? "neighbors" : "epsilon"
print(io, "LEM{$(R.nearestneighbors)}(indim = $id, outdim = $od, $msg = $(R.k))")
end
## interface functions
"""
fit(LEM, data; k=12, maxoutdim=2, ɛ=1.0, nntype=BruteForce)
Fit a Laplacian eigenmaps model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `k`: a number of nearest neighbors for construction of local subspace representation
* `maxoutdim`: a dimension of the reduced space.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
* `ɛ`: a Gaussian kernel variance (the scale parameter)
* `laplacian`: a form of the Laplacian matrix used for spectral decomposition
* `:unnorm`: an unnormalized Laplacian
* `:sym`: a symmetrically normalized Laplacian
* `:rw`: a random walk normalized Laplacian
# Examples
```julia
M = fit(LEM, rand(3,100)) # construct Laplacian eigenmaps model
R = predict(M) # perform dimensionality reduction
```
"""
function fit(::Type{LEM}, X::AbstractMatrix{T}; k::Real=12, maxoutdim::Int=2,
ɛ::Real=1, laplacian::Symbol=:unnorm, nntype=BruteForce) where {T<:Real}
# Construct NN graph
d, n = size(X)
NN = fit(nntype, X)
A = adjacency_matrix(NN, X, k)
G, C = largest_component(SimpleGraph(A))
# Compute weights of heat kernel
W = A[C,C]
I, J, V = findnz(W)
@inbounds for (i,j,v) in zip(I,J,V)
        W[i,j] = exp(-v*v/ɛ)
end
L, D = Laplacian(W)
λ, V = if laplacian == :unnorm
decompose(L, collect(D), maxoutdim)
elseif laplacian == :sym
normalize!(L, D, α=1/2, norm=laplacian)
decompose(L, maxoutdim)
elseif laplacian == :rw
normalize!(L, D, α=1, norm=laplacian)
decompose(L, maxoutdim)
else
throw(ArgumentError("Unkown Laplacian type: $laplacian"))
end
return LEM{nntype, T}(d, k, λ, ɛ, transpose(V), NN, C)
end
"""
predict(R::LEM)
Transforms the data fitted to the Laplacian eigenmaps model `R` into a reduced space representation.
"""
predict(R::LEM) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 3560 | # Locally Linear Embedding (LLE)
# ------------------------
# Nonlinear dimensionality reduction by locally linear embedding,
# Roweis, S. & Saul, L., Science 290:2323 (2000)
"""
LLE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
The `LLE` type represents a locally linear embedding model constructed for `T` type data with the help of the `NN` nearest neighbor algorithm.
"""
struct LLE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
d::Int
k::Real
λ::AbstractVector{T}
proj::Projection{T}
nearestneighbors::NN
component::AbstractVector{Int}
end
## properties
size(R::LLE) = (R.d, size(R.proj, 1))
eigvals(R::LLE) = R.λ
neighbors(R::LLE) = R.k
vertices(R::LLE) = R.component
## show
function summary(io::IO, R::LLE)
id, od = size(R)
msg = isinteger(R.k) ? "neighbors" : "epsilon"
print(io, "LLE{$(R.nearestneighbors)}(indim = $id, outdim = $od, $msg = $(R.k))")
end
## interface functions
"""
fit(LLE, data; k=12, maxoutdim=2, nntype=BruteForce, tol=1e-5)
Fit a locally linear embedding model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `k`: a number of nearest neighbors for construction of local subspace representation
* `maxoutdim`: a dimension of the reduced space.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
* `tol`: an algorithm regularization tolerance
# Examples
```julia
M = fit(LLE, rand(3,100)) # construct LLE model
R = transform(M) # perform dimensionality reduction
```
"""
function fit(::Type{LLE}, X::AbstractMatrix{T};
k::Int=12, maxoutdim::Int=2, nntype=BruteForce, tol::Real=1e-5) where {T<:Real}
# Construct NN graph
d, n = size(X)
NN = fit(nntype, X)
E, _ = adjacency_list(NN, X, k)
_, C = largest_component(SimpleGraph(n, E))
# Correct indexes of neighbors if more then one connected component
fixindex = length(C) < n
if fixindex
n = length(C)
R = Dict(zip(C, collect(1:n)))
end
if k > d
@warn("k > $d: regularization will be used")
else
tol = 0
end
# Reconstruct weights and compute embedding:
M = spdiagm(0 => fill(one(T), n))
#W = spzeros(T, n, n)
O = fill(one(T), k, 1)
for i in C
NI = E[i] # neighbor's indexes
# fix indexes for connected components
NIfix, NIcc, j = if fixindex # fix index
JJ = [i for i in NI if i ∈ C] # select points that are in CC
KK = [R[i] for i in JJ if haskey(R, i)] # convert NI to CC index
JJ, KK, R[i]
else
NI, NI, i
end
l = length(NIfix)
l == 0 && continue # skip
# centering neighborhood of point xᵢ
zᵢ = view(X, :, NIfix) .- view(X, :, i)
# calculate weights: wᵢ = (Gᵢ + αI)⁻¹1
G = zᵢ'zᵢ
w = (G + tol * I) \ fill(one(T), l, 1) |> vec
w ./= sum(w)
# M = (I - w)'(I - w) = I - w'I - Iw + w'w
M[NIcc,j] .-= w
M[j,NIcc] .-= w
M[NIcc,NIcc] .+= w*w'
#W[NI, i] .= w
end
#@assert all(sum(W, dims=1).-1 .< tol) "Weights are not normalized"
#M = (I-W)*(I-W)'
λ, V = decompose(M, maxoutdim)
return LLE{nntype, T}(d, k, λ, transpose(V) .* convert(T, sqrt(n)), NN, C)
end
"""
predict(R::LLE)
Transforms the data fitted to the LLE model `R` into a reduced space representation.
"""
predict(R::LLE) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 3319 | # Local Tangent Space Alignment (LTSA)
# ---------------------------
# Principal Manifolds and Nonlinear Dimension Reduction via Local Tangent Space Alignment,
# Zhang, Zhenyue; Hongyuan Zha (2004), SIAM Journal on Scientific Computing 26 (1): 313–338.
# doi:10.1137/s1064827502419154.
"""
LTSA{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
The `LTSA` type represents a local tangent space alignment model constructed for `T` type data with the help of the `NN` nearest neighbor algorithm.
"""
struct LTSA{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
d::Int
k::Real
λ::AbstractVector{T}
proj::Projection{T}
nearestneighbors::NN
component::AbstractVector{Int}
end
## properties
size(R::LTSA) = (R.d, size(R.proj, 1))
eigvals(R::LTSA) = R.λ
neighbors(R::LTSA) = R.k
vertices(R::LTSA) = R.component
## show
function summary(io::IO, R::LTSA)
id, od = size(R)
msg = isinteger(R.k) ? "neighbors" : "epsilon"
print(io, "LTSA{$(R.nearestneighbors)}(indim = $id, outdim = $od, neighbors = $(R.k))")
end
## interface functions
"""
fit(LTSA, data; k=12, maxoutdim=2, nntype=BruteForce)
Fit a local tangent space alignment model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `k`: a number of nearest neighbors for construction of local subspace representation
* `maxoutdim`: a dimension of the reduced space.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
# Examples
```julia
M = fit(LTSA, rand(3,100)) # construct LTSA model
R = transform(M) # perform dimensionality reduction
```
"""
function fit(::Type{LTSA}, X::AbstractMatrix{T};
k::Real=12, maxoutdim::Int=2, nntype=BruteForce) where {T<:Real}
# Construct NN graph
d, n = size(X)
NN = fit(nntype, X)
E, _ = adjacency_list(NN, X, k)
_, C = largest_component(SimpleGraph(n, E))
# Correct indexes of neighbors if more then one connected component
fixindex = length(C) < n
if fixindex
n = length(C)
R = Dict(zip(C, collect(1:n)))
end
B = spzeros(T,n,n)
for i in C
NI = E[i] # neighbor's indexes
# fix indexes for connected components
NIfix, NIcc = if fixindex # fix index
JJ = [i for i in NI if i ∈ C] # select points that are in CC
KK = [R[i] for i in JJ if haskey(R, i)] # convert NI to CC index
JJ, KK
else
NI, NI
end
l = length(NIfix)
l == 0 && continue # skip
# re-center points in neighborhood
VX = view(X, :, NIfix)
μ = mean(VX, dims=2)
δ_x = VX .- μ
# Compute orthogonal basis H of θ'
θ_t = view(svd(δ_x).V, :, 1:maxoutdim)
# Construct alignment matrix
S = ones(l)./sqrt(l)
G = hcat(S, θ_t)
B[NIcc, NIcc] .+= Diagonal(fill(one(T), l)) .- G*transpose(G)
end
# Align global coordinates
λ, V = decompose(B, maxoutdim)
return LTSA{nntype, T}(d, k, λ, transpose(V), NN, C)
end
"""
predict(R::LTSA)
Transforms the data fitted to the local tangent space alignment model `R` into a reduced space representation.
"""
predict(R::LTSA) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 4637 | """
AbstractNearestNeighbors
Abstract type for nearest neighbor plug-in implementations.
"""
abstract type AbstractNearestNeighbors end
"""
size(NN::AbstractNearestNeighbors)
Returns the size of the fitted data.
"""
function size(NN::AbstractNearestNeighbors) end
"""
    knn(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Integer; kwargs...) -> (I,D)
Returns length-`n` collections `I` and `D` of point indexes and distances to the `k` nearest
neighbors for the points in the `(m,n)`-matrix `X` given the `NN` object.
"""
function knn(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Integer; kwargs...) where T<:Real end
"""
inrange(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, r::Real; kwargs...) -> (I,D)
Returns collections of point indexes and distances in radius `r` of points in
the `(m,n)`-matrix `X` given the `NN` object.
"""
function inrange(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, r::Real; kwargs...) where T<:Real end
"""
adjacency_list(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Real; kwargs...) -> (A, W)
Perform construction of an adjacency list `A` with corresponding weights `W` from
the points in `X` given the `NN` object.
- If `k` is a positive integer, then `k` nearest neighbors are used for construction.
- If `k` is a real number, then radius `k` neighborhood is used for construction.
"""
function adjacency_list(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T},
k::Integer; weights::Bool=false, kwargs...) where T<:Real
A, W = knn(NN, X, k; weights=weights, kwargs...)
return A, W
end
function adjacency_list(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T},
k::Real; weights::Bool=false, kwargs...) where T<:Real
A, W = inrange(NN, X, k; weights=weights, kwargs...)
return A, W
end
"""
adjacency_matrix(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Real; kwargs...) -> A
Perform construction of a weighted adjacency distance matrix `A` from the points
in `X` given the `NN` object.
- If `k` is a positive integer, then `k` nearest neighbors are used for construction.
- If `k` is a real number, then radius `k` neighborhood is used for construction.
"""
function adjacency_matrix(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T},
k::Integer; symmetric::Bool=true, kwargs...) where T<:Real
n = size(NN)[2]
m = length(eachcol(X))
    @assert n >= m "Cannot construct a matrix for more than $n fitted points"
E, W = knn(NN, X, k; weights=true, kwargs...)
return sparse(E, W, n, symmetric=symmetric)
end
function adjacency_matrix(NN::AbstractNearestNeighbors, X::AbstractVecOrMat{T},
r::Real; symmetric::Bool=true, kwargs...) where T<:Real
n = size(NN)[2]
m = length(eachcol(X))
    @assert n >= m "Cannot construct a matrix for more than $n fitted points"
E, W = inrange(NN, X, r; weights=true, kwargs...)
return sparse(E, W, n, symmetric=symmetric)
end
# Implementation
"""
BruteForce
Calculate nearest neighborhoods using pairwise distance matrix.
"""
struct BruteForce{T<:Real} <: AbstractNearestNeighbors
fitted::AbstractMatrix{T}
end
show(io::IO, NN::BruteForce) = print(io, "BruteForce")
size(NN::BruteForce) = size(NN.fitted)
fit(::Type{BruteForce}, X::AbstractMatrix{T}) where {T<:Real} = BruteForce(X)
function knn(NN::BruteForce{T}, X::AbstractVecOrMat{T}, k::Integer;
self::Bool=false, weights::Bool=true, kwargs...) where T<:Real
l = size(NN)[2]
    @assert l > k "Number of fitted observations must be more than $(k)"
# construct distance matrix
D = pairwise((x,y)->norm(x-y), eachcol(NN.fitted), eachcol(X))
idxs = (1:k).+(!self)
n = size(X,2)
A = Vector{Vector{Int}}(undef, n)
W = Vector{Vector{T}}(undef, (weights ? n : 0))
@inbounds for (j, ds) in enumerate(eachcol(D))
kidxs = sortperm(ds)[idxs]
A[j] = kidxs
if weights
W[j] = D[kidxs, j]
end
end
return A, W
end
function inrange(NN::BruteForce{T}, X::AbstractVecOrMat{T}, r::Real;
self::Bool=false, weights::Bool=false, kwargs...) where T<:Real
# construct distance matrix
D = pairwise((x,y)->norm(x-y), eachcol(NN.fitted), eachcol(X))
n = size(X,2)
A = Vector{Vector{Int}}(undef, n)
W = Vector{Vector{T}}(undef, (weights ? n : 0))
@inbounds for (j, ds) in enumerate(eachcol(D))
kidxs = self ? findall(0 .<= ds .<= r) : findall(0 .< ds .<= r)
A[j] = kidxs
if weights
W[j] = D[kidxs, j]
end
end
return A, W
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 6739 | # t-Distributed Stochastic Neighborhood Embedding (t-SNE)
# -------------------------------------------------------
# Visualizing Data using t-SNE
# L. van der Maaten, G. Hinton, Journal of Machine Learning Research 9 (2008) 2579-2605
"""
TSNE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
The `TSNE` type represents a t-SNE model constructed for `T` type data with the help of the `NN` nearest neighbor algorithm.
"""
struct TSNE{NN <: AbstractNearestNeighbors, T <: Real} <: NonlinearDimensionalityReduction
d::Int
p::Real
β::AbstractVector{T}
proj::Projection{T}
nearestneighbors::NN
end
## properties
size(R::TSNE) = (R.d, size(R.proj, 1))
neighbors(R::TSNE) = R.p
## show
function summary(io::IO, R::TSNE)
id, od = size(R)
print(io, "t-SNE{$(R.nearestneighbors)}(indim = $id, outdim = $od, perplexity = $(R.p))")
end
## auxiliary
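# Binary search for per-point kernel precisions β (β = 1/σ²) such that each
# conditional distribution P(⋅|i) over the squared neighbor distances in `D`
# has entropy log(p), i.e. perplexity `p`; returns the conditional
# probabilities and the precisions.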
function perplexities(D::AbstractMatrix{T}, p::Real=30;
maxiter::Integer=50, tol::Real=1e-7) where {T<:Real}
k, n = size(D)
P = zeros(T, size(D))
βs = zeros(T, n)
Ĥ = log(p) # desired entropy
for (i, Dᵢ) in enumerate(eachcol(D))
Pᵢ = @view P[:,i]
β = 1 # precision β = 1/σ²
βmax, βmin = Inf, 0
ΔH = 0
for j in 1:maxiter
Pᵢ .= exp.(-β.*Dᵢ)
div∑Pᵢ = 1/sum(Pᵢ)
H = -log(div∑Pᵢ) + β*(Dᵢ'Pᵢ)*div∑Pᵢ
Pᵢ .*= div∑Pᵢ
ΔH = H - Ĥ
(abs(ΔH) < tol || β < eps()) && break
if ΔH > 0
βmin, β = β, isinf(βmax) ? β*2 : (β + βmax)/2
else
βmax, β = β, (β + βmin)/2
end
end
#abs(ΔH) > tol && println("P[$i]: perplexity error is above tolerance: $ΔH")
βs[i] = β
end
P, βs
end
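# Gradient descent on KL(P||Q) for the embedding `Y`, where Q uses a Student-t
# kernel with `r` degrees of freedom: runs an early-exaggeration exploration
# stage first, then switches to late-stage momentum, with per-coordinate
# adaptive gains (van der Maaten & Hinton, 2008).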
function optimize!(Y::AbstractMatrix{T}, P::AbstractMatrix{T}, r::Integer=1;
η::Real=200, exaggeration::Real=12,
tol::Real=1e-7, mingain::Real=0.01,
maxiter::Integer=100, exploreiter::Integer=250) where {T<:Real}
m, n = size(Y)
Q = zeros(T, (n*(n-1))>>1)
L = zeros(T, n, n)
∑Lᵢ = zeros(T, n)
∇C = zeros(T, m, n)
U = zeros(T, m, n)
G = fill!(similar(Y), 1)
∑P = max(sum(P), eps(T))
P .*= exaggeration/∑P
α = 0.5 # early exaggeration stage momentum
minerr = curerr = Inf
minitr = 0
for itr in 1:maxiter
# Student t-distribution: 1/(1+t²/r)^(r+1)/2
#@time pairwise!((x,y)->1+sum(abs2, x-y)/r, Q, eachcol(Y), skipdiagonal=true)
h = 1
@inbounds for i in 1:n, j in (i+1):n
s = 0
@simd for l in 1:m
s += abs2(Y[l,i]-Y[l,j])
end
Q[h] = 1+s/r
h += 1
end
Q .^= -(r+1)/2
∑Q = 2*sum(Q)
# KL[P||Q]
#@time curerr = 2*(P'*log.(P./Q))
# ∂C/∂Y = (2(r+1)/r)∑ⱼ (pᵢⱼ - qᵢⱼ)(yᵢ-yⱼ)/(1+||yᵢ-yⱼ||²)ʳ
#A = unpack((compact(P).-Q./∑Q).*Q, skipdiagonal=true) |> collect
k = 1
fill!(∑Lᵢ, 0)
@inbounds for i in 1:n
∑Lⱼ = 0
for j in (i+1):n
Qᵢⱼ = Q[k]
l = (P[i,j] - Qᵢⱼ ./ ∑Q)*Qᵢⱼ
L[i, j] = l
∑Lᵢ[j] += l
∑Lⱼ += l
k +=1
end
∑Lᵢ[i] += ∑Lⱼ
L[i,i] = -∑Lᵢ[i]
#ΔY = Y .- view(Y, :, i)
#∇C[:, i] .= ΔY*A[:, i]
end
BLAS.symm!('R', 'U', T(-2(r+1)/r), L, Y, zero(T), ∇C)
# update embedding
#@. U = α*U - η*G*∇C
#@. Y += U
@inbounds for (y, u, g, c) in zip(eachcol(Y), eachcol(U), eachcol(G), eachcol(∇C))
@. g = ifelse(u*c>0, max(g*0.8, mingain), g+0.2)
@. u = α*u - η*g*c
y .+= u
end
# switch off exploration stage
if exploreiter > 0 && itr >= min(maxiter, exploreiter)
P .*= 1/exaggeration
α = 0.8 # late stage momentum
exploreiter = 0
end
# convergence check
gnorm = norm(∇C)
gnorm < tol && break
#println("$itr: ||∇C||=$gnorm, min-gain: $(minimum(G))")
end
end
## interface functions
"""
fit(TSNE, data; p=30, maxoutdim=2, kwargs...)
Fit a t-SNE model to `data`.
# Arguments
* `data`: a matrix of observations. Each column of `data` is an observation.
# Keyword arguments
* `p`: a perplexity parameter (*default* `30`).
* `maxoutdim`: a dimension of the reduced space (*default* `2`).
* `maxiter`: a total number of iterations for the search algorithm (*default* `800`).
* `exploreiter`: a number of iterations for the exploration stage of the search algorithm (*default* `200`).
* `tol`: a tolerance threshold (*default* `1e-7`).
* `exaggeration`: a tightness control parameter between the original and the reduced space (*default* `12`).
* `initialize`: an initialization parameter for the embedding (*default* `:pca`).
* `rng`: a random number generator object for initialization of the initial embedding.
* `nntype`: a nearest neighbor construction class (derived from `AbstractNearestNeighbors`)
# Examples
```julia
M = fit(TSNE, rand(3,100)) # construct t-SNE model
R = predict(M) # perform dimensionality reduction
```
"""
function fit(::Type{TSNE}, X::AbstractMatrix{T}; p::Real=30, maxoutdim::Integer=2,
maxiter::Integer=800, exploreiter::Integer=200,
exaggeration::Real=12, tol::Real=1e-7, initialize::Symbol=:pca,
rng::AbstractRNG=default_rng(), nntype=BruteForce) where {T<:Real}
d, n = size(X)
k = min(n-1, round(Int, 3p))
# Construct NN graph
NN = fit(nntype, X)
# form distance matrix
D = adjacency_matrix(NN, X, k, symmetric=false)
D .^= 2 # sq. dists
I, J, V = findnz(D)
Ds = reshape(V, k, :)
# calculate perplexities & corresponding conditional probabilities matrix P
Px, βs = perplexities(Ds, p, tol=tol)
P = sparse(I, J, reshape(Px,:))
P .+= P' # symmetrize
P ./= max(sum(P), eps(T))
# form initial embedding and optimize it
Y = if initialize == :pca
predict(fit(PCA, X, maxoutdim=maxoutdim), X)
elseif initialize == :random
randn(rng, T, maxoutdim, n).*T(1e-4)
else
error("Uknown initialization method: $initialize")
end
dof = max(maxoutdim-1, 1)
optimize!(Y, P, dof; maxiter=maxiter, exploreiter=exploreiter, tol=tol,
exaggeration=exaggeration, η=max(n/exaggeration/4, 50))
return TSNE{nntype, T}(d, p, βs, Y, NN)
end
"""
predict(R::TSNE)
Transforms the data fitted to the t-SNE model `R` into a reduced space representation.
"""
predict(R::TSNE) = R.proj
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 8004 | """
knn(points::AbstractMatrix, k) -> distances, indices
Performs a lookup of the `k` nearest neighbours for each point in the `points`
dataset within the dataset itself, and returns distances to and indices of the neighbours.
*Note: Inefficient implementation that uses a full distance matrix. Not recommended for large datasets.*
"""
function knn(X::AbstractMatrix{T}, k::Int=12) where T<:Real
m, n = size(X)
@assert n > k "Number of observations must be more then $(k)"
r = Array{T}(undef, (n, n))
d = Array{T}(undef, k, n)
e = Array{Int}(undef, k, n)
mul!(r, transpose(X), X)
sa2 = sum(X.^2, dims=1)
@inbounds for j = 1 : n
@inbounds for i = 1 : j-1
r[i,j] = r[j,i]
end
r[j,j] = 0
@inbounds for i = j+1 : n
v = sa2[i] + sa2[j] - 2 * r[i,j]
r[i,j] = isnan(v) ? NaN : sqrt(max(v, 0.))
end
e[:, j] = sortperm(r[:,j])[2:k+1]
d[:, j] = r[e[:, j],j]
end
return (d, e)
end
"""
adjacency_matrix(A, W)
Returns a weighted adjacency matrix constructed from an adjacency list `A` and
weights `W`.
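# Examples
A small illustrative case:
```julia
A = [[2], [1]]              # adjacency list: point 1 ↔ point 2
W = [[0.5], [0.5]]          # corresponding edge weights
M = adjacency_matrix(A, W)  # 2×2 sparse symmetric matrix
```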
"""
function adjacency_matrix(A::AbstractArray{S},
W::AbstractArray{Q}) where {T<:Real,
S<:AbstractArray{<:Integer},
Q<:AbstractArray{T}}
@assert length(A) == length(W) "Weights and edge matrix must be of the same size"
n = length(A)
M = spzeros(T, n, n)
@inbounds for (j, (ii, ws)) in enumerate(zip(A, W))
M[ii, j] = ws
M[j, ii] = ws
end
return M
end
"""
Laplacian(A)
Construct Laplacian matrix `L` from the adjacency matrix `A`, s.t. ``L = D - A``
where ``D_{i,i} = \\sum_j A_{ji}``.
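# Examples
A small illustrative case:
```julia
A = [0 1; 1 0]
L, D = Laplacian(A)   # L == [1 -1; -1 1], D == Diagonal([1, 1])
```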
"""
function Laplacian(A::AbstractMatrix)
D = Diagonal(vec(sum(A, dims=1)))
return D - A, D
end
"""
    normalize!(L, [D]; α=1, norm=:rw)
Performs in-place normalization of the Laplacian `L` using the degree matrix `D`,
if provided, raised to the power `α`.
The `norm` parameter specifies normalization type:
- `:sym`: Laplacian `L` is symmetrically normalized, s.t. ``L_{sym} = D^{-\\alpha} L D^{-\\alpha}``.
- `:rw`: Laplacian `L` is random walk normalized, s.t. ``L_{rw} = D^{-\\alpha} L``.
where ``D`` is a diagonal matrix, s.t. ``D_{i,i} = \\sum_j L_{ji}``.
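# Examples
A small illustrative case:
```julia
L, D = Laplacian([0 2.0; 2.0 0])  # L == [2.0 -2.0; -2.0 2.0]
normalize!(L, D, α=1, norm=:rw)   # L == [1.0 -1.0; -1.0 1.0]
```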
"""
function normalize!(L::AbstractMatrix, D=Diagonal(vec(sum(L, dims=1)));
α::Real=1.0, norm=:rw)
D⁻¹ = Diagonal(1 ./ diag(D).^α)
if norm == :sym
rmul!(lmul!(D⁻¹, L),D⁻¹)
elseif norm == :rw
lmul!(D⁻¹, L)
else
error("Uknown normalization: $norm")
end
end
"Crate a graph with largest connected component of adjacency matrix `W`"
function largest_component(G)
CC = connected_components(G)
if length(CC) > 1
@warn "Found $(length(CC)) connected components. Largest component is selected."
C = CC[argmax(map(length, CC))]
G = first(induced_subgraph(G, C))
else
C = first(CC)
end
return G, C
end
"""
    swiss_roll(n::Int, noise::Real=0.03; segments=1)
Generate a swiss roll dataset of `n` points with point coordinate `noise` variance,
partitioned into `segments` labeled segments.
"""
function swiss_roll(n::Int = 1000, noise::Real=0.03; segments=1, hlims=(-10.0,10.0),
rng::AbstractRNG=default_rng())
t = (3 * pi / 2) * (1 .+ 2 * rand(rng, n, 1))
height = (hlims[2]-hlims[1]) * rand(rng, n, 1) .+ hlims[1]
X = [t .* cos.(t) height t .* sin.(t)]
X .+= noise * randn(rng, n, 3)
mn,mx = extrema(t)
labels = segments == 0 ? t : round.(Int, (t.-mn)./(mx-mn).*(segments-1))
return collect(transpose(X)), labels
end
"""
    spirals(n::Int, noise::Real=0.03; segments=1)
Generate a spirals dataset of `n` points with point coordinate `noise` variance.
"""
function spirals(n::Int = 1000, noise::Real=0.03; segments=1,
rng::AbstractRNG=default_rng())
t = collect(1:n) / n * 2π
height = 30 * rand(rng, n, 1)
    X = [cos.(t).*(.5cos.(6t).+1) sin.(t).*(.4cos.(6t).+1) 0.4sin.(6t)] + noise * randn(rng, n, 3)
labels = segments == 0 ? t : vec(rem.(sum([round.(Int, t / 2) round.(Int, height / 12)], dims=2), 2))
return collect(transpose(X)), labels
end
"""
    scurve(n::Int, noise::Real=0.03; segments=1)
Generate an S curve dataset of `n` points with point coordinate `noise` variance.
"""
function scurve(n::Int = 1000, noise::Real=0.03; segments=1,
rng::AbstractRNG=default_rng())
t = 3π*(rand(rng, n) .- 0.5)
x = sin.(t)
y = 2rand(rng, n)
z = sign.(t) .* (cos.(t) .- 1)
height = 30 * rand(rng, n, 1)
    X = [x y z] + noise * randn(rng, n, 3)
mn,mx = extrema(t)
labels = segments == 0 ? t : round.(Int, (t.-mn)./(mx-mn).*(segments-1))
return collect(transpose(X)), labels
end
"""
    pairwise!(f, dest, x; skipdiagonal=false)
Stores in the vector `dest`, treated as a packed upper-triangular matrix
representation, the result of applying `f` to all possible pairs of entries
in the iterator `x`.
"""
function pairwise!(f, dest::AbstractVector{T}, x;
skipdiagonal=false) where {T<:Real}
# check sizes
n = length(x)
l = length(dest)
nelem = (n*(n+(skipdiagonal ? -1 : 1)))>>1
l < nelem && throw(ArgumentError("Not enough elements in `dest`. Must be at least $nelem"))
k = 1
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(x)
(i > j || (skipdiagonal && i == j )) && continue
dest[k] = f(xi, yj)
k+=1
end
return dest
end
"""
    unpack(v [; skipdiagonal=false])
Return a symmetric matrix from a packed upper-triangular matrix stored
as the vector `v`.
"""
function unpack(v::AbstractVector{T}; skipdiagonal=false) where {T}
l = length(v)
nelem = (sqrt(8l+1)+1)/2
!isinteger(nelem) && throw(ArgumentError("Incorrect input size"))
n = Int(nelem)-!skipdiagonal
A = zeros(T, n, n)
k=1
for i in 1:(n-skipdiagonal), j in (i+skipdiagonal):n
A[i,j] = v[k]
k+=1
end
return Symmetric(A)
end
"""
    compact(S)
Return the packed upper-triangular part of the symmetric matrix `S` as a vector.
"""
function compact(S::AbstractMatrix{T}) where {T}
n = size(S,1)
C = zeros(T, (n*(n-1))>>1)
compact!(C, S)
end
function compact!(C::AbstractVector{T}, S::AbstractMatrix{T}) where {T}
n = size(S,1)
k = 1
for i in 1:n-1
for j in i+1:n
C[k] = S[i,j]
k+=1
end
end
return C
end
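# Illustrative example (comments only), assuming a 2×2 symmetric matrix:
#   S = [1.0 2.0; 2.0 3.0]
#   v = compact(S)                  # packed upper triangle (no diagonal) -> [2.0]
#   unpack(v, skipdiagonal=true)    # -> Symmetric([0.0 2.0; 2.0 0.0])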
"""
sparse(E, W, n)
Construct a sparse weighted adjacency `(n,n)`-matrix from the adjacency list `E`
and weights `W`.
"""
function sparse(E::AbstractVector{TI}, W::AbstractVector{TV}, n::Integer;
symmetric::Bool=true) where {TI<:AbstractVector{<:Integer},
TV<:AbstractVector{<:Real}}
# check sizes
k, l = length(E), length(W)
k != l && throw(ArgumentError("Incorrect input size"))
# construct sp matrix
A = spzeros(eltype(eltype(W)), n, n)
@inbounds for (j,(es, ds)) in enumerate(zip(E, W))
A[es, j] .= ds
if symmetric
A[j, es] .= ds
end
end
return A
end
"Perform spectral decomposition for Ax=λI"
function decompose(M::AbstractMatrix{<:Real}, d::Int; rev=false, skipfirst=true)
W = isa(M, AbstractSparseMatrix) ? Symmetric(Matrix(M)) : Symmetric(M)
F = eigen!(W)
rng = 1:d
idx = sortperm(F.values, rev=rev)[skipfirst ? rng.+1 : rng]
return F.values[idx], F.vectors[:,idx]
end
"Perform spectral decomposition for Ax=λB"
function decompose(A::AbstractMatrix{<:Real}, B::AbstractMatrix{<:Real}, d::Int; rev=false)
AA = isa(A, AbstractSparseMatrix) ? Symmetric(Matrix(A)) : Symmetric(A)
BB = isa(B, AbstractSparseMatrix) ? Symmetric(Matrix(B)) : Symmetric(B)
F = eigen(AA, BB)
idx = sortperm(F.values, rev=rev)[2:d+1]
return F.values[idx], F.vectors[:,idx]
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | code | 5438 | using ManifoldLearning
using Test
using Statistics
using StableRNGs
rng = StableRNG(83743871)
@testset "Nearest Neighbors" begin
# setup parameters
k = 12
X, _ = ManifoldLearning.swiss_roll(100, rng=rng)
DD, EE = ManifoldLearning.knn(X,k)
@test_throws AssertionError ManifoldLearning.knn(zeros(3,10), k)
NN = fit(ManifoldLearning.BruteForce, X)
A = ManifoldLearning.adjacency_matrix(NN, X, k)
@test size(X,2) == size(A,2)
@test A ≈ ManifoldLearning.adjacency_matrix(collect(eachcol(EE)), collect(eachcol(DD)))
E, W = ManifoldLearning.adjacency_list(NN, X, k, weights=true)
@test size(X,2) == length(W) && length(W[1]) == k
@test size(X,2) == length(E) && length(E[1]) == k
@test hcat(E...) == EE
@test hcat(W...) ≈ DD
@test A ≈ ManifoldLearning.adjacency_matrix(E, W)
A = ManifoldLearning.adjacency_matrix(NN, X[:,1:k+1], k)
@test size(X,2) == size(A,2)
@test sum(A[k+2:end,k+2:end]) == 0
A = ManifoldLearning.adjacency_matrix(NN, X, k/7)
@test size(X,2) == size(A,2)
@test maximum(A) <= k/7
E, W = ManifoldLearning.adjacency_list(NN, X, k/7, weights=true)
@test size(X,2) == length(W)
@test size(X,2) == length(E)
@test maximum(Iterators.flatten(W)) <= k/7
@test_throws AssertionError ManifoldLearning.adjacency_matrix(NN, X, 101)
@test_throws AssertionError ManifoldLearning.adjacency_list(NN, X, 101)
n = 5
ker = (x,y)->x'y
D = ManifoldLearning.pairwise(ker, eachcol(X[:,1:n]))
d = zeros(10)
@test_throws ArgumentError ManifoldLearning.pairwise!(ker, d, eachcol(X))
ManifoldLearning.pairwise!(ker, d, eachcol(X[:,1:n]), skipdiagonal=true)
@testset for i in 1:n, j in i+1:n
k = n*(i-1) - (i*(i+1))>>1 + j
@test D[i,j] ≈ d[k]
end
S = ManifoldLearning.unpack(d, skipdiagonal=true)
@test iszero(D-S-D.*[i==j ? 1.0 : 0.0 for i in 1:n, j in 1:n])
d = zeros(15)
ManifoldLearning.pairwise!(ker, d, eachcol(X[:,1:n]), skipdiagonal=false)
@testset for i in 1:n, j in i:n
k = -((i-2n)*(i-1))>>1 + j
@test D[i,j] ≈ d[k]
end
S = ManifoldLearning.unpack(d, skipdiagonal=false)
@test iszero(D-S)
end
@testset "Laplacian" begin
A = [0 1 0; 1 0 1; 0 1 0.0]
L, D = ManifoldLearning.Laplacian(A)
@test [D[i,i] for i in 1:3] == [1, 2, 1]
@test D-L == A
Lsym = ManifoldLearning.normalize!(copy(L), D; α=1/2, norm=:sym)
@test Lsym ≈ [1 -√.5 0; -√.5 1 -√.5; 0 -√.5 1]
Lrw = ManifoldLearning.normalize!(copy(L), D; α=1, norm=:rw)
@test Lrw ≈ [1 -1 0; -0.5 1 -0.5; 0 -1 1]
end
@testset "Manifold Learning" begin
# setup parameters
k = 12
n = 50
d = 2
X, L = ManifoldLearning.swiss_roll(n; rng=rng)
# test algorithms
@testset for algorithm in [Isomap, LEM, LLE, HLLE, LTSA, DiffMap, TSNE]
#print("$algorithm ")
for (k, T) in zip([5, 12], [Float32, Float64])
X = convert(Matrix{T}, X)
# construct KW parameters
kwargs = [:maxoutdim=>d]
if algorithm === DiffMap
push!(kwargs, :t => k)
elseif algorithm === TSNE
push!(kwargs, :p => k)
else
push!(kwargs, :k => k)
end
# call transformation
M = fit(algorithm, X; kwargs...)
Y = predict(M)
# basic test
@test size(M) == (3, d)
if k == 5 && (algorithm === LLE || algorithm === LTSA)
@test size(Y, 2) < n
else
@test size(Y, 2) == n
end
@test size(Y,1) == d
@test eltype(Y) === T
@test size(M) == (3, d)
@test length(split(sprint(show, M), '\n')) > 1
# additional options
if algorithm !== DiffMap && algorithm !== TSNE
@test neighbors(M) == k
@test length(vertices(M)) > 1
end
if algorithm !== TSNE
@test length(eigvals(M)) == d
end
if algorithm === LEM
@testset for L in [:sym, :rw]
Y = fit(algorithm, X; laplacian=L, kwargs...) |> predict
@test size(Y, 2) == n
@test eltype(Y) === T
end
end
if algorithm === DiffMap
# test if we provide pre-computed Gram matrix
kernel = (x, y) -> exp(-sum((x .- y) .^ 2)) # default kernel
custom_K = ManifoldLearning.pairwise(kernel, eachcol(X), symmetric=true)
M_custom_K = fit(algorithm, custom_K; kernel=nothing, kwargs...)
@test isnan(size(M_custom_K)[1])
@test predict(M_custom_K) ≈ Y
@testset for α in [0, 0.5, 1.0], ε in [1.0, Inf]
Y = predict(fit(DiffMap, X, α=α, ε=ε))
@test all(.!isnan.(Y))
@test size(Y, 2) == size(X, 2)
@test eltype(Y) === T
end
end
end
end
end
@testset "OOS" begin
n = 200
k = 5
d = 10
ϵ = 0.01
X, _ = ManifoldLearning.swiss_roll(n ;rng=rng)
M = fit(Isomap, X; k=k, maxoutdim=d)
@test all(sum(abs2, predict(M) .- predict(M,X), dims=1) .< eps())
XX = X + ϵ*randn(rng, size(X))
@test sqrt(mean((predict(M) - predict(M,XX)).^2)) < 2ϵ
end
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 2645 | # ManifoldLearning
*A Julia package for manifold learning and nonlinear dimensionality reduction.*
| **Documentation** | **Build Status** |
|:----------------------------------------------------------------------------:|:-----------------------------------------------------------------:|
| [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] | [![][CI-img]][CI-url] [![][coveralls-img]][coveralls-url] |
## Methods
- Isomap
- Diffusion maps
- Locally Linear Embedding (LLE)
- Hessian Eigenmaps (HLLE)
- Laplacian Eigenmaps (LEM)
- Local tangent space alignment (LTSA)
- t-Distributed Stochastic Neighborhood Embedding (t-SNE)
## Installation
The package can be installed with the Julia package manager.
From the Julia REPL, type `]` to enter the Pkg REPL mode and run:
```
pkg> add ManifoldLearning
```
## Examples
A simple example of using the *Isomap* reduction method.
```julia
julia> X, _ = ManifoldLearning.swiss_roll();
julia> X
3×1000 Array{Float64,2}:
-3.19512 3.51939 -0.0390153 … -9.46166 3.44159
29.1222 9.99283 2.25296 25.1417 28.8007
-10.1861 6.59074 -11.037 -1.04484 13.4034
julia> M = fit(Isomap, X)
Isomap(outdim = 2, neighbors = 12)
julia> Y = predict(M)
2×1000 Array{Float64,2}:
11.0033 -13.069 16.7116 … -3.26095 25.7771
18.4133 -6.2693 10.6698 20.0646 -24.8973
```
## Performance
Most of the methods use the *k*-nearest neighbors method for constructing a local subspace representation. By default, neighbors are computed from a *distance matrix* of the dataset. This is not an efficient approach, especially for large datasets.
Consider using a custom *k*-nearest neighbors function, e.g. from [NearestNeighbors.jl](https://github.com/KristofferC/NearestNeighbors.jl) or [FLANN.jl](https://github.com/wildart/FLANN.jl).
See example of custom `knn` function [here](misc/nearestneighbors.jl).
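For instance, assuming a `KDTreeNN` wrapper type like the one in that example, and assuming the method accepts an `nntype` keyword (as `fit(TSNE, …)` does), a fit might look like:
```julia
# KDTreeNN is the hypothetical wrapper from misc/nearestneighbors.jl
M = fit(Isomap, X; k=12, nntype=KDTreeNN)
```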
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://wildart.github.io/ManifoldLearning.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://wildart.github.io/ManifoldLearning.jl/dev
[CI-img]: https://github.com/wildart/ManifoldLearning.jl/actions/workflows/CI.yml/badge.svg
[CI-url]: https://github.com/wildart/ManifoldLearning.jl/actions/workflows/CI.yml
[coveralls-img]: https://coveralls.io/repos/github/wildart/ManifoldLearning.jl/badge.svg?branch=master
[coveralls-url]: https://coveralls.io/r/wildart/ManifoldLearning.jl?branch=master
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 816 | # Datasets
```@setup EG
using Plots, ManifoldLearning
gr(fmt=:svg)
```
The __ManifoldLearning__ package provides the following synthetic test datasets:
```@docs
ManifoldLearning.swiss_roll
```
```@example EG
X, L = ManifoldLearning.swiss_roll(segments=5); #hide
scatter3d(X[1,:], X[2,:], X[3,:], c=L.+2, palette=cgrad(:default), ms=2.5, leg=:none, camera=(10,10)) #hide
```
```@docs
ManifoldLearning.spirals
```
```@example EG
X, L = ManifoldLearning.spirals(segments=5); #hide
scatter3d(X[1,:], X[2,:], X[3,:], c=L.+2, palette=cgrad(:default), ms=2.5, leg=:none, camera=(10,10)) #hide
```
```@docs
ManifoldLearning.scurve
```
```@example EG
X, L = ManifoldLearning.scurve(segments=5); #hide
scatter3d(X[1,:], X[2,:], X[3,:], c=L.+2, palette=cgrad(:default), ms=2.5, leg=:none, camera=(10,10)) #hide
```
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 829 | # Diffusion maps
The [diffusion maps](http://en.wikipedia.org/wiki/Diffusion_map) method leverages the relationship between heat diffusion and a random walk: an analogy is drawn between the diffusion operator on a manifold and a Markov transition matrix operating on functions defined on the graph whose nodes were sampled from the manifold [^1].
This package defines a [`DiffMap`](@ref) type to represent diffusion map results, and provides a set of methods to access its properties.
```@docs
DiffMap
fit(::Type{DiffMap}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::DiffMap)
ManifoldLearning.kernel(R::DiffMap)
```
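A short usage sketch (the keyword values below are illustrative; see the `fit` docstring above for defaults):
```julia
using ManifoldLearning
X, _ = ManifoldLearning.swiss_roll()
M = fit(DiffMap, X; maxoutdim=2, t=1, α=0.5)  # t diffusion steps, α kernel normalization
Y = predict(M)                                # 2×n embedding
```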
## References
[^1]: Coifman, R. & Lafon, S. "Diffusion maps". Applied and Computational Harmonic Analysis, Elsevier, 2006, 21, 5-30. DOI:[10.1073/pnas.0500334102](http://dx.doi.org/doi:10.1073/pnas.0500334102)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 860 | # Hessian Eigenmaps
The Hessian Eigenmaps (Hessian LLE, HLLE) method adapts the weights in [`LLE`](@ref) to minimize the [Hessian](http://en.wikipedia.org/wiki/Hessian_matrix) operator. Like [`LLE`](@ref), it requires careful setting of the nearest neighbor parameter. The main advantage of Hessian LLE is that it is the only method designed for non-convex data sets [^1].
This package defines an [`HLLE`](@ref) type to represent Hessian LLE results, and provides a set of methods to access its properties.
```@docs
HLLE
fit(::Type{HLLE}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::HLLE)
```
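A short, illustrative usage sketch (parameter values are arbitrary; HLLE is sensitive to the choice of `k`):
```julia
using ManifoldLearning
X, _ = ManifoldLearning.swiss_roll()
M = fit(HLLE, X; k=12, maxoutdim=2)
Y = predict(M)   # 2×n embedding
```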
# References
[^1]: Donoho, D. and Grimes, C. "Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data", Proc. Natl. Acad. Sci. USA. 2003 May 13; 100(10): 5591–5596. DOI:[10.1073/pnas.1031596100](http://dx.doi.org/doi:10.1073/pnas.1031596100)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 1592 | # ManifoldLearning.jl
The package __ManifoldLearning__ aims to provide a library for manifold learning
and nonlinear dimensionality reduction. It provides a set of nonlinear dimensionality
reduction methods, such as [`Isomap`](@ref), [`LLE`](@ref), and [`LTSA`](@ref).
## Getting started
To install the package just type
```julia
] add ManifoldLearning
```
```@setup EG
using Plots
gr(fmt=:svg)
```
The following example shows how to apply the [`Isomap`](@ref) dimensionality reduction method
to the built-in S curve dataset.
```@example EG
using ManifoldLearning
X, L = ManifoldLearning.scurve(segments=5);
scatter3d(X[1,:], X[2,:], X[3,:], c=L,palette=cgrad(:default),ms=2.5,leg=:none,camera=(10,10))
```
Now, we perform the dimensionality reduction procedure and plot the resulting dataset:
```@example EG
Y = predict(fit(Isomap, X))
scatter(Y[1,:], Y[2,:], c=L, palette=cgrad(:default), ms=2.5, leg=:none)
```
The following dimensionality reduction methods are implemented in this package:
| Methods | Description |
|:--------|:------------|
|[`Isomap`](@ref)| Isometric mapping |
|[`LLE`](@ref)| Locally Linear Embedding |
|[`HLLE`](@ref)| Hessian Eigenmaps |
|[`LEM`](@ref)| Laplacian Eigenmaps |
|[`LTSA`](@ref)| Local Tangent Space Alignment |
|[`DiffMap`](@ref)| Diffusion maps |
|[`TSNE`](@ref)| t-Distributed Stochastic Neighborhood Embedding |
**Notes:** All methods implemented in this package adopt the column-major convention of JuliaStats: in a data matrix, each column corresponds to a sample/observation, while each row corresponds to a feature (variable or attribute).
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 2586 | # Programming interface
The interface of the manifold learning methods in this package is partially adopted from the packages [StatsAPI](https://github.com/JuliaStats/StatsAPI.jl),
[MultivariateStats.jl](https://github.com/JuliaStats/MultivariateStats.jl) and [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl).
You can implement additional dimensionality reduction algorithms by implementing the following interface.
## Dimensionality Reduction
The following functions are currently available from the interface.
`NonlinearDimensionalityReduction` is the abstract type required for all
implemented algorithm models.
```@docs
ManifoldLearning.NonlinearDimensionalityReduction
```
For performing the data dimensionality reduction procedure, a model of the data
is constructed by calling the [`fit`](@ref) method, and the transformation of
the data given the model is done by the [`predict`](@ref) method.
```@docs
fit(::Type{ManifoldLearning.NonlinearDimensionalityReduction}, X::AbstractMatrix)
predict(R::ManifoldLearning.NonlinearDimensionalityReduction)
```
There are auxiliary methods that allow inspecting properties of the constructed model.
```@docs
size(R::ManifoldLearning.NonlinearDimensionalityReduction)
eigvals(R::ManifoldLearning.NonlinearDimensionalityReduction)
vertices(R::ManifoldLearning.NonlinearDimensionalityReduction)
neighbors(R::ManifoldLearning.NonlinearDimensionalityReduction)
```
## Nearest Neighbors
An additional interface is available for creating an implementation of a nearest
neighbors algorithm, which is commonly used for dimensionality reduction methods.
Use the `AbstractNearestNeighbors` abstract type to derive a type for a new
implementation.
```@docs
ManifoldLearning.AbstractNearestNeighbors
```
The above interface requires implementation of the following methods:
```@docs
ManifoldLearning.knn(NN::ManifoldLearning.AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Integer) where T<:Real
ManifoldLearning.inrange(NN::ManifoldLearning.AbstractNearestNeighbors, X::AbstractVecOrMat{T}, r::Real) where T<:Real
```
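As a rough illustration, a wrapper around a `KDTree` from [NearestNeighbors.jl](https://github.com/KristofferC/NearestNeighbors.jl) might look as follows. This is a hedged sketch, not the package's reference example (see `misc/nearestneighbors.jl` in the repository): the type name `KDTreeNN` is hypothetical, and the return convention of per-point index and distance vectors is assumed to match the bundled `BruteForce` type.
```julia
using ManifoldLearning, NearestNeighbors
import LinearAlgebra: norm

# Hypothetical wrapper around NearestNeighbors.KDTree.
struct KDTreeNN <: ManifoldLearning.AbstractNearestNeighbors
    fitted::KDTree
end
ManifoldLearning.fit(::Type{KDTreeNN}, X::AbstractMatrix{<:Real}) = KDTreeNN(KDTree(X))

# Assumed return convention (matching the bundled `BruteForce`): a vector of
# neighbor-index vectors and a vector of distance vectors, one pair per point.
# Queries are assumed to be the fitted points themselves when `self=false`.
function ManifoldLearning.knn(NN::KDTreeNN, X::AbstractMatrix{T}, k::Integer;
                              self::Bool=false, kwargs...) where {T<:Real}
    A, D = NearestNeighbors.knn(NN.fitted, X, k + !self, true) # sorted by distance
    self && return A, D
    return map(a -> a[2:end], A), map(d -> d[2:end], D)        # drop the point itself
end

function ManifoldLearning.inrange(NN::KDTreeNN, X::AbstractMatrix{T}, r::Real;
                                  kwargs...) where {T<:Real}
    A = NearestNeighbors.inrange(NN.fitted, X, r)
    # accesses KDTree internals for brevity; a real implementation may differ
    D = [T[norm(NN.fitted.data[i] - p) for i in a] for (a, p) in zip(A, eachcol(X))]
    return A, D
end
```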
The following auxiliary methods are available for any
`AbstractNearestNeighbors`-derived type:
```@docs
ManifoldLearning.adjacency_list(NN::ManifoldLearning.AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Integer) where T<:Real
ManifoldLearning.adjacency_matrix(NN::ManifoldLearning.AbstractNearestNeighbors, X::AbstractVecOrMat{T}, k::Integer) where T<:Real
```
The default implementation uses an inefficient ``O(n^2)`` algorithm for nearest
neighbor calculations.
```@docs
ManifoldLearning.BruteForce
```
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 741 | # Isomap
[Isomap](http://en.wikipedia.org/wiki/Isomap) is a method for computing a quasi-isometric, low-dimensional embedding of a set of high-dimensional data points[^1].
This package defines an [`Isomap`](@ref) type to represent Isomap calculation results, and provides a set of methods to access its properties.
```@docs
Isomap
fit(::Type{Isomap}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::Isomap)
predict(R::Isomap, X::Union{AbstractArray{T,1}, AbstractArray{T,2}}) where T<:Real
```
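A short usage sketch; `predict` with a second argument performs an out-of-sample embedding (parameter values below are illustrative):
```julia
using ManifoldLearning
X, _ = ManifoldLearning.swiss_roll()
M = fit(Isomap, X; k=12, maxoutdim=2)
Y = predict(M)                 # embedding of the fitted data
Ynew = predict(M, X[:, 1:10])  # out-of-sample embedding
```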
## References
[^1]: Tenenbaum, J. B., de Silva, V. and Langford, J. C. "A Global Geometric Framework for Nonlinear Dimensionality Reduction". Science 290 (5500): 2319-2323, 22 December 2000.
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 964 | # Laplacian Eigenmaps
The [Laplacian Eigenmaps](https://en.wikipedia.org/wiki/Nonlinear_dimensionality_reduction#Laplacian_eigenmaps) (LEM) method uses spectral techniques to perform dimensionality reduction. This technique relies on the basic assumption that the data lies in a low-dimensional manifold in a high-dimensional space. The algorithm provides a computationally efficient approach to non-linear dimensionality reduction that has locality-preserving properties [^1].
This package defines an [`LEM`](@ref) type to represent Laplacian eigenmaps results, and provides a set of methods to access its properties.
```@docs
LEM
fit(::Type{LEM}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::LEM)
```
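A short usage sketch; the `laplacian` keyword selects the normalization (values below are illustrative):
```julia
using ManifoldLearning
X, _ = ManifoldLearning.swiss_roll()
M = fit(LEM, X; k=12, maxoutdim=2, laplacian=:sym)  # or laplacian=:rw
Y = predict(M)
```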
## References
[^1]: Belkin, M. and Niyogi, P. "Laplacian Eigenmaps for Dimensionality Reduction and Data Representation". Neural Computation, June 2003; 15 (6):1373-1396. DOI:[10.1162/089976603321780317](http://dx.doi.org/doi:10.1162/089976603321780317)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 793 | # Locally Linear Embedding
The [Locally Linear Embedding](http://en.wikipedia.org/wiki/Locally_linear_embedding#Locally-linear_embedding) (LLE) technique builds a single global coordinate system of lower dimensionality. By exploiting the local symmetries of linear reconstructions, LLE is able to learn the global structure of nonlinear manifolds [^1].
This package defines an [`LLE`](@ref) type to represent LLE results, and provides a set of methods to access its properties.
```@docs
LLE
fit(::Type{LLE}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::LLE)
```
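A short, illustrative usage sketch:
```julia
using ManifoldLearning
X, _ = ManifoldLearning.spirals()
M = fit(LLE, X; k=12, maxoutdim=2)
Y = predict(M)
```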
## References
[^1]: Roweis, S. & Saul, L. "Nonlinear dimensionality reduction by locally linear embedding", Science 290:2323 (2000). DOI:[10.1126/science.290.5500.2323] (http://dx.doi.org/doi:10.1126/science.290.5500.2323)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 895 | # Local Tangent Space Alignment
[Local tangent space alignment](http://en.wikipedia.org/wiki/Local_tangent_space_alignment) (LTSA) is a method for manifold learning, which can efficiently learn a nonlinear embedding into low-dimensional coordinates from high-dimensional data, and can also reconstruct high-dimensional coordinates from embedding coordinates [^1].
This package defines an [`LTSA`](@ref) type to represent local tangent space alignment results, and provides a set of methods to access its properties.
```@docs
LTSA
fit(::Type{LTSA}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::LTSA)
```
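A short, illustrative usage sketch:
```julia
using ManifoldLearning
X, _ = ManifoldLearning.scurve()
M = fit(LTSA, X; k=12, maxoutdim=2)
Y = predict(M)
```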
## References
[^1]: Zhang, Zhenyue; Hongyuan Zha. "Principal Manifolds and Nonlinear Dimension Reduction via Local Tangent Space Alignment". SIAM Journal on Scientific Computing 26 (1): 313–338, 2004. DOI:[10.1137/s1064827502419154](http://dx.doi.org/doi:10.1137/s1064827502419154)
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.9.0 | 4c5564c707899c3b6bc6d324b05e43eb7f277f2b | docs | 1161 | # t-Distributed Stochastic Neighborhood Embedding
The [`t`-Distributed Stochastic Neighborhood Embedding (t-SNE)](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) is a statistical dimensionality reduction
method, based on the original SNE[^1] method with a t-distributed variant[^2].
The method constructs a probability distribution over pairwise distances in
the data's original space, and then optimizes a similar probability distribution of
the pairwise distances of the low-dimensional embedding of the data by minimizing
the [Kullback-Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the two distributions.
This package defines a [`TSNE`](@ref) type to represent a t-SNE model, and provides
a set of methods to access its properties.
```@docs
TSNE
fit(::Type{TSNE}, X::AbstractArray{T,2}) where {T<:Real}
predict(R::TSNE)
```
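A short usage sketch; `p` is the target perplexity (values below are illustrative):
```julia
using ManifoldLearning
X, _ = ManifoldLearning.swiss_roll()
M = fit(TSNE, X; p=30, maxoutdim=2, maxiter=800)
Y = predict(M)
```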
# References
[^1]: Hinton, G. E., & Roweis, S. (2002). Stochastic neighbor embedding. Advances in neural information processing systems, 15.
[^2]: Van der Maaten, L., & Hinton, G. (2008). Visualizing data using t-SNE. Journal of machine learning research, 9(11).
| ManifoldLearning | https://github.com/wildart/ManifoldLearning.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 2120 | using BenchmarkTools, ConstrainedSystems
Ns, Nc = 1000, 100
A,B2,B1t,C,rhs1v,rhs2v,solex = ConstrainedSystems.basic_linalg_problem(Ns=Ns,Nc=Nc);
Δt = 1.0e-2
prob1, xexact, yexact = ConstrainedSystems.basic_constrained_problem(tmax=3Δt)
prob2, xexact, yexact = ConstrainedSystems.cartesian_pendulum_problem(tmax=3Δt)
BenchmarkTools.DEFAULT_PARAMETERS.gcsample = true
SUITE = BenchmarkGroup()
SUITE["saddle setup"] = BenchmarkGroup()
SUITE["saddle solve"] = BenchmarkGroup()
rhs = SaddleVector(rhs1v,rhs2v)
sol = similar(rhs)
SUITE["saddle setup"]["basic problem"] = @benchmarkable ConstrainedSystems.SaddleSystem(A,B2,B1t,C,rhs)
As = ConstrainedSystems.SaddleSystem(A,B2,B1t,C,rhs)
SUITE["saddle solve"]["basic problem"] = @benchmarkable sol .= As\rhs
SUITE["timemarch setup"] = BenchmarkGroup()
SUITE["timemarch basic problem"] = BenchmarkGroup()
SUITE["timemarch pendulum problem"] = BenchmarkGroup()
SUITE["timemarch setup"]["IFHEEuler"] = @benchmarkable ConstrainedSystems.init(prob2, IFHEEuler(),dt=Δt)
SUITE["timemarch setup"]["LiskaIFHERK"] = @benchmarkable ConstrainedSystems.init(prob2, LiskaIFHERK(),dt=Δt)
integrator11 = ConstrainedSystems.init(prob1, IFHEEuler(),dt=Δt)
integrator12 = ConstrainedSystems.init(prob1, LiskaIFHERK(),dt=Δt)
integrator21 = ConstrainedSystems.init(prob2, IFHEEuler(),dt=Δt)
integrator22 = ConstrainedSystems.init(prob2, LiskaIFHERK(),dt=Δt)
#SUITE["basic problem"]["LiskaIFHERK"] = @benchmarkable solve(prob1, LiskaIFHERK(),dt=Δt)
#SUITE["basic problem"]["IFHEEuler"] = @benchmarkable solve(prob1, IFHEEuler(),dt=Δt)
#SUITE["pendulum problem"]["LiskaIFHERK"] = @benchmarkable solve(prob2, LiskaIFHERK(),dt=Δt)
#SUITE["pendulum problem"]["IFHEEuler"] = @benchmarkable solve(prob2, IFHEEuler(),dt=Δt)
SUITE["timemarch basic problem"]["IFHEEuler"] = @benchmarkable step!(integrator11,Δt)
SUITE["timemarch basic problem"]["LiskaIFHERK"] = @benchmarkable step!(integrator12,Δt)
SUITE["timemarch pendulum problem"]["IFHEEuler"] = @benchmarkable step!(integrator21,Δt)
SUITE["timemarch pendulum problem"]["LiskaIFHERK"] = @benchmarkable step!(integrator22,Δt)
run(SUITE)
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 989 | using Documenter, ConstrainedSystems
makedocs(
sitename = "ConstrainedSystems.jl",
doctest = true,
clean = true,
pages = [
"Home" => "index.md",
"Manual" => ["manual/saddlesystems.md",
"manual/timemarching.md",
"manual/methods.md"
]
#"Internals" => [ "internals/properties.md"]
],
#format = Documenter.HTML(assets = ["assets/custom.css"])
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
mathengine = MathJax(Dict(
:TeX => Dict(
:equationNumbers => Dict(:autoNumber => "AMS"),
:Macros => Dict()
)
))
),
#assets = ["assets/custom.css"],
#strict = true
)
#if "DOCUMENTER_KEY" in keys(ENV)
deploydocs(
repo = "github.com/JuliaIBPM/ConstrainedSystems.jl.git",
target = "build",
deps = nothing,
make = nothing
#versions = "v^"
)
#end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 1427 | module ConstrainedSystems
using LinearMaps
#using KrylovKit
using RecursiveArrayTools
#using IterativeSolvers
#using UnPack
using Reexport
@reexport using OrdinaryDiffEq
import OrdinaryDiffEq: OrdinaryDiffEqAlgorithm, alg_order, alg_cache,
OrdinaryDiffEqMutableCache, OrdinaryDiffEqConstantCache,
initialize!, perform_step!, @muladd, @unpack, constvalue,
full_cache, @..
import OrdinaryDiffEq.DiffEqBase: AbstractDiffEqLinearOperator,
DEFAULT_UPDATE_FUNC, has_exp,
AbstractODEFunction, isinplace, numargs
import LinearMaps: LinearMap, FunctionMap
import RecursiveArrayTools: recursivecopy, recursivecopy!, recursive_mean
using LinearAlgebra
import LinearAlgebra: ldiv!, mul!, *, \, I
import Base: size, eltype, *, /, +, -
export SaddleSystem, SaddleVector, state, constraint, aux_state, linear_map
export constraint_from_state!
export solvector, mainvector
export SchurSolverType, Direct, CG, GMRES, Iterative
include("vectors.jl")
include("saddlepoint/saddlesystems.jl")
include("saddlepoint/linearmaps.jl")
include("saddlepoint/arithmetic.jl")
include("saddlepoint/testproblems.jl")
include("timemarching/types.jl")
include("timemarching/misc_utils.jl")
include("timemarching/timesaddlesystems.jl")
include("timemarching/algorithms.jl")
include("timemarching/testproblems.jl")
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 2455 | ### Right-hand side and solution vectors
const SaddleVector = ArrayPartition
"""
SaddleVector(u,f)
Construct a vector of a state part `u` and constraint part `f` of a
saddle-point vector, to be associated with a [`SaddleSystem`](@ref).
"""
function SaddleVector end
"""
solvector([;state=][,constraint=][,aux_state=])
Build a solution vector for a constrained system. This takes three optional keyword
arguments: `state`, `constraint`, and `aux_state`. If only a state is supplied,
then the constraint is set to an empty vector and the system is assumed to
correspond to an unconstrained system. (`aux_state` is ignored in this situation.)
"""
solvector(;state=nothing,constraint=nothing,aux_state=nothing) = _solvector(state,constraint,aux_state)
_solvector(::Nothing,::Nothing,::Nothing) = nothing
_solvector(s,::Nothing,::Nothing) = ArrayPartition(s,_empty(s))
_solvector(s,::Nothing,aux) = ArrayPartition(s,_empty(s))
_solvector(s,c,::Nothing) = ArrayPartition(s,c)
#_solvector(s,c,aux) = ArrayPartition(_solvector(s,c,nothing),aux)
_solvector(s,c,aux) = ArrayPartition(s,c,aux)
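# Illustrative example (comments only):
#   u = solvector(state=zeros(3), constraint=zeros(1))
#   state(u)       # -> the zeros(3) part
#   constraint(u)  # -> the zeros(1) part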
mainvector(u) = u
mainvector(u::ArrayPartition) = ArrayPartition(u.x[1],u.x[2])
_empty(s) = Vector{eltype(s)}()
"""
state(x::SaddleVector)
Provide the state part of the given saddle vector `x`
"""
state(u::ArrayPartition) = mainvector(u).x[1]
state(u) = u
"""
constraint(x::SaddleVector)
Provide the constraint part of the given saddle vector `x`
"""
constraint(u::ArrayPartition) = mainvector(u).x[2]
constraint(u) = eltype(u)[]
"""
aux_state(x)
Provide the auxiliary state part of the given vector `x`
"""
aux_state(u) = nothing # Array{eltype(u)}(undef,0,0)
aux_state(u::ArrayPartition{T,Tuple{F1,F2,F3}}) where {T,F1,F2,F3} = u.x[3]
for f in (:state,:constraint,:aux_state)
@eval $f(a::AbstractArray{T}) where {T<:ArrayPartition} = map($f,a)
end
#SaddleVector(u::TU,f::TF) where {TU,TF} = ArrayPartition(u,f)
"""
r1vector([;state_r1=][,aux_r1=])
Build a vector of the `r1` functions for the state ODEs and auxiliary state ODEs.
"""
r1vector(;state_r1=nothing,aux_r1=nothing) = _r1vector(state_r1,aux_r1)
_r1vector(::Nothing,::Nothing) = nothing
_r1vector(s,::Nothing) = s
_r1vector(::Nothing,a) = nothing
_r1vector(s,a) = ArrayPartition((s,a))
hasaux(r1) = false
hasaux(r1::ArrayPartition) = true
state_r1(r1) = r1
state_r1(r1::ArrayPartition) = r1.x[1]
aux_r1(r1) = nothing
aux_r1(r1::ArrayPartition) = r1.x[2]
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 5803 | ### ARITHMETIC OPERATIONS
function mul!(output::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}},sys::SaddleSystem{T,Ns,Nc},
input::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}}) where {T,Ns,Nc}
@unpack A, B₂, B₁ᵀ, C = sys
u,f = input
r₁,r₂ = output
length(u) == length(r₁) == Ns || error("Incompatible number of elements")
length(f) == length(r₂) == Nc || error("Incompatible number of elements")
r₁ .= A*u .+ B₁ᵀ*f
r₂ .= B₂*u .+ C*f
return output
end
function mul!(sol::Tuple{TU,TF},sys::SaddleSystem,rhs::Tuple{TU,TF}) where {TU,TF}
u, f = sol
r₁, r₂ = rhs
return mul!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function mul!(sol::ArrayPartition,sys::SaddleSystem,rhs::ArrayPartition)
u, f = sol.x
r₁, r₂ = rhs.x
return mul!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function (*)(sys::SaddleSystem,input::Tuple)
u, f = input
output = (similar(u),similar(f))
mul!(output,sys,input)
return output
end
# Routine for accepting vector inputs, parsing it into Ns and Nc parts
function mul!(sol::AbstractVector{T},sys::SaddleSystem{T,Ns,Nc},rhs::AbstractVector{T}) where {T,Ns,Nc}
mul!(_split_vector(sol,Ns,Nc),sys,_split_vector(rhs,Ns,Nc))
return sol
end
function (*)(sys::SaddleSystem{T},rhs::Union{AbstractVector{T},AbstractVectorOfArray{T},ArrayPartition{T}}) where {T}
output = similar(rhs)
mul!(output,sys,rhs)
return output
end
## Multiplying tuples of saddle point systems
function mul!(sol,sys::Tuple{T1,T2},rhs) where {T1<:SaddleSystem,T2<:SaddleSystem}
for (i,sysi) in enumerate(sys)
mul!(sol[i],sysi,rhs[i])
end
sol
end
#=
function mul!(sol,sys::ArrayPartition{T},rhs) where {T<:SaddleSystem}
for (i,sysi) in enumerate(sys.x)
mul!(sol.x[i],sysi,rhs.x[i])
end
sol
end
=#
function (*)(sys::Tuple{T1,T2},rhs) where {T1<:SaddleSystem,T2<:SaddleSystem}
sol = deepcopy.(rhs)
mul!(sol,sys,rhs)
return sol
end
#### Left division ####
function ldiv!(sol::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}},sys::SaddleSystem{T,Ns,Nc},
rhs::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}}) where {T,Ns,Nc}
@unpack A⁻¹, B₂, B₁ᵀ, B₂A⁻¹r₁, P, S⁻¹, _f_buf, _u_buf, A⁻¹B₁ᵀf = sys
N = Ns+Nc
u,f = sol
r₁,r₂ = rhs
length(u) == length(r₁) == Ns || error("Incompatible number of elements")
length(f) == length(r₂) == Nc || error("Incompatible number of elements")
mul!(u,A⁻¹,r₁)
B₂A⁻¹r₁ .= B₂*u
_f_buf .= r₂
_f_buf .-= B₂A⁻¹r₁
if Nc > 0
f .= S⁻¹*_f_buf
f .= P*f
end
_u_buf .= B₁ᵀ*f
mul!(A⁻¹B₁ᵀf,A⁻¹,_u_buf)
u .-= A⁻¹B₁ᵀf
return sol
end
function ldiv!(sol::Tuple{TU,TF},sys::SaddleSystem,rhs::Tuple{TU,TF}) where {TU,TF}
u, f = sol
r₁, r₂ = rhs
return ldiv!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function ldiv!(sol::ArrayPartition,sys::SaddleSystem,rhs::ArrayPartition)
u, f = sol.x
r₁, r₂ = rhs.x
return ldiv!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function (\)(sys::SaddleSystem,rhs::Tuple)
u, f = rhs
sol = (similar(u),similar(f))
ldiv!(sol,sys,rhs)
return sol
end
# Routine for accepting vector inputs, parsing it into Ns and Nc parts
function ldiv!(sol::AbstractVector{T},sys::SaddleSystem{T,Ns,Nc},rhs::AbstractVector{T}) where {T,Ns,Nc}
ldiv!(_split_vector(sol,Ns,Nc),sys,_split_vector(rhs,Ns,Nc))
return sol
end
function (\)(sys::SaddleSystem{T},rhs::Union{AbstractVector{T},AbstractVectorOfArray{T},ArrayPartition{T}}) where {T}
sol = similar(rhs)
ldiv!(sol,sys,rhs)
return sol
end
## Solving tuples of saddle point systems
function ldiv!(sol,sys::Tuple{T1,T2},rhs) where {T1<:SaddleSystem,T2<:SaddleSystem}
for (i,sysi) in enumerate(sys)
ldiv!(sol[i],sysi,rhs[i])
end
sol
end
#=
function ldiv!(sol,sys::ArrayPartition{T},rhs) where {M,T<:SaddleSystem}
for (i,sysi) in enumerate(sys.x)
ldiv!(sol.x[i],sysi,rhs.x[i])
end
sol
end
=#
function (\)(sys::Tuple{T1,T2},rhs) where {T1<:SaddleSystem,T2<:SaddleSystem}
sol = deepcopy.(rhs)
ldiv!(sol,sys,rhs)
return sol
end
# For getting the constraint part from the state part, when there is a C matrix
function constraint_from_state!(sol::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}},sys::SaddleSystem{T,Ns,Nc},
rhs::Union{Tuple{AbstractVector{T},AbstractVector{T}},AbstractVectorOfArray{T}}) where {T,Ns,Nc}
@unpack B₂, C, C⁻¹ = sys
u,f = sol
r₁,r₂ = rhs
length(u) == length(r₁) == Ns || error("Incompatible number of elements")
length(f) == length(r₂) == Nc || error("Incompatible number of elements")
_isinvertible(C) || error("C operator cannot be inverted")
f .= C⁻¹*(r₂ .- B₂*u)
return sol
end
function constraint_from_state!(sol::Tuple{TU,TF},sys::SaddleSystem,rhs::Tuple{TU,TF}) where {TU,TF}
u, f = sol
r₁, r₂ = rhs
return constraint_from_state!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function constraint_from_state!(sol::ArrayPartition,sys::SaddleSystem,rhs::ArrayPartition)
u, f = sol.x
r₁, r₂ = rhs.x
return constraint_from_state!((_unwrap_vec(u),_unwrap_vec(f)),sys,(_unwrap_vec(r₁),_unwrap_vec(r₂)))
end
function constraint_from_state!(sol::AbstractVector{T},sys::SaddleSystem{T,Ns,Nc},rhs::AbstractVector{T}) where {T,Ns,Nc}
constraint_from_state!(_split_vector(sol,Ns,Nc),sys,_split_vector(rhs,Ns,Nc))
return sol
end
# vector -> tuple
_split_vector(x,Ns,Nc) = view(x,1:Ns), view(x,Ns+1:Ns+Nc)
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 5597 | ### LINEAR MAP CONSTRUCTION
#=
If a function is already in-place, then `linear_map` will preserve this, so, e.g,
f_lm = linear_map(f,input,output)
mul!(output_vec,f_lm,input_vec)
will produce the same effect as f(output,input), but on vectors
=#
# for a given function of function-like object A, which acts upon data of type `input`
# and returns data of type `output`
# return a LinearMap that acts upon a vector form of u
linear_map(A,input,output;eltype=Float64) = _linear_map(A,input,output,eltype,Val(length(input)),Val(length(output)))
linear_map(A,input;eltype=Float64) = _linear_map(A,input,eltype,Val(length(input)))
linear_map(A::AbstractMatrix{T},input::AbstractVector{T};eltype=Float64) where {T} = LinearMap{eltype}(A)
linear_map(A::SaddleSystem{T,Ns,Nc},::Any;eltype=Float64) where {T,Ns,Nc} = LinearMap{T}(x->A*x,Ns+Nc)
linear_map(A::UniformScaling,input;eltype=Float64) = LinearMap{eltype}(x->A*x,length(input))
function linear_inverse_map(A,input;eltype=Float64)
hasmethod(\,Tuple{typeof(A),typeof(input)}) || error("No such backslash operator exists")
return LinearMap{eltype}(_create_vec_backslash(A,input),length(input))
end
function linear_inverse_map!(A,input;eltype=Float64)
hasmethod(ldiv!,Tuple{typeof(input),typeof(A),typeof(input)}) || error("No such ldiv! operator exists")
return LinearMap{eltype}(_create_vec_backslash!(A,input),length(input))
end
linear_inverse_map(A::SaddleSystem{T,Ns,Nc},::Any;eltype=Float64) where {T,Ns,Nc} = LinearMap{T}(x->A\x,Ns+Nc)
# Square operators. input of zero length
_linear_map(A,input,eltype,::Val{0}) =
LinearMap{eltype}(x -> (),0,0)
# Square operators. input of non-zero length
_linear_map(A,input,eltype,::Val{M}) where {M} =
LinearMap{eltype}(_create_fcn(A,input),length(input))
# input and output have zero lengths
_linear_map(A,input,output,eltype,::Val{0},::Val{0}) =
LinearMap{eltype}(x -> (),0,0)
# input is 0 length, output is not
_linear_map(A,input,output,eltype,::Val{0},::Val{M}) where {M} =
LinearMap{eltype}(x -> _unwrap_vec(zero(output)),length(output),0)
# output is 0 length, input is not
_linear_map(A,input,output,eltype,::Val{N},::Val{0}) where {N} =
LinearMap{eltype}(x -> (),0,length(input))
# non-zero lengths of input and output
_linear_map(A,input,output,eltype,::Val{N},::Val{M}) where {N,M} =
LinearMap{eltype}(_create_fcn(A,output,input),length(output),length(input))
ismultiplicative(A,input) = hasmethod(*,Tuple{typeof(A),typeof(input)})
# Create a function for operator A that can act upon an input of type AbstractVector
# and return an output of type AbstractVector. It should wrap the input vector
# in the input data type associated with A and it should then unwrap its
# output back into vector form
#=function _create_fcn(A,output,input)
# if A has an associated * operation, then use this
if _ismultiplicative(A,input)
fcn = _create_vec_multiplication(A,input)
# or if A is a function or function-like object, then use this
elseif hasmethod(A,Tuple{typeof(input)})
fcn = _create_vec_function(A,input)
# or just quit
else
error("No function exists for this operator to act upon this type of data")
end
return fcn
end
=#
_create_fcn(A,input) = _create_fcn(A,nothing,input)
_create_fcn(A,output,input) = _create_fcn(A,output,input,Val(ismultiplicative(A,input)))
_create_fcn(A,output,input,::Val{true}) = _create_vec_multiplication(A,input)
_create_fcn(A,output,input,::Val{false}) = _create_fcn_function(A,output,input)
_create_fcn_function(A,output,input) = _create_fcn_function(A,output,input,Val(isinplace(A,2)))
# out of place
function _create_fcn_function(A,output,input,::Val{false})
hasmethod(A,Tuple{typeof(input)}) || error("No function exists for this operator to act upon this type of data")
_create_vec_function(A,input)
end
function _create_fcn_function(A,output,input,::Val{true})
hasmethod(A,Tuple{typeof(output),typeof(input)}) || error("No function exists for this operator to act upon this type of data")
_create_vec_function!(A,output,input)
end
# In each of these, u, outp, inp only provide the templates/sizes for the wrapping.
@inline _create_vec_multiplication(A,u::TU) where {TU} = (x -> _unwrap_vec(A*_wrap_vec(x,u)))
@inline _create_vec_function(A,u::TU) where {TU} = (x -> _unwrap_vec(A(_wrap_vec(x,u))))
@inline _create_vec_function!(A,outp::TO,inp::TI) where {TO,TI} = ((y,x) -> (_outpwrap = _wrap_vec(y,outp); A(_outpwrap,_wrap_vec(x,inp))))
@inline _create_vec_backslash(A,u::TU) where {TU} = (x -> _unwrap_vec(A\_wrap_vec(x,u)))
@inline _create_vec_backslash!(A,u::TU) where {TU} = (y,x) -> (_ywrap = _wrap_vec(y,u); ldiv!(_ywrap,A,_wrap_vec(x,u)); y)
@inline _create_vec_backslash!(A::UniformScaling,u::TU) where {TU} = (y,x) -> (_wrap_vec(y,u) .= A\_wrap_vec(x,u))
@inline _create_vec_backslash!(A::AbstractMatrix,u::TU) where {TU} = (Afact = factorize(A); (y,x) -> (_ywrap = _wrap_vec(y,u); _ywrap .= Afact\_wrap_vec(x,u)))
#### WRAPPERS ####
# wrap the vector x in type u, unless u is already a subtype of AbstractVector,
# in which case it just keeps it as is.
_wrap_vec(x::AbstractVector{T},u::TU) where {T,TU} = TU(reshape(x,size(u)...))
#_wrap_vec(x::AbstractVector{T},u::AbstractVector{U}) where {T,U} = x
_wrap_vec(x::Vector{T},u::Vector{T}) where {T} = x
# if the vector x is simply a reshaped form of type u, then just get the
# parent of x
_wrap_vec(x::Base.ReshapedArray,u::TU) where {TU} = parent(x)
#### UNWRAPPERS ####
# Usually vec suffices
_unwrap_vec(x) = vec(x)
_unwrap_vec(x::Tuple) = x
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 9070 | ### SaddleSystem ###
using IterativeSolvers
abstract type SchurSolverType end
struct SaddleSystem{T,Ns,Nc,TU,TF,TS<:SchurSolverType}
A :: LinearMap{T}
B₂ :: LinearMap{T}
B₁ᵀ :: LinearMap{T}
C :: LinearMap{T}
A⁻¹ :: LinearMap{T}
C⁻¹ :: LinearMap{T}
A⁻¹B₁ᵀf :: Vector{T}
B₂A⁻¹r₁ :: Vector{T}
_f_buf :: Vector{T}
_u_buf :: Vector{T}
P :: LinearMap{T}
S :: LinearMap{T}
S⁻¹ :: LinearMap{T}
end
# Constructors
"""
SaddleSystem
Construct a saddle-point system operator from the constituent operator blocks. The resulting object can be used
with `*` and `\\` to multiply and solve. The saddle-point problem has the form
``
\\begin{bmatrix}A & B_1^T \\\\ B_2 & C \\end{bmatrix} \\begin{pmatrix} u \\\\ f \\end{pmatrix} = \\begin{pmatrix} r_1 \\\\ r_2 \\end{pmatrix}
``
### Constructors
`SaddleSystem(A::AbstractMatrix,B₂::AbstractMatrix,B₁ᵀ::AbstractMatrix,C::AbstractMatrix[,eltype=Float64])`.
Blocks are given as matrices. Must have consistent sizes to stack appropriately. If
this is called with `SaddleSystem(A,B₂,B₁ᵀ)`, it sets `C` to zero automatically.
`SaddleSystem(A,B₂,B₁ᵀ,C,u,f[,eltype=Float64])`.
Operators `A`, `B₂`, `B₁ᵀ`, `C` are given in various forms, including matrices, functions, and function-like objects.
`u` and `f` are examples of the data types in the corresponding solution and right-hand side vectors.
Guidelines:
* The entries `A` and `B₂` must be able to act upon `u` (either by multiplication or as a function) and `B₁ᵀ` and `C` must be able to act on `f` (also, either by multiplication or as a function).
* `A` and `B₁ᵀ` should return data of type `u`, and `B₂` and `C` should return data of type `f`.
* `A` must be invertible and be outfitted with operators `\` and `ldiv!`.
* Both `u` and `f` must be subtypes of `AbstractArray`: they must be equipped with `size`
and `vec` functions and with a constructor of the form `T(data)` where `T` is the data type of
`u` or `f` and `data` is the wrapped data array.
If called as `SaddleSystem(A,B₂,B₁ᵀ,u,f)`, the `C` block is omitted and assumed to be zero.
If called with `SaddleSystem(A,u)`, this is equivalent to calling `SaddleSystem(A,nothing,nothing,u,[])`, and the system reverts
to the unconstrained system described by operator `A`.
The list of vectors `u` and `f` in any of these constructors can be bundled together
as a [`SaddleVector`](@ref), e.g. `SaddleSystem(A,B₂,B₁ᵀ,SaddleVector(u,f))`.
An optional keyword argument `solver=` can be used to specify the type of
solution for the Schur complement system. By default, this is set to `Direct`,
and the Schur complement matrix is formed, factorized, and stored. This can be
changed to a variety of iterative solvers, e.g. `BiCGStabl`, `CG`, `GMRES`, in which case
an iterative solver from [`IterativeSolvers.jl`](https://github.com/JuliaLinearAlgebra/IterativeSolvers.jl)
is used.
"""
function SaddleSystem(A::LinearMap{T},B₂::LinearMap{T},B₁ᵀ::LinearMap{T},C::LinearMap{T},
A⁻¹::LinearMap{T},P::LinearMap{T},TU,TF;solver::Type{TS}=Direct,kwargs...) where {T,TS<:SchurSolverType}
ns, nc = _check_sizes(A,B₂,B₁ᵀ,C,P)
S = C - B₂*A⁻¹*B₁ᵀ
S⁻¹ = _inverse_function(S,solver,kwargs...)
C⁻¹ = _inverse_function(C,solver,kwargs...)
return SaddleSystem{T,ns,nc,TU,TF,solver}(A,B₂,B₁ᵀ,C,A⁻¹,C⁻¹,zeros(T,ns),zeros(T,nc),zeros(T,nc),zeros(T,ns),P,S,S⁻¹)
end
##### Solver functions #####
abstract type Direct <: SchurSolverType end
function _inverse_function(S::LinearMap{T},::Type{Direct},kwargs...) where {T}
Sfact = factorize(Matrix(S))
M, N = size(S)
return LinearMap{T}(x -> Sfact\x,M)
end
#=
abstract type Iterative <: SchurSolverType end
function _inverse_function(S::LinearMap{T},::Type{Iterative},kwargs...) where {T}
M, N = size(S)
return LinearMap{T}(x -> (prob = LinearProblem(S,x); sol = solve(prob); sol.u) ,M)
end
=#
macro createsolver(stype)
sroutine = Symbol(lowercase(string(stype)),"!")
return esc(quote
export $stype
abstract type $stype <: SchurSolverType end
function _inverse_function(S::LinearMap{T},::Type{$stype},kwargs...) where {T}
M, N = size(S)
return LinearMap{T}(x -> (y = deepcopy(x); $sroutine(y,S,x;kwargs...); return y),M)
end
end)
end
@createsolver CG
@createsolver BiCGStabl
@createsolver GMRES
@createsolver MINRES
@createsolver IDRS
###########
### OTHER CONSTRUCTORS
### Matrix operators
function SaddleSystem(A::AbstractMatrix{T},B₂::AbstractMatrix{T},B₁ᵀ::AbstractMatrix{T},
C::AbstractMatrix{T};solver::Type{TS}=Direct,filter=I,kwargs...) where {T,TS<:SchurSolverType}
Afact = factorize(A)
Ainv = LinearMap{T}((y,x) -> y .= Afact\x,size(A,1))
return SaddleSystem(LinearMap{T}(A),LinearMap{T}(B₂),LinearMap{T}(B₁ᵀ),
LinearMap{T}(C),Ainv,linear_map(filter,zeros(T,size(C,1)),eltype=T),
Vector{T},Vector{T};solver=solver,kwargs...)
end
# For cases in which C is zero, no need to pass along the argument
SaddleSystem(A::AbstractMatrix{T},B₂::AbstractMatrix{T},B₁ᵀ::AbstractMatrix{T};solver::Type{TS}=Direct,filter=I,kwargs...) where {T,TS<:SchurSolverType} =
SaddleSystem(A,B₂,B₁ᵀ,zeros(T,size(B₂,1),size(B₁ᵀ,2));solver=solver,filter=filter,kwargs...)
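# Illustrative sketch (comments only): a small saddle system built from matrices,
# with C omitted (set to zero) and solved with the default Direct solver.
#   A = Matrix(2.0I, 4, 4); B2 = ones(1, 4); B1t = collect(B2')
#   sys = SaddleSystem(A, B2, B1t)
#   sol = sys \ [ones(4); 2.0]      # stacked solution [u; f]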
### Operators are functions or function-like operators
# This version should take in functions or function-like objects that act upon given
# data types u and f. Should transform them into operators that act on abstract vectors
# of the same size
# There should already be an \ operator associated with A
# NOTE: should change default value of eltype to eltype(u)
function SaddleSystem(A,B₂,B₁ᵀ,C,u::TU,f::TF;eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TU,TF,TS<:SchurSolverType}
return SaddleSystem(linear_map(A,u,eltype=eltype),linear_map(B₂,u,f,eltype=eltype),
linear_map(B₁ᵀ,f,u,eltype=eltype),
linear_map(C,f,eltype=eltype),
linear_inverse_map!(A,u,eltype=eltype),
linear_map(filter,f,eltype=eltype),TU,TF;solver=solver,kwargs...)
end
# No C operator provided, so set it to zero
SaddleSystem(A,B₂,B₁ᵀ,u::TU,f::TF;
eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TU,TF,TS<:SchurSolverType} =
SaddleSystem(A,B₂,B₁ᵀ,C_zero(f,eltype),u,f;eltype=eltype,filter=filter,solver=solver,kwargs...)
# Unconstrained system
SaddleSystem(A,u::TU;eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TU,TS<:SchurSolverType} = SaddleSystem(A,nothing,nothing,u,Type{eltype}[];eltype=eltype,filter=filter,solver=solver,kwargs...)
### For handling ArrayPartition arguments for the solution/rhs
SaddleSystem(A,B₂,B₁ᵀ,C,v::ArrayPartition;
eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TS<:SchurSolverType} =
SaddleSystem(A,B₂,B₁ᵀ,C,v.x[1],v.x[2];eltype=eltype,filter=filter,solver=solver,kwargs...)
# No C operator
SaddleSystem(A,B₂,B₁ᵀ,v::ArrayPartition;
eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TS<:SchurSolverType} =
SaddleSystem(A,B₂,B₁ᵀ,v.x[1],v.x[2];eltype=eltype,filter=filter,solver=solver,kwargs...)
# Unconstrained system
SaddleSystem(A,v::ArrayPartition;
eltype=Float64,filter=I,solver::Type{TS}=Direct,kwargs...) where {TS<:SchurSolverType} =
SaddleSystem(A,v.x[1];eltype=eltype,filter=filter,solver=solver,kwargs...)
### AUXILIARY ROUTINES
function Base.show(io::IO, S::SaddleSystem{T,Ns,Nc,TU,TF,TS}) where {T,Ns,Nc,TU,TF,TS<:SchurSolverType}
println(io, "Saddle system with $Ns states and $Nc constraints and")
println(io, " State vector of type $TU")
println(io, " Constraint vector of type $TF")
println(io, " Elements of type $T")
println(io, "using a $TS solver")
end
"""
Base.size(::SaddleSystem)
Report the size of a [`SaddleSystem`](@ref).
"""
size(::SaddleSystem{T,Ns,Nc}) where {T,Ns,Nc} = (Ns+Nc,Ns+Nc)
"""
Base.eltype(::SaddleSystem)
Report the element type of a [`SaddleSystem`](@ref).
"""
eltype(::SaddleSystem{T,Ns,Nc}) where {T,Ns,Nc} = T
C_zero(f,eltype) = zeros(eltype,length(f),length(f))
_isinvertible(f::LinearMap) = !iszero(f.lmap)
function _isinvertible(f::FunctionMap{T}) where {T}
length(f) == 0 && return false
M, N = size(f)
return !iszero(f*rand(T,N))
end
function _check_sizes(A,B₂,B₁ᵀ,C,P)
mA, nA = size(A)
mB1, nB1 = size(B₁ᵀ)
mB2, nB2 = size(B₂)
mC, nC = size(C)
mP, nP = size(P)
# check compatibility of sizes
mA == nA || error("A is not square")
mA == mB1 || error("Incompatible number of rows in A and B₁ᵀ")
nA == nB2 || error("Incompatible number of columns in A and B₂")
mC == mB2 || error("Incompatible number of rows in C and B₂")
nC == nB1 || error("Incompatible number of columns in C and B₁ᵀ")
mP == nP == mC || error("Filter has incompatible dimensions")
ns = nA
nc = nB1
return ns, nc
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 367 |
function basic_linalg_problem(;Ns=1000,Nc=100)
A = Diagonal(2*ones(Ns))
C = Diagonal(ones(Nc))
B2 = zeros(Nc,Ns)
for j in 1:min(Nc,Ns)
B2[j,j] = 1.0
end
B1t = B2'
rhs1v = ones(Ns)
rhs2v = 2*ones(Nc)
Abig = [A B1t;B2 C]
rhsbig = [rhs1v;rhs2v]
solex = similar(rhsbig)
solex .= Abig\rhsbig;
return A,B2,B1t,C,rhs1v,rhs2v, solex
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 27239 | ## Definitions of algorithms ##
stats_field(integrator) = integrator.stats
# WrayHERK is scheme C in Liska and Colonius (JCP 2016)
# BraseyHairerHERK is scheme B in Liska and Colonius (JCP 2016)
# LiskaIFHERK is scheme A in Liska and Colonius (JCP 2016)
# IFHEEuler is an Euler method with integrating factor
# HETrapezoidalAB2 is a Crank-Nicolson/2nd-order Adams Bashforth method
for (Alg,Order) in [(:WrayHERK,3),(:BraseyHairerHERK,3),(:LiskaIFHERK,2),(:IFHEEuler,1),(:HETrapezoidalAB2,2)]
@eval struct $Alg{solverType} <: ConstrainedOrdinaryDiffEqAlgorithm
maxiter :: Int
tol :: Float64
end
@eval $Alg(;saddlesolver=Direct,maxiter=4,tol=eps(Float64)) = $Alg{saddlesolver}(maxiter,tol)
@eval export $Alg
@eval alg_order(alg::$Alg) = $Order
end
# LiskaIFHERK
@cache struct LiskaIFHERKCache{sc,ni,solverType,uType,rateType,expType1,expType2,saddleType,pType,TabType} <: ConstrainedODEMutableCache{sc,solverType}
u::uType
uprev::uType # qi
k1::rateType # w1
k2::rateType # w2
k3::rateType # w3
utmp::uType # cache
udiff::uType
dutmp::rateType # cache for rates
fsalfirst::rateType
Hhalfdt::expType1
Hzero::expType2
S::saddleType
ptmp::pType
k::rateType
tab::TabType
end
struct LiskaIFHERKConstantCache{sc,ni,solverType,T,T2} <: ConstrainedODEConstantCache{sc,solverType}
ã11::T
ã21::T
ã22::T
ã31::T
ã32::T
ã33::T
c̃1::T2
c̃2::T2
c̃3::T2
function LiskaIFHERKConstantCache{sc,ni,solverType}(T, T2) where {sc,ni,solverType}
ã11 = T(1//2)
ã21 = T(√3/3)
ã22 = T((3-√3)/3)
ã31 = T((3+√3)/6)
ã32 = T(-√3/3)
ã33 = T((3+√3)/6)
c̃1 = T2(1//2)
c̃2 = T2(1.0)
c̃3 = T2(1.0)
new{sc,ni,solverType,T,T2}(ã11,ã21,ã22,ã31,ã32,ã33,c̃1,c̃2,c̃3)
end
end
LiskaIFHERKCache{sc,ni,solverType}(u,uprev,k1,k2,k3,utmp,udiff,dutmp,fsalfirst,
Hhalfdt,Hzero,S,ptmp,k,tab) where {sc,ni,solverType} =
LiskaIFHERKCache{sc,ni,solverType,typeof(u),typeof(k1),typeof(Hhalfdt),typeof(Hzero),
typeof(S),typeof(ptmp),typeof(tab)}(u,uprev,k1,k2,k3,utmp,udiff,dutmp,fsalfirst,
Hhalfdt,Hzero,S,ptmp,k,tab)
function alg_cache(alg::LiskaIFHERK{solverType},u,rate_prototype,uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,p,calck,::Val{true}) where {solverType}
u isa ArrayPartition || error("u must be of type ArrayPartition")
y, z = state(u), constraint(u)
utmp, udiff = (zero(u) for i in 1:2)
k1, k2, k3, dutmp, fsalfirst, k = (zero(rate_prototype) for i in 1:6)
sc = isstatic(f)
ni = needs_iteration(f,u,p,rate_prototype)
tab = LiskaIFHERKConstantCache{sc,ni,solverType}(constvalue(uBottomEltypeNoUnits),
constvalue(tTypeNoUnits))
@unpack ã11,ã22,ã33 = tab
L = _fetch_ode_L(f)
Hhalfdt = exp(L,-dt/2,y)
Hzero = exp(L,zero(dt),y)
S = []
push!(S,SaddleSystem(Hhalfdt,f,p,p,dutmp,solverType;cfact=1.0/(ã11*dt)))
push!(S,SaddleSystem(Hhalfdt,f,p,p,dutmp,solverType;cfact=1.0/(ã22*dt)))
push!(S,SaddleSystem(Hzero,f,p,p,dutmp,solverType;cfact=1.0/(ã33*dt)))
LiskaIFHERKCache{sc,ni,solverType}(u,uprev,k1,k2,k3,utmp,udiff,dutmp,fsalfirst,
Hhalfdt,Hzero,S,deepcopy(p),k,tab)
end
function alg_cache(alg::LiskaIFHERK{solverType},u,rate_prototype,
uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,
p,calck,::Val{false}) where {solverType}
LiskaIFHERKConstantCache{isstatic(f),
needs_iteration(f,u,p,rate_prototype),
solverType}(constvalue(uBottomEltypeNoUnits),
constvalue(tTypeNoUnits))
end
# IFHEEuler
@cache struct IFHEEulerCache{sc,ni,solverType,uType,rateType,expType,saddleType,pType} <: ConstrainedODEMutableCache{sc,solverType}
u::uType
uprev::uType # qi
k1::rateType # w1
utmp::uType # cache
udiff::uType
dutmp::rateType # cache for rates
fsalfirst::rateType
Hdt::expType
S::saddleType
ptmp::pType
k::rateType
end
struct IFHEEulerConstantCache{sc,ni,solverType} <: ConstrainedODEConstantCache{sc,solverType}
end
IFHEEulerCache{sc,ni,solverType}(u,uprev,k1,utmp,udiff,dutmp,fsalfirst,
Hdt,S,ptmp,k) where {sc,ni,solverType} =
IFHEEulerCache{sc,ni,solverType,typeof(u),typeof(k1),typeof(Hdt),
typeof(S),typeof(ptmp)}(u,uprev,k1,utmp,udiff,dutmp,fsalfirst,
Hdt,S,ptmp,k)
function alg_cache(alg::IFHEEuler{solverType},u,rate_prototype,uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,p,calck,::Val{true}) where {solverType}
u isa ArrayPartition || error("u must be of type ArrayPartition")
y, z = state(u), constraint(u)
utmp, udiff = (zero(u) for i in 1:2)
k1, dutmp, fsalfirst, k = (zero(rate_prototype) for i in 1:4)
sc = isstatic(f)
ni = needs_iteration(f,u,p,rate_prototype)
Hdt = exp(_fetch_ode_L(f),-dt,y)
S = []
push!(S,SaddleSystem(Hdt,f,p,p,dutmp,solverType;cfact=1.0/dt))
IFHEEulerCache{sc,ni,solverType}(u,uprev,k1,utmp,udiff,dutmp,fsalfirst,
Hdt,S,deepcopy(p),k)
end
function alg_cache(alg::IFHEEuler{solverType},u,rate_prototype,
uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,
p,calck,::Val{false}) where {solverType}
IFHEEulerConstantCache{isstatic(f),needs_iteration(f,u,p,rate_prototype),solverType}()
end
# Half-explicit trapezoidal/Adams-Bashforth 2 (HETrapezoidalAB2)
@cache struct HETrapezoidalAB2Cache{sc,ni,solverType,uType,rateType,implicitType,saddleType,pType,TabType} <: ConstrainedODEMutableCache{sc,solverType}
u::uType
uprev::uType
ki::rateType
ke::rateType
utmp::uType # cache
udiff::uType
dutmp::rateType # cache for rates
fsalfirst::rateType
A::implicitType
S::saddleType
ptmp::pType
k::rateType
tab::TabType
end
struct HETrapezoidalAB2ConstantCache{sc,ni,solverType,T} <: ConstrainedODEConstantCache{sc,solverType}
α̃1::T
α̃2::T
β̃1::T
β̃2::T
function HETrapezoidalAB2ConstantCache{sc,ni,solverType}(T) where {sc,ni,solverType}
α̃1 = T(1//2)
α̃2 = T(1//2)
β̃1 = T(3//2)
β̃2 = T(-1//2)
new{sc,ni,solverType,T}(α̃1,α̃2,β̃1,β̃2)
end
end
HETrapezoidalAB2Cache{sc,ni,solverType}(u,uprev,ki,ke,utmp,udiff,dutmp,fsalfirst,
A,S,ptmp,k,tab) where {sc,ni,solverType} =
HETrapezoidalAB2Cache{sc,ni,solverType,typeof(u),typeof(ki),typeof(A),
typeof(S),typeof(ptmp),typeof(tab)}(u,uprev,ki,ke,utmp,udiff,dutmp,fsalfirst,A,S,ptmp,k,tab)
function alg_cache(alg::HETrapezoidalAB2{solverType},u,rate_prototype,uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,p,calck,::Val{true}) where {solverType}
u isa ArrayPartition || error("u must be of type ArrayPartition")
y, z = state(u), constraint(u)
utmp, udiff = (zero(u) for i in 1:2)
ki, ke, dutmp, fsalfirst, k = (zero(rate_prototype) for i in 1:5)
sc = isstatic(f)
ni = needs_iteration(f,u,p,rate_prototype)
tab = HETrapezoidalAB2ConstantCache{sc,ni,solverType}(constvalue(uBottomEltypeNoUnits))
@unpack α̃1 = tab
A = implicit_operator(_fetch_ode_L(f),α̃1*dt)
S = []
push!(S,SaddleSystem(A,f,p,p,dutmp,solverType;cfact=1.0/(α̃1*dt)))
push!(S,SaddleSystem(A,f,p,p,dutmp,solverType;cfact=1.0/dt))
HETrapezoidalAB2Cache{sc,ni,solverType}(u,uprev,ki,ke,utmp,udiff,dutmp,fsalfirst,
A,S,deepcopy(p),k,tab)
end
function alg_cache(alg::HETrapezoidalAB2{solverType},u,rate_prototype,
uEltypeNoUnits,uBottomEltypeNoUnits,
tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,
p,calck,::Val{false}) where {solverType}
HETrapezoidalAB2ConstantCache{isstatic(f),needs_iteration(f,u,p,rate_prototype),solverType}(constvalue(uBottomEltypeNoUnits))
end
#######
function initialize!(integrator,cache::LiskaIFHERKCache)
@unpack k,fsalfirst = cache
integrator.fsalfirst = fsalfirst
integrator.fsallast = k
integrator.kshortsize = 2
resize!(integrator.k, integrator.kshortsize)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.f.odef(integrator.fsalfirst, integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.f.param_update_func(integrator.p,integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
end
@muladd function perform_step!(integrator,cache::LiskaIFHERKCache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,u,f,p,alg,opts = integrator
@unpack internalnorm = opts
@unpack maxiter, tol = alg
@unpack k1,k2,k3,utmp,udiff,dutmp,fsalfirst,Hhalfdt,Hzero,S,ptmp,k = cache
@unpack ã11,ã21,ã22,ã31,ã32,ã33,c̃1,c̃2,c̃3 = cache.tab
@unpack param_update_func = f
init_err = float(1)
init_iter = ni ? 1 : maxiter
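# when ni is true, the fixed-point loops below may run up to maxiter times; when it is
# false, starting the counter at maxiter lets each loop body execute at most once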
# aliases to the state and constraint parts
ytmp, ztmp, xtmp = state(utmp), constraint(utmp), aux_state(utmp)
yprev = state(uprev)
y, z, x = state(u), constraint(u), aux_state(u)
pold_ptr = p
pnew_ptr = ptmp
ttmp = t
@.. u = uprev
## Stage 1
_ode_full_rhs!(k1,f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k1 *= dt*ã11
@.. utmp = uprev + k1
ttmp = t + dt*c̃1
@.. u = utmp
# if applicable, update p, construct new saddle system here, using Hhalfdt
# and solve system. Solve iteratively if saddle operators depend on
# constrained part of the state.
err, numiter = init_err, init_iter
while err > tol && numiter <= maxiter
udiff .= u
param_update_func(pnew_ptr,u,pold_ptr,ttmp)
S[1] = SaddleSystem(S[1],Hhalfdt,f,pnew_ptr,pold_ptr,cache)
_constraint_r2!(utmp,f,u,pnew_ptr,ttmp) # only updates the z part
ldiv!(mainvector(u),S[1],mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
end
zero_vec!(xtmp)
B1_times_z!(utmp,S[1])
pold_ptr = ptmp
pnew_ptr = p
ldiv!(yprev,Hhalfdt,yprev)
ldiv!(state(k1),Hhalfdt,state(k1))
@.. k1 = (k1-utmp)/(dt*ã11) # r1(y,t) - B1T*z
## Stage 2
_ode_full_rhs!(k2,f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k2 *= dt*ã22
@.. utmp = uprev + k2 + dt*ã21*k1
ttmp = t + dt*c̃2
@.. u = utmp
# if applicable, update p, construct new saddle system here, using Hhalfdt
err, numiter = init_err, init_iter
while err > tol && numiter <= maxiter
udiff .= u
param_update_func(pnew_ptr,u,pold_ptr,ttmp)
S[2] = SaddleSystem(S[2],Hhalfdt,f,pnew_ptr,pold_ptr,cache)
_constraint_r2!(utmp,f,u,pnew_ptr,ttmp)
ldiv!(mainvector(u),S[2],mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
end
zero_vec!(xtmp)
B1_times_z!(utmp,S[2])
pold_ptr = p
pnew_ptr = ptmp
ldiv!(yprev,Hhalfdt,yprev)
ldiv!(state(k1),Hhalfdt,state(k1))
ldiv!(state(k2),Hhalfdt,state(k2))
@.. k2 = (k2-utmp)/(dt*ã22)
## Stage 3
_ode_full_rhs!(k3,f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k3 *= dt*ã33
@.. utmp = uprev + k3 + dt*ã32*k2 + dt*ã31*k1
ttmp = t + dt
@.. u = utmp
# if applicable, update p, construct new saddle system here, using Hzero (identity)
err, numiter = init_err, init_iter
while err > tol && numiter <= maxiter
udiff .= u
param_update_func(pnew_ptr,u,pold_ptr,ttmp)
S[3] = SaddleSystem(S[3],Hzero,f,pnew_ptr,pold_ptr,cache)
_constraint_r2!(utmp,f,u,pnew_ptr,t+dt)
ldiv!(mainvector(u),S[3],mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
#println("error = ",err)
end
@.. z /= (dt*ã33)
# Final steps
param_update_func(p,u,ptmp,t+dt)
f.odef(integrator.fsallast, u, p, t+dt)
stats_field(integrator).nf += 1
return nothing
end
function initialize!(integrator,cache::LiskaIFHERKConstantCache)
integrator.kshortsize = 2
integrator.k = typeof(integrator.k)(undef, integrator.kshortsize)
integrator.fsalfirst = integrator.f.odef(integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.p = integrator.f.param_update_func(integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
# Avoid undefined entries if k is an array of arrays
integrator.fsallast = zero(integrator.fsalfirst)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
end
@muladd function perform_step!(integrator,cache::LiskaIFHERKConstantCache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,f,p,opts,alg = integrator
@unpack internalnorm = opts
@unpack maxiter, tol = alg
@unpack ã11,ã21,ã22,ã31,ã32,ã33,c̃1,c̃2,c̃3 = cache
@unpack param_update_func = f
init_err = float(1)
init_iter = ni ? 1 : maxiter
# set up some cache variables
yprev = state(uprev)
udiff = deepcopy(uprev)
ducache = deepcopy(uprev)
ptmp = deepcopy(p)
L = _fetch_ode_L(f)
Hhalfdt = exp(L,-dt/2,state(uprev))
Hzero = exp(L,zero(dt),state(uprev))
pold_ptr = p
## Stage 1
ttmp = t
k1 = _ode_full_rhs(f,uprev,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k1 *= dt*ã11
utmp = @.. uprev + k1
ttmp = t + dt*c̃1
# if applicable, update p, construct new saddle system here, using Hhalfdt
# and solve system. Solve iteratively if saddle operators depend on
# constrained part of the state.
err, numiter = init_err, init_iter
u = deepcopy(utmp)
while err > tol && numiter <= maxiter
udiff .= u
ptmp = param_update_func(u,pold_ptr,ttmp)
pnew_ptr = ptmp
S = SaddleSystem(Hhalfdt,f,pnew_ptr,pold_ptr,ducache,solverType;cfact=1.0/(ã11*dt))
constraint(utmp) .= constraint(_constraint_r2(f,u,pnew_ptr,ttmp))
ldiv!(mainvector(u),S,mainvector(utmp))
B1_times_z!(ducache,S)
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
end
zero_vec!(aux_state(utmp))
state(utmp) .= state(ducache)
pold_ptr = ptmp
ldiv!(yprev,Hhalfdt,yprev)
ldiv!(state(k1),Hhalfdt,state(k1))
@.. k1 = (k1-utmp)/(dt*ã11) # r1(y,t) - B1T*z
## Stage 2
k2 = _ode_full_rhs(f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k2 *= dt*ã22
@.. utmp = uprev + k2 + dt*ã21*k1
ttmp = t + dt*c̃2
# if applicable, update p, construct new saddle system here, using Hhalfdt
err, numiter = init_err, init_iter
u .= utmp
while err > tol && numiter <= maxiter
udiff .= u
ptmp = param_update_func(u,pold_ptr,ttmp)
S = SaddleSystem(Hhalfdt,f,ptmp,pold_ptr,ducache,solverType;cfact=1.0/(ã22*dt))
constraint(utmp) .= constraint(_constraint_r2(f,u,ptmp,ttmp))
ldiv!(mainvector(u),S,mainvector(utmp))
B1_times_z!(ducache,S)
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
end
zero_vec!(aux_state(utmp))
state(utmp) .= state(ducache)
pold_ptr = ptmp
ldiv!(yprev,Hhalfdt,yprev)
ldiv!(state(k1),Hhalfdt,state(k1))
ldiv!(state(k2),Hhalfdt,state(k2))
@.. k2 = (k2-utmp)/(dt*ã22)
## Stage 3
k3 = _ode_full_rhs(f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k3 *= dt*ã33
@.. utmp = uprev + k3 + dt*ã32*k2 + dt*ã31*k1
ttmp = t + dt
# if applicable, update p, construct new saddle system here, using Hzero (identity)
err, numiter = init_err, init_iter
u .= utmp
while err > tol && numiter <= maxiter
udiff .= u
ptmp = param_update_func(u,pold_ptr,ttmp)
S = SaddleSystem(Hzero,f,ptmp,pold_ptr,ducache,solverType;cfact=1.0/(ã33*dt))
constraint(utmp) .= constraint(_constraint_r2(f,u,ptmp,ttmp))
ldiv!(mainvector(u),S,mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
#println("error = ",err)
end
z = constraint(u)
@.. z /= (dt*ã33)
# Final steps
integrator.p = param_update_func(u,ptmp,t+dt)
k = f.odef(u, integrator.p, t+dt)
stats_field(integrator).nf += 1
integrator.fsallast = k
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.u = u
return nothing
end
####
function initialize!(integrator,cache::IFHEEulerCache)
@unpack k,fsalfirst = cache
integrator.fsalfirst = fsalfirst
integrator.fsallast = k
integrator.kshortsize = 2
resize!(integrator.k, integrator.kshortsize)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.f.odef(integrator.fsalfirst, integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.f.param_update_func(integrator.p,integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
end
@muladd function perform_step!(integrator,cache::IFHEEulerCache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,u,f,p,opts,alg = integrator
@unpack internalnorm = opts
@unpack k1,utmp,udiff,dutmp,fsalfirst,Hdt,S,ptmp,k = cache
@unpack maxiter, tol = alg
@unpack param_update_func = f
init_err = float(1)
#init_iter = ni ? 1 : maxiter
init_iter = maxiter # First-order method does not need iteration
# aliases to the state and constraint parts
ytmp, ztmp = state(utmp), constraint(utmp)
z = constraint(u)
pold_ptr = p
pnew_ptr = ptmp
ttmp = t
u .= uprev
_ode_full_rhs!(k1,f,u,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. k1 *= dt
@.. utmp = uprev + k1
ttmp = t + dt
# if applicable, update p, construct new saddle system here, using Hdt
err, numiter = init_err, init_iter
u .= utmp
while err > tol && numiter <= maxiter
udiff .= u
param_update_func(pnew_ptr,u,pold_ptr,ttmp)
S[1] = SaddleSystem(S[1],Hdt,f,pnew_ptr,pold_ptr,cache)
_constraint_r2!(utmp,f,u,pnew_ptr,t+dt)
ldiv!(mainvector(u),S[1],mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
#println("numiter = ",numiter, ", error = ",err)
end
@.. z /= dt
# Final steps
param_update_func(p,u,pold_ptr,t)
f.odef(integrator.fsallast, u, p, t+dt)
stats_field(integrator).nf += 1
return nothing
end
function initialize!(integrator,cache::IFHEEulerConstantCache)
integrator.kshortsize = 2
integrator.k = typeof(integrator.k)(undef, integrator.kshortsize)
integrator.fsalfirst = integrator.f.odef(integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.p = integrator.f.param_update_func(integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
# Avoid undefined entries if k is an array of arrays
integrator.fsallast = zero(integrator.fsalfirst)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
end
@muladd function perform_step!(integrator,cache::IFHEEulerConstantCache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,f,p,opts,alg = integrator
@unpack internalnorm = opts
@unpack maxiter, tol = alg
@unpack param_update_func = f
init_err = float(1)
#init_iter = ni ? 1 : maxiter
init_iter = maxiter # First-order method does not need iteration
# set up some cache variables
udiff = deepcopy(uprev)
ducache = deepcopy(uprev)
ptmp = deepcopy(p)
L = _fetch_ode_L(f)
Hdt = exp(L,-dt,state(uprev))
pold_ptr = p
pnew_ptr = ptmp
k1 = _ode_full_rhs(f,uprev,pold_ptr,t)
stats_field(integrator).nf += 1
@.. k1 *= dt
utmp = @.. uprev + k1
# if applicable, update p, construct new saddle system here, using Hdt
err, numiter = init_err, init_iter
u = deepcopy(utmp)
while err > tol && numiter <= maxiter
udiff .= u
pnew_ptr = param_update_func(u,pold_ptr,t+dt)
S = SaddleSystem(Hdt,f,pnew_ptr,pold_ptr,ducache,solverType;cfact=1.0/dt)
constraint(utmp) .= constraint(_constraint_r2(f,u,pnew_ptr,t+dt))
ldiv!(mainvector(u),S,mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,t+dt)
#println("error = ",err)
end
z = constraint(u)
@.. z /= dt
# Final steps
integrator.p = param_update_func(u,pold_ptr,t)
k = f.odef(u, integrator.p, t+dt)
stats_field(integrator).nf += 1
integrator.fsallast = k
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.u = u
end
#### Half-explicit Trapezoidal/Adams-Bashforth 2 (HETrapezoidalAB2) ####
function initialize!(integrator,cache::HETrapezoidalAB2Cache)
@unpack ki,k,fsalfirst = cache
integrator.fsalfirst = fsalfirst
integrator.fsallast = k
integrator.kshortsize = 2
resize!(integrator.k, integrator.kshortsize)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.f.odef(integrator.fsalfirst, integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
#_ode_implicit_rhs!(ki,integrator.f,integrator.uprev,integrator.p,integrator.t)
integrator.f.param_update_func(integrator.p,integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
end
@muladd function perform_step!(integrator,cache::HETrapezoidalAB2Cache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,u,f,p,opts,alg = integrator
@unpack internalnorm = opts
@unpack ki,ke,utmp,udiff,dutmp,fsalfirst,S,ptmp,k,tab,A = cache
@unpack α̃1,α̃2,β̃1,β̃2 = tab
@unpack maxiter, tol = alg
@unpack param_update_func = f
init_err = float(1)
#init_iter = ni ? 1 : maxiter
init_iter = maxiter
cnt = integrator.iter
# aliases to the state and constraint parts
ytmp, ztmp = state(utmp), constraint(utmp)
z = constraint(u)
pold_ptr = p
pnew_ptr = ptmp
ttmp = t
u .= uprev
if cnt == 1
# Use Euler step to replace AB2, but still use trapezoidal for implicit part
@.. utmp = uprev
# If C is not empty, then find the initial constraint to go along
# with the initial state
if _isinvertible(S[1].C)
_constraint_r2!(utmp,f,utmp,pold_ptr,ttmp)
constraint_from_state!(mainvector(utmp),S[1],mainvector(utmp))
@.. ztmp /= (α̃1*dt)
end
# Calculate the initial ki (time level 0)
_ode_implicit_rhs!(ki,f,utmp,pold_ptr,ttmp)
# Calculate the initial ke (time level 0)
_ode_r1!(ke,f,utmp,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. utmp += dt*ke
else
@.. utmp = uprev + β̃2*dt*ke
_ode_r1!(ke,f,uprev,pold_ptr,ttmp)
stats_field(integrator).nf += 1
@.. utmp += β̃1*dt*ke # utmp now corresponds to y* and x*
end
ttmp = t + dt
# if applicable, update p, construct new saddle system here
err, numiter = init_err, init_iter
_ode_r1imp!(dutmp,f,utmp,pold_ptr,ttmp)
@.. u = utmp
@.. utmp += α̃2*dt*ki + α̃1*dt*dutmp
while err > tol && numiter <= maxiter
udiff .= u
param_update_func(pnew_ptr,u,pold_ptr,ttmp)
S[1] = SaddleSystem(S[1],A,f,pnew_ptr,pnew_ptr,cache)
_constraint_r2!(utmp,f,u,pnew_ptr,t+dt)
ldiv!(mainvector(u),S[1],mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,ttmp)
#println("numiter = ",numiter, ", error = ",err)
end
@.. z /= (α̃1*dt)
# Final steps
param_update_func(p,u,pold_ptr,t+dt)
_ode_implicit_rhs!(ki,f,u,p,t+dt)
f.odef(integrator.fsallast, u, p, t+dt)
stats_field(integrator).nf += 1
return nothing
end
function initialize!(integrator,cache::HETrapezoidalAB2ConstantCache)
integrator.kshortsize = 2
integrator.k = typeof(integrator.k)(undef, integrator.kshortsize)
integrator.fsalfirst = integrator.f.odef(integrator.uprev, integrator.p, integrator.t) # Pre-start fsal
integrator.p = integrator.f.param_update_func(integrator.uprev,integrator.p,integrator.t)
stats_field(integrator).nf += 1
# Avoid undefined entries if k is an array of arrays
integrator.fsallast = zero(integrator.fsalfirst)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
end
@muladd function perform_step!(integrator,cache::HETrapezoidalAB2ConstantCache{sc,ni,solverType},repeat_step=false) where {sc,ni,solverType}
@unpack t,dt,uprev,uprev2,f,p,opts,alg = integrator
@unpack internalnorm = opts
@unpack maxiter, tol = alg
@unpack α̃1,α̃2,β̃1,β̃2 = cache
@unpack param_update_func = f
init_err = float(1)
#init_iter = ni ? 1 : maxiter
init_iter = maxiter
cnt = integrator.iter
# set up some cache variables
udiff = deepcopy(uprev)
ducache = deepcopy(uprev)
ptmp = deepcopy(p)
L = _fetch_ode_L(f)
A = implicit_operator(_fetch_ode_L(f),α̃1*dt)
pold_ptr = p
pnew_ptr = ptmp
if cnt == 1
utmp = uprev
S = SaddleSystem(A,f,pold_ptr,pold_ptr,ducache,solverType;cfact=1.0/(α̃1*dt))
if _isinvertible(S.C)
constraint(utmp) .= constraint(_constraint_r2(f,utmp,pold_ptr,t))
constraint_from_state!(mainvector(utmp),S,mainvector(utmp))
ztmp = constraint(utmp)
@.. ztmp /= (α̃1*dt)
end
# Calculate the initial ki (time level 0)
ki = _ode_implicit_rhs(f,utmp,pold_ptr,t)
ke = _ode_r1(f,uprev,pold_ptr,t)
stats_field(integrator).nf += 1
@.. utmp = utmp + dt*ke
else
ke = _ode_r1(f,uprev2,pold_ptr,t-dt) #ke at step n-1
ki = _ode_implicit_rhs(f,uprev,pold_ptr,t)
utmp = uprev + β̃2*dt*ke
ke = _ode_r1(f,uprev,pold_ptr,t)
stats_field(integrator).nf += 1
@.. utmp = utmp + β̃1*dt*ke # utmp now corresponds to y* and x*
end
@.. uprev2 = uprev
ducache .= _ode_r1imp(f,utmp,pold_ptr,t+dt)
# if applicable, update p, construct new saddle system here, using Hdt
err, numiter = init_err, init_iter
u = deepcopy(utmp)
@.. utmp = utmp + α̃2*dt*ki + α̃1*dt*ducache
while err > tol && numiter <= maxiter
udiff .= u
pnew_ptr = param_update_func(u,pold_ptr,t+dt)
S = SaddleSystem(A,f,pnew_ptr,pnew_ptr,ducache,solverType;cfact=1.0/(α̃1*dt))
constraint(utmp) .= constraint(_constraint_r2(f,u,pnew_ptr,t+dt))
ldiv!(mainvector(u),S,mainvector(utmp))
@.. udiff -= u
numiter += 1
err = internalnorm(udiff,t+dt)
#println("error = ",err)
end
z = constraint(u)
@.. z /= (α̃1*dt)
# Final steps
integrator.p = param_update_func(u,pold_ptr,t)
k = f.odef(u, integrator.p, t+dt)
stats_field(integrator).nf += 1
integrator.fsallast = k
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.u = u
end
#=
This is pirated from
https://github.com/SciML/OrdinaryDiffEq.jl/blob/0e38705ac4ace80a51961d689ff64bea6b3bce73/src/misc_utils.jl#L20
because for some reason importing it directly does not work
=#
macro cache(expr)
name = expr.args[2].args[1].args[1]
fields = [x for x in expr.args[3].args if typeof(x)!=LineNumberNode]
cache_vars = Expr[]
jac_vars = Pair{Symbol,Expr}[]
for x in fields
if x.args[2] == :uType || x.args[2] == :rateType ||
x.args[2] == :kType || x.args[2] == :uNoUnitsType
push!(cache_vars,:(c.$(x.args[1])))
elseif x.args[2] == :DiffCacheType
push!(cache_vars,:(c.$(x.args[1]).du))
push!(cache_vars,:(c.$(x.args[1]).dual_du))
end
end
quote
$expr
$(esc(:full_cache))(c::$name) = tuple($(cache_vars...))
end
end
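# Sketch of what the macro produces (hypothetical cache type, shown only for
# illustration):
#
#   @cache struct DemoCache{uType,rateType} <: OrdinaryDiffEqMutableCache
#       u::uType
#       k::rateType
#   end
#
# expands to the struct definition itself plus
#
#   full_cache(c::DemoCache) = (c.u, c.k)
#
# so the integrator can locate and resize every u-typed and rate-typed field.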
# Need to allow UNITLESS_ABS2 to work on empty vectors
# Should add this into DiffEqBase
#@inline UNITLESS_ABS2(x::AbstractArray) = (isempty(x) && return sum(UNITLESS_ABS2,zero(eltype(x))); sum(UNITLESS_ABS2, x))
# A workaround that avoids redefinition
import OrdinaryDiffEq.DiffEqBase: UNITLESS_ABS2, ODE_DEFAULT_NORM, recursive_length
@inline MY_UNITLESS_ABS2(x::Number) = abs2(x)
@inline MY_UNITLESS_ABS2(x::AbstractArray) = (isempty(x) && return sum(MY_UNITLESS_ABS2,zero(eltype(x))); sum(MY_UNITLESS_ABS2, x))
@inline MY_UNITLESS_ABS2(x::ArrayPartition) = sum(MY_UNITLESS_ABS2, x.x)
@inline ODE_DEFAULT_NORM(u::ArrayPartition,t) = sqrt(MY_UNITLESS_ABS2(u)/recursive_length(u))
@inline _l2norm(u) = sqrt(recursive_mean(map(x -> float(x).^2,u)))
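# Worked example of the norm above (illustrative): for u = ArrayPartition([3.0,4.0],[0.0]),
# MY_UNITLESS_ABS2(u) == 25.0 and recursive_length(u) == 3, so ODE_DEFAULT_NORM(u,t)
# returns sqrt(25/3).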
zero_vec!(::Nothing) = nothing
zero_vec!(u) = fill!(u,0.0)
function zero_vec!(u::ArrayPartition)
for x in u.x
fill!(x,0.0)
end
return nothing
end
function recursivecopy!(dest :: T, src :: T) where {T}
fields = fieldnames(T)
for f in fields
tmp = getfield(dest,f)
#tmp .= getfield(src,f)
recursivecopy!(tmp,getfield(src,f))
end
dest
end
function recursivecopy!(dest :: AbstractArray{T}, src :: AbstractArray{T}) where {T}
for i in eachindex(src)
recursivecopy!(dest[i],src[i])
end
return dest
end
# Seed the state vector with two sets of random values, apply the constraint operator to a
# fixed target state with each seed, and compare the results. If they differ, the
# constraint operators depend on the state and the solver must iterate within each stage.
needs_iteration(f::ConstrainedODEFunction{iip},u,p,rate_prototype) where {iip} = _needs_iteration(f,u,p,rate_prototype,Val(iip))
function _needs_iteration(f,u,p,rate_prototype,::Val{true})
pseed = deepcopy(p)
u_target, useed = (zero(u) for i in 1:2)
yseed = state(useed)
y_target = state(u_target)
fill!(y_target,1.0)
dutmp, dudiff = (zero(rate_prototype) for i in 1:2)
dzdiff = constraint(dudiff)
yseed .= randn(size(yseed))
f.param_update_func(pseed,useed,p,0.0)
_constraint_neg_B2!(dutmp,f,u_target,pseed,0.0)
dudiff .= dutmp
yseed .= randn(size(yseed))
f.param_update_func(pseed,useed,p,0.0)
_constraint_neg_B2!(dutmp,f,u_target,pseed,0.0)
dudiff .-= dutmp
!(_l2norm(dzdiff) == 0.0)
end
function _needs_iteration(f,u,p,rate_prototype,::Val{false})
pseed = deepcopy(p)
u_target, useed = (zero(u) for i in 1:2)
yseed = state(useed)
y_target = state(u_target)
fill!(y_target,1.0)
dutmp, dudiff = (zero(rate_prototype) for i in 1:2)
dzdiff = constraint(dudiff)
yseed .= randn(size(yseed))
pseed = f.param_update_func(useed,p,0.0)
dutmp = _constraint_neg_B2(f,u_target,pseed,0.0)
dudiff .= dutmp
yseed .= randn(size(yseed))
pseed = f.param_update_func(useed,p,0.0)
dutmp = _constraint_neg_B2(f,u_target,pseed,0.0)
dudiff .-= dutmp
!(_l2norm(dzdiff) == 0.0)
end
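# Usage sketch (the problem objects here are hypothetical): the result feeds the ni
# type parameter of the solver caches, which the perform_step! methods consult as
#
#   ni = needs_iteration(prob.f, prob.u0, prob.p, zero(prob.u0))
#   init_iter = ni ? 1 : maxiter   # iterate only when the operators depend on the state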
struct ProblemParams{P,BT1,BT2}
params :: P
B₁ᵀ :: BT1
B₂ :: BT2
end
function basic_unconstrained_problem(;tmax=1.0,iip=true)
ns = 1
y0 = 1.0
α = 1.02
p = α
ode_rhs!(dy,y,x,p,t) = dy .= p*y
ode_rhs(y,x,p,t) = p*y
y₀ = ones(Float64,ns)
u₀ = solvector(state=y₀)
if iip
f = ConstrainedODEFunction(ode_rhs!,_func_cache=u₀)
else
f = ConstrainedODEFunction(ode_rhs)
end
tspan = (0.0,1.0)
prob = ODEProblem(f,u₀,tspan,p)
xexact(t) = exp(α*t)
return prob, xexact
end
function basic_unconstrained_if_problem(;tmax=1.0,iip=true)
ns = 1
y0 = 1.0
α = 1.02
p = α
ode_rhs!(dy,y,x,p,t) = fill!(dy,0.0)
ode_rhs(y,x,p,t) = zero(y)
L = α*I
y₀ = ones(Float64,ns)
u₀ = solvector(state=y₀)
if iip
f = ConstrainedODEFunction(ode_rhs!,L,_func_cache=u₀)
else
f = ConstrainedODEFunction(ode_rhs,L)
end
tspan = (0.0,1.0)
prob = ODEProblem(f,u₀,tspan,p)
xexact(t) = exp(α*t)
return prob, xexact
end
function basic_constrained_problem(;tmax=1.0,iip=true)
U0 = 1.0
g = 1.0
α = 0.5
ω = 5
y₀ = Float64[0,0,U0,0]
z₀ = Float64[0]
params = [U0,g,α,ω];
p₀ = ProblemParams(params,Array{Float64}(undef,4,1),Array{Float64}(undef,1,4));
u₀ = solvector(state=y₀,constraint=z₀)
du = deepcopy(u₀)
function ode_rhs!(dy::Vector{Float64},y::Vector{Float64},x,p,t)
dy .= 0.0
dy[1] = y[3]
dy[2] = y[4]
dy[4] = -(y[1]-p.params[1]*t)*p.params[2]
return dy
end
ode_rhs(y::Vector{Float64},x,p,t) = ode_rhs!(deepcopy(y₀),y,x,p,t)
constraint_rhs!(dz::Vector{Float64},x,p,t) = dz .= Float64[p.params[1]]
constraint_rhs(x,p,t) = constraint_rhs!(deepcopy(z₀),x,p,t)
function op_constraint_force!(dy::Vector{Float64},z::Vector{Float64},x,p)
@unpack B₁ᵀ = p
dy .= B₁ᵀ*z
end
op_constraint_force(z::Vector{Float64},x,p) = op_constraint_force!(deepcopy(y₀),z,x,p)
function constraint_op!(dz::Vector{Float64},y::Vector{Float64},x,p)
@unpack B₂ = p
dz .= B₂*y
end
constraint_op(y::Vector{Float64},x,p) = constraint_op!(deepcopy(z₀),y,x,p)
function update_p!(q,u,p,t)
y, z = state(u), constraint(u)
@unpack B₁ᵀ, B₂ = q
B₁ᵀ .= 0
B₂ .= 0
B₁ᵀ[3,1] = 1/(1+q.params[3]*sin(q.params[4]*t))
B₂[1,3] = 1/(1+q.params[3]*sin(q.params[4]*t))
return q
end
update_p(u,p,t) = update_p!(deepcopy(p),u,p,t)
if iip
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,op_constraint_force!,
constraint_op!,_func_cache=deepcopy(du),
param_update_func=update_p!)
else
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,op_constraint_force,
constraint_op,param_update_func=update_p)
end
tspan = (0.0,tmax)
p = deepcopy(p₀)
prob = ODEProblem(f,u₀,tspan,p)
yexact(t) = g*α*U0/ω*(-0.5*t^2 - cos(ω*t)/ω^2 + 1/ω^2)
xexact(t) = U0*(t - α*cos(ω*t)/ω+α/ω)
return prob, xexact, yexact
end
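# How these problem factories are consumed (a sketch mirroring the convergence tests;
# the time step below is arbitrary):
#
#   prob, xexact, yexact = basic_constrained_problem(iip=true)
#   sol = solve(prob, LiskaIFHERK(); dt=1/64)
#   maximum(abs, sol[1,:] .- xexact.(sol.t))   # pointwise error in the first state entry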
function cartesian_pendulum_problem(;tmax=1.0,iip=true)
θ₀ = π/2
l = 1.0
g = 1.0
y₀ = Float64[l*sin(θ₀),-l*cos(θ₀),0,0]
z₀ = Float64[0.0, 0.0]
u₀ = solvector(state=y₀,constraint=z₀)
du = deepcopy(u₀)
params = [l,g]
p₀ = ProblemParams(params,Array{Float64}(undef,4,2),Array{Float64}(undef,2,4))
function pendulum_rhs!(dy::Vector{Float64},y::Vector{Float64},x,p,t)
dy .= 0.0
dy[1] = y[3]
dy[2] = y[4]
dy[4] = -p.params[2]
return dy
end
pendulum_rhs(y::Vector{Float64},x,p,t) = pendulum_rhs!(zero(y),y,x,p,t)
length_constraint_rhs!(dz::Vector{Float64},x,p,t) = dz .= [0.0,p.params[1]^2]
length_constraint_rhs(x,p,t) = length_constraint_rhs!(zero(z₀),x,p,t)
function length_constraint_force!(dy::Vector{Float64},z::Vector{Float64},x,p)
@unpack B₁ᵀ = p
dy .= B₁ᵀ*z
end
length_constraint_force(z::Vector{Float64},x,p) = length_constraint_force!(zero(y₀),z,x,p)
function length_constraint_op!(dz::Vector{Float64},y::Vector{Float64},x,p)
@unpack B₂ = p
dz .= B₂*y
end
length_constraint_op(y::Vector{Float64},x,p) = length_constraint_op!(zero(z₀),y,x,p)
function update_p!(q,u,p,t)
y, z = state(u), constraint(u)
@unpack B₁ᵀ, B₂ = q
fill!(B₁ᵀ,0.0)
fill!(B₂,0.0)
B₁ᵀ[3,1] = y[1]; B₁ᵀ[4,1] = y[2]; B₁ᵀ[1,2] = y[1]; B₁ᵀ[2,2] = y[2]
B₂[1,3] = y[1]; B₂[1,4] = y[2]; B₂[2,1] = y[1]; B₂[2,2] = y[2]
return q
end
update_p(u,p,t) = update_p!(deepcopy(p),u,p,t)
if iip
f = ConstrainedODEFunction(pendulum_rhs!,length_constraint_rhs!,length_constraint_force!,
length_constraint_op!,
_func_cache=deepcopy(du),param_update_func=update_p!)
else
f = ConstrainedODEFunction(pendulum_rhs,length_constraint_rhs,length_constraint_force,
length_constraint_op,param_update_func=update_p)
end
tspan = (0.0,tmax)
p = deepcopy(p₀)
prob = ODEProblem(f,u₀,tspan,p)
# Get superconverged solution from the basic
# problem expressed in theta
function pendulum_theta(u,p,t)
du = similar(u)
du[1] = u[2]
du[2] = -p^2*sin(u[1])
du
end
u0 = [π/2,0.0]
pex = g/l # squared frequency
tspan = (0.0,10.0)
probex = ODEProblem(pendulum_theta,u0,tspan,pex)
solex = solve(probex, Tsit5(), reltol=1e-16, abstol=1e-16);
xexact(t) = sin(solex(t,idxs=1))
yexact(t) = -cos(solex(t,idxs=1))
return prob, xexact, yexact
end
function partitioned_problem(;tmax=1.0,iip=true)
ω = 1.0
βu = -0.2
βv = -0.5
par = [ω,βu,βv]
U₀ = Float64[0,1]
X₀ = Float64[1,0]
Z₀ = Float64[0]
u₀ = solvector(state=X₀,constraint=Z₀,aux_state=U₀)
du = deepcopy(u₀)
B₂ = Array{Float64}(undef,1,2)
B₁ᵀ = Array{Float64}(undef,2,1)
p₀ = ProblemParams(par,B₁ᵀ,B₂)
L = Diagonal([βu,βv])
function X_rhs!(dy,y,x,p,t)
fill!(dy,0.0)
return dy
end
X_rhs(y,x,p,t) = X_rhs!(deepcopy(X₀),y,x,p,t)
function U_rhs!(dy,u,p,t)
fill!(dy,0.0)
ω = p.params[1]
dy[1] = -ω*cos(ω*t)
dy[2] = -ω*sin(ω*t)
return dy
end
U_rhs(u,p,t) = U_rhs!(deepcopy(U₀),u,p,t)
ode_rhs! = ArrayPartition((X_rhs!,U_rhs!))
ode_rhs = ArrayPartition((X_rhs,U_rhs))
constraint_rhs!(dz,x,p,t) = dz .= Float64[0]
constraint_rhs(x,p,t) = constraint_rhs!(deepcopy(Z₀),x,p,t)
function op_constraint_force!(dy,z,x,p)
@unpack B₁ᵀ = p
dy .= B₁ᵀ*z
return dy
end
op_constraint_force(z,x,p) = op_constraint_force!(deepcopy(X₀),z,x,p)
function constraint_op!(dz,y,x,p)
@unpack B₂ = p
dz .= B₂*y
end
constraint_op(y,x,p) = constraint_op!(deepcopy(Z₀),y,x,p)
function update_p!(q,u,p,t)
x = aux_state(u)
@unpack B₁ᵀ, B₂ = q
B₁ᵀ[1,1] = x[1]
B₁ᵀ[2,1] = x[2]
B₂[1,1] = x[1]
B₂[1,2] = x[2]
return q
end
update_p(u,p,t) = update_p!(deepcopy(p),u,p,t)
if iip
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,op_constraint_force!,
constraint_op!,L,_func_cache=deepcopy(du),
param_update_func=update_p!)
else
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,op_constraint_force,
constraint_op,L,param_update_func=update_p)
end
tspan = (0.0,tmax)
p = deepcopy(p₀)
update_p!(p,u₀,p,0.0)
prob = ODEProblem(f,u₀,tspan,p)
# function fex(du,u,p,t)
# UV = u.x[1]
# xy = u.x[2]
# ω = p[1]
# βu = p[2]
# βv = p[3]
# du[1] = -ω*cos(ω*t)
# du[2] = -ω*sin(ω*t)
# Usq = u[1]^2+u[2]^2
# du[3] = (βu-u[1]/Usq*(du[1]+βu*u[1]))*u[3] - u[1]/Usq*(du[2]+βv*u[2])*u[4]
# du[4] = -u[2]/Usq*(du[1]+βu*u[1])*u[3] + (βv-u[2]/Usq*(du[2]+βv*u[2]))*u[4]
# return nothing
# end
#
# tspan = (0.0,tmax)
# u₀ex = SaddleVector(U₀,X₀)
#
# probex = ODEProblem(fex,u₀ex,tspan,par)
# solex = solve(probex, Tsit5(), reltol=1e-16, abstol=1e-16)
# xexact(t) = solex(t,idxs=3)
# yexact(t) = solex(t,idxs=4)
fex(t) = exp(0.5*(βu+βv)*t)*exp(0.25/ω*sin(2ω*t)*(βu-βv))
xexact(t) = cos(ω*t)*fex(t)
yexact(t) = sin(ω*t)*fex(t)
return prob, xexact, yexact
end
function basic_constrained_if_problem_with_cmatrix(;tmax=1.0,iip=true)
y0 = 1
y₀ = Float64[y0]
z₀ = Float64[0]
u₀ = solvector(state=y₀,constraint=z₀)
du = deepcopy(u₀)
α = -1.0
B1T = 1.0
B2 = 1.0
β = 1.2
r2 = 1.0
p = [α,B1T,B2,β,r2]
L = α*I(1)
ode_rhs!(dy,y,x,p,t) = fill!(dy,0.0)
ode_rhs(y,x,p,t) = zero(y)
constraint_rhs!(dz,x,p,t) = fill!(dz,p[5])
constraint_rhs(x,p,t) = p[5]*ones(1)
function op_constraint_force!(dy::Vector{Float64},z::Vector{Float64},x,p)
dy .= p[2]*z
end
op_constraint_force(z::Vector{Float64},x,p) = op_constraint_force!(deepcopy(y₀),z,x,p)
function constraint_op!(dz::Vector{Float64},y::Vector{Float64},x,p)
dz .= p[3]*y
end
constraint_op(y::Vector{Float64},x,p) = constraint_op!(deepcopy(z₀),y,x,p)
function constraint_reg!(dz::Vector{Float64},z::Vector{Float64},x,p)
dz .= p[4]*z
end
constraint_reg(z::Vector{Float64},x,p) = constraint_reg!(deepcopy(z₀),z,x,p)
if iip
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,op_constraint_force!,
constraint_op!,L,constraint_reg!,_func_cache=deepcopy(du))
else
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,op_constraint_force,
constraint_op,L,constraint_reg)
end
tspan = (0.0,tmax)
prob = ODEProblem(f,u₀,tspan,p)
xexact(t) = exp((α+1/β)*t)*y0 + r2/β/(α+1/β)*(1 - exp((α+1/β)*t))
yexact(t) = (r2 - xexact(t))/β
return prob, xexact, yexact
end
# called directly by alg_cache for iip algorithms and indirectly by non-sc algorithms
function SaddleSystem(A,f::ConstrainedODEFunction{true},p,pold,ducache,solver;cfact=1.0)
nully, nullz = state(ducache), constraint(ducache)
du_aux = aux_state(ducache)
@inline B₁ᵀ(z) = (zero_vec!(ducache);
_ode_neg_B1!(ducache,f,solvector(state=nully,constraint=z,aux_state=du_aux),pold,0.0);
state(ducache) .*= -1.0; return state(ducache))
@inline B₂(y) = (zero_vec!(ducache);
_constraint_neg_B2!(ducache,f,solvector(state=y,constraint=nullz,aux_state=du_aux),p,0.0);
constraint(ducache) .*= -1.0; return constraint(ducache))
@inline C(z) = (zero_vec!(ducache);
_constraint_neg_C!(ducache,f,solvector(state=nully,constraint=z,aux_state=du_aux),p,0.0);
constraint(ducache) .*= -cfact; return constraint(ducache))
SaddleSystem(A,B₂,B₁ᵀ,C,mainvector(ducache),solver=solver)
end
# called directly by oop algorithms
function SaddleSystem(A,f::ConstrainedODEFunction{false},p,pold,ducache,solver;cfact=1.0)
nully, nullz = state(ducache), constraint(ducache)
du_aux = aux_state(ducache)
@inline B₁ᵀ(z) = (zero_vec!(ducache); ducache .= _ode_neg_B1(f,solvector(state=nully,constraint=z,aux_state=du_aux),pold,0.0);
state(ducache) .*= -1.0; return state(ducache))
@inline B₂(y) = (zero_vec!(ducache);
ducache .= _constraint_neg_B2(f,solvector(state=y,constraint=nullz,aux_state=du_aux),p,0.0);
constraint(ducache) .*= -1.0; return constraint(ducache))
@inline C(z) = (zero_vec!(ducache);
ducache .= _constraint_neg_C(f,solvector(state=nully,constraint=z,aux_state=du_aux),p,0.0);
constraint(ducache) .*= -cfact; return constraint(ducache))
SaddleSystem(A,B₂,B₁ᵀ,C,mainvector(ducache),solver=solver)
end
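# Note on the sign flips in both constructors above: the completed operators stored in
# the ODE function return -B₁ᵀz, -B₂y and -Cz (they sit on the right-hand side of the
# equations), so these wrappers negate once more to hand SaddleSystem the positive
# operators it expects; C additionally absorbs the stage factor through cfact.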
# this version is called by in-place algorithms
@inline SaddleSystem(S::SaddleSystem,A,f::ConstrainedODEFunction,p,pold,
cache::ConstrainedODEMutableCache{sc,solverType}) where {sc,solverType} =
SaddleSystem(S,A,f,p,pold,cache.dutmp,solverType,Val(sc))
# non-static constraints
@inline SaddleSystem(S::SaddleSystem,A,f::ConstrainedODEFunction,p,pold,ducache,solver,
::Val{false}) = SaddleSystem(A,f,p,pold,ducache,solver)
# static constraints
@inline SaddleSystem(S::SaddleSystem,A,f::ConstrainedODEFunction,p,pold,ducache,solver,
::Val{true}) = S
function B1_times_z!(u,S::SaddleSystem)
state(u) .= typeof(state(u))(S.A⁻¹B₁ᵀf)
return u
end
function B1_times_z(u,S::SaddleSystem)
out = zero(u)
state(out) .= typeof(state(u))(S.A⁻¹B₁ᵀf)
return out
end
export DiffEqLinearOperator, ConstrainedODEFunction
DEFAULT_PARAM_UPDATE_FUNC(q,u,p,t) = q
DEFAULT_PARAM_UPDATE_FUNC(u,p,t) = p
### Abstract algorithms ###
abstract type ConstrainedOrdinaryDiffEqAlgorithm <: OrdinaryDiffEq.OrdinaryDiffEqAlgorithm end
### Abstract caches ###
# The sc parameter specifies whether it contains static constraint operators or not
# If false, then it expects that the state vector contains a component for updating the operators
abstract type ConstrainedODEMutableCache{sc,solverType} <: OrdinaryDiffEqMutableCache end
abstract type ConstrainedODEConstantCache{sc,solverType} <: OrdinaryDiffEqConstantCache end
#### Operator and function types ####
mutable struct DiffEqLinearOperator{T,aType} <: AbstractDiffEqLinearOperator{T}
L :: aType
DiffEqLinearOperator(L::aType; update_func=DEFAULT_PARAM_UPDATE_FUNC,
dtype=Float64) where {aType} = new{dtype,aType}(L)
end
(f::DiffEqLinearOperator)(du,u,p,t) = (dy = state(du); mul!(dy,f.L,state(u)))
(f::DiffEqLinearOperator)(u,p,t) = (du = deepcopy(u); zero_vec!(du); dy = state(du); mul!(dy,f.L,state(u)); return du)
import Base: exp
exp(f::DiffEqLinearOperator,args...) = exp(f.L,args...)
exp(f::ArrayPartition,t,x::ArrayPartition) =
ArrayPartition((exp(Li,t,xi) for (Li,xi) in zip(f.L.x,x.x))...)
exp(f::ODEFunction,args...) = exp(f.f,args...)
has_exp(::DiffEqLinearOperator) = true
exp(L::AbstractMatrix,t,x) = exp(factorize(L)*t)
exp(L::UniformScaling,t,x) = exp(Diagonal(L,length(x))*t)
implicit_operator(L::AbstractMatrix,a::Real) = I - a*L
implicit_operator(L::UniformScaling,a::Real) = I - a*L
implicit_operator(f::DiffEqLinearOperator,args...) = implicit_operator(f.L,args...)
implicit_operator(f::ODEFunction,args...) = implicit_operator(f.f,args...)
#=
ConstrainedODEFunction
A function of this type should be able to take arguments (du,u,p,t) (for in-place)
or (u,p,t) for out-of-place, and distribute the parts of u and du as
needed to the component functions. u and du will both be of type ArrayPartition,
with two parts: state(u) and constraint(u).
In some cases we wish to solve a separate (unconstrained) system, e.g., to
update the constraint operators B1 and B2. This update is done via the
parameters, using param_update_func. In this case, u and du are ArrayPartition
with the first part corresponding to the constrained system (and of ArrayPartition type)
and the second part is for the unconstrained system. We supply `r1` as
an ArrayPartition of the component functions (in the same order). The arguments of
each component function of `r1` should only take in and return their own parts
of this state.
In the future we can expand this to take in multiple (coupled) systems of this form,
as in FSI problems. In these cases, the component functions should be supplied as
ArrayPartitions of the functions for each system. Each of the component functions of each system
should be able to take in the *full* state and/or constraint and return the
*full* state/constraint, as appropriate, in order to enable coupling of the
systems.
=#
"""
ConstrainedODEFunction(r1,r2,B1,B2[,L][,C])
This specifies the functions and operators that comprise an ODE problem with the form
``
\\dfrac{dy}{dt} = Ly - B_1 z + r_1(y,t)
``
``
B_2 y + C z = r_2(x,t)
``
where ``y`` is the state, ``z`` is a constraint force, and ``x`` is an auxiliary
state describing the constraints.
The optional linear operator `L` defaults to zeros. The `B1` and `B2` functions must be of the respective
in-place forms `B1(dy,z,x,p)` (to compute the action of `B1` on `z`) and `B2(dz,y,x,p)` (to compute the action
of `B2` on `y`). The function `r1` must of the in-place form `r1(dy,y,x,p,t)`, and `r2` must be in the in-place form
`r2(dz,x,p,t)`. The `C` function can be omitted, but if it is included, then it must be of the form
`C(dz,z,x,p)` (to compute the action of `C` on `z`). Alternatively, one can supply out-of-place forms, respectively, as `B1(z,x,p)`, `B2(y,x,p)`,
`C(z,x,p)`, `r1(y,x,p,t)` and `r2(x,p,t)`.
An optional keyword argument `param_update_func` can be used to set a function that updates problem parameters with
the current solution. This function must take the in-place form `f(q,u,p,t)` or out of place form
`f(u,p,t)` to create some `q` based on `u`, where `y = state(u)`, `z = constraint(u)` and `x = aux_state(u)`.
(Note that `q` might enter the function simply as `p`, to be mutated.) This function can
be used to update `B1`, `B2`, and `C`, for example.
We can also include another (unconstrained) set of equations to the set above
in order to update `x`:
``
\\dfrac{dx}{dt} = r_{1x}(u,p,t)
``
In this case, the right-hand side has access to the entire `u` vector. We would pass
the pair of `r1` functions as an `ArrayPartition`.
"""
struct ConstrainedODEFunction{iip,static,F1,F2,TMM,C,Ta,Tt,TJ,JVP,VJP,JP,SP,TW,TWt,TPJ,S,TCV,PF} <: AbstractODEFunction{iip}
odef :: F1
conf :: F2
mass_matrix::TMM
cache::C
analytic::Ta
tgrad::Tt
jac::TJ
jvp::JVP
vjp::VJP
jac_prototype::JP
sparsity::SP
Wfact::TW
Wfact_t::TWt
paramjac::TPJ
syms::S
colorvec::TCV
param_update_func :: PF
end
function ConstrainedODEFunction(r1,r2,B1,B2,L=DiffEqLinearOperator(0*I),C=nothing;
r1imp=nothing,
param_update_func = DEFAULT_PARAM_UPDATE_FUNC,
mass_matrix=I,_func_cache=nothing,
analytic=nothing,
tgrad = nothing,
jac = nothing,
jvp=nothing,
vjp=nothing,
jac_prototype = nothing,
sparsity=jac_prototype,
Wfact = nothing,
Wfact_t = nothing,
paramjac = nothing,
syms = nothing,
colorvec = nothing)
allempty(r2,B1,B2) || noneempty(r2,B1,B2) || error("Inconsistent null operators")
unconstrained = allempty(r2,B1,B2)
allinplace(r1,r2,B1,B2,r1imp) || alloutofplace(r1,r2,B1,B2,r1imp) || error("Inconsistent function signatures")
iip = allinplace(r1,r2,B1,B2,r1imp)
static = param_update_func == DEFAULT_PARAM_UPDATE_FUNC
L_local = (L isa DiffEqLinearOperator) ? L : DiffEqLinearOperator(L)
local_cache = deepcopy(_func_cache)
zero_vec!(local_cache)
odef_imp_nl = SplitFunction(_complete_B1(B1,Val(iip)),
_complete_r1imp(r1imp,Val(iip));_func_cache=deepcopy(local_cache))
odef_imp = SplitFunction(L_local,odef_imp_nl;_func_cache=deepcopy(local_cache))
odef = SplitFunction(_complete_r1(r1,Val(iip),_func_cache=deepcopy(local_cache)), odef_imp ;_func_cache=deepcopy(local_cache))
conf_lhs = SplitFunction(_complete_B2(B2,Val(iip)),_complete_C(C,Val(iip));_func_cache=deepcopy(local_cache))
conf = SplitFunction(_complete_r2(r2,Val(iip)),conf_lhs;_func_cache=deepcopy(local_cache))
ConstrainedODEFunction{iip,static,typeof(odef),
typeof(conf),typeof(mass_matrix),typeof(local_cache),
typeof(analytic),typeof(tgrad),typeof(jac),typeof(jvp),typeof(vjp),
typeof(jac_prototype),typeof(sparsity),
typeof(Wfact),typeof(Wfact_t),typeof(paramjac),typeof(syms),
typeof(colorvec),typeof(param_update_func)}(odef,conf,mass_matrix,local_cache,
analytic,tgrad,jac,jvp,vjp,jac_prototype,
sparsity,Wfact,Wfact_t,paramjac,syms,colorvec,param_update_func)
end
# For unconstrained systems
ConstrainedODEFunction(r1,L=DiffEqLinearOperator(0*I);kwargs...) =
ConstrainedODEFunction(r1,nothing,nothing,nothing,L,nothing;kwargs...)
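# A minimal sketch of the in-place interface documented above (all names below are
# hypothetical; see the test problems for complete versions):
#
#   r1!(dy,y,x,p,t) = dy .= -y            # state right-hand side
#   r2!(dz,x,p,t)   = dz .= 1.0           # constraint right-hand side
#   B1!(dy,z,x,p)   = dy .= z[1]          # action of B₁ on z
#   B2!(dz,y,x,p)   = dz .= y[1] + y[2]   # action of B₂ on y
#   u₀ = solvector(state=zeros(2), constraint=zeros(1))
#   f  = ConstrainedODEFunction(r1!, r2!, B1!, B2!, _func_cache=u₀)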
function Base.show(io::IO, m::MIME"text/plain",f::ConstrainedODEFunction{iip,static}) where {iip,static}
iips = iip ? "in-place" : "out-of-place"
statics = static ? "static" : "variable"
println(io,"Constrained ODE function of $iips type and $statics constraints")
end
# Here is where we define the structure of the function
@inline _fetch_ode_r1(f::ConstrainedODEFunction) = f.odef.f1
@inline _fetch_ode_implicit_rhs(f::ConstrainedODEFunction) = f.odef.f2
@inline _fetch_ode_L(f::ConstrainedODEFunction) = _fetch_ode_implicit_rhs(f).f.f1
@inline _fetch_ode_neg_B1(f::ConstrainedODEFunction) = _fetch_ode_implicit_rhs(f).f.f2.f.f1
@inline _fetch_ode_r1imp(f::ConstrainedODEFunction) = _fetch_ode_implicit_rhs(f).f.f2.f.f2
@inline _fetch_constraint_r2(f::ConstrainedODEFunction) = f.conf.f1
@inline _fetch_constraint_neg_B2(f::ConstrainedODEFunction) = f.conf.f2.f.f1
@inline _fetch_constraint_neg_C(f::ConstrainedODEFunction) = f.conf.f2.f.f2
for fcn in (:_ode_L,:_ode_r1,:_ode_r1imp,:_ode_neg_B1,:_constraint_neg_B2,:_constraint_neg_C,:_constraint_r2,:_ode_implicit_rhs)
fetchfcn = Symbol("_fetch",string(fcn))
iipfcn = Symbol(string(fcn),"!")
@eval $iipfcn(du,f::ConstrainedODEFunction,u,p,t) = $fetchfcn(f)(du,u,p,t)
@eval $fcn(f::ConstrainedODEFunction,u,p,t) = $fetchfcn(f)(u,p,t)
end
function _ode_full_rhs!(du,f::ConstrainedODEFunction,u,p,t)
@unpack odef = f
@unpack cache = odef
zero_vec!(cache)
zero_vec!(du)
_ode_r1!(cache,f,u,p,t)
_ode_r1imp!(du,f,u,p,t)
@.. du += cache
return du
end
function _ode_full_rhs(f::ConstrainedODEFunction,u,p,t)
return _ode_r1(f,u,p,t) + _ode_r1imp(f,u,p,t)
end
allempty(::Nothing,::Nothing,::Nothing) = true
allempty(r2,B1,B2) = false
noneempty(r2,B1,B2) = !(isnothing(r2) || isnothing(B1) || isnothing(B2))
allinplace(r1,r2,B1,B2) = _isinplace_r1(r1) && _isinplace_r2(r2) && _isinplace_B1(B1) && _isinplace_B2(B2)
alloutofplace(r1,r2,B1,B2) = _isoop_r1(r1) && _isoop_r2(r2) && _isoop_B1(B1) && _isoop_B2(B2)
allinplace(r1,r2,B1,B2,r1imp) = allinplace(r1,r2,B1,B2) && _isinplace_r1imp(r1imp)
allinplace(r1,r2,B1,B2,::Nothing) = allinplace(r1,r2,B1,B2)
alloutofplace(r1,r2,B1,B2,r1imp) = alloutofplace(r1,r2,B1,B2) && _isoop_r1imp(r1imp)
alloutofplace(r1,r2,B1,B2,::Nothing) = alloutofplace(r1,r2,B1,B2)
allinplace(r1,::Nothing,::Nothing,::Nothing) = _isinplace_r1(r1)
alloutofplace(r1,::Nothing,::Nothing,::Nothing) = _isoop_r1(r1)
for (f,nv,nvaux) in ((:r1,5,4),(:r2,4,0),(:r1imp,4,0),(:B1,4,0),(:B2,4,0),(:C,4,0))
iipfcn = Symbol("_isinplace_",string(f))
oopfcn = Symbol("_isoop_",string(f))
completefcn = Symbol("_complete_",string(f))
@eval $iipfcn(fcn) = isinplace(fcn,$nv)
@eval $iipfcn(fcn::ArrayPartition) = $iipfcn(fcn.x[1]) && isinplace(fcn.x[2],$nvaux)
@eval $oopfcn(fcn) = first(numargs(fcn)) == $(nv-1)
@eval $oopfcn(fcn::ArrayPartition) = $oopfcn(fcn.x[1]) && first(numargs(fcn.x[2])) == $(nvaux-1)
@eval $completefcn(fcn,::Val{iip};_func_cache=nothing) where {iip} = $completefcn(fcn,Val(iip),_func_cache)
@eval $completefcn(::Nothing,::Val{false},_func_cache) = (u,p,t) -> zero(u)
end
_complete_r1(r1,::Val{true},_func_cache) = (du,u,p,t) -> (dy = state(du); y = state(u); x = aux_state(u); r1(dy,y,x,p,t))
_complete_r1(r1,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du); y = state(u); x = aux_state(u); state(du) .= r1(y,x,p,t); return du)
_complete_r1(r1::ArrayPartition,::Val{true},_func_cache) =
SplitFunction((du,u,p,t) ->(dy = state(du); dx = aux_state(du); zero_vec!(dx); y = state(u); x = aux_state(u); state_r1(r1)(dy,y,x,p,t)),
(du,u,p,t) ->(dy = state(du); dx = aux_state(du); zero_vec!(dy); aux_r1(r1)(dx,u,p,t));
_func_cache=deepcopy(_func_cache))
_complete_r1(r1::ArrayPartition,::Val{false},_func_cache) =
SplitFunction((u,p,t) -> (du = deepcopy(u); zero_vec!(du); y = state(u); x = aux_state(u); state(du) .= state_r1(r1)(y,x,p,t); return du),
(u,p,t) -> (du = deepcopy(u); zero_vec!(du); aux_state(du) .= aux_r1(r1)(u,p,t); return du))
_complete_r1imp(r1imp,::Val{true},_func_cache) = (du,u,p,t) -> (dy = state(du); x = aux_state(u); r1imp(dy,x,p,t))
_complete_r1imp(r1imp,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du); x = aux_state(u); state(du) .= r1imp(x,p,t); return du)
_complete_r1imp(::Nothing,::Val{true},_func_cache) = (du,u,p,t) -> zero_vec!(du)
_complete_r2(r2,::Val{true},_func_cache) = (du,u,p,t) -> (dz = constraint(du); x = aux_state(u); r2(dz,x,p,t))
_complete_r2(::Nothing,::Val{true},_func_cache) = (du,u,p,t) -> zero_vec!(constraint(du))
_complete_r2(r2,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du); x = aux_state(u); constraint(du) .= r2(x,p,t); return du)
_complete_B1(B1,::Val{true},_func_cache) = (du,u,p,t) -> (dy = state(du); dx = aux_state(du); zero_vec!(dx);
z = constraint(u); x = aux_state(u); B1(dy,z,x,p); dy .*= -1.0)
_complete_B1(::Nothing,::Val{true},_func_cache) = (du,u,p,t) -> (zero_vec!(aux_state(du)); zero_vec!(state(du)))
_complete_B1(B1,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du);
z = constraint(u); x = aux_state(u); state(du) .= -B1(z,x,p); return du)
_complete_B2(B2,::Val{true},_func_cache) = (du,u,p,t) -> (dz = constraint(du); y = state(u); x = aux_state(u);
B2(dz,y,x,p); dz .*= -1.0)
_complete_B2(::Nothing,::Val{true},_func_cache) = (du,u,p,t) -> zero_vec!(constraint(du))
_complete_B2(B2,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du); y = state(u); x = aux_state(u);
constraint(du) .= -B2(y,x,p); return du)
_complete_C(C,::Val{true},_func_cache) = (du,u,p,t) -> (dz = constraint(du); z = constraint(u); x = aux_state(u);
C(dz,z,x,p); dz .*= -1.0)
_complete_C(::Nothing,::Val{true},_func_cache) = (du,u,p,t) -> zero_vec!(constraint(du))
_complete_C(C,::Val{false},_func_cache) = (u,p,t) -> (du = deepcopy(u); zero_vec!(du); z = constraint(u);
x = aux_state(u); constraint(du) .= -C(z,x,p); return du)
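# Summary of the completion wrappers above (for orientation): each _complete_* closure
# adapts a user-supplied operator to the full solution vector, writes only into its own
# partition, and negates B1, B2 and C, so that the state part assembles as
# dy/dt = L*y + r1 + r1imp - B1*z and the constraint residual as r2 - B2*y - C*z.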
function (f::ConstrainedODEFunction)(du,u,p,t)
zero_vec!(f.cache)
zero_vec!(du)
f.odef(f.cache,u,p,t)
f.conf(du,u,p,t)
du .+= f.cache
end
(f::ConstrainedODEFunction)(u,p,t) = f.odef(u,p,t) + f.conf(u,p,t)
@inline isstatic(f::ConstrainedODEFunction) = f.param_update_func == DEFAULT_PARAM_UPDATE_FUNC
using DiffEqDevTools
import DiffEqDevTools: recursive_mean
import ConstrainedSystems: @unpack, _l2norm
dts = 1 ./ 2 .^(9:-1:5)
testTol = 0.2
const TOL = 1e-13
@inline compute_l2err(sol,t,sol_analytic) = _l2norm(sol-sol_analytic.(t))
function compute_error(solutions,idx,sol_analytic)
l2err = [compute_l2err(_sol[idx,:],_sol.t,sol_analytic) for _sol in solutions]
error = Dict(:l2 => l2err)
end
function compute𝒪est(solutions,idx,sol_analytic)
#l2err = [compute_l2err(_sol[idx,:],_sol.t,sol_analytic) for _sol in solutions]
#error = Dict(:l2 => l2err)
error = compute_error(solutions,idx,sol_analytic)
𝒪est = Dict((DiffEqDevTools.calc𝒪estimates(p) for p = pairs(error)))
end
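# calc𝒪estimates infers the observed convergence order from the error sequence over the
# halving time steps (in effect, the slope of log(error) against log(dt)); for a p-th
# order method the estimate should approach p, which is what the testset below asserts.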
@testset "Convergence test" begin
### In place ###
# Unconstrained
prob, xexact = ConstrainedSystems.basic_unconstrained_problem(iip=true)
# For now, do this quick and dirty until we figure out how to separate
# state from constraint in sol structure
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 3 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
# Unconstrained problem with only an integrating factor. Should achieve machine
# precision
prob, xexact = ConstrainedSystems.basic_unconstrained_if_problem(iip=true)
# For now, do this quick and dirty until we figure out how to separate
# state from constraint in sol structure
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
error1 = compute_error(solutions1,1,xexact)
error2 = compute_error(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
# IF methods should be exact
@test all(error1[:l2] .< TOL)
@test all(error2[:l2] .< TOL)
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
# Constrained
prob, xexact, yexact = ConstrainedSystems.basic_constrained_problem(iip=true)
# For now, do this quick and dirty until we figure out how to separate
# state from constraint in sol structure
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.cartesian_pendulum_problem(iip=true)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.partitioned_problem(iip=true)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.basic_constrained_if_problem_with_cmatrix(iip=true)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 1 atol=testTol # IFHERK only 1st order convergent on this problem
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
### out of place ###
# Unconstrained
prob, xexact = ConstrainedSystems.basic_unconstrained_problem(iip=false)
# For now, do this quick and dirty until we figure out how to separate
# state from constraint in sol structure
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 3 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
# Unconstrained problem with only an integrating factor. Should achieve machine
# precision
prob, xexact = ConstrainedSystems.basic_unconstrained_if_problem(iip=false)
# For now, do this quick and dirty until we figure out how to separate
# state from constraint in sol structure
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
error1 = compute_error(solutions1,1,xexact)
error2 = compute_error(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
# IF methods should be exact
@test all(error1[:l2] .< TOL)
@test all(error2[:l2] .< TOL)
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
# Constrained
prob, xexact, yexact = ConstrainedSystems.basic_constrained_problem(iip=false)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.cartesian_pendulum_problem(iip=false)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.partitioned_problem(iip=false)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 2 atol=testTol
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
prob, xexact, yexact = ConstrainedSystems.basic_constrained_if_problem_with_cmatrix(iip=false)
solutions1 = [solve(prob,LiskaIFHERK();dt=dts[i]) for i=1:length(dts)]
solutions2 = [solve(prob,IFHEEuler();dt=dts[i]) for i=1:length(dts)]
solutions3 = [solve(prob,HETrapezoidalAB2();dt=dts[i]) for i=1:length(dts)]
𝒪est1 = compute𝒪est(solutions1,1,xexact)
𝒪est2 = compute𝒪est(solutions2,1,xexact)
𝒪est3 = compute𝒪est(solutions3,1,xexact)
@test 𝒪est1[:l2][1] ≈ 1 atol=testTol # IFHERK only 1st order convergent on this problem
@test 𝒪est2[:l2][1] ≈ 1 atol=testTol
@test 𝒪est3[:l2][1] ≈ 2 atol=testTol
end
using ConstrainedSystems
using Test
using CartesianGrids
using LinearAlgebra
using Literate
import ConstrainedSystems: recursivecopy!, needs_iteration, ArrayPartition
const GROUP = get(ENV, "GROUP", "All")
ENV["GKSwstype"] = "nul" # removes GKS warnings during plotting
macro mysafetestset(args...)
name, expr = args
quote
ex = quote
name_str = $$(QuoteNode(name))
expr_str = $$(QuoteNode(expr))
mod = gensym(name_str)
ex2 = quote
@eval module $mod
using Test
@testset $name_str $expr_str
end
nothing
end
eval(ex2)
end
eval(ex)
end
end
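# @mysafetestset mimics SafeTestsets: each testset is evaluated inside a freshly
# gensym'd module, so the included test files cannot leak globals into one another.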
notebookdir = "../examples"
docdir = "../docs/src/manual"
litdir = "./literate"
if GROUP == "All" || GROUP == "Utils"
include("utils.jl")
end
if GROUP == "All" || GROUP == "Types"
include("types.jl")
end
if GROUP == "All" || GROUP == "Saddle"
include("saddle.jl")
end
if GROUP == "All" || GROUP == "Convergence"
include("algconvergence.jl")
end
if GROUP == "All" || GROUP == "Literate"
for (root, dirs, files) in walkdir(litdir)
for file in files
global file_str = "$file"
global body = :(begin include(joinpath($root,$file)) end)
#endswith(file,".jl") && startswith(file,"saddle") && @mysafetestset file_str body
endswith(file,".jl") && @mysafetestset file_str body
end
end
end
if GROUP == "Notebooks"
for (root, dirs, files) in walkdir(litdir)
for file in files
#endswith(file,".jl") && startswith(file,"saddle") && Literate.notebook(joinpath(root, file),notebookdir)
endswith(file,".jl") && Literate.notebook(joinpath(root, file),notebookdir)
end
end
end
if GROUP == "Documentation"
for (root, dirs, files) in walkdir(litdir)
for file in files
endswith(file,".jl") && Literate.markdown(joinpath(root, file),docdir)
end
end
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 6700 | @testset "Saddle-Point Systems" begin
@testset "Matrix tests" begin
A1 = Float64[1 2; 2 1]
B2 = Float64[2 3;-1 -1]
B1 = B2'
C = Matrix{Float64}(undef,2,2)
C .= [5 -2; 3 -4];
A = SaddleSystem(A1,B2,B1,C)
# test inputs and outputs as tuples
rhs = ([1.0,2.0],[3.0,4.0])
sol = (zeros(2),zeros(2))
ldiv!(sol,A,rhs)
@test norm((A*sol)[1]-rhs[1]) < 1e-14
@test norm((A*sol)[2]-rhs[2]) < 1e-14
sol2 = A\rhs
@test norm((A*sol2)[1]-rhs[1]) < 1e-14
@test norm((A*sol2)[2]-rhs[2]) < 1e-14
sol3 = deepcopy(sol)
sol3[2] .= 0.0
constraint_from_state!(sol3,A,rhs)
@test sol2[1] ≈ sol3[1] && sol2[2] ≈ sol3[2]
# test inputs and outputs as vectors
rhs1, rhs2 = rhs
rhsvec = [rhs1;rhs2]
solvec = similar(rhsvec)
solvec = A\rhsvec
@test norm(A*solvec-rhsvec) < 1e-14
solvec2 = deepcopy(solvec)
solvec2[3:4] .= 0.0
constraint_from_state!(solvec2,A,rhsvec)
@test solvec2 ≈ solvec
# test with the other constructor
Aother = SaddleSystem(A1,B2,B1,C,zeros(Float64,2),zeros(Float64,2))
sol_other = Aother\rhs
@test norm((Aother*sol_other)[1]-rhs[1]) < 1e-14
@test norm((Aother*sol_other)[2]-rhs[2]) < 1e-14
# Test with SaddleVector
rhs = SaddleVector(rhs1,rhs2)
As = SaddleSystem(A1,B2,B1,C,rhs)
sol = As\rhs
@test norm(state(As*sol)-state(rhs)) < 1e-14
@test norm(constraint(As*sol)-constraint(rhs)) < 1e-14
end
nx = 130; ny = 130
Lx = 2.0
dx = Lx/(nx-2)
w = Nodes(Dual,(nx,ny))
q = Edges(Primal,w)
L = plan_laplacian(size(w),with_inverse=true)
n = 128
θ = range(0,stop=2π,length=n+1)
R = 0.5
xb = 1.0 .+ R*cos.(θ[1:n])
yb = 1.0 .+ R*sin.(θ[1:n])
ds = (2π/n)*R
X = VectorData(xb,yb)
f = ScalarData(X)
E = Regularize(X,dx;issymmetric=true)
Hmat,Emat = RegularizationMatrix(E,f,w)
@testset "Construction of linear maps" begin
u = similar(w)
wvec = vec(w)
w .= rand(size(w)...)
uvec = zeros(length(w))
Lop = ConstrainedSystems.linear_map(L,w)
u = L*w
uvec = Lop*wvec
@test ConstrainedSystems._wrap_vec(uvec,u) == u
Linv = ConstrainedSystems.linear_inverse_map(L,w)
yvec = Linv*wvec
y = L\w
@test ConstrainedSystems._wrap_vec(yvec,y) == y
# point-wise operators
fvec = vec(f)
f[10] = 1.0
Hop = ConstrainedSystems.linear_map(Hmat,f,w);
y = Hmat*f
yvec = Hop*fvec
@test ConstrainedSystems._wrap_vec(yvec,y) == y
Eop = ConstrainedSystems.linear_map(Emat,w,f)
g = Emat*w
gvec = Eop*vec(w)
@test ConstrainedSystems._wrap_vec(gvec,g) == g
# Test in-place function wrapping
my_ip_fcn!(outp::Vector,inp::Vector) = outp .= 2.0*inp
inp = rand(1000)
outp = zero(inp)
my_ip_fcn!(outp,inp)
my_ip_lm! = ConstrainedSystems._create_vec_function!(my_ip_fcn!,outp,inp)
outp2 = zero(inp)
my_ip_lm!(outp2,inp)
@test outp2 == outp
curl_lm! = ConstrainedSystems._create_vec_function!(CartesianGrids.curl!,q,w)
CartesianGrids.curl!(q,w)
uvec .= wvec
qvec = zeros(length(q))
curl_lm!(qvec,uvec)
@test ConstrainedSystems._wrap_vec(qvec,q) == q
curl_lm! = ConstrainedSystems.linear_map(CartesianGrids.curl!,w,q)
mul!(qvec,curl_lm!,uvec)
@test ConstrainedSystems._wrap_vec(qvec,q) == q
end
ψb = ScalarData(X)
w = Nodes(Dual,(nx,ny))
ψb .= -(xb .- 1)
f .= ones(Float64,n)*ds
ψ = Nodes(Dual,w)
nada = empty(f)
@testset "Field operators" begin
rhs = SaddleVector(w,ψb)
A = SaddleSystem(L,Emat,Hmat,rhs)
A.S*vec(f)
A.S⁻¹*vec(f)
sol = deepcopy(rhs)
ldiv!(sol,A,rhs)
sol2 = A\rhs
@test state(sol2) == state(sol)
@test constraint(sol2) == constraint(sol)
rhs2 = A*sol2
@test norm(constraint(rhs2)-ψb) < 1e-14
fex = -2*cos.(θ[1:n])
@test norm(constraint(sol)-fex*ds) < 0.02
@test ψ[nx,65] ≈ -ψ[1,65]
@test ψ[65,ny] ≈ ψ[65,1]
sol3 = deepcopy(sol2)
constraint(sol3) .= 0.0
@test_throws ErrorException constraint_from_state!(sol3,A,rhs)
end
fv = VectorData(X)
q = Edges(Primal,w)
Hvmat,Evmat = RegularizationMatrix(E,fv,q)
@testset "Vector force data" begin
B₁ᵀ(f) = curl(Hvmat*f)
B₂(w) = -(Evmat*(curl(L\w)))
rhsv = SaddleVector(w,fv)
A = SaddleSystem(I,B₂,B₁ᵀ,rhsv)
sol = A\rhsv
@test state(sol) == zero(w) && constraint(sol) == zero(fv)
end
Ẽ = Regularize(X,dx;weights=ds,filter=true)
H̃mat = RegularizationMatrix(Ẽ,ψb,w)
Ẽmat = InterpolationMatrix(Ẽ,w,ψb)
Pmat = Ẽmat*H̃mat
@testset "Filtering" begin
rhs = SaddleVector(w,ψb)
Afilt = SaddleSystem(L,Emat,Hmat,rhs,filter=Pmat)
sol = Afilt\rhs
fex = -2*cos.(θ[1:n])
@test norm(constraint(sol)-fex*ds) < 0.01
end
@testset "Reduction to unconstrained system" begin
op = linear_map(nothing,nada)
@test size(op) == (0,0)
@test op*nada == ()
op = linear_map(nothing,w,nada)
@test size(op) == (0,length(w))
@test op*vec(w) == ()
op = linear_map(nothing,nada,w)
@test size(op) == (length(w),0)
@test op*nada == vec(zero(w))
rhsnc = SaddleVector(w,nada)
Anc = SaddleSystem(L,rhsnc)
sol = Anc\rhsnc
ψ = state(sol)
fnull = constraint(sol)
@test ψ == L\w
@test fnull == nada
q.u .= 3;
rhsqnc = SaddleVector(q,nada)
Aqnc = SaddleSystem(I,rhsqnc)
sol = Aqnc\rhsqnc
q2 = state(sol)
fnull = constraint(sol)
@test all(q2.u .== 3.0) && all(q2.v .== 0.0)
end
@testset "Tuple of saddle point systems" begin
rhs = SaddleVector(w,ψb)
A = SaddleSystem(L,Emat,Hmat,rhs)
sol = A\rhs
ψ = state(sol)
f = constraint(sol)
rhsnc = SaddleVector(w,nada)
Anc = SaddleSystem(L,rhsnc)
sys = (A,Anc)
sol1, sol2 = sys\(rhs,rhsnc)
@test state(sol1) == ψ && constraint(sol1) == f && state(sol2) == zero(w) && constraint(sol2) == nada
newrhs1,newrhs2 = sys*(sol1,sol2)
@test newrhs1 == A*sol1 && newrhs2 == Anc*sol2
end
@testset "Recursive saddle point with vectors" begin
A1 = Float64[1 2; 2 1]
B21 = Float64[2 3]
B11 = B21'
C1 = Matrix{Float64}(undef,1,1)
C1.= 5
B22 = Float64[-1 -1 3]
B12 = Float64[-1 -1 -2]'
C2 = Matrix{Float64}(undef,1,1)
C2.= -4
rhs11 = [1.0,2.0]
rhs12 = Vector{Float64}(undef,1)
rhs12 .= 3.0
#rhs1 = (rhs11,rhs12)
rhs1 = [rhs11;rhs12]
rhs2 = Vector{Float64}(undef,1)
rhs2 .= 4.0
rhs = (rhs1,rhs2)
A = SaddleSystem(A1,B21,B11,C1)
Abig = SaddleSystem(A,B22,B12,C2,rhs1,rhs2)
sol = Abig\rhs
out = Abig*sol
@test norm(out[1]-rhs1) < 1e-14
end
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 8672 |
ns = 5
nc = 3
na = 2
y = randn(ns)
z = randn(nc)
x = randn(na)
struct MyType{T} <: AbstractVector{T}
data :: Vector{T}
end
Base.similar(A::MyType{T}) where {T} = MyType{T}(similar(A.data))
Base.similar(A::MyType{T},::Type{S}) where {T,S} = MyType(similar(A.data,S))
Base.size(A::MyType) = size(A.data)
Base.getindex(A::MyType, i::Int) = getindex(A.data,i)
Base.setindex!(A::MyType, v, i::Int) = setindex!(A.data,v,i)
Base.IndexStyle(::MyType) = IndexLinear()
Base.BroadcastStyle(::Type{<:MyType}) = Broadcast.ArrayStyle{MyType}()
function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{MyType}},::Type{T}) where {T}
similar(find_mt(bc),T)
end
function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{MyType}})
similar(find_mt(bc))
end
find_mt(bc::Base.Broadcast.Broadcasted) = find_mt(bc.args)
find_mt(args::Tuple) = find_mt(find_mt(args[1]), Base.tail(args))
find_mt(x) = x
find_mt(::Tuple{}) = nothing
find_mt(a::MyType, rest) = a
find_mt(::Any, rest) = find_mt(rest)
@testset "Solution structure" begin
u = solvector(state=y,constraint=z)
@test state(u) === y && constraint(u) === z
@test mainvector(u) === ArrayPartition(y,z)
u = solvector(state=y,constraint=z,aux_state=x)
@test state(u) === y
@test constraint(u) === z
@test aux_state(u) === x
u = solvector(state=y)
@test state(u) === y
@test constraint(u) == empty(y)
@test aux_state(u) == nothing
@test mainvector(u) == ArrayPartition(y,empty(y))
e = ConstrainedSystems._empty(y)
@test isempty(e)
u2 = solvector(state=MyType(zero(y)),constraint=zero(z),aux_state=zero(x))
u2p = u2 .+ 1
@test typeof(state(u2p)) <: MyType
@test typeof(constraint(u2p)) <: typeof(z)
@test typeof(aux_state(u2p)) <: typeof(x)
@test state(u2p) == MyType(fill(1.0,size(y)))
@test constraint(u2p) == fill(1.0,size(z))
@test aux_state(u2p) == fill(1.0,size(x))
u2m = u2p .* 2
@test typeof(state(u2m)) <: MyType
@test state(u2m) == MyType(fill(2.0,size(y)))
@test constraint(u2m) == fill(2.0,size(z))
@test aux_state(u2m) == fill(2.0,size(x))
u2m = similar(u2p)
u2m .= u2p .* 2 .- 3 .+ u2p ./ 2
@test state(u2m) == MyType(fill(-0.5,size(y)))
@test constraint(u2m) == fill(-0.5,size(z))
@test aux_state(u2m) == fill(-0.5,size(x))
end
struct MyType1{F}
x :: F
end
struct MyType2{F}
x :: F
end
@testset "Solution structure with user type" begin
a = MyType1(x)
b = MyType2(y)
@test isempty(ConstrainedSystems._empty(a))
u = solvector(state=a)
u = solvector(state=a,constraint=b)
u = solvector(state=a,constraint=b,aux_state=a)
end
@testset "Function structure" begin
u = solvector(state=y,constraint=z,aux_state=x)
du = similar(u)
fill!(du,0.0)
function state_r1!(dy,y,x,p,t)
fill!(dy,1.0)
end
function aux_r1!(dy,u,p,t)
fill!(dy,2.0)
end
r1! = ArrayPartition((state_r1!,aux_r1!))
r1_c! = ConstrainedSystems._complete_r1(r1!,Val(true),_func_cache=du)
r1_c!(du,u,nothing,0.0)
@test state(du) == fill(1.0,length(y))
@test constraint(du) == fill(0.0,length(z))
@test aux_state(du) == fill(2.0,length(x))
function r2!(dz,x,p,t)
fill!(dz,3.0)
end
fill!(du,0.0)
r2_c! = ConstrainedSystems._complete_r2(r2!,Val(true),_func_cache=du)
r2_c!(du,u,nothing,0.0)
@test state(du) == fill(0.0,length(y))
@test constraint(du) == fill(3.0,length(z))
@test aux_state(du) == fill(0.0,length(x))
B1 = ones(Float64,length(y),length(z))
B2 = ones(Float64,length(z),length(y))
function B1!(dy,z,x,p)
dy .= p[1]*z
end
function B2!(dz,y,x,p)
dz .= p[2]*y
end
fill!(du,0.0)
fill!(u,1.0)
p = [B1,B2]
B1_c! = ConstrainedSystems._complete_B1(B1!,Val(true),_func_cache=du)
B1_c!(du,u,p,0.0)
@test state(du) == fill(-float(length(z)),length(y))
@test constraint(du) == fill(0.0,length(z))
@test aux_state(du) == fill(0.0,length(x))
fill!(du,0.0)
fill!(u,1.0)
p = [B1,B2]
B2_c! = ConstrainedSystems._complete_B2(B2!,Val(true),_func_cache=du)
B2_c!(du,u,p,0.0)
@test state(du) == fill(0.0,length(y))
@test constraint(du) == fill(-float(length(y)),length(z))
@test aux_state(du) == fill(0.0,length(x))
end
@testset "ConstrainedODEFunction constrained" begin
ns = 5
nc = 2
na = 3
y = ones(Float64,ns)
z = ones(Float64,nc)
x = ones(Float64,na)
B1 = Array{Float64}(undef,ns,nc)
B2 = Array{Float64}(undef,nc,ns)
C = zeros(nc,nc)
B1 .= 1:ns
B2 .= transpose(B1)
p = [B1,B2,C]
ode_rhs!(dy,y,x,p,t) = dy .= 1.01*y
constraint_force!(dy,z,x,p) = dy .= p[1]*z
constraint_rhs!(dz,x,p,t) = dz .= 1.0
constraint_op!(dz,y,x,p) = dz .= p[2]*y
constraint_reg!(dz,z,x,p) = dz .= p[3]*z
u₀ = solvector(state=y,constraint=z)
du = zero(u₀)
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,
constraint_force!,constraint_op!,_func_cache=u₀)
f(du,u₀,p,0.0)
@test state(du) == 1.01*y .- B1*z
@test constraint(du) == 1.0 .- B2*y
ode_rhs(y,x,p,t) = 1.01*y
constraint_force(z,x,p) = p[1]*z
constraint_rhs(x,p,t) = fill(1.0,nc)
constraint_op(y,x,p) = p[2]*y
constraint_reg(z,x,p) = p[3]*z
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,
constraint_force,constraint_op)
du = f(u₀,p,0.0)
@test state(du) == 1.01*y .- B1*z
@test constraint(du) == 1.0 .- B2*y
L = 2*I
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,
constraint_force!,constraint_op!,L,_func_cache=u₀)
f(du,u₀,p,0.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y atol=1e-12
@test state(du) ≈ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
@test constraint(du) ≈ 1.0 .- B2*y atol=1e-12
@test constraint(du) ≈ [-14.0, -14.0] atol=1e-12
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,
constraint_force!,constraint_op!,L,constraint_reg!,_func_cache=u₀)
f(du,u₀,p,0.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y atol=1e-12
@test state(du) ≈ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
@test constraint(du) ≈ 1.0 .- B2*y .- C*z atol=1e-12
@test constraint(du) ≈ [-14.0, -14.0] atol=1e-12
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,
constraint_force,constraint_op,L)
du = f(u₀,p,0.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y atol=1e-12
@test state(du) ≈ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
@test constraint(du) ≈ 1.0 .- B2*y atol=1e-12
@test constraint(du) ≈ [-14.0, -14.0] atol=1e-12
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,
constraint_force,constraint_op,L,constraint_reg)
du = f(u₀,p,0.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y atol=1e-12
@test state(du) ≈ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
@test constraint(du) ≈ 1.0 .- B2*y .- C*z atol=1e-12
@test constraint(du) ≈ [-14.0, -14.0] atol=1e-12
r1_implicit!(dy,x,p,t) = dy .= 1.0 .+ 0.5*t
r1_implicit(x,p,t) = 1.0 + 0.5*t
f = ConstrainedODEFunction(ode_rhs!,constraint_rhs!,
constraint_force!,constraint_op!,L,constraint_reg!,r1imp=r1_implicit!,_func_cache=u₀)
f(du,u₀,p,1.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y .+ 1.5 atol=1e-12
@test state(du) ≈ 1.5 .+ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
f = ConstrainedODEFunction(ode_rhs,constraint_rhs,
constraint_force,constraint_op,L,constraint_reg,r1imp=r1_implicit)
du = f(u₀,p,1.0)
@test state(du) ≈ 1.01*y .- B1*z .+ L*y .+ 1.5 atol=1e-12
@test state(du) ≈ 1.5 .+ [1.01,-0.99,-2.99,-4.99,-6.99] atol=1e-12
end
@testset "ConstrainedODEFunction unconstrained" begin
ns = 5
y = ones(Float64,ns)
ode_rhs!(dy,y,x,p,t) = dy .= 1.01*y
ode_rhs(y,x,p,t) = 1.01*y
L = 2*I
p = []
u₀ = solvector(state=y)
du = zero(u₀)
f = ConstrainedODEFunction(ode_rhs!,L,_func_cache=u₀)
f(du,u₀,p,0.0)
@test state(du) == 1.01*y .+ L*y
@test state(du) ≈ fill(3.01,ns) atol=1e-12
@test constraint(du) == empty(y)
# Test that unconstrained systems create a saddle system that works properly
# Should only invert the upper left operator
u = deepcopy(u₀)
S = SaddleSystem(2*I,f,p,p,deepcopy(du),Direct)
u .= S\u₀
@test state(u) ≈ fill(0.5,ns) atol=1e-12
@test constraint(u) == empty(y)
f = ConstrainedODEFunction(ode_rhs,L)
du = f(u₀,p,0.0)
@test state(du) == 1.01*y .+ L*y
@test state(du) ≈ fill(3.01,ns) atol=1e-12
@test constraint(du) == empty(y)
# Test that unconstrained systems create a saddle system that works properly
# Should only invert the upper left operator
u = deepcopy(u₀)
S = SaddleSystem(2*I,f,p,p,deepcopy(du),Direct)
u .= S\u₀
@test state(u) ≈ fill(0.5,ns) atol=1e-12
@test constraint(u) == empty(y)
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 1185 |
const MAXMEM = 100
@testset "Iteration test" begin
Δt = 1e-2
prob1, _, _ = ConstrainedSystems.basic_constrained_problem()
integrator = ConstrainedSystems.init(prob1, LiskaIFHERK(),dt=Δt)
@test needs_iteration(integrator.f,integrator.u,integrator.p,integrator.u) == false
prob2, _, _ = ConstrainedSystems.cartesian_pendulum_problem()
integrator = ConstrainedSystems.init(prob2, LiskaIFHERK(),dt=Δt)
@test needs_iteration(integrator.f,integrator.u,integrator.p,integrator.u) == true
end
@testset "Recursive copy" begin
struct MyStruct{T}
a :: T
end
n = 1000
a = rand(n,n)
z = similar(a)
s1 = MyStruct(a)
s2 = MyStruct(z)
recursivecopy!(s2,s1)
@test s2.a == s1.a
@test !(s2.a === s1.a)
@test @allocated(recursivecopy!(s2,s1)) < MAXMEM
ss1 = MyStruct(s1)
ss2 = MyStruct(MyStruct(z))
recursivecopy!(ss2,ss1)
@test ss2.a.a == ss1.a.a
@test !(ss2.a.a === ss1.a.a)
@test @allocated(recursivecopy!(s2,s1)) < MAXMEM
t1 = [s1,s1]
t2 = [MyStruct(z),MyStruct(z)]
recursivecopy!(t2,t1)
@test t2[1].a == t1[1].a
@test t2[2].a == t1[2].a
@test !(t2[1].a === t1[1].a)
@test @allocated(recursivecopy!(s2,s1)) < MAXMEM
end
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 6209 | # # Saddle point systems
#md # ```@meta
#md # CurrentModule = ConstrainedSystems
#md # ```
#=
```math
\def\ddt#1{\frac{\mathrm{d}#1}{\mathrm{d}t}}
\renewcommand{\vec}{\boldsymbol}
\newcommand{\uvec}[1]{\vec{\hat{#1}}}
\newcommand{\utangent}{\uvec{\tau}}
\newcommand{\unormal}{\uvec{n}}
\renewcommand{\d}{\,\mathrm{d}}
```
=#
#=
Saddle systems comprise an important part of solving mechanics problems with
constraints. In such problems, there is an underlying system to solve, and the
addition of constraints requires that the system is subjected to additional
forces (constraint forces, or Lagrange multipliers) that enforce these constraints
in the system. Examples of such constrained systems are the divergence-free
velocity constraint in incompressible flow (for which pressure is the associated
Lagrange multiplier field), the no-slip and/or no-flow-through condition in
general fluid systems adjacent to impenetrable bodies, and joint constraints in
rigid-body mechanics.
A general saddle-point system has the form
$$\left[ \begin{array}{cc} A & B_1^T \\ B_2 & C\end{array}\right] \left(\begin{array}{c}u\\f \end{array}\right) = \left(\begin{array}{c}r_1\\r_2 \end{array}\right)$$
We are primarily interested in cases when the operator $A$ is symmetric and positive semi-definite,
which is fairly typical. It is also fairly common for $B_1 = B_2$, so that the
whole system is symmetric.
[ConstrainedSystems.jl](https://github.com/JuliaIBPM/ConstrainedSystems.jl) allows us to solve such systems for $u$ and $f$ in a fairly easy way.
We need only to provide rules for how to evaluate the actions of the various
operators in the system. Let us use an example to show how this can be done.
=#
using ConstrainedSystems
using CartesianGrids
using Plots
#=
## Translating cylinder in potential flow
In irrotational, incompressible flow, the streamfunction $\psi$ satisfies Laplace's equation,
$$\nabla^2 \psi = 0$$
On the surface of an impenetrable body, the streamfunction must obey the constraint
$$\psi = \psi_b$$
where $\psi_b$ is the streamfunction associated with the body's motion. Let us
suppose the body is moving vertically with velocity 1. Then $\psi_b = -x$ for all
points inside or on the surface of the body. Thus, the streamfunction field outside
this body is governed by Laplace's equation subject to the constraint.
Let us solve this problem on a staggered grid, using the tools discussed in
[CartesianGrids](https://juliaibpm.github.io/CartesianGrids.jl/latest/), including the regularization and interpolation methods to
immerse the body shape on the grid. Then our saddle-point system has the form
$$\left[ \begin{array}{cc} L & R \\ E & 0\end{array}\right] \left(\begin{array}{c}\psi\\f \end{array}\right) = \left(\begin{array}{c}0\\\psi_b \end{array}\right)$$
where $L$ is the discrete Laplacian, $R$ is the regularization operator, and
$E$ is the interpolation operator.
Physically, $f$ isn't really a force here, but
rather, represents the strengths of distributed singularities on the surface.
In fact, this strength represents the jump in normal derivative of $\psi$ across
the surface. Since this normal derivative is equivalent to the tangential velocity,
$f$ is the strength of the bound vortex sheet on the surface. This will be useful
to know when we check the value of $f$ obtained in our solution.
First, let us set up the body, centered at $(1,1)$ and of radius $1/2$. We will
also initialize a data structure for the force:
=#
n = 128; θ = range(0,stop=2π,length=n+1);
xb = 1.0 .+ 0.5*cos.(θ[1:n]); yb = 1.0 .+ 0.5*sin.(θ[1:n]);
X = VectorData(xb,yb);
ψb = ScalarData(X);
f = similar(ψb);
#=
Now let's set up a grid of size $102\times 102$ (including the usual layer
of ghost cells) and physical dimensions $2\times 2$.
=#
nx = 102; ny = 102; Lx = 2.0; dx = Lx/(nx-2);
w = Nodes(Dual,(nx,ny));
ψ = similar(w);
#=
We need to set up the operators now. First, the Laplacian:
=#
L = plan_laplacian(size(w),with_inverse=true)
#=
Note that we have made sure that this operator has an inverse. It is important
that this operator, which represents the `A` matrix in our saddle system, comes
with an associated backslash `\` operation to carry out the inverse.
Now we need to set up the regularization `R` and interpolation `E` operators.
=#
regop = Regularize(X,dx;issymmetric=true)
Rmat, Emat = RegularizationMatrix(regop,ψb,w);
#=
Now we are ready to set up the system. The solution and right-hand side vectors
are set up using `SaddleVector`.
=#
rhs = SaddleVector(w,ψb)
sol = SaddleVector(ψ,f)
#=
and the saddle system is then set up with the three operators; the $C$ operator
is presumed to be zero when it is not provided.
=#
A = SaddleSystem(L,Emat,Rmat,rhs)
#=
Note that all of the operators we have provided are either matrices (like `Emat` and `Rmat`)
or functions or function-like operators (like `L`). The `SaddleSystem` constructor
allows either. However, the order is important: we must supply $A$, $B_2$, $B_1^T$, and possibly $C$, in that order.
Let's solve the system. We need to set the right-hand side. We will set `ψb`,
but this will also change `rhs`, since that vector is pointing to the same object.
=#
ψb .= -(xb.-1);
#=
The right-hand side of the Laplace equation is zero. The right-hand side of the
constraint is the specified streamfunction on the body. Note that we have
subtracted the circle center from the $x$ positions on the body. The reason for
this will be discussed in a moment.
We solve the system with the convenient shorthand of the backslash:
=#
sol .= A\rhs # hide
@time sol .= A\rhs
#=
Just to point out how fast it can be, we have also timed it. It's pretty fast.
We can obtain the state vector and the constraint vector from `sol` using some
convenience functions `state(sol)` and `constraint(sol)`.
Now, let's plot the solution in physical space. We'll plot the body shape for
reference, also.
=#
xg, yg = coordinates(w,dx=dx)
plot(xg,yg,state(sol),xlim=(-Inf,Inf),ylim=(-Inf,Inf))
plot!(xb,yb,fillcolor=:black,fillrange=0,fillalpha=0.25,linecolor=:black)
#=
The solution shows the streamlines for a circle in vertical motion, as expected.
All of the streamlines inside the circle are vertical.
=#
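#=
As a sanity check (this mirrors the package's test suite rather than anything
derived above), the exact bound vortex sheet strength for unit vertical motion
is $f = -2\cos\theta$, scaled by the surface segment length `ds`, which we
compute here:
=#
ds = (2π/n)*0.5                 # arc length of each surface segment
fex = -2*cos.(θ[1:n])           # exact sheet strength for unit vertical motion
maximum(abs, constraint(sol) - fex*ds)  # should be small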
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | code | 9030 | # # Time marching
#md # ```@meta
#md # CurrentModule = ConstrainedSystems
#md # ```
#=
```math
\def\ddt#1{\frac{\mathrm{d}#1}{\mathrm{d}t}}
\renewcommand{\vec}{\boldsymbol}
\newcommand{\uvec}[1]{\vec{\hat{#1}}}
\newcommand{\utangent}{\uvec{\tau}}
\newcommand{\unormal}{\uvec{n}}
\renewcommand{\d}{\,\mathrm{d}}
```
=#
#=
[ConstrainedSystems.jl](https://github.com/JuliaIBPM/ConstrainedSystems.jl) is equipped with tools for solving systems of equations of the
general form of half-explicit differential-algebraic equations,
$$\ddt y = L y - B_1^T(y,t) z + r_1(y,t), \quad B_2(y,t) y + C(y,t) z = r_2(t), \quad y(0) = y_0$$
where $z$ is the Lagrange multiplier for enforcing the constraints on $y$. Note
that the constraint operators may depend on the state and on time. The linear operator $L$ may be a matrix or a scalar, but is generally independent of time. (The method of integrating factors can deal with time-dependent $L$, but we don't encounter such systems in the constrained systems context so we won't discuss them.) Our objective is to solve
for $y(t)$ and $z(t)$.
=#
using ConstrainedSystems
using CartesianGrids
using Plots
#=
## Constrained integrating factor systems
Let's demonstrate this on the example of heat diffusion from a circular ring whose temperature
is held constant. In this case, $L$ is the discrete Laplace operator times the heat diffusivity,
$r_1$ is zero (in the absence of volumetric heating sources), and $r_2$ is the temperature of
the ring. The operators $B_1^T$ and $B_2$ will be the regularization and interpolation
operators between discrete point-wise data on the ring and the field data. We will also
include a $C$ operator that slightly regularizes the constraint.
The ring will have radius $1/2$ and fixed temperature $1$, and
the heat diffusivity is $1$. (In other words, the problem has been non-dimensionalized
by the diameter of the circle, the dimensional ring temperature, and the dimensional diffusivity.)
First, we will construct a field to accept the temperature on
=#
nx = 129; ny = 129; Lx = 2.0; Δx = Lx/(nx-2);
w₀ = Nodes(Dual,(nx,ny)); # field initial condition
#=
Now set up a ring of points on the circle at center $(1,1)$.
=#
n = 128; θ = range(0,stop=2π,length=n+1);
R = 0.5; xb = 1.0 .+ R*cos.(θ); yb = 1.0 .+ R*sin.(θ);
X = VectorData(xb[1:n],yb[1:n]);
z = ScalarData(X); # to be used as the Lagrange multiplier
#=
Together, `w₀` and `z` comprise the initial solution vector:
=#
u₀ = solvector(state=w₀,constraint=z)
#=
Now set up the operators. We first set up the linear operator, a Laplacian endowed
with its inverse:
=#
L = plan_laplacian(w₀,with_inverse=true)
#=
Now the right-hand side operators for the ODEs and constraints. Both must take a standard form:
$r_1$ must accept the state (of the same type as `w₀`), the auxiliary state `x` (not used in this problem), the parameters `p` (also unused here), and the time `t`; $r_2$ must accept `x`, `p`, and `t`. We will implement these in in-place form to make
it more efficient, so each also takes its output as a first argument: $r_1$ will fill rate-of-change data of the same type as `w₀`
and $r_2$ will fill data `dz` of the same type as `z`
=#
diffusion_rhs!(dw::Nodes,w::Nodes,x,p,t) = fill!(dw,0.0) # this is r1
boundary_constraint_rhs!(dz::ScalarData,x,p,t) = fill!(dz,1.0) # this is r2, and sets uniformly to 1
#=
Construct the regularization and interpolation operators in their usual
symmetric form, and then set up routines that will provide these operators inside the integrator:
=#
reg = Regularize(X,Δx;issymmetric=true)
Hmat, Emat = RegularizationMatrix(reg,z,w₀)
boundary_constraint_force!(dw::Nodes,z::ScalarData,x,p) = dw .= Hmat*z # This is B1T
boundary_constraint_op!(dz::ScalarData,y::Nodes,x,p) = dz .= Emat*y; # This is B2
#=
Construct a constraint regularization operator (the $C$ operator)
=#
boundary_constraint_reg!(dz::ScalarData,z::ScalarData,x,p) = dz .= -0.1*z; # This is C
#=
Note that these last two functions are also in-place, and return data of the same
respective types as $r_1$ and $r_2$.
All of these are assembled into a single `ConstrainedODEFunction`:
=#
f = ConstrainedODEFunction(diffusion_rhs!,boundary_constraint_rhs!,boundary_constraint_force!,
boundary_constraint_op!,L,boundary_constraint_reg!,_func_cache=u₀)
#=
With the last argument, we supplied a cache variable to enable evaluation of this function.
Now set up the problem, using the same basic notation as in [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl).
=#
tspan = (0.0,20.0)
prob = ODEProblem(f,u₀,tspan)
#=
Now solve it. We will set the time-step size to a large value ($1.0$) for demonstration purposes. The method remains stable for any choice.
=#
Δt = 1.0
sol = solve(prob,IFHEEuler(),dt=Δt);
#=
Now let's plot it
=#
xg, yg = coordinates(w₀,dx=Δx);
plot(xg,yg,state(sol.u[end]))
plot!(xb,yb,linecolor=:black,linewidth=1.5)
#=
From a side view, we can see that it enforces the boundary condition:
=#
plot(xg,state(sol.u[end])[65,:],xlabel="x",ylabel="u(x,1)")
#=
The Lagrange multiplier distribution is nearly uniform
=#
plot(constraint(sol.u[end]),ylim=(-0.5,0))
#=
## Systems with variable constraints
In some cases, the constraint operators may vary with the state vector. A
good example of this is a swinging pendulum, with its equations expressed in
Cartesian coordinates. The constraint we wish to enforce is that the length
of the pendulum is constant: $x^2+y^2 = l^2$. Though not mathematically necessary,
it also helps to enforce a tangency condition, $xu + yv = 0$, where $u$ and $v$
are the rates of change of $x$ and $y$. Note that this is simply the derivative
of the first constraint. (If expressed in polar coordinates, the constraint is enforced
automatically, simply by expressing the equations for $\theta$.)
The governing equations are
$$\ddt x = u - x \mu ,\, \ddt y = v - y \mu , \, \ddt u = -x \lambda , \, \ddt v = -g - y\lambda$$
with Lagrange multipliers $\mu$ and $\lambda$, and the constraints are
$$x^2+y^2 = l^2,\, xu + yv = 0$$
These are equivalently expressed as
$$\left[ \begin{array}{cccc} x & y & 0 & 0\end{array}\right]\left[ \begin{array}{c} x \\ y \\ u \\ v \end{array}\right] = l^2$$
and
$$\left[ \begin{array}{cccc} 0 & 0 & x & y\end{array}\right]\left[ \begin{array}{c} x \\ y \\ u \\ v \end{array}\right] = 0$$
The operators $B_1^T$ and $B_2$ are thus
$$B_1^T = \left[ \begin{array}{cc} x & 0 \\ y & 0 \\ 0 & x \\ 0 & y \end{array}\right]$$
and
$$B_2 = \left[ \begin{array}{cccc} x & y & 0 & 0 \\ 0 & 0 & x & y \end{array}\right]$$
That is, the operators are dependent on the state. In this package, we handle this
by providing a parameter that can be dynamically updated. We will get to that later. First,
let's set up the physical parameters
=#
l = 1.0
g = 1.0
params = [l,g]
#=
and initial condition:
=#
θ₀ = π/2
y₀ = Float64[l*sin(θ₀),-l*cos(θ₀),0,0]
z₀ = Float64[0.0, 0.0] # Lagrange multipliers
u₀ = solvector(state=y₀,constraint=z₀)
#=
Now, we will set up the basic form of the constraint operators and assemble
these with the other parameters with the help of a type we'll define here:
=#
struct ProblemParams{P,BT1,BT2}
params :: P
B₁ᵀ :: BT1
B₂ :: BT2
end
B1T = zeros(4,2) # set to zeros for now
B2 = zeros(2,4) # set to zeros for now
p₀ = ProblemParams(params,B1T,B2);
#=
We will now define the operators of the problem, all in in-place form:
=#
function pendulum_rhs!(dy::Vector{Float64},y::Vector{Float64},x,p,t)
dy[1] = y[3]
dy[2] = y[4]
dy[3] = 0.0
dy[4] = -p.params[2]
return dy
end # r1
function length_constraint_rhs!(dz::Vector{Float64},x,p,t)
dz[1] = p.params[1]^2
dz[2] = 0.0
return dz
end # r2
# The B1 function. This returns B1*z. It uses an existing B1 supplied by p.
function length_constraint_force!(dy::Vector{Float64},z::Vector{Float64},x,p)
dy .= p.B₁ᵀ*z
end
# The B2 function. This returns B2*y. It uses an existing B2 supplied by p.
function length_constraint_op!(dz::Vector{Float64},y::Vector{Float64},x,p)
dz .= p.B₂*y
end
#=
Now, we need to provide a means of updating the parameter structure with
the current state of the system. This is done in-place, just as for the
other operators:
=#
function update_p!(q,u,p,t)
y = state(u)
fill!(q.B₁ᵀ,0.0)
fill!(q.B₂,0.0)
q.B₁ᵀ[1,1] = y[1]; q.B₁ᵀ[2,1] = y[2]; q.B₁ᵀ[3,2] = y[1]; q.B₁ᵀ[4,2] = y[2]
q.B₂[1,1] = y[1]; q.B₂[1,2] = y[2]; q.B₂[2,3] = y[1]; q.B₂[2,4] = y[2]
return q
end
#=
Finally, assemble all of them together:
=#
f = ConstrainedODEFunction(pendulum_rhs!,length_constraint_rhs!,length_constraint_force!,
length_constraint_op!,
_func_cache=deepcopy(u₀),param_update_func=update_p!)
#=
Now solve the system
=#
tspan = (0.0,10.0)
prob = ODEProblem(f,u₀,tspan,p₀)
Δt = 1e-2
sol = solve(prob,LiskaIFHERK(),dt=Δt);
#=
Plot the solution
=#
plot(sol.t,sol[1,:],label="x",xlabel="t")
plot!(sol.t,sol[2,:],label="y")
#=
and here is the trajectory
=#
plot(sol[1,:],sol[2,:],ratio=1,legend=:false,title="Trajectory",xlabel="x",ylabel="y")
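#=
As a quick sanity check, the length constraint $x^2 + y^2 = l^2$ should hold
along the whole trajectory; the residual below should stay small:
=#
maximum(abs.(sol[1,:].^2 .+ sol[2,:].^2 .- l^2))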
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | docs | 2193 | # ConstrainedSystems.jl
_Tools for solving constrained dynamical systems_
| Documentation | Build Status |
|:---:|:---:|
| [](https://JuliaIBPM.github.io/ConstrainedSystems.jl/stable) [](https://JuliaIBPM.github.io/ConstrainedSystems.jl/dev) | [](https://github.com/JuliaIBPM/ConstrainedSystems.jl/actions) [](https://codecov.io/gh/JuliaIBPM/ConstrainedSystems.jl) |
This package contains several tools for solving and advancing (large-scale) dynamical systems with constraints. These systems generically have the form
dy/dt = L y - B<sub>1</sub><sup>T</sup> z + r<sub>1</sub>(y,t)
B<sub>2</sub> y + C z = r<sub>2</sub>(y,t)
y(0) = y<sub>0</sub>
where y is a state vector, L is a linear operator with an associated matrix exponential (integrating factor), and z is a constraint force vector (i.e., Lagrange multipliers).
Some of the key components of this package are
* Tools for solving linear algebra problems with constraints and associated Lagrange multipliers, known generically as *saddle point systems*. The sizes of these systems might be large.
* Time integrators that can incorporate these constraints, such as half-explicit Runge-Kutta (HERK) and integrating factor Runge-Kutta (IFRK), or their combination (IF-HERK). These
extend the tools in the [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl) package, and utilize the same basic syntax for setting
up a problem and solving it.
* Allowance for variable constraint operators B<sub>1</sub><sup>T</sup> and B<sub>2</sub>,
through the use of a variable parameter argument and an associated parameter update
function.
* The ability to add an auxiliary (unconstrained) system of equations that the
constraint operators B<sub>1</sub><sup>T</sup> and B<sub>2</sub> depend upon.
The package is agnostic to the type of systems, and might arise from, e.g., fluid dynamics or rigid-body mechanics.
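A minimal sketch of the workflow, condensed from the package documentation. The
right-hand-side and constraint functions (`r1!`, `r2!`, `B1T!`, `B2!`) and the data
(`y0`, `z0`, `p0`) are user-supplied placeholders:
```julia
using ConstrainedSystems
u0 = solvector(state=y0, constraint=z0)
f = ConstrainedODEFunction(r1!, r2!, B1T!, B2!, _func_cache=deepcopy(u0))
prob = ODEProblem(f, u0, (0.0, 10.0), p0)
sol = solve(prob, LiskaIFHERK(), dt=1e-2)
```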
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | docs | 2218 | # ConstrainedSystems.jl
*tools for solving constrained dynamical systems*
```math
\def\ddt#1{\frac{\mathrm{d}#1}{\mathrm{d}t}}
\renewcommand{\vec}{\boldsymbol}
\newcommand{\uvec}[1]{\vec{\hat{#1}}}
\newcommand{\utangent}{\uvec{\tau}}
\newcommand{\unormal}{\uvec{n}}
\renewcommand{\d}{\,\mathrm{d}}
```
This package contains several tools for solving and advancing (large-scale) dynamical systems with constraints. These systems generically have the form
$$\ddt{y} = L y - B_{1}^{T} z + r_{1}(y,t), \quad B_{2} y = r_{2}(t), \quad y(0) = y_{0}$$
where $y$ is a state vector, $L$ is a linear operator with an associated matrix exponential (integrating factor), and $z$ is a constraint force vector (i.e., Lagrange multipliers). Systems of this type might arise from, e.g., incompressible fluid dynamics, rigid-body mechanics,
or couplings of such systems.
Some of the key components of this package are
* Tools for solving linear algebra problems with constraints and associated Lagrange multipliers, known generically as *saddle point systems*. The sizes of these systems might be large.
* Time integrators that can incorporate these constraints, such as half-explicit Runge-Kutta (HERK) and integrating factor Runge-Kutta (IFRK), or their combination (IF-HERK). These
extend the tools in the [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl) package, and utilize the same basic syntax for setting
up a problem and solving it.
* Allowance for variable constraint operators $B_1^T$ and $B_2$,
through the use of a variable parameter argument and an associated parameter update
function.
* The ability to add an auxiliary (unconstrained) system of equations and state that the
constraint operators $B_1^T$ and $B_2$ and the right-hand side $r_2$ depend upon.
## Installation
This package works on Julia `1.6` and above and is registered in the general Julia registry. To install from the REPL, type
e.g.,
```julia
] add ConstrainedSystems
```
Then, in any version, type
```julia
julia> using ConstrainedSystems
```
The plots in this documentation are generated using [Plots.jl](http://docs.juliaplots.org/latest/). You might want to install that, too, to follow the examples.
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | docs | 153 | # Index
```@meta
DocTestSetup = quote
using ConstrainedSystems
end
```
```@autodocs
Modules = [ConstrainedSystems]
Order = [:type, :function]
```
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | docs | 6661 | ```@meta
EditURL = "../../../test/literate/saddlesystems.jl"
```
# Saddle point systems
```@meta
CurrentModule = ConstrainedSystems
```
```math
\def\ddt#1{\frac{\mathrm{d}#1}{\mathrm{d}t}}
\renewcommand{\vec}{\boldsymbol}
\newcommand{\uvec}[1]{\vec{\hat{#1}}}
\newcommand{\utangent}{\uvec{\tau}}
\newcommand{\unormal}{\uvec{n}}
\renewcommand{\d}{\,\mathrm{d}}
```
Saddle systems comprise an important part of solving mechanics problems with
constraints. In such problems, there is an underlying system to solve, and the
addition of constraints requires that the system is subjected to additional
forces (constraint forces, or Lagrange multipliers) that enforce these constraints
in the system. Examples of such constrained systems are the divergence-free
velocity constraint in incompressible flow (for which pressure is the associated
Lagrange multiplier field), the no-slip and/or no-flow-through condition in
general fluid systems adjacent to impenetrable bodies, and joint constraints in
rigid-body mechanics.
A general saddle-point system has the form
$$\left[ \begin{array}{cc} A & B_1^T \\ B_2 & C\end{array}\right] \left(\begin{array}{c}u\\f \end{array}\right) = \left(\begin{array}{c}r_1\\r_2 \end{array}\right)$$
We are primarily interested in cases when the operator $A$ is symmetric and positive semi-definite,
which is fairly typical. It is also fairly common for $B_1 = B_2$, so that the
whole system is symmetric.
[ConstrainedSystems.jl](https://github.com/JuliaIBPM/ConstrainedSystems.jl) allows us to solve such systems for $u$ and $f$ in a fairly easy way.
We need only to provide rules for how to evaluate the actions of the various
operators in the system. Let us use an example to show how this can be done.
````@example saddlesystems
using ConstrainedSystems
using CartesianGrids
using Plots
````
## Translating cylinder in potential flow
In irrotational, incompressible flow, the streamfunction $\psi$ satisfies Laplace's equation,
$$\nabla^2 \psi = 0$$
On the surface of an impenetrable body, the streamfunction must obey the constraint
$$\psi = \psi_b$$
where $\psi_b$ is the streamfunction associated with the body's motion. Let us
suppose the body is moving vertically with velocity 1. Then $\psi_b = -x$ for all
points inside or on the surface of the body. Thus, the streamfunction field outside
this body is governed by Laplace's equation subject to the constraint.
Let us solve this problem on a staggered grid, using the tools discussed in
[CartesianGrids](https://juliaibpm.github.io/CartesianGrids.jl/latest/), including the regularization and interpolation methods to
immerse the body shape on the grid. Then our saddle-point system has the form
$$\left[ \begin{array}{cc} L & R \\ E & 0\end{array}\right] \left(\begin{array}{c}\psi\\f \end{array}\right) = \left(\begin{array}{c}0\\\psi_b \end{array}\right)$$
where $L$ is the discrete Laplacian, $R$ is the regularization operator, and
$E$ is the interpolation operator.
Physically, $f$ isn't really a force here, but
rather, represents the strengths of distributed singularities on the surface.
In fact, this strength represents the jump in normal derivative of $\psi$ across
the surface. Since this normal derivative is equivalent to the tangential velocity,
$f$ is the strength of the bound vortex sheet on the surface. This will be useful
to know when we check the value of $f$ obtained in our solution.
First, let us set up the body, centered at $(1,1)$ and of radius $1/2$. We will
also initialize a data structure for the force:
````@example saddlesystems
n = 128; θ = range(0,stop=2π,length=n+1);
xb = 1.0 .+ 0.5*cos.(θ[1:n]); yb = 1.0 .+ 0.5*sin.(θ[1:n]);
X = VectorData(xb,yb);
ψb = ScalarData(X);
f = similar(ψb);
nothing #hide
````
Now let's set up a grid of size $102\times 102$ (including the usual layer
of ghost cells) and physical dimensions $2\times 2$.
````@example saddlesystems
nx = 102; ny = 102; Lx = 2.0; dx = Lx/(nx-2);
w = Nodes(Dual,(nx,ny));
ψ = similar(w);
nothing #hide
````
We need to set up the operators now. First, the Laplacian:
````@example saddlesystems
L = plan_laplacian(size(w),with_inverse=true)
````
Note that we have made sure that this operator has an inverse. It is important
that this operator, which represents the `A` matrix in our saddle system, comes
with an associated backslash `\` operation to carry out the inverse.
Now we need to set up the regularization `R` and interpolation `E` operators.
````@example saddlesystems
regop = Regularize(X,dx;issymmetric=true)
Rmat, Emat = RegularizationMatrix(regop,ψb,w);
nothing #hide
````
Now we are ready to set up the system. The solution and right-hand side vectors
are set up using `SaddleVector`.
````@example saddlesystems
rhs = SaddleVector(w,ψb)
sol = SaddleVector(ψ,f)
````
and the saddle system is then set up with the three operators; the $C$ operator
is presumed to be zero when it is not provided.
````@example saddlesystems
A = SaddleSystem(L,Emat,Rmat,rhs)
````
Note that all of the operators we have provided are either matrices (like `Emat` and `Rmat`)
or functions or function-like operators (like `L`). The `SaddleSystem` constructor
allows either. However, the order is important: we must supply $A$, $B_2$, $B_1^T$, and possibly $C$, in that order.
Let's solve the system. We need to set the right-hand side. We will set `ψb`,
but this will also change `rhs`, since that vector is pointing to the same object.
````@example saddlesystems
ψb .= -(xb.-1);
nothing #hide
````
The right-hand side of the Laplace equation is zero. The right-hand side of the
constraint is the specified streamfunction on the body. Note that we have
subtracted the circle center from the $x$ positions on the body. The reason for
this will be discussed in a moment.
We solve the system with the convenient shorthand of the backslash:
````@example saddlesystems
sol .= A\rhs # hide
@time sol .= A\rhs
````
Just to point out how fast it can be, we have also timed it. It's pretty fast.
We can obtain the state vector and the constraint vector from `sol` using some
convenience functions `state(sol)` and `constraint(sol)`.
Now, let's plot the solution in physical space. We'll plot the body shape for
reference, also.
````@example saddlesystems
xg, yg = coordinates(w,dx=dx)
plot(xg,yg,state(sol),xlim=(-Inf,Inf),ylim=(-Inf,Inf))
plot!(xb,yb,fillcolor=:black,fillrange=0,fillalpha=0.25,linecolor=:black)
````
The solution shows the streamlines for a circle in vertical motion, as expected.
All of the streamlines inside the circle are vertical.
---
*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.3.8 | 33a01817b0e32c1b43a50db7e43181d9c31bf801 | docs | 9917 | ```@meta
EditURL = "../../../test/literate/timemarching.jl"
```
# Time marching
```@meta
CurrentModule = ConstrainedSystems
```
```math
\def\ddt#1{\frac{\mathrm{d}#1}{\mathrm{d}t}}
\renewcommand{\vec}{\boldsymbol}
\newcommand{\uvec}[1]{\vec{\hat{#1}}}
\newcommand{\utangent}{\uvec{\tau}}
\newcommand{\unormal}{\uvec{n}}
\renewcommand{\d}{\,\mathrm{d}}
```
[ConstrainedSystems.jl](https://github.com/JuliaIBPM/ConstrainedSystems.jl) is equipped with tools for solving systems of equations of the
general form of half-explicit differential-algebraic equations,
$$\ddt y = L y - B_1^T(y,t) z + r_1(y,t), \quad B_2(y,t) y + C(y,t) z = r_2(t), \quad y(0) = y_0$$
where $z$ is the Lagrange multiplier for enforcing the constraints on $y$. Note
that the constraint operators may depend on the state and on time. The linear operator $L$ may be a matrix or a scalar, but is generally independent of time. (The method of integrating factors can deal with time-dependent $L$, but we don't encounter such systems in the constrained systems context so we won't discuss them.) Our objective is to solve
for $y(t)$ and $z(t)$.
````@example timemarching
using ConstrainedSystems
using CartesianGrids
using Plots
````
## Constrained integrating factor systems
Let's demonstrate this on the example of heat diffusion from a circular ring whose temperature
is held constant. In this case, $L$ is the discrete Laplace operator times the heat diffusivity,
$r_1$ is zero (in the absence of volumetric heating sources), and $r_2$ is the temperature of
the ring. The operators $B_1^T$ and $B_2$ will be the regularization and interpolation
operators between discrete point-wise data on the ring and the field data. We will also
include a $C$ operator that slightly regularizes the constraint.
The ring will have radius $1/2$ and fixed temperature $1$, and
the heat diffusivity is $1$. (In other words, the problem has been non-dimensionalized
by the diameter of the circle, the dimensional ring temperature, and the dimensional diffusivity.)
First, we will construct a field to accept the temperature on
````@example timemarching
nx = 129; ny = 129; Lx = 2.0; Δx = Lx/(nx-2);
w₀ = Nodes(Dual,(nx,ny)); # field initial condition
nothing #hide
````
Now set up a ring of points on the circle at center $(1,1)$.
````@example timemarching
n = 128; θ = range(0,stop=2π,length=n+1);
R = 0.5; xb = 1.0 .+ R*cos.(θ); yb = 1.0 .+ R*sin.(θ);
X = VectorData(xb[1:n],yb[1:n]);
z = ScalarData(X); # to be used as the Lagrange multiplier
nothing #hide
````
Together, `w₀` and `z` comprise the initial solution vector:
````@example timemarching
u₀ = solvector(state=w₀,constraint=z)
````
Now set up the operators. We first set up the linear operator, a Laplacian endowed
with its inverse:
````@example timemarching
L = plan_laplacian(w₀,with_inverse=true)
````
Now the right-hand side operators for the ODEs and constraints. Both must take a standard form:
$r_1$ must accept the state (of the same type as `w₀`), the auxiliary state `x` (not used in this problem), the parameters `p` (also unused here), and the time `t`; $r_2$ must accept `x`, `p`, and `t`. We will implement these in in-place form to make
it more efficient, so each also takes its output as a first argument: $r_1$ will fill rate-of-change data of the same type as `w₀`
and $r_2$ will fill data `dz` of the same type as `z`
````@example timemarching
diffusion_rhs!(dw::Nodes,w::Nodes,x,p,t) = fill!(dw,0.0) # this is r1
boundary_constraint_rhs!(dz::ScalarData,x,p,t) = fill!(dz,1.0) # this is r2, and sets uniformly to 1
````
Construct the regularization and interpolation operators in their usual
symmetric form, and then set up routines that will provide these operators inside the integrator:
````@example timemarching
reg = Regularize(X,Δx;issymmetric=true)
Hmat, Emat = RegularizationMatrix(reg,z,w₀)
boundary_constraint_force!(dw::Nodes,z::ScalarData,x,p) = dw .= Hmat*z # This is B1T
boundary_constraint_op!(dz::ScalarData,y::Nodes,x,p) = dz .= Emat*y; # This is B2
nothing #hide
````
Construct a constraint regularization operator (the $C$ operator)
````@example timemarching
boundary_constraint_reg!(dz::ScalarData,z::ScalarData,x,p) = dz .= -0.1*z; # This is C
nothing #hide
````
Note that these last two functions are also in-place, and return data of the same
respective types as $r_1$ and $r_2$.
All of these are assembled into a single `ConstrainedODEFunction`:
````@example timemarching
f = ConstrainedODEFunction(diffusion_rhs!,boundary_constraint_rhs!,boundary_constraint_force!,
boundary_constraint_op!,L,boundary_constraint_reg!,_func_cache=u₀)
````
With the last argument, we supplied a cache variable to enable evaluation of this function.
Now set up the problem, using the same basic notation as in [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl).
````@example timemarching
tspan = (0.0,20.0)
prob = ODEProblem(f,u₀,tspan)
````
Now solve it. We will set the time-step size to a large value ($1.0$) for demonstration purposes. The method remains stable for any choice.
````@example timemarching
Δt = 1.0
sol = solve(prob,IFHEEuler(),dt=Δt);
nothing #hide
````
Now let's plot it
````@example timemarching
xg, yg = coordinates(w₀,dx=Δx);
plot(xg,yg,state(sol.u[end]))
plot!(xb,yb,linecolor=:black,linewidth=1.5)
````
From a side view, we can see that it enforces the boundary condition:
````@example timemarching
plot(xg,state(sol.u[end])[65,:],xlabel="x",ylabel="u(x,1)")
````
The Lagrange multiplier distribution is nearly uniform
````@example timemarching
plot(constraint(sol.u[end]),ylim=(-0.5,0))
````
## Systems with variable constraints
In some cases, the constraint operators may vary with the state vector. A
good example of this is a swinging pendulum, with its equations expressed in
Cartesian coordinates. The constraint we wish to enforce is that the length
of the pendulum is constant: $x^2+y^2 = l^2$. Though not mathematically necessary,
it also helps to enforce a tangency condition, $xu + yv = 0$, where $u$ and $v$
are the rates of change of $x$ and $y$. Note that this is simply the derivative
of the first constraint. (If expressed in polar coordinates, the constraint is enforced
automatically, simply by expressing the equations for $\theta$.)
The governing equations are
$$\ddt x = u - x \mu ,\, \ddt y = v - y \mu , \, \ddt u = -x \lambda , \, \ddt v = -g - y\lambda$$
with Lagrange multipliers $\mu$ and $\lambda$, and the constraints are
$$x^2+y^2 = l^2,\, xu + yv = 0$$
These are equivalently expressed as
$$\left[ \begin{array}{cccc} x & y & 0 & 0\end{array}\right]\left[ \begin{array}{c} x \\ y \\ u \\ v \end{array}\right] = l^2$$
and
$$\left[ \begin{array}{cccc} 0 & 0 & x & y\end{array}\right]\left[ \begin{array}{c} x \\ y \\ u \\ v \end{array}\right] = 0$$
The operators $B_1^T$ and $B_2$ are thus
$$B_1^T = \left[ \begin{array}{cc} x & 0 \\ y & 0 \\ 0 & x \\ 0 & y \end{array}\right]$$
and
$$B_2 = \left[ \begin{array}{cccc} x & y & 0 & 0 \\ 0 & 0 & x & y \end{array}\right]$$
That is, the operators are dependent on the state. In this package, we handle this
by providing a parameter that can be dynamically updated. We will get to that later. First,
let's set up the physical parameters
````@example timemarching
l = 1.0
g = 1.0
params = [l,g]
````
and initial condition:
````@example timemarching
θ₀ = π/2
y₀ = Float64[l*sin(θ₀),-l*cos(θ₀),0,0]
z₀ = Float64[0.0, 0.0] # Lagrange multipliers
u₀ = solvector(state=y₀,constraint=z₀)
````
Now, we will set up the basic form of the constraint operators and assemble
these with the other parameters with the help of a type we'll define here:
````@example timemarching
struct ProblemParams{P,BT1,BT2}
params :: P
B₁ᵀ :: BT1
B₂ :: BT2
end
B1T = zeros(4,2) # set to zeros for now
B2 = zeros(2,4) # set to zeros for now
p₀ = ProblemParams(params,B1T,B2);
nothing #hide
````
We will now define the operators of the problem, all in in-place form:
````@example timemarching
function pendulum_rhs!(dy::Vector{Float64},y::Vector{Float64},x,p,t)
dy[1] = y[3]
dy[2] = y[4]
dy[3] = 0.0
dy[4] = -p.params[2]
return dy
end # r1
function length_constraint_rhs!(dz::Vector{Float64},x,p,t)
dz[1] = p.params[1]^2
dz[2] = 0.0
return dz
end # r2
````
The B1 function. This returns B1*z. It uses an existing B1 supplied by p.
````@example timemarching
function length_constraint_force!(dy::Vector{Float64},z::Vector{Float64},x,p)
dy .= p.B₁ᵀ*z
end
````
The B2 function. This returns B2*y. It uses an existing B2 supplied by p.
````@example timemarching
function length_constraint_op!(dz::Vector{Float64},y::Vector{Float64},x,p)
dz .= p.B₂*y
end
````
Now, we need to provide a means of updating the parameter structure with
the current state of the system. This is done in-place, just as for the
other operators:
````@example timemarching
function update_p!(q,u,p,t)
y = state(u)
fill!(q.B₁ᵀ,0.0)
fill!(q.B₂,0.0)
q.B₁ᵀ[1,1] = y[1]; q.B₁ᵀ[2,1] = y[2]; q.B₁ᵀ[3,2] = y[1]; q.B₁ᵀ[4,2] = y[2]
q.B₂[1,1] = y[1]; q.B₂[1,2] = y[2]; q.B₂[2,3] = y[1]; q.B₂[2,4] = y[2]
return q
end
````
Finally, assemble all of them together:
````@example timemarching
f = ConstrainedODEFunction(pendulum_rhs!,length_constraint_rhs!,length_constraint_force!,
length_constraint_op!,
_func_cache=deepcopy(u₀),param_update_func=update_p!)
````
Now solve the system
````@example timemarching
tspan = (0.0,10.0)
prob = ODEProblem(f,u₀,tspan,p₀)
Δt = 1e-2
sol = solve(prob,LiskaIFHERK(),dt=Δt);
nothing #hide
````
Plot the solution
````@example timemarching
plot(sol.t,sol[1,:],label="x",xlabel="t")
plot!(sol.t,sol[2,:],label="y")
````
and here is the trajectory
````@example timemarching
plot(sol[1,:],sol[2,:],ratio=1,legend=:false,title="Trajectory",xlabel="x",ylabel="y")
````
---
*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| ConstrainedSystems | https://github.com/JuliaIBPM/ConstrainedSystems.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 797 | using TemplateMatching
using Documenter
DocMeta.setdocmeta!(TemplateMatching, :DocTestSetup, :(using TemplateMatching); recursive=true)
makedocs(;
modules=[TemplateMatching],
authors="mleseach <[email protected]>",
repo="https://github.com/mleseach/TemplateMatching.jl/blob/{commit}{path}#{line}",
sitename="TemplateMatching.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://mleseach.github.io/TemplateMatching.jl",
edit_link="master",
assets=String[],
),
checkdocs=:exports,
pages=[
"Get started" => "index.md",
"Reference" => "reference.md"
],
)
deploydocs(;
repo="github.com/mleseach/TemplateMatching.jl",
devbranch="master",
)
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 3686 | using Statistics: mean
import Base.sum
"""
biased_cumsum!(B, A, bias; dims)
Equivalent to `cumsum!(B, A .+ bias, dims=dims)`.
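# Examples
A small check of the equivalence (a sketch; `biased_cumsum!` is an internal helper):
```jldoctest
using TemplateMatching: biased_cumsum!
biased_cumsum!(zeros(Int, 2, 2), [1 2; 3 4], 10, dims=1)
# output
2×2 Matrix{Int64}:
 11  12
 24  26
```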
"""
function biased_cumsum!(B, A, bias; dims)
result = accumulate!(B, A, dims=dims, init=0) do a, b
a + b + bias
end
return result
end
"""
integral_array!(output, array, bias)
Compute the integral array.
"""
function integral_array!(
output::AbstractArray{T,N},
array::AbstractArray{T,N},
bias::T
) where {T,N}
biased_cumsum!(output, array, bias, dims=1)
for d in 2:N
cumsum!(output, output, dims=d)
end
return output
end
"""
IntegralArray(integral, bias)
IntegralArray(array)
IntegralArray(f, array)
Structure that represents an
[integral array](https://en.wikipedia.org/wiki/Summed-area_table).
# Arguments
- `integral`: the already computed integral array.
- `bias`: the bias associated with the computed integral array.
- `array`: the input array from which the integral image will be computed.
- `f`: a function to apply to each element of `array` before computing the integral.
Integral arrays allow fast summation queries over rectangular subregions using
[`sum(::IntegralArray, x, h)`](@ref).
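# Examples
A minimal example, summing over the whole array:
```jldoctest
using TemplateMatching: IntegralArray
sum(IntegralArray([1. 2.; 3. 4.]), (1, 1), (2, 2))
# output
10.0
```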
"""
struct IntegralArray{T,A<:AbstractArray{T}}
integral::A
bias::T
function IntegralArray(integral::AbstractArray{T,N}, bias::T) where {T,N}
@assert N < 64 "arrays with more than 64 dims are not yet supported"
return new{T,typeof(integral)}(integral, bias)
end
function IntegralArray(array::AbstractArray{T}) where T
bias = -mean(array)
integral = integral_array!(similar(array), array, bias)
return IntegralArray(integral, bias)
end
function IntegralArray(f, array::AbstractArray{T}) where T
new_array = broadcast(f, array)
bias = -mean(new_array)
integral = integral_array!(new_array, new_array, bias)
return IntegralArray(integral, bias)
end
end
"""
sum(integral, x, h)
Calculate the sum of values within a rectangle defined by its top-left corner
`x` and dimensions `h` in an [`IntegralArray`](@ref).
# Examples
```jldoctest
using TemplateMatching: IntegralArray
sum(IntegralArray([1. 2. 3.; 4. 5. 6.; 7. 8. 9.]), (2, 1), (2, 2))
# output
24.0
```
"""
@inline function sum(integral::IntegralArray{T}, x, h) where T
d = ndims(integral.integral)
x = Tuple(x)
h = Tuple(h)
result = zero(T)
# TODO: optimize this loop
# - make sure the compiler can unroll the loop and infer value of most variables
# ie: `i`, `n` and `sign` are comptime known
# maybe write a macro
# we can replace this by Iterators.product(ntuple(_ -> 0:1, d)...)
#   but this confuses the compiler and is much slower
for i in 0:(2^d - 1)
# index = x .+ h .* int_to_tuple(i, d) .- 1
index = broadcast(
muladd,
h,
int_to_tuple(i, d),
x .- 1,
)
# if we are in the border just skip
if any(==(0), index)
continue
end
# n = (sum(int_to_tuple(i)) + d) % 2
n = (count_ones(i) + d) & 1
sign = if n == 0
1
else
-1
end
result += sign * integral.integral[index...]
end
result -= integral.bias * prod(h)
return result
end
"""
int_to_tuple(n, d)
Transform the `d` lower bits of `n` into a tuple.
# Examples
```jldoctest
using TemplateMatching: int_to_tuple
int_to_tuple(0b0111011, 4)
# output
(1, 1, 0, 1)
```
"""
function int_to_tuple(n, d)
return ntuple(i -> (n >> (i-1)) & 1, d)
end | TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 9973 | module TemplateMatching
export match_template, match_template!,
SquareDiff, NormalizedSquareDiff,
CrossCorrelation, NormalizedCrossCorrelation,
CorrelationCoeff, NormalizedCorrelationCoeff
include("IntegralArray.jl")
@doc raw"""
Base type for specifying the algorithm used in template matching operations.
`Algorithm` serves as an abstract base type for all template matching algorithms within the
package. It is meant to be subclassed by specific algorithm implementations.
"""
abstract type Algorithm end
@doc raw"""
Algorithm that calculates the squared difference between the source and the template.
This method is used to find how much each part of the source array differs from the
template by squaring the difference between corresponding values. It is straightforward
and works well when the brightness of the images does not vary much.
Lower values indicate a better match.
# Formula
``R_i = \sum_j (S_{i+j} - T_j)^2``
See also [`NormalizedSquareDiff`](@ref)
"""
struct SquareDiff <: Algorithm end
@doc raw"""
Normalized algorithm for computing the squared difference between the source and the
template.
This method extends the [`SquareDiff`](@ref) algorithm by normalizing the squared
differences. It is more robust against variations in brightness and contrast between
the source and template images.
Lower values indicate a better match.
# Formula
``R_i = \frac{\sum_j (S_{i+j} - T_j)^2}{\sqrt{\sum_j S_{i+j}^2 \cdot \sum_j T_j^2}}``
See also [`SquareDiff`](@ref)
"""
struct NormalizedSquareDiff <: Algorithm end
@doc raw"""
Algorithm that calculates the cross-correlation between the source and the template.
This method computes the similarity between the source and template by multiplying
their corresponding elements and summing up the results. This approach inherently
favors the brighter regions of the source image over the darker ones since the product
of higher intensity values will naturally be greater.
Higher values indicate a better match.
# Formula
``R_i = \sum_j (S_{i+j} \cdot T_j)``
See also [`NormalizedCrossCorrelation`](@ref)
"""
struct CrossCorrelation <: Algorithm end
@doc raw"""
Normalized algorithm for computing the cross-correlation between the source and the
template.
This method improves upon the [`CrossCorrelation`](@ref) by normalizing the results. It
calculates the similarity by multiplying corresponding elements, summing up those products,
and then dividing by the product of their norms. This reduces the bias toward brighter
areas, providing a more balanced measurement of similarity.
Higher values indicate a better match.
# Formula
``R_i = \frac{\sum_j (S_{i+j} \cdot T_j)}{\sqrt{\sum_j S_{i+j}^2 \cdot \sum_j T_j^2}}``
See also [`CrossCorrelation`](@ref)
"""
struct NormalizedCrossCorrelation <: Algorithm end
@doc raw"""
Algorithm that measures the correlation coefficient between the source and the template.
This method quantifies the degree to which the source and template match by computing their
correlation coefficient. It offers a balance between capturing the structural similarity
and adjusting for brightness variations, making it less biased towards the brighter parts
in comparison to simple cross-correlation methods.
A higher value indicates a better match.
# Formula
``
R_i = \frac{
\sum_j ((S_{i+j} - \bar{S}_i) \cdot (T_j - \bar{T}))
}{
\sqrt{\sum_j (S_{i+j} - \bar{S}_i)^2 \cdot \sum_j (T_j - \bar{T})^2}
}
``
Where ``\bar{S}_i`` is the mean of the source values within the region considered for
matching and ``\bar{T}`` is the mean of the template values.
See also [`NormalizedCorrelationCoeff`](@ref)
"""
struct CorrelationCoeff <: Algorithm end
@doc raw"""
Normalized algorithm for computing the correlation coefficient between the source and the
template.
This method extends the [`CorrelationCoeff`](@ref) by applying additional normalization
steps, aiming to further minimize the influence of the absolute brightness levels and
enhance the robustness of matching. It calculates the normalized correlation coefficient,
providing a standardized measure.
A higher value indicates a better match.
# Formula
``
R_i = \frac{
\sum_j ((S_{i+j} - \bar{S}_i) \cdot (T_j - \bar{T}))
}{
\sqrt{\sum_j (S_{i+j} - \bar{S}_i)^2} \cdot \sqrt{\sum_j (T_j - \bar{T})^2}
}
``
Where ``\bar{S}_i`` is the mean of the source values within the region considered for
matching and ``\bar{T}`` is the mean of the template values.
See also [`CorrelationCoeff`](@ref)
"""
struct NormalizedCorrelationCoeff <: Algorithm end
include("square_diff.jl")
include("cross_correlation.jl")
include("correlation_coeff.jl")
@doc raw"""
match_template(source, template, alg::Algorithm)
Performs template matching between the source image and template using a specified
algorithm.
Compare a template to a source image using the algorithm specified by the `alg` parameter.
It is designed to work with arrays of two or more dimensions, making it suitable for
multidimensional arrays or sets of images. The function slides the template over the source
array in all possible positions, computing a similarity metric at each position.
# Arguments
- `source`: Source array to search within.
- `template`: Template array to search for.
- `alg::Algorithm`: Algorithm to use for calculating the similarity metric.
The dimensions of the `source` array should be greater than or equal to the dimensions of
the `template` array. If the `source` is of size `(S_1, S_2, ...)` and `template` is
`(T_1, T_2, ...)`, then the size of the resultant match array will be
`(S_1-T_1+1, S_2-T_2+1, ...)`, representing the similarity metric for each possible
position of the template over the source.
The algorithm for matching is chosen by passing an instance of one of the following structs
as the `alg` parameter: [`SquareDiff`](@ref), [`NormalizedSquareDiff`](@ref),
[`CrossCorrelation`](@ref), [`NormalizedCrossCorrelation`](@ref),
[`CorrelationCoeff`](@ref), or [`NormalizedCorrelationCoeff`](@ref).
# Returns
Return an array of the same number of dimensions as the input arrays,
containing the calculated similarity metric at each position of the template over the
source image.
# Examples
```julia
source = rand(100, 100)
template = rand(10, 10)
result = match_template(source, template, CrossCorrelation())
```
```jldoctest
source = rand(100, 100)
template = source[10:15, 20:30]
result = match_template(source, template, SquareDiff())
argmin(result)
# output
CartesianIndex(10, 20)
```
```jldoctest
source = rand(100, 100)
template = source[10:15, 20:30]
result = match_template(source, template, CorrelationCoeff())
argmax(result)
# output
CartesianIndex(10, 20)
```
See also [`match_template!`](@ref) for a version of this function that writes the result
into a preallocated array.
"""
function match_template(source, template, alg::Algorithm)
dest_size = Tuple(size(source) .- size(template) .+ 1)
# similar doesn't work on channelview
dest = Array{eltype(source)}(undef, dest_size)
return match_template!(dest, source, template, alg)
end
@doc raw"""
match_template!(dest, source, template, alg::Algorithm)
Performs template matching and writes the results into a preallocated destination array.
In-place counterpart to `match_template`, designed to perform the template matching
operation and store the results in a preallocated array `dest` passed by the user.
This reduces memory allocations and can be more efficient when performing multiple
template matching operations. It compares a template to a source image using the
specified algorithm, suitable for multidimensional arrays or sets of images.
See [`match_template`](@ref) for further documentation.
# Arguments
- `dest`: Preallocated destination array where the result will be stored.
- `source`: Source array to search within.
- `template`: Template array to search for.
- `alg::Algorithm`: Algorithm for calculating the similarity.
# Returns
Return its first argument `dest` containing the calculated similarity metric at each
position of the template over the source image.
The dimensions of the `source` array should be greater than or equal to the dimensions of
the template array. If the source is of size `(S_1, S_2, ...)` and `template` is
`(T_1, T_2, ...)`, then `dest` must be preallocated with dimensions
`(S_1-T_1+1, S_2-T_2+1, ...)`, representing the similarity metric for each possible
position of the template over the source.
The algorithm for matching is chosen by passing an instance of one of the following structs
as the `alg` parameter: [`SquareDiff`](@ref), [`NormalizedSquareDiff`](@ref),
[`CrossCorrelation`](@ref), [`NormalizedCrossCorrelation`](@ref),
[`CorrelationCoeff`](@ref), or [`NormalizedCorrelationCoeff`](@ref).
# Examples
```julia
source = rand(100, 100)
template = rand(10, 10)
dest = Array{Float64}(undef, 91, 91)
match_template!(dest, source, template, CrossCorrelation())
dest = Array{Float64}(undef, 100, 100)
match_template!(dest, source, template, CrossCorrelation()) # will fail
```
See also [`match_template`](@ref) for an immutable version of this function.
"""
match_template!(dest, source, template, alg)
function match_template!(dest, source, template, ::SquareDiff)
return square_diff!(dest, source, template)
end
function match_template!(dest, source, template, ::NormalizedSquareDiff)
return normalized_square_diff!(dest, source, template)
end
function match_template!(dest, source, template, ::CrossCorrelation)
return cross_correlation!(dest, source, template)
end
function match_template!(dest, source, template, ::NormalizedCrossCorrelation)
return normalized_cross_correlation!(dest, source, template)
end
function match_template!(dest, source, template, ::CorrelationCoeff)
return correlation_coeff!(dest, source, template)
end
function match_template!(dest, source, template, ::NormalizedCorrelationCoeff)
return normalized_correlation_coeff!(dest, source, template)
end
end | TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 1860 | """
Implementation of the CorrelationCoeff method
"""
function correlation_coeff!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
source_integral = IntegralArray(source)
template_sum = sum(template)
# we use dest as temporary storage for cross_correlation
cross_correlation = cross_correlation!(dest, source, template)
n = length(template)
h = CartesianIndex(size(template))
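    # uses the identity sum_j (S_j - mean(S)) * (T_j - mean(T))
    #                 = sum_j S_j * T_j - sum(S) * sum(T) / n,
    # i.e. the correlation coefficient is the cross-correlation minus a mean term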
for i in CartesianIndices(dest)
source_sum = sum(source_integral, i, h)
dest[i] = cross_correlation[i] - template_sum * source_sum / n
end
return dest
end
"""
Implementation of the NormalizedCorrelationCoeff method
"""
function normalized_correlation_coeff!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
source_integral = IntegralArray(source)
source_square_integral = IntegralArray(x -> x^2, source)
template_sum = sum(template)
template_square_sum = sum(x -> x^2, template)
# we use dest as temporary storage for cross_correlation
cross_correlation = cross_correlation!(dest, source, template)
n = length(template)
h = CartesianIndex(size(template))
for i in CartesianIndices(dest)
source_sum = sum(source_integral, i, h)
source_square_sum = sum(source_square_integral, i, h)
dest[i] = cross_correlation[i] - template_sum * source_sum / n
dest[i] /= sqrt((template_square_sum - template_sum^2 / n) *
(source_square_sum - source_sum^2 / n))
end
return dest
end
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 1324 | using ImageFiltering: imfilter!, Inner
using OffsetArrays: OffsetArray
"""
Implementation of the CrossCorrelation method
"""
function cross_correlation!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
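    # imfilter! computes a correlation; shifting the template axes to start at 0
    # aligns the output at index i with the window starting at source[i, ...]
    # (top-left alignment instead of the default centered kernel)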
return imfilter!(
dest,
source,
OffsetArray(template, ntuple(_ -> -1, ndims(source))),
Inner(),
)
end
"""
Implementation of the NormalizedCrossCorrelation method
"""
function normalized_cross_correlation!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
source_square_integral = IntegralArray(x -> x^2, source)
template_square_sum = sum(v -> v^2, template)
dest = cross_correlation!(dest, source, template)
h = CartesianIndex(size(template))
for i in CartesianIndices(dest)
source_square_sum = sum(source_square_integral, i, h)
dest[i] /= sqrt(source_square_sum * template_square_sum)
end
return dest
end
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 1679 | """
Implementation of the SquareDiff method
"""
function square_diff!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
source_square_integral = IntegralArray(x -> x^2, source)
template_square_sum = sum(v -> v^2, template)
# we use dest as temporary storage for cross_correlation
cross_correlation = cross_correlation!(dest, source, template)
h = CartesianIndex(size(template))
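    # uses the expansion sum_j (S_j - T_j)^2 = sum T^2 - 2 * sum(S .* T) + sum S^2,
    # with the last term read from the integral array of squared source values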
for i in CartesianIndices(dest)
source_square_sum = sum(source_square_integral, i, h)
dest[i] = template_square_sum - 2 * cross_correlation[i] + source_square_sum
end
return dest
end
"""
Implementation of the NormalizedSquareDiff method
"""
function normalized_square_diff!(dest, source, template)
@assert ndims(source) == ndims(template) "source and template should have same number of dims"
dest_size = Tuple(size(source) .- size(template) .+ 1)
@assert dest_size == size(dest) "size(dest) should be $(dest_size), $(size(dest)) given"
source_square_integral = IntegralArray(x -> x^2, source)
template_square_sum = sum(v -> v^2, template)
# we use dest as temporary storage for cross_correlation
cross_correlation = cross_correlation!(dest, source, template)
h = CartesianIndex(size(template))
for i in CartesianIndices(dest)
source_square_sum = sum(source_square_integral, i, h)
dest[i] = template_square_sum - 2 * cross_correlation[i] + source_square_sum
dest[i] /= sqrt(source_square_sum * template_square_sum)
end
return dest
end
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 102 | using TemplateMatching
using Test
include("src/IntegralArray.jl")
include("src/TemplateMatching.jl")
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 2738 | using Test
using TemplateMatching: IntegralArray, int_to_tuple, sum
using Random
@testset "IntegralArray" begin
@testset "int_to_tuple" begin
test_table() = [
# (n, d, expected result)
(0b010101010, 1, (0,)),
(0b010101010, 4, (0, 1, 0, 1)),
(0b010101010, 3, (0, 1, 0)),
(0b001010101, 4, (1, 0, 1, 0)),
(0b000001111, 4, (1, 1, 1, 1)),
(0b000101000, 4, (0, 0, 0, 1)),
(0xf0f0f0f0f, 20, (1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1)),
]
for (n, d, expected) in test_table()
@test int_to_tuple(n, d) == expected
end
end
@testset "sum" begin
        # test that naive_sum gives the same result as sum for
        # various sizes and indices
naive_sum(array, x, h) = sum(array[x:x+h-oneunit(h)])
naive_sum(f, array, x, h) = sum(f, array[x:x+h-oneunit(h)], init=0)
rng = Xoshiro(0)
test_table() = [
# (size, x, h)
(10, 1, 10),
(10, 4, 3),
(10, 1, 3),
(10, 7, 4),
(10, 7, 0),
((10, 5), (1, 1), (10, 5)),
((10, 5), (5, 1), (5, 5)),
((10, 5), (1, 2), (2, 2)),
((10, 5), (1, 2), (2, 3)),
((10, 5), (8, 2), (2, 3)),
((10, 5), (8, 2), (2, 3)),
((10, 5), (8, 2), (2, 0)),
((10, 5), (8, 2), (0, 2)),
((10, 5), (8, 2), (0, 0)),
((7, 7, 7), (1, 1, 1), (7, 7, 7)),
((7, 7, 7), (3, 2, 5), (0, 3, 1)),
((7, 7, 7), (3, 2, 5), (1, 1, 1)),
((7, 7, 7), (3, 1, 5), (2, 0, 2)),
((7, 7, 7), (2, 1, 2), (5, 6, 5)),
((7, 7, 7), (2, 1, 2), (1, 6, 1)),
((7, 7, 7, 7), (1, 1, 1, 1), (7, 7, 7, 7)),
((7, 7, 7, 7), (1, 1, 1, 1), (7, 0, 7, 7)),
((7, 7, 7, 7), (1, 1, 1, 1), (7, 0, 0, 7)),
((7, 7, 7, 7), (1, 1, 1, 1), (0, 0, 0, 0)),
((7, 7, 7, 7), (1, 1, 1, 1), (0, 7, 7, 7)),
]
for (size, x, h) in test_table()
x = CartesianIndex(x)
h = CartesianIndex(h)
array = rand(rng, size...)
integral = IntegralArray(array)
@test isapprox(sum(integral, x, h), naive_sum(array, x, h), rtol=1e-5)
end
for f in [exp, sqrt, log, (x -> 2x)]
for (size, x, h) in test_table()
x = CartesianIndex(x)
h = CartesianIndex(h)
array = rand(rng, size...)
integral = IntegralArray(f, array)
@test isapprox(sum(integral, x, h), naive_sum(f, array, x, h), rtol=1e-5)
end
end
end
end
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | code | 7827 | using Test
using TemplateMatching
using ImageFiltering
using Statistics
using Random
rng = Xoshiro(0)
@testset "::SquareDiff" begin
function naive_square_diff(source, template)
sum2(a) = sum(x -> x^2, a)
result = mapwindow(source, size(template), border=Inner()) do subsection
sum2(subsection .- template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, SquareDiff())
@test argmin(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_square_diff(source, template)
result = match_template(source, template, SquareDiff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_square_diff(source, template)
result = match_template(source, template, SquareDiff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
end
@testset "::NormalizedSquareDiff" begin
function naive_normalized_square_diff(source, template)
sum2(a) = sum(x -> x^2, a)
sum2_template = sum2(template)
result = mapwindow(source, size(template), border=Inner()) do subsection
sum2(subsection .- template) / sqrt(sum2(subsection) * sum2_template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, NormalizedSquareDiff())
@test argmin(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_normalized_square_diff(source, template)
result = match_template(source, template, NormalizedSquareDiff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_normalized_square_diff(source, template)
result = match_template(source, template, NormalizedSquareDiff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
end
@testset "::CrossCorrelation" begin
function naive_cross_correlation(source, template)
result = mapwindow(source, size(template), border=Inner()) do subsection
sum(subsection .* template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, CrossCorrelation())
@test argmax(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_cross_correlation(source, template)
result = match_template(source, template, CrossCorrelation())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_cross_correlation(source, template)
result = match_template(source, template, CrossCorrelation())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
end
@testset "::NormalizedCrossCorrelation" begin
function naive_normalized_cross_correlation(source, template)
sum2(a) = sum(x -> x^2, a)
sum2_template = sum2(template)
result = mapwindow(source, size(template), border=Inner()) do subsection
sum(subsection .* template) / sqrt(sum2(subsection) * sum2_template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, NormalizedCrossCorrelation())
@test argmax(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_normalized_cross_correlation(source, template)
result = match_template(source, template, NormalizedCrossCorrelation())
@test all(isapprox.(result, naive_result, rtol=1e-5))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_normalized_cross_correlation(source, template)
result = match_template(source, template, NormalizedCrossCorrelation())
@test all(isapprox.(result, naive_result, rtol=1e-5))
end
end
@testset "::CorrelationCoeff" begin
function naive_correlation_coeff(source, template)
template = template .- mean(template)
result = mapwindow(source, size(template), border=Inner()) do subsection
subsection = subsection .- mean(subsection)
sum(subsection .* template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, CorrelationCoeff())
@test argmax(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_correlation_coeff(source, template)
result = match_template(source, template, CorrelationCoeff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_correlation_coeff(source, template)
result = match_template(source, template, CorrelationCoeff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
end
@testset "::NormalizedCorrelationCoeff" begin
function naive_normalized_correlation_coeff(source, template)
sum2(a) = sum(x -> x^2, a)
template = template .- mean(template)
sum2_template = sum2(template)
result = mapwindow(source, size(template), border=Inner()) do subsection
subsection = subsection .- mean(subsection)
sum(subsection .* template) / sqrt(sum2(subsection) * sum2_template)
end
return parent(result)
end
let
source = rand(rng, 100, 3, 100)
template = source[50:70, :, 30:35]
dest = similar(source, 80, 1, 95)
result = match_template!(dest, source, template, NormalizedCorrelationCoeff())
@test argmax(result) === CartesianIndex(50, 1, 30)
@test dest === result
end
let
source = rand(rng, 50, 50)
template = rand(rng, 35, 25)
naive_result = naive_normalized_correlation_coeff(source, template)
result = match_template(source, template, NormalizedCorrelationCoeff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
let
source = rand(rng, 20, 20, 20)
template = rand(rng, 7, 5, 3)
naive_result = naive_normalized_correlation_coeff(source, template)
result = match_template(source, template, NormalizedCorrelationCoeff())
@test all(isapprox.(result, naive_result, rtol=1e-10))
end
end
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | docs | 3461 | # TemplateMatching
[](https://mleseach.github.io/TemplateMatching.jl/stable/)
[](https://mleseach.github.io/TemplateMatching.jl/dev/)
[](https://github.com/mleseach/TemplateMatching.jl/actions/workflows/CI.yml?query=branch%3Amaster)
TemplateMatching is a Julia package designed to offer a native Julia implementation of
template matching functionalities similar to those available in OpenCV. This package aims
to provide an easy-to-use interface for image processing and computer vision applications,
allowing users to leverage the high-performance capabilities of Julia for template matching
operations. The package offers performance slightly below that of OpenCV but significantly
better than a naive implementation.
## Features
Masks are not yet supported in the current version of the package.
Unlike OpenCV, TemplateMatching.jl supports n-dimensional arrays[^1].
Below is a table summarising available methods and their equivalent in opencv.
| TemplateMatching.jl | Mask | OpenCV equivalent |
|:--------------------------------|:-------------------:|:-----------------------|
| SquareDiff | Not yet supported | `TM_SQDIFF` |
| NormalizedSquareDiff | Not yet supported | `TM_SQDIFF_NORMED` |
| CrossCorrelation | Not yet supported | `TM_CCORR` |
| NormalizedCrossCorrelation | Not yet supported | `TM_CCORR_NORMED` |
| CorrelationCoeff | Not yet supported | `TM_CCOEFF` |
| NormalizedCorrelationCoeff | Not yet supported | `TM_CCOEFF_NORMED` |
[^1]: Up to 64 dimensions because of an implementation detail, but this shouldn't be a
problem in most cases.
## Installation
To install TemplateMatching, use the Julia package manager.
As it is not yet registered, use the full url of the repository.
Open your Julia command-line interface and run:
```julia
using Pkg
Pkg.add(url="https://github.com/mleseach/TemplateMatching.jl")
```
## Usage
Almost everything you need is the function `match_template` and its inplace counterpart
`match_template!`.
Below is a quick start example on how to use the TemplateMatching package to perform
template matching:
```julia
using TemplateMatching
using Images
# Load your source image and template
source = rand(1000, 1000)
template = source[400:500, 100:150]
# Perform template matching using square difference
result = match_template(source, template, SquareDiff())
# Get the best match
argmin(result) # CartesianIndex(400, 100)
# Perform template matching using normalized correlation coefficient
result = match_template(source, template, NormalizedCorrelationCoeff())
# Get the best match
argmax(result) # CartesianIndex(400, 100)
```
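
For repeated matching, the in-place variant `match_template!` avoids reallocating the
result. A minimal sketch (the destination dimensions follow the
`(S_1-T_1+1, S_2-T_2+1, ...)` rule, here for a 1000x1000 source and a 101x51 template):

```julia
using TemplateMatching

source = rand(1000, 1000)
template = source[400:500, 100:150]

# destination must be (1000-101+1, 1000-51+1) = (900, 950)
dest = Array{Float64}(undef, 900, 950)
match_template!(dest, source, template, SquareDiff())

argmin(dest) # CartesianIndex(400, 100)
```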
## Documentation
For more detailed information on all the functions and their parameters, please refer to
the full [documentation](https://mleseach.github.io/TemplateMatching.jl/stable/).
## Possible improvements
- [ ] Support for template mask (planned)
- [ ] Support for GPU
- [ ] Improve performances
- [ ] More tests and examples in documentation
- [ ] Better errors
## License
TemplateMatching is provided under the [MIT License](LICENSE). Feel free to use it in your
projects.
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | docs | 2731 | ```@meta
CurrentModule = TemplateMatching
```
# TemplateMatching
Documentation for [TemplateMatching](https://github.com/mleseach/TemplateMatching.jl).
TemplateMatching is a Julia package designed to offer a native Julia implementation of
template matching functionalities similar to those available in OpenCV. This package aims
to provide an easy-to-use interface for image processing and computer vision applications,
allowing users to leverage the high-performance capabilities of Julia for template matching
operations. The package offers performance slightly below that of OpenCV but significantly
better than a naive implementation.
## Features
Masks are not yet supported in the current version of the package.
Unlike OpenCV, TemplateMatching.jl supports n-dimensional arrays[^1].
Below is a table summarising available methods and their equivalent in opencv.
| TemplateMatching.jl | Mask | OpenCV equivalent |
|:--------------------------------------|:-------------------:|:-----------------------|
| [`SquareDiff`](@ref) | Not yet supported | `TM_SQDIFF` |
| [`NormalizedSquareDiff`](@ref) | Not yet supported | `TM_SQDIFF_NORMED` |
| [`CrossCorrelation`](@ref) | Not yet supported | `TM_CCORR` |
| [`NormalizedCrossCorrelation`](@ref) | Not yet supported | `TM_CCORR_NORMED` |
| [`CorrelationCoeff`](@ref) | Not yet supported | `TM_CCOEFF` |
| [`NormalizedCorrelationCoeff`](@ref) | Not yet supported | `TM_CCOEFF_NORMED` |
[^1]: Up to 64 dimensions because of an implementation detail, but this shouldn't be a
problem in most cases.
## Installation
To install TemplateMatching, use the Julia package manager.
As it is not yet registered, use the full url of the repository.
Open your Julia command-line interface and run:
```julia
using Pkg
Pkg.add(url="https://github.com/mleseach/TemplateMatching.jl")
```
## Usage
Almost everything you need is the function [`match_template`](@ref) and its inplace counterpart
[`match_template!`](@ref).
Below is a quick start example on how to use the TemplateMatching package to perform
template matching:
```julia
using TemplateMatching
using Images
# Load your source image and template
source = rand(1000, 1000)
template = source[400:500, 100:150]
# Perform template matching using square difference
result = match_template(source, template, SquareDiff())
# Get the best match
argmin(result) # CartesianIndex(400, 100)
# Perform template matching using normalized correlation coefficient
result = match_template(source, template, NormalizedCorrelationCoeff())
# Get the best match
argmax(result) # CartesianIndex(400, 100)
```
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.1.2 | 462f5ce223ccc053ea8240fa30453b20625c7fcc | docs | 215 | # Functions
```@docs
match_template
match_template!
```
# Available algorithms
```@docs
SquareDiff
NormalizedSquareDiff
CrossCorrelation
NormalizedCrossCorrelation
CorrelationCoeff
NormalizedCorrelationCoeff
```
| TemplateMatching | https://github.com/mleseach/TemplateMatching.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 339 | using Documenter, CellularAutomata
include("pages.jl")
makedocs(;
sitename="CellularAutomata.jl",
modules=[CellularAutomata],
clean=true,
doctest=true,
linkcheck=true,
warnonly=[:missing_docs],
pages=pages,
)
deploydocs(;
repo="github.com/MartinuzziFrancesco/CellularAutomata.jl.git", push_preview=true
)
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 374 | pages = [
"CellularAutomata.jl" => "index.md",
"Examples" => [
"One dimensional CA" => "onedim/onedimensionca.md"
"Two dimensional CA" => "twodim/twodimensionca.md"
],
"API Documentation" => Any[
"General APIs" => "api/general.md"
"One Dimensional CA" => "api/onedim.md"
"Two Dimensial CA" => "api/twodim.md"
],
]
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 3558 | module CellularAutomata
abstract type AbstractRule end
abstract type AbstractODRule <: AbstractRule end
abstract type AbstractTDRule <: AbstractRule end
abstract type AbstractCA end
include("dca.jl")
include("cca.jl")
include("tca.jl")
include("life.jl")
include("measures.jl")
struct CellularAutomaton{F,E} <: AbstractCA
generations::Int
generation_fun::F
evolution::E
end
"""
CellularAutomaton(rule::AbstractODRule, initial_conditions, generations)
CellularAutomaton(rule::AbstractTDRule, initial_conditions, generations)
Constructs the evolution of a cellular automaton based on a specified rule,
initial conditions, and the number of generations to simulate.
This function supports both one-dimensional (OD) and two-dimensional (TD) cellular automata,
determined by the type of `rule` provided.
# Arguments
- `rule`: An instance of `AbstractODRule` for one-dimensional cellular automata
or `AbstractTDRule` for two-dimensional cellular automata. Defines the evolution
rule for the cellular automaton.
- `initial_conditions`: An array (for OD) or a matrix (for TD) representing the
initial state of the cellular automaton.
- `generations`: The number of generations (or time steps) for which the automaton
should be evolved.
# Usage
For a one-dimensional cellular automaton:
```julia
rule = DCA(30) # Define or instantiate a one-dimensional rule
initial_conditions = [0, 1, 0, 1, 1, 0, 1] # Initial state array
generations = 50 # Number of generations to simulate
automaton_od = CellularAutomaton(rule, initial_conditions, generations)
```
For a two-dimensional cellular automaton:
```julia
rule = Life(((3,), (2, 3))) # Define or instantiate a two-dimensional rule
initial_conditions = [0 1 0; 1 0 1; 0 1 0] # Initial state matrix
generations = 50 # Number of generations to simulate
automaton_td = CellularAutomaton(rule, initial_conditions, generations)
```
This function constructs a CellularAutomaton instance that encapsulates the
entire evolution history of the cellular automaton, according to the provided
rule and initial conditions over the specified number of generations.
The exact nature of the evolution—whether it is for a one-dimensional or
two-dimensional automaton—depends on the type of rule supplied.
You can access the evolution by calling the `evolution` field of `CellularAutomaton`
```julia
automaton_td.evolution
```
# Notes
- The `rule` parameter determines the dimensionality of the cellular automaton.
Ensure that your `initial_conditions` and `rule` are compatible in terms of dimensions.
"""
function CellularAutomaton(rule::AbstractODRule, initial_conditions, generations)
evolution = zeros(
        eltype(initial_conditions), generations, length(initial_conditions)
)
evolution[1, :] = initial_conditions
for i in 2:generations
evolution[i, :] = rule(evolution[i - 1, :])
end
return CellularAutomaton(generations, rule, evolution)
end
function CellularAutomaton(rule::AbstractTDRule, initial_conditions, generations)
evolution = zeros(
        eltype(initial_conditions),
size(initial_conditions, 1),
size(initial_conditions, 2),
generations,
)
evolution[:, :, 1] = initial_conditions
for i in 2:generations
evolution[:, :, i] = rule(evolution[:, :, i - 1])
end
return CellularAutomaton(generations, rule, evolution)
end
export CellularAutomaton
export DCA
export CCA
export TCA
export Life
export lempel_ziv
end # module
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 2275 | abstract type AbstractCCARule <: AbstractODRule end
struct CCA{T<:Real} <: AbstractCCARule
rule::T
radius::Int
end
"""
CCA(rule; radius=1)
Create a Continuous Cellular Automaton (CCA) object.
# Arguments
- `rule`: A numeric code defining the evolution rule for the cellular automaton.
- `radius` (optional): The radius of the neighborhood around each cell considered
for its update at each step. Defaults to `1`.
# Returns
`CCA`: A `CCA` object initialized with the given rule and radius.
# Examples
```julia
cca = CCA(0.5)
```
Once created, the `CCA` object can be used to evolve a given starting array of cell states:
```julia
cca = CCA(0.45; radius=1) # Initialize with rule 0.45 and default radius
starting_array = [0, 1, 0, 1, 0.5, 1] # Initial state
next_generation = cca(starting_array) # Evolve to next generation
```
The evolution is determined by the rule applied to the sum of the neighborhood states,
normalized by their count, for each cell in the array.
"""
function CCA(rule::T; radius=1) where {T<:Real}
return CCA(rule, radius)
end
function (cca::CCA)(starting_array::AbstractArray)
    return evolution(starting_array, cca.rule, cca.radius)
end
function c_state_reader(neighborhood::AbstractArray, radius)
return sum(neighborhood) / length(neighborhood)
end
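# Each new cell value is the fractional part of (neighborhood mean + rule),
# computed over a copy of the cell array padded with periodic boundaries.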
function evolution(cell::AbstractArray, rule::T, radius::Number) where {T<:Real}
neighborhood_size = radius * 2 + 1
output = zeros(length(cell))
cell = vcat(
cell[(end - neighborhood_size ÷ 2 + 1):end], cell, cell[1:(neighborhood_size ÷ 2)]
)
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = modf(
c_state_reader(cell[i:(i + neighborhood_size - 1)], radius) + rule
)[1]
end
return output
end
function evolution(cell::AbstractArray, rule::T, radius::Tuple) where {T<:Real}
neighborhood_size = sum(radius) + 1
output = zeros(length(cell))
cell = vcat(
cell[(end - neighborhood_size ÷ 2 + 1):end], cell, cell[1:(neighborhood_size ÷ 2)]
)
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = modf(
c_state_reader(cell[i:(i + neighborhood_size - 1)], radius) + rule
)[1]
end
return output
end
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 3007 | abstract type AbstractDCARule <: AbstractODRule end
struct DCA{T<:Integer,R,P} <: AbstractDCARule
rule::T
ruleset::R
states::Int
radius::P
end
"""
DCA(rule; states=2, radius=1)
Creates a `DCA` object given a specific rule. It automatically computes the ruleset for
the provided rule, number of states, and radius.
# Arguments
- `rule`: The rule identifier used for the cellular automaton's evolution.
- `states` (optional): The number of possible states for each cell. Defaults to 2.
- `radius` (optional): The neighborhood radius around each cell considered during the
evolution. Defaults to 1.
# Usage
```julia
dca = DCA(30; states=2, radius=1) # Creates a DCA with rule 30, 2 states, and radius 1.
```
Once instantiated, the `DCA` object can evolve a given starting array of cell states
through its callable interface:
```julia
dca = DCA(110; states=2, radius=1) # Initialize with rule 110, 2 states, and a radius of 1
starting_array = [0, 1, 0, 1, 1, 0] # Initial state
next_generation = dca(starting_array) # Evolve to the next generation
```
"""
function DCA(rule::T; states::Int=2, radius=1) where {T<:Integer}
ruleset = conversion(rule, states, radius)
return DCA(rule, ruleset, states, radius)
end
function (dca::DCA)(starting_array::AbstractArray)
return evolution(starting_array, dca.ruleset, dca.states, dca.radius)
end
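# Convert the Wolfram rule number into its base-`states` digit expansion, padded
# to the full rule length and reversed so that the digit for neighborhood
# configuration `c` sits at index `c + 1` (see `state_reader`).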
function conversion(rule::T, states::Int, radius::Int) where {T<:Integer}
rule_len = states^(2 * radius + 1)
rule_bin = parse.(Int, split(string(rule; base=states), ""))
rule_bin = vcat(zeros(typeof(rule_bin[1]), rule_len - length(rule_bin)), rule_bin)
return reverse!(rule_bin)
end
function conversion(rule::T, states::Int, radius::Tuple) where {T<:Integer}
rule_len = states^(sum(radius) + 1)
rule_bin = parse.(Int, split(string(rule; base=states), ""))
rule_bin = vcat(zeros(typeof(rule_bin[1]), rule_len - length(rule_bin)), rule_bin)
return reverse!(rule_bin)
end
function state_reader(neighborhood::AbstractArray, states::Int)
    return parse(Int, join(convert(Array{Int}, neighborhood)); base=states) + 1 # interpret the neighborhood as a base-`states` number, shifted to 1-based indexing
end
function evolution(cell::AbstractArray, ruleset, states::Int, radius::Int)
neighborhood_size = radius * 2 + 1
output = zeros(length(cell))
cell = vcat(
cell[(end - neighborhood_size ÷ 2 + 1):end], cell, cell[1:(neighborhood_size ÷ 2)]
)
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = ruleset[state_reader(cell[i:(i + neighborhood_size - 1)], states)]
end
return output
end
function evolution(cell::AbstractArray, ruleset, states::Int, radius::Tuple)
neighborhood_size = sum(radius) + 1
    output = zeros(length(cell)) # from here on, to be modified
cell = vcat(cell[(end - radius[1] + 1):end], cell, cell[1:radius[2]])
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = ruleset[state_reader(cell[i:(i + neighborhood_size - 1)], states)]
end
return output
end
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 3379 |
abstract type AbstractLifeRule <: AbstractTDRule end
struct Life{T,A} <: AbstractLifeRule
born::T
survive::A
radius::Int
end
"""
Life(life_description; radius=1)
Create a `Life` object to simulate a cellular automaton based on a variation of
the Conway's Game of Life, using custom rules for cell birth and survival.
The rules are defined using the Golly notation.
# Arguments
- `life_description`: A tuple of two tuples (`(b, s)`) specifying the birth (`b`)
and survival (`s`) rules.
+ `b`: A tuple containing the numbers of neighbouring cells that cause a dead
cell to become alive in the next generation.
+ `s`: A tuple containing the numbers of neighbouring cells that allow a live
cell to remain alive in the next generation.
- `radius` (optional): The radius of the neighborhood considered for determining
cell fate. Defaults to 1.
# Usage
```julia
life = Life(((3,), (2, 3)); radius=1) # Initializes Life
```
After instantiation, the `Life` object can be used to evolve a given starting
array representing the initial state of the cellular automaton:
```julia
# Initialize Life with custom rules: birth if 3 neighbors, survive if 2 or 3 neighbors
life = Life(((3,), (2, 3)); radius=1)
# Example starting state: a 5x5 grid with a "glider" pattern
starting_array = zeros(Int, 5, 5)
starting_array[2, 3] = 1
starting_array[3, 4] = 1
starting_array[4, 2:4] .= 1
# Compute the next generation
next_generation = life(starting_array)
```
"""
function Life(life_description::Tuple; radius=1)
born, survive = life_description[1], life_description[2]
return Life(born, survive, radius)
end
function (life::Life)(starting_array::AbstractMatrix)
return life_evolution(starting_array, life.born, life.survive, life.radius)
end
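# Pad the grid with wrapped-around rows and columns so that neighborhoods of
# border cells are read with periodic (toroidal) boundary conditions.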
function virtual_expansion(starting_array::AbstractMatrix, radius::Int)
height, width = size(starting_array)
nh, nw = height - radius + 1, width - radius + 1
left = vcat(
starting_array[nh:end, nw:end],
starting_array[:, nw:end],
starting_array[1:radius, nw:end],
)
right = vcat(
starting_array[nh:end, 1:radius],
starting_array[:, 1:radius],
starting_array[1:radius, 1:radius],
)
middle = vcat(starting_array[nh:end, :], starting_array, starting_array[1:radius, :])
return hcat(left, middle, right)
end
function life_application(state, born, survive)
past_value = state[size(state, 1) ÷ 2 + 1, size(state, 2) ÷ 2 + 1] #save past cell value
state[size(state, 1) ÷ 2 + 1, size(state, 2) ÷ 2 + 1] = 0 #past cell value set to zero
    alive = sum(state) # the sum counts the cells alive in the neighborhood, since the central value was set to zero
if past_value == 1 && alive in survive
return 1
elseif past_value == 0 && alive in born
return 1
else
return 0
end
end
function life_evolution(starting_array::AbstractMatrix, born, survive, radius)
height, width = size(starting_array)
output = zeros(typeof(starting_array[2]), height, width)
virtual_output = virtual_expansion(starting_array, radius)
for i in 1:(height - radius + 1), j in 1:(width - radius + 1)
output[i, j] = life_application(
virtual_output[i:(i + radius + 1), j:(j + radius + 1)], born, survive
)
end
return output
end
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 762 | function lempel_ziv_complexity(sequence)
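    # greedy Lempel-Ziv parsing: grow the current substring until it has not
    # been seen before, record it, then restart after it; the complexity is
    # the number of distinct substrings produced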
sub_strings = Set()
n = length(sequence)
ind = 1
inc = 1
while true
if ind + inc > n
break
end
sub_str = sequence[ind:(ind + inc)]
if sub_str in sub_strings
inc += 1
else
push!(sub_strings, sub_str)
ind += inc
inc = 1
end
end
return length(sub_strings)
end
"""
function lempel_ziv(ca::AbstractCA)
Computes the lempel ziv complexity of a given Cellular Automaton.
"""
function lempel_ziv(ca::AbstractCA)
ca_size = size(ca.evolution, 1)
lz_tot = 0
for i in 1:ca_size
lz_tot += lempel_ziv_complexity(ca.evolution[i, :])
end
return lz_tot / ca_size
end
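# Example (hypothetical usage sketch; rule and sizes are arbitrary):
#
#   ca = CellularAutomaton(DCA(30), rand(Bool, 101), 100)
#   complexity = lempel_ziv(ca)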
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 3112 | abstract type AbstractTCARule <: AbstractDCARule end
struct TCA{B,R,T} <: AbstractTCARule
code::B
codeset::R
states::Int
radius::T
end
"""
TCA(code; states=2, radius=1)
Constructs a Totalistic Cellular Automaton (TCA) with a specified code, number of states,
and neighborhood radius. It automatically computes the codeset for the provided code
and configuration, which is used for the automaton's evolution.
# Arguments
- `code`: An integer or string representing the rule code for the automaton's evolution.
- `states` (optional): The number of possible states for each cell. Defaults to 2.
- `radius` (optional): The neighborhood radius around each cell considered during the
evolution. Defaults to 1.
# Usage
```julia
tca = TCA(30; states=2, radius=1) # Creates a TCA with rule code 30, 2 states, and radius 1.
```
After instantiation, the `TCA` object can be used to evolve a given starting array of
cell states:
```julia
# Initialize TCA with a specific code, default states, and radius
tca = TCA(102; states=3, radius=1)
# Example starting state: a 1D array of cells
starting_array = [0, 2, 1, 0, 1, 2]
# Compute the next generation
next_generation = tca(starting_array)
```
"""
function TCA(code; states=2, radius=1)
codeset = tca_conversion(code, states, radius)
return TCA(code, codeset, states, radius)
end
function (tca::TCA)(starting_array::AbstractArray)
    return tca_evolution(starting_array, tca.codeset, tca.states, tca.radius)
end
function tca_conversion(code, states, radius::Number)
code_len = (2 * radius + 1) * states - 2
code_bin = parse.(Int, split(string(code; base=states), ""))
code_bin = vcat(zeros(typeof(code_bin[1]), code_len - length(code_bin)), code_bin)
return reverse!(code_bin)
end
function tca_conversion(code, states, radius::Tuple)
code_len = (sum(radius) + 1) * states - 2
code_bin = parse.(Int, split(string(code; base=states), ""))
code_bin = vcat(zeros(typeof(code_bin[1]), code_len - length(code_bin)), code_bin)
return reverse!(code_bin)
end
function tca_state_reader(neighborhood::AbstractArray, codeset_len)
return mod1(sum(neighborhood) + 1, codeset_len)
end
function tca_evolution(cell::AbstractArray, codeset, states, radius::Number)
neighborhood_size = radius * 2 + 1
output = zeros(length(cell))
cell = vcat(
cell[(end - neighborhood_size ÷ 2 + 1):end], cell, cell[1:(neighborhood_size ÷ 2)]
)
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = codeset[tca_state_reader(
cell[i:(i + neighborhood_size - 1)], length(codeset)
)]
end
return output
end
function tca_evolution(cell::AbstractArray, codeset, states, radius::Tuple)
neighborhood_size = sum(radius) + 1
output = zeros(length(cell))
cell = vcat(cell[(end - radius[1] + 1):end], cell, cell[1:radius[2]])
for i in 1:(length(cell) - neighborhood_size + 1)
output[i] = codeset[tca_state_reader(
cell[i:(i + neighborhood_size - 1)], length(codeset)
)]
end
return output
end
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 415 | using CellularAutomata
const gens = 3
blinker = [[0, 0, 0, 0, 0] [0, 0, 1, 0, 0] [0, 0, 1, 0, 0] [0, 0, 1, 0, 0] [0, 0, 0, 0, 0]]
ca = CellularAutomaton(Life((3, (2, 3))), blinker, gens)
@test ca.evolution == cat(
[0 0 0 0 0; 0 0 0 0 0; 0 1 1 1 0; 0 0 0 0 0; 0 0 0 0 0],
[0 0 0 0 0; 0 0 1 0 0; 0 0 1 0 0; 0 0 1 0 0; 0 0 0 0 0],
[0 0 0 0 0; 0 0 0 0 0; 0 1 1 1 0; 0 0 0 0 0; 0 0 0 0 0];
dims=3,
)
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 278 | using CellularAutomata, Random
Random.seed!(42)
const radius = 1
const generations = 10
const ncells = 11
const starting_val = rand(ncells)
const rule = 0.05
ca = CellularAutomaton(CCA(rule), starting_val, generations)
@test size(ca.evolution) == (generations, ncells)
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 571 | using CellularAutomata, Random
Random.seed!(42)
const states = 4
const radius = 1
const generations = 10
const ncells = 11
const starting_array = rand(0:(states - 1), ncells)
const rule = 107396
#testing states > 2
ca = CellularAutomaton(DCA(rule; states=states), starting_array, generations)
@test size(ca.evolution) == (generations, ncells)
#testing states == 2
const bstates = 2
const brule = 110
const bstarting_array = rand(Bool, ncells)
bca = CellularAutomaton(DCA(brule), bstarting_array, generations)
@test size(bca.evolution) == (generations, ncells)
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 552 | using CellularAutomata
const rule0 = DCA(0)
@test rule0.ruleset == [0, 0, 0, 0, 0, 0, 0, 0] #http://atlas.wolfram.com/01/01/0/
const rule1 = DCA(1)
@test rule1.ruleset == [1, 0, 0, 0, 0, 0, 0, 0] #http://atlas.wolfram.com/01/01/1/
const rule2 = DCA(2)
@test rule2.ruleset == [0, 1, 0, 0, 0, 0, 0, 0] #http://atlas.wolfram.com/01/01/2/
const rule3 = DCA(3)
@test rule3.ruleset == [1, 1, 0, 0, 0, 0, 0, 0] #http://atlas.wolfram.com/01/01/3/
const rule30 = DCA(30)
@test rule30.ruleset == [0, 1, 1, 1, 1, 0, 0, 0] #http://atlas.wolfram.com/01/01/30/
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 735 | using CellularAutomata
const size_space = 6
const gens = 5
glider = [[0, 0, 1, 0, 0] [0, 0, 0, 1, 0] [0, 1, 1, 1, 0]]
space = zeros(Bool, size_space, size_space)
space[1:size(glider, 1), 1:size(glider, 2)] = glider
ca = CellularAutomaton(Life((3, (2, 3))), space, gens)
@test ca.evolution == cat(
[0 0 0 0 0 0; 0 0 1 0 0 0; 1 0 1 0 0 0; 0 1 1 0 0 0; 0 0 0 0 0 0; 0 0 0 0 0 0],
[0 0 0 0 0 0; 0 1 0 0 0 0; 0 0 1 1 0 0; 0 1 1 0 0 0; 0 0 0 0 0 0; 0 0 0 0 0 0],
[0 0 0 0 0 0; 0 0 1 0 0 0; 0 0 0 1 0 0; 0 1 1 1 0 0; 0 0 0 0 0 0; 0 0 0 0 0 0],
[0 0 0 0 0 0; 0 0 0 0 0 0; 0 1 0 1 0 0; 0 0 1 1 0 0; 0 0 1 0 0 0; 0 0 0 0 0 0],
[0 0 0 0 0 0; 0 0 0 0 0 0; 0 0 0 1 0 0; 0 1 0 1 0 0; 0 0 1 1 0 0; 0 0 0 0 0 0];
dims=3,
)
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 128 | using CellularAutomata
using Aqua: Aqua
Aqua.test_all(CellularAutomata; ambiguities=false, deps_compat=(check_extras = false))
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 680 | using Test
using SafeTestsets
@safetestset "Quality Assurance" begin
include("qa.jl")
end
@testset "DCA" begin
@safetestset "Size tests" begin
include("dca_test.jl")
end
@safetestset "ECA ruleset tests" begin
include("eca_ruleset_test.jl")
end
end
@testset "TCA" begin
@safetestset "Size tests" begin
include("tca_test.jl")
end
end
@testset "CCA" begin
@safetestset "Size tests" begin
include("cca_test.jl")
end
end
@testset "Life-like" begin
@safetestset "Life glider" begin
include("glider_test.jl")
end
@safetestset "Life blinker" begin
include("blinker_test.jl")
end
end
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | code | 577 | using CellularAutomata, Random
Random.seed!(42)
const states = 4
const radius = 1
const generations = 10
const ncells = 11
const starting_array = rand(0:(states - 1), ncells)
const rule = 107396
#testing states > 2
ca = CellularAutomaton(DCA(rule; states=states), starting_array, generations)
@test size(ca.evolution) == (generations, ncells)
#=
#testing states == 2
const bstates = 2
const brule = 110
const bstarting_array = rand(Bool, ncells)
bca = ca = CellularAutomaton(DCA(brule), bstarting_array, generations)
@test size(bca.evolution) == (generations, ncells)
=#
| CellularAutomata | https://github.com/MartinuzziFrancesco/CellularAutomata.jl.git |
|
[
"MIT"
] | 0.0.5 | 5ba5cdf37fb2104dd6a653b20be82b0ceb13888b | docs | 11437 | # CellularAutomata.jl
<p align="center">
<img width="400px" src="docs/src/assets/logo.png"/>
</p>
| **Documentation** | **Build Status** | **Julia** | **Testing** | **DOI** |
|:-----------------:|:----------------:|:---------:|:-----------:|:-------:|
| [![docs][docs-img]][docs-url] | [![CI][ci-img]][ci-url] [![codecov][cc-img]][cc-url] | [![Julia][julia-img]][julia-url] [![Code Style: Blue][style-img]][style-url] | [![Aqua QA][aqua-img]][aqua-url] | [![DOI][doi-img]][doi-url]
[docs-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-url]: https://martinuzzifrancesco.github.io/CellularAutomata.jl/dev/
[ci-img]: https://github.com/MartinuzziFrancesco/CellularAutomata.jl/actions/workflows/CI.yml/badge.svg
[ci-url]: https://github.com/MartinuzziFrancesco/CellularAutomata.jl/actions/workflows/CI.yml
[cc-img]: https://codecov.io/gh/MartinuzziFrancesco/CellularAutomata.jl/coverage.svg?branch=master
[cc-url]: https://codecov.io/gh/MartinuzziFrancesco/CellularAutomata.jl?branch=master
[julia-img]: https://img.shields.io/badge/julia-v1.9+-blue.svg
[julia-url]: https://julialang.org/
[style-img]: https://img.shields.io/badge/code%20style-blue-4495d1.svg
[style-url]: https://github.com/invenia/BlueStyle
[aqua-img]: https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg
[aqua-url]: https://github.com/JuliaTesting/Aqua.jl
[doi-img]: https://zenodo.org/badge/244027385.svg
[doi-url]: https://zenodo.org/badge/latestdoi/244027385
## Installation
CellularAutomata.jl is registered on the general registry. For the installation follow:
```julia
julia> using Pkg
julia> Pkg.add("CellularAutomata")
```
or, if you prefer:
```julia
julia> using Pkg
julia> Pkg.add(url="https://github.com/MartinuzziFrancesco/CellularAutomata.jl")
```
## Discrete Cellular Automata
The package offers creation of all the cellular automata described in A New Kind of Science by Wolfram, and the rules for the creation are labelled as in the book.
We will recreate some of the examples that can be found in the [wolfram atlas](http://atlas.wolfram.com/TOC/TOC_200.html) both for elementary and totalistic cellular automata.
### Elementary Cellular Automata
Elementary Cellular Automata (ECA) have a radius of one and can be in only two possible states. Here we show a couple of examples:
[Rule 18](http://atlas.wolfram.com/01/01/18/)
```julia
using CellularAutomata, Plots
states = 2
radius = 1
generations = 50
ncells = 111
starting_val = zeros(Bool, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 18
ca = CellularAutomaton(DCA(rule), starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

[Rule 30](http://atlas.wolfram.com/01/01/30/)
```julia
using CellularAutomata, Plots
states = 2
radius = 1
generations = 50
ncells = 111
starting_val = zeros(Bool, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 30
ca = CellularAutomaton(DCA(rule), starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

### General Cellular Automata
General Cellular Automata follow the same rules as ECA but can have a radius larger than one and/or more than two states. Examples are provided for every possible combination, starting with a cellular automaton with 3 states.
[Rule 7110222193934](https://www.wolframalpha.com/input/?i=rule+7%2C110%2C222%2C193%2C934+k%3D3&lk=3)
```julia
using CellularAutomata, Plots
states = 3
radius = 1
generations = 50
ncells = 111
starting_val = zeros(ncells)
starting_val[Int(floor(ncells/2)+1)] = 2
rule = 7110222193934
ca = CellularAutomaton(DCA(rule,states=states,radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false,
size=(ncells*10, generations*10))
```

The following examples shows a Cellular Automaton with radius=2, with two only possible states:
[Rule 1388968789](https://www.wolframalpha.com/input/?i=rule+1%2C388%2C968%2C789+r%3D2&lk=3)
```julia
using CellularAutomata, Plots
states = 2
radius = 2
generations = 30
ncells = 111
starting_val = zeros(ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 1388968789
ca = CellularAutomaton(DCA(rule,states=states,radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false,
size=(ncells*10, generations*10))
```

And finally, three states with a radius equal to two:
[Rule 914752986721674989234787899872473589234512347899](https://www.wolframalpha.com/input/?i=CA+k%3D3+r%3D2+rule+914752986721674989234787899872473589234512347899&lk=3)
```julia
using CellularAutomata, Plots
states = 3
radius = 2
generations = 30
ncells = 111
starting_val = zeros(ncells)
starting_val[Int(floor(ncells/2)+1)] = 2
rule = 914752986721674989234787899872473589234512347899
ca = CellularAutomaton(DCA(rule,states=states,radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false,
size=(ncells*10, generations*10))
```

It is also possible to specify asymmetric neighborhoods by passing a tuple to the `radius` keyword argument, detailing the number of neighbors to consider to the left and to the right of the cell:
[Rule 1235](https://www.wolframalpha.com/input/?i=radius+3%2F2+rule+1235&lk=3)
```julia
using CellularAutomata, Plots
states = 2
radius = (2,1)
generations = 30
ncells = 111
starting_val = zeros(ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 1235
ca = CellularAutomaton(DCA(rule,states=states,radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false,
size=(ncells*10, generations*10))
```

### Totalistic Cellular Automata
Totalistic Cellular Automata take the sum of the neighborhood to calculate the value of a cell at the next step.
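The rule number works analogously to the elementary case, except that the base-`states` digits are indexed by the neighborhood sum rather than by the neighborhood configuration. A minimal sketch of the decoding (again illustrative, not the package internals):
```julia
# Decode a totalistic rule number: each base-`states` digit gives the
# outcome for one possible neighborhood sum.
rule, states, radius = 1635, 3, 1
maxsum = (states - 1) * (2radius + 1)        # largest possible neighborhood sum
table = digits(rule, base=states, pad=maxsum + 1)
# table[s + 1] is the next state of a cell whose neighborhood sums to s
```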
[Rule 1635](http://atlas.wolfram.com/01/02/1635/)
```julia
using CellularAutomata, Plots
states = 3
radius = 1 # default value for TCA, shown here for reference
generations = 50
ncells = 111
starting_val = zeros(Int, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 1635
ca = CellularAutomaton(TCA(rule, states=states),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

[Rule 107398](http://atlas.wolfram.com/01/03/107398/)
```julia
using CellularAutomata, Plots
states = 4
radius = 1 # default value for TCA, shown here for reference
generations = 50
ncells = 111
starting_val = zeros(Int, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 107398
ca = CellularAutomaton(TCA(rule, states=states),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

Here are some results for a larger radius, using a radius of two as an example.
[Rule 53](http://atlas.wolfram.com/01/06/Rules/53/index.html#01_06_9_53)
```julia
using CellularAutomata, Plots
states = 2 # default value for TCA, shown here for reference
radius = 2
generations = 50
ncells = 111
starting_val = zeros(Int, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1
rule = 53
ca = CellularAutomaton(TCA(rule, radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

## Continuous Cellular Automata
Continuous Cellular Automata work in the same way as totalistic ones, but with real values. The examples are taken from the already mentioned book [NKS](https://www.wolframscience.com/nks/p159--continuous-cellular-automata/).
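In NKS, each step takes the mean of the neighborhood, adds the rule constant, and keeps only the fractional part. A minimal sketch of one such step under that assumption (the package's actual implementation, including boundary handling, may differ):
```julia
# One continuous-CA step: fractional part of (neighborhood mean + rule constant).
# Periodic boundaries are assumed here for simplicity.
function cca_step(cells::Vector{Float64}, rule::Float64; radius::Int=1)
    n = length(cells)
    next = similar(cells)
    for i in 1:n
        s = sum(cells[mod1(i + j, n)] for j in -radius:radius)
        next[i] = mod(s / (2radius + 1) + rule, 1.0)
    end
    return next
end
```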
Rule 0.025
```julia
using CellularAutomata, Plots
generations = 50
ncells = 111
starting_val = zeros(Float64, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1.0
rule = 0.025
ca = CellularAutomaton(CCA(rule), starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

Rule 0.2
```julia
using CellularAutomata, Plots
radius = 1
generations = 50
ncells = 111
starting_val = zeros(Float64, ncells)
starting_val[Int(floor(ncells/2)+1)] = 1.0
rule = 0.2
ca = CellularAutomaton(CCA(rule, radius=radius),
starting_val, generations)
heatmap(ca.evolution,
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
axis=false,
ticks=false)
```

## Game of Life
This package can also reproduce Conway's Game of Life, and any variation based on it. The `Life()` function takes a tuple specifying the numbers of neighbors that give birth to a new cell and those that let an existing cell survive. (For example, in Conway's Life the tuple `(3, (2,3))` indicates that having 3 live neighbors gives birth to an otherwise dead cell, and that having either 2 or 3 live neighbors lets a live cell continue living.) The implementation follows the [Golly](http://golly.sourceforge.net/Help/changes.html) notation.
This script reproduces the famous glider:
```julia
using CellularAutomata, Plots
# each bracketed vector is a column, so this builds a 5x3 glider pattern
glider = [[0, 0, 1, 0, 0] [0, 0, 0, 1, 0] [0, 1, 1, 1, 0]]
space = zeros(Bool, 30, 30)
# place the glider in the top-left corner of the grid
insert = 1
space[insert:insert+size(glider, 1)-1, insert:insert+size(glider, 2)-1] = glider
gens = 100
space_gliding = CellularAutomaton(Life((3, (2,3))), space, gens)
anim = @animate for i = 1:gens
heatmap(space_gliding.evolution[:,:,i],
yflip=true,
c=cgrad([:white, :black]),
legend = :none,
size=(1080,1080),
axis=false,
ticks=false)
end
gif(anim, "glider.gif", fps = 15)
```
