licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 5233 | ###############################################################################
# Build methods for ATC algorithm #
###############################################################################
"""
ATC algorithm module contains build and update methods
"""
module atc_methods
using ..PowerModelsADA
"solve distributed OPF using ATC algorithm"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf(data, model_type, optimizer, atc_methods; kwargs...)
end
"initialize the ATC algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ATC parameters
data["parameter"] = Dict(
"alpha" => Float64(get(kwargs, :alpha, 1.05)),
"beta" => Float64(get(kwargs, :beta, 1)),
"beta_max" => Float64(get(kwargs, :beta_max, 1e6)))
end
"build PowerModel object for the ATC algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_atc)
end
"ATC algorithm objective function"
function objective_atc(pm::AbstractPowerModel)
## ATC parameters
beta = pm.data["parameter"]["beta"]
## data
shared_variable_local = pm.data["shared_variable"]
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
## objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
objective += (beta * (v - v_central))^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"update the ATC algorithm data after each iteration"
function update_method(data::Dict{String, <:Any})
## ATC parameters
alpha = data["parameter"]["alpha"]
beta = data["parameter"]["beta"]
beta_max = data["parameter"]["beta_max"]
## data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = data["dual_variable"]
## update dual variable
## update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_primal = shared_variable_local[area][variable][idx]
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx] = v_dual + 2 * beta^2 * (v_primal - v_central)
end
end
end
## update ATC parameter
if beta < beta_max
data["parameter"]["beta"] *= alpha
end
calc_mismatch!(data, central=true)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
post_processors = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable")
end
"""
solve_dopf_atc(data::Dict{String, <:Any}, model_type::DataType, optimizer;
mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, print_optimizer_info::Bool=false, alpha::Real=1.05, beta::Real=1.0)
Solve the distributed OPF problem using ATC algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- print_level::Int64=1 : print mismatch after each iteration and result summary
- alpha::Real=1.05 : algorithm parameter
- beta::Real=1.0 : algorithm parameter
"""
solve_dopf_atc = atc_methods.solve_method
# export the algorithm methods module and solve method
export atc_methods, solve_dopf_atc | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 46208 | ###############################################################################
# Base method for all distributed OPF algorithms #
###############################################################################
""
function solve_pmada_model(data::Dict{String,<:Any}, model_type::Type, optimizer, build_method;
ref_extensions=[], solution_processors=[], relax_integrality=false,
multinetwork=false, multiconductor=false, kwargs...)
if multinetwork != _IM.ismultinetwork(data)
model_requirement = multinetwork ? "multi-network" : "single-network"
data_type = _IM.ismultinetwork(data) ? "multi-network" : "single-network"
end
if multiconductor != ismulticonductor(data)
model_requirement = multiconductor ? "multi-conductor" : "single-conductor"
data_type = ismulticonductor(data) ? "multi-conductor" : "single-conductor"
end
pm = instantiate_pmada_model(data, model_type, build_method; ref_extensions=ref_extensions, kwargs...)
result = optimize_model!(pm, relax_integrality=relax_integrality, optimizer=optimizer, solution_processors=solution_processors)
return result
end
""
function instantiate_pmada_model(data::Dict{String,<:Any}, model_type::Type, build_method; kwargs...)
return _IM.instantiate_model(data, model_type, build_method, ref_add_core!, _pmada_global_keys, pm_it_sym; kwargs...)
end
""
function build_pmada_ref(data::Dict{String,<:Any}; ref_extensions=[])
return _IM.build_ref(data, ref_add_core!, _pmada_global_keys, pm_it_name; ref_extensions=ref_extensions)
end
"""
solve_dopf(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using fully distributed algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize_method::Function : initialize the algorithm parameters and shared variables
- update_method::Function : update the algorithm after each iteration
- build_method::Function : problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- multiprocessors::Bool=false : enable multiprocessors using available workers. Multiprocessors feature requires loading the PowerModelsADA and the optimizer packages on all the processors using @everywhere using <package_name>.
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
# arrange and get areas id
arrange_areas_id!(data)
areas_id = get_areas_id(data)
diameter = get_diameter(data)
if length(areas_id) < 2
error("Number of areas is less than 2, at least 2 areas is needed")
end
# decompose the system into subsystems
data_area = Dict{Int64, Any}()
for area in areas_id
data_area[area] = decompose_system(data, area)
end
solve_dopf(data_area, model_type, optimizer, dopf_method; print_level, multiprocessors=multiprocessors, diameter=diameter, all_areas=areas_id, kwargs...)
end
"parse a case file by path and solve the fully distributed OPF"
function solve_dopf(data::String, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
    parsed_data = parse_file(data)
    return solve_dopf(parsed_data, model_type, optimizer, dopf_method; print_level=print_level, multiprocessors=multiprocessors, kwargs...)
end
"dispatch the area-partitioned problem to the single- or multi-processor driver"
function solve_dopf(data::Dict{Int64, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
    driver = multiprocessors ? solve_dopf_mp : solve_dopf_sp
    return driver(data, model_type, optimizer, dopf_method; print_level=print_level, kwargs...)
end
"""
solve_dopf_sp(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using fully distributed algorithm on single-processor.
# Arguments:
- data::Dict{Int64, <:Any} : dictionary contains area data in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize_method::Function : initialize the algorithm parameters and shared variables
- update_method::Function : update the algorithm after each iteration
- build_method::Function : problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf_sp(data_area::Dict{Int64, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, kwargs...)
# get areas ids
areas_id = get_areas_id(data_area)
# initilize distributed power model parameters
for area in areas_id
dopf_method.initialize_method(data_area[area], model_type; kwargs...)
end
# get global parameters
max_iteration = get(kwargs, :max_iteration, 1000)
# initialize the algorithms global counters
iteration = 1
flag_convergence = false
# start iteration
while iteration <= max_iteration && !flag_convergence
# solve local problem and update solution
info = @capture_out begin
Threads.@threads for area in areas_id
result = solve_pmada_model(data_area[area], model_type, optimizer, dopf_method.build_method, solution_processors=dopf_method.post_processors)
update_data!(data_area[area], result["solution"])
end
end
# share solution with neighbors, the shared data is first obtained to facilitate distributed implementation
for area in areas_id # sender subsystem
for neighbor in data_area[area]["neighbors"] # receiver subsystem
shared_data = prepare_shared_data(data_area[area], neighbor)
receive_shared_data!(data_area[neighbor], deepcopy(shared_data), area)
end
end
# calculate mismatches and update convergence flags
Threads.@threads for area in areas_id
dopf_method.update_method(data_area[area])
end
# print solution
print_iteration(data_area, print_level, [info])
# check global convergence and update iteration counters
flag_convergence = update_global_flag_convergence(data_area)
iteration += 1
end
print_convergence(data_area, print_level)
return data_area
end
"""
solve_dopf_mp(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using fully distributed algorithm on multiprocessors. Multiprocessors feature requires loading the PowerModelsADA and the optimizer packages on all the processors using @everywhere using <package_name>.
# Arguments:
- data::Dict{Int64, <:Any} : dictionary contains area data in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize_method::Function : initialize the algorithm parameters and shared variables
- update_method::Function : update the algorithm after each iteration
- build_method::Function : problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf_mp(data_area::Dict{Int64, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, kwargs...)
# lookup dictionaries for worker-area pairs
areas_id = get_areas_id(data_area)
worker_id = Distributed.workers()
number_workers = length(worker_id)
k = 1
area_worker = Dict()
for i in areas_id
if k > number_workers
k = 1
end
area_worker[i] = worker_id[k]
k += 1
end
worker_area = Dict([i => findall(x -> x==i, area_worker) for i in worker_id if i in values(area_worker)])
# initiate communication channels
comms = Dict(0 => Dict(area => Distributed.RemoteChannel(1) for area in areas_id))
for area1 in areas_id
comms[area1] = Dict()
for area2 in [0; areas_id]
if area1 != area2
comms[area1][area2] = Distributed.RemoteChannel(area_worker[area1])
end
end
end
# initilize distributed power model parameters
for area in areas_id
dopf_method.initialize_method(data_area[area], model_type; kwargs...)
put!(comms[0][area], data_area[area])
end
# get global parameters
max_iteration = get(kwargs, :max_iteration, 1000)
# initialize the algorithms global counters
iteration = 1
global_flag_convergence = false
global_counters = Dict{Int64, Any}()
# share global variables
Distributed.@everywhere keys(worker_area) begin
comms = $comms
areas_id = $areas_id
worker_area = $worker_area
area_worker = $area_worker
dopf_method = $dopf_method
model_type = $model_type
optimizer = $optimizer
area_id = worker_area[myid()]
data_local = Dict{Int64, Any}(area => take!(comms[0][area]) for area in area_id)
end
# start iteration
while iteration <= max_iteration && !global_flag_convergence
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# solve local problem and update solution
result = solve_pmada_model(data_local[area], model_type, optimizer, dopf_method.build_method, solution_processors=dopf_method.post_processors)
update_data!(data_local[area], result["solution"])
# send data to neighboring areas
for neighbor in data_local[area]["neighbors"]
shared_data = prepare_shared_data(data_local[area], neighbor)
put!(comms[area][neighbor], shared_data)
end
end
end
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# receive data to neighboring areas
for neighbor in data_local[area]["neighbors"]
received_data = take!(comms[neighbor][area])
receive_shared_data!(data_local[area], received_data, neighbor)
end
# calculate and share mismatches
dopf_method.update_method(data_local[area])
counters = Dict("option"=> data_local[area]["option"], "counter" => data_local[area]["counter"], "mismatch" => data_local[area]["mismatch"])
if data_local[area]["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
counters["dual_residual"] = data_local[area]["dual_residual"]
end
put!(comms[area][0], deepcopy(counters))
end
end
# receive the mismatches from areas
for area in areas_id
counters = take!(comms[area][0])
global_counters[area] = counters
end
# print progress
print_iteration(global_counters, print_level)
# update flag convergence and iteration number
global_flag_convergence = update_global_flag_convergence(global_counters)
iteration += 1
end
# receive the final solution
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# send the area data
put!(comms[area][0], data_local[area])
end
end
for area in areas_id
data_area[area] = take!(comms[area][0])
end
# close the communication channels
for i in keys(comms)
for j in keys(comms[i])
close(comms[i][j])
end
end
print_convergence(data_area, print_level)
return data_area
end
"""
solve_dopf_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using distributed algorithm with central coordinator.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize\\_method_local::Function : initialize the local algorithm parameters and shared variables
- initialize\\_method_coordinator::Function : initialize the coordinator algorithm parameters and shared variables
- update\\_method_local::Function : update the local data after each iteration
- update\\_method_coordinator::Function : update the coordinator data after each iteration
- build\\_method_local::Function : local problem formulation
- build\\_method_coordinator::Function : coordinator problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- multiprocessors::Bool=false : enable multiprocessors using available workers. Multiprocessors feature requires loading the PowerModelsADA and the optimizer packages on all the processors using @everywhere using <package_name>.
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous\\_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
# arrange and get areas id
arrange_areas_id!(data)
areas_id = get_areas_id(data)
if length(areas_id) < 2
error("Number of areas is less than 2, at least 2 areas is needed")
end
# decompose the system into subsystems
data_area = Dict{Int64, Any}(0 => decompose_coordinator(data))
for area in areas_id
data_area[area] = decompose_system(data, area)
end
solve_dopf_coordinated(data_area, model_type, optimizer, dopf_method; print_level=print_level, multiprocessors=multiprocessors, kwargs...)
end
"parse a case file by path and solve the coordinated distributed OPF"
function solve_dopf_coordinated(data::String, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
    parsed_data = parse_file(data)
    return solve_dopf_coordinated(parsed_data, model_type, optimizer, dopf_method; print_level=print_level, multiprocessors=multiprocessors, kwargs...)
end
"dispatch the coordinated, area-partitioned problem to the single- or multi-processor driver"
function solve_dopf_coordinated(data::Dict{Int64, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, kwargs...)
    driver = multiprocessors ? solve_dopf_coordinated_mp : solve_dopf_coordinated_sp
    return driver(data, model_type, optimizer, dopf_method; print_level=print_level, kwargs...)
end
"""
solve_dopf_coordinated_sp(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using distributed algorithm with central coordinator on single-processors.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize\\_method_local::Function : initialize the local algorithm parameters and shared variables
- initialize\\_method_coordinator::Function : initialize the coordinator algorithm parameters and shared variables
- update\\_method_local::Function : update the local data after each iteration
- update\\_method_coordinator::Function : update the coordinator data after each iteration
- build\\_method_local::Function : local problem formulation
- build\\_method_coordinator::Function : coordinator problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous\\_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf_coordinated_sp(data_area::Dict{Int64, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, kwargs...)
# get areas ids
areas_id = get_areas_id(data_area)
deleteat!(areas_id, areas_id .== 0)
# initilize distributed power model parameters
dopf_method.initialize_method_coordinator(data_area[0], model_type; kwargs...)
for area in areas_id
dopf_method.initialize_method_local(data_area[area], model_type; kwargs...)
end
## get global parameters
max_iteration = get(kwargs, :max_iteration, 1000)
# initialize the algorithms global counters
iteration = 0
flag_convergence = false
# start iteration
while iteration <= max_iteration && !flag_convergence
# solve local area problems in parallel
info1 = @capture_out begin
Threads.@threads for area in areas_id
result = solve_pmada_model(data_area[area], model_type, optimizer, dopf_method.build_method_local, solution_processors=dopf_method.post_processors_local)
update_data!(data_area[area], result["solution"])
end
end
# share solution of local areas with the coordinator
for area in areas_id # sender subsystem
shared_data = prepare_shared_data(data_area[area], 0, serialize = false)
receive_shared_data!(data_area[0], deepcopy(shared_data), area)
end
# solve coordinator problem
info2 = @capture_out begin
result = solve_pmada_model(data_area[0], model_type, optimizer, dopf_method.build_method_coordinator, solution_processors=dopf_method.post_processors_coordinator)
update_data!(data_area[0], result["solution"])
end
# share coordinator solution with local areas
for area in areas_id # sender subsystem
shared_data = prepare_shared_data(data_area[0], area, serialize = false)
receive_shared_data!(data_area[area], deepcopy(shared_data), 0)
end
# update local areas and coordinator problems after
dopf_method.update_method_coordinator(data_area[0])
for area in areas_id
dopf_method.update_method_local(data_area[area])
end
# print solution
print_iteration(data_area, print_level, [info1; info2])
# check global convergence and update iteration counters
flag_convergence = data_area[0]["counter"]["flag_convergence"]
iteration += 1
end
print_convergence(data_area, print_level)
return data_area
end
"""
solve_dopf_coordinated_mp(data::Dict{String, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, multiprocessors::Bool=false, mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, save_data::Vector{String}=[], kwargs...)
Solve OPF problem using distributed algorithm with central coordinator on multiprocessors. Multiprocessors feature requires loading the PowerModelsADA and the optimizer packages on all the processors using @everywhere using <package_name>.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- dopf_method::Module : module contains the distributed algorithm methods as follows:
- initialize\\_method_local::Function : initialize the local algorithm parameters and shared variables
- initialize\\_method_coordinator::Function : initialize the coordinator algorithm parameters and shared variables
- update\\_method_local::Function : update the local data after each iteration
- update\\_method_coordinator::Function : update the coordinator data after each iteration
- build\\_method_local::Function : local problem formulation
- build\\_method_coordinator::Function : coordinator problem formulation
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- save_data::Vector{String}=[] : vector contains the keys of the dictionaries to be saved at each iteration in "previous\\_solution". For example, save_data=["solution", "shared_variable", "mismatch"]
- kwargs = includes algorithm-specific and initialization parameters
"""
function solve_dopf_coordinated_mp(data_area::Dict{Int, <:Any}, model_type::DataType, optimizer, dopf_method::Module; print_level::Int64=1, kwargs...)
# lookup dictionaries for worker-area pairs
areas_id = get_areas_id(data_area)
deleteat!(areas_id, areas_id .== 0)
worker_id = Distributed.workers()
number_workers = length(worker_id)
k = 1
area_worker = Dict()
for i in areas_id
if k > number_workers
k = 1
end
area_worker[i] = worker_id[k]
k += 1
end
worker_area = Dict([i => findall(x -> x==i, area_worker) for i in worker_id if i in values(area_worker)])
# initiaiate communication channels
comms = Dict(0 => Dict(area => Distributed.RemoteChannel(1) for area in areas_id))
for area1 in areas_id
comms[area1] = Dict()
for area2 in [0; areas_id]
if area1 != area2
comms[area1][area2] = Distributed.RemoteChannel(area_worker[area1])
end
end
end
# initilize distributed power model parameters
dopf_method.initialize_method_coordinator(data_area[0], model_type; kwargs...)
for area in areas_id
dopf_method.initialize_method_local(data_area[area], model_type; kwargs...)
put!(comms[0][area], data_area[area])
end
## get global parameters
max_iteration = get(kwargs, :max_iteration, 1000)
# initialize the algorithms global counters
iteration = 0
flag_convergence = false
# share global variables
Distributed.@everywhere keys(worker_area) begin
comms = $comms
areas_id = $areas_id
worker_area = $worker_area
area_worker = $area_worker
dopf_method = $dopf_method
model_type = $model_type
optimizer = $optimizer
area_id = worker_area[myid()]
data_local = Dict(area => take!(comms[0][area]) for area in area_id)
end
# start iteration
while iteration <= max_iteration && !flag_convergence
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# solve local problem and update solution
result = solve_pmada_model(data_local[area], model_type, optimizer, dopf_method.build_method_local, solution_processors=dopf_method.post_processors_local)
update_data!(data_local[area], result["solution"])
# send data to coordinator
shared_data = prepare_shared_data(data_local[area], 0)
put!(comms[area][0], shared_data)
end
end
# share solution of local areas with the coordinator
for area in areas_id
received_data = take!(comms[area][0])
receive_shared_data!(data_area[0], received_data, area)
end
# solve coordinator problem
result = solve_pmada_model(data_area[0], model_type, optimizer, dopf_method.build_method_coordinator, solution_processors=dopf_method.post_processors_coordinator)
update_data!(data_area[0], result["solution"])
dopf_method.update_method_coordinator(data_area[0])
# share coordinator solution with local areas
for area in areas_id
shared_data = prepare_shared_data(data_area[0], area)
put!(comms[0][area], shared_data)
end
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# receive data to neighboring areas
received_data = take!(comms[0][area])
receive_shared_data!(data_local[area], received_data, 0)
# calculate mismatches and update convergence flags
dopf_method.update_method_local(data_local[area])
end
end
# print solution
print_iteration_coordinator(data_area, print_level, [])
# check global convergence and update iteration counters
flag_convergence = data_area[0]["counter"]["flag_convergence"]
iteration += 1
end
if number_workers > 1
Distributed.@everywhere keys(worker_area) begin
for area in area_id
# send the area data
put!(comms[area][0], data_local[area])
end
end
for area in areas_id
data_area[area] = take!(comms[area][0])
end
end
for i in keys(comms)
for j in keys(comms[i])
close(comms[i][j])
end
end
print_convergence(data_area, print_level)
return data_area
end
"initialize dopf parameters"
function initialize_dopf!(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
# options
data["option"] = Dict{String, Any}()
data["option"]["tol"] = get(kwargs, :tol, 1e-4)
data["option"]["max_iteration"] = get(kwargs, :max_iteration, 1000)
data["option"]["mismatch_method"] = get(kwargs, :mismatch_method, "norm")
data["option"]["model_type"] = model_type
data["option"]["termination_method"] = get(kwargs, :termination_method, "global")
data["option"]["termination_measure"] = get(kwargs, :termination_measure, "mismatch")
# counters
data["counter"] = Dict{String, Any}()
data["counter"]["iteration"] = Int64(1)
data["counter"]["flag_convergence"] = false
data["counter"]["convergence_iteration"] = Int64(0)
areas_id = get_areas_id(data)
area_id = get_area_id(data)
# mismatch
data["mismatch"] = Dict{String, Any}()
data["neighbors"] = [area for area in areas_id if area != area_id]
# distributed termination method
if data["option"]["termination_method"] in ["local", "distributed"]
areas_id = string.(areas_id)
area_id = string(area_id)
deleteat!(areas_id, areas_id .== area_id)
all_areas = string.(get(kwargs, :all_areas, []))
data["shared_flag_convergence"] = Dict(area => Dict(area_ => false for area_ in all_areas) for area in areas_id)
data["received_flag_convergence"] = Dict(area => Dict(area_ => false for area_ in all_areas) for area in areas_id)
data["shared_convergence_iteration"] = Dict(area => 0 for area in areas_id)
data["received_convergence_iteration"] = Dict(area => 0 for area in areas_id)
data["counter"]["local_flag_convergence"] = false
data["option"]["diameter"] = get(kwargs, :diameter, size(all_areas)[1])
end
# last solution
initialization_method = get(kwargs, :initialization_method, "flat")
data["solution"] = initialize_all_variable(data, model_type, initialization_method)
# previous solutions
save_data = get(kwargs, :save_data, [])
if !isempty(save_data)
data["previous_solution"] = Dict{String, Any}([str=>Vector{Dict}() for str in save_data])
end
end
"store the latest optimized value of every variable in the area solution dictionary"
function update_solution!(pm::AbstractPowerModel, solution::Dict{String, <:Any})
    variables = pm.data["solution"]
    solution["solution"] = variables
    for (variable, indexed_values) in variables
        for idx in keys(indexed_values)
            indexed_values[idx] = JuMP.value(PowerModelsADA._var(pm, variable, idx))
        end
    end
end
"copy the consensus (shared) variable values out of the solved solution into the shared-variable dictionary"
function update_shared_variable!(pm::AbstractPowerModel, solution::Dict{String, <:Any})
    shared = pm.data["shared_variable"]
    solution["shared_variable"] = shared
    for (area, variables) in shared
        for (var, indexed_values) in variables
            for idx in keys(indexed_values)
                indexed_values[idx] = solution["solution"][var][idx]
            end
        end
    end
end
"append a deep copy of each tracked data field to its history vector in `previous_solution`; no-op when no history is kept"
function save_solution!(data::Dict{String, <:Any})
    history = get(data, "previous_solution", nothing)
    isnothing(history) && return nothing
    for (key, records) in history
        # deepcopy so later in-place updates do not mutate the saved snapshot
        push!(records, deepcopy(data[key]))
    end
    return nothing
end
"advance the iteration counter by one and return the new count"
function update_iteration!(data::Dict{String, <:Any})
    data["counter"]["iteration"] = data["counter"]["iteration"] + 1
end
"""
    calc_mismatch!(data::Dict{String, <:Any}; central::Bool=false)

Calculate the consensus mismatch between this area's shared variables and the values
received from each neighboring area, and store the result in `data["mismatch"]`. The
entry keyed by this area's own id is a scalar aggregate over all per-variable
mismatches, using the configured `mismatch_method` ("norm" for the 2-norm,
"max"/"maximum" for the largest absolute value). Set `central=true` if the algorithm
uses the optimality condition of a central coordinator, in which case the mismatch is
measured against the average of the local and received values.
"""
function calc_mismatch!(data::Dict{String, <:Any}; central::Bool=false)
    area_id = string(get_area_id(data))
    mismatch_method = data["option"]["mismatch_method"]
    shared_variable_local = data["shared_variable"]
    shared_variable_received = data["received_variable"]

    # per-area, per-variable, per-index difference between local and received values;
    # only areas we actually received data from are included
    mismatch = Dict{String, Any}([
        area => Dict{String, Any}([
            variable => Dict{String, Any}([
                idx => central ? (shared_variable_local[area][variable][idx] - (shared_variable_received[area][variable][idx] +shared_variable_local[area][variable][idx] )/2) : (shared_variable_local[area][variable][idx] - shared_variable_received[area][variable][idx])
            for idx in keys(shared_variable_local[area][variable])])
        for variable in keys(shared_variable_local[area])])
    for area in keys(shared_variable_local) if area != area_id && area in keys(shared_variable_received) ])

    # aggregate scalar mismatch stored under this area's own id
    if mismatch_method == "norm"
        mismatch[area_id] = LinearAlgebra.norm([value for area in keys(mismatch) if area != area_id for variable in keys(mismatch[area]) for (idx,value) in mismatch[area][variable]], 2)
    elseif mismatch_method == "max" || mismatch_method == "maximum"
        mismatch[area_id] = LinearAlgebra.maximum([abs(value) for area in keys(mismatch) if area != area_id for variable in keys(mismatch[area]) for (idx,value) in mismatch[area][variable]])
    end

    data["mismatch"] = mismatch
end
"""
    calc_dual_residual!(data::Dict{String, <:Any}; central::Bool=false)

Calculate the dual residual (the `alpha`-scaled change of the shared variables between
consecutive iterations) as seen by the area, and store it in `data["dual_residual"]`.
After the first iteration this requires `"shared_variable"` and `"received_variable"`
histories in `data["previous_solution"]`. Set `central=true` if the algorithm uses the
optimality condition of a central coordinator, in which case the residual is based on
the average of the local and received values.
"""
function calc_dual_residual!(data::Dict{String, <:Any}; central::Bool=false)
    area_id = string(get_area_id(data))
    mismatch_method = data["option"]["mismatch_method"]
    alpha = data["parameter"]["alpha"]
    shared_variable_local = data["shared_variable"]
    shared_variable_received = data["received_variable"]
    if data["counter"]["iteration"] == 1
        # first iteration: no stored previous solution, so the previous iterate is
        # implicitly zero and the residual is just the scaled current value
        dual_dual_residual = Dict{String, Any}([
            area => Dict{String, Any}([
                variable => Dict{String, Any}([
                    idx => central ? -alpha* (shared_variable_local[area][variable][idx]+shared_variable_received[area][variable][idx])/2 : -alpha* shared_variable_local[area][variable][idx]
                for idx in keys(shared_variable_local[area][variable])])
            for variable in keys(shared_variable_local[area])])
        for area in keys(shared_variable_local)])
    else
        # later iterations: residual is the scaled change from the last saved solution
        previous_shared_variable_local = data["previous_solution"]["shared_variable"][end]
        previous_shared_variable_received = data["previous_solution"]["received_variable"][end]
        dual_dual_residual = Dict{String, Any}([
            area => Dict{String, Any}([
                variable => Dict{String, Any}([
                    idx => central ? -alpha * ((shared_variable_local[area][variable][idx]+shared_variable_received[area][variable][idx])/2 - (previous_shared_variable_local[area][variable][idx] +previous_shared_variable_received[area][variable][idx] )/2) : -alpha * (shared_variable_local[area][variable][idx] - previous_shared_variable_local[area][variable][idx])
                for idx in keys(shared_variable_local[area][variable])])
            for variable in keys(shared_variable_local[area])])
        for area in keys(shared_variable_local) ])
    end
    # aggregate scalar residual stored under this area's own id
    if mismatch_method == "norm"
        dual_dual_residual[area_id] = LinearAlgebra.norm([value for area in keys(dual_dual_residual) if area != area_id for variable in keys(dual_dual_residual[area]) for (idx,value) in dual_dual_residual[area][variable]])
    elseif mismatch_method == "max" || mismatch_method == "maximum"
        dual_dual_residual[area_id] = LinearAlgebra.maximum([abs(value) for area in keys(dual_dual_residual) if area != area_id for variable in keys(dual_dual_residual[area]) for (idx,value) in dual_dual_residual[area][variable]])
    end
    data["dual_residual"] = dual_dual_residual
end
# "get the parameter for each consistency variable as a dictionary"
# function get_parameter(data::Dict{String, <:Any}, parameter::String)
# if haskey(data, parameter)
# return data[parameter]
# else
# return Dict{String, Any}([area => Dict{String, Any}([variable => Dict{String, Any}([idx => data["parameter"][parameter] for idx in keys(data[parameter][area][variable])]) for variable in keys(data[parameter][area])]) for area in keys(data[parameter])])
# end
# end
"check the local convergence flag using the area mismatch and, when the termination measure requires it, the dual residual. NOTE(review): the historical spelling `flag_convergance` is kept since callers depend on this name."
function flag_convergance(data::Dict{String, <:Any})
    key = string(data["area"])
    measure = data["option"]["termination_measure"]
    below_tol = data["mismatch"][key] < data["option"]["tol"]
    if measure == "dual_residual" || measure == "mismatch_dual_residual"
        dual_below_tol = data["dual_residual"][key] < data["option"]["tol_dual"]
        return below_tol && dual_below_tol
    end
    return below_tol
end
"check whether this area's shared variables are within tolerance and update the area's convergence flags and counters according to the configured termination method"
function update_flag_convergence!(data::Dict{String, <:Any})
    area_id = string(data["area"])
    areas_id = string.(get_areas_id(data))
    deleteat!(areas_id, areas_id .== area_id)
    iteration = data["counter"]["iteration"]
    flag_convergence = flag_convergance(data)
    if data["option"]["termination_method"] == "global"
        # the convergence flag is communicated globally; record the first iteration at
        # which convergence was observed
        if flag_convergence && !data["counter"]["flag_convergence"]
            data["counter"]["convergence_iteration"] = iteration
        end
        data["counter"]["flag_convergence"] = flag_convergence
    else # the convergence flag is decided locally
        # Rule 1: on first local convergence, record the iteration and mark this
        # area's flag in every message shared with the neighbors
        if flag_convergence && !data["counter"]["local_flag_convergence"]
            data["counter"]["convergence_iteration"] = iteration
            data["counter"]["local_flag_convergence"] = flag_convergence
            for area in keys(data["shared_flag_convergence"])
                data["shared_flag_convergence"][area][area_id] = flag_convergence
            end
        end
        # Rule 2: forward the newest convergence iteration seen so far and OR together
        # the flags received from neighbors for every other area
        all_areas = string.(collect(keys(data["shared_flag_convergence"][areas_id[1]])))
        shared_convergence_iteration = maximum([data["counter"]["convergence_iteration"] ; [data["received_convergence_iteration"][area] for area in areas_id] ])
        for area1 in areas_id
            data["shared_convergence_iteration"][area1] = shared_convergence_iteration
            for area2 in all_areas
                if area2 != area_id
                    data["shared_flag_convergence"][area1][area2] = reduce( | , [data["received_flag_convergence"][area][area2] for area in areas_id])
                end
            end
        end
        # Rule 3: declare global convergence once every area's flag is set and at
        # least `diameter` iterations have passed, so the flags had time to propagate
        # through the area communication graph
        global_flag_convergence = reduce( & , [ val for (area, val) in first(data["shared_flag_convergence"])[2]])
        if global_flag_convergence && (shared_convergence_iteration + data["option"]["diameter"] <= iteration)
            data["counter"]["flag_convergence"] = global_flag_convergence
        end
    end
end
# "calculate the global mismatch based on local mismatch"
# function calc_global_mismatch(data_area::Dict{Int, <:Any})
# mismatch_method = first(data_area)[2]["option"]["mismatch_method"]
# termination_measure = first(data_area)[2]["option"]["termination_measure"]
# termination_method = first(data_area)[2]["option"]["termination_method"]
# if termination_method == "global"
# if mismatch_method == "norm"
# if termination_measure in ["dual_residual", "mismatch_dual_residual"]
# mismatch = LinearAlgebra.norm([data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0])
# dual_residual = LinearAlgebra.norm([data_area[i]["dual_residual"][string(i)] for i in keys(data_area) if i != 0])
# return LinearAlgebra.maximum([mismatch, dual_residual])
# else
# return LinearAlgebra.norm([data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0])
# end
# elseif mismatch_method == "max" || mismatch_method == "maximum"
# if termination_measure in ["dual_residual", "mismatch_dual_residual"]
# mismatch = LinearAlgebra.maximum([data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0])
# dual_residual = LinearAlgebra.maximum([data_area[i]["dual_residual"][string(i)] for i in keys(data_area) if i != 0])
# return LinearAlgebra.maximum([mismatch, dual_residual])
# else
# return LinearAlgebra.maximum([data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0])
# end
# end
# else
# if termination_measure in ["dual_residual", "mismatch_dual_residual"]
# return LinearAlgebra.maximum([[data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0]; [data_area[i]["dual_residual"][string(i)] for i in keys(data_area) if i != 0]])
# else
# return LinearAlgebra.maximum([data_area[i]["mismatch"][string(i)] for i in keys(data_area) if i != 0])
# end
# end
# end
"aggregate the per-area mismatch values into one global scalar, skipping the coordinator (area 0); aggregation uses the configured `mismatch_method`"
function calc_global_mismatch(data_area::Dict{Int, <:Any})
    method = first(data_area)[2]["option"]["mismatch_method"]
    local_mismatches = [area_data["mismatch"][string(id)] for (id, area_data) in data_area if id != 0]
    if method == "norm"
        return LinearAlgebra.norm(local_mismatches)
    elseif method == "max" || method == "maximum"
        return LinearAlgebra.maximum(local_mismatches)
    end
end
"aggregate the per-area dual residuals into one global scalar, skipping the coordinator (area 0); aggregation uses the configured `mismatch_method`"
function calc_global_dual_residual(data_area::Dict{Int, <:Any})
    method = first(data_area)[2]["option"]["mismatch_method"]
    residuals = [area_data["dual_residual"][string(id)] for (id, area_data) in data_area if id != 0]
    if method == "norm"
        return LinearAlgebra.norm(residuals)
    elseif method == "max" || method == "maximum"
        return LinearAlgebra.maximum(residuals)
    end
end
"""
    update_global_flag_convergence(data_area::Dict{Int64, <:Any})

Check convergence across all areas and return the global convergence flag.

With the "global" termination method, convergence is declared when the aggregated
mismatch (and, if the termination measure includes it, the aggregated dual residual)
falls below the configured tolerance(s). Otherwise the per-area, locally-decided
`flag_convergence` counters are AND-ed together.
"""
function update_global_flag_convergence(data_area::Dict{Int64, <:Any})
    # options are identical across areas, so read them from an arbitrary entry
    shared_option = first(data_area)[2]["option"]
    if shared_option["termination_method"] == "global"
        # fix: mismatch/tol were previously recomputed redundantly in the else-branch
        mismatch = calc_global_mismatch(data_area)
        tol = shared_option["tol"]
        if shared_option["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
            dual_residual = calc_global_dual_residual(data_area)
            return mismatch < tol && dual_residual < shared_option["tol_dual"]
        end
        return mismatch < tol
    end
    # locally-decided flags: every area (including the coordinator, if present)
    # must have raised its flag
    return reduce(&, [data_area[i]["counter"]["flag_convergence"] for i in keys(data_area)])
end
"print the iteration count and global mismatch; at `print_level > 1` also print each entry of `info_list`"
function print_iteration(data::Dict{Int64, <:Any}, print_level::Int64, info_list::Vector=[])
    print_level > 0 || return nothing
    iteration = first(data)[2]["counter"]["iteration"] - 1
    mismatch = calc_global_mismatch(data)
    println("Iteration = $iteration, mismatch = $mismatch")
    if print_level > 1
        foreach(println, info_list)
    end
    return nothing
end
"print the coordinator's (area 0) iteration count and mismatch; at `print_level > 1` also print each entry of `info_list`"
function print_iteration_coordinator(data::Dict{Int64, <:Any}, print_level::Int64, info_list::Vector=[])
    print_level > 0 || return nothing
    iteration = data[0]["counter"]["iteration"] - 1
    mismatch = data[0]["mismatch"]["0"]
    println("Iteration = $iteration, mismatch = $mismatch")
    if print_level > 1
        foreach(println, info_list)
    end
    return nothing
end
# function print_iteration(mismatch::Dict, iteration::Int64, print_level::Int64, info_list::Vector=[])
# if print_level > 0
# mismatch = LinearAlgebra.norm([mismatch[i] for i in keys(mismatch)])
# println("Iteration = $iteration, mismatch = $mismatch")
# if print_level > 1
# for info in info_list
# println(info)
# end
# end
# end
# end
"""
    print_convergence(data::Dict, print_level::Int64)

Print the final status of the distributed algorithm: the converged objective value and
iteration count on success, or the remaining mismatch otherwise. Does nothing when
`print_level <= 0`.
"""
function print_convergence(data::Dict, print_level::Int64)
    if print_level > 0
        iteration = first(data)[2]["counter"]["iteration"]-1
        mismatch = calc_global_mismatch(data)
        tol = first(data)[2]["option"]["tol"]
        flag_convergence = update_global_flag_convergence(data)
        println("*******************************************************")
        println("")
        if flag_convergence
            println("Consistency achieved within $tol mismatch tolerance")
            println("Number of iterations = $iteration")
            objective = calc_dist_gen_cost(data)
            println("Objective function value = $objective")
        else
            # fix: corrected message grammar ("did not achieved" -> "was not achieved")
            println("Consistency was not achieved within $tol mismatch tolerance and $iteration iteration")
            println("Shared variables mismatch = $mismatch")
        end
        println("")
        println("*******************************************************")
    end
end
end | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 12743 | ###############################################################################
# Methods for data wrangling and extracting information from data #
###############################################################################
"set each bus's `\"area\"` field from a dictionary of (bus id => area id) integer pairs"
function assign_area!(data::Dict{String, <:Any}, partition::Dict)
    for (bus_key, bus) in data["bus"]
        bus["area"] = partition[parse(Int64, bus_key)]
    end
end
"assign areas to buses from a CSV file whose rows are `bus_id,area_id` integers"
function assign_area!(data::Dict{String, <:Any}, partition_path::String)
    rows = DelimitedFiles.readdlm(partition_path, ',', Int, '\n')
    # convert each [bus, area] row into a Pair and delegate to the pair-vector method
    partition = Pair{Int64, Int64}[Pair{Int64, Int64}(row[1], row[2]) for row in eachrow(rows)]
    assign_area!(data, partition)
end
"assign areas to buses from a vector of (bus id => area id) pairs"
function assign_area!(data::Dict{String, <:Any}, partition::Vector{Pair{Int64, Int64}})
    assign_area!(data, Dict{Int64, Int64}(partition))
end
"assign area to the system data using a matrix with [bus, area] columns or rows"
function assign_area!(data::Dict{String, <:Any}, partition::Array{Int64, 2})
    # accept either an n×2 matrix (rows are [bus, area]) or its 2×n transpose;
    # NOTE(review): the `length(data["bus"]) != 2` guard presumably avoids transposing
    # an ambiguous 2×2 partition of a 2-bus system -- confirm intended semantics
    if size(partition)[2] != 2 && length(data["bus"]) != 2
        partition = partition'
        if size(partition)[2] != 2
            # throw an error: neither orientation yields [bus, area] rows
            error("Partitioning data does not contain correct area assignments")
        end
    end
    assign_area!(data, Dict(partition[i,1] => partition[i,2] for i in 1:size(partition)[1] ))
end
"""
    decompose_system(data::Dict{String, <:Any})

Decompose a system into per-area subsystems keyed by area id, with areas defined by
each bus's `"area"` field.
"""
function decompose_system(data::Dict{String, <:Any})
    return Dict(area => decompose_system(data, area) for area in get_areas_id(data))
end
"""
    decompose_system(data::Dict{String, <:Any}, area_id::Int64)

Extract the subsystem of `data` belonging to `area_id`, including the neighboring
boundary buses and virtual generators placed on them to represent tie-line exchange.
"""
function decompose_system(data::Dict{String, <:Any}, area_id::Int64)
    # identify local and neighboring (boundary) buses
    local_bus = get_local_bus(data, area_id)
    neighbor_bus = get_neighbor_bus(data, area_id)

    ## add virtual generators at the neighboring buses
    virtual_gen = add_virtual_gen(data, neighbor_bus, area_id)

    ## area data
    data_area = Dict{String, Any}()
    data_area["area"] = area_id
    data_area["name"] = "$(data["name"])_area_$area_id"
    data_area["source_version"] = data["source_version"]
    data_area["source_type"] = data["source_type"]
    data_area["baseMVA"] = data["baseMVA"]
    data_area["per_unit"] = data["per_unit"]
    data_area["bus"] = Dict([j => bus for (j,bus) in data["bus"] if bus["bus_i"] in [local_bus;neighbor_bus] && bus["bus_type"] != 4])
    data_area["branch"] = Dict([j => branch for (j,branch) in data["branch"] if (branch["f_bus"] in local_bus || branch["t_bus"] in local_bus) && branch["br_status"] == 1])
    data_area["gen"] = merge(Dict([i => gen for (i,gen) in data["gen"] if gen["gen_bus"] in local_bus && gen["gen_status"] == 1]), virtual_gen)
    data_area["shunt"] = Dict([i => shunt for (i,shunt) in data["shunt"] if shunt["shunt_bus"] in local_bus])
    data_area["load"] = Dict([i => load for (i,load) in data["load"] if load["load_bus"] in local_bus])
    # bug fix: the storage/switch filters previously referenced the undefined loop
    # variable `gen`, raising UndefVarError whenever storage/switch components existed
    data_area["storage"] = Dict([i => storage for (i,storage) in data["storage"] if storage["storage_bus"] in local_bus])
    # NOTE(review): PowerModels switches typically carry "f_bus"/"t_bus" rather than
    # a "switch_bus" key -- confirm the expected field name for switch components
    data_area["switch"] = Dict([i => switch for (i,switch) in data["switch"] if switch["switch_bus"] in local_bus])
    data_area["dcline"] = Dict([i => dcline for (i,dcline) in data["dcline"] if dcline["f_bus"] in local_bus || dcline["t_bus"] in local_bus])
    return data_area
end
"""
    decompose_coordinator(data::Dict{String, <:Any})

Build the coordinator's view of the system (area id 0): only the boundary buses and
the tie-line branches that connect two different areas, with empty generator, load,
shunt, storage, switch, and DC-line tables.
"""
function decompose_coordinator(data::Dict{String, <:Any})
    areas_id = get_areas_id(data)
    # identify the boundary buses of all areas
    boundary_bus = unique(reduce(vcat, [get_neighbor_bus(data, area_id) for area_id in areas_id]))
    ## coordinator data
    data_coordinator = Dict{String,Any}()
    data_coordinator["area"] = 0
    data_coordinator["name"]= "$(data["name"])_coordinator"
    data_coordinator["source_version"] = data["source_version"]
    data_coordinator["source_type"] = data["source_type"]
    data_coordinator["baseMVA"] = data["baseMVA"]
    data_coordinator["per_unit"] = data["per_unit"]
    data_coordinator["bus"] = Dict([j => bus for (j,bus) in data["bus"] if bus["bus_i"] in boundary_bus])
    # keep only branches whose endpoints lie in different areas (tie-lines)
    data_coordinator["branch"] = Dict([j => branch for (j,branch) in data["branch"] if branch["f_bus"] in boundary_bus && branch["t_bus"] in boundary_bus && data["bus"]["$(branch["f_bus"])"]["area"] != data["bus"]["$(branch["t_bus"])"]["area"] ])
    data_coordinator["gen"] = Dict{String,Any}()
    data_coordinator["shunt"] = Dict{String,Any}()
    data_coordinator["load"] = Dict{String,Any}()
    data_coordinator["storage"]= Dict{String,Any}()
    data_coordinator["switch"]= Dict{String,Any}()
    data_coordinator["dcline"]= Dict{String,Any}()
    return data_coordinator
end
"""
    add_virtual_gen(data, neighbor_bus, area_id)

Create a dictionary of unbounded "virtual" generators, one at each neighboring
(boundary) bus, to represent tie-line power exchange with other areas. Generator ids
are offset by the current maximum generator id to avoid collisions, and the cost model
type mirrors that of the existing generators.
"""
function add_virtual_gen(data::Dict{String, <:Any}, neighbor_bus::Vector, area_id::Int64)
    max_gen_ind = maximum([parse(Int,i) for i in keys(data["gen"])])
    virtual_gen = Dict{String, Any}()
    # reuse the cost model type of an existing generator so the area's generators are consistent
    cost_model = data["gen"][string(max_gen_ind)]["model"]
    # generous symmetric capacity bound: 10x the total system active demand
    max_flow = 10*sum(load["pd"] for (i,load) in data["load"])
    if cost_model == 1
        # model 1: piecewise-linear cost with (near-)zero cost points
        for i in neighbor_bus
            virtual_gen[string(i+max_gen_ind)] = Dict("ncost" => 2, "qc1max" => 0.0, "pg" => 0, "model" => cost_model, "shutdown" => 0.0, "startup" => 0.0, "qc2max" => 0.0, "ramp_agc" => 0.0, "qg" => 0.0, "gen_bus" => i, "pmax" => max_flow, "ramp_10" => 0.0, "vg" => 1.05, "mbase" => data["baseMVA"], "source_id" => Any["gen", max_gen_ind+i], "pc2" => 0.0, "index" => i+max_gen_ind, "cost" => [0.01; 0.0; 0.02; 0.0], "qmax" => max_flow, "gen_status" => 1, "qmin" => -max_flow, "qc1min" => 0.0, "qc2min" => 0.0, "pc1" => 0.0, "ramp_q" => 0.0, "ramp_30" => 0.0, "pmin" => -max_flow, "apf" => 0.0)
        end
    else
        # other models: polynomial cost with all-zero coefficients
        for i in neighbor_bus
            virtual_gen[string(i+max_gen_ind)] = Dict("ncost" => 3, "qc1max" => 0.0, "pg" => 0, "model" => cost_model, "shutdown" => 0.0, "startup" => 0.0, "qc2max" => 0.0, "ramp_agc" => 0.0, "qg" => 0.0, "gen_bus" => i, "pmax" => max_flow, "ramp_10" => 0.0, "vg" => 1.05, "mbase" => data["baseMVA"], "source_id" => Any["gen", max_gen_ind+i], "pc2" => 0.0, "index" => i+max_gen_ind, "cost" => [0.0; 0.0; 0.0], "qmax" => max_flow, "gen_status" => 1, "qmin" => -max_flow, "qc1min" => 0.0, "qc2min" => 0.0, "pc1" => 0.0, "ramp_q" => 0.0, "ramp_30" => 0.0, "pmin" => -max_flow, "apf" => 0.0)
        end
    end
    return virtual_gen
end
# "add virtual bus at the tie-lines with other areas"
# function _add_virtual_bus!(data::Dict{String, <:Any}, neighbor_bus::Vector, area_id::Int)
# max_bus_ind = maximum([parse(Int,i) for i in keys(data["bus"])])
# vmax = first(data["bus"])[2]["vmax"]
# vmin = first(data["bus"])[2]["vmin"]
# virtual_bus = Dict{String, Any}()
# for i in neighbor_bus
# bus_area = data["bus"][string(i)]["area"]
# base_kv = data["bus"][string(i)]["base_kv"]
# common_lines = [idx for (idx,branch) in data["branch"] if (branch["f_bus"] == i && data["bus"][string(branch["t_bus"])]["area"] == area_id) || (branch["t_bus"] == i && data["bus"][string(branch["f_bus"])]["area"] == area_id) ]
# for j in common_lines
# bus_id = parse(Int64, j) + max_bus_ind
# virtual_bus[string(bus_id)] = Dict{String, Any}("zone" => bus_area, "bus_i" => bus_id, "bus_type" => 1, "vmax" => vmax, "source_id" => Any["bus", bus_id], "area"=> bus_area, "vmin" => vmin, "index" => 0.0, "va" => 1.0, "vm" => 0.0, "base_kv" => base_kv)
# end
# end
# return virtual_bus
# end
"""
    arrange_areas_id!(data::Dict{String, <:Any})

Renumber area ids consecutively from 1 to the number of areas. This step is necessary
when an area is numbered 0 while a central coordinator (which reserves id 0) is used.
"""
function arrange_areas_id!(data::Dict{String, <:Any})
    lookup = Dict(old_id => new_id for (new_id, old_id) in enumerate(get_areas_id(data)))
    for (_, bus) in data["bus"]
        bus["area"] = lookup[bus["area"]]
    end
end
"return the ids of all areas appearing in the bus data (order follows dict iteration)"
get_areas_id(data::Dict{String, <:Any})::Vector{Int64} = unique([bus["area"] for (i, bus) in data["bus"]])
"return the ids of all areas (the keys of an areas data dictionary)"
get_areas_id(data::Dict{Int, <:Any})::Vector{Int64} = collect(keys(data))
"return the ids of all areas referenced by a power model"
get_areas_id(pm::AbstractPowerModel)::Vector{Int64} = get_areas_id(pm.data)
"return this area's id. NOTE(review): the `NaN` default cannot convert to the declared `Int64` return type, so a missing \"area\" key raises `InexactError` -- confirm the intended fallback"
get_area_id(data::Dict{String, <:Any})::Int64 = get(data,"area", NaN)
"return this area's id from a power model"
get_area_id(pm::AbstractPowerModel)::Int64 = get_area_id(pm.data)
"return the ids of buses assigned to `area`"
get_local_bus(data::Dict{String, <:Any}, area::Int64)::Vector{Int64} = [bus["bus_i"] for (i,bus) in data["bus"] if bus["area"] == area]
"return the ids of buses assigned to `area`, from a power model"
get_local_bus(pm::AbstractPowerModel, area::Int64)::Vector{Int64} = get_local_bus(pm.data, area)
"return the buses outside `local_bus` that an in-service branch connects to a bus inside it (result may contain duplicates when several branches meet the same bus)"
function get_neighbor_bus(data::Dict{String, <:Any}, local_bus::Vector)::Vector{Int64}
    neighbors = Int64[]
    for (_, branch) in data["branch"]
        branch["br_status"] == 1 || continue
        from_inside = branch["f_bus"] in local_bus
        to_inside = branch["t_bus"] in local_bus
        if from_inside && !to_inside
            push!(neighbors, branch["t_bus"])
        elseif to_inside && !from_inside
            push!(neighbors, branch["f_bus"])
        end
    end
    return neighbors
end
"return the neighbor buses given this area's local buses, from a power model"
get_neighbor_bus(pm::AbstractPowerModel, local_bus::Vector)::Vector{Int64} = get_neighbor_bus(pm.data, local_bus)
"return the neighbor buses of area `area`"
get_neighbor_bus(data::Dict{String, <:Any}, area::Int64)::Vector{Int64} = get_neighbor_bus(data, get_local_bus(data,area))
"return the neighbor buses of area `area`, from a power model"
get_neighbor_bus(pm::AbstractPowerModel, area::Int64)::Vector{Int64} = get_neighbor_bus(pm.data, area)
"return a dictionary mapping each area id to the ids of its buses; the extra key 0 maps to every bus in the system"
function get_areas_bus(data::Dict{String, <:Any})
    areas_bus = Dict{Int64, Vector{Int64}}()
    for area in get_areas_id(data)
        areas_bus[area] = [bus["bus_i"] for (_, bus) in data["bus"] if bus["area"] == area]
    end
    areas_bus[0] = [bus["bus_i"] for (_, bus) in data["bus"]]
    return areas_bus
end
"helper function to collect all areas' buses in a dictionary, from a power model"
get_areas_bus(pm::AbstractPowerModel) = get_areas_bus(pm.data)
"""
    get_shared_component(data::Dict{String, <:Any}, area_id::Int64)

Return `(shared_bus, shared_branch)`: for each other area, the in-service tie-line
branches it shares with `area_id` and the buses at their ends. The entry keyed by
`area_id` itself holds all of that area's boundary-crossing branches, and key 0
aggregates the shared components over all areas.
"""
function get_shared_component(data::Dict{String, <:Any}, area_id::Int64)
    areas_id = get_areas_id(data)
    areas_bus = get_areas_bus(data)
    shared_branch = Dict{Int64, Any}()
    shared_bus = Dict{Int64, Any}()
    for area in areas_id
        if area != area_id
            # in-service branches with one end in `area` and the other in `area_id`
            shared_branch[area] = Vector{Int64}(unique([parse(Int64,idx) for (idx,branch) in data["branch"] if branch["br_status"] == 1 && ((branch["f_bus"] in areas_bus[area] && branch["t_bus"] in areas_bus[area_id]) || (branch["f_bus"] in areas_bus[area_id] && branch["t_bus"] in areas_bus[area])) ]))
        else
            # the area itself: every in-service branch that crosses its boundary
            shared_branch[area] = Vector{Int64}(unique([parse(Int64,idx) for (idx,branch) in data["branch"] if branch["br_status"] == 1 && xor(branch["f_bus"] in areas_bus[area], branch["t_bus"] in areas_bus[area]) ]))
        end
        # buses at either end of the shared branches
        shared_bus[area] = Vector{Int64}(unique(vcat([branch["f_bus"] for (idx,branch) in data["branch"] if parse(Int64,idx) in shared_branch[area]], [branch["t_bus"] for (idx,branch) in data["branch"] if parse(Int64,idx) in shared_branch[area]] )))
    end
    # key 0 collects the union over all areas
    shared_bus[0] = Vector{Int64}(unique([idx for area in areas_id for idx in shared_bus[area]]))
    shared_branch[0] = Vector{Int64}(unique([idx for area in areas_id for idx in shared_branch[area]]))
    return shared_bus, shared_branch
end
"get the shared buses and branches between the defined area and all other areas, from a power model"
get_shared_component(pm::AbstractPowerModel, area::Int64) = get_shared_component(pm.data, area)
"get the shared buses and branches between this data dictionary's own area and all other areas"
function get_shared_component(data::Dict{String, <:Any})
    area = get_area_id(data)
    get_shared_component(data, area)
end
"get the shared buses and branches between this power model's own area and all other areas"
get_shared_component(pm::AbstractPowerModel) = get_shared_component(pm.data)
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 1517 | ###############################################################################
# Methods for sharing data between areas #
###############################################################################
"collect every `shared_*` entry destined for `to_area`; when `serialize=true` the payload is serialized into a byte vector"
function prepare_shared_data(data::Dict{String, <:Any}, to_area::Int64; serialize::Bool=false)
    destination = string(to_area)
    shared_keys = [k for k in keys(data) if startswith(string(k), "shared")]
    payload = Dict(k => get(data[k], destination, Dict()) for k in shared_keys)
    return serialize ? _serialize_shared_data!(payload) : payload
end
"serialize the shared data dictionary into a byte vector suitable for transmission"
function _serialize_shared_data!(shared_data::Dict{String, <:Any})
    buffer = IOBuffer()
    Serialization.serialize(buffer, shared_data)
    return take!(buffer)
end
"deserialize a received byte vector and merge it into the local data dictionary"
function receive_shared_data!(data::Dict{String, <:Any}, shared_data::Vector, from_area::Int64)
    decoded = Serialization.deserialize(IOBuffer(shared_data))
    receive_shared_data!(data, decoded, from_area)
end
"store each received `shared_*` entry under the matching `received_*` key for `from_area`; entries without a matching local key are ignored"
function receive_shared_data!(data::Dict{String, <:Any}, shared_data::Dict{String, <:Any}, from_area::Int64)
    for (shared_key, value) in shared_data
        # "shared_variable" -> "received_variable", etc. (strip the 7-char "shared_" prefix)
        received_key = "received_" * shared_key[8:end]
        if haskey(data, received_key)
            data[received_key][string(from_area)] = value
        end
    end
end
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 2060 | ###############################################################################
# Export PowerModelsADA methods #
###############################################################################
# PowerModelsADA methods
export
solve_dopf,
solve_dopf_sp,
solve_dopf_mp,
solve_dopf_coordinated,
solve_dopf_coordinated_sp,
solve_dopf_coordinated_mp,
solve_local!,
solve_pmada_model,
instantiate_pmada_model,
build_pmada_ref,
assign_area!,
# partition_system!,
decompose_system,
decompose_coordinator,
calc_mismatch!,
calc_dual_residual!,
calc_global_mismatch,
update_solution!,
update_shared_variable!,
update_flag_convergence!,
update_iteration!,
update_global_flag_convergence,
save_solution!,
prepare_shared_data,
receive_shared_data!,
arrange_areas_id!,
get_diameter,
get_areas_id,
get_area_id,
get_local_bus,
get_neighbor_bus,
get_areas_bus,
get_shared_component,
variable_names,
variable_shared_names,
initialize_dopf!,
initialize_solution!,
initialize_all_variable,
initialize_shared_variable,
objective_min_fuel_and_consensus!,
variable_opf,
constraint_opf,
calc_number_shared_variables,
calc_number_areas_variables,
calc_number_all_variables,
calc_number_variables,
calc_dist_gen_cost,
compare_solution,
print_iteration,
print_convergence,
_pmada_global_keys
# Distributed algorithms modules
export
admm_methods,
atc_methods,
app_methods,
admm_coordinated_methods,
atc_coordinated_methods,
adaptive_admm_methods,
adaptive_admm_coordinated_methods
# JuMP optimizer initialization
import JuMP: optimizer_with_attributes
export optimizer_with_attributes
# PowerModels types
powermodels = names(_PM)
powermodels = filter(x -> endswith(string(x), "PowerModel"), powermodels)
powermodels = filter(x -> !occursin("Abstract", string(x)), powermodels)
for type in powermodels
@eval import PowerModels: $(type)
@eval export $(type)
end
# PowerModels functions
export AbstractPowerModel, parse_file, ids, ref, var, con, sol, nw_ids, nws, optimize_model!, nw_id_default, ismultinetwork, update_data!, silence
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 1774 | ###############################################################################
# OPF problem variable, objective, and constraints #
###############################################################################
"define the standard OPF decision variables: bus voltages, generator injections, branch flows, and DC-line flows"
function variable_opf(pm::AbstractPowerModel)
    _PM.variable_bus_voltage(pm)
    _PM.variable_gen_power(pm)
    _PM.variable_branch_power(pm)
    _PM.variable_dcline_power(pm)
end
"""
    objective_min_fuel_and_consensus!(pm::AbstractPowerModel, objective_method::Function=no_objective)

Set the model objective to the PowerModels fuel-and-flow cost plus the
algorithm-specific consensus term produced by `objective_method`. If the area has no
generators, only the consensus term is minimized.
"""
function objective_min_fuel_and_consensus!(pm::AbstractPowerModel, objective_method::Function=no_objective)
    # if subsystem has generator minimize the cost of generator and consistency otherwise minimize consistency only
    if isempty(pm.data["gen"])
        objective = objective_method(pm)
    else
        # this sets the fuel/flow cost as the model objective; we then read it back
        # and add the consensus term before re-setting the objective
        _PM.objective_min_fuel_and_flow_cost(pm)
        objective = JuMP.objective_function(pm.model) + objective_method(pm)
    end
    JuMP.@objective(pm.model, Min, objective)
end
"fallback objective contribution that adds nothing to the model"
function no_objective(pm::AbstractPowerModel)
    return nothing
end
"define the standard OPF constraints: voltage model, reference angle, nodal power balance, branch physics/limits, and DC-line losses"
function constraint_opf(pm::AbstractPowerModel)
    _PM.constraint_model_voltage(pm)
    # reference bus angle
    for i in ids(pm, :ref_buses)
        _PM.constraint_theta_ref(pm, i)
    end
    # nodal power balance
    for i in ids(pm, :bus)
        _PM.constraint_power_balance(pm, i)
    end
    # branch flow physics, thermal limits, and angle-difference limits
    for i in ids(pm, :branch)
        _PM.constraint_ohms_yt_from(pm, i)
        _PM.constraint_ohms_yt_to(pm, i)
        _PM.constraint_thermal_limit_from(pm, i)
        _PM.constraint_thermal_limit_to(pm, i)
        _PM.constraint_voltage_angle_difference(pm, i)
    end
    # DC line loss model
    for i in ids(pm, :dcline)
        _PM.constraint_dcline_power_losses(pm, i)
    end
end
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 5431 | ###############################################################################
# Helper methods for all distributed algorithms #
###############################################################################
# partition_system! function is removed due to a compiling issue of the KaHyPar package with Windows
# """
# partition_system!(data::Dict, n::Int64; configuration::Symbol=:edge_cut, print_info::Bool=false)
# Partition a system into n areas using KaHyPar partition algorithm
# # Arguments:
# - data::Dict{String, <:Any} : dictionary contains case in PowerModel format
# - n::Int : number of areas
# - configuration::Symbol=:edge_cut : partition meteric (:edge_cut or :connectivity)
# - print_info::Bool=false : print partition algorithm information
# """
# function partition_system!(data::Dict, n::Int64; configuration::Symbol=:edge_cut, print_info::Bool=false)
# nbus = length(data["bus"])
# nbranch = length(data["branch"])
# bus_index = [x.second["index"] for x in data["bus"]]
# branch_index = [x.second["index"] for x in data["branch"]]
# sort!(bus_index)
# sort!(branch_index)
# W = zeros(nbus,nbranch)
# for (i,branch) in data["branch"]
# f_bus = findfirst(x->x==branch["f_bus"], bus_index)
# t_bus = findfirst(x->x==branch["t_bus"], bus_index)
# indx = branch["index"]
# W[f_bus,indx] = 1
# W[t_bus,indx] = 1
# end
# W = SparseArrays.sparse(W)
# h = KaHyPar.HyperGraph(W)
# info = @capture_out begin
# partitions = KaHyPar.partition(h, n, configuration=configuration)
# end
# partitions = Dict([bus_index[i]=>partitions[i]+1 for i in 1:nbus])
# for (i,bus) in data["bus"]
# bus["area"] = partitions[bus["index"]]
# end
# if print_info
# println(info)
# end
# end
"sum the generation cost over all areas of a distributed solution"
function calc_dist_gen_cost(data_area::Dict{Int, <:Any})
    total_cost = 0
    for (_, area_data) in data_area
        total_cost += _PM.calc_gen_cost(area_data)
    end
    return total_cost
end
"""
    compare_solution(data, data_area, model_type, optimizer)

Solve the centralized OPF with PowerModels and return the absolute relative
error (in percent) between the distributed objective value and the central one.
"""
function compare_solution(data::Dict{String, <:Any}, data_area::Dict{Int, <:Any}, model_type::DataType, optimizer)
    # centralized benchmark objective
    central_objective = _PM.solve_opf(data, model_type, optimizer)["objective"]
    # distributed objective value
    distributed_objective = calc_dist_gen_cost(data_area)
    # optimality gap as a percentage of the central objective
    return abs(distributed_objective - central_objective) / central_objective * 100
end
# used in previous versions with ALADIN algorithm
# "get the number of variables in each area"
# function calc_number_areas_variables(data::Dict{String, <:Any}, model_type::DataType)
# areas_id = get_areas_id(data)
# data_area = Dict{Int64, Any}()
# num_variables = Dict{Int64, Any}()
# for area in areas_id
# data_area = decompose_system(data, area)
# num_variables[area] = calc_number_all_variables(data_area, model_type)
# end
# return num_variables
# end
# "get the number of shared variable in a area"
# function calc_number_shared_variables(data::Dict{String, <:Any}, model_type::DataType)
# areas_id = get_areas_id(data)
# area_id = get_area_id(data)
# shared_variable = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", "flat")
# num_variables = calc_number_variables(shared_variable)
# return num_variables
# end
# "get the number of shared variable in all area"
# function calc_number_system_shared_variables(data::Dict{String, <:Any}, model_type::DataType)
# areas_id = get_areas_id(data)
# num_variables = Dict()
# for area in areas_id
# data_area = decompose_system(data, area)
# num_variables[area] = calc_number_shared_variables(data_area, model_type)
# end
# return num_variables
# end
# "get the number of variables in an area"
# function calc_number_all_variables(data::Dict{String, <:Any}, model_type::DataType)
# variables = initialize_all_variable(data, model_type)
# num_variables = calc_number_variables(variables)
# return num_variables
# end
# "get the number of variables"
# function calc_number_variables(data::Dict{String, <:Any})
# num_variables = 0
# for (key,val) in data
# if isa(val, Dict{String, <:Any})
# num_variables += calc_number_variables(val)
# else
# num_variables += length(val)
# end
# end
# return num_variables
# end
"""
    get_diameter(data)

Compute the diameter (longest shortest path, in hops) of the inter-area
communication network using the Floyd-Warshall all-pairs shortest-path
algorithm. Non-adjacent pairs start at `narea`, which acts as an upper bound
on any hop count; note the diagonal also starts at `narea`, matching the
original behavior.
"""
function get_diameter(data)
    areas_id = get_areas_id(data)
    narea = length(areas_id)
    data_area = decompose_system(data)
    for i in areas_id
        initialize_dopf!(data_area[i], DCPPowerModel)
    end
    # hop-count matrix: 1 for direct neighbors, narea otherwise
    dist = [id2 in data_area[id1]["neighbors"] ? 1 : narea for id1 in 1:narea, id2 in 1:narea]
    # Floyd-Warshall relaxation
    for k in 1:narea, i in 1:narea, j in 1:narea
        dist[i, j] = min(dist[i, j], dist[i, k] + dist[k, j])
    end
    return maximum(dist)
end
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 8692 | ###############################################################################
# Variable initialization and updating for all distributed OPF algorithms #
###############################################################################
# Template for variable shared
"""
    initialize_shared_variable(data, model_type, from, to, dics_name="shared_variable", initialization_method="flat", value=0.0)

Build the nested dictionary of variables shared between area `from` and each
area in `to`, keyed as `area => variable_name => component_index => value`.

# Arguments:
- `data::Dict{String, <:Any}` : area data dictionary
- `model_type::DataType` : power flow formulation (PowerModel type)
- `from::Int64` : id of the area that owns the dictionary
- `to::Vector{Int64}` : ids of the areas the variables are shared with
- `dics_name::String="shared_variable"` : name of the dictionary; a name containing "dual" selects dual-variable initialization (all zeros)
- `initialization_method::String="flat"` : "flat", "previous"/"warm" (reuse `data[dics_name]`), or "constant"
- `value::Float64=0.0` : value used when `initialization_method` is "constant"
"""
function initialize_shared_variable(data::Dict{String, <:Any}, model_type::DataType, from::Int64 ,to::Vector{Int64}, dics_name::String="shared_variable", initialization_method::String="flat", value::Float64=0.0)
    bus_variables_name, branch_variables_name = variable_shared_names(model_type)
    shared_bus, shared_branch = get_shared_component(data, from)
    if initialization_method in ["previous", "previous_solution", "warm", "warm_start"]
        # warm start: reuse the dictionary stored from a previous run
        if !haskey(data, dics_name)
            error("no previous solutions exist to use warm start")
        else
            variables_dics = data[dics_name]
        end
    elseif occursin("dual",dics_name)
        # dual variables always start at zero regardless of the method
        variables_dics = Dict([
            string(area) => Dict(
                vcat(
                    [variable => Dict([string(idx) => 0.0 for idx in shared_bus[area]]) for variable in bus_variables_name],
                    [variable => Dict([string(idx) => 0.0 for idx in shared_branch[area]]) for variable in branch_variables_name]
                )
            )
        for area in to])
    else
        # primal variables: per-variable initial value ("flat" or "constant")
        variables_dics = Dict([
            string(area) => Dict(
                vcat(
                    [variable => Dict([string(idx) => initial_value(variable, initialization_method, value) for idx in shared_bus[area]]) for variable in bus_variables_name],
                    [variable => Dict([string(idx) => initial_value(variable, initialization_method, value) for idx in shared_branch[area]]) for variable in branch_variables_name]
                )
            )
        for area in to])
    end
    return variables_dics
end
"""
    initialize_shared_variable(data, model_type, from, to::Int64, dics_name="shared_variable", initialization_method="flat", value=0.0)

Convenience method for a single destination area: wraps `to` in a vector and
delegates to the vector method. The optional `value` argument (used by the
"constant" initialization method) is now accepted and forwarded, keeping this
method consistent with the vector method; previously it was silently dropped.
"""
function initialize_shared_variable(data::Dict{String, <:Any}, model_type::DataType, from::Int64 ,to::Int64, dics_name::String="shared_variable", initialization_method::String="flat", value::Float64=0.0)
    initialize_shared_variable(data, model_type, from, [to], dics_name, initialization_method, value)
end
"""
    initial_value(variable::String, initialization_method::String, value::Float64=0.0)

Assign the initial value of `variable` based on the initialization method.

# Arguments:
- `variable::String` : variable name
- `initialization_method::String` : "flat"/"flat_start" uses the variable's flat-start value; any other method (e.g. "constant") returns `value`
- `value::Float64=0.0` : returned value when the method is not a flat start
"""
function initial_value(variable::String, initialization_method::String, value::Float64=0.0)::Float64
    if initialization_method in ["flat", "flat_start"]
        return initial_value(variable)
    else
        # Bug fix: the original read `else initialization_method in ["constant"]`,
        # which evaluated and discarded the membership test (a dead expression).
        # The intent — and the preserved behavior — is that "constant" and any
        # other non-flat method return the caller-supplied value.
        return value
    end
end
"flat-start value of a variable: 1.0 for voltage-magnitude-like variables (`vm`, `w`, `wr`), 0.0 otherwise"
initial_value(variable::String)::Float64 = variable in ("vm", "w", "wr") ? 1.0 : 0.0
# function previous_value(data::Dict{String, <:Any}, variable::String, idx::String)::Float64
# if variable in ["vm", "va"]
# return data["bus"][idx][variable]
# elseif variable in ["w"]
# if haskey(data["bus"][idx], "w")
# return data["bus"][idx]["w"]
# else
# return data["bus"][idx]["vm"]^2
# end
# elseif variable in ["pf", "pt", "qf", "qt","wr", "wi", "vv", "ccm", "cs", "si", "td"]
# if haskey(data["branch"][idx], variable)
# return data["branch"][idx][variable]
# else
# error("no previous solutions exist to use warm start or the PowerModel is not supported")
# end
# elseif variable in ["pg", "qg"]
# return data["gen"][idx][variable]
# else
# error("no previous solutions exist to use warm start or the PowerModel is not supported")
# end
# end
"""
    initialize_all_variable(data::Dict{String, <:Any}, model_type::DataType, initialization_method::String="flat")

Return a dictionary that contains all the problem variables (bus, branch, and
generator), keyed as `variable_name => component_index => value`. Can be used
to store the solutions.

# Arguments:
- `data::Dict{String, <:Any}` : area data
- `model_type::DataType` : power flow formulation (PowerModel type)
- `initialization_method::String="flat"` : "flat" or "warm" initialization
"""
function initialize_all_variable(data::Dict{String, <:Any}, model_type::DataType, initialization_method::String="flat")
    bus_variables_name, branch_variables_name, gen_variables_name = variable_names(model_type)
    all_variables = Dict{String, Any}()
    # one entry per variable name, mapping every component index to its initial value
    for variable in bus_variables_name
        all_variables[variable] = Dict([idx => initial_value(variable, initialization_method) for idx in keys(data["bus"])])
    end
    for variable in branch_variables_name
        all_variables[variable] = Dict([idx => initial_value(variable, initialization_method) for idx in keys(data["branch"])])
    end
    for variable in gen_variables_name
        all_variables[variable] = Dict([idx => initial_value(variable, initialization_method) for idx in keys(data["gen"])])
    end
    return all_variables
end
"initialize `data[\"solution\"]` with a fresh dictionary of all problem variables"
initialize_solution!(data::Dict{String, <:Any}, model_type::DataType, initialization_method::String="flat") =
    data["solution"] = initialize_all_variable(data, model_type, initialization_method)
"return the JuMP variable object identified by `key` and string index `idx` from a PowerModel object"
function _var(pm::AbstractPowerModel, key::String, idx::String)
    bus_vars, branch_vars, gen_vars = variable_names(typeof(pm))
    index = parse(Int64, idx)
    local var
    if key in bus_vars || key in gen_vars
        # bus and generator variables are indexed directly by component id
        var = _PM.var(pm, Symbol(key), index)
    elseif key in branch_vars
        branch = _PM.ref(pm, :branch, index)
        f_bus = branch["f_bus"]
        t_bus = branch["t_bus"]
        if key in ["pf", "qf"]
            # "from"-side flow: variable symbol is the first letter (:p or :q)
            var = _PM.var(pm, Symbol(key[1]), (index, f_bus, t_bus))
        elseif key in ["pt", "qt"]
            # "to"-side flow uses the reversed bus pair
            var = _PM.var(pm, Symbol(key[1]), (index, t_bus, f_bus))
        else
            # other branch variables (e.g. wr, wi) are indexed by the bus pair
            var = _PM.var(pm, Symbol(key), (f_bus, t_bus))
        end
    end
    return var
end
"""
    variable_shared_names(model_type::DataType)

Identify the names of the shared (boundary) bus and branch variables for the
given power flow formulation. Returns a tuple of two vectors:
`(bus_variables_name, branch_variables_name)`.
"""
function variable_shared_names(model_type::DataType)
    if model_type <: Union{DCPPowerModel, DCMPPowerModel}
        return ["va"], ["pf"]
    elseif model_type <: NFAPowerModel
        return [], ["pf"]
    elseif model_type <: DCPLLPowerModel
        return ["va"], ["pf", "pt"]
    elseif model_type <: LPACCPowerModel
        return ["va", "phi"], ["pf", "pt", "qf", "qt", "cs"]
    elseif model_type <: ACPPowerModel
        return ["va", "vm"], ["pf", "pt", "qf", "qt"]
    elseif model_type <: ACRPowerModel
        return ["vr", "vi"], ["pf", "pt", "qf", "qt"]
    elseif model_type <: ACTPowerModel
        return ["w", "va"], ["pf", "pt", "qf", "qt", "wr", "wi"]
    elseif model_type <: Union{SOCWRPowerModel, SOCWRConicPowerModel, SDPWRMPowerModel, SparseSDPWRMPowerModel }
        return ["w"], ["pf", "pt", "qf", "qt", "wr", "wi"]
    elseif model_type <: QCRMPowerModel
        return ["vm", "va" , "w"], ["pf", "pt", "qf", "qt", "wr", "wi", "vv", "ccm", "cs", "si", "td"]
    else
        error("PowerModel type is not supported yet!")
    end
end
"""
    variable_names(model_type::DataType)

Identify all the variable names for the given power flow formulation. Returns
a tuple of three vectors:
`(bus_variables_name, branch_variables_name, gen_variables_name)`.
"""
function variable_names(model_type::DataType)
    if model_type <: Union{DCPPowerModel, DCMPPowerModel}
        return ["va"], ["pf"], ["pg"]
    elseif model_type <: NFAPowerModel
        return [], ["pf"], ["pg"]
    elseif model_type <: DCPLLPowerModel
        return ["va"], ["pf", "pt"], ["pg"]
    elseif model_type <: LPACCPowerModel
        return ["va", "phi"], ["pf", "pt", "qf", "qt", "cs"], ["pg", "qg"]
    elseif model_type <: ACPPowerModel
        return ["va", "vm"], ["pf", "pt", "qf", "qt"], ["pg", "qg"]
    elseif model_type <: ACRPowerModel
        return ["vr", "vi"], ["pf", "pt", "qf", "qt"], ["pg", "qg"]
    elseif model_type <: ACTPowerModel
        return ["w", "va"], ["pf", "pt", "qf", "qt", "wr", "wi"], ["pg", "qg"]
    elseif model_type <: Union{SOCWRPowerModel, SOCWRConicPowerModel, SDPWRMPowerModel, SparseSDPWRMPowerModel }
        return ["w"], ["pf", "pt", "qf", "qt", "wr", "wi"], ["pg", "qg"]
    elseif model_type <: QCRMPowerModel
        return ["vm", "va" , "w"], ["pf", "pt", "qf", "qt", "wr", "wi", "vv", "ccm", "cs", "si", "td"], ["pg", "qg"]
    elseif model_type <: AbstractPowerModel
        error("PowerModel type is not supported yet!")
    else
        error("model_type $model_type is not PowerModel type!")
    end
end
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 5683 | using PowerModelsADA
import HiGHS
import Ipopt
using Distributed
using Test
## default setup for solvers
milp_solver = optimizer_with_attributes(HiGHS.Optimizer, "output_flag"=>false)
nlp_solver = optimizer_with_attributes(Ipopt.Optimizer, "tol"=>1e-6, "print_level"=>0)
silence()
## load test systems
data_14 = parse_file("../test/data/case14.m")
data_RTS = parse_file("../test/data/case_RTS.m")
@testset "PowerModelsADA" begin
## assign area test
@testset "assign area to busses" begin
    # partition case14 into two areas using the CSV bus-to-area mapping,
    # then check the resulting bus membership of each area
    assign_area!(data_14, "../test/data/case14_2areas.csv")
    area_bus = get_areas_bus(data_14)
    area_1 = [1, 2, 3, 4, 5]
    area_2 = [6, 7, 8, 9, 10, 11, 12, 13, 14]
    @test sort(area_bus[1]) == area_1
    @test sort(area_bus[2]) == area_2
end
## decompose system test
@testset "decmpose system into subsystems" begin
    @testset "data_RTS" begin
        # expected component counts per area after decomposition of case_RTS
        data_area = decompose_system(data_RTS)
        bus_size = [28, 28, 27]
        gen_size = [35, 38, 33]
        branch_size = [42, 42, 41]
        load_size = [17, 17, 17]
        neighbor_size = [4, 4, 2]  # NOTE(review): expected neighbor counts — currently not asserted
        test_bus = [length(data_area[i]["bus"]) for i in 1:3]
        test_gen = [length(data_area[i]["gen"]) for i in 1:3]
        test_branch = [length(data_area[i]["branch"]) for i in 1:3]
        test_load = [length(data_area[i]["load"]) for i in 1:3]
        # bug fix: this previously compared test_bus with itself, making the
        # bus-count check a tautology
        @test bus_size == test_bus
        @test gen_size == test_gen
        @test branch_size == test_branch
        @test load_size == test_load
    end
end
@testset "serialize shared data" begin
    data_area = decompose_system(data_14)
    for i in [1,2]
        admm_methods.initialize_method(data_area[i], QCRMPowerModel)
    end
    # send area 1's shared variables to area 2 through the serialized channel
    # and verify the received copy matches what was sent
    shared_data = prepare_shared_data(data_area[1], 2; serialize=true)
    receive_shared_data!(data_area[2], shared_data, 1)
    @test data_area[1]["shared_variable"]["2"] == data_area[2]["received_variable"]["1"]
end
# ## paritiotioning test
# @testset "partition system" begin
# @testset "case_RTS" begin
# partition_system!(data_RTS, 3)
# test_count = [count(c -> c["area"] == k, [bus for (i,bus) in data_RTS["bus"]]) for k in 1:3]
# test_count_24 = count(==(24), test_count)
# test_count_25 = count(==(25), test_count)
# @test test_count_24 == 2
# @test test_count_25 == 1
# end
# end
## ADMM test
@testset "admm algorithm with DC power flow" begin
    # fully distributed ADMM on multiple processors; compared against the
    # centralized PowerModels solution via the relative-error metric
    data_area = solve_dopf_admm("../test/data/case14.m", DCPPowerModel, nlp_solver; alpha=1000, tol=1e-3, max_iteration=1000, print_level=0, multiprocessors=true, termination_method="local", mismatch_method="max", termination_measure="dual_residual")
    error = compare_solution(data_14, data_area, DCPPowerModel, milp_solver)
    @test isapprox(error, 0, atol=1e-3)
end
@testset "coordinated admm algorithm with AC polar power flow" begin
    data_area = solve_dopf_admm_coordinated("../test/data/case14.m", ACPPowerModel, nlp_solver; alpha=1000, tol=1e-3, max_iteration=1000, print_level=0, multiprocessors=true, mismatch_method="max",termination_measure="dual_residual")
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 8081.52, atol=5)
end
# ## Adaptive ADMM test
@testset "adaptive admm algorithm with DC power flow" begin
    data_area = solve_dopf_adaptive_admm(data_14, DCPPowerModel, milp_solver; alpha=1000, tol=1e-3, max_iteration=1000, print_level=0)
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 7642.59, atol=5)
end
@testset "adaptive coordinated admm algorithm with AC polar power flow" begin
    # NOTE(review): testset name says "AC polar" but the model used is
    # SOCWRPowerModel — confirm which was intended
    data_area = solve_dopf_adaptive_admm_coordinated(data_14, SOCWRPowerModel, nlp_solver; alpha=1000, tol=1e-3, max_iteration=1000, print_level=0)
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 8075.12, atol=5)
end
## ATC test
@testset "coordinated atc algorithm with SOC relaxation of power flow" begin
    data_area = solve_dopf_atc_coordinated(data_14, SOCWRPowerModel, nlp_solver; alpha=1.1, tol=1e-3, max_iteration=1000, print_level=0)
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 8075.12, atol=5)
end
@testset "atc algorithm with DC power flow" begin
    data_area = solve_dopf_atc(data_14, DCPPowerModel, milp_solver; alpha=1.1, tol=1e-3, max_iteration=1000, print_level = 0)
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 7642.59, atol=5)
end
## APP test
@testset "app algorithm with DC power flow" begin
    # run two iterations, then continue with a warm start from the previous
    # solution (initialization_method="previous")
    data_area = solve_dopf_app(data_14, DCPPowerModel, milp_solver; alpha=1000, tol=1e-3, max_iteration=2, print_level = 0)
    data_area = solve_dopf_app(data_area, DCPPowerModel, milp_solver; alpha=1000, tol=1e-3, max_iteration=1000, print_level = 1, initialization_method="previous")
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 7642.59, atol =5)
end
## ALADIN test
@testset "aladin algorithm with AC polar power flow" begin
    sigma = Dict{String, Real}("va" => 100, "vm" => 50, "pf" => 1, "pt" => 1, "qf" => 1, "qt" => 1, "pg" => 1, "qg" => 1)
    data_area = solve_dopf_aladin_coordinated(data_14, ACPPowerModel, nlp_solver; tol=1e-2, max_iteration=20, print_level=0, p=100, mu=1000, r_p=1.5, r_mu=2, q_gamma=0, sigma=sigma)
    dist_cost = calc_dist_gen_cost(data_area)
    @test isapprox(dist_cost, 8081.53, atol =5)
end
end
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 3362 | # PowerModelsADA.jl
Status:
[](https://github.com/mkhraijah/PowerModelsADA.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/mkhraijah/PowerModelsADA.jl)
[](https://mkhraijah.github.io/PowerModelsADA.jl/)
</p>
## Overview
[`PowerModelsADA.jl`](https://github.com/mkhraijah/PowerModelsADA.jl) (Power Models Alternating Distributed Algorithms) provides a framework to solve Optimal Power Flow (OPF) problems using alternating distributed algorithms. The package allows the use of different distributed algorithms. `PowerModelsADA` is built on top of [`PowerModels.jl`](https://github.com/lanl-ansi/PowerModels.jl) and [`JuMP.jl`](https://github.com/jump-dev/JuMP.jl) to model and solve the subproblems.
## Distributed Algorithms
The `PowerModelsADA` framework is designed to easily incorporate new alternating distributed algorithms. The framework provides means to decompose a test case into multiple areas, model the subproblems associated with each area using `PowerModels`, solve the subproblems in parallel using multi-threading or multi-processing via [`Distributed Computing`](https://docs.julialang.org/en/v1/manual/distributed-computing/), communicate the shared data between the areas, and calculate the mismatches to decide if the termination criteria are satisfied.
The current version of `PowerModelsADA` implements four distributed algorithms:
- Alternating Direction Method of Multipliers (ADMM)
- Analytical Target Cascading (ATC)
- Auxiliary Problem Principle (APP)
- Augmented Lagrangian Alternating Direction Inexact Newton (ALADIN)
`PowerModelsADA` can be extended to include variations of the existing algorithms or new user-defined algorithms. More details about the formulations and algorithm implementations are shown in [Technical Specifications](https://mkhraijah.github.io/PowerModelsADA.jl/dev/specification/)
## Installation
`PowerModelsADA` can be installed using the Julia package manager with
```julia
using Pkg
Pkg.add("PowerModelsADA")
```
## Examples
An example demonstrating how to code up and solve the OPF problem with distributed algorithms is found in [Quick Start Guide](https://mkhraijah.github.io/PowerModelsADA.jl/dev/quickguide/) section of the documentation.
## Contributions
Contributions and enhancements of `PowerModelsADA` are welcomed and encouraged. Please feel free to fork this repository and share your contributions to the main branch with a pull request.
## Citation
If you find `PowerModelsADA` useful for your work, please cite our [paper](https://ieeexplore.ieee.org/document/10262198):
```bibtex
@ARTICLE{alkhraijah2023powermodelsada,
author={Alkhraijah, Mohannad and Harris, Rachel and Coffrin, Carleton and Molzahn, Daniel K.},
journal={IEEE Transactions on Power Systems},
title={PowerModelsADA: A Framework for Solving Optimal Power Flow using Distributed Algorithms},
year={2023},
volume={},
number={},
pages={1-4},
doi={10.1109/TPWRS.2023.3318858}
}
```
## Acknowledgments
This work is partially supported by the NSF AI Institute for Advances in Optimization (Award #2112533).
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 309 | # Adaptive Alternating Direction Method of Multipliers (Adaptive ADMM)
```@meta
CurrentModule = PowerModelsADA
```
```@docs
solve_dopf_adaptive_admm
solve_dopf_adaptive_admm_coordinated
```
```@autodocs
Modules = [PowerModelsADA.adaptive_admm_methods, PowerModelsADA.adaptive_admm_coordinated_methods]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 255 | # Alternating Direction Method of Multipliers (ADMM)
```@meta
CurrentModule = PowerModelsADA
```
```@docs
solve_dopf_admm
solve_dopf_admm_coordinated
```
```@autodocs
Modules = [PowerModelsADA.admm_methods, PowerModelsADA.admm_coordinated_methods]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 230 | # Augmented Lagrangian Alternating Direction Inexact Newton (ALADIN)
```@meta
CurrentModule = PowerModelsADA
```
```@docs
solve_dopf_aladin_coordinated
```
```@autodocs
Modules = [PowerModelsADA.aladin_coordinated_methods]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 167 | # Auxiliary Problem Principle (APP)
```@meta
CurrentModule = PowerModelsADA
```
```@docs
solve_dopf_app
```
```@autodocs
Modules = [PowerModelsADA.app_methods]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 234 | # Analytical Target Cascading (ATC)
```@meta
CurrentModule = PowerModelsADA
```
```@docs
solve_dopf_atc
solve_dopf_atc_coordinated
```
```@autodocs
Modules = [PowerModelsADA.atc_methods, PowerModelsADA.atc_coordinated_methods]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 6037 | # Comparison Results
The results of using `PowerModelsADA`v0.1.1 on 9 test cases from **[PGLib-OPF](https://github.com/power-grid-lib/pglib-opf)** is shown here. We benchmark three distributed algorithms with 5 power flow formulations.
### Simulation Setup
We run the three distributed algorithms on a high-performance computing service with a 16-core CPU and 16GB of RAM. We produced the results shown here using `Julia` v1.8, and [`Ipopt`](https://github.com/jump-dev/Ipopt.jl) solver.
We report the results that achieved the $l_2$-norm of the mismatches less than 0.01 (radians and per unit) within 10,000 iterations and the absolute value of the relative error less than 1% of the central solution from `PowerModels.jl`.
We tune the ADAs parameters by selecting large values and gradually reducing the values until reaching a good solution. For the ADMM and APP, we started with $\alpha = 10^6$ and divided by 10 for the next run, while for the ATC we started with $\alpha =1.2$ and subtracted 0.099 for the next run.
### Polar Form ACOPF
| **Algorithm** | | **ADMM** | | **ATC** | | **APP** | |
|------------- |:-----:|---------: |------:|--------: |------:|---------: |------:|
| **Case name** |**Area**| **Time** |**Itr.**| **Time** |**Itr.**| **Time** |**Itr.**|
| 14_ieee | 2 | 1.13 | 14 | 1.70 | 28 | 5.36 | 31 |
| 24\_ieee_rts | 4 | 21.02 | 97 | 7.82 | 67 | 37.84 | 207 |
| 30_ieee | 3 | 2.18 | 24 | 3.89 | 43 | 2.33 | 25 |
| 30pwl | 3 | 7.40 | 24 | 4.08 | 36 | 10.73 | 49 |
| 39_epri | 3 | 20.14 | 89 | 239.80 | 1261 | 179.63 | 873 |
| 73\_ieee_rts | 3 | 14.37 | 61 | 18.61 | 58 | 23.02 | 83 |
| 179_goc | 3 | 31.42 | 66 | 62.06 | 81 | 76.82 | 166 |
| 300_ieee | 4 | 21.51 | 66 | 651.26 | 920 | 28.16 | 77 |
| 588_sdet | 8 | 295.05 | 871 | 3133.82 | 1971 | 437.17 | 1283 |
### Rectangular Form ACOPF
| **Algorithm** | | **ADMM** | | **ATC** | | **APP** | |
|------------- |:-----:|---------: |------:|--------: |------:|---------: |------:|
| **Case name** |**Area**| **Time** |**Itr.**| **Time** |**Itr.**| **Time** |**Itr.**|
| 14_ieee | 2 | 1.04 | 14 | 1.75 | 28 | 2.80 | 33 |
| 24\_ieee_rts | 4 | 11.45 | 95 | 8.19 | 66 | 23.70 | 206 |
| 30_ieee | 3 | 3.20 | 22 | 3.53 | 40 | 3.36 | 24 |
| 30pwl | 3 | 1.07 | 10 | 6.82 | 59 | 6.13 | 48 |
| 39_epri | 3 | 11.40 | 86 | 323.25 | 1243 | 12.12 | 95 |
| 73\_ieee_rts | 3 | 10.38 | 60 | 21.85 | 58 | 19.76 | 116 |
| 179_goc | 3 | 47.25 | 122 | 63.58 | 83 | 64.74 | 170 |
| 300_ieee | 4 | 17.89 | 44 | 1088.83 | 900 | 33.34 | 94 |
| 588_sdet | 8 | 401.70 | 1031 | 3838.72 | 1977 | 473.00 | 1181 |
### DC Approximation
| **Algorithm** | | **ADMM** | | **ATC** | | **APP** | |
|------------- |:-----:|---------: |------:|--------: |------:|---------: |------:|
| **Case name** |**Area**| **Time** |**Itr.**| **Time** |**Itr.**| **Time** |**Itr.**|
| 14_ieee | 2 | 1.24 | 15 | 0.79 | 45 | 1.24 | 21 |
| 24\_ieee_rts | 4 | 12.02 | 174 | 2.61 | 60 | 14.05 | 199 |
| 30_ieee | 3 | 1.41 | 21 | 0.35 | 45 | 1.44 | 20 |
| 30pwl | 3 | 1.53 | 18 | 0.34 | 42 | 1.38 | 20 |
| 39_epri | 3 | 4.90 | 69 | 6.181 | 69 | 4.51 | 64 |
| 73\_ieee_rts | 3 | 6.81 | 75 | 4.88 | 55 | 6.77 | 79 |
| 179_goc | 3 | 4.56 | 37 | 3.24 | 27 | 5.21 | 44 |
| 300_ieee | 4 | 3.60 | 26 | 11.31 | 58 | 6.03 | 36 |
| 588_sdet | 8 | 106.01 | 656 | 18.535 | 655 | 185.20 | 1156 |
### SOCP Relaxation
| **Algorithm** | | **ADMM** | | **ATC** | | **APP** | |
|------------- |:-----:|---------: |------:|--------: |------:|---------: |------:|
| **Case name** |**Area**| **Time** |**Itr.**| **Time** |**Itr.**| **Time** |**Itr.**|
| 14_ieee | 2 | 1.29 | 12 | 2.16 | 39 | 0.98 | 12 |
| 24\_ieee_rts | 4 | 3.85 | 39 | 4.18 | 32 | 4.94 | 51 |
| 30_ieee | 3 | 1.32 | 12 | 3.96 | 33 | 1.46 | 13 |
| 30pwl | 3 | 1.30 | 11 | 4.53 | 29 | 1.42 | 13 |
| 39_epri | 3 | 5.34 | 41 | 8.53 | 47 | 16.12 | 119 |
| 73\_ieee_rts | 3 | 0.51 | 3 | 8.03 | 23 | 0.45 | 3 |
| 179_goc | 3 | 7.56 | 16 | 20.87 | 23 | 3.54 | 8 |
| 300_ieee | 4 | 5.64 | 11 | 51.75 | 42 | 7.18 | 14 |
| 588_sdet | 8 | 87.89 | 131 | 145.32 | 53 | 85.91 | 130 |
### QC Relaxation
| **Algorithm** | | **ADMM** | | **ATC** | | **APP** | |
|------------- |:-----:|---------: |------:|--------: |------:|---------: |------:|
| **Case name** |**Area**| **Time** |**Itr.**| **Time** |**Itr.**| **Time** |**Itr.**|
| 14_ieee | 2 | 2.09 | 15 | 4.37 | 24 | 4.47 | 15 |
| 24\_ieee_rts | 4 | 15.53 | 55 | 22.07 | 50 | 19.10 | 66 |
| 30_ieee | 3 | 11.05 | 32 | 12.66 | 33 | 9.89 | 30 |
| 30pwl | 3 | 1.73 | 9 | 11.25 | 29 | 2.68 | 14 |
| 39_epri | 3 | 31.80 | 85 | 82.02 | 104 | 36.31 | 101 |
| 73\_ieee_rts | 3 | 2.59 | 7 | 1268.75 | 1009 | 2.81 | 7 |
| 179_goc | 3 | 49.63 | 24 | 118.34 | 24 | 77.16 | 39 |
| 300_ieee | 4 | 18.52 | 11 | 602.90 | 60 | 57.31 | 27 |
| 588_sdet | 8 | 321.28 | 177 | 500.23 | 56 | 316.85 | 177 |
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 2451 | # Data Structure
```@meta
CurrentModule = PowerModelsADA
```
## Input Data
### Case
`PowerModelsADA` uses a dictionary of dictionaries to store the case data and the subproblem data. The case data dictionary is similar to the one in `PowerModels` with area assignment for each bus. The buses in the data dictionary must contain an area key with more than one distinct area ID. The subproblem data is similar to the case data with additional information. The area data contains the area-specific data and the distributed algorithm parameters.
To load a data file, we use `parse_file` function as follow:
```julia
case_path = "test/data/case14.m"
data = parse_file(case_path)
```
### Partitioning
To check the areas ID in a data dictionary, use `get_areas_id(data)` to get all areas' IDs in `data`. If the data dictionary doesn't contain more than one area, we can partition the system manually using `assign_area!` function. An example of partition file is shown in [partition example](https://github.com/mkhraijah/PowerModelsADA.jl/blob/main/test/data/case14_2areas.csv).
```@docs
assign_area!
```
Before running the distributed algorithm, `PowerModelsADA` internally decomposes the original system into subsystems using the `decompose_system` function. The function decouples the tie-lines between two areas by introducing dummy buses and virtual generators at the tie-lines' ends.
```@docs
decompose_system
```
## Output Data
The output of the distributed algorithms is stored in a dictionary. The dictionary's keys are the areas ID, and the dictionary's values are the areas data dictionary with the results stored in `solution` dictionary.
## Saving Iterations Data
To save specific data during the distributed algorithm (e.g., store the `"shared_variable"` dictionary each iteration), use the option `save_data::Vector{String}=[]` in the solve function and add the key of the data (e.g., `save_data=["shared_variable"]`). The output of the solve function will contain a dictionary with a key called `"previous_solution"` that contains vectors of the selected stored data ordered by the iteration number.
## Generation Cost
To calculate the objective function of the central algorithm use `calc_dist_gen_cost`.
```@docs
calc_dist_gen_cost
```
To compare the distributed algorithm objective function value with the central OPF, use `compare_solution` to get the absolute value of the relative error.
```@docs
compare_solution
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 4339 | # PowerModelsADA.jl
```@meta
CurrentModule = PowerModelsADA
```
## Overview
[`PowerModelsADA.jl`](https://github.com/mkhraijah/PowerModelsADA.jl) (Power Models Alternating Distributed Algorithms) provides a framework to solve Optimal Power Flow (OPF) problems using alternating distributed algorithms. The package allows the use of different distributed algorithms. `PowerModelsADA` is built on top of [`PowerModels.jl`](https://github.com/lanl-ansi/PowerModels.jl) and [`JuMP.jl`](https://github.com/jump-dev/JuMP.jl) to model and solve the subproblems.
## Distributed Algorithms
The `PowerModelsADA` framework is designed to easily incorporate new alternating distributed algorithms. The framework provides means to decompose a test case into multiple areas, model the subproblems associated with each area using `PowerModels`, solve the subproblems in parallel using multi-threading or multi-processing via [`Distributed Computing`](https://docs.julialang.org/en/v1/manual/distributed-computing/), communicate the shared data between the areas, and calculate the mismatches to decide if the termination criteria are satisfied.
The current version of `PowerModelsADA` implements four distributed algorithms:
- Alternating Direction Method of Multipliers (ADMM)
- Analytical Target Cascading (ATC)
- Auxiliary Problem Principle (APP)
- Augmented Lagrangian Alternating Direction Inexact Newton (ALADIN)
The specifications of the distributed algorithms are contained in modules within `PowerModelsADA` and can be used with the algorithms' solve functions. The distributed algorithms variations and solve functions are listed below.
| **Algorithm** | **Module** | **Solve Function** |
|------------------------------------|-------------------------------------|----------------------------------------|
| ADMM (fully distributed) | `admm_methods` | `solve_dopf_admm` |
| ADMM (with a coordinator) | `admm_coordinated_methods` | `solve_dopf_admm_coordinated` |
| Adaptive ADMM (fully distributed) | `adaptive_admm_methods` | `solve_dopf_adaptive_admm` |
| Adaptive ADMM (with a coordinator) | `adaptive_admm_coordinated_methods` | `solve_dopf_adaptive_admm_coordinated` |
| ATC (fully distributed) | `atc_methods` | `solve_dopf_atc` |
| ATC (with a coordinator) | `atc_coordinated_methods` | `solve_dopf_atc_coordinated` |
| APP | `app_methods` | `solve_dopf_app` |
| ALADIN (with a coordinator) | `aladin_coordinated_methods` | `solve_dopf_aladin_coordinated` |
`PowerModelsADA` can be extended to include variations of the existing algorithms or new user-defined algorithms. More details about the formulations and algorithm implementations are shown in [Technical Specifications](https://mkhraijah.github.io/PowerModelsADA.jl/dev/specification/)
## Installation
`PowerModelsADA` can be installed using the Julia package manager with
```julia
using Pkg
Pkg.add("PowerModelsADA")
```
## Examples
An example demonstrating how to code up and solve the OPF problem with distributed algorithms is found in [Quick Start Guide](https://mkhraijah.github.io/PowerModelsADA.jl/dev/quickguide/) section of the documentation.
## Contributions
Contributions and enhancements of `PowerModelsADA` are welcomed and encouraged. Please feel free to fork this repository and share your contributions to the main branch with a pull request.
## Citation
If you find `PowerModelsADA` useful for your work, please cite our [paper](https://ieeexplore.ieee.org/document/10262198):
```bibtex
@ARTICLE{alkhraijah2023powermodelsada,
author={Alkhraijah, Mohannad and Harris, Rachel and Coffrin, Carleton and Molzahn, Daniel K.},
journal={IEEE Transactions on Power Systems},
title={PowerModelsADA: A Framework for Solving Optimal Power Flow using Distributed Algorithms},
year={2023},
volume={},
number={},
pages={1-4},
doi={10.1109/TPWRS.2023.3318858}
}
```
## Acknowledgments
This work is partially supported by the NSF AI Institute for Advances in Optimization (Award #2112533).
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 189 | # Library
```@meta
CurrentModule = PowerModelsADA
```
```@autodocs
Modules = [PowerModelsADA]
Pages = ["base.jl", "data.jl", "data_sharing.jl", "opf.jl", "util.jl", "variables.jl"]
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 5256 | # User-Defined Algorithm
```@meta
CurrentModule = PowerModelsADA
```
To define a new algorithm, we need to define a module for the new algorithm that contains the main solve function in addition to three algorithm-specific functions. The three algorithm-specific functions are: initialize, build, and update. You can follow the example in the [template file](https://github.com/mkhraijah/PowerModelsADA.jl/blob/main/example/template.jl).
The module of `xx` algorithm should be defined and exported as `xx_methods` as follows:
```julia
"""
template for xx distributed algorithm
"""
module xx_methods
using ..PowerModelsADA
### functions ###
end
# export the algorithm methods module and call method
export xx_methods, solve_dopf_xx
```
The solve function is the main method to use the `xx` algorithm. The function takes the data, power flow formulation (`model_type`), JuMP solver object, and algorithm's parameters as required. The solve function should use the pre-defined algorithm flow as follows:
```julia
"solve distributed OPF using xx algorithm"
function solve_method(data, model_type::DataType, optimizer;
mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, parameters...)
solve_dopf(data, model_type, optimizer, xx_methods;
mismatch_method=mismatch_method, tol=tol, max_iteration=max_iteration,
print_level=print_level, parameters...)
end
```
The first algorithm-specific function is the initialize function. The function takes the area data file and adds to it the required parameters, counters, and shared variables. There are multiple built-in functions in `PowerModelsADA` that can be used to define the shared and received variables, as well as the dual variables. Note that the initialization function should include the `initialize_dopf!` to define the counters and convergence flags. We use `kwargs` with the `...` to combine the algorithm's parameters and pass them to the `initialize_method`.
```julia
"initialize the xx algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::Type; kwargs...)
# initiate primal and dual shared variables
data["shared_variable"] = Dict(to_area=> variable_name=> variable_index=> value)
data["received_variable"] = Dict(from_area=> variable_name=>variable_index=> value)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# xx parameters
data["parameter"] = Dict("alpha"=> get(kwargs, :alpha, 1000))
end
```
The second function is the build function, which builds the `PowerModels` object of the subproblem. The subproblems typically have the same variables and constraints as the central OPF problem and differ in the objective functions. To build a subproblem with the same variables and constraints as the central OPF problem with a specific objective function, we need to define the objective function using the template shown below. The objective function definition takes the `PowerModels` object and returns a `JuMP` expression. You can use the internal helper function `_var` to obtain the `JuMP` model variables' object defined in the `PowerModels` object.
```julia
"build PowerModel using xx algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_function)
end
"set the xx algorithm objective"
function objective_function(pm::AbstractPowerModel)
# to get the JuMP object of the active power of generator 1 use:
pg1 = _var(pm, :pg, 1)
###
objective = pg1
###
return objective
end
```
```@docs
_var
```
The last function is to update the area dictionary after communicating the shared variables results with other areas.
```julia
"update the xx algorithm before each iteration"
function update_method(data::Dict{String, <:Any})
### update subproblem parameters for the next iteration
###
### you can use predefined function to calculate the mismatches, check convergence, save progress etc.
calc_mismatch!(data, central=true)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
```
The final step is defining the post-processing functions and global keys. The post-processing functions perform tasks to the `PowerModels` object after solving the subproblem. `PowerModelsADA` comes with two post-processing functions. The first function updates the solution dictionary, and the second function updates the shared variables dictionary. The global keys are the keys that are used in the data area dictionary (related to the `xx` algorithm) and should be explicitly given by extending the existing `_pmada_global_keys` set of strings.
```julia
post_processors = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable")
```
This is a general way to define a distributed algorithm that is fully distributed with the same main algorithm flow as the pre-defined algorithms. For other algorithm flows, the solve function needs to be defined fully instead of using the pre-define function `solve_dopf`.
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 374 | # Quick Start Guide
To solve the OPF problem using the ADMM algorithm, use the solve function `solve_dopf_admm`. The solve function stores the result in a data dictionary that contains the subsystems' information.
```julia
using PowerModelsADA
using Ipopt
model_type = ACPPowerModel
result = solve_dopf_admm("test/data/case_RTS.m", model_type, Ipopt.Optimizer; print_level=1, alpha=1000)
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 128 | # Technical Specifications
TODO
## Power Flow Formulation
TODO
## Optimization Solver
TODO
## Distributed Algorithm
TODO
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | docs | 6808 | # Tutorial
`PowerModelsADA` solves OPF problems using either a pre-defined distributed algorithm or a user-defined algorithm. This page shows examples of solving the OPF problem using the pre-defined algorithms and how to define a new alternating distributed algorithm.
The distributed algorithm-specific functions are stored in modules. Each module contains at least three main functions: initialize, build, and update functions. Each module also contains a solve function that solves the OPF by passing the case, solver, and power flow model.
The distributed algorithm module and solve function are:
| **Algorithm** | **Module** | **Solve Function** |
|------------------------------------|-------------------------------------|----------------------------------------|
| ADMM (fully distributed) | `admm_methods` | `solve_dopf_admm` |
| ADMM (with a coordinator) | `admm_coordinated_methods` | `solve_dopf_admm_coordinated` |
| Adaptive ADMM (fully distributed) | `adaptive_admm_methods` | `solve_dopf_adaptive_admm` |
| Adaptive ADMM (with a coordinator) | `adaptive_admm_coordinated_methods` | `solve_dopf_adaptive_admm_coordinated` |
| ATC (fully distributed) | `atc_methods` | `solve_dopf_atc` |
| ATC (with a coordinator) | `atc_coordinated_methods` | `solve_dopf_atc_coordinated` |
| APP | `app_methods` | `solve_dopf_app` |
| ALADIN (with a coordinator) | `aladin_coordinated_methods` | `solve_dopf_aladin_coordinated` |
## Run Distributed Algorithm
To solve the OPF problem, we need first to import the `PowerModelsADA` package and an optimization solver. In this case we use the NLP solver `Ipopt`. You can install the solver using `using Pkg; Pkg.add("Ipopt")`. Then run the following code:
```julia
## Import package
using PowerModelsADA
using Ipopt
```
Next, we need to upload a test case. We will use IEEE 14-bus system in `/test/data/` folder in MATPOWER format. The file can be loaded using `parse_file` from `PowerModels` package. The test system needs to be divided into multiple distinct areas. This can be checked by looking into `data["bus"][bus_id]["area"]`.
```julia
## Read case with partition file and return dictionary of the partitioned case
case_path = "test/data/case14.m"
partition_file_path = "test/data/case14_2areas.csv"
data = parse_file(case_path)
assign_area!(data, partition_file_path)
```
Now, the case study is loaded and ready to be used to solve the OPF problem using distributed algorithms. We first need to define parameters, load the solver, and select a power flow formulation `model_type` as follows:
```julia
## Settings and optimizer initiation
max_iteration = 1000
tol = 1e-4
alpha = 1000
optimizer = optimizer_with_attributes(Ipopt.Optimizer, "print_level"=>0)
```
PowerModelsADA supports the following power flow models:
### Exact power flow
```julia
model_type = ACPPowerModel # AC power flow model with polar bus voltage variables.
model_type = ACRPowerModel # AC power flow model with rectangular bus voltage variables.
```
### Approximations
```julia
model_type = DCPPowerModel # Linearized 'DC' power flow model.
model_type = LPACCPowerModel # LP AC power flow approximation.
```
### Convex relaxations
```julia
model_type = SOCWRPowerModel # Second-order cone relaxation of bus injection model of AC power flow.
model_type = QCRMPowerModel # Quadratic-Convex relaxation of the AC power flow.
model_type = SDPWRMPowerModel # Semidefinite relaxation of AC power flow.
model_type = SparseSDPWRMPowerModel # Sparsity-exploiting semidefinite relaxation of AC power flow.
```
To solve the OPF problem using ADMM algorithm using the solve function, we use the following:
```julia
data_area = solve_dopf_admm(data, model_type, optimizer, tol=tol, max_iteration=max_iteration, alpha=alpha)
```
To use multiprocessing features, we need to use the Distributed library, add processors, and upload the PowerModelsADA and the solver packages to the processors. For the best performance, the number of processors should be equal to the number of areas. The code becomes as follows:
```julia
using Distributed
num_area = 4 # change the number to be equal to number of areas
addprocs(num_area, exeflags="--project")
@everywhere using PowerModelsADA
@everywhere using Ipopt
data_area = solve_dopf_admm(data, model_type, optimizer, tol=tol, max_iteration=max_iteration, alpha=alpha, multiprocessors=true)
```
To compare the distributed algorithm objective function value with the central OPF, use `compare_solution` to get the absolute value of the relative error.
```julia
optimality_gap = compare_solution(data, data_area, model_type, optimizer)
```
PowerModelsADA also provides the flexibility for more granular control of the distributed algorithm. We can use the following code to initialize the distributed algorithm (we use ADMM in this example).
```julia
## define parameters and power flow model
max_iteration = 1000
tol = 1e-4
alpha = 1000
model_type = DCPPowerModel
## obtain areas idx
areas_id = get_areas_id(data)
## decompose the system into subsystems
data_area = decompose_system(data)
## initialize parameters using the algorithm-specific initialize function
for i in areas_id
admm_methods.initialize_method(data_area[i], model_type; tol=tol, max_iteration=max_iteration, alpha = alpha)
end
```
We then start the iterative process of the distributed algorithm using while loop with a pre-define termination criteria as follows:
```julia
## initialize global counters
iteration = 0
flag_convergence = false
## start iteration
while iteration < max_iteration && flag_convergence == false
## solve local problem and update solution
for i in areas_id
result = solve_pmada_model(data_area[i], model_type, optimizer, admm_methods.build_method, solution_processors=admm_methods.post_processors)
update_data!(data_area[i], result["solution"])
end
## share solution with neighbors
for i in areas_id # sender subsystem
for j in data_area[i]["neighbors"] # receiver subsystem
shared_data = prepare_shared_data(data_area[i], j)
receive_shared_data!(data_area[j], deepcopy(shared_data), i)
end
end
# calculate mismatches and update convergence flags
for i in areas_id
admm_methods.update_method(data_area[i])
end
## check global convergence and update iteration counters
flag_convergence = update_global_flag_convergence(data_area)
iteration += 1
end
```
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.2.0 | 3fd7a279de0fa6191f064f6de4601a43e659b867 | code | 671 | using Resizing
using Documenter
# Let doctests assume `using Resizing` is already in scope.
DocMeta.setdocmeta!(Resizing, :DocTestSetup, :(using Resizing); recursive=true)

# HTML output settings for the generated site.
html_format = Documenter.HTML(;
    prettyurls=get(ENV, "CI", "false") == "true",
    canonical="https://Tokazama.github.io/Resizing.jl",
    edit_link="main",
    assets=String[],
)

# Build the documentation pages.
makedocs(;
    modules=[Resizing],
    authors="Zachary P. Christensen <[email protected]> and contributors",
    repo="https://github.com/Tokazama/Resizing.jl/blob/{commit}{path}#{line}",
    sitename="Resizing.jl",
    format=html_format,
    pages=["Home" => "index.md"],
)

# Push the built site to the gh-pages branch.
deploydocs(;
    repo="github.com/Tokazama/Resizing.jl",
    devbranch="main",
)
| Resizing | https://github.com/Tokazama/Resizing.jl.git |
|
[
"MIT"
] | 0.2.0 | 3fd7a279de0fa6191f064f6de4601a43e659b867 | code | 7684 | module Resizing
import Compat: @assume_effects
# Shared docstring fragment interpolated into the `unsafe_grow_*!` docstrings.
const UNSAFE_GROW_DOC = """
This method assumes that `collection` will grow without any errors and may result in
undefined behavior if the user isn't certain `collection` can safely grow. For example, an
instance of `Vector` cannot be shared with another instance of `Array` to change sizes.
"""

# Shared docstring fragment interpolated into the `unsafe_shrink_*!` docstrings.
# Fixed typo: "elements the will be removed" -> "elements that will be removed".
const UNSAFE_SHRINK_DOC = """
This method assumes that `collection` will shrink without any errors and may result in
undefined behavior if the user isn't certain `collection` can safely shrink. For example, an
instance of `Vector` cannot be shared with another instance of `Array` to change sizes. It
also assumes that the provided number of elements that will be removed does not exceed the
size of `collection`.
"""
# https://github.com/JuliaLang/julia/issues/34478
# FIXME is hacky. should replace with https://github.com/JuliaLang/julia/pull/47540
# Offset, in UInt16 units, into the runtime's array header at which the flags
# word lives (past the data pointer and the Csize_t length field, 1-based for
# `unsafe_load`). NOTE(review): this depends on the internal `jl_array_t`
# layout of the targeted Julia version — verify when upgrading.
const FLAG_OFFSET = 1 + sizeof(Csize_t)>>1 + sizeof(Ptr{Cvoid}) >>1
# Return `true` when `x`'s header has the shared-buffer flag set, in which case
# the array must not be resized in place.
function isshared(x::Array)
    ptr = pointer_from_objref(x)
    # Keep `x` rooted while reading its header through a raw pointer.
    GC.@preserve x begin
        # 0x4000 masks the flag bit of interest in the header's flag word.
        # NOTE(review): mask mirrors the runtime's internal flag layout.
        out = (unsafe_load(convert(Ptr{UInt16}, ptr), FLAG_OFFSET) & 0x4000) !== 0x0000
    end
    return out
end
"""
shrink_end!(collection, n::Integer) -> Bool
Deletes `n` elements from begining at the last index of `collection`. If successful will
return `true`.
See also: [`shrink_beg!`](@ref), [`shrink_at!`](@ref), [`unsafe_shrink_end!`](@ref)
"""
shrink_end!(x, n::Integer) = false
function shrink_end!(x::Vector, n::Integer)
if isshared(x) || (length(x) - n) < 0
return false
else
unsafe_shrink_end!(x, n)
return true
end
end
"""
unsafe_shrink_end!(collection, n::Integer) -> Nothing
Deletes `n` elements from the last index of `collection`.
$(UNSAFE_SHRINK_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_shrink_end!(x::Vector, n::Integer)
ccall(:jl_array_del_end, Cvoid, (Any, UInt), x, n)
end
"""
shrink_at!(collection, i::Int, n::Integer) -> Bool
Shrink `collection` by `n` elements at index `i`. If successful this will return `true`.
See also: [`shrink_beg!`](@ref), [`shrink_end!`](@ref), [`unsafe_shrink_at!`](@ref)
"""
shrink_at!(x, i::Int, n::Integer) = false
function shrink_at!(x::Vector, i::Int, n::Integer)
if isshared(x) || i < 1 || (i + n) > length(x)
return false
else
unsafe_shrink_at!(x, i, n)
return true
end
end
"""
unsafe_shrink_at!(collection, i, n::Integer) -> Nothing
Deletes `n` elements from index `i` of `collection`.
$(UNSAFE_SHRINK_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_shrink_at!(x::Vector, i, n::Integer)
ccall(:jl_array_del_at, Cvoid, (Any, Int, UInt), x, i-1, n)
end
"""
shrink_beg!(collection, n::Integer) -> Bool
Deletes `n` elements from the first index of `collection`. If successful will
return `true`.
See also: [`shrink_at!`](@ref), [`shrink_end!`](@ref), [`unsafe_shrink_beg!`](@ref)
"""
shrink_beg!(x, n::Integer) = false
function shrink_beg!(x::Vector, n::Integer)
if isshared(x) || (length(x) - n) < 0
return false
else
unsafe_shrink_beg!(x, n)
return true
end
end
"""
unsafe_shrink_beg!(collection, n::Integer) -> Nothing
Deletes `n` elements from the first index of `collection`.
$(UNSAFE_SHRINK_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_shrink_beg!(x::Vector, n::Integer)
ccall(:jl_array_del_beg, Cvoid, (Any, UInt), x, n)
end
"""
grow_at!(collection, i::Int, n::Integer) -> Bool
Grow `collection` by `n` elements at index `i`. This does not ensure that new
elements are defined. If successful this will return true return `true`.
See also: [`grow_beg!`](@ref), [`grow_end!`](@ref), [`unsafe_grow_at!`](@ref)
"""
grow_at!(x, i, n::Integer) = false
function grow_at!(x::Vector, i, n::Integer)
if isshared(x) || 1 > i || i > (length(x) + 1)
return false
else
unsafe_grow_at!(x, i, n)
return true
end
end
"""
unsafe_grow_at!(collection, i::Int, n::Integer) -> Nothing
Grows by `n` elements at index `i` of `collection`.
$(UNSAFE_GROW_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_grow_at!(x::Vector, i::Int, n::Integer)
ccall(:jl_array_grow_at, Cvoid, (Any, Int, UInt), x, i-1, n)
end
"""
grow_end!(collection, n::Integer) -> Bool
Grow `collection` by `n` elements from its last index. This does not ensure that new
elements are defined. If successful will return `true`.
See also: [`grow_beg!`](@ref), [`grow_at!`](@ref), [`unsafe_grow_end!`](@ref)
"""
grow_end!(x, n::Integer) = false
grow_end!(x::Vector, n::Integer) = isshared(x) ? false : (unsafe_grow_end!(x, n); true)
"""
unsafe_grow_end!(collection, n) -> Nothing
Grows by `n` elements at the last index of `collection`.
$(UNSAFE_GROW_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_grow_end!(x::Vector, n)
ccall(:jl_array_grow_end, Cvoid, (Any, UInt), x, n)
end
"""
grow_beg!(collection, n::Integer) -> Bool
Grow `collection` by `n` elements from its first index. This does not ensure that new
elements are defined. If successful will return `true`.
See also: [`grow_at!`](@ref), [`grow_end!`](@ref), [`unsafe_grow_beg!`](@ref)
"""
grow_beg!(x, n::Integer) = false
grow_beg!(x::Vector, n::Integer) = isshared(x) ? false : (unsafe_grow_beg!(x, n); true)
"""
unsafe_grow_beg!(collection, n) -> Nothing
Grows by `n` elements at the first index of `collection`.
$(UNSAFE_GROW_DOC)
"""
@assume_effects :terminates_locally :nothrow function unsafe_grow_beg!(x::Vector, n)
ccall(:jl_array_grow_end, Cvoid, (Any, UInt), x, n)
end
"""
assert_shrink_end!(collection, n::Integer) -> Nothing
Executes `shrink_end!(collection, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_shrink_end!(x, n)
shrink_end!(x, n) && return nothing
throw(ArgumentError("$(x), cannot shrink from its last index by $(Int(n)) elements"))
end
"""
assert_shrink_beg!(collection, n::Integer) -> Nothing
Executes `shrink_beg!(collection, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_shrink_beg!(x, n)
shrink_beg!(x, n) && return nothing
throw(ArgumentError("$(x), cannot shrink from its first index by $(Int(n)) elements"))
end
"""
assert_shrink_at!(collection, i::Int, n::Integer) -> Nothing
Executes `shrink_at!(collection, i, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_shrink_at!(x, i, n)
shrink_at!(x, i, n) && return nothing
throw(ArgumentError("$(x), cannot shrink at index $(i) by $(Int(n)) elements"))
end
"""
assert_grow_end!!(collection, n::Integer) -> Nothing
Executes `grow_end!(collection, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_grow_end!(x, n)
grow_end!(x, n) && return nothing
throw(ArgumentError("$(x), cannot grow from its last index by $(Int(n)) elements"))
end
"""
assert_grow_beg!(collection, n::Integer) -> Nothing
Executes `grow_beg!(collection, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_grow_beg!(x, n)
grow_beg!(x, n) && return nothing
throw(ArgumentError("$(x), cannot grow from its first index by $(Int(n)) elements"))
end
"""
assert_grow_at!(collection, i::Int, n::Integer) -> Nothing
Executes `grow_at!(collection, i, n)`, throwing an error if unsuccessful or `nothing` if
successful.
"""
function assert_grow_at!(x, i, n)
grow_at!(x, i, n) && return nothing
throw(ArgumentError("$(x), cannot grow at index $(i) by $(Int(n)) elements"))
end
end
| Resizing | https://github.com/Tokazama/Resizing.jl.git |
|
[
"MIT"
] | 0.2.0 | 3fd7a279de0fa6191f064f6de4601a43e659b867 | code | 1458 | using Resizing
using Test
@testset "grow_end!" begin
v = Vector{Int}(undef, 10)
@test !Resizing.grow_end!(1:2, 2)
@test_throws ArgumentError Resizing.assert_grow_end!(1:2, 2)
@test Resizing.grow_end!(v, 2)
@test length(v) == 12
end
@testset "grow_beg!" begin
v = Vector{Int}(undef, 10)
@test !Resizing.grow_beg!(1:2, 2)
@test_throws ArgumentError Resizing.assert_grow_beg!(1:2, 2)
@test Resizing.grow_beg!(v, 2)
@test length(v) == 12
end
@testset "grow_at!" begin
v = Vector{Int}(undef, 10)
@test !Resizing.grow_at!(1:2, 2, 2)
@test_throws ArgumentError Resizing.assert_grow_at!(1:2, 2, 2)
@test !Resizing.grow_at!(v, 12, 2)
@test Resizing.grow_at!(v, 2, 2)
@test length(v) == 12
end
@testset "shrink_end!" begin
@test !Resizing.shrink_end!(1:2, 2)
v = Vector{Int}(undef, 10)
@test_throws ArgumentError Resizing.assert_shrink_end!(v, 11)
@test Resizing.shrink_end!(v, 2)
@test length(v) == 8
end
@testset "shrink_beg!" begin
@test !Resizing.shrink_beg!(1:2, 2)
v = Vector{Int}(undef, 10)
@test_throws ArgumentError Resizing.assert_shrink_beg!(v, 11)
@test Resizing.shrink_beg!(v, 2)
@test length(v) == 8
end
@testset "shrink_at!" begin
@test !Resizing.shrink_at!(1:2, 2, 2)
v = Vector{Int}(undef, 10)
@test_throws ArgumentError Resizing.assert_shrink_at!(v, 2, 9)
@test Resizing.shrink_at!(v, 2, 2)
@test length(v) == 8
end
| Resizing | https://github.com/Tokazama/Resizing.jl.git |
|
[
"MIT"
] | 0.2.0 | 3fd7a279de0fa6191f064f6de4601a43e659b867 | docs | 1810 | # Resizing
[](https://Tokazama.github.io/Resizing.jl/stable/)
[](https://Tokazama.github.io/Resizing.jl/dev/)
[](https://github.com/Tokazama/Resizing.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/Tokazama/Resizing.jl)
Julia does not currently have a well developed interface for changing the size of collections. `Resizing` provides common methods for growing and shrinking collections. Although the relative position where a resizing method is executed may vary by method and collection type, a common pattern makes them straightforward to use and overload for new collections. For example, the following methods are responsible for growing a collection from the last index:
* `Resizing.unsafe_grow_end!(collection, n)`: assumes that all necessary conditions for growing `collection` by `n` elements from the last index are met without checking.
* `Resizing.grow_end!(collection, n)`: Calls `Resizing.unsafe_grow_end!(collection, n)` if it can determine that `collection` can safely grow by `n` elements from the last index, returning `true` if successful.
* `Resizing.assert_grow_end!(collection, n)`: Calls `Resizing.grow_end!(collection, n)`. If `false` is returned it throws an error, otherwise returns `nothing`.
Note that `grow_end!` and `unsafe_grow_end!` must be defined for each new collection type, but `assert_grow_end!` can rely completely on the former two methods.
This same pattern exists for shrinking or growing at the beginning, end, or a specified index.
| Resizing | https://github.com/Tokazama/Resizing.jl.git |
|
[
"MIT"
] | 0.2.0 | 3fd7a279de0fa6191f064f6de4601a43e659b867 | docs | 176 | ```@meta
CurrentModule = Resizing
```
# Resizing
Documentation for [Resizing](https://github.com/Tokazama/Resizing.jl).
```@index
```
```@autodocs
Modules = [Resizing]
```
| Resizing | https://github.com/Tokazama/Resizing.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 219 | module LearnBase
# The package is organized by concern; each `include` pulls in one file.
# AGGREGATION MODES
include("aggmode.jl")
# OBSERVATION DIMENSIONS
include("obsdim.jl")
# LEARNING COSTS (e.g. loss & penalty)
include("costs.jl")
# OTHER CONCEPTS
include("other.jl")
end # module
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 3649 | """
Baseclass for all aggregation modes. See the `AggMode` submodule
for the concrete modes.
"""
abstract type AggregateMode end
"""
module AggMode
Types for aggregation of multiple observations.
- `AggMode.None()`
- `AggMode.Sum()`
- `AggMode.Mean()`
- `AggMode.WeightedSum(weights)`
- `AggMode.WeightedMean(weights)`
"""
module AggMode
using ..LearnBase: AggregateMode
"""
AggMode.None()
Opt-out of aggregation. This is usually the default value.
Using `None` will cause the element-wise results to be returned.
"""
struct None <: AggregateMode end
"""
AggMode.Sum()
Causes the method to return the unweighted sum of the
elements instead of the individual elements. Can be used in
combination with `ObsDim`, in which case a vector will be
returned containing the sum for each observation (useful
mainly for multivariable regression).
"""
struct Sum <: AggregateMode end
"""
AggMode.Mean()
Causes the method to return the unweighted mean of the
elements instead of the individual elements. Can be used in
combination with `ObsDim`, in which case a vector will be
returned containing the mean for each observation (useful
mainly for multivariable regression).
"""
struct Mean <: AggregateMode end
"""
AggMode.WeightedSum(weights; [normalize = false])
Causes the method to return the weighted sum of all
observations. The variable `weights` has to be a vector of
the same length as the number of observations.
If `normalize = true`, the values of the weight vector will
be normalized in such as way that they sum to one.
# Arguments
- `weights::AbstractVector`: Vector of weight values that
can be used to give certain observations a stronger
influence on the sum.
- `normalize::Bool`: Boolean that specifies if the weight
vector should be transformed in such a way that it sums to
one (i.e. normalized). This will not mutate the weight
vector but instead happen on the fly during the
accumulation.
Defaults to `false`. Setting it to `true` only really
makes sense in multivalue-regression, otherwise the result
will be the same as for [`WeightedMean`](@ref).
"""
struct WeightedSum{W<:AbstractVector} <: AggregateMode
weights::W
normalize::Bool
end
WeightedSum(weights::AbstractVector; normalize::Bool = false) = WeightedSum(weights, normalize)
"""
AggMode.WeightedMean(weights; [normalize = true])
Causes the method to return the weighted mean of all
observations. The variable `weights` has to be a vector of
the same length as the number of observations.
If `normalize = true`, the values of the weight vector will
be normalized in such as way that they sum to one.
# Arguments
- `weights::AbstractVector`: Vector of weight values that can
be used to give certain observations a stronger influence
on the mean.
- `normalize::Bool`: Boolean that specifies if the weight
vector should be transformed in such a way that it sums to
one (i.e. normalized). This will not mutate the weight
vector but instead happen on the fly during the
accumulation.
Defaults to `true`. Setting it to `false` only really makes
sense in multivalue-regression, otherwise the result will
be the same as for [`WeightedSum`](@ref).
"""
struct WeightedMean{W<:AbstractVector} <: AggregateMode
weights::W
normalize::Bool
end
WeightedMean(weights::AbstractVector; normalize::Bool = true) = WeightedMean(weights, normalize)
end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 11760 | """
Baseclass for any kind of cost. Notable examples for
costs are `Loss` and `Penalty`.
"""
abstract type Cost end

"""
Baseclass for all losses. A loss is some (possibly simplified)
function `L(x, y, ŷ)`, of features `x`, targets `y` and outputs
`ŷ = f(x)` for some function `f`.
"""
abstract type Loss <: Cost end

"""
A loss is considered **supervised**, if all the information needed
to compute `L(x, y, ŷ)` are contained in `y` and `ŷ`, and thus allows
for the simplification `L(y, ŷ)`.
"""
abstract type SupervisedLoss <: Loss end

"""
A supervised loss that can be simplified to `L(y, ŷ) = L(ŷ - y)`
is considered **distance-based**.
"""
abstract type DistanceLoss <: SupervisedLoss end

"""
A supervised loss with targets `y ∈ {-1, 1}`, and which
can be simplified to `L(y, ŷ) = L(y ⋅ ŷ)` is considered
**margin-based**.
"""
abstract type MarginLoss <: SupervisedLoss end

"""
A loss is considered **unsupervised**, if all the information needed
to compute `L(x, y, ŷ)` are contained in `x` and `ŷ`, and thus allows
for the simplification `L(x, ŷ)`.
"""
abstract type UnsupervisedLoss <: Loss end

"""
Baseclass for all penalties.
"""
abstract type Penalty <: Cost end
"""
value(loss, target, output) -> Number
Compute the (non-negative) numeric result for the `loss` function.
Note that `target` and `output` can be of different numeric type,
in which case promotion is performed in the manner appropriate for
the given loss.
value(loss, targets, outputs) -> AbstractVector
Compute the result for each pair of values in `targets` and `outputs`.
value(loss, targets, outputs, aggmode) -> Number
Compute the weighted or unweighted sum or mean (depending on
aggregation mode `aggmode`) of the individual values of the `loss`
function for each pair in `targets` and `outputs`. This method
will not allocate a temporary array.
In the case that the two parameters are arrays with a different
number of dimensions, broadcast will be performed. Note that the
given parameters are expected to have the same size in the
dimensions they share.
value(loss, targets, outputs, aggmode, obsdim) -> AbstractVector
Compute the aggregated result along dimension with observations `obsdim`.
This method will not allocate a temporary array, but it will allocate
the resulting vector.
Both arrays have to be of the same shape and size. Furthermore they have
to have at least two dimensions (i.e. they must not be vectors).
## Notes
- New loss functions only need to implement the first method with single
`target` and `output`. Fallback implementations are available for other
methods in `LossFunctions.jl`.
"""
function value end
"""
deriv(loss, target, output) -> Number
Compute the analytical derivative with respect to the `output` for the
`loss` function. Note that `target` and `output` can be of different
numeric type, in which case promotion is performed in the manner
appropriate for the given loss.
deriv(loss, targets, outputs) -> AbstractVector
Compute the result for each pair of values in `targets` and `outputs`.
deriv(loss, targets, outputs, aggmode) -> Number
Compute the weighted or unweighted sum or mean (depending on
aggregation mode `aggmode`) of the individual values of the `loss`
function for each pair in `targets` and `outputs`. This method
will not allocate a temporary array.
In the case that the two parameters are arrays with a different
number of dimensions, broadcast will be performed. Note that the
given parameters are expected to have the same size in the
dimensions they share.
deriv(loss, targets, outputs, aggmode, obsdim) -> AbstractVector
Compute the aggregated result along dimension with observations `obsdim`.
This method will not allocate a temporary array, but it will allocate
the resulting vector.
Both arrays have to be of the same shape and size. Furthermore they have
to have at least two dimensions (i.e. they must not be vectors).
## Notes
- New loss functions only need to implement the first method with single
`target` and `output`. Fallback implementations are available for other
methods in `LossFunctions.jl`.
"""
function deriv end
"""
deriv2(loss, target, output) -> Number
Compute the second derivative with respect to the `output` for the
`loss` function. Note that `target` and `output` can be of different
numeric type, in which case promotion is performed in the manner
appropriate for the given loss.
deriv2(loss, targets, outputs) -> AbstractVector
Compute the result for each pair of values in `targets` and `outputs`.
deriv2(loss, targets, outputs, aggmode) -> Number
Compute the weighted or unweighted sum or mean (depending on
aggregation mode `aggmode`) of the individual values of the `loss`
function for each pair in `targets` and `outputs`. This method
will not allocate a temporary array.
In the case that the two parameters are arrays with a different
number of dimensions, broadcast will be performed. Note that the
given parameters are expected to have the same size in the
dimensions they share.
deriv2(loss, targets, outputs, aggmode, obsdim) -> AbstractVector
Compute the aggregated result along dimension with observations `obsdim`.
This method will not allocate a temporary array, but it will allocate
the resulting vector.
Both arrays have to be of the same shape and size. Furthermore they have
to have at least two dimensions (i.e. they must not be vectors).
## Notes
- New loss functions only need to implement the first method with single
`target` and `output`. Fallback implementations are available for other
methods in `LossFunctions.jl`.
"""
function deriv2 end
"""
isconvex(loss) -> Bool
Return `true` if the given `loss` denotes a convex function.
A function `f: ββΏ β β` is convex if its domain is a convex set
and if for all `x, y` in that domain, with `ΞΈ` such that for
`0 β¦ ΞΈ β¦ 1`, we have `f(ΞΈ x + (1 - ΞΈ) y) β¦ ΞΈ f(x) + (1 - ΞΈ) f(y)`.
"""
isconvex(loss::SupervisedLoss) = isstrictlyconvex(loss)
"""
isstrictlyconvex(loss) -> Bool
Return `true` if the given `loss` denotes a strictly convex function.
A function `f : ββΏ β β` is strictly convex if its domain is a convex
set and if for all `x, y` in that domain where `x β y`, with `ΞΈ` such
that for `0 < ΞΈ < 1`, we have `f(ΞΈ x + (1 - ΞΈ) y) < ΞΈ f(x) + (1 - ΞΈ) f(y)`.
"""
isstrictlyconvex(loss::SupervisedLoss) = isstronglyconvex(loss)
"""
isstronglyconvex(loss) -> Bool
Return `true` if the given `loss` denotes a strongly convex function.
A function `f : ββΏ β β` is `m`-strongly convex if its domain is a convex
set, and if for all `x, y` in that domain where `x β y`, and `ΞΈ` such that
for `0 β€ ΞΈ β€ 1`, we have
`f(ΞΈ x + (1 - ΞΈ)y) < ΞΈ f(x) + (1 - ΞΈ) f(y) - 0.5 m β
ΞΈ (1 - ΞΈ) | x - y |βΒ²`
In a more familiar setting, if the loss function is differentiable we have
`(βf(x) - βf(y))α΅ (x - y) β₯ m | x - y |βΒ²`
"""
isstronglyconvex(loss::SupervisedLoss) = false
"""
isdifferentiable(loss, [x]) -> Bool
Return `true` if the given `loss` is differentiable
(optionally limited to the given point `x` if specified).
A function `f : ββΏ β βα΅` is differentiable at a point `x` in the interior
domain of `f` if there exists a matrix `Df(x) β β^(m Γ n)` such that
it satisfies:
`lim_{z β x, z β x} (|f(z) - f(x) - Df(x)(z-x)|β) / |z - x|β = 0`
A function is differentiable if its domain is open and it is
differentiable at every point `x`.
"""
isdifferentiable(loss::SupervisedLoss) = istwicedifferentiable(loss)
isdifferentiable(loss::SupervisedLoss, at) = isdifferentiable(loss)
"""
istwicedifferentiable(loss, [x]) -> Bool
Return `true` if the given `loss` is differentiable
(optionally limited to the given point `x` if specified).
A function `f : ββΏ β β` is said to be twice differentiable
at a point `x` in the interior domain of `f`, if the function
derivative for `βf` exists at `x`: `βΒ²f(x) = Dβf(x)`.
A function is twice differentiable if its domain is open and it
is twice differentiable at every point `x`.
"""
istwicedifferentiable(loss::SupervisedLoss) = false
istwicedifferentiable(loss::SupervisedLoss, at) = istwicedifferentiable(loss)
"""
islocallylipschitzcont(loss) -> Bool
Return `true` if the given `loss` function is locally-Lipschitz
continous.
A supervised loss `L : Y Γ β β [0, β)` is called locally Lipschitz
continuous if for all `a β₯ 0` there exists a constant `cβ β₯ 0`,
such that
`sup_{y β Y} | L(y,t) β L(y,tβ²) | β€ cβ |t β tβ²|, t, tβ² β [βa,a]`
Every convex function is locally lipschitz continuous.
"""
islocallylipschitzcont(loss::SupervisedLoss) =
isconvex(loss) || islipschitzcont(loss)
"""
islipschitzcont(loss) -> Bool
Return `true` if the given `loss` function is Lipschitz continuous.
A supervised loss function `L : Y Γ β β [0, β)` is Lipschitz continous,
if there exists a finite constant `M < β` such that
`|L(y, t) - L(y, tβ²)| β€ M |t - tβ²|, β (y, t) β Y Γ β`
"""
islipschitzcont(loss::SupervisedLoss) = false
"""
isnemitski(loss) -> Bool
Return `true` if the given `loss` denotes a Nemitski loss function.
We call a supervised loss function `L : Y Γ β β [0,β)` a Nemitski
loss if there exist a measurable function `b : Y β [0, β)` and an
increasing function `h : [0, β) β [0, β)` such that
`L(y,yΜ) β€ b(y) + h(|yΜ|), (y, yΜ) β Y Γ β`
If a loss if locally lipsschitz continuous then it is a Nemitski loss.
"""
isnemitski(loss::SupervisedLoss) = islocallylipschitzcont(loss)
isnemitski(loss::MarginLoss) = true
"""
isunivfishercons(loss) -> Bool
"""
isunivfishercons(loss::Loss) = false
"""
isfishercons(loss) -> Bool
Return `true` if the givel `loss` is Fisher consistent.
We call a supervised loss function `L : Y Γ β β [0,β)` a Fisher
consistent loss if the population minimizer of the risk `E[L(y,f(x))]`
for all measurable functions leads to the Bayes optimal decision rule.
"""
isfishercons(loss::Loss) = isunivfishercons(loss)
"""
isclipable(loss) -> Bool
Return `true` if the given `loss` function is clipable. A
supervised loss `L : Y Γ β β [0,β)` can be clipped at `M > 0`
if, for all `(y,t) β Y Γ β`, `L(y, tΜ) β€ L(y, t)` where
`tΜ` denotes the clipped value of `t` at `Β± M`.
That is
`tΜ = -M` if `t < -M`, `tΜ = t` if `t β [-M, M]`, and `t = M` if `t > M`.
"""
isclipable(loss::SupervisedLoss) = false
isclipable(loss::DistanceLoss) = true # can someone please double check?
"""
isdistancebased(loss) -> Bool
Return `true` if the given `loss` is a distance-based loss.
A supervised loss function `L : Y Γ β β [0,β)` is said to be
distance-based, if there exists a representing function `Ο : β β [0,β)`
satisfying `Ο(0) = 0` and `L(y, yΜ) = Ο (yΜ - y), (y, yΜ) β Y Γ β`.
"""
isdistancebased(loss::Loss) = false
isdistancebased(loss::DistanceLoss) = true
"""
ismarginbased(loss) -> Bool
Return `true` if the given `loss` is a margin-based loss.
A supervised loss function `L : Y Γ β β [0,β)` is said to be
margin-based, if there exists a representing function `Ο : β β [0,β)`
satisfying `L(y, yΜ) = Ο(yβ
yΜ), (y, yΜ) β Y Γ β`.
"""
ismarginbased(loss::Loss) = false
ismarginbased(loss::MarginLoss) = true
"""
isclasscalibrated(loss) -> Bool
"""
isclasscalibrated(loss::SupervisedLoss) = false
isclasscalibrated(loss::MarginLoss) =
isconvex(loss) && isdifferentiable(loss, 0) && deriv(loss, 0) < 0
"""
issymmetric(loss) -> Bool
Return `true` if the given loss is a symmetric loss.
A function `f : β β [0,β)` is said to be symmetric
about origin if we have `f(x) = f(-x), β x β β`.
A distance-based loss is said to be symmetric if its
representing function is symmetric.
"""
issymmetric(loss::SupervisedLoss) = false
"""
isminimizable(loss) -> Bool
Return `true` if the given `loss` is a minimizable loss.
"""
isminimizable(loss::SupervisedLoss) = isconvex(loss)
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 2694 | """
Baseclass for all observation dimensions.
"""
abstract type ObsDimension end
"""
module ObsDim
Singleton types to define which dimension of some data structure
(e.g. some `Array`) denotes the observations.
- `ObsDim.First()`
- `ObsDim.Last()`
- `ObsDim.Constant(dim)`
Used for efficient dispatching
"""
module ObsDim
using ..LearnBase: ObsDimension
"""
Default value for most functions. Denotes that the concept of
an observation dimension is not defined for the given data.
"""
struct Undefined <: ObsDimension end
"""
ObsDim.Last <: ObsDimension
Defines that the last dimension denotes the observations
"""
struct Last <: ObsDimension end
"""
ObsDim.Constant{DIM} <: ObsDimension
Defines that the dimension `DIM` denotes the observations
"""
struct Constant{DIM} <: ObsDimension end
Constant(dim::Int) = Constant{dim}()
"""
ObsDim.First <: ObsDimension
Defines that the first dimension denotes the observations
"""
const First = Constant{1}
end
# Fallback: anything not covered by a more specific method is an error.
Base.convert(::Type{ObsDimension}, dim) = throw(ArgumentError("Unknown way to specify a obsdim: $dim"))
# Identity conversion for values that already are ObsDimension singletons.
Base.convert(::Type{ObsDimension}, dim::ObsDimension) = dim
# `nothing` means "no observation dimension defined".
Base.convert(::Type{ObsDimension}, ::Nothing) = ObsDim.Undefined()
# Integers map to the matching Constant singleton (1 yields ObsDim.First()).
Base.convert(::Type{ObsDimension}, dim::Int) = ObsDim.Constant(dim)
# Strings are normalised to lowercase symbols and re-dispatched.
Base.convert(::Type{ObsDimension}, dim::String) = convert(ObsDimension, Symbol(lowercase(dim)))
# Tuples are converted element-wise.
Base.convert(::Type{ObsDimension}, dims::Tuple) = map(d->convert(ObsDimension, d), dims)
"""
Translate a spelled-out `Symbol` (e.g. `:first`, `:last`, `:none`)
into the corresponding `ObsDimension` singleton. Unknown symbols
raise an `ArgumentError`.
"""
function Base.convert(::Type{ObsDimension}, dim::Symbol)
    if dim === :first || dim === :begin
        return ObsDim.First()
    elseif dim === :last || dim === Symbol("end")
        return ObsDim.Last()
    elseif dim in (:nothing, :none, :null, :na, :undefined)
        return ObsDim.Undefined()
    else
        throw(ArgumentError("Unknown way to specify a obsdim: $dim"))
    end
end
"""
default_obsdim(data)
The specify the default obsdim for a specific type of data.
Defaults to `ObsDim.Undefined()`
"""
default_obsdim(data) = ObsDim.Undefined()
default_obsdim(A::AbstractArray) = ObsDim.Last()
default_obsdim(tup::Tuple) = map(default_obsdim, tup)
"""
datasubset(data, [idx], [obsdim])
Return a lazy subset of the observations in `data` that correspond
to the given `idx`. No data should be copied except of the
indices. Note that `idx` can be of type `Int` or `AbstractVector`.
Both options must be supported by a custom type.
If it makes sense for the type of `data`, `obsdim` can be used
to disptach on which dimension of `data` denotes the observations.
See `?ObsDim`.
"""
function datasubset end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 10385 | """
Return the gradient of the learnable parameters w.r.t. some objective
"""
function grad end
# Mutating counterpart of `grad` (per the `!` naming convention).
function grad! end
"""
Proximal operator of a function (https://en.wikipedia.org/wiki/Proximal_operator)
"""
function prox end
# Mutating counterpart of `prox` (per the `!` naming convention).
function prox! end
"""
Anything that takes an input and performs some kind
of function to produce an output. For example a linear
prediction function.
"""
abstract type Transformation end
abstract type StochasticTransformation <: Transformation end
abstract type Learnable <: Transformation end
"""
Do a forward pass, and return the output
"""
function transform end
function transform! end
"""
Baseclass for any prediction model that can be minimized.
This means that an object of a subclass contains all the
information needed to compute its own current loss.
"""
abstract type Minimizable <: Learnable end
function update end
function update! end
function learn end
function learn! end
# getobs(data, [idx], [obsdim])
#
# Return the observations corresponding to the observation-index
# `idx`. Note that `idx` can be of type `Int` or `AbstractVector`.
# Both options must be supported by a custom type.
#
# The returned observation(s) should be in the form intended to
# be passed as-is to some learning algorithm. There is no strict
# interface requirement on how this "actual data" must look like.
# Every author behind some custom data container can make this
# decision him-/herself. We do, however, expect it to be consistent
# for `idx` being an integer, as well as `idx` being an abstract
# vector, respectively.
#
# If it makes sense for the type of `data`, `obsdim` can be used
# to dispatch on which dimension of `data` denotes the observations.
# See `?ObsDim`
#
# This function is implemented in MLDataPattern
function getobs end
# getobs!(buffer, data, [idx], [obsdim])
#
# Inplace version of `getobs(data, idx, obsdim)`. If this method
# is defined for the type of `data`, then `buffer` should be used
# to store the result, instead of allocating a dedicated object.
#
# Implementing this function is optional. In the case no such
# method is provided for the type of `data`, then `buffer` will be
# *ignored* and the result of `getobs` returned. This could be
# because the type of `data` may not lend itself to the concept
# of `copy!`. Thus supporting a custom `getobs!(::MyType, ...)`
# is optional and not required.
#
# If it makes sense for the type of `data`, `obsdim` can be used
# to dispatch on which dimension of `data` denotes the observations.
# See `?ObsDim`
#
# This function is implemented in MLDataPattern
function getobs! end
# --------------------------------------------------------------------
# gettarget([f], observation)
#
# Use `f` (if provided) to extract the target from the single
# `observation` and return it. It is used internally by
# `targets` (only if `f` is provided) and by
# `eachtarget` (always) on each individual observation.
#
# Even though this function is not exported, it is intended to be
# extended by users to support their custom data storage types.
#
# This function is implemented in MLDataPattern
function gettarget end # not exported
# gettargets(data, [idx], [obsdim])
#
# Return the targets corresponding to the observation-index `idx`.
# Note that `idx` can be of type `Int` or `AbstractVector`.
#
# Implementing this function for a custom type of `data` is
# optional. It is particularly useful if the targets in `data` can
# be provided without invoking `getobs`. For example if you have a
# remote data-source where the labels are part of some metadata
# that is locally available.
#
# If it makes sense for the type of `data`, `obsdim` can be used
# to dispatch on which dimension of `data` denotes the observations.
# See `?ObsDim`
#
# This function is implemented in MLDataPattern
function gettargets end # not exported
# targets([f], data, [obsdim])
#
# This function is implemented in MLDataPattern
function targets end
# --------------------------------------------------------------------
"""
abstract DataView{TElem, TData} <: AbstractVector{TElem}
Baseclass for all vector-like views of some data structure.
This allow for example to see some design matrix as a vector of
individual observation-vectors instead of one matrix.
see `MLDataPattern.ObsView` and `MLDataPattern.BatchView` for examples.
"""
abstract type DataView{TElem, TData} <: AbstractVector{TElem} end
"""
abstract AbstractObsView{TElem, TData} <: DataView{TElem, TData}
Baseclass for all vector-like views of some data structure,
that views it as some form or vector of observations.
see `MLDataPattern.ObsView` for a concrete example.
"""
abstract type AbstractObsView{TElem, TData} <: DataView{TElem, TData} end
"""
abstract AbstractBatchView{TElem, TData} <: DataView{TElem, TData}
Baseclass for all vector-like views of some data structure,
that views it as some form or vector of equally sized batches.
see `MLDataPattern.BatchView` for a concrete example.
"""
abstract type AbstractBatchView{TElem, TData} <: DataView{TElem, TData} end
# --------------------------------------------------------------------
"""
abstract DataIterator{TElem,TData}
Baseclass for all types that iterate over a `data` source
in some manner. The total number of observations may or may
not be known or defined and in general there is no contract that
`getobs` or `nobs` has to be supported by the type of `data`.
Furthermore, `length` should be used to query how many elements
the iterator can provide, while `nobs` may return the underlying
true amount of observations available (if known).
see `MLDataPattern.RandomObs`, `MLDataPattern.RandomBatches`
"""
abstract type DataIterator{TElem,TData} end
"""
abstract ObsIterator{TElem,TData} <: DataIterator{TElem,TData}
Baseclass for all types that iterate over some data source
one observation at a time.
```julia
using MLDataPattern
@assert typeof(RandomObs(X)) <: ObsIterator
for x in RandomObs(X)
# ...
end
```
see `MLDataPattern.RandomObs`
"""
abstract type ObsIterator{TElem,TData} <: DataIterator{TElem,TData} end
"""
abstract BatchIterator{TElem,TData} <: DataIterator{TElem,TData}
Baseclass for all types that iterate over of some data source one
batch at a time.
```julia
@assert typeof(RandomBatches(X, size=10)) <: BatchIterator
for x in RandomBatches(X, size=10)
@assert nobs(x) == 10
# ...
end
```
see `MLDataPattern.RandomBatches`
"""
abstract type BatchIterator{TElem,TData} <: DataIterator{TElem,TData} end
# --------------------------------------------------------------------
# just for dispatch for those who care to
const AbstractDataIterator{E,T} = Union{DataIterator{E,T}, DataView{E,T}}
const AbstractObsIterator{E,T} = Union{ObsIterator{E,T}, AbstractObsView{E,T}}
const AbstractBatchIterator{E,T} = Union{BatchIterator{E,T},AbstractBatchView{E,T}}
# --------------------------------------------------------------------
import Base: AbstractSet

"A continuous range (inclusive) between a lo and a hi"
struct IntervalSet{T} <: AbstractSet{T}
    lo::T
    hi::T
end

# Outer constructor: promote both endpoints to a common type.
function IntervalSet(lo::A, hi::B) where {A,B}
    l, h = promote(lo, hi)
    return IntervalSet{typeof(l)}(l, h)
end

# numeric interval: draws are uniform on [lo, hi] and always Float64
randtype(s::IntervalSet{T}) where T <: Number = Float64
function Base.rand(s::IntervalSet{T}, dims::Integer...) where T <: Number
    width = s.hi - s.lo
    return rand(dims...) .* width .+ s.lo
end
Base.in(x::Number, s::IntervalSet{T}) where T <: Number = s.lo <= x <= s.hi
# A numeric interval counts as a single (scalar) degree of freedom.
Base.length(s::IntervalSet{T}) where T <: Number = 1
function Base.:(==)(s1::IntervalSet{T}, s2::IntervalSet{T}) where T
    return s1.lo == s2.lo && s1.hi == s2.hi
end
# vector of intervals: `lo` and `hi` are vectors of per-coordinate bounds
randtype(s::IntervalSet{T}) where T <: AbstractVector = Vector{Float64}
# Sample each coordinate uniformly within its own [lo[i], hi[i]] bounds.
Base.rand(s::IntervalSet{T}) where T <: AbstractVector = Float64[rand() * (s.hi[i] - s.lo[i]) + s.lo[i] for i=1:length(s)]
# Membership requires every coordinate to lie within its bounds.
Base.in(x::AbstractVector, s::IntervalSet{T}) where T <: AbstractVector = all(i -> s.lo[i] <= x[i] <= s.hi[i], 1:length(s))
# Length equals the number of coordinates (degrees of freedom).
Base.length(s::IntervalSet{T}) where T <: AbstractVector = length(s.lo)
"Set of discrete items"
struct DiscreteSet{T<:AbstractArray} <: AbstractSet{T}
items::T
end
randtype(s::DiscreteSet) = eltype(s.items)
Base.rand(s::DiscreteSet, dims::Integer...) = rand(s.items, dims...)
Base.in(x, s::DiscreteSet) = x in s.items
Base.length(s::DiscreteSet) = length(s.items)
Base.getindex(s::DiscreteSet, i::Int) = s.items[i]
Base.:(==)(s1::DiscreteSet, s2::DiscreteSet) = s1.items == s2.items
# operations on arrays of sets
# The random draw for an array of sets is an array whose element type is
# the promotion of each member set's randtype.
randtype(sets::AbstractArray{S,N}) where {S <: AbstractSet, N} = Array{promote_type(map(randtype, sets)...), N}
Base.rand(sets::AbstractArray{S}) where S <: AbstractSet = eltype(randtype(sets))[rand(s) for s in sets]
# Multi-sample variant: each entry of the output array is one full draw.
function Base.rand(sets::AbstractArray{S}, dim1::Integer, dims::Integer...) where S <: AbstractSet
A = Array{randtype(sets)}(undef, dim1, dims...)
for i in eachindex(A)
A[i] = rand(sets)
end
A
end
# Element-wise membership: shapes must match and every entry must belong
# to its corresponding set.
function Base.in(xs::AbstractArray, sets::AbstractArray{S}) where S <: AbstractSet
size(xs) == size(sets) && all(map(in, xs, sets))
end
"Groups several heterogenous sets. Used mainly for proper dispatch."
struct TupleSet{T<:Tuple} <: AbstractSet{T}
sets::T
end
TupleSet(sets::AbstractSet...) = TupleSet(sets)
# rand can return arrays or tuples, but defaults to arrays
randtype(sets::TupleSet, ::Type{Vector}) = Vector{promote_type(map(randtype, sets.sets)...)}
Base.rand(sets::TupleSet, ::Type{Vector}) = eltype(randtype(sets, Vector))[rand(s) for s in sets.sets]
randtype(sets::TupleSet, ::Type{Tuple}) = Tuple{map(randtype, sets.sets)...}
Base.rand(sets::TupleSet, ::Type{Tuple}) = map(rand, sets.sets)
function Base.rand(sets::TupleSet, ::Type{OT}, dim1::Integer, dims::Integer...) where OT
A = Array{randtype(sets, OT)}(undef, dim1, dims...)
for i in eachindex(A)
A[i] = rand(sets, OT)
end
A
end
Base.length(sets::TupleSet) = sum(length(s) for s in sets.sets)
Base.iterate(sets::TupleSet) = iterate(sets.sets)
Base.iterate(sets::TupleSet, i) = iterate(sets.sets, i)
randtype(sets::TupleSet) = randtype(sets, Vector)
Base.rand(sets::TupleSet, dims::Integer...) = rand(sets, Vector, dims...)
Base.in(x, sets::TupleSet) = all(map(in, x, sets.sets))
"Returns an AbstractSet representing valid input values"
function inputdomain end
"Returns an AbstractSet representing valid output/target values"
function targetdomain end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 407 | @testset "AggMode" begin
# Every aggregation-mode singleton must be a subtype of AggregateMode.
@test typeof(LearnBase.AggMode.None()) <: LearnBase.AggregateMode
@test typeof(LearnBase.AggMode.Sum()) <: LearnBase.AggregateMode
@test typeof(LearnBase.AggMode.Mean()) <: LearnBase.AggregateMode
@test typeof(LearnBase.AggMode.WeightedSum([1,2,3])) <: LearnBase.AggregateMode
@test typeof(LearnBase.AggMode.WeightedMean([1,2,3])) <: LearnBase.AggregateMode
end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 1937 | # dummy types for testing
# Dummy loss used to exercise the trait fallback chain:
# strongly convex + Lipschitz should imply the weaker traits below.
struct MyStronglyConvexType <: LearnBase.SupervisedLoss end
LearnBase.isstronglyconvex(::MyStronglyConvexType) = true
LearnBase.islipschitzcont(::MyStronglyConvexType) = true
@testset "Costs" begin
@test LearnBase.Cost <: Any
@test LearnBase.Penalty <: LearnBase.Cost
@test LearnBase.Loss <: LearnBase.Cost
@test LearnBase.UnsupervisedLoss <: LearnBase.Loss
@test LearnBase.SupervisedLoss <: LearnBase.Loss
@test LearnBase.MarginLoss <: LearnBase.SupervisedLoss
@test LearnBase.DistanceLoss <: LearnBase.SupervisedLoss
@test typeof(LearnBase.value) <: Function
@test typeof(LearnBase.deriv) <: Function
@test typeof(LearnBase.deriv2) <: Function
@test typeof(LearnBase.isminimizable) <: Function
@test typeof(LearnBase.isdifferentiable) <: Function
@test typeof(LearnBase.istwicedifferentiable) <: Function
@test typeof(LearnBase.isconvex) <: Function
@test typeof(LearnBase.isstrictlyconvex) <: Function
@test typeof(LearnBase.isstronglyconvex) <: Function
@test typeof(LearnBase.isnemitski) <: Function
@test typeof(LearnBase.islipschitzcont) <: Function
@test typeof(LearnBase.islocallylipschitzcont) <: Function
@test typeof(LearnBase.isfishercons) <: Function
@test typeof(LearnBase.isunivfishercons) <: Function
@test typeof(LearnBase.isclipable) <: Function
@test typeof(LearnBase.ismarginbased) <: Function
@test typeof(LearnBase.isdistancebased) <: Function
@test typeof(LearnBase.isclasscalibrated) <: Function
@test typeof(LearnBase.issymmetric) <: Function
# test fallback methods
@test LearnBase.isstronglyconvex(MyStronglyConvexType())
@test LearnBase.isstrictlyconvex(MyStronglyConvexType())
@test LearnBase.isconvex(MyStronglyConvexType())
@test LearnBase.islipschitzcont(MyStronglyConvexType())
@test LearnBase.islocallylipschitzcont(MyStronglyConvexType())
end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 4687 | struct SomeType end
@testset "ObsDim" begin
@testset "Type tree" begin
@test_throws MethodError LearnBase.ObsDim.Constant(2.0)
@test typeof(LearnBase.ObsDim.First()) <: LearnBase.ObsDimension
@test typeof(LearnBase.ObsDim.First()) <: LearnBase.ObsDim.First
@test typeof(LearnBase.ObsDim.First()) <: LearnBase.ObsDim.Constant{1}
@test typeof(LearnBase.ObsDim.Last()) <: LearnBase.ObsDimension
@test typeof(LearnBase.ObsDim.Last()) <: LearnBase.ObsDim.Last
@test typeof(LearnBase.ObsDim.Constant(2)) <: LearnBase.ObsDimension
@test typeof(LearnBase.ObsDim.Constant(2)) <: LearnBase.ObsDim.Constant{2}
end
@testset "Constructors" begin
@test_throws ArgumentError convert(LearnBase.ObsDimension, "test")
@test_throws ArgumentError convert(LearnBase.ObsDimension, 1.0)
@test @inferred(convert(LearnBase.ObsDimension, LearnBase.ObsDim.First())) === LearnBase.ObsDim.First()
@test @inferred(convert(LearnBase.ObsDimension, LearnBase.ObsDim.First())) === LearnBase.ObsDim.Constant(1)
@test @inferred(convert(LearnBase.ObsDimension, LearnBase.ObsDim.Last())) === LearnBase.ObsDim.Last()
@test @inferred(convert(LearnBase.ObsDimension, LearnBase.ObsDim.Constant(2))) === LearnBase.ObsDim.Constant(2)
@test_throws ErrorException @inferred convert(LearnBase.ObsDimension, 1)
@test_throws ErrorException @inferred convert(LearnBase.ObsDimension, 6)
@test convert(LearnBase.ObsDimension, 1) === LearnBase.ObsDim.First()
@test convert(LearnBase.ObsDimension, 2) === LearnBase.ObsDim.Constant(2)
@test convert(LearnBase.ObsDimension, 6) === LearnBase.ObsDim.Constant(6)
@test_throws ErrorException @inferred convert(LearnBase.ObsDimension, :first)
@test_throws ErrorException @inferred convert(LearnBase.ObsDimension, "first")
@test convert(LearnBase.ObsDimension, (:first,:last)) === (LearnBase.ObsDim.First(),LearnBase.ObsDim.Last())
@test convert(LearnBase.ObsDimension, :first) === LearnBase.ObsDim.First()
@test convert(LearnBase.ObsDimension, :begin) === LearnBase.ObsDim.First()
@test convert(LearnBase.ObsDimension, "first") === LearnBase.ObsDim.First()
@test convert(LearnBase.ObsDimension, "BEGIN") === LearnBase.ObsDim.First()
@test convert(LearnBase.ObsDimension, :end) === LearnBase.ObsDim.Last()
@test convert(LearnBase.ObsDimension, :last) === LearnBase.ObsDim.Last()
@test convert(LearnBase.ObsDimension, "End") === LearnBase.ObsDim.Last()
@test convert(LearnBase.ObsDimension, "LAST") === LearnBase.ObsDim.Last()
@test convert(LearnBase.ObsDimension, :nothing) === LearnBase.ObsDim.Undefined()
@test convert(LearnBase.ObsDimension, :none) === LearnBase.ObsDim.Undefined()
@test convert(LearnBase.ObsDimension, :na) === LearnBase.ObsDim.Undefined()
@test convert(LearnBase.ObsDimension, :null) === LearnBase.ObsDim.Undefined()
@test convert(LearnBase.ObsDimension, :undefined) === LearnBase.ObsDim.Undefined()
@test convert(LearnBase.ObsDimension, nothing) === LearnBase.ObsDim.Undefined()
end
@testset "Default values" begin
@testset "Arrays, SubArrays, and Sparse Arrays" begin
@test @inferred(LearnBase.default_obsdim(rand(10))) === LearnBase.ObsDim.Last()
@test @inferred(LearnBase.default_obsdim(view(rand(10),:))) === LearnBase.ObsDim.Last()
@test @inferred(LearnBase.default_obsdim(rand(10,5))) === LearnBase.ObsDim.Last()
@test @inferred(LearnBase.default_obsdim(view(rand(10,5),:,:))) === LearnBase.ObsDim.Last()
@test @inferred(LearnBase.default_obsdim(sprand(10,0.5))) === LearnBase.ObsDim.Last()
@test @inferred(LearnBase.default_obsdim(sprand(10,5,0.5))) === LearnBase.ObsDim.Last()
end
@testset "Types with no specified default" begin
@test @inferred(LearnBase.default_obsdim(SomeType())) === LearnBase.ObsDim.Undefined()
end
@testset "Tuples" begin
@test @inferred(LearnBase.default_obsdim((SomeType(),SomeType()))) === (LearnBase.ObsDim.Undefined(), LearnBase.ObsDim.Undefined())
@test @inferred(LearnBase.default_obsdim((SomeType(),rand(2,2)))) === (LearnBase.ObsDim.Undefined(), LearnBase.ObsDim.Last())
@test @inferred(LearnBase.default_obsdim((rand(10),SomeType()))) === (LearnBase.ObsDim.Last(), LearnBase.ObsDim.Undefined())
@test @inferred(LearnBase.default_obsdim((rand(10),rand(2,2)))) === (LearnBase.ObsDim.Last(), LearnBase.ObsDim.Last())
end
end
end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 6609 | @testset "Other" begin
# Abstract type hierarchy and function stubs exported by LearnBase.
@test LearnBase.Minimizable <: Any
@test LearnBase.Transformation <: Any
@test LearnBase.StochasticTransformation <: LearnBase.Transformation
@test typeof(LearnBase.transform) <: Function
@test typeof(LearnBase.transform!) <: Function
@test typeof(LearnBase.getobs) <: Function
@test typeof(LearnBase.getobs!) <: Function
@test typeof(LearnBase.learn) <: Function
@test typeof(LearnBase.learn!) <: Function
@test typeof(LearnBase.update) <: Function
@test typeof(LearnBase.update!) <: Function
@test typeof(LearnBase.grad) <: Function
@test typeof(LearnBase.grad!) <: Function
@test typeof(LearnBase.prox) <: Function
@test typeof(LearnBase.prox!) <: Function
@test typeof(LearnBase.datasubset) <: Function
@test typeof(LearnBase.targets) <: Function
@test typeof(LearnBase.gettarget) <: Function
@test typeof(LearnBase.gettargets) <: Function
# DataView/DataIterator hierarchy, including the Union aliases.
@test LearnBase.DataView <: AbstractVector
@test LearnBase.DataView <: LearnBase.AbstractDataIterator
@test LearnBase.DataView{Int} <: AbstractVector{Int}
@test LearnBase.DataView{Int,Vector{Int}} <: LearnBase.AbstractDataIterator{Int,Vector{Int}}
@test LearnBase.AbstractObsView <: LearnBase.DataView
@test LearnBase.AbstractObsView <: LearnBase.AbstractObsIterator
@test LearnBase.AbstractObsView{Int,Vector{Int}} <: LearnBase.DataView{Int,Vector{Int}}
@test LearnBase.AbstractObsView{Int,Vector{Int}} <: LearnBase.AbstractObsIterator{Int,Vector{Int}}
@test LearnBase.AbstractBatchView <: LearnBase.DataView
@test LearnBase.AbstractBatchView <: LearnBase.AbstractBatchIterator
@test LearnBase.AbstractBatchView{Int,Vector{Int}} <: LearnBase.DataView{Int,Vector{Int}}
@test LearnBase.AbstractBatchView{Int,Vector{Int}} <: LearnBase.AbstractBatchIterator{Int,Vector{Int}}
@test LearnBase.DataIterator <: LearnBase.AbstractDataIterator
@test LearnBase.DataIterator{Int,Vector{Int}} <: LearnBase.AbstractDataIterator{Int,Vector{Int}}
@test LearnBase.ObsIterator <: LearnBase.DataIterator
@test LearnBase.ObsIterator <: LearnBase.AbstractObsIterator
@test LearnBase.ObsIterator{Int,Vector{Int}} <: LearnBase.DataIterator{Int,Vector{Int}}
@test LearnBase.ObsIterator{Int,Vector{Int}} <: LearnBase.AbstractObsIterator{Int,Vector{Int}}
@test LearnBase.BatchIterator <: LearnBase.DataIterator
@test LearnBase.BatchIterator <: LearnBase.AbstractBatchIterator
@test LearnBase.BatchIterator{Int,Vector{Int}} <: LearnBase.DataIterator{Int,Vector{Int}}
@test LearnBase.BatchIterator{Int,Vector{Int}} <: LearnBase.AbstractBatchIterator{Int,Vector{Int}}
# ObsDim singleton hierarchy.
@test LearnBase.ObsDim.Constant <: LearnBase.ObsDimension
@test LearnBase.ObsDim.First <: LearnBase.ObsDimension
@test LearnBase.ObsDim.Last <: LearnBase.ObsDimension
@test LearnBase.ObsDim.Undefined <: LearnBase.ObsDimension
@test typeof(LearnBase.ObsDim.Constant(2)) <: LearnBase.ObsDim.Constant{2}
# IntervalSet
let s = LearnBase.IntervalSet(-1,1)
@test typeof(s) == LearnBase.IntervalSet{Int}
@test typeof(s) <: AbstractSet
# Membership is inclusive at both endpoints and accepts floats.
for x in (-1,0,0.5,1,1.0)
@test x in s
end
# Values just outside the interval, infinities, and NaN are excluded.
for x in (-1-1e-10, 1+1e-10, -Inf, Inf, 2, NaN)
@test !(x in s)
end
# Scalar draws are Float64 and fall inside the interval.
for i=1:10
x = rand(s)
@test typeof(x) == Float64
@test x in s
end
xs = rand(s, 10)
@test typeof(xs) == Vector{Float64}
for x in xs
@test typeof(x) == Float64
@test x in s
end
@test LearnBase.randtype(s) == Float64
end
# Mixed Int/Float endpoints promote to Float64.
let s = LearnBase.IntervalSet(-1,1.0)
@test typeof(s) == LearnBase.IntervalSet{Float64}
@test typeof(s) <: AbstractSet
@test 1 in s
@test length(s) == 1
end
# IntervalSet{Vector}
let s = LearnBase.IntervalSet([-1.,0.], [1.,1.])
@test typeof(s) == LearnBase.IntervalSet{Vector{Float64}}
@test typeof(s) <: AbstractSet
@test LearnBase.randtype(s) == Vector{Float64}
@test typeof(rand(s)) == Vector{Float64}
@test rand(s) in s
# Membership checks every coordinate against its own bounds.
@test [-1, 0] in s
@test !([-1.5,0] in s)
@test !([0,2] in s)
# Length is the number of coordinates, not 1.
@test length(s) == 2
end
# DiscreteSet
let s = LearnBase.DiscreteSet([-1,1])
@test typeof(s) == LearnBase.DiscreteSet{Vector{Int}}
@test typeof(s) <: AbstractSet
for x in (-1, 1, -1.0, 1.0)
@test x in s
end
for x in (0, Inf, -Inf, NaN)
@test !(x in s)
end
for i=1:10
x = rand(s)
@test typeof(x) == Int
@test x in s
end
xs = rand(s, 10)
@test typeof(xs) == Vector{Int}
for x in xs
@test typeof(x) == Int
@test x in s
end
@test LearnBase.randtype(s) == Int
@test length(s) == 2
@test s[1] == -1
end
let s = LearnBase.DiscreteSet([-1,1.0])
@test typeof(s) == LearnBase.DiscreteSet{Vector{Float64}}
@test typeof(s) <: AbstractSet
@test typeof(rand(s)) == Float64
@test typeof(rand(s, 2)) == Vector{Float64}
end
# TupleSet
let s = LearnBase.TupleSet(LearnBase.IntervalSet(0,1), LearnBase.DiscreteSet([0,1]))
@test typeof(s) == LearnBase.TupleSet{Tuple{LearnBase.IntervalSet{Int}, LearnBase.DiscreteSet{Vector{Int}}}}
@test typeof(s) <: AbstractSet
for x in ([0,0], [0.0,0.0], [0.5,1.0])
@test x in s
end
for x in ([0,0.5], [-1,0])
@test !(x in s)
end
@test typeof(rand(s)) == Vector{Float64}
@test typeof(rand(s, 2)) == Vector{Vector{Float64}}
@test typeof(rand(s, Tuple)) == Tuple{Float64,Int}
@test typeof(rand(s, Tuple, 2)) == Vector{Tuple{Float64,Int}}
@test LearnBase.randtype(s) == Vector{Float64}
tot = 0
for (i,x) in enumerate(s)
@test x == s.sets[i]
tot += length(x)
end
@test length(s) == tot
end
# arrays of sets
let s = [LearnBase.IntervalSet(0,1), LearnBase.DiscreteSet([0,1])]
@test typeof(s) == Vector{AbstractSet}
for x in ([0,0], [0.0,0.0], [0.5,1.0])
@test x in s
end
for x in ([0,0.5], [-1,0])
@test !(x in s)
end
@test typeof(rand(s)) == Vector{Float64}
@test typeof(rand(s, 2)) == Vector{Vector{Float64}}
end
end
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | code | 130 | using LearnBase
using SparseArrays
using Test
include("aggmode.jl")
include("obsdim.jl")
include("costs.jl")
include("other.jl")
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.6.1 | f9de1fa263131b629e261ac275eace510357a4fc | docs | 1149 | # LearnBase
[](http://www.repostatus.org/#active)
[](LICENSE.md)
[](https://travis-ci.org/JuliaML/LearnBase.jl)
[](https://ci.appveyor.com/project/Evizero/learnbase-jl/branch/master)
[](https://coveralls.io/github/JuliaML/LearnBase.jl?branch=master)
This package embodies a community effort to provide common types and function-definitions for Machine Learning packages in Julia.
See [src/LearnBase.jl](https://github.com/JuliaML/LearnBase.jl/blob/master/src/LearnBase.jl) for more information
## WARNING
This package has been discontinued. Most functionalities have been moved [MLUtils.jl](https://github.com/JuliaML/MLUtils.jl).
| LearnBase | https://github.com/JuliaML/LearnBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 648 | using Documenter, ModiaBase
makedocs(
#modules = [ModiaBase],
sitename = "ModiaBase",
authors = "Hilding Elmqvist (Mogram) and Martin Otter (DLR-SR)",
format = Documenter.HTML(prettyurls = false),
pages = [
"Home" => "index.md",
"Tutorial" => "Tutorial.md",
"Data Structures" => "DataStructures.md",
"Equation Sorting" => "EquationSorting.md",
"Equation Reduction" => "EquationReduction.md",
"Transformation to ODE System" => "TransformationToODEs.md",
"Nonlinear Equations" => "NonlinearEquations.md",
]
)
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 9628 | """
Module with graph theoretical methods for maximum matching, Pantelides index reduction and Tarjan's strongly connected components.
* Author: Hilding Elmqvist, Mogram AB
* Date: July-August 2016 (rewritten).
* License: MIT
A bipartite graph (E, V) is defined by an array of arrays. Each E-entry has an integer array of indices to V-entries.
Example bipartite graph:
G = [
[3, 5],
[4, 6],
[1, 7, 9],
[2, 8, 9],
[1, 2]
]
"""
module BLTandPantelides
export matching, pantelides!, BLT, checkAssign
using ..BLTandPantelidesUtilities
"Controls logging: set to true for verbose tracing of the matching/Pantelides algorithms"
const log = false   # NOTE: shadows Base.log within this module
"""
    pathFound = augmentPath!(G, i, assign, vColour, eColour, vPassive)

Try to construct an augmenting path starting at E-node `i` (depth-first),
updating `assign` in place. Returns true if the path was found, i.e. if
equation `i` could be matched to a previously unassigned variable.

* `G`: bipartite graph
* `i`: E-node (equation) to match
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned (updated in place)
* `vColour`, `eColour`: visit marks for V- and E-nodes of the current search (updated in place)
* `vPassive`: entry != 0 has the same effect as deleting V-node and corresponding edges

Reference:
Pantelides, C.: The consistent initialization of differential-algebraic systems. SIAM Journal
of Scientific and Statistical Computing, 9(2), pp. 213–231 (1988).
"""
function augmentPath!(G, i, assign, vColour, eColour, vPassive)
    # returns pathFound
    # assign: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
    # i: E-node
    # vPassive: set to != 0 has the same effect as deleting V-node and corresponding edges
    # j: V-node
    if log
        println("augmentPath: equation $i")
    end
    pathFound = false
    eColour[i] = true
    # If a V-node j exists such that edge (i-j) exists and assign[j] == 0
    # -> direct assignment possible, path found immediately.
    for j in G[i]
        if vPassive[j] == 0 && assign[j] == 0
            pathFound = true
            assign[j] = i
            return pathFound
        end
    end
    # For every j such that edge (i-j) exists and j is uncoloured:
    # recursively try to re-assign the equation currently holding j.
    for j in G[i]
        if vPassive[j] == 0 && !vColour[j]
            vColour[j] = true
            k = assign[j]
            pathFound = augmentPath!(G, k, assign, vColour, eColour, vPassive)
            if pathFound
                # The old equation k found another variable; take over j for i.
                assign[j] = i
                return pathFound
            end
        end
    end
    return pathFound
end
"""
    checkAssign(assign, VSizes, VTypes, ESizes, ETypes, equationsInfix, variableNames, A, vPassive=A)

Diagnostic check that every assigned (variable, equation) pair has matching sizes.
Prints an error line per mismatch; does not throw (the `error(...)` call is
intentionally commented out). `VTypes`, `ETypes` and `equationsInfix` are
currently unused by the check.
"""
function checkAssign(assign, VSizes, VTypes, ESizes, ETypes, equationsInfix, variableNames, A, vPassive=A)
    println("Checking assignment")
    assignmentOK = true
    for j in 1:length(assign)
        if vPassive[j] == 0   # only check active V-nodes
            i = assign[j]
            if i > 0 && VSizes[j] != ESizes[i]
                assignmentOK = false
                print("Error: Variable ")
                # NOTE(review): printList (BLTandPantelidesUtilities) has no `newLine`
                # keyword argument; this call would raise a MethodError if reached — verify.
                printList(variableNames, [j], A, newLine=false)
                println(" (($j)) with size=$(VSizes[j]) is assigned in equation (($i)) with size $(ESizes[i])")
            end
        end
    end
    if assignmentOK
        println("Assignment is OK")
    else
        # error("Assignment not OK")
    end
end
"""
    assign = matching(G, M, vActive=fill(true, M))

Compute a maximum matching of the bipartite graph `G`.

* `G`: bipartite graph
* `M`: number of V-nodes
* `vActive`: entry set to false has the same effect as deleting the V-node and its edges
* `return assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned

Reference:
Pantelides, C.: The consistent initialization of differential-algebraic systems. SIAM Journal
of Scientific and Statistical Computing, 9(2), pp. 213–231 (1988).
"""
function matching(G, M, vActive=fill(true, M))
    assignment = zeros(Int, M)
    equMark = Vector{Bool}(undef, length(G))
    varMark = Vector{Bool}(undef, M)
    # augmentPath! expects an Int vector where != 0 means "passive".
    blocked = Int[va ? 0 : 1 for va in vActive]
    # Try to match every equation in turn, resetting the visit marks each time.
    for equ in eachindex(G)
        fill!(equMark, false)
        fill!(varMark, false)
        augmentPath!(G, equ, assignment, varMark, equMark, blocked)
    end
    return assignment
end
# -------------------------------------------------------
"""
    (assign, A, B) = pantelides!(G, M, A)

Perform index reduction with Pantelides algorithm.

* `G`: bipartite graph (updated in place: differentiated equations are appended)
* `M`: number of V-nodes
* `A`: A[j] = if V[k] = der(V[j]) then k else 0 (updated in place)
* `return assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `return A`: A[j] = if V[k] = der(V[j]) then k else 0
* `return B`: B[i] = if E[l] = der(E[i]) then l else 0

Reference:
Pantelides, C.: The consistent initialization of differential-algebraic systems. SIAM Journal
of Scientific and Statistical Computing, 9(2), pp. 213–231 (1988).
"""
function pantelides!(G, M, A)
    assign::Array{Int,1} = fill(0, M)
    B::Array{Int,1} = fill(0, length(G))
    eColour::Array{Bool,1} = fill(false, length(G))
    vColour::Array{Bool,1} = fill(false, M)
    N = length(G)    # current number of equations (grows during differentiation)
    N2 = N           # only the original equations have to be matched
    for k in 1:N2
        pathFound = false
        i = k
        while !pathFound
            # Delete all V-nodes with A[.] != 0 and all their incidence edges from the graph
            # Designate all nodes as "uncoloured"
            # (reuse the mark vectors when their size still fits, otherwise reallocate)
            if length(eColour) == length(G)
                fill!(eColour, false)
            else
                eColour = fill(false, length(G))
            end
            # BUGFIX: was `length(vColour) == length(M)`; length(M) of an Int is 1,
            # so the reuse branch was (almost) never taken and vColour was
            # reallocated on every iteration. Behavior is unchanged, only the
            # intended buffer reuse now works.
            if length(vColour) == M
                fill!(vColour, false)
            else
                vColour = fill(false, M)
            end
            pathFound = augmentPath!(G, i, assign, vColour, eColour, A)
            if !pathFound
                # No augmenting path: differentiate all coloured nodes and retry
                # with the derivative of equation i.
                if log
                    println("\nDifferentiate:")
                end
                # For every coloured V-node j do: introduce the derivative variable M+1
                for j in 1:length(vColour)
                    if vColour[j]
                        M += 1
                        if log
                            println("New variable derivative: var($M) = der($j)")
                        end
                        push!(A, 0)
                        A[j] = M
                        push!(assign, 0)
                    end
                end
                # For every coloured E-node l do: introduce the differentiated equation N+1
                for l in 1:N
                    if eColour[l]
                        N += 1
                        if log
                            println("New equation derivative: equ($N) = DER($l)")
                        end
                        # Create new E-node N
                        push!(G, copy(G[l]))
                        # Create edges from E-node N to all V-nodes j and A[j] such that edge (l-j) exists
                        for m in 1:length(G[l])
                            j = G[l][m]
                            if !(A[j] in G[N])
                                push!(G[N], A[j])
                            end
                        end
                        push!(B, 0)
                        # Set B[l] = N
                        B[l] = N
                    end
                end
                # For every coloured V-node j: keep the matching consistent by
                # assigning der(j) to the derivative of j's equation.
                for j in 1:length(vColour)
                    if vColour[j]
                        if log
                            println("Assigning derivative of variable $(A[j]) to derivative of equation: $(B[assign[j]])")
                        end
                        assign[A[j]] = B[assign[j]]
                    end
                end
                i = B[i]   # continue matching with the differentiated equation
            end
        end
    end
    return assign, A, B
end
# Sentinel stored in number[w] once w has been popped from the stack;
# chosen larger than any real DFS number so the number[w] < number[v] test stays valid.
const notOnStack = 1000000000

"""
    nextnode = strongConnect!(G, assign, v, nextnode, stack, components, lowlink, number)

One depth-first step of Tarjan's algorithm on the equation dependency graph
(edges E-node v -> assign[j] for j in G[v]). Completed strongly connected
components are pushed onto `components`; `stack`, `lowlink` and `number` are
updated in place. Returns the updated DFS counter.

Find minimal systems of equations that have to be solved simultaneously.

Reference:
Tarjan, R. E. (1972), "Depth-first search and linear graph algorithms", SIAM Journal on Computing 1 (2): 146–160, doi:10.1137/0201010
"""
function strongConnect!(G, assign, v, nextnode, stack, components, lowlink, number)
    # println("strongConnect: ", v)
    if v == 0   # unassigned successor: nothing to visit
        return nextnode
    end
    nextnode += 1
    lowlink[v] = number[v] = nextnode
    push!(stack, v)
    for w in [assign[j] for j in G[v]] # for w in the adjacency list of v
        if w > 0 # Is assigned
            if number[w] == 0 # if not yet numbered
                nextnode = strongConnect!(G, assign, w, nextnode, stack, components, lowlink, number)
                lowlink[v] = min(lowlink[v], lowlink[w])
            else
                if number[w] < number[v]
                    # (v, w) is a frond or cross-link
                    # if w is on the stack of points. Always valid since otherwise number[w]=notOnStack (a big number)
                    lowlink[v] = min(lowlink[v], number[w])
                end
            end
        end
    end
    if lowlink[v] == number[v]
        # v is the root of a component
        # start a new strongly connected component
        comp = []
        repeat = true
        while repeat
            # delete w from point stack and put w in the current component
            # println("delete w from point stack and put w in the current component")
            w = pop!(stack)
            number[w] = notOnStack
            push!(comp, w)
            repeat = w != v
        end
        push!(components, comp)
    end
    return nextnode
end
"""
    components = BLT(G, assign)

Find the Block Lower Triangular structure for a bipartite graph `G` with assignment `assign`.

* `G`: bipartite graph
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `return components`: cell array of components. Each component is a list of indices to E-nodes
"""
function BLT(G, assign)
    counter::Int = 0
    vertexStack = []
    sccList = []
    low = zeros(Int, length(G))
    num = zeros(Int, length(G))
    # Start a depth-first search from every still unnumbered equation.
    for vertex in eachindex(G)
        num[vertex] == 0 || continue
        counter = strongConnect!(G, assign, vertex, counter, vertexStack, sccList, low, num)
    end
    return sccList
end
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 10794 | """
Module with utility functions for BLTandPantelides.
* Author: Hilding Elmqvist, Mogram AB
* Date: July-August 2016
* License: MIT
"""
module BLTandPantelidesUtilities
export buildExtendedSystem, addDependencies, buildFullIncidence
export invertDer, invertAssign
export createNames, printList, printAssignedEquations, printSortedEquations, printUnassigned, makeList
# Global switch for translation logging; logModia/loglnModia become no-ops when false.
logTranslation = true
logModia(args...) = if logTranslation; print(stdout, args...) else end      # print without newline
loglnModia(args...) = if logTranslation; println(stdout, args...) else end  # print with newline
"""
    G = buildExtendedSystem(A)

Extend a system according to Pantelides equation (15), i.e. return the
incidence for the function h(x, der(x)).

* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `return G`: bipartite graph with one row `[j, A[j]]` per differentiated variable

Example:

    julia> buildExtendedSystem([5,6,7,8,0,0,0,0,0])
    4-element Array{Any,1}:
     [1,5]
     [2,6]
     [3,7]
     [4,8]
"""
function buildExtendedSystem(A)
    extended = []
    for (index, derIndex) in enumerate(A)
        if derIndex > 0
            push!(extended, [index, derIndex])   # h(x, der(x))
        end
    end
    return extended
end
"""
    newG = addDependencies(G, Vindices)

Return a copy of the bipartite graph `G` where the variable indices
`Vindices` are appended to every equation row.
"""
function addDependencies(G, Vindices)
    augmented = []
    for equ in G
        push!(augmented, vcat(equ, Vindices))
    end
    return augmented
end
"""
    G = buildFullIncidence(n, m)

Build a bipartite graph with full incidence, i.e. all of the n E-nodes refer to all of the m V-nodes.

* `n`: number of E-nodes
* `m`: number of V-nodes
* `return G`: bipartite graph

Example:

    julia> buildFullIncidence(2,3)
    2-element Array{Any,1}:
     [1,2,3]
     [1,2,3]
"""
function buildFullIncidence(n, m)
    incidence = []
    for _ in 1:n
        # Each equation references every variable 1..m (fresh vector per row).
        push!(incidence, collect(1:m))
    end
    return incidence
end
"""
    (orgIndex, derOrder) = invertDer(A)

Invert derivative relationships for variables and equations.

* `A`: A[j] = if V[k] = der(V[j]) then k else 0 (or correspondingly for E-nodes)
* `return orgIndex`: index of original variable or equation
* `return derOrder`: derivative order

Note that invertDer can be used to invert from list of E-nodes to list of V-nodes as well.

Example:

    julia> invertDer([5,6,7,8,10,11,0,0,0,0,0])
    ([1,2,3,4,1,2,3,4,9,1,2],[0,0,0,0,1,1,1,1,0,2,2])
"""
function invertDer(A)
    origin = collect(1:length(A))      # every node starts as its own original
    order = zeros(Int, length(A))      # derivative order, 0 for undifferentiated nodes
    # Forward pass: A lists derivatives at higher indices, so origin/order of j
    # are already final when its derivative A[j] is processed.
    for (j, der) in enumerate(A)
        if der > 0
            order[der] = order[j] + 1
            origin[der] = origin[j]
        end
    end
    return origin, order
end
"""
    (invAssign, unAssigned) = invertAssign(assign, n=length(assign))

Invert assignment relationships for variables and equations.

* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `n`: number of E-nodes
* `return invAssign`: invAssign[i] contains the V-node to which E-node i is assigned or 0 if E-node i not assigned
* `return unAssigned`: unassigned V-nodes

Note that invertAssign can be used to invert from list of E-nodes to list of V-nodes as well.

Example:

    julia> inv = invertAssign([0,0,0,0,1,2,7,4,3,9,8])
    ([5,6,9,8,0,0,7,11,10,0,0],[1,2,3,4])
"""
function invertAssign(assign, n=length(assign))
    inverse = zeros(Int, n)
    free = Int[]
    for (vNode, eNode) in enumerate(assign)
        if eNode > 0
            inverse[eNode] = vNode
        else
            push!(free, vNode)   # collect V-nodes without an equation
        end
    end
    return inverse, free
end
"""
    names = createNames(infixes, A)

Create display names for all nodes, wrapping differentiated nodes in
`der(...)` / `der<order>(...)`.

* `infixes`: infix strings for original variables
* `A`: A[j] = if V[k] = der(V[j]) then k else 0

Example:

    julia> createNames(["x", "y", "w", "z", "", "", "", "", "T"], [5,6,7,8,10,11,0,0,0,0,0])
    x, y, w, z, der(x), der(y), der(w), der(z), T, der2(x), der2(y)
"""
function createNames(infixes, A)
    result = []
    origin, order = invertDer(A)
    for j in eachindex(A)
        base = infixes[origin[j]]
        if order[j] > 0
            # "der" for first derivatives, "der2", "der3", ... for higher orders.
            prefix = order[j] > 1 ? "der" * string(order[j]) : "der"
            push!(result, "$prefix($base)")
        else
            push!(result, base)
        end
    end
    result
end
"""
    printList(infixes, indices, A, vertical=false)

Print list of variables or equations (via logModia/loglnModia).

* `infixes`: infix strings for original variable or equation
* `indices`: indices for the variables or equations to be printed
* `A`: A[j] = if V[k] = der(V[j]) then k else 0 (or correspondingly for E-nodes)
* `vertical`: if vertical then new line separation (and upper-case "DER") else comma separation

Example:

    julia> printList(["x", "y", "w", "z", "", "", "", "", "T"], 1:11, [5,6,7,8,10,11,0,0,0,0,0])
    x, y, w, z, der(x), der(y), der(w), der(z), T, der2(x), der2(y)
"""
function printList(infixes, indices, A, vertical=false)
    (orgIndex, derOrder) = invertDer(A)
    for ind in 1:length(indices)
        j = indices[ind]
        if j > 0   # index 0 entries are silently skipped
            # Separator before every entry except the first.
            if ind > 1
                if vertical
                    loglnModia()
                else
                    logModia(", ")
                end
            end
            # Open der(...) wrapper for differentiated nodes.
            if derOrder[j] > 0
                if vertical
                    logModia("DER")
                else
                    logModia("der")
                end
                if derOrder[j] > 1
                    logModia(derOrder[j])
                end
                logModia("(")
            end
            logModia(infixes[orgIndex[j]])
            if derOrder[j] > 0
                logModia(")")
            end
        end
    end
    loglnModia()
end
"""
    l = makeList(infixes, indices, A, vertical=false)

Like printList, but return the names as a Vector{String} instead of printing.
Differentiated nodes get a "der_"/"DER_"/"der<order>_" prefix (underscore
instead of parentheses, see the commented-out "(" / ")" variants).
"""
function makeList(infixes, indices, A, vertical=false)
    l = String[]
    (orgIndex, derOrder) = invertDer(A)
    for ind in 1:length(indices)
        s = ""
        j = indices[ind]
        if j > 0   # index 0 entries are skipped entirely
            if derOrder[j] > 0
                if vertical
                    s = "DER"
                else
                    s = "der"
                end
                if derOrder[j] > 1
                    s *= string(derOrder[j])
                end
                s *= "_" # "("
                # s *= "this."
            end
            s *= string(infixes[orgIndex[j]])
            if derOrder[j] > 0
                # s *= ")"
            end
            push!(l, s)
        end
    end
    return l
end
# Column widths for the tabular output of printAssignedEquations.
const variableColumn = 5    # width of the equation-number column
const equationColumn = 50   # width of the assigned-variable column

"""
    printAssignedEquations(equations, variables, indices, assign, A, B)

Print assigned equations, one line per equation:
`<equation index>: <assigned variable>: [DER(...)] <equation text>`.

* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `indices`: indices for the equations to be printed
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[i]) then l else 0

Example:
See testBLTandPantelides.testPendulum
"""
function printAssignedEquations(equations, variables, indices, assign, A, B)
    (orgIndexVar, derOrderVar) = invertDer(A)
    (orgIndexEqu, derOrderEqu) = invertDer(B)
    (assignedVar, unAssigned) = invertAssign(assign)
    for i in indices
        # Column 1: equation index.
        logModia(lpad(string(i) * ":", variableColumn, " "))
        if i <= length(assignedVar)
            j = assignedVar[i]
        else
            j = 0
        end
        # Column 2: the variable assigned to this equation, wrapped in der(...) if differentiated.
        if j > 0
            if derOrderVar[j] == 1
                prefix = "der("
                suffix = ")"
            elseif derOrderVar[j] > 1
                prefix = "der" * string(derOrderVar[j]) * "("
                suffix = ")"
            else
                prefix = ""
                suffix = ""
            end
            logModia(lpad(prefix * string(variables[orgIndexVar[j]]) * suffix, equationColumn, " "))
        else
            logModia(" "^equationColumn)   # unassigned: leave the column blank
        end
        logModia(": ")
        # Column 3: the equation itself, wrapped in DER(...) if it is a differentiated equation.
        if derOrderEqu[i] == 1
            prefix = "DER( "
            suffix = " )"
        elseif derOrderEqu[i] > 1
            prefix = "DER" * string(derOrderEqu[i]) * "( "
            suffix = " )"
        else
            prefix = ""
            suffix = ""
        end
        equ = equations[i] # orgIndexEqu[i]]
        equ = string(equ)
        equ = replace(equ, "\n" => " ")
        # Collapse repeated blanks until the string no longer changes.
        # NOTE(review): replacing " " with " " is a no-op, so this loop exits after
        # one iteration; likely intended "  " => " " — verify against the original source.
        oldequ = ""
        while oldequ != equ
            oldequ = equ
            equ = replace(oldequ, " " => " ")
        end
        loglnModia(prefix * equ * suffix)
    end
end
"""
    printSortedEquations(equations, variables, components, assign, A, B)

Print sorted equations, one component at a time; strongly connected
components with more than one equation are enclosed in `[` ... `]`.

* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `components`: cell array of components. Each component is a list of indices to E-nodes
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[i]) then l else 0

Example:
See testBLTandPantelides.testPendulum
"""
function printSortedEquations(equations, variables, components, assign, A, B)
    loglnModia("[assigned variable]: [differentiation] equation")
    loglnModia("Strongly connected components are enclosed in []")
    for c in components
        if length(c) > 1   # algebraic loop: mark with brackets
            loglnModia("[")
        end
        printAssignedEquations(equations, variables, c, assign, A, B)
        if length(c) > 1
            loglnModia("]")
        end
    end
end
"""
    printUnassigned(equations, variables, assign, A, B, vActive=[])

Print unassigned variables and equations.

* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[i]) then l else 0
* `vActive`: if nonempty, only variables with vActive[v] == true are listed

Example:
See testBLTandPantelides.testPendulum
"""
function printUnassigned(equations, variables, assign, A, B, vActive=[])
    # Unassigned variables from assign; unassigned equations from the double inversion.
    (invAssign, unAssignedVariables) = invertAssign(assign, length(B))
    (ass, unAssignedEquations) = invertAssign(invAssign, length(assign))
    if vActive != []
        # Don't print not active variables
        unass = []
        for v in unAssignedVariables
            if vActive[v]
                push!(unass, v)
            end
        end
        unAssignedVariables = unass
    end
    loglnModia("\nUnassigned variables:")
    printList(variables, unAssignedVariables, A)
    loglnModia("\nUnassigned equations:")
    printList(equations, unAssignedEquations, B, true)
end
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 3919 | """
Module for differentiation of expressions with regards to time.
* Developer: Hilding Elmqvist, Mogram AB
* First version: December 2020
* License: MIT (expat)
"""
module Differentiate
using Base.Meta: isexpr
using DiffRules
using Unitful
export derivative
"""
    der = derivative(ex, timeInvariants=[])

Form the derivative of the expression `ex` with regards to time. Time derivatives
of variables are denoted `der(v)`.

* `ex`: Expression to differentiate
* `timeInvariants`: Vector of time invariant variables, i.e. with time derivative equal to zero.
* `return der`: Time derivative of `ex`
"""
# Fallback: constants (numbers, literals) have zero time derivative.
derivative(ex, timeInvariants=[]) = 0#u"1/s"
# Symbols: der(time) = 1; time-invariant symbols have derivative 0; otherwise produce :(der(s)).
derivative(s::Symbol, timeInvariants=[]) = if s == :time; 1 elseif s in timeInvariants; 0 else :(der($s)) end # 0u"1/s"
# Symbolic time differentiation of an expression tree. Handles: der(...) calls
# (differentiated once more), sums/differences, products (product rule),
# unary/binary functions via DiffRules, member access (:.), if/elseif
# (condition kept undifferentiated), array indexing (:ref, indices kept),
# and falls back to mapping over the arguments of any other expression head.
function derivative(ex::Expr, timeInvariants=[])
    if isexpr(ex, :call) && ex.args[1] == :der
        # Higher derivative: wrap the existing der(...) call once more.
        :(der($ex))
    elseif isexpr(ex, :call)
        func = ex.args[1]
        arguments = ex.args[2:end]
        derArguments = [derivative(a, timeInvariants) for a in arguments]
        if func in [:+, :-]
            # Linearity: keep only argument derivatives that are nonzero.
            ders = [d for d in derArguments if d != 0]
            if length(ders) == 0 # der(1+2+3) == 0, der(1-2-3) == 0
                0
            elseif func == :- && length(derArguments) > 1 && length(ders) == 1 && derArguments[1] != 0 # der(x-2-3) == der(x)
                derArguments[1]
            else
                Expr(:call, func, ders...)
            end
        elseif func in [:*]
            # der(e1 * e2 * e3 + ...) = der(e1)*e2*e3 + e1*der(e2)*e3 + e1*e2*der(e3) + ...
            diff = Expr(:call, :+)
            for i in 1:length(arguments)
                terms = []
                for j in 1:length(arguments)
                    term = if i == j; derivative(arguments[j], timeInvariants) else arguments[j] end
                    if term == 0
                        # A zero factor kills the whole product term.
                        terms = []
                        break
                    else
                        push!(terms, term)
                    end
                end
                if length(terms) > 0
                    product = Expr(:call, :*, terms...)
                    push!(diff.args, product)
                end
            end
            if length(diff.args) == 1 # Only + operator
                diff = 0
            elseif length(diff.args) == 2 # Only one + term, remove +
                diff = diff.args[2]
            else
                diff
            end
        elseif length(arguments) <= 2
            # Chain rule via DiffRules: d holds the partial derivative expression(s).
            d = DiffRules.diffrule(:Base, ex.args[1], ex.args[2:end]...)
            if length(arguments) > 1
                # Binary function: sum of partial * der(argument), with one(x)/-one(x)
                # partials simplified to +-der(argument).
                sum = []
                for i in 1:length(arguments)
                    if d[i] == :(one($(arguments[i])))
                        push!(sum, :($(derArguments[i])))
                    elseif d[i] == :(-one($(arguments[i])))
                        push!(sum, :(-$(derArguments[i])))
                    elseif derArguments[i] != 0
                        push!(sum, :($(d[i]) * $(derArguments[i])))
                    end
                end
                if length(sum) > 1
                    Expr(:call, :+, sum...)
                elseif length(sum) == 1
                    sum[1]
                else
                    0
                end
            else
                :($d * $(derArguments[1]))
            end
        end
    elseif isexpr(ex, :.)
        # Member access (a.b): treated like a variable reference.
        if ex in timeInvariants; 0 else :(der($ex)) end
    elseif isexpr(ex, :if) || isexpr(ex, :elseif)
        Expr(ex.head, ex.args[1], [derivative(e, timeInvariants) for e in ex.args[2:end]]...) # Don't differentiate condition
    elseif isexpr(ex, :ref)
        Expr(ex.head, derivative(ex.args[1], timeInvariants), ex.args[2:end]...) # Don't differentiate indices
    else
        # For example: =, vect, hcat
        Expr(ex.head, [derivative(e, timeInvariants) for e in ex.args]...)
    end
end
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 26954 | # License for this file: MIT (expat)
# Copyright 2020-2021, DLR Institute of System Dynamics and Control
# Author: Martin Otter, DLR-SR
#=
Algorithm to exactly process linear Integer equations and hereby removing underdeterminism, overdeterminism,
and state constraints that are not structurally visible, as well as eliminating simple equations.
The following is exported:
- [`simplifyLinearIntegerEquations`](@ref)
- [`printSimplifiedLinearIntegerEquations`](@ref)
and the following utility functions that can be applied on the return
argument `vProperty` of [`simplifyLinearIntegerEquations`](@ref):
- `isNotEliminated(vProperty, v)` - if variable v is not eliminated.
- `isEliminated(vProperty, v)` - if variable v is eliminated.
- `isZero(vProperty,v)` - if eliminated variable v is zero.
- `isAlias(vProperty,v)` - if eliminated variable v is an alias variable v = v_alias
- `isNegAlias(vProperty,v)` - if eliminated variable v is a negative alias variable v = -v_alias
- `alias(vProperty,v)` - alias variable v_alias of eliminated variable v (v = v_alias).
- `negAlias(vProperty,v)` - negated alias variable v_alias of eliminated variable v (v = -v_alias).
# Main developer
[Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en)
=#
import OrderedCollections
export simplifyLinearIntegerEquations!
export printSimplifiedLinearIntegerEquations
export isNotEliminated, isEliminated, isZero, isAlias, isNegAlias, alias, negAlias
# Inquire properties of eliminated variables (vEliminated is returned from simplifyLinearIntegerEquations(..)
const IS_PRESENT = typemin(Int) # Variable is not eliminated
isNotEliminated(vProperty::Vector{Int}, v::Int) = vProperty[v] == IS_PRESENT
isEliminated( vProperty::Vector{Int}, v::Int) = vProperty[v] != IS_PRESENT
isZero( vProperty::Vector{Int}, v::Int) = vProperty[v] == 0
isAlias( vProperty::Vector{Int}, v::Int) = vProperty[v] > 0
isNegAlias( vProperty::Vector{Int}, v::Int) = vProperty[v] < 0 && vProperty[v] != IS_PRESENT
alias( vProperty::Vector{Int}, v::Int) = vProperty[v]
negAlias( vProperty::Vector{Int}, v::Int) = -vProperty[v]
"""
    getFirstActiveIndex(Gint, vActive, i::Int)

Return the first index j of equation i, such that vActive[ Gint[i][j] ] == true.
If there is no such index, return zero.
"""
function getFirstActiveIndex(Gint, vActive::Vector{Bool}, i::Int)::Int
    # Scan equation i left-to-right and return at the first still-active variable.
    # (Removed the unused local `found` of the previous version; behavior unchanged.)
    for (k, vk) in enumerate(Gint[i])
        if vActive[vk]
            return k
        end
    end
    return 0   # equation i contains no active variable
end
"""
    swapEquations!(Gint, eInt, GcInt, GcInt2, i, j)

Swap equations i and j in place, in all four parallel equation arrays.
"""
function swapEquations!(Gint, eInt::Vector{Int}, GcInt, GcInt2, i::Int, j::Int)::Nothing
    # Tuple assignment swaps each pair without explicit temporaries.
    Gint[i],   Gint[j]   = Gint[j],   Gint[i]
    GcInt[i],  GcInt[j]  = GcInt[j],  GcInt[i]
    GcInt2[i], GcInt2[j] = GcInt2[j], GcInt2[i]
    eInt[i],   eInt[j]   = eInt[j],   eInt[i]
    return nothing
end
"""
    cj = find_v(eq_i, eqc_i, vj)

Return coefficient of variable vj in eq_i or zero, if vj not in eq_i.
"""
function find_v(eq_i, eqc_i, vj)::Int
    # eq_i and eqc_i are parallel arrays: position of vj gives its coefficient.
    pos = findfirst(isequal(vj), eq_i)
    return pos === nothing ? 0 : eqc_i[pos]
end
# Reusable scratch storage for eliminateVariable!: the new row of equation i is
# assembled in these vectors and then swapped with the equation's own storage,
# avoiding two fresh allocations per elimination step.
mutable struct Buffer
    Gint_i::Vector{Int}    # scratch: variable indices of the rebuilt equation i
    GcInt2_i::Vector{Int}  # scratch: corresponding Integer coefficients
    Buffer() = new(Int[], Int[])
end
"""
    eliminateVariable!(Gint, GcInt2, k, i, pivot, vj, oldPivot, buffer)

Eliminate variable vj from equation i and store the result as equation i.
If equation i does not contain variable vj, the equation is not changed.

If equation i contains variable vj, then multiply equation i with pivot and
equation k with the coefficient of variable vj in equation i.
Subtract the latter from the former equation and divide by oldPivot
(it is guaranteed that the division has no remainder — fraction-free
Bareiss-style Gaussian elimination over the Integers).

If Gc[i,j] would be the coefficient of variable j in equation i, then the
following operation is carried out:

    pivot_i = Gc[i,vj]
    for j = 1:nv   # nv: number of variables
        Gc[i,j] = div(pivot * Gc[i,j] - pivot_i * Gc[k,j], oldPivot)  # guaranteed no remainder
    end

If one of the coefficients of equation i, so GcInt2[i], becomes zero, then this
coefficient and the corresponding variable is removed from the equation. It is
therefore guaranteed that after the operation, no coefficient of the new
equation i is zero, so no element of GcInt2[i][:] is zero and no element of
Gint[i][:] is vj.
"""
function eliminateVariable!(Gint, GcInt2, k::Int, i::Int, pivot::Int, vj::Int, oldPivot::Int, buffer::Buffer)::Nothing
    # Return if equation i does not contain variable vj;
    # otherwise remove vj from equation i and remember its coefficient.
    vj_present=false
    c_vj = 0 # Coefficient of vj
    for (index, value) in enumerate(Gint[i])
        if value == vj
            vj_present = true
            c_vj = GcInt2[i][index]
            deleteat!(Gint[i] ,index)
            deleteat!(GcInt2[i],index)
            break
        end
    end
    if !vj_present
        return nothing
    end
    #println("... Eliminate variable $vj from equation $i = ", Gint[i])
    #println("    Gint[$k] = ", Gint[k], ", GcInt2[$k] = ", GcInt2[k], ", pivot = $pivot, c_vj = $c_vj")
    # Union of relevant variables
    eq_i = Gint[i]
    eq_k = Gint[k]
    eqc_i = GcInt2[i]
    eqc_k = GcInt2[k]
    v_all = union(eq_i, eq_k)
    #println("v_all = $v_all")
    # Eliminate variable vj from equation i:
    # build the new sparse row in the scratch buffers, dropping zero coefficients.
    empty!(buffer.Gint_i)
    empty!(buffer.GcInt2_i)
    for v in v_all
        ci = find_v(eq_i, eqc_i, v)
        ck = find_v(eq_k, eqc_k, v)
        ci_new = div(pivot*ci - c_vj*ck, oldPivot)   # exact by the Bareiss invariant
        if ci_new != 0
            push!(buffer.Gint_i , v)
            push!(buffer.GcInt2_i, ci_new)
        end
    end
    # Swap the buffers with equation i's storage (reuses the old vectors as the next scratch).
    temp1 = Gint[i]
    temp2 = GcInt2[i]
    Gint[i] = buffer.Gint_i
    GcInt2[i] = buffer.GcInt2_i
    buffer.Gint_i = temp1
    buffer.GcInt2_i = temp2
    return nothing
end
"""
    rk = upperTrapezoidal!(Gint, eInt, GcInt, GcInt2, vActive, ieq)

Transform equations ieq..end to upper trapezoidal form (fraction-free Gaussian
elimination over the Integers) and return the rank `rk`
(`ieq <= rk <= length(Gint)`). All arrays except `vActive` are updated in place.
"""
function upperTrapezoidal!(Gint, eInt::Vector{Int}, GcInt, GcInt2, vActive::Vector{Bool}, ieq::Int)::Int
    j = 0
    e = 0
    vj = 0
    pivot = 0
    oldPivot = 1   # Bareiss: previous pivot; 1 for the first elimination step
    neq = length(Gint)
    buffer = Buffer()
    for k = ieq:neq
        # Search for a pivot in equations k:neq
        # (prefer the sparsest equations to limit fill-in)
        j = 0
        e = 0
        # Inspect only equations with one variable
        for k2 = k:neq
            if length(Gint[k2]) == 1
                j = getFirstActiveIndex(Gint, vActive, k2)
                if j > 0
                    e = k2
                    break
                end
            end
        end
        if j == 0
            # Inspect only equations with two variables
            for k2 = k:neq
                if length(Gint[k2]) == 2
                    j = getFirstActiveIndex(Gint, vActive, k2)
                    if j > 0
                        e = k2
                        break
                    end
                end
            end
        end
        if j == 0
            # Inspect all equations
            for k2 = k:neq
                j = getFirstActiveIndex(Gint, vActive, k2)
                if j > 0
                    e = k2
                    break
                end
            end
        end
        if j > 0
            # Extract infos
            pivot = GcInt2[e][j]
            vj = Gint[e][j]
            deleteat!(Gint[e] , j) # Remove variable vj in equation e (is added later to the front)
            deleteat!(GcInt2[e], j)
            # Swap equations k and e
            if e != k
                swapEquations!(Gint, eInt, GcInt, GcInt2, k, e)
            end
        else
            # no pivot found: equations k:neq have no active variables (rank = k-1)
            return k-1
        end
        # Eliminate variable vj from equations k+1:neq
        for i = k+1:neq
            eliminateVariable!(Gint, GcInt2, k, i, pivot, vj, oldPivot, buffer)
        end
        oldPivot = pivot
        # Add variable vj in equation k at the front
        pushfirst!(Gint[k] , vj)
        pushfirst!(GcInt2[k], pivot)
        #println("... k = $k")
        #println(" Gint = ", Gint)
        #println(" GcInt2 = ", GcInt2)
    end
    return neq
end
"""
    AvarRev = revertAssociation(Avar::Vector{Int})

Revert the association Vector `Avar[i] = j`, such that
`AvarRev[j] = i` (`Avar[i] = 0` is allowed and is ignored).
"""
function revertAssociation(Avar::Vector{Int})::Vector{Int}
    reverted = zeros(Int, length(Avar))
    for i in eachindex(Avar)
        j = Avar[i]
        # Zero entries carry no association and are skipped.
        j == 0 || (reverted[j] = i)
    end
    return reverted
end
# True if any variable of eq_k (excluding its leading variable, see caller) was already eliminated.
needsVariableElimination(vProperty, eq_k::Vector{Int}) = any(v -> isEliminated(vProperty, v), eq_k)
"""
    equationRemoved = simplifyOneEquation!(eq_k, eqc_k, AvarRev, vEliminated, vProperty)

Simplify one equation as much as possible by substituting already eliminated
variables (zero / alias / negated alias). If the simplified equation reduces to
`c*v = 0` or `c*v1 +- c*v2 = 0` for a variable v whose derivative does not
appear elsewhere (`AvarRev[v] == 0`), the equation is emptied, v is recorded in
`vEliminated` and its property in `vProperty`.
Return true, if equation is removed, otherwise return false.
"""
function simplifyOneEquation!(eq_k, eqc_k, AvarRev, vEliminated, vProperty)::Bool
    # The leading variable eq_k[1] is the one solved from this equation;
    # only positions 2:end are substitution candidates.
    while length(eq_k) > 1 && needsVariableElimination(vProperty, eq_k[2:end])
        # Equation has more as one variable and at least one of the variables is zero, alias or negative alias variable
        for j = 2:length(eq_k)
            v_j = eq_k[j]
            # Eliminate variable if possible
            if isNotEliminated(vProperty, v_j)
                continue
            elseif isZero(vProperty, v_j)
                # Zero variable: drop the term. Restart scan (indices shifted).
                deleteat!(eq_k , j)
                deleteat!(eqc_k, j)
                break
            else # alias or negAlias
                if isAlias(vProperty, v_j)
                    v_add = alias(vProperty, v_j)
                    vc_add = eqc_k[j]
                else
                    v_add = negAlias(vProperty, v_j)
                    vc_add = -eqc_k[j]
                end
                # Check whether v_add appears in equation
                isPresent = false
                for i = 2:length(eq_k)
                    if i != j && eq_k[i] == v_add
                        # Merge coefficients; if they cancel, remove both terms,
                        # otherwise remove only the substituted one.
                        isPresent = true
                        eqc_k[i] += vc_add
                        if eqc_k[i] == 0
                            # deleteat! requires sorted index pairs.
                            if i < j
                                deleteat!(eq_k , [i,j])
                                deleteat!(eqc_k, [i,j])
                            else
                                deleteat!(eq_k , [j,i])
                                deleteat!(eqc_k, [j,i])
                            end
                        else
                            deleteat!(eq_k , j)
                            deleteat!(eqc_k, j)
                        end
                        break
                    end
                end
                if isPresent
                    break
                else
                    # v_add not yet in the equation: replace v_j by it in place.
                    eq_k[j] = v_add
                    eqc_k[j] = vc_add
                end
            end
        end
    end
    # Check if equation can be removed
    vk = eq_k[1]
    if AvarRev[vk] == 0
        # vk is not a derivative of a variable -> it can be removed
        if length(eq_k) == 1 # equation k is a function of one variable
            # Variable is zero -> remove equation
            push!(vEliminated, vk)
            vProperty[vk] = 0
            empty!(eq_k)
            empty!(eqc_k)
            return true
        elseif length(eq_k) == 2 && abs(eqc_k[1]) == abs(eqc_k[2]) # equation k is a function of alias variables
            if eqc_k[1] > 0 && eqc_k[2] < 0 ||
               eqc_k[1] < 0 && eqc_k[2] > 0
                # Alias variable -> remove equation
                push!(vEliminated, vk)
                vProperty[vk] = eq_k[2]
                empty!(eq_k)
                empty!(eqc_k)
            else
                # Negative alias variable -> remove equation
                push!(vEliminated, vk)
                vProperty[vk] = -eq_k[2]
                empty!(eq_k)
                empty!(eqc_k)
            end
            return true
        end
    end
    return false
end
"""
    ne = printLinearIntegerEquations(Gint, eInt, GcInt, var_name; rk=(0,0,0))

Print the non-empty linear Integer equations `Gint` with coefficients `GcInt`
in the form `0 = c1*v1 + c2*v2 + ...`, each line prefixed with the original
equation number `eInt[i]`. After equation index `rk[1]`, `rk[2]` or `rk[3]`
a separator line (`rk1`/`rk2`/`rk3`) is printed. Returns the number of
printed (non-empty) equations.
"""
function printLinearIntegerEquations(Gint, eInt, GcInt, var_name::Function; rk=(0,0,0))::Int
    nPrinted = 0
    for i in eachindex(Gint)
        eq = Gint[i]
        if !isempty(eq)
            nPrinted += 1
            # Build the equation text in a buffer
            buf = IOBuffer()
            print(buf, "0 = ")
            coeff = GcInt[i][1]
            if coeff == -1
                print(buf, "-")
            elseif coeff != 1
                print(buf, coeff)
            end
            print(buf, var_name(eq[1]))
            for j in 2:length(eq)
                coeff = GcInt[i][j]
                print(buf, coeff > 0 ? " + " : " - ")
                if abs(coeff) != 1
                    print(buf, abs(coeff), "*")
                end
                print(buf, var_name(eq[j]))
            end
            println(lpad(string(eInt[i]), 8), ": ", String(take!(buf)))
        end
        # Separator markers for the three trapezoidal transformation ranks
        if i == rk[1]
            println(" ---------- rk1")
        elseif i == rk[2]
            println(" ---------- rk2")
        elseif i == rk[3]
            println(" ---------- rk3")
        end
    end
    return nPrinted
end
"""
(vEliminated, vProperty, nvArbitrary, redundantEquations) =
simplifyLinearIntegerEquations!(G, eInt, GcInt, Avar)
Remove singularities of the **linear Integer equations** of a DAE system and simplify these equations as much as possible.
The following **singularities** are fixed by this function:
- Equations that are redundant are removed.
- State constraint that are not structurally visible are transformed
to a structurally visible form so that structural index reduction algorithms,
such as the Pantelides algorithm, can handle these state constraints.
- Variables that can have an arbitrary value and do not appear in the remaining set of equations
(so must be solved from the linear Integer equations) are set to zero.
The calling function should print a warning message in such a case (if `nvArbitrary > 0`).
The following **simplifications** are performed recursively (`c` is an arbitrary Integer literal):
- An equation `c*v1 = 0` is removed and `v1` is replaced by zero in all occurrences
of the linear Integer equations and all expressions with `v1` are simplified.
- An equation `c*v2 + c*v3 = 0` or `c*v2 - c*v3 = 0` is removed and
`v2` is replaced by `v3` or by `-v3` in all occurrences of the linear Integer equations
and all expressions with `v2` and `v3` are simplified.
Note:
- Input arguments `G, eInt, GcInt` are changed by a call of this function and
represent the transformed linear Integer equations.
The Abstract Syntax Tree (AST) of all these equations must be
replaced by new AST equations defined by the returned arguments.
- The number of operations in the transformed linear Integer equations is
guaranteed to be not larger as the original equations - with exception
of the structurally visible state constraints, where the number of operations
could be larger.
- Potential states (variables appearing differentiated) and derivatives
of potential states are **not** eliminated by this function.
# Input arguments
- `G`: Bi-partite graph/incidence matrix of all equations. Typically: `G::Vector{Vector{Int}}`
On entry, `G` is the original graph. On exit, the linear Integer equations of `G` are
typically changed.
- `eInt::Vector{Int}`: `G[eInt]` are the linear Integer equations of `G`.
On exit, `eInt` is reordered. If `length(G[eInt[i]]) = 0`, then the corresponding equation is eliminated.
- `GcInt`: `GcInt[i]` is the vector of Integer coefficients that are associated with the variables
of `G[eInt[i]]`. Typically: `GcInt::Vector{Vector{Int}}`.
On exit, `GcInt` is reordered according to `eInt` and typically most of the coefficients have been changed.
- `Avar::Vector{Int}`: Defines the derivatives of the variables:
`A[i] = if der(v_i) == v_k then k else 0`. This vector is not changed by `simplifyLinearIntegerEquations!`.
# Output arguments
- `vEliminated::Vector{Int}`: Variables that are eliminated.
- `vProperty::Vector{Int}`: Defines the properties of the eliminated variables.
These properties can be inquired with the following exported functions:
o `isNotEliminated(vProperty, v)` - if variable v is not eliminated.
o `isEliminated(vProperty, v)` - if variable v is eliminated.
o `isZero(vProperty,v)` - if eliminated variable v is zero.
o `isAlias(vProperty,v)` - if eliminated variable v is an alias variable v = v_alias
o `isNegAlias(vProperty,v)` - if eliminated variable v is a negative alias variable v = -v_alias
o `alias(vProperty,v)` - alias variable v_alias of eliminated variable v (v = v_alias).
o `negAlias(vProperty,v)` - negated alias variable v_alias of eliminated variable v (v = -v_alias).
- `nvArbitrary::Int`: Variables `vEliminated[1:nvArbitrary]` are variables that can be arbitrarily set and that
have been set to zero.
- `redundantEquations::Vector{Int}`: G[redundantEquations] are redundant equations that have been removed.
# Algorithm
The algorithm to remove the singularities is sketched in the paper:
- Otter, Elmqvist (2017):
[Transformation of Differential Algebraic Array Equations to Index One Form](http://www.ep.liu.se/ecp/132/064/ecp17132565.pdf),
section 5. Modelica'2017 Conference.
An error in this algorithm was fixed, the algorithm was improved to handle large equation systems
and to simplify equations as much as possible.
# Main developer
[Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en)
"""
function simplifyLinearIntegerEquations!(G, eInt::Vector{Int}, GcInt, Avar::Vector{Int}; log::Bool=false, var_name::Function = v -> "???")
    nv = length(Avar)

    # Revert derivative association vector Avar (AvarRev[k] = i, if der(v_i) == v_k, else 0)
    AvarRev = revertAssociation(Avar)

    # Construct vActive1 and vActive2 for first and second transformation to upper trapezoidal form
    # (vActiveX[v] = false, if variable shall be ignored when transforming to upper trapezoidal form)
    # vActive1[v] = true, if variable is not a potential state (does not appear differentiated)
    # and variable v must be solved from the linear Integer equations
    # vActive2[v] = true, if variable is not a potential state (does not appear differentiated)
    vActive2 = [v==0 for v in Avar]
    vActive1 = copy(vActive2)
    for e in setdiff(collect(1:length(G)), eInt) # Equations that are not linear Integer equations
        for v in G[e]
            # Variable also appears in a non-Integer equation -> it can be solved elsewhere
            vActive1[v] = false
        end
    end

    # Save all variables with vActive1[v] = true in vShouldBeSolved
    vShouldBeSolved = findall(vActive1)

    # Construct the bi-partite graph of the linear Integer equations
    # (a deepcopy of the relevant part of G)
    Gint = deepcopy(G[eInt])

    if log
        println("\n+++ Remove singularities")
        println(" Linear Integer equations:")
        printLinearIntegerEquations(Gint, eInt, GcInt, var_name)
        println(" Unknown variables:")
        unknowns = collect(OrderedCollections.OrderedSet([v for e in Gint for v in e]))
        for v in unknowns
            print(lpad(string(v),8), ": ", var_name(v))
            if v in vShouldBeSolved
                print(" (to be solved by equations)\n")
            elseif Avar[v] > 0
                print(" (potential state)\n")
            else
                print("\n")
            end
        end
    end

    # Construct a deepcopy of GcInt (this copy is modified by the transformation to upper trapezoidal form)
    GcInt2 = deepcopy(GcInt)

    # First transformation to upper trapezoidal form:
    # Diagonal entries: Variables v that must be solved from the linear Integer equations
    #println("\n... before upperTrapezoidal!:")
    #println(" Gint = $Gint")
    #println(" GcInt2 = $GcInt2")
    rk1 = upperTrapezoidal!(Gint, eInt, GcInt, GcInt2, vActive1, 1)

    # Eliminate variables that must be solved from the linear Integer equations
    # but are not diagonal entries (= variables can be arbitrarily set)
    vSolved = [Gint[i][1] for i = 1:rk1]
    vEliminated = setdiff(vShouldBeSolved, vSolved)
    vProperty = fill(IS_PRESENT, nv)
    for v in vEliminated
        # Arbitrary variables are fixed to zero
        vProperty[v] = 0
    end
    nvArbitrary = length(vEliminated)

    if log
        println("\n After first transformation to trapezoidal form (eliminate variables that must be solved):")
        printLinearIntegerEquations(Gint, eInt, GcInt2, var_name, rk=(rk1,0,0))
    end

    # Second transformation to upper trapezoidal form: Ignore potential states
    rk2 = upperTrapezoidal!(Gint, eInt, GcInt, GcInt2, vActive2, rk1+1)
    if log
        println("\n After second transformation to trapezoidal form (ignore potential states):")
        printLinearIntegerEquations(Gint, eInt, GcInt2, var_name, rk=(rk1,rk2,0))
    end

    # Third transformation to upper trapezoidal form: All remaining variables are potential states
    fill!(vActive2, true)
    rk3 = upperTrapezoidal!(Gint, eInt, GcInt, GcInt2, vActive2, rk2+1)
    #println("\n... after upperTrapezoidal!:")
    #println(" Gint = $Gint")
    #println(" GcInt2 = $GcInt2")
    #println(" rk1 = $rk1, rk2 = $rk2, rk3 = $rk3")
    if log
        println("\n After third transformation to trapezoidal form (eliminate potential states):")
        printLinearIntegerEquations(Gint, eInt, GcInt2, var_name, rk=(rk1, rk2, rk3))
    end

    # Simplify equations from equation rk2 upto equation 1
    # (backwards, so eliminations propagate to earlier equations)
    for k = rk2:-1:1
        simplifyOneEquation!(Gint[k], GcInt2[k], AvarRev, vEliminated, vProperty)
    end

    if log
        println("\n After alias elimination:")
        printLinearIntegerEquations(Gint, eInt, GcInt2, var_name, rk=(rk1, rk2, rk3))
    end
    #println("\n... after equation simplification:")
    #println(" Gint = $Gint")
    #println(" GcInt2 = $GcInt2")
    #println(" vEliminated = ", vEliminated)
    #println(" vProperty[vEliminated] = ", vProperty[vEliminated])

    # Update GcInt (use GcInt2[i] if it has not more unknowns as the corresponding GcInt[i] equation)
    equationsRemoved = false
    for i = 1:rk2
        if length(GcInt2[i]) <= length(GcInt[i])
            # Use transformed equation
            GcInt[i] = GcInt2[i]
        else
            # Use original equation
            Gint[i] = G[ eInt[i] ]
            equationRemoved = simplifyOneEquation!(Gint[i], GcInt[i], AvarRev, vEliminated, vProperty)
            equationsRemoved = equationsRemoved || equationRemoved
        end
    end

    if equationsRemoved
        # Simplify equations, until no equation is removed anymore
        # (each removal may enable further eliminations in other equations)
        equationRemoved = false
        while true
            for i = 1:rk2
                if length(Gint[i]) > 0
                    equationRemoved = simplifyOneEquation!(Gint[i], GcInt[i], AvarRev, vEliminated, vProperty)
                    if equationRemoved
                        break
                    end
                end
            end
            if !equationRemoved
                break
            end
        end
    end

    # For constraint equations and for removed equations, use transformed equations
    for i = rk2+1:length(eInt)
        GcInt[i] = GcInt2[i]
    end

    if log
        println("\n Final, simplified equations:")
        # NOTE(review): prints GcInt2, although for i <= rk2 the accepted
        # coefficients are stored in GcInt above -- confirm GcInt was not intended here.
        printLinearIntegerEquations(Gint, eInt, GcInt2, var_name, rk=(rk1, rk2, rk3))
    end

    # Update G (write the transformed linear Integer equations back)
    for (i,e) in enumerate(eInt)
        G[e] = Gint[i]
    end

    # Equations beyond rank rk3 have become empty -> they are redundant
    redundantEquations = rk3 < length(eInt) ? eInt[rk3+1:end] : Int[]

    return (vEliminated, vProperty, nvArbitrary, redundantEquations)
end
"""
printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty,
nvArbitrary, redundantEquations, var_name::Function; printTest=false)
Print result of [`simplifyLinearIntegerEquations!`](@ref).
Function `var_name(v)` returns the name of variable `v` as String.
If `printTest=true`, statements are printed that can be included in a Testset.
"""
function printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty, nvArbitrary, redundantEquations, var_name::Function; printTest=false)::Nothing
if nvArbitrary > 0
println("\n Variables that can be arbitrarily set and have been set to zero:")
for v in vEliminated[1:nvArbitrary]
println(lpad(string(v), 8), ": ", var_name(v), " = 0")
end
end
if length(vEliminated) - nvArbitrary > 0
println("\n Variables that have been eliminated:")
for v in vEliminated[nvArbitrary+1:end]
print(lpad(string(v), 8), ": ", var_name(v), " = ")
if isZero(vProperty, v)
println("0")
elseif isAlias(vProperty, v)
println(var_name(alias(vProperty,v)))
else
println("-", var_name(negAlias(vProperty,v)))
end
end
end
if length(redundantEquations) > 0
println("\n Redundant equations that have been removed:")
for e in redundantEquations
println(" ", e)
end
end
println("\n Remaining transformed linear Integer equations:")
ne = printLinearIntegerEquations(G[eInt], eInt, GcInt, var_name)
if ne == 0
println(" none (all linear Integer equations are removed)")
end
println()
if printTest
println("@test nvArbitrary == ", nvArbitrary)
println("@test vEliminated == ", vEliminated)
println("@test vProperty[vEliminated] == ", vProperty[vEliminated])
println("@test redundantEquations == ", redundantEquations)
println("@test eInt == ", eInt)
println("@test G[eInt] == ", G[eInt])
println("@test GcInt == ", GcInt)
end
return nothing
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 788 | """
Main module of ModiaBase.
* Developers: Hilding Elmqvist, Mogram AB, Martin Otter, DLR
* First version: December 2020
* License: MIT (expat)
"""
module ModiaBase

const path = dirname(dirname(@__FILE__)) # Absolute path of package directory
const Version = "0.11.1"
const Date = "2023-06-03"

#println("\nImporting ModiaBase Version $Version ($Date)")

using Unitful
using StaticArrays

# Elimination/simplification of linear Integer equations (alias handling)
include("LinearIntegerEquations.jl")

# Structural processing (BLT transformation, Pantelides algorithm)
include("BLTandPantelidesUtilities.jl")
using .BLTandPantelidesUtilities
include("BLTandPantelides.jl")
using .BLTandPantelides

include("Differentiate.jl")
using .Differentiate

include("Tearing.jl")

# Symbolic simplification of +, -, *, /, ^
include("Simplify.jl")
using .Simplify

# Structural and symbolic processing utilities
include("Symbolic.jl")
using .Symbolic

# Damped Gauss-Newton solver for nonlinear equation systems
include("NonlinearEquations.jl")
using .NonlinearEquations

end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 17810 | """
module NonlinearEquations - Solve determined or underdetermined nonlinear equation systems
Initial implementation: Gerhard Hippmann, DLR-SR
"""
module NonlinearEquations
export mild, high, extreme, quiet, info, verbose, debug, solveNonlinearEquations!
using Printf
using LinearAlgebra
import ForwardDiff
import FiniteDiff
# Grade of nonlinearity of the problem; selects the initial and minimal
# damping factors of the Gauss-Newton iteration in solveNonlinearEquations!.
@enum Nonlinearity begin
    mild
    high
    extreme
end

# Verbosity level of log output of solveNonlinearEquations!
# (ordered: quiet < info < verbose < debug).
@enum Loglevel begin
    quiet
    info
    verbose
    debug
end
"""
    rms(x::Vector)

Return the RMS value (mean square root norm) `sqrt(sum(x.^2)/length(x))` of `x`.
"""
function rms(x::Vector)
    # sum(abs2, x) replaces the original manual loop, which used a
    # type-unstable Int accumulator that also shadowed Base.sum.
    return sqrt(sum(abs2, x) / length(x))
end
"""
solveNonlinearEquations!(F!::Function,
m::Integer,
x::Vector,
scale::Union{Vector, Nothing};
nonlinearity::Nonlinearity = high,
restricted = false,
xtol = 1e-6,
maxiter = 50,
quasi = true,
forwardjac = false,
loglevel::Loglevel = info) -> convergence::Bool
Solve nonlinear equation system ``F(x) = 0`` with ``length(F) <= length(x)`` by global Gauss-Newton method with error oriented convergence criterion and adaptive trust region strategy. Optionally Broyden's 'good' Jacobian rank-1 updates are used. In case of underdetermined and/or rank-deficient equation system, a least squares solution is computed such that the norm of the scaled solution vector is minimal.
`F!` is a C1 continuous function with length ``n`` of input and ``m`` of output vector where ``n >= m`` (determined or underdetermined system of equations). It has to be defined by
F!(F::Vector, x::Vector) -> success::Bool
returning true in case of successful evaluation.
`m` is the length of the output vector of `F!`.
`x` is the vector of unknowns. On input it must contain an initial guess of the problem's solution, which is used as the start vector of the iteration. On output it contains the iteration result. If `solveNonlinearEquations!` returns true, the iteration converged and `x` contains an approximation of the solution which satisfies the error tolerance `xtol`.
`scale` is a vector of length ``n`` which contains positive scaling values used in computations of scaled norms and Jacobian scaling due to ``x_i / {scale}_i``. In case of `nothing` automatic scaling depends on `nonlinearity`.
`nonlinearity` defines the grade of non-linearity of the problem. Possible @enum values are `mild`, `high` and `extreme`. In case of `extreme` `restricted` is automatically set true.
`restricted` can be used to restrict the monotonicity test (i.e. damping factor) such that large iteration steps are avoided.
`xtol` is the error tolerance which the final approximate solution `x` must satisfy.
`maxiter` is the maximum number of allowed iterations.
`quasi` enables switch to local quasi Newton iteration (which avoids costly Jacobian evaluations) in case of advanced convergence.
`forwardjac` enables analytic Jacobian evaluation using [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl) package. Otherwise numeric computation by [FiniteDiff](https://github.com/JuliaDiff/FiniteDiff.jl) is used.
`loglevel` is the log message level. Possible @enum values are `quiet`, `info`, `verbose` and `debug`.
Reference: P. Deuflhard: Newton Methods for Nonlinear Problems. - Affine Invariance and Adaptive Algorithms (section 4.4). Series Computational Mathematics 35, Springer (2004).
"""
function solveNonlinearEquations!(F!::Function,
                                  m::Integer,
                                  x::Vector,
                                  scale::Union{Vector, Nothing};
                                  nonlinearity::Nonlinearity = high,
                                  restricted = false,
                                  xtol = 1e-6,
                                  maxiter = 50,
                                  quasi = true,
                                  forwardjac = false,
                                  loglevel::Loglevel = info)

    n = length(x)
    @assert(m > 0)
    @assert(n >= m)
    @assert(xtol > 0.0)
    @assert(maxiter > 0)

    # Constants
    SMALL = 1e-150
    LAMBDA_START_DEFAULT = 1e-2
    LAMBDA_MIN_DEFAULT = 1e-4
    LAMBDA_START_EXTREMELY_DEFAULT = 1e-4
    LAMBDA_MIN_EXTREMELY_DEFAULT = 1e-8
    THETA_MAX = 0.5
    DELKCOMP = false # use [deltabar] = delta_k(x^{k-1}) instead of [deltabar] = 0

    # Work arrays
    xscale = similar(x, n)
    xthresh = similar(x, n)
    w = similar(x, n)
    dx = similar(x, n)
    dxbar = similar(x, n)
    fxk = similar(x, m)
    jac = similar(x, m, n)
    if forwardjac
        # Pre-allocated configuration for analytic Jacobians via ForwardDiff
        jcfg = ForwardDiff.JacobianConfig(F!, fxk, x)
    else
        # Pre-allocated cache for finite-difference Jacobians via FiniteDiff
        xche = similar(x, n)
        fxche1 = similar(x, m)
        fxche2 = similar(x, m)
        jche = FiniteDiff.JacobianCache(xche, fxche1, fxche2)
    end
    dxl = similar(x, n)
    Pdxbar = similar(x, n)
    fxl = similar(x, m)
    jacdxbar = similar(x, m)
    if DELKCOMP
        fxkm1 = similar(x, m)
        jacdxkm1 = similar(x, m)
    end

    # Initialization
    k = 0
    cnvg = false
    nfcn = 0
    njac = 0
    normdx = 0.0
    normdxbar = 0.0
    precs = 0.0
    qnerr_iter = false

    # Set initial damping factor
    if nonlinearity == high
        lambda = LAMBDA_START_DEFAULT
        lambda_min = LAMBDA_MIN_DEFAULT
    elseif nonlinearity == extreme
        lambda = LAMBDA_START_EXTREMELY_DEFAULT
        lambda_min = LAMBDA_MIN_EXTREMELY_DEFAULT
        restricted = true
    else # mild
        lambda = 1.0
        lambda_min = LAMBDA_MIN_DEFAULT
    end

    # Set scaling vector
    if scale === nothing
        if nonlinearity == mild
            xscale .= 1.0
        else
            xscale .= xtol
        end
    else
        xscale .= scale
    end

    if loglevel >= verbose
        println("Solve underdetermined nonlinear equations:")
        println("Problem dimension = $n")
        println("Prescribed relative precision = $xtol")
        println("The problem is specified as being $(nonlinearity)ly nonlinear.")
        println("The $(restricted ? "restricted" : "standard") monotonicity test will be applied.")
        println("The maximum permitted number of iteration steps is: $maxiter")
        if loglevel >= debug
            println("Start damping factor = $lambda")
            println("Start vector = $x")
            println("Scale vector = $xscale")
        end
    end

    # Evaluate F(x^0)
    okay = F!(fxk, x)
    nfcn = nfcn + 1
    if !okay
        error("Error in function evaluation $nfcn.")
    else
        normfk = rms(fxk)
        w .= x
        xthresh .= xscale
        if (loglevel >= debug)
            println("Start function vector = $fxk\n")
            println(" iter norm_scl(dx) norm(fk) lambda")
        end
    end

    # Main damped Gauss-Newton loop.
    # For iteration index k = 0, 1,... do:
    while okay
        # Update scale vector
        # xscale = max.(xthresh, max.(0.5*abs.(x) + abs.(w), SMALL))
        for i = 1:n
            xscale[i] = max(xthresh[i], max(0.5 * (abs(x[i]) + abs(w[i])), SMALL))
        end

        if k > 0
            # Recompute norms after rescaling
            normdxkm1 = rms(dx ./ xscale)
            normdxbar = rms(dxbar ./ xscale)
        end

        # 1. Step k: Evaluate Jacobian matrix F'(x^k)
        if forwardjac
            ForwardDiff.jacobian!(jac, F!, fxk, x, jcfg)
        else
            FiniteDiff.finite_difference_jacobian!(jac, F!, x, jche)
        end
        njac = njac + 1

        # Scale Jacobian and change sign of it
        for ic = 1:n
            jac[:,ic] = jac[:,ic] * -xscale[ic]
        end

        # Compute QR-factorization of Jacobian (column pivoting; least-squares
        # solves below handle the underdetermined/rank-deficient case)
        jacqr = qr(jac, ColumnNorm())

        # Solve linear system -F'(x^k) * dx^k = F(x^k)
        ldiv!(dx, jacqr, fxk)

        # Descale predictor increment vector
        # (note: this rebinds dx to a new array, it is no longer the work array)
        dx = dx .* xscale
        normdx = rms(dx ./ xscale)
        precs = normdx
        if loglevel >= debug
            @printf("%5i %12e %12e %7f\n", k, normdx, normfk, lambda )
        end

        # Convergence test: If norm(dx^k) <= epsilon: Solution found: x^* := x^k + dx^k
        cnvg = normdx <= xtol
        if cnvg
            x .= x .+ dx
            k = k + 1
            break
        end

        # For k > 0: Compute a prediction value for the damping factor
        if k > 0
            lambdakm1 = lambda # Damping factor of last iterate
            # estimate for Lipschitz constant [omegabar_k] (page 228 1st formula)
            # Solve -F'(x^k) * Pdxbar = -F'(x^k) * Delta(x^{k-1},x^k) based on (4.79)
            mul!(jacdxbar, jac, dxbar ./ xscale)
            ldiv!(Pdxbar, jacqr, jacdxbar) # Pdxbar = P(x^k) * Delta(x^{k-1},x^k)
            dxh = rms((dxbar .- dx) ./ xscale)^2 - rms((dxbar ./ xscale) .- Pdxbar)^2 # (Delta(x^{k-1},x^k) - Delta(x^k,x^k))^2 - ((I_n - P(x^k)) * Delta(x^{k-1},x^k))^2
            omegabar = sqrt(dxh) / (lambdakm1*normdxkm1*normdxbar)
            # Damping factor (page 228 2nd formula)
            lambda = min(1.0, 1.0/(omegabar*normdx))
        end

        reduced = false

        @label checkregularity
        # Regularity test: If lambda_k < lambda_min: Convergence failure
        if lambda < lambda_min
            if loglevel >= verbose
                println("Convergence failure, damping factor became too small.")
            end
            break
        end

        # 2. Compute the trial iterate x^{k+1} := x^k + lambda_k * dx^k (3.43)
        xkp1 = x .+ lambda*dx

        # Evaluate F(x^{k+1})
        okay = F!(fxk, xkp1)
        nfcn = nfcn + 1
        if !okay
            println("Error in function evaluation $nfcn. Step reject not implemented.")
            break
        end
        normfkp1 = rms(fxk)

        # Simplified Gauss-Newton correction
        # Solve linear system (old Jacobian, new right hand side) F'(x^k) * dxbar^{k+1} = F(x^{k+1}) page 199 3rd formula
        ldiv!(dxbar, jacqr, fxk)

        # Descale corrector increment vector
        dxbar = dxbar .* xscale
        normdxbar = rms(dxbar ./ xscale)
        precs = normdxbar
        if loglevel >= debug
            @printf("%5i * %12e %12e %7f\n", k, normdxbar, normfkp1, lambda )
        end

        # 3. Compute the monitoring quantity Theta_k := norm(dxbar^{k+1}) / norm(dx^k) and correction damping factor mue
        theta = normdxbar / normdx # Contraction factor (page 148 1st formula resp. page 213 1st formula resp. page 227 3rd formula)
        if k > 0
            if (lambdakm1 == 1.0) && (lambda == 1.0)
                # Iterates approach the solution
                del = theta # page 215 1st paragraph
            elseif DELKCOMP
                # solve F'(x^k) * w = F(x^{k-1}) + F'(x^{k-1})*dx^{k-1}
                ldiv!(w, jacqr, fxkm1 + jacdxkm1) # w = F'(x^k)^+ * r(x^{k-1})
                del1 = rms(w)
                # solve F'(x^k) * w = F(x^{k-1})
                ldiv!(w, jacqr, fxkm1) # w = F'(x^k)^+ * F(x^{k-1})
                del2 = rms(w)
                # delta_k(x^{k-1}) (page 214 8th formula / page 217 1st formula)
                del = del1 / del2
            else
                del = 0.0
            end
        else
            del = 0.0
        end

        # Solve F'(x^k) * dxl = F(x^k + lambda*Delta(x^k,x^k))
        # (note: rebinds w; w is restored with `w .= x` below before it is reused)
        w = x .+ lambda*dx
        okay = F!(fxl, w)
        nfcn = nfcn + 1
        if !okay
            println("Error in function evaluation $nfcn. Step reject not implemented.")
            break
        end
        ldiv!(dxl, jacqr, fxl) # dxl = Delta(x^k,x^k+lambda*Delta(x^k,x^k))
        hk = 2.0*rms(dxl .- (1.0 - lambda)*dx ./ xscale) / (lambda*lambda*normdx) # [h_k] (4.68)
        mue = (1.0 - del) / hk # Correction damping factor (page 213 4th formula)

        # If Theta_k >= 1 (or, if restricted Theta_k > 1 - lambda_k/4)
        # then replace lambda_k by lambda'_k := min(mu'_k, lambda_k/2). Goto regularity test.
        # else let lambda'_k := min(1, lambda'_k)
        if (!restricted && theta >= 1.0) || (restricted && theta > 1.0-lambda/4.0)
            # Natural/restricted monotonicity test failed
            lambda_new = min(mue, 0.5*lambda) # Corrected damping factor (3.48)
            if lambda <= lambda_min
                lambda = lambda_new # Does not make sense, bug?
            else
                lambda = max(lambda_new, lambda_min)
            end
            reduced = true
            @goto checkregularity
        else
            # Monotonicity test succeeded
            lambda_new = min(1.0, mue) # (3.45)
        end

        # If lambda'_k == lambda_k == 1
        # then if norm(dxbar^{k+1}) <= epsilon: Solution found: x^* := x^{k+1} + dxbar^{k+1}
        # else if Theta_k < 0.5 switch to quasi Gauss-Newton method
        # else if lambda'_k >= 4*lambda_k replace lambda_k by lambda'_k and goto 2.
        # else accept x^{k+1} as new iterate: goto 1 with k := k + 1.
        if (lambda == 1.0) && (lambda_new == 1.0)
            # Iterates approach the solution
            cnvg = normdxbar <= xtol
            # Convergence test
            if cnvg
                x .= xkp1 .+ dxbar
                break
            end
            if quasi
                qnerr_iter = theta < THETA_MAX # page 148 1st formula
            end
        elseif (lambda_new >= 4.0*lambda) && !reduced # not documented?
            lambda = lambda_new
            @goto checkregularity
        end

        # Save previous iterate for scaling purposes and accept new iterate
        w .= x
        x .= xkp1
        normfk = normfkp1
        if DELKCOMP
            fxkm1 .= fxk
            mul!(jacdxkm1, jac, dx ./ xscale)
        end

        # Next step
        k = k + 1
        if k >= maxiter
            break
        end

        if qnerr_iter
            # Local error-oriented quasi Gauss-Newton method using Broyden's 'good' rank-1 updates
            # Input arguments: x, nfcn, normdx, jaclu, F!, n, scale, dx, maxiter
            # Output arguments: x, nfcn, normdx, normfk, cnvg, precs, okay, qnerr_iter
            # Overwrites: xscale

            # Work arrays
            dx_qn = similar(x, n, 1)
            sigma = similar(x, maxiter+2)
            fxk_qn = similar(x, m)
            v = similar(x, n)

            # Set scaling vector
            if scale === nothing
                xscale .= 1.0
            else
                xscale .= scale
            end

            # Initialization
            dx_qn[:,1] .= dx
            s = normdx
            sigma[1] = s * s

            # For k = 0, ..., k_max:
            k_qn = 0
            while true
                if k_qn > 0
                    # New iterate x_k+1
                    x .= x .+ dx_qn[:,k_qn+1]
                    precs = normdx
                    cnvg = sigma[k_qn+1] <= xtol * xtol
                    if cnvg
                        k_qn = k_qn + 1
                        break
                    end
                end

                # Allocate new column of dx_qn
                dx_qn = hcat(dx_qn, similar(x, n))

                # Evaluate F(x_k+1)
                okay = F!(fxk_qn, x)
                nfcn = nfcn + 1
                if !okay
                    println("Error in function evaluation $nfcn. Step reject not implemented.")
                    break
                end
                normfk = rms(fxk_qn)

                # Solve linear system J*v = -F_k+1 (reuse QR factorization of the
                # last regular Gauss-Newton Jacobian; Broyden updates are applied below)
                ldiv!(v, jacqr, fxk_qn)

                # Descale v
                v = v .* xscale
                for i = 1:k_qn
                    alpha_bar = dot(v ./ xscale, dx_qn[:,i] ./ xscale)/n / sigma[i]
                    v = v .+ (alpha_bar * dx_qn[:,i+1])
                end
                alpha_kp1 = dot(v ./ xscale, dx_qn[:,k_qn+1] ./ xscale)/n / sigma[k_qn+1]
                s = 1.0 - alpha_kp1
                dx_qn[:,k_qn+2] .= v / s
                thetak = rms(dx_qn[:,k_qn+2]) / rms(dx_qn[:,k_qn+1]) # page 225 3rd formula

                # Check contraction factor
                if thetak > THETA_MAX
                    if loglevel >= debug
                        @printf("%5i %12e %12e QNERR THETA!\n", k+k_qn, normdx, normfk )
                    end
                    k_qn = k_qn + 1
                    break
                end
                sigma[k_qn+2] = dot(dx_qn[:,k_qn+2] ./ xscale, dx_qn[:,k_qn+2] ./ xscale)/n
                normdx = sqrt(sigma[k_qn+2])
                if loglevel >= debug
                    @printf("%5i %12e %12e QNERR\n", k+k_qn, normdx, normfk )
                end
                k_qn = k_qn + 1
                if k + k_qn >= maxiter
                    break
                end
            end
            k = k + k_qn - 1 # Total number of iterations
            if cnvg
                break
            else
                # Quasi-Newton did not converge -> fall back to damped Gauss-Newton
                normfk = normfkp1
                qnerr_iter = false
            end
            if k >= maxiter
                break
            end
        end
    end

    if k >= maxiter
        println("Max. allowed number of iterations = $maxiter exceeded.")
    end

    if loglevel >= info
        println("Solver $(cnvg ? "converged" : "did not converge").")
        if loglevel >= verbose
            println("Number of iterations = $k")
            println("Number of function evaluations = $nfcn")
            println("Number of Jacobian evaluations = $njac")
            if loglevel >= debug
                println("Solution vector = $x")
            end
            println("Precision = $precs")
        end
    end
    return cnvg
end
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 2894 | """
Symbolic simplifications of +, -, *, /, ^
* Developer: Hilding Elmqvist, Mogram AB
* First version: December 2020
* License: MIT (expat)
If possible, the operation is performed. Special care about adding zero and multiplying with one.
"""
module Simplify
export add, sub, mult, divide, power, simplify
using Base.Meta: isexpr
using Unitful
# True if `x` is a unit literal: either a Unitful unit object or a macro call
# such as `u"N"` (unit literals are kept as :macrocall expressions).
# Uses the idiomatic `isa` instead of `typeof(x) <: ...`.
isUnit(x) = x isa Unitful.FreeUnits || isexpr(x, :macrocall)
"""
    add(x, y)

Return the simplified sum of `x` and `y`: two numeric literals are added
directly, adding zero is a no-op, otherwise the expression `:(x + y)` is built.
"""
function add(x, y)
    bothNumbers = typeof(x) in [Float64, Int64] && typeof(y) in [Float64, Int64]
    if bothNumbers
        return x + y
    elseif x == 0
        return y
    elseif y == 0
        return x
    end
    return :($x + $y)
end
"""
    sub(x, y)

Return the simplified difference of `x` and `y`: two numeric literals are
subtracted directly, `0 - (-y)` collapses to `y`, `0 - y` becomes `:(-y)`,
subtracting zero is a no-op, otherwise `:(x - y)` is built.
"""
function sub(x, y)
    isNum(v) = typeof(v) in [Float64, Int64]
    if isNum(x) && isNum(y)
        return x - y
    end
    if x == 0
        # 0 - (-y) => y
        if isexpr(y, :call) && length(y.args) == 2 && y.args[1] == :-
            return y.args[2]
        end
        # 0 - y => -y
        return :(- $y)
    end
    return y == 0 ? x : :($x - $y)
end
"""
    mult(x, y)

Return the simplified product of `x` and `y`: two numeric literals are
multiplied directly; multiplying by 0, 1 or -1 is folded — but only when the
other factor is not a unit literal (a zero factor must not erase a unit).
Otherwise the expression `:(x * y)` is built.
"""
function mult(x, y)
    if typeof(x) in [Float64, Int64] && typeof(y) in [Float64, Int64]
        return x * y
    end
    # Note: a zero factor does not cancel a unit literal — units are kept.
    if (x == 0 && !isUnit(y)) || (y == 0 && !isUnit(x))
        return 0
    elseif x == 1 && !isUnit(y)
        return y
    elseif y == 1 && !isUnit(x)
        return x
    elseif x == -1 && !isUnit(y)
        return sub(0, y)
    end
    return :($x * $y)
end
"""
    divide(x, y)

Return the simplified quotient of `x` and `y`: two numeric literals are
divided directly, a zero numerator yields 0, dividing by 1 is a no-op,
dividing by -1 negates, otherwise the expression `:(x / y)` is built.
"""
function divide(x, y)
    if typeof(x) in [Float64, Int64] && typeof(y) in [Float64, Int64]
        return x / y
    end
    if x == 0 || x == 0.0
        return 0
    elseif y === 1 || y == 1.0
        return x
    elseif y === -1
        return sub(0, x)
    end
    return :($x / $y)
end
"""
    power(x, y)

Return the simplified power `x^y`: two numeric literals are evaluated
directly, exponent 1 is a no-op, otherwise the expression `:(x ^ y)` is built.
"""
function power(x, y)
    if typeof(x) in [Float64, Int64] && typeof(y) in [Float64, Int64]
        return x ^ y
    end
    return y == 1 ? x : :($x ^ $y)
end
# Fallback: anything that is not an Expr is already fully simplified.
simplify(ex) = ex

"""
    simplify(ex::Expr)

Recursively simplify expression `ex`: fold `+`, `-`, `*` over numeric
literals, collapse products containing a zero factor, and reduce `x^1` to
`x` and `x^0` to `1`.
"""
function simplify(ex::Expr)
    simplifiedArgs = [simplify(a) for a in ex.args]
    isCall = ex.head == :call
    if isCall && ex.args[1] in [:+, :-, :*] && all([typeof(a) in [Float64, Int64] for a in simplifiedArgs[2:end]])
        # All operands are numeric literals -> evaluate the operation
        op = ex.args[1]
        if op in [:+]
            return sum(simplifiedArgs[2:end])
        elseif op in [:-]
            return simplifiedArgs[2] - sum(simplifiedArgs[3:end])
        else
            return prod(simplifiedArgs[2:end])
        end
    elseif isCall && ex.args[1] == :* && any([a == 0 for a in simplifiedArgs[2:end]])
        # x*0*y = 0
        return 0
    elseif isCall && ex.args[1] == :^ && length(simplifiedArgs) == 3 && simplifiedArgs[3] == 1
        # x^1 = x
        return simplifiedArgs[2]
    elseif isCall && ex.args[1] == :^ && length(simplifiedArgs) == 3 && simplifiedArgs[3] == 0
        # x^0 = 1
        return 1
    else
        return Expr(ex.head, simplifiedArgs...)
    end
end
# Ad-hoc smoke test: print the result of simplify for a set of sample
# expressions (numeric folding and power simplification).
# Run manually; not part of an automatic test suite.
function testSimplify()
    @show simplify(:(1+3))
    @show simplify(:(3-1))
    @show simplify(:(4*3))
    @show simplify(:(4*3+x))
    @show simplify(:(1+3+x))
    @show simplify(:(1+x+3))
    @show simplify(:(x^0))
    @show simplify(:(x^1))
    @show simplify(:(x^(3-2)))
end
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 18326 | """
Structural and symbolic processing.
* Developer: Hilding Elmqvist, Mogram AB
* First version: December 2020
* License: MIT (expat)
Examples of use can be found in TestSymbolic.jl
"""
module Symbolic
export removeBlock, removeQuoteNode, makeDerVar, append, prepend, Incidence, findIncidence!, linearFactor, solveEquation,
isLinear, getCoefficients, substitute, removeUnits, resetEventCounters, getEventCounters, substituteForEvents
using Base.Meta: isexpr
using OrderedCollections
using ModiaBase.Simplify
using Unitful
using Measurements
using MonteCarloMeasurements
"""
e = removeBlock(ex)
Remove :block with LineNumberNode from expressions
* `ex`: Expression or array of expressions
* `return `e`: ex with :block removed
"""
removeBlock(ex) = ex
removeBlock(arr::Array{Any,1}) = [removeBlock(a) for a in arr]
removeBlock(arr::Array{Expr,1}) = [removeBlock(a) for a in arr]
removeBlock(q::QuoteNode) = q
# if typeof(q.value) == Symbol; q.value else q end # begin print("QuoteNode: "); dump(q); q; end # q.value
removeBlock(d::OrderedDict) = OrderedDict{Symbol, Any}([(k,removeBlock(v)) for (k,v) in d])
function removeBlock(ex::Expr)
if isexpr(ex, :block) && length(ex.args) == 2 && typeof(ex.args[1]) == LineNumberNode
ex.args[2]
elseif isexpr(ex, :block) && length(ex.args) == 3 && typeof(ex.args[1]) == LineNumberNode && typeof(ex.args[2]) == LineNumberNode
ex.args[3]
else
Expr(ex.head, [removeBlock(arg) for arg in ex.args]...)
end
end
# removeQuoteNode(ex): recursively replace every QuoteNode in `ex` by its
# wrapped value; arrays, OrderedDicts and Expr trees are traversed, all
# other values are returned unchanged.
removeQuoteNode(ex) = ex
removeQuoteNode(arr::Array{Any,1})  = [removeQuoteNode(elem) for elem in arr]
removeQuoteNode(arr::Array{Expr,1}) = [removeQuoteNode(elem) for elem in arr]
removeQuoteNode(q::QuoteNode) = q.value
removeQuoteNode(d::OrderedDict) = OrderedDict{Symbol, Any}([(key, removeQuoteNode(val)) for (key, val) in d])
removeQuoteNode(ex::Expr) = Expr(ex.head, map(removeQuoteNode, ex.args)...)
# Strip physical units from an expression tree:
# - a Unitful quantity is replaced by its plain numeric value (ustrip),
# - a unit literal macro call u"..." is replaced by 1,
# - everything else is returned unchanged.
# (Removed a leftover debug `@show ex` from the quantity branch and use the
# idiomatic `isa` instead of `typeof(ex) <: ...`.)
removeUnits(ex) = ex isa Unitful.Quantity ? ustrip(ex) : ex
removeUnits(ex::Expr) = ex.head == :macrocall && ex.args[1] == Symbol("@u_str") ? 1 : Expr(ex.head, [removeUnits(arg) for arg in ex.args]...)
# Append `suffix` to the dot-path `ex`, e.g. append(:(a.b), :c) -> :(a.b.c).
# If `ex` is nothing, the suffix itself is returned.
append(ex, suffix) = if ex == nothing; suffix else Expr(:., ex, QuoteNode(suffix)) end

# prepend(ex, prefix): qualify a reference with `prefix`,
# e.g. prepend(:x, :(a.b)) -> :(a.b.x).
# Fallback: values that are neither Symbol nor Expr are returned unchanged.
prepend(ex, prefix) = ex
# Commented-out alternative implementations kept for reference:
#prepend(ex, prefix::Array{Any,1}) = if length(prefix) == 0; ex else prepend(prepend(ex, prefix[end]), prefix[1:end-1]) end
#prepend(ex, prefix::Array{Symbol,1}) = if length(prefix) == 0; ex else prepend(prepend(ex, prefix[end]), prefix[1:end-1]) end
#prepend(ex::Symbol, prefix::Array{Symbol,1}) = if length(prefix) == 0; ex else prepend(prepend(ex, prefix[end]), prefix[1:end-1]) end
#prepend(ex::QuoteNode, prefix::Symbol) = Expr(:., prefix, QuoteNode(ex))
# Symbols are prefixed unless prefix is nothing or the symbol is one of the
# reserved names that must stay unqualified (time, instantiatedModel, ...).
prepend(ex::Symbol, prefix) = if prefix == nothing; ex elseif ex in [:time, :instantiatedModel, :_leq_mode, :_x]; ex else Expr(:., prefix, QuoteNode(ex)) end
prepend(ex::Symbol, prefix::Nothing) = ex
prepend(arr::Array{Expr,1}, prefix) = [prepend(a, prefix) for a in arr]
#prepend(dict::OrderedCollections.OrderedDict{Symbol,Expr}, prefix) = OrderedDict([prepend(k, prefix) => prepend(k, prefix) for (k,v) in dict])
# prepend(ex::Expr, prefix): recursively prefix all variable references inside `ex`.
# Special cases:
#   - an empty prefix array leaves the expression unchanged;
#   - `up.x` escapes one hierarchical level (the `up.` wrapper is removed);
#   - in calls and keyword arguments the function name (args[1]) is never prefixed;
#   - `@u_str` unit literals are kept verbatim (see comment below);
#   - any other macro call is expanded via `eval`.
#     NOTE(review): `eval` runs at module scope - confirm all reachable macros are pure.
function prepend(ex::Expr, prefix)
    if typeof(prefix) == Array{Any,1} && length(prefix) == 0
        ex
    elseif ex.head == :. && ex.args[1] == :up
        # up.x: unwrap the QuoteNode and drop one hierarchy level
        ex.args[2].value
    elseif ex.head in [:call, :kw]
        if false #ex.head == :call && ex.args[1] == :der
            e = Symbol(ex)
            :($prefix.$e)
        else
            # keep the function/keyword name, prefix only the arguments
            Expr(ex.head, ex.args[1], [prepend(arg, prefix) for arg in ex.args[2:end]]...)
        end
    elseif ex.head == :macrocall
        if length(ex.args) >= 1 && ex.args[1] == Symbol("@u_str")
            # Do not expand units, such as u"s", because otherwise logCode=true results in wrong code, because show(u"N") is N and not u"N".
            ex
        else
            eval(ex)
        end
    else
        Expr(ex.head, [prepend(arg, prefix) for arg in ex.args]...)
    end
end
# Global counters and variable lists that are filled while translating the event
# operators (positive, after, Clock, sample, previous, pre, hold) into runtime calls.
nCrossingFunctions = 0
nAfter = 0
nClocks = 0
nSamples = 0
previousVars = []
preVars = []
holdVars = []

"Reset all event-operator counters and empty the collected variable lists."
function resetEventCounters()
    global nCrossingFunctions = 0
    global nAfter = 0
    global nClocks = 0
    global nSamples = 0
    global previousVars = []
    global preVars = []
    global holdVars = []
end

"Return the current event-operator counters and variable lists as a tuple."
function getEventCounters()
    return (nCrossingFunctions, nAfter, nClocks, nSamples, previousVars, preVars, holdVars)
end
"""
    substituteForEvents(ex)

Replace event-operator calls (`positive`, `Clock`, `sample`, `pre`, `previous`,
`hold`, `initial`, `terminal`, `after`) in expression `ex` by the corresponding
runtime calls referencing `instantiatedModel`, incrementing the global event
counters and collecting the operator variables as a side effect.
Non-Expr input is returned unchanged.
"""
substituteForEvents(ex) = ex
function substituteForEvents(ex::Expr)
    global nCrossingFunctions
    global nAfter
    global nClocks
    global nSamples
    global previousVars   # fixed: was `global previousVar` (typo); previousVars is the list mutated below
    global preVars
    global holdVars
    if ex.head in [:call, :kw]
        if ex.head == :call && ex.args[1] == :positive
            # each positive(...) gets a unique crossing-function index
            nCrossingFunctions += 1
            :(positive(instantiatedModel, $nCrossingFunctions, ustrip($(substituteForEvents(ex.args[2]))), $(string(substituteForEvents(ex.args[2]))), _leq_mode))
        elseif ex.head == :call && ex.args[1] == :Clock
            @assert 2<=length(ex.args)<=3 "The Clock function takes one or two arguments: $ex"
            nClocks += 1
            if length(ex.args) == 2
                :(Clock(ustrip($(substituteForEvents(ex.args[2]))), instantiatedModel, $nClocks))
            else
                :(Clock(ustrip($(substituteForEvents(ex.args[2]))), ustrip($(substituteForEvents(ex.args[3]))), instantiatedModel, $nClocks))
            end
        elseif ex.head == :call && ex.args[1] == :sample
            nSamples += 1
            :(sample($(substituteForEvents(ex.args[2])), $(substituteForEvents(ex.args[3])), instantiatedModel, $nSamples))
        elseif ex.head == :call && ex.args[1] == :pre
            if length(ex.args) == 2
                push!(preVars, ex.args[2])
                nPre = length(preVars)
                :(pre(instantiatedModel, $nPre))
            else
                error("The pre function takes one arguments: $ex")
            end
        elseif ex.head == :call && ex.args[1] == :previous
            if length(ex.args) == 3
                push!(previousVars, ex.args[2])
                nPrevious = length(previousVars)
                :(previous($(substituteForEvents(ex.args[3])), instantiatedModel, $nPrevious))
            else
                error("The previous function presently takes two arguments: $ex")
            end
        elseif ex.head == :call && ex.args[1] == :hold
            push!(holdVars, ex.args[2])
            nHold = length(holdVars)
            if length(ex.args) == 3
                :(hold($(substituteForEvents(ex.args[2])), $(substituteForEvents(ex.args[3])), instantiatedModel, $nHold))
            else
                # error("The hold function takes two or three arguments, hold(v, clock) or hold(expr, start, clock) : $ex")
                error("The hold function takes two arguments, hold(v, clock): $ex")
            end
        elseif ex.head == :call && ex.args[1] in [:initial, :terminal]
            if length(ex.args) == 1
                :($(ex.args[1])(instantiatedModel))
            else
                error("The $(ex.args[1]) function don't take any arguments: $ex")
            end
        elseif ex.head == :call && ex.args[1] == :after
            # after(instantiatedModel, nr, t, tAsString, leq_mode)
            nAfter += 1
            :(after(instantiatedModel, $nAfter, ustrip($(substituteForEvents(ex.args[2]))), $(string(substituteForEvents(ex.args[2]))), _leq_mode))
        else
            # ordinary call/kw: keep the function name, substitute in the arguments
            Expr(ex.head, ex.args[1], [substituteForEvents(arg) for arg in ex.args[2:end]]...)
        end
    else
        Expr(ex.head, [substituteForEvents(arg) for arg in ex.args]...)
    end
end
Incidence = Union{Symbol, Expr}

"""
    findIncidence!(ex, incidence::Array{Incidence,1}, includeX::Bool=true)

Traverse expression (or array of expressions) `ex` and collect all incidences,
i.e. referenced symbols, `der(...)` calls and dot-paths, by pushing them onto
`incidence`. With `includeX=false` the argument `x` of `der(x)` is not recorded
as a separate incidence.
"""
findIncidence!(ex, incidence::Array{Incidence,1}, includeX::Bool=true) = nothing
findIncidence!(s::Symbol, incidence::Array{Incidence,1}, includeX::Bool=true) = begin if ! (s in [:(:), :end]); push!(incidence, s) end end
findIncidence!(arr::Array{Any,1}, incidence::Array{Incidence,1}, includeX::Bool=true) = [findIncidence!(a, incidence, includeX) for a in arr]
findIncidence!(arr::Array{Expr,1}, incidence::Array{Incidence,1}, includeX::Bool=true) = [findIncidence!(a, incidence, includeX) for a in arr]
function findIncidence!(ex::Expr, incidence::Array{Incidence,1}, includeX::Bool=true)
    if ex.head == :macrocall && ex.args[1] == Symbol("@u_str")
        # unit literal: carries no incidence
    elseif isexpr(ex, :function)
        # nested function definition: treated as opaque
    elseif ex.head == :call
        op = ex.args[1]
        if op == :der
            push!(incidence, ex)                 # record der(x) itself
            if includeX
                push!(incidence, ex.args[2])     # and optionally x as well
            end
        else
            # skip the operator/function name; for pre/previous also skip their first argument
            firstArg = op in [:pre, :previous] ? 3 : 2
            for arg in ex.args[firstArg:end]
                findIncidence!(arg, incidence, includeX)
            end
        end
    elseif ex.head == :.
        # a dot-path such as a.b is one incidence as a whole
        push!(incidence, ex)
    elseif ex.head == :generator
        # collect incidences of the whole generator, then drop its iteration variables
        iterationVars = [v.args[1] for v in ex.args[2:end]]
        inner = Incidence[]
        for arg in ex.args
            findIncidence!(arg, inner, includeX)
        end
        unique!(inner)
        setdiff!(inner, iterationVars)
        append!(incidence, inner)
    else
        # For example: =, vect, hcat, block, ref
        for arg in ex.args
            findIncidence!(arg, incidence, includeX)
        end
    end
    nothing
end
"""
    (rest, factor, linear) = linearFactor(ex, x)

Find the linear `factor` and `rest` such that `ex == factor*x + rest`, provided
`ex` is `linear` with regards to `x`.
* `ex`: Expression (fallback methods below handle constants and Symbols)
* `x`: incidence (Symbol, `der(...)` or dot-path Expr) to factor out
* `return (rest, factor, linear)`
"""
linearFactor(ex, x) = (ex, 0, true)   # constants and other leaves do not depend on x
linearFactor(ex::Symbol, x::Incidence) = if ex == x; (0, 1, true) else (ex, 0, true) end
# Recursive linear decomposition of an expression with regards to incidence x.
function linearFactor(ex::Expr, x::Incidence)
    if ex.head == :block
        # begin ... end wrapping a single expression: unwrap
        linearFactor(ex.args[1], x)
    elseif ex.head == :macrocall && ex.args[1] == Symbol("@u_str")
        # unit literal: constant
        (ex, 0, true)
    elseif isexpr(ex, :call) && ex.args[1] == :der
        # der(v) contributes x only if it is exactly the searched incidence
        if ex == x; (0, 1, true) else (ex, 0, true) end
    elseif isexpr(ex, :call) && ex.args[1] in [:positive, :previous]
        # event operators are treated as known values (constant w.r.t. x)
        (ex, 0, true)
    elseif isexpr(ex, :call)
        func = ex.args[1]
        if func in [:_DUPLICATEEQUATION, :_DUPLICATIMPLICITDEPENDENCY, :implicitDependency]
            # pseudo-equations: never linear; early return avoids traversing their arguments
            return (ex, 0, false)
        end
        # decompose each argument; rests/factors/linears are aligned per argument
        arguments = ex.args[2:end]
        factored = [linearFactor(a, x) for a in arguments]
        rests = [f[1] for f in factored]
        factors = [f[2] for f in factored]
        linears = [f[3] for f in factored]
        if func == :+
            rest = foldl(add, rests)
            factor = foldl(add, factors)
            (rest, factor, all(linears))
        elseif func == :-
            # unary minus negates; n-ary minus folds from the left
            if length(rests) == 1
                rest = sub(0, rests[1])
            else
                rest = foldl(sub, rests)
            end
            if length(factors) == 1
                factor = sub(0, factors[1])
            else
                factor = foldl(sub, factors)
            end
            (rest, factor, all(linears))
        elseif func == :*
            if length(arguments) > 2
                # reduce the n-ary product to nested binary products first
                linearFactor(foldl(mult, arguments), x)
            else
                # (r1 + f1*x)*(r2 + f2*x) = (r1*r2 + r1*f2*x + f1*r2*x + ...)
                rest = foldl(mult, rests)
                factor = 0
                if factors[1] == 0
                    factor = mult(rests[1], factors[2])
                    (rest, factor, all(linears))
                elseif length(factors) == 2 && factors[2] == 0
                    factor = mult(rests[2], factors[1])
                    (rest, factor, all(linears))
                else
                    # both operands depend on x: product has an x^2 term, i.e. nonlinear
                    (0, 0, false)
                end
            end
        elseif func == :/
            # (r1 + f1*x)/r2 = (r1/r2 + f1/r2*x); only linear if the denominator is x-free
            rest = foldl(divide, rests)
            @assert length(factors) == 2 "Non-binary division is not handled."
            factor = divide(factors[1], rests[2])
            (rest, factor, all(linears) && all(factors[2:end] .== 0))
        elseif func == :\
            # r1 \ (r2 + f2*x) = (r1\r2 + r1\f2*x); only linear if r1 is x-free
            rest = divide(rests[2], rests[1])
            @assert length(factors) == 2 "Non-binary \\ is not handled."
            factor = divide(factors[2], rests[1])
            (rest, factor, all(linears) && factors[1] == 0)
        else
            # unknown function: counted as linear only if no argument depends on x
            (ex, 0, all(linears) && all(factors .== 0))
        end
    elseif ex.head == :.
        # dot-path variable reference
        if ex == x; (0, 1, true) else (ex, 0, true) end
    elseif isexpr(ex, :if) || isexpr(ex, :elseif)
        # decompose both branches; rest/factor are merged branch-wise into a runtime if
        cond = linearFactor(ex.args[1], x)
        then = linearFactor(ex.args[2], x)
        els = linearFactor(ex.args[3], x)
        if then[1] == els[1]
            rest = then[1]
        else
            rest = :(if $(cond[1]); $(then[1]) else $(els[1]) end)
        end
        if then[2] == els[2]
            factor = then[2]
        else
            factor = :(if $(cond[1]); $(then[2]) else $(els[2]) end)
        end
        (rest, factor, cond[3] && then[3] && els[3])
    elseif isexpr(ex, :(=))
        # an equation lhs = rhs is decomposed as lhs - rhs == 0
        LHS = linearFactor(ex.args[1], x)
        RHS = linearFactor(ex.args[2], x)
        rest = sub(LHS[1], RHS[1])
        factor = sub(LHS[2], RHS[2])
        (rest, factor, LHS[3] && RHS[3])
    elseif isexpr(ex, :vect) || isexpr(ex, :vcat) || isexpr(ex, :hcat) || isexpr(ex, :row)
        # NOTE(review): ex.args[2:end] skips the first array element here; unlike the
        # :call case there is no function name in args[1] - confirm this is intended.
        arguments = ex.args[2:end]
        factored = [linearFactor(a, x) for a in arguments]
        linears = [f[3] for f in factored]
        (ex, 0, all(linears))
    else
        # @warn "Unknown expression type" ex
        # dump(ex)
        # fallback: the expression is a constant w.r.t. x if x does not occur in it
        incidence = Incidence[]
        findIncidence!(ex, incidence)
        if x in incidence
            (ex, 0, false)
        else
            (ex, 0, true)
        end
    end
end
"""
    (solved_equation, solved) = solveEquation(equ::Expr, x)

Solve `equ` (an `expression = expression` Expr) for `x`, provided `equ` is linear
with regards to `x` (equ == factor*x + rest = 0). Returns the assignment
expression `x = -rest/factor` together with a flag telling whether the equation
is indeed linear in `x` (i.e. whether the returned expression is valid).
"""
function solveEquation(equ::Expr, x)
    rest, factor, linear = linearFactor(equ, x)
    solution = :($x = $(divide(sub(0, rest), factor)))
    return (solution, linear)
end
"""
    (linear, constant) = isLinear(equ::Expr, x)

Test whether `equ` is `linear` with regards to `x` (equ == factor*x + rest) and
whether the factor is `constant`, i.e. not itself an expression.
"""
function isLinear(equ::Expr, x::Incidence)
    rest, factor, linear = linearFactor(equ, x)
    return (linear, !(factor isa Expr))
end
"""
    (incidence, coefficients, rest, linear) = getCoefficients(ex)

If `ex` is `linear` with regards to all its incidences (symbols and `der(...)`),
return those incidences together with their `coefficients` and the constant
`rest`. `linear` is false if some coefficient itself references an incidence or
if `ex` is nonlinear in one of its incidences.
"""
function getCoefficients(ex::Expr)
    unknowns = Incidence[]
    findIncidence!(ex, unknowns)
    coefficients = []
    remainder = ex
    linear = true
    for x in unknowns
        # a previously extracted coefficient referencing x makes the system nonlinear
        crossIncidence = Incidence[]
        findIncidence!(coefficients, crossIncidence)
        if x in crossIncidence
            linear = false
            break
        end
        (remainder, factor, factorIsLinear) = linearFactor(remainder, x)
        if !factorIsLinear
            linear = false
            break
        end
        push!(coefficients, factor)
    end
    return unknowns, coefficients, remainder, linear
end
# substitute(substitutions, ex): repeatedly apply the substitution dictionary to
# `ex` until a fixed point is reached (chains such as a->b->c are followed).
substitute(substitutions, ex) = begin nex = get(substitutions, ex, ex); if nex != ex; nex = substitute(substitutions, nex) else nex end; nex end
substitute(substitutions, ex::Vector{Symbol}) = [substitute(substitutions, e) for e in ex]
substitute(substitutions, ex::Vector{Expr}) = [substitute(substitutions, e) for e in ex]
# Measurement/particle values are opaque leaves and are never substituted.
substitute(substitutions, ex::MonteCarloMeasurements.StaticParticles) = ex
substitute(substitutions, ex::Measurements.Measurement{Float64}) = ex
"""
    substitute(substitutions, ex::Expr)

Apply the substitution dictionary recursively to expression `ex`. Quoted
expressions are left untouched; dot-paths are looked up as a whole (following
substitution chains); binary `+`, `-` and `*` are rebuilt via the simplifying
constructors `add`, `sub` and `mult`; everything else is rebuilt structurally.
"""
function substitute(substitutions, ex::Expr)
    if isexpr(ex, :quote)
        return ex
    end
    if ex.head == :.
        replacement = get(substitutions, ex, ex)
        return replacement == ex ? ex : substitute(substitutions, replacement)
    end
    if ex.head == :call && length(ex.args) == 3
        op = ex.args[1]
        if op == :+ || op == :- || op == :*
            lhs = substitute(substitutions, ex.args[2])
            rhs = substitute(substitutions, ex.args[3])
            return op == :+ ? add(lhs, rhs) :
                   op == :- ? sub(lhs, rhs) : mult(lhs, rhs)
        end
    end
    return Expr(ex.head, [substitute(substitutions, arg) for arg in ex.args]...)
end
# Ad-hoc, manually invoked smoke test for substitute and getCoefficients.
# Results are only printed via @show / @time; nothing is asserted.
# (The call at the bottom of the file is commented out.)
function testSubstitutions()
    ex = substitute(Dict(:a=>:b), :(a+b=0))
    @show ex
    ex = substitute(Dict(:a=>:b), :(a+b+c=0))
    @show ex
    ex = substitute(Dict(:b=>0), :(a+b=0))
    @show ex
    ex = substitute(Dict(:b=>0.0), :(a+b=0))
    @show ex
    ex = substitute(Dict(:b=>0), :(a+b+c=0))
    @show ex
    ex = substitute(Dict(:b=>0.0), :(a+b+c=0))
    @show ex
    ex = substitute(Dict(:b=>0.0), :(a+b*(c+d)=0))
    @show ex
    println("TEST getCoefficients")
    # stress test: wide call expressions with n identical incidences
    n = 10000
    @show n
    e = Expr(:call, :f, fill(:x, n)...)
    # @show e
    @time incidence, coefficients, rest, linear = getCoefficients(e)
    # @show incidence coefficients rest linear
    e = Expr(:call, :_DUPLICATEEQUATION, fill(:x, n)...)
    # @show e
    @time incidence, coefficients, rest, linear = getCoefficients(e)
    # @show incidence coefficients rest linear
end
# testSubstitutions()
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 21707 | # License for this file: MIT (expat)
# Copyright 2017-2021, DLR Institute of System Dynamics and Control
# Author: Martin Otter, DLR-SR
#
# Functions to tear systems of equations.
export TearingSetup, tearEquations!
# Sentinel value meaning "level not yet defined / not yet visited" in the
# incremental cycle-detection data structures below.
const Undefined = typemin(Int)
#=
Bender, Fineman, Gilbert, Tarjan (2016):
A New Approach to Incremental Cycle Detection and Related Problems.
ACM Transactions on Algorithms, Volume 12, Issue 2, Feb. 2016
http://dl.acm.org/citation.cfm?id=2756553
Text excerpt from this paper (the advantage of Algorithm N is that it
is simple to implement and needs no sophisticated data structures)
3. A ONE-WAY-SEARCH ALGORITHM FOR DENSE GRAPHS
[..]
To develop the algorithm, we begin with a simple algorithm and then modify it to
improve its running time. We call the simple algorithm Algorithm N (for βnaiveβ). The
algorithm initializes k(v) = 1 for each vertex v. The initial vertex levels are a weak
topological numbering since the initial graph contains no arcs. To add an arc (v,w),
if k(v) < k(w), merely add the arc. If, on the other hand, k(v) β₯ k(w), add the arc and
then do a selective forward search that begins by traversing (v,w) and continues until
the search traverses an arc into v (there is a cycle) or there are no more candidate
arcs to traverse. To traverse an arc (x, y), if y = v, stop (there is a cycle); otherwise, if
k(x) β₯ k(y), increase k(y) to k(x)+1 and add all arcs (y, z) to the set of candidate arcs to
traverse.
It is easy to show that (1) after each arc addition that does not form a cycle
the vertex levels are a weak topological numbering, (2) Algorithm N is correct, and
(3) 1 β€ k(v) β€ size(v) β€ n for all v. Since an arc (x, y) notEqual (v,w) is only traversed as a
result of k(x) increasing, each arc is traversed at most n times, resulting in a total time
bound of O(nm) for all arc additions.
=#
"""
    ts = TearingSetup(G [,nv])

Generate a setup object `ts` from a bi-partite graph `G`
to reduce one or more systems of equations that are present in `G`.
The returned object `ts` holds internal auxiliary storage that is
used in analysis with function [`tearEquations!`](@ref).

`G` is expected to be a vector of integer vectors
(for example of type `Vector{ Vector{Int} }`).
The optional argument `nv` is the largest integer number occurring in `G`
(= number of variables). If `nv` is not provided, it is deduced from `G`.

`TearingSetup` is useful to reduce the amount of memory to be allocated,
if function [`tearEquations!`](@ref) shall be called several times on
equation systems from `G`.
"""
mutable struct TearingSetup
    minlevel::Int
    curlevel::Int
    level::Vector{Int}
    lastlevel::Vector{Int}
    levelStack::Vector{Int}
    visitedStack::Vector{Int}
    vActive::Vector{Bool}
    vAssignable::Vector{Bool}
    visited::Vector{Bool}
    check::Vector{Bool}
    stack::Vector{Int}
    eSolved::Vector{Int}
    vSolved::Vector{Int}
    G::AbstractVector # Vector{ Vector{Int} } or Vector{ Any }
    assign::Vector{Int}

    function TearingSetup(G::AbstractVector, nv::Int)
        ne = length(G)             # number of equations
        new(0, Undefined,
            fill(Undefined, nv),   # level
            fill(Undefined, nv),   # lastlevel
            Int[],                 # levelStack
            Int[],                 # visitedStack
            fill(false, nv),       # vActive
            fill(false, nv),       # vAssignable
            fill(false, ne),       # visited
            fill(false, ne),       # check
            Int[],                 # stack
            Int[],                 # eSolved
            Int[],                 # vSolved
            G,
            fill(0, nv))           # assign
    end

    function TearingSetup(G::AbstractVector)
        # deduce the number of variables as the largest index occurring in G
        nv = 1
        for equationVariables in G
            nv = max(nv, maximum(equationVariables))
        end
        TearingSetup(G, nv)
    end
end
"""
    vs2 = reInitializeTearingSetup!(td::TearingSetup, es, vs, eSolvedFixed, vSolvedFixed, check)

Define the set of equations and the set of variables for which the equations shall be solved for
(equations `es` shall be solved for variables `vs`) and re-initialize `td`.
`eSolvedFixed`/`vSolvedFixed` must be a DAG starting at eSolvedFixed/vSolvedFixed[1].
With `check=true` the arguments are validated first. Returns `vs2`, the vector of
variables that are already (fixed) assigned.
"""
function reInitializeTearingSetup!(td::TearingSetup, es::Vector{Int}, vs::Vector{ Vector{Int} },
                                   eSolvedFixed::Vector{Int}, vSolvedFixed::Vector{Int}, check::Bool)
    # check arguments
    if check
        # Check that eSolvedFixed, vSolvedFixed are a DAG
        @assert( length(eSolvedFixed) == length(vSolvedFixed) )
        if length(eSolvedFixed) > 0
            checkDAG!(td, eSolvedFixed, vSolvedFixed)
        end
        # Check es (every equation index must exist in G)
        neq = length(td.G)
        for esi in es
            @assert(esi > 0 && esi <= neq)
        end
        # Check that eSolvedFixed and es have no elements in common
        ediff = intersect(eSolvedFixed, es)
        if length(ediff) != 0
            error("... Error in Tearing.jl: Function tearEquations!(...) was not called correctly:\n",
                  "eSolvedFixed and es are not allowed to have elements in common.")
        end
        # Check vs and that vSolvedFixed and vs have no elements in common
        nv = length(td.vActive)
        for vse in vs
            for vsi in vse
                @assert(vsi > 0 && vsi <= nv)
            end
            vdiff = intersect(vSolvedFixed, vse)
            if length(vdiff) != 0
                error("... Error in Tearing.jl: Function tearEquations!(...) was not called correctly:\n",
                      "vSolvedFixed and vs are not allowed to have elements in common.")
            end
        end
    end
    # Re-initialize td (reset all per-run state)
    td.minlevel = 0
    td.curlevel = Undefined
    for i in eachindex(td.visited)
        td.visited[i] = false
        td.check[i] = false
    end
    for i in eachindex(td.vActive)
        td.vActive[i] = false
        td.vAssignable[i] = false
        td.assign[i] = 0
        td.level[i] = Undefined
        td.lastlevel[i] = Undefined
    end
    empty!(td.levelStack)
    empty!(td.stack)
    empty!(td.visitedStack)
    empty!(td.eSolved)
    empty!(td.vSolved)
    # Define initial DAG (fixed assignments get levels 1,2,3,...)
    vs2 = Int[]
    for i in eachindex(vSolvedFixed)
        vFixed = vSolvedFixed[i]
        td.assign[vFixed] = eSolvedFixed[i]
        td.level[ vFixed] = i
        push!(vs2, vFixed)
    end
    # Define that vs and vSolvedFixed shall be treated as the unknown variables during traversal
    for vse in vs
        for vsi in vse
            td.vActive[vsi] = true
        end
    end
    for vsi in vSolvedFixed
        td.vActive[vsi] = true
    end
    return vs2
end
# True if variable v belongs to the unknowns of the current tearing problem.
in_vActive(td, v) = td.vActive[v]
# True if variable v may be assigned (solved for) in the current tearing stage.
in_vAssignable(td, v) = td.vAssignable[v]
"""
    result = isDAG!(td::TearingSetup, vStart::Int)

Traverse the potential DAG defined by `td.assign` starting from variable node `vStart`.
If no cycle is detected return true, otherwise return false.
"""
function isDAG!(td::TearingSetup, vStart::Int)::Bool
    # Initialize stacks and flags
    empty!(td.stack)
    for i in eachindex(td.visited)
        td.visited[i] = false
        td.check[i] = false
    end
    # Traverse DAG (iteratively; td.stack holds the current path of variable nodes,
    # td.check marks equations on that path)
    push!(td.stack, vStart)
    while length(td.stack) > 0
        veq = td.stack[end]       # variable on top of the stack
        eq = td.assign[veq]       # equation assigned to veq
        if !td.visited[eq]
            td.visited[eq] = true
            td.check[eq] = true   # eq is now on the active search path
            for v in td.G[eq]
                if in_vActive(td, v) && v != veq # v is an element of the unknown variables and is not the variable to solve for
                    eq2 = td.assign[v]
                    if eq2 != 0
                        if !td.visited[eq2]
                            # eq2 not yet visited
                            push!(td.stack, v)
                        elseif td.check[eq2]
                            # cycle detected
                            return false
                        end
                    #else
                    #    error("... error in Tearing.jl code in function isDAG! (should not occur): assign[$v] = 0")
                    end
                end
            end
        else
            # all successors handled: take eq off the active path
            td.check[eq] = false
            pop!(td.stack)
        end
    end
    return true
end
"""
    visitDAG!(td::TearingSetup, vVisit)

Traverse DAG starting from variable `vVisit` and store visited equations and variables in stacks
`eSolved`, `vSolved` (postorder, i.e. in forward evaluation order). If a cycle is detected,
raise an error (signals a programming error).
"""
function visitDAG!(td::TearingSetup, vVisit::Int)
    push!(td.stack, vVisit)
    while length(td.stack) > 0
        veq = td.stack[end]
        eq = td.assign[veq]
        if !td.visited[eq]
            td.visited[eq] = true
            td.check[eq] = true
            for v in td.G[eq]
                if in_vActive(td, v) && v != veq # v is an element of the unknown variables and is not the variable to solve for
                    eq2 = td.assign[v]
                    if eq2 != 0
                        if !td.visited[eq2] # visit eq2 if not yet visited
                            push!(td.stack, v)
                        elseif td.check[eq2] # cycle detected
                            error("... error in Tearing.jl code: \n",
                                  " cycle detected (should not occur): eq = ", eq, ", veq = ", veq, ", eq2 = ", eq2, ", v = ", v)
                        end
                    end
                end
            end
        else
            # all predecessors of eq are recorded: append eq/veq in evaluation order
            td.check[eq] = false
            push!(td.eSolved, eq)
            push!(td.vSolved, veq)
            pop!(td.stack)
        end
    end
    nothing
end
"""
    (eSolved, vSolved) = sortDAG!(td::TearingSetup, vs)

Sort the equations that are assigned by variables `vs` using setup object `td`
and return the sorted equations `eSolved` (in evaluation order) together with
the correspondingly assigned variables `vSolved`.
"""
function sortDAG!(td::TearingSetup, vs::Vector{Int})
    # reset traversal state
    empty!(td.stack)
    empty!(td.eSolved)
    empty!(td.vSolved)
    fill!(td.visited, false)
    fill!(td.check, false)

    # depth-first visit of every assigned variable that was not reached yet
    for v in vs
        if !td.visited[td.assign[v]]
            visitDAG!(td, v)
        end
    end
    return (td.eSolved, td.vSolved)
end
"""
    checkDAG!(td::TearingSetup, es::Vector{Int}, vs::Vector{Int})

A DAG is defined by equations `es` and variables `vs`, such that es[1] is solved for vs[1],
afterwards es[2] for vs[2] etc.
This function checks whether es/vs defines a DAG.
If a cycle is detected, an error is raised (signals a programming error).
"""
function checkDAG!(td::TearingSetup, es::Vector{Int}, vs::Vector{Int})::Nothing
    if length(vs) == 0
        return nothing
    end
    # Initialize stacks and flags
    # NOTE(review): td.assign is reset only over eachindex(td.visited) (number of
    # equations), not over its own full length; entries beyond that are overwritten
    # below only for indices in vs. Looks safe because vActive gates all reads of
    # assign during the traversal - confirm.
    empty!(td.stack)
    for i in eachindex(td.visited)
        td.visited[i] = false
        td.check[i] = false
        td.assign[i] = 0
    end
    for i in eachindex(td.vActive)
        td.vActive[i] = false
    end
    for vsi in vs
        td.vActive[vsi] = true
    end
    for i in eachindex(es)
        td.assign[vs[i]] = es[i]
    end
    # Traverse DAG starting from the first assigned variable
    push!(td.stack, vs[1])
    while length(td.stack) > 0
        veq = td.stack[end]
        eq = td.assign[veq]
        if !td.visited[eq]
            td.visited[eq] = true
            td.check[eq] = true
            for v in td.G[eq]
                if in_vActive(td, v) && v != veq # v is an element of the unknown variables and is not the variable to solve for
                    eq2 = td.assign[v]
                    if eq2 != 0
                        if !td.visited[eq2] # visit eq2 if not yet visited
                            push!(td.stack, v)
                        elseif td.check[eq2] # cycle detected
                            error("... error in Tearing.jl code (should not occur): \n",
                                  " Cycle detected: eq = ", eq, ", veq = ", veq, ", eq2 = ", eq2, ", v = ", v)
                        end
                    else
                        error("... error in Tearing.jl code (should not occur): assign[$v] = 0")
                    end
                end
            end
        else
            td.check[eq] = false
            pop!(td.stack)
        end
    end
    return nothing
end
"""
    tearEquationsCore!(vs2, td, isSolvable, es, vs, log)

One tearing stage: for every equation in `es` (in order) try to solve it for one
of its not-yet-assigned, currently assignable unknowns such that the overall
assignment remains a DAG. Accepted variables are appended to `vs2` and recorded
in `td.assign`. (`vs` itself is not read here; the assignable set is taken from
`td.vAssignable`, which the caller configures per stage.)
"""
function tearEquationsCore!(vs2, td::TearingSetup, isSolvable::Function, es::Vector{Int}, vs::Vector{Int}, log::Bool)::Nothing
    G = td.G
    vAssignable = false
    for eq in es # iterate only over equations that are not in eSolvedFixed
        for vj in G[eq]
            vAssignable = in_vAssignable(td, vj)
            vAssigned = td.assign[vj] > 0
            if log
                # NOTE(review): isSolvable(eq,vj) is evaluated here for logging and
                # again below; harmless only if isSolvable is pure.
                if !vAssigned
                    if vAssignable
                        if isSolvable(eq,vj)
                            println(" Equation $eq can be solved for variable $vj")
                        else
                            println(" Equation $eq cannot be solved for variable $vj")
                        end
                    end
                end
            end
            if vAssignable && !vAssigned && isSolvable(eq,vj)
                # vj is an element of the variables that can be assigned in this stage, but is not yet assigned
                # Add equation to graph
                td.assign[vj] = eq
                # Check for cycles (traverse DAG starting from vj)
                if isDAG!(td, vj)
                    # accept vj
                    push!(vs2, vj)
                    if log
                        println(" -> solved for variable $vj without cycles")
                    end
                    break # continue with next equation
                else
                    # cycle; remove vj from DAG and undo its changes
                    td.assign[vj] = 0
                    if log
                        println(" -> solving for variable $vj gives a cycle (so not done)")
                    end
                    # continue with next variable in equation eq
                end
            end
        end
    end
    return nothing
end
"""
(eSolved, vSolved, eResidue, vTear) = tearEquations!(GorTs, isSolvable, es, vs;
eSolvedFixed=Int[], vSolvedFixed=Int[], check=true)
This function tears a system of equations consisting of equations `es` and `eSolvedFixed`
that are functions of variables `vs` and `vSolvedFixed`.
The optional arguments `eSolvedFixed, vSolvedFixed`
define the starting Directed Acyclic Graph (solving equations eSolvedFixed[1] for variable vSolvedFixed[1],
eSolvedFixed[2] for variable vSolvedFixed[2] etc.) starting at `vSolvedFixed[1]`.
The function returns
the teared equations so that if `vTear` are given, `vSolved` can be computed from `eSolved`
in a forward sequence (so solving `eSolved[1]` for `vSolved[1]`, `eSolved[2]` for `vSolved[2]`,
and so on). `vTear` must be selected, so that the equations `eResidues` are fulfilled.
Equations `eSolved` and `eResidue` are the (reordered) union of `es` and `eSolvedFixed`.
Variables vSolved` and `vTear` are the (reordered) union of `vs` and `vSolvedFixed`.
This means that an algebraic equation system `0 = g(w)`
(`g` are equations `es` and `eSolvedFixed`;
`w` are unknowns `vs` and `vSolvedFixed`) is solved as
much as possible explicitly for the unknowns resulting in
```
w_e := g_e(w_t)
0 = g_r(w_t, w_e)
```
where
- `w` (= vs and vSolvedFixed) consists of all elements of `w_e, w_t`;
- equations `g_e` of `g` are explicitly solved for `w_e`;
- equations `g_r` are the equations of `g` that cannot be explicitly solved (= residual equations).
# Required arguments
- `GorTs`: Either a bi-partite graph `G::Vector{Vector{Int}}` or `ts::TearingSetup`
generated with constructor `ts = TearingSetup(G)`.
`ts` is re-initialized in `tearEquations!` for any call of the function.
`TearingSetup` is useful to reduce the amount of memory to be allocated,
if several equation systems shall be teared from `G`.
- `isSolvable(e,v)`: Function that returns true, if equation `e`
can be solved for variable `v` without influencing the solution space
(= rank preserving operation).
- `es::Vector{Int}`: Vector of equations that shall be solved with respect to variables `vs`.
`es` must be equations from `G`.
- `vs`: Either of type `Vector{Int}` or of type `Vector{Vector{Int}}`.
`vs` are the unknown variables that shall be solved from `es`.
If `vs::Vector{Vector{Int}}`, it is first tried to solve `es`
for `vs[1]`, then for `vs[2]` etc.
(so `vs` defines a priority to solve for variables).
# Optional arguments
- `eSolvedFixed::Vector{Int}`: Equations of `G` that are already defined to be solved for `vSolvedFixed`.
- `vSolvedFixed::Vector{Int}`: Variables of `G` that are explicitly solved from `eSolvedFixed`.
- `check::Bool`: = true, if various checks shall be performed, for example
that eSolvedFixed/vSolvedFixed and eSolved/vSolved are a DAG respectively.
# Return arguments
- `eSolved::Vector{Int}`: Equations that are explicitly solved in the order `eSolved[1], eSolved[2], ...`.
- `vSolved::Vector{Int}`: Equation `eSolved[i]` is explicitly solved for variable `vSolved[i]`.
- `eResidue::Vector{Int}`: Residual equations that are not explicitly solved.
- `vTear::Vector{Int}`: Tearing variables, so variables that are assumed to be known, when solving
equations `eSolved`.
# Example
```
using ModiaBase
G = Vector{Int}[ [1,2,4], # equation 1 depends on variables 1,2,4
[1,7],
[3,4],
[3,7],
[6,7],
[2] ]
es = [3,4,2,1] # Solve equations 3,4,2,1
vs = [3,7,1,4] # for variables 3,7,1,4
isSolvable(e,v) = true # Assume that every equation is solvable for its unknowns
(eSolved,vSolved,eResidue,vTear) = tearEquations!(G, isSolvable, es, vs)
# eSolved = [3,4,2]
# vSolved = [3,7,1]
# eResidue = [1]
# vTear = [4]
```
# Algorithm
The algorithm used in this function is sketched in the paper:
- Otter, Elmqvist (2017):
[Transformation of Differential Algebraic Array Equations to Index One Form](http://www.ep.liu.se/ecp/132/064/ecp17132565.pdf).
Modelica'2017 Conference.
The function uses several extensions of the described basic
tearing algorithm that are important for transforming higher index Differential Algebraic Equations
to index one form. Note, the traversals in Directed-Acyclic-Graphs - the core operation of
the tearing algorithm - is **not** performed with recursive function calls but with
while loops and an explicit stack, in order to avoid function stack overflow
for large algebraic loops. Tests up to 1 million equations in 1 million unknowns have been
performed.
For improving efficiency, algorithm N of the following paper is used as utility algorithm:
- Bender, Fineman, Gilbert, Tarjan (2016):
[A New Approach to Incremental Cycle Detection and Related Problems](http://dl.acm.org/citation.cfm?id=2756553).
ACM Transactions on Algorithms, Volume 12, Issue 2, Feb.
# Main developer
[Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en)
"""
# Convenience methods: build a TearingSetup from a raw graph G, and wrap a single
# variable vector vs::Vector{Int} into the Vector{Vector{Int}} priority form.
tearEquations!(G , isSolvable, es, vs ; kwargs...) = tearEquations!(TearingSetup(G), isSolvable, es, vs; kwargs...)
tearEquations!(td::TearingSetup, isSolvable, es, vs::Vector{Int}; kwargs...) = tearEquations!(td , isSolvable, es, fill(vs,1); kwargs...)
"""
    tearEquations!(td::TearingSetup, isSolvable, es, vs::Vector{Vector{Int}};
                   eSolvedFixed=Int[], vSolvedFixed=Int[], check=true, log=false)

Core method of `tearEquations!`: tear equations `es` with respect to the variable
priority classes `vs` (vs[1] is tried first, then vs[2], ...), starting from the
fixed DAG `eSolvedFixed`/`vSolvedFixed`. Returns `(eSolved, vSolved, eResidue, vTear)`;
see the docstring of the public entry method for the full contract.
"""
function tearEquations!(td::TearingSetup, isSolvable::Function, es::Vector{Int}, vs::Vector{ Vector{Int} };
                        eSolvedFixed::Vector{Int}=Int[], vSolvedFixed::Vector{Int}=Int[],
                        check::Bool=true, log::Bool=false)
    # Reinitialize td; vs2 receives the fixed, already assigned variables
    vs2 = reInitializeTearingSetup!(td, es, vs, eSolvedFixed, vSolvedFixed, check)

    # Try vs in order of priority
    for vse in vs
        if length(vse) > 0
            # Define variables that can be assigned in this stage
            for vsi in vse
                td.vAssignable[ vsi ] = true
            end

            # Perform tearing
            if log
                println(" Try to solve for variable(s) ", vse)
            end
            tearEquationsCore!(vs2, td, isSolvable, es, vse, log)

            # Undefine variables that can be assigned
            for vsi in vse
                td.vAssignable[ vsi ] = false
            end
        end
    end

    # Determine solved equations and variables (in evaluation order)
    (eSolved, vSolved) = sortDAG!(td, vs2)
    eResidue = setdiff(es, eSolved)

    # Tearing variables = all candidate unknowns that were not solved for.
    # reduce(vcat, ...; init) flattens the priority classes in order; this replaces
    # a deepcopy + append! loop (deepcopy was unnecessary: setdiff does not mutate)
    # and also handles an empty vs gracefully.
    vTear = setdiff(reduce(vcat, vs; init=Int[]), vSolved)

    # Check result
    if check
        checkDAG!(td, eSolved, vSolved)
    end
    return (eSolved, vSolved, eResidue, vTear)
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 11974 | """
Module with tests for BLTandPantelides.
* Author: Hilding Elmqvist, Mogram AB
* Date: July-August 2016. Array version: Summer 2018
* License: MIT
"""
module TestBLTandPantelides
using ModiaBase
using Test
println("... Test BLTandPantelides.jl")
@testset "BLTandPantelides" begin
# testMatch
println("\nTest match")
G = Any[
[2],
[3,8],
[4,7],
[5],
[3,6],
[],
[4,6],
[1,7],
]
assign = ModiaBase.matching(G, 8)
@show assign
@testset "testMatch" begin
@test any(assign .== 6) == false
@test assign == [8,1,2,7,4,5,3,0]
end
# testSingular
println("\nSingular system")
G = Any[
[3],
[3],
[2,3]
]
assign = ModiaBase.matching(G, 4)
@show assign
(invAssign, unAssignedVariables) = ModiaBase.invertAssign(assign, length(G))
@show invAssign, unAssignedVariables
(ass, unAssignedEquations) = ModiaBase.invertAssign(invAssign, length(assign))
@show ass, unAssignedEquations
@testset "Singular system" begin
@test assign == [0,3,1,0]
@test invAssign == [3,0,2]
@test unAssignedVariables == [1,4]
@test ass == assign
@test unAssignedEquations == [2]
end
# testTarjan
println("\nTest Tarjans strong connect")
# Reference: Tarjan, Fig.3., page 158.
G = Any[
[2],
[3,8],
[4,7],
[5],
[3,6],
[],
[4,6],
[1,7],
]
assign = 1:8
components = ModiaBase.BLT(G, assign)
@show components
@testset "testTarjan" begin
@test components == Any[Any[6],Any[7,5,4,3],Any[8,2,1]]
end
# testPendulum
println("\nFixed-length pendulum")
# Reference: Pantelides, page 222-225.
G = Any[
[3, 5],
[4, 6],
[1, 7, 9],
[2, 8, 9],
[1, 2]
]
assign = ModiaBase.matching(G, 9)
@show assign
# Variable vector is: [x, y, w, z, der(x), der(y), der(w), der(z), T], i.e. vector A is:
A = [5,6,7,8,0,0,0,0,0]
# Symbolic output of results
variables = ["x", "y", "w", "z", "der(x)", "der(y)", "der(w)", "der(z)", "T"]
equations = [
"der(x) = w",
"der(y) = z",
"der(w) = T*x",
"der(z) = T*y - g",
"0 = x^2 + y^2 - L^2"]
println("\nAssigned original equations:")
ModiaBase.printAssignedEquations(equations, variables, 1:length(equations), assign, A, fill(0, length(equations)))
ModiaBase.printUnassigned(equations, variables, assign, A, fill(0, length(equations)))
println("\nTest diagnostics for too many equations")
tooManyEquations = [equations; [
"x = cos(phi)",
"y = sin(phi)"]]
tooFewVariables = [variables; ["phi", "dummy"]]
#=
Gbig = {G; {
[1, 10]
[2, 10]
}
}
=#
Gbig = copy(G)
push!(Gbig, [1, 10])
push!(Gbig, [2, 10])
@show Gbig
Abig = [A; fill(0, 1) ]
EGbig = [Gbig; ModiaBase.buildExtendedSystem(Abig)]
EGbig = ModiaBase.addDependencies(EGbig, (length(Abig)+1):length(EGbig))
@show EGbig
equationsBig = [tooManyEquations; fill("h(., der(.)) = 0", length(EGbig)-length(tooManyEquations))]
assignBig = ModiaBase.matching(EGbig, length(EGbig))
Abig = [Abig; fill(0, length(EGbig)-length(Abig))]
ModiaBase.printAssignedEquations(equationsBig, tooFewVariables, 1:length(EGbig), assignBig, Abig, fill(0, length(EGbig)))
ModiaBase.printUnassigned(equationsBig, tooFewVariables, assignBig, Abig, fill(0, length(EGbig)))
componentsBig = ModiaBase.BLT(EGbig, assignBig)
@show componentsBig
println("\nTest diagnostics for too many variables")
tooManyVariables = [variables; ["dummy"]]
Gbig = copy(G)
Gbig[5] = [1, 10] # Wrong equation involving x and dummy
Gbig[4] = [10, 8, 9] # Wrong equation involving dummy
@show Gbig
Abig = [A; fill(0, 1) ]
EGbig = [Gbig; ModiaBase.buildExtendedSystem(Abig)]
EGbig = [EGbig; ModiaBase.buildFullIncidence(length(Abig)-length(EGbig), length(Abig))]
@show EGbig
equationsBig = copy(equations)
equationsBig[5] = "0 = x^2 + dummy^2 - L^2"
equationsBig = [equationsBig; fill("h(., der(.)) = 0", length(EGbig)-length(equationsBig))]
assignBig = ModiaBase.matching(EGbig, length(EGbig))
Abig = [Abig; fill(0, length(EGbig)-length(Abig))]
ModiaBase.printAssignedEquations(equationsBig, tooManyVariables, 1:length(EGbig), assignBig, Abig, fill(0, length(EGbig)))
ModiaBase.printUnassigned(equationsBig, tooManyVariables, assignBig, Abig, fill(0, length(EGbig)))
componentsBig = ModiaBase.BLT(EGbig, assignBig)
@show componentsBig
println("\nTest diagnostics for too few equations")
tooManyVariables = variables
Gbig = copy(G)
pop!(Gbig) # Delete last equation
@show Gbig
Abig = A
EGbig = [Gbig; ModiaBase.buildExtendedSystem(Abig)]
equationsBig = equations[1:4]
equationsBig = [equationsBig; fill("h(., der(.)) = 0", length(EGbig)-length(equationsBig))]
EGbig = [EGbig; ModiaBase.buildFullIncidence(length(Abig)-length(EGbig), length(Abig))]
equationsBig = [equationsBig; fill("full(...) = 0", length(EGbig)-length(equationsBig))]
@show EGbig
assignBig = ModiaBase.matching(EGbig, length(EGbig))
Abig = [Abig; fill(0, length(EGbig)-length(Abig))]
ModiaBase.printAssignedEquations(equationsBig, tooManyVariables, 1:length(EGbig), assignBig, Abig, fill(0, length(EGbig)))
ModiaBase.printUnassigned(equationsBig, tooManyVariables, assignBig, Abig, fill(0, length(EGbig)))
componentsBig = ModiaBase.BLT(EGbig, assignBig)
@show componentsBig
println("\nCheck consistency of equations by ModiaBase.matching extended equation set")
EG = [G; ModiaBase.buildExtendedSystem(A)]
@show EG
assign = ModiaBase.matching(EG, 9)
@show assign
@testset "testPendulum 1" begin
@test all(assign .> 0)
end
println("\nPerform index reduction")
(assign, A, B) = ModiaBase.pantelides!(G, 9, A)
@show G
@show assign
@show A
@show B
@testset "testPendulum 2" begin
# According to Pantelides, page 224-225.
@test assign == [0,0,0,0,1,2,7,4,3,9,8]
@test A == [5,6,7,8,10,11,0,0,0,0,0]
@test B == [7,8,0,0,6,9,0,0,0]
end
println("------------------------------------------------------")
println()
vActive = fill(true, length(A))
vActive[[1,3]] .= false
@show vActive
assign = ModiaBase.matching(G, length(A), vActive)
@show assign
components = ModiaBase.BLT(G, assign)
@show components
@testset "testPendulum BLT components" begin
@test assign == [0, 5, 0, 2, 1, 6, 7, 4, 3, 9, 8]
@test components == Any[Any[1], Any[5], Any[6], Any[2], Any[4, 8, 9, 7, 3]]
end
println("------------------------------------------------------")
println()
#=
println("\nAll unknowns:")
printList(variables, 1:length(A), A)
println("\nAll equations:")
printList(equations, 1:length(B), B, true)
println("\nAssigned equations:")
ModiaBase.printAssignedEquations(equations, variables, 1:length(B), assign, A, B)
println("\nSorted equations:")
ModiaBase.printSortedEquations(equations, variables, components, assign, A, B)
ModiaBase.printUnassigned(equations, variables, assign, A, B)
println("\nBuild augmented system.")
AG = [G; ModiaBase.buildFullIncidence(length(A)-length(B), length(A))]
@show AG
assignAG = ModiaBase.matching(AG, length(A))
@show assignAG
componentsAG = ModiaBase.BLT(AG, assignAG)
@show componentsAG
@testset "testPendulum 3" begin
# See Pantelides, page 222, paragraph 1.
@test componentsAG == Any[Any[11,3,7,9,8,2,10,4,5,6,1]]
end
equationsAG = [equations; fill("", length(B)-length(equations)); fill("full", length(A)-length(B))]
BAG = [B;fill(0, length(A)-length(B))]
println("\nAssigned augmented equations:")
ModiaBase.printAssignedEquations(equationsAG, variables, 1:length(BAG), assignAG, A, BAG)
println("\nSorted augmented equations:")
ModiaBase.printSortedEquations(equationsAG, variables, componentsAG, assignAG, A, BAG)
ModiaBase.printUnassigned(equationsAG, variables, assignAG, A, BAG)
=#
println("\nSet initial conditions on x and y. Should fail.")
IG1 = copy(G)
push!(IG1, [1])
push!(IG1, [2])
@show IG1
assignIG1 = ModiaBase.matching(IG1, length(A))
@show assignIG1
@testset "testPendulum 4" begin
@test any(assignIG1 .== 0)
end
componentsIG1 = ModiaBase.BLT(IG1, assignIG1)
@show componentsIG1
equationsIG = [equations; fill("", length(B)-length(equations)); fill("initial", length(A)-length(B))]
BIG = [B;fill(0, length(A)-length(B))]
ModiaBase.printUnassigned(equationsIG, variables, assignIG1, A, BIG)
println("\nSet initial conditions on x and w.")
IG2 = copy(G)
push!(IG2, [1])
push!(IG2, [3])
@show IG2
assignIG2 = ModiaBase.matching(IG2, length(A))
@show assignIG2
@testset "testPendulum 5" begin
@test all(assignIG2 .> 0)
end
componentsIG2 = ModiaBase.BLT(IG2, assignIG2)
@show componentsIG2
println("\nSorted IG2 equations:")
ModiaBase.printSortedEquations(equationsIG, variables, componentsIG2, assignIG2, A, BIG)
println("\nSet initial conditions on w and z.")
IG3 = copy(G)
push!(IG3, [3])
push!(IG3, [4])
@show IG3
assignIG3 = ModiaBase.matching(IG3, length(A))
@show assignIG3
@testset "testPendulum 6" begin
@test all(assignIG3 .> 0)
end
componentsIG3 = ModiaBase.BLT(IG3, assignIG3)
@show componentsIG3
println("\nSorted IG3 equations:")
ModiaBase.printSortedEquations(equationsIG, variables, componentsIG3, assignIG3, A, BIG)
# testPendulum1
println("\nFixed-length pendulum")
# Reference: Pantelides, page 222-225.
G = Any[
[3, 5],
[4, 6],
[1, 7, 9],
[2, 8, 9],
[1, 2]
]
# Variable vector is: [x, y, w, z, der(x), der(y), der(w), der(z), T], i.e. vector A is:
A = [5,6,7,8,0,0,0,0,0]
println("\nPerform index reduction")
(assign, A, B) = ModiaBase.pantelides!(G, 9, A)
@testset "testPendulum 2" begin
# According to Pantelides, page 224-225.
@test assign == [0,0,0,0,1,2,7,4,3,9,8]
@test A == [5,6,7,8,10,11,0,0,0,0,0]
@test B == [7,8,0,0,6,9,0,0,0]
end
println("\nSet initial conditions on x and w.")
IG = copy(G)
push!(IG, [1])
push!(IG, [3])
@show IG
assignIG = ModiaBase.matching(IG, length(A))
@show assignIG
componentsIG = ModiaBase.BLT(IG, assignIG)
@show componentsIG
@testset "testPendulum" begin
@test all(assignIG .> 0)
@test componentsIG == Any[Any[11], Any[1], Any[10], Any[5], Any[6], Any[2], Any[7,9,8,4,3]]
end
# testReactor
# Reference: Pantelides, page 225-228.
println("\nExothermic Reactor Model")
G = Any[
[1, 3, 5],
[2, 4, 5, 6],
[1, 2, 5],
[1]
]
ModiaBase.matching(G, 6)
A = [3,4,0,0,0,0]
(assign, A, B) = ModiaBase.pantelides!(G, 6, A)
@show assign
@show A
@show B
components = ModiaBase.BLT(G, assign)
@show components
@testset "testReactor" begin
@test assign == [0,0,1,7,3,2,8,6]
@test components == Any[Any[3],Any[1],Any[8],Any[6],Any[7],Any[2],Any[4],Any[5]]
end
println("\n\n----------------------\n")
end
println("\n----------------------\n")
function bigTest(G)
n = length(G)
if n <= 100
@show G
end
assign = ModiaBase.matching(G, n)
if n <= 100
@show assign
end
components = ModiaBase.BLT(G, assign)
if n <= 100
@show components
end
end
const n=5000 # Stack overflow for band and n=10000
const nFull=1000
function test()
println("\nBig tests, n = ", n)
println("\nBig test: diagonal")
@static if VERSION < v"0.7.0-DEV.2005"
G1 = Array{Any}(n)
else
G1 = Array{Any}(undef, n)
end
for i in 1:n
G1[i] = [i]
end
@time bigTest(G1)
println("\nBig test: band")
@static if VERSION < v"0.7.0-DEV.2005"
G2 = Array{Any}(n)
else
G2 = Array{Any}(undef, n)
end
for i in 1:n
G2[i] = [i-1 < 1 ? n : i-1, i, mod(i,n)+1]
end
@time bigTest(G2)
println("\nBig test: full, n=", nFull)
@static if VERSION < v"0.7.0-DEV.2005"
G3 = Array{Any}(nFull)
else
G3 = Array{Any}(undef, nFull)
end
for i in 1:nFull
G3[i] = [i for i in 1:nFull]
end
@time bigTest(G3)
end
test()
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 7011 | module TestDifferentiate
using ModiaBase
using Test
removeBlock(ex) = ex
function removeBlock(ex::Expr)
if ex.head in [:block]
ex.args[1]
else
Expr(ex.head, [removeBlock(arg) for arg in ex.args]...)
end
end
function showDifferentiate(ex, timeInvariants=[])
Base.remove_linenums!(ex)
print(removeBlock(ex), " : ")
der = ModiaBase.derivative(ex, timeInvariants)
Base.remove_linenums!(der)
der = string(removeBlock(der))
println(der)
string(der)
end
function testDifferentiate()
println("\nTest differentiate")
@test showDifferentiate(10) == "0"
@test showDifferentiate(10.0) == "0"
@test showDifferentiate(:time) == "1"
@test showDifferentiate(:x) == "der(x)"
@test showDifferentiate(:(der(x))) == "der(der(x))"
println()
@test showDifferentiate(:(x.y.z)) == "der(x.y.z)"
@test showDifferentiate(:(x = y)) == "der(x) = der(y)"
println()
@test showDifferentiate(:(+ y)) == "+(der(y))"
@test showDifferentiate(:(x + y)) == "der(x) + der(y)"
@test showDifferentiate(:(x + y + z)) == "der(x) + der(y) + der(z)"
@test showDifferentiate(:(- y)) == "-(der(y))"
@test showDifferentiate(:(x - y)) == "der(x) - der(y)"
@test showDifferentiate(:(x - y - z)) == "(der(x) - der(y)) - der(z)"
@test showDifferentiate(:(x * y)) == "der(x) * y + x * der(y)"
@test showDifferentiate(:(x * y * z)) == "der(x) * y * z + x * der(y) * z + x * y * der(z)"
@test showDifferentiate(:(x / y)) == "(one(x) / y) * der(x) + -((x / y) / y) * der(y)"
# @test showDifferentiate(:(x / y / z)) == "(one(x / y) / z) * ((one(x) / y) * der(x) + -((x / y) / y) * der(y)) + -(((x / y) / z) / z) * der(z)"
@test showDifferentiate(:(x ^ 2)) == "(2 * x ^ (2 - 1)) * der(x)"
@test showDifferentiate(:(x ^ y)) == "(y * x ^ (y - 1)) * der(x) + if x isa Real && x <= 0\n Base.oftype(float(x), NaN)\n else\n x ^ y * log(x)\n end * der(y)"
# @test showDifferentiate(:(x ^ y ^ z)) == "(y ^ z * x ^ (y ^ z - 1)) * der(x) + (x ^ (y ^ z) * log(x)) * ((z * y ^ (z - 1)) * der(y) + (y ^ z * log(y)) * der(z))"
@test showDifferentiate(:(sin(x))) == "cos(x) * der(x)"
@test showDifferentiate(:([x, y])) == "[der(x), der(y)]"
@test showDifferentiate(:(if x; y else z end)) == "if x\n der(y)\nelse\n der(z)\nend"
@test showDifferentiate(:(if x; y elseif z; v else w end)) == "if x\n der(y)\nelseif z\n der(v)\nelse\n der(w)\nend"
@test showDifferentiate(:(1+2+3)) == "0"
@test showDifferentiate(:(1-2-3)) == "0"
@test showDifferentiate(:(x-2-3)) == "der(x)"
@test showDifferentiate(:(1-x-3)) == "-(der(x))"
@test showDifferentiate(:(2*x + y + z)) == "2 * der(x) + der(y) + der(z)"
# Previous test suit
der = showDifferentiate(:(x + 5 + z = w))
@test der == "der(x) + der(z) = der(w)"
# der = showDifferentiate(ModiaBase.derivative(:(x + 5 + z = w)))
# @test der == "der(der(x)) + der(der(z)) = der(der(w))"
der = showDifferentiate(Expr(:(=), Expr(:call, :+, :x), :w))
@test der == "+(der(x)) = der(w)"
der = showDifferentiate(:(2 + 3 = w))
@test der == "0 = der(w)"
der = showDifferentiate(Expr(:(=), Expr(:call, :-, :x), :w))
@test der == "-(der(x)) = der(w)"
der = showDifferentiate(:(x - 5 - z = w))
@test der == "der(x) - der(z) = der(w)"
der = showDifferentiate(:(5 - x - z = w))
@test der == "-(der(x)) - der(z) = der(w)"
der = showDifferentiate(:(5x = w))
@test der == "5 * der(x) = der(w)"
der = showDifferentiate(:(x * 5 * z = w))
@test der == "der(x) * 5 * z + x * 5 * der(z) = der(w)"
der = showDifferentiate(:(4 * 5 * 6 = w))
@test der == "0 = der(w)"
der = showDifferentiate(:(y = x/5))
@test der == "der(y) = (one(x) / 5) * der(x)"
der = showDifferentiate(:(y = 5/y))
@test der == "der(y) = -((5 / y) / y) * der(y)"
der = showDifferentiate(:(y = [1, x]))
@test der == "der(y) = [0, der(x)]"
der = showDifferentiate(:(y = [2x 3x; 4x 5x]))
@test der == "der(y) = [2 * der(x) 3 * der(x); 4 * der(x) 5 * der(x)]"
der = showDifferentiate(:(y = [2*x 3x; 4x 5x]*[1, x]))
@test der == "der(y) = [2 * der(x) 3 * der(x); 4 * der(x) 5 * der(x)] * [1, x] + [2x 3x; 4x 5x] * [0, der(x)]"
#=
der = showDifferentiate(:(y = transpose(B) + BΒ΄))
@test der == "der(y) = transpose(der(B)) + der(BΒ΄)"
=#
der = showDifferentiate(:(y = x[5, 6]))
@test der == "der(y) = (der(x))[5, 6]"
der = showDifferentiate(:(y = x[5:7]))
@test der == "der(y) = (der(x))[5:7]"
#= der = showDifferentiate(:(y = [x for x in z]))
@test der == "der(y) = [x for x = der(z)]"
der = showDifferentiate(:(y = [x[i] for i in 1:5]))
@test der == "der(y) = [x[i] for i = nothing]"
=#
der = showDifferentiate(:(y = sin(x)))
@test der == "der(y) = cos(x) * der(x)"
der = showDifferentiate(:(y = cos(x)))
@test der == "der(y) = -(sin(x)) * der(x)"
#=
der = showDifferentiate(:(y = tan(x)))
@test der == "der(y) = (1 / cos(x) ^ 2) * der(x)"
=#
der = showDifferentiate(:(y = exp(x)))
@test der == "der(y) = exp(x) * der(x)"
der = showDifferentiate(:(z = x^y))
#@test der == "der(z) = (y * x ^ (y - 1)) * der(x) + (x ^ y * log(x)) * der(y)"
@test der == "der(z) = (y * x ^ (y - 1)) * der(x) + if x isa Real && x <= 0\n Base.oftype(float(x), NaN)\n else\n x ^ y * log(x)\n end * der(y)"
der = showDifferentiate(:(y = log(x)))
@test der == "der(y) = inv(x) * der(x)"
der = showDifferentiate(:(y = asin(x)))
@test der == "der(y) = inv(sqrt(1 - x ^ 2)) * der(x)"
der = showDifferentiate(:(y = acos(x)))
@test der == "der(y) = -(inv(sqrt(1 - x ^ 2))) * der(x)"
der = showDifferentiate(:(y = atan(x)))
@test der == "der(y) = inv(1 + x ^ 2) * der(x)"
#=
der = showDifferentiate(:(y = f(x, 5, z)))
@test der == "der(y) = f_der_1(x, 5, z) * der(x) + f_der_3(x, 5, z) * der(z)"
der = showDifferentiate(:(y = f(x, 5, g(z))))
@test der == "der(y) = f_der_1(x, 5, g(z)) * der(x) + f_der_3(x, 5, g(z)) * (g_der_1(z) * der(z))"
=#
der = showDifferentiate(:(y = true ? x : y))
@test der == "der(y) = if true\n der(x)\n else\n der(y)\n end"
#=
der = showDifferentiate(:(y = if b; x elseif false; y else z end))
@test der == "der(y) = if b\n der(x)\n else\n if false\n der(y)\n else \n der(z)\n end\n end"
=#
der = showDifferentiate(:(y = time))
@test der == "der(y) = 1"
der = showDifferentiate(:(y = a*x), [:a])
@test der == "der(y) = a * der(x)"
end
@testset "Symbolic" begin
@testset "Differentiate" begin
testDifferentiate()
end
end
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 14527 | """
module TestLinearIntegerEquations - Test ModiaBase/src/LinearIntegerEquations.jl
"""
module TestLinearIntegerEquations
using Test
using ModiaBase
println("... Test LinearIntegerEquations.jl")
@testset "Test LinearIntegerEquations.jl" begin
@testset "... Test Voltage source and resistor without ground" begin
println("\n --- Test Voltage source and resistor without ground")
#=
# Resistor
1: R.v = R.p.v - R.n.v
2: 0 = R.p.i + R.n.i
3: R.v = R.R*R.p.i
# Voltage source
4: V.v = V.p.v - V.n.v
5: 0 = V.p.i + V.n.i
6: V.v = 10
# Connect equations
connect(V.p, R.p)
connect(R.n, V.n)
-> 7: V.p.v = R.p.v
8: 0 = V.p.i + R.p.i
9: R.n.v = V.n.v
10: 0 = R.n.i + V.n.i
# Variables:
1: R.v
2: R.p.v
3: R.p.i
4: R.n.v
5: R.n.i
6: V.v
7: V.p.v
8: V.p.i
9: V.n.v
10: V.n.i
=#
G = Vector{Int}[[1, 2, 4],
[3, 5],
[1, 3],
[6, 7, 9],
[8, 10],
[6],
[7, 2],
[8, 3],
[4, 9],
[5, 10]]
eInt = Int[1,2,4,5,7,8,9,10]
GcInt = Vector{Int}[[-1, 1, -1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1],
[1,1],
[-1,1],
[1,1]]
Avar = fill(0,10)
vNames = ["R.v",
"R.p.v",
"R.p.i",
"R.n.v",
"R.n.i",
"V.v",
"V.p.v",
"V.p.i",
"V.n.v",
"V.n.i"]
name(v) = vNames[v]
(vEliminated, vProperty, nvArbitrary, redundantEquations) = simplifyLinearIntegerEquations!(G, eInt, GcInt, Avar)
printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty, nvArbitrary,
redundantEquations, name, printTest=false)
@test nvArbitrary == 1
@test vEliminated == [9, 6, 2, 4, 10, 7, 8, 5]
@test vProperty[vEliminated] == [0, 1, 1, 0, 3, 1, -3, -3]
@test redundantEquations == [10]
@test eInt == [2, 5, 7, 8, 9, 1, 4, 10]
@test G[eInt] == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
@test GcInt == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
end
@testset "... Test Voltage source and resistor with ground" begin
println("\n --- Test Voltage source and resistor with ground")
#=
# Resistor
1: R.v = R.p.v - R.n.v
2: 0 = R.p.i + R.n.i
3: R.v = R.R*R.p.i
# Voltage source
4: V.v = V.p.v - V.n.v
5: 0 = V.p.i + V.n.i
6: V.v = 10
# Ground
7: ground.p.v = 0
# Connect equations
connect(V.p, R.p)
connect(R.n, V.n)
connect(R.n, ground.p)
-> 8: V.p.v = R.p.v
9: 0 = V.p.i + R.p.i
10: R.n.v = V.n.v
11: R.n.v = ground.p.v
12: 0 = R.n.i + V.n.i + ground.p.i
# Variables:
1: R.v
2: R.p.v
3: R.p.i
4: R.n.v
5: R.n.i
6: V.v
7: V.p.v
8: V.p.i
9: V.n.v
10: V.n.i
11: ground.p.v
12: ground.p.i
=#
G = Vector{Int}[[1, 2, 4],
[3, 5],
[1, 3],
[6, 7, 9],
[8, 10],
[6],
[11],
[7, 2],
[8, 3],
[4, 9],
[4,11],
[5, 10, 12]]
eInt = Int[1,2,4,5,7,8,9,10,11,12]
GcInt = Vector{Int}[[-1, 1, -1],
[1,1],
[-1,1,-1],
[1,1],
[1],
[-1,1],
[1,1],
[-1,1],
[-1,1],
[1,1,1]]
Avar = fill(0,12)
vNames = ["R.v",
"R.p.v",
"R.p.i",
"R.n.v",
"R.n.i",
"V.v",
"V.p.v",
"V.p.i",
"V.n.v",
"V.n.i",
"ground.p.v",
"ground.p.i"]
name(v) = vNames[v]
(vEliminated, vProperty, nvArbitrary, redundantEquations) = simplifyLinearIntegerEquations!(G, eInt, GcInt, Avar)
printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty, nvArbitrary,
redundantEquations, name, printTest=false)
@test nvArbitrary == 0
@test vEliminated == [6, 12, 5, 10, 7, 2, 8, 9, 4, 11]
@test vProperty[vEliminated] == [1, 0, -3, 3, 1, 1, -3, 0, 0, 0]
@test redundantEquations == Int64[]
@test eInt == [7, 11, 10, 5, 1, 8, 9, 2, 12, 4]
@test G[eInt] == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
@test GcInt == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
end
@testset "... Test Voltage source and capacitor without ground" begin
println("\n --- Test Voltage source and capacitor without ground")
#=
# Capacitor
1: C.v = C.p.v - C.n.v
2: 0 = C.p.i + C.n.i
3: C.C*der(C.v) = C.p.i
# Voltage source
4: V.v = V.p.v - V.n.v
5: 0 = V.p.i + V.n.i
6: V.v = 10
# Connect equations
connect(V.p, C.p)
connect(C.n, V.n)
-> 7: V.p.v = C.p.v
8: 0 = V.p.i + C.p.i
9: C.n.v = V.n.v
10: 0 = C.n.i + V.n.i
# Variables:
1: C.v
2: C.p.v
3: C.p.i
4: C.n.v
5: C.n.i
6: V.v
7: V.p.v
8: V.p.i
9: V.n.v
10: V.n.i
11: der(C.v)
=#
G = Any[[1, 2, 4],
[3, 5],
[11, 3],
[6, 7, 9],
[8, 10],
[6],
[7, 2],
[8, 3],
[4, 9],
[5, 10]]
eInt = Int[1,2,4,5,7,8,9,10]
GcInt = Any[[-1, 1, -1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1],
[1,1],
[-1,1],
[1,1]]
Avar = fill(0,10)
pushfirst!(Avar,11)
vNames = ["C.v",
"C.p.v",
"C.p.i",
"C.n.v",
"C.n.i",
"V.v",
"V.p.v",
"V.p.i",
"V.n.v",
"V.n.i",
"der(C.v)"]
name(v) = vNames[v]
(vEliminated, vProperty, nvArbitrary, redundantEquations) = simplifyLinearIntegerEquations!(G, eInt, GcInt, Avar)
printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty, nvArbitrary,
redundantEquations, name, printTest=false)
@test nvArbitrary == 1
@test vEliminated == [9, 6, 2, 4, 10, 7, 8, 5]
@test vProperty[vEliminated] == [0, 1, 1, 0, 3, 1, -3, -3]
@test redundantEquations == [10]
@test eInt == [2, 5, 7, 8, 9, 1, 4, 10]
@test G[eInt] == Any[Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
@test GcInt == Any[Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[]]
end
@testset "... Test two inductances in series with parallel resistors without ground" begin
println("\n --- Test two inductances in series with parallel resistors without ground")
#=
# Inductor 1
1: L1.v = L1.p.v - L1.n.v
2: 0 = L1.p.i + L1.n.i
3: L1.L*der(L1.p.i) = L1.v
# Inductor 2
4: L2.v = L2.p.v - L2.n.v
5: 0 = L2.p.i + L2.n.i
6: L2.L*der(L2.p.i) = L2.v
# Resistor 1
7: R1.v = R1.p.v - R1.n.v
8: 0 = R1.p.i + R1.n.i
9: R1.v = R1.R*R1.p.i
# Resistor 2
10: R2.v = R2.p.v - R2.n.v
11: 0 = R2.p.i + R2.n.i
12: R2.v = R2.R*R2.p.i
# Voltage source
13: V.v = V.p.v - V.n.v
14: 0 = V.p.i + V.n.i
15: V.v = 10
# Connect equations
connect(V.p, L1.p)
connect(L1.n, R1.p)
connect(L1.n, R2.p)
connect(R1.n, L2.p)
connect(R2.n, L2.p)
connect(L2.n, V.n)
-> 16: V.p.v = L1.p.v
17: 0 = V.p.i + L1.p.i
18: L1.n.v = R1.p.v
19: L1.n.v = R2.p.v
20: 0 = L1.n.i + R1.p.i + R2.p.i
21: R1.n.v = L2.p.v
22: R2.n.v = L2.p.v
23: 0 = R1.n.i + R2.n.i + L2.p.i
24: L2.n.v = V.n.v
25: 0 = L2.n.i + V.n.i
# Variables:
1: L1.v
2: L1.p.v
3: L1.p.i
4: L1.n.v
5: L1.n.i
6: L2.v
7: L2.p.v
8: L2.p.i
9: L2.n.v
10: L2.n.i
11: R1.v
12: R1.p.v
13: R1.p.i
14: R1.n.v
15: R1.n.i
16: R2.v
17: R2.p.v
18: R2.p.i
19: R2.n.v
20: R2.n.i
21: V.v
22: V.p.v
23: V.p.i
24: V.n.v
25: V.n.i
26: der(L1.p.i)
27: der(L2.p.i)
=#
G = [[1, 2, 4],
[3, 5],
[26, 1],
[6, 7, 9],
[8,10],
[27, 6],
[11, 12, 14],
[13, 15],
[11, 13],
[16, 17, 19],
[18, 20],
[16, 18],
[21, 22, 24],
[23, 25],
[21],
[22, 2],
[23, 3],
[4, 12],
[4, 17],
[5, 13, 18],
[14, 7],
[19, 7],
[15, 20, 8],
[9, 24],
[10, 25]]
eInt = Int[1,2,4,5,7,8,10,11,13,14,16,17,18,19,20,21,22,23,24,25]
GcInt = [[-1, 1, -1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1,-1],
[1,1],
[-1,1],
[1,1],
[-1,1],
[-1,1],
[1,1,1],
[-1,1],
[-1,1],
[1,1,1],
[-1,1],
[1,1]]
Avar = fill(0,27)
Avar[3] = 26
Avar[8] = 27
vNames = ["L1.v",
"L1.p.v",
"L1.p.i",
"L1.n.v",
"L1.n.i",
"L2.v",
"L2.p.v",
"L2.p.i",
"L2.n.v",
"L2.n.i",
"R1.v",
"R1.p.v",
"R1.p.i",
"R1.n.v",
"R1.n.i",
"R2.v",
"R2.p.v",
"R2.p.i",
"R2.n.v",
"R2.n.i",
"V.v",
"V.p.v",
"V.p.i",
"V.n.v",
"V.n.i",
"der(L1.p.i)",
"der(L2.p.i)"]
var_name(v) = vNames[v]
(vEliminated, vProperty, nvArbitrary, redundantEquations) = simplifyLinearIntegerEquations!(G, eInt, GcInt, Avar; log=true, var_name = var_name)
printSimplifiedLinearIntegerEquations(G, eInt, GcInt, vEliminated, vProperty, nvArbitrary, redundantEquations, var_name, printTest=false)
@test nvArbitrary == 1
@test vEliminated == [7, 16, 12, 9, 19, 14, 17, 4, 25, 22, 23, 20, 15, 10, 5, 6]
@test vProperty[vEliminated] == [0, 11, 11, 24, 0, 0, 11, 11, 3, 2, -3, -18, -13, -8, -3, -24]
@test redundantEquations == [25]
@test eInt == [2, 5, 8, 11, 14, 16, 17, 18, 19, 21, 22, 24, 1, 13, 7, 10, 4, 23, 20, 25]
@test G[eInt] == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], [2, 1, 11], [21, 2, 24], Int64[], Int64[], Int64[], [13, 8, 18], [3, 8], Int64[]]
@test GcInt == [Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], Int64[], [1, -1, -1], [-1, 1, -1], Int64[], Int64[], Int64[], [-1, 1, -1], [1, -1], Int64[]]
end
end
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 22899 | """
module TestNonlinearEquations
Module to test ModiaBase/src/NonlinearEquations.jl
"""
module TestNonlinearEquations
using Test
using ModiaBase.NonlinearEquations
using LinearAlgebra
println("... Test NonlinearEquations.jl")
@testset "\nTest NonlinearEquations.jl" begin
loglevel = info
# Linear underdetermined test
@testset "... Test linear problem:" begin
A = [1 2 3; 4 5 6]
xref = [-1; -2; 5]
b = A*xref
m = size(A, 1)
n = size(A, 2)
# Test function
function fx!(fx::Vector, x::Vector)
@assert(length(x) == n)
@assert(length(fx) == m)
res = A*x - b
for i in 1:m
fx[i] = res[i]
end
return true
end
# Solver parameters
tol = 1e-6
maxiter = 50
nonlinearity = mild
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("Linear problem:")
end
x = zeros(Float64, n)
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [-2.33333333333333, 0.666666666666666, 3.66666666666666])
end
# Determined test from https://www.zib.de/codelib/NewtonLib/
@testset "... Test Chebyquad problem of dimensions 2..7" begin
# Test function
function fx!(fx::Vector, x::Vector)
n = length(x)
for i in range(1, n-1, step=2)
fx[i] = 0.0
fx[i+1] = n / ((i + 1.0)^2 - 1.0)
end
if isodd(n)
fx[n] = 0.0
end
for l = 1:n
factt = 4.0 * x[l] - 2.0
ti2 = 1.0
ti1 = 0.5 * factt
fx[1] = ti1 + fx[1]
for i = 2:n
ti = factt * ti1 - ti2
fx[i] = ti + fx[i]
ti2 = ti1
ti1 = ti
end
end
return true
end
# Test parameters
nn = 7 # max. Chebyquad problem dimension
# Checks
@assert(nn >= 2)
# Solver parameters
tol = 1e-6
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
for n = 2:nn
if n == 6
continue
end
if loglevel >= info
println("Chebyquad problem n = $n:")
end
x = zeros(Float64, n)
y = zeros(Float64, n)
fscale = zeros(Float64, n)
for i = 1:n
x[i] = i / (n + 1)
fscale[i] = 1.0
end
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, n, x, fscale; nonlinearity=high, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
if n == 2
@test isapprox(x, [0.2113248654051863, 0.7886751345948136])
elseif n == 3
@test isapprox(x, [0.14644660948566998, 0.4999999999999999, 0.8535533905143302])
elseif n == 4
@test isapprox(x, [0.10267275999827528, 0.4062037728007276, 0.5937962271992724, 0.8973272400017247])
elseif n == 5
@test isapprox(x, [0.08375125622814243, 0.31272929406063793, 0.5, 0.6872707059393621, 0.9162487437718575])
elseif n == 6
@test converged == false
elseif n == 7
@test isapprox(x, [0.05806914954476061, 0.23517161265728312, 0.33804409443504935, 0.5, 0.6619559055649507, 0.764828387342717, 0.9419308504552394])
elseif n == 8
@test converged == false
elseif n == 9
@test converged == false
end
end
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/2by2.jl
# and https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/already_converged.jl
@testset "... Test 2by2 problem" begin
# Test function
m = 2
n = 2
function f_2by2!(F, x)
F[1] = (x[1]+3)*(x[2]^3-7)+18
F[2] = sin(x[2]*exp(x[1])-1)
return true
end
# Solver parameters
tol = 1e-7
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
for ii in 1:2
if loglevel >= info
println("2by2 problem $ii:")
end
if ii == 1
x = [-0.5, 1.4]
else
x = [0.0, 1.0] # already converged
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(f_2by2!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
f_2by2!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [0.0, 1.0], atol=100*tol)
end
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK Powell singular problem" begin
# Test function
m = 4
n = 4
function fx!(fvec, x)
fvec[1] = x[1] + 10x[2]
fvec[2] = sqrt(5)*(x[3] - x[4])
fvec[3] = (x[2] - 2x[3])^2
fvec[4] = sqrt(10)*(x[1] - x[4])^2
return true
end
# Initial guess
x = [3.0, -1.0, 0.0, 1.0]
# Solver parameters
tol = 1e-8
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("MINPACK Powell singular problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [0.0, 0.0, 0.0, 0.0], atol=100*tol)
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK Powell badly scaled problem" begin
# Test function
m = 2
n = 2
c1 = 1e4
c2 = 1.0001
function fx!(fvec, x)
fvec[1] = c1*x[1]*x[2] - 1
fvec[2] = exp(-x[1]) + exp(-x[2]) - c2
return true
end
# Initial guess
x = [0.0, 1.0]
# Solver parameters
tol = 1e-8
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("MINPACK Powell badly scaled problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [1.0981593296997054e-5, 9.106146739867453], atol=100*tol)
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK Wood problem" begin
# Test function
m = 4
n = 4
c3 = 2e2
c4 = 2.02e1
c5 = 1.98e1
c6 = 1.8e2
function fx!(F, x)
temp1 = x[2] - x[1]^2
temp2 = x[4] - x[3]^2
F[1] = -c3*x[1]*temp1 - (1 - x[1])
F[2] = c3*temp1 + c4*(x[2] - 1) + c5*(x[4] - 1)
F[3] = -c6*x[3]*temp2 - (1 - x[3])
F[4] = c6*temp2 + c4*(x[4] - 1) + c5*(x[2] - 1)
return true
end
# Initial guess
x = [-3.0, -1.0, -3.0, -1.0]
# Solver parameters
tol = 1e-8
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = true
if loglevel >= info
println("MINPACK Wood problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [-0.967974024937593, 0.9471391408178416, -0.9695163103315912, 0.9512476657923256], atol=100*tol)
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK helical valley problem" begin
# Test function
m = 3
n = 3
tpi = 8*atan(1)
c7 = 2.5e-1
c8 = 5e-1
function fx!(fvec, x)
if x[1] > 0
temp1 = atan(x[2]/x[1])/tpi
elseif x[1] < 0
temp1 = atan(x[2]/x[1])/tpi + c8
else
temp1 = c7*sign(x[2])
end
temp2 = sqrt(x[1]^2+x[2]^2)
fvec[1] = 10(x[3] - 10*temp1)
fvec[2] = 10(temp2 - 1)
fvec[3] = x[3]
return true
end
# Initial guess
x = [-1.0, 0.0, 0.0]
# Solver parameters
tol = 1e-8
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("MINPACK helical valey problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [1.0, 0.0, 0.0], atol=100*tol)
end
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK Watson problem" begin
# Solver parameters
tol = 1e-7
maxiter = 50
nonlinearity = high
restricted = false
quasi = true
forwardjac = true
for n in (6, 9)
# Test function
c9 = 2.9e1
function fx!(fvec, x)
fill!(fvec, 0)
for i = 1:29
ti = i/c9
sum1 = 0.0
temp = 1.0
for j = 2:n
sum1 += (j-1)*temp*x[j]
temp *= ti
end
sum2 = 0.0
temp = 1.0
for j = 1:n
sum2 += temp*x[j]
temp *= ti
end
temp1 = sum1 - sum2^2 - 1
temp2 = 2*ti*sum2
temp = 1/ti
for k = 1:n
fvec[k] += temp*(k-1-temp2)*temp1
temp *= ti
end
end
temp = x[2] - x[1]^2 - 1
fvec[1] += x[1]*(1-2temp)
fvec[2] += temp
return true
end
# Initial guess
x = zeros(n)
if loglevel >= info
println("MINPACK Watson problem n = $n:")
end
y = zeros(Float64, n)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, n, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
if n == 6
@test isapprox(x, [-0.01572508640145857, 1.0124348693691099, -0.2329916259567373, 1.260430087799606, -1.5137289227222785, 0.9929964324311331], atol=100*tol)
elseif n == 9
@test isapprox(x, [-1.530703652140686e-5, 0.9997897039319482, 0.014763963693568192, 0.14634232829924446, 1.000821103005262, -2.617731140520362, 4.104403164480623, -3.143612278557626, 1.0526264080104843], atol=100*tol)
end
end
end
#=
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test MINPACK trigonometric problem" begin
# Test function
m = 10
n = 10
function fx!(fvec, x)
for j = 1:n
fvec[j] = cos(x[j])
end
sum1 = sum(fvec)
for k = 1:n
fvec[k] = n+k-sin(x[k]) - sum1 - k*fvec[k]
end
return true
end
# Start point
x = ones(n)/n
# Solver parameters
tol = 1e-8
maxiter = 500
nonlinearity = extreme
restricted = true
quasi = false
forwardjac = true
if loglevel >= info
println("MINPACK trigonometric problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
end
=#
# Determined test from https://github.com/JuliaNLSolvers/NLsolve.jl/blob/master/test/minpack.jl
@testset "... Test determined Rosenbrock problem" begin
# Test function
m = 2
n = 2
function fx!(fvec, x)
fvec[1] = 1 - x[1]
fvec[2] = 10*(x[2] - x[1]^2)
return true
end
# Start point
x = [0.0, -0.1]
# Solver parameters
tol = 1e-6
maxiter = 500
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("Determined Rosenbrock problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [1.0, 1.0], atol=100*tol)
end
# Underdetermined test from https://de.wikipedia.org/wiki/Gau%C3%9F-Newton-Verfahren#Beispiel
@testset "... Test underdetermined Rosenbrock problem" begin
# Test function
m = 1
n = 2
a = 1.0
b = 100.0
function fx!(fx::Vector, x::Vector)
@assert(length(fx) == m)
@assert(length(x) == n)
fx[1] = (a - x[1])^2 + b*(x[2] - x[1]^2)^2
return true
end
# Start point
x = [0.0, -0.1]
# Solver parameters
tol = 1e-6
maxiter = 500
nonlinearity = high
restricted = false
quasi = false
forwardjac = true
if loglevel >= info
println("Underdeterminded Rosenbrock problem:")
end
y = zeros(Float64, m)
fscale = ones(Float64, n)
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale")
end
converged = solveNonlinearEquations!(fx!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
fx!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
@test isapprox(x, [1.0, 1.0], atol=100*tol)
end
# Underdetermined slider-crank initial state problem 1
@testset "... Test slider-crank initial state problem 1" begin
# kinematic parameters
wheelRadius = 0.1
barLength = 0.5
# function for computation of bar end y-component
function res_z!(res::Vector, ang::Vector)
phiWheel = ang[1]
phiBar = ang[2]
absPosJoint = wheelRadius*[-sin(phiWheel), cos(phiWheel)]
relPosJointEnd = barLength*[cos(phiBar), sin(phiBar)]
relTransJoint = [cos(phiWheel) -sin(phiWheel); sin(phiWheel) cos(phiWheel)]
absPosEnd = absPosJoint + relTransJoint * relPosJointEnd
res[1] = absPosEnd[2]
return true
end
# initial guess
phiWheel = 10.0
phiBar = 5.0
# solver parameters
tol = 1e-6
maxiter = 50
log = true
nonlinearity = mild
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("Slider-crank initial state problem 1:")
end
n = 2
m = 1
x = zeros(Float64, n)
y = zeros(Float64, m)
fscale = ones(Float64, n)
x[1] = phiWheel
x[2] = phiBar
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale\n")
end
converged = solveNonlinearEquations!(res_z!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
res_z!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
# consistent states
phiWheel = x[1]
phiBar = x[2]
@test isapprox([phiWheel, phiBar], [10.471751810654157, 5.1360050971720925], atol=100*tol)
end
# Underdetermined slider-crank initial state problem 2
@testset "... Test slider-crank initial state problem 2" begin
# kinematic parameters
wheelRadius = 0.1
barLength = 0.5
# function for computation of joint position residual
function res!(res::Vector, x::Vector)
phiWheel = x[1]
xBar = x[2]
phiBar = x[3]
absPosWheelJoint = wheelRadius*[-sin(phiWheel), cos(phiWheel)]
absPosBarJoint = [xBar, 0.0] + barLength*[cos(phiBar), sin(phiBar)]
res .= absPosWheelJoint .- absPosBarJoint
return true
end
# initial guess
phiWheel = 0.3
xBar = -0.4
phiBar = -0.1
# solver parameters
tol = 1e-6
maxiter = 50
log = true
nonlinearity = high
restricted = false
quasi = true
forwardjac = false
if loglevel >= info
println("Slider-crank initial state problem 2:")
end
n = 3
m = 2
x = zeros(Float64, n)
y = zeros(Float64, m)
fscale = ones(Float64, n)
x[1] = phiWheel
x[2] = xBar
x[3] = phiBar
if loglevel == debug
println("* start vector = $x")
println("* scale vector = $fscale\n")
end
converged = solveNonlinearEquations!(res!, m, x, fscale; nonlinearity=nonlinearity, restricted=restricted, xtol=tol, maxiter=maxiter, quasi=quasi, forwardjac=forwardjac, loglevel=loglevel)
if loglevel == debug
println("* solution = $x")
res!(y, x)
println("* rms(f(x*)) = $(norm(y)/sqrt(n))")
end
# consistent states
phiWheel = x[1]
xBar = x[2]
phiBar = x[3]
@test isapprox([phiWheel, xBar, phiBar], [0.30614037233377206, -0.5209621708411616, 0.1918759765115025], atol=100*tol)
end
end
end
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 5444 | module TestSymbolic
using ModiaBase
using ModiaBase.BLTandPantelidesUtilities
using ModiaBase.Symbolic
using Test
"""
    showFindIncidence(ex)

Print expression `ex` (with line-number nodes removed and `block` wrappers
stripped by `removeBlock`) followed by the incidence collected by
`findIncidence!`, and return the incidence vector.
"""
function showFindIncidence(ex)
    Base.remove_linenums!(ex)
    stripped = removeBlock(ex)
    print(rpad(string(stripped), 40))
    found = Incidence[]
    findIncidence!(stripped, found)
    println(found)
    return found
end
function testFindIncidence()
# Check findIncidence! on literals, plain symbols, nested expressions and
# dotted/indexed references (each case also printed by showFindIncidence).
println("testFindIncidence")
@test showFindIncidence(10) == []
@test showFindIncidence(:(x)) == [:x]
@test showFindIncidence(:(x*(y + z*sin(w)))) == [:x, :y, :z, :w]
@test showFindIncidence(:(x, x.y, x.y.z, x.y[z], f(x.y))) == [:x, :(x.y), :(x.y.z), :(x.y), :z, :(x.y)]
println()
end
"""
    showLinearFactor(ex, x)

Print expression/equation `ex`, the variable `x`, and the result of
`linearFactor(ex, x)`; return the result rendered as a `String`.
"""
function showLinearFactor(ex, x)
    Base.remove_linenums!(ex)
    ex = removeBlock(ex)
    print(rpad(string(ex), 20))
    print(rpad(string(x), 10))
    factor = linearFactor(ex, x)
    Base.remove_linenums!.(factor)
    factor = string(removeBlock(factor))
    println(factor)
    return factor  # already a String; the former trailing string(factor) was redundant
end
function testLinearFactors()
# Check linearFactor(ex, x): each expected string is "(rest, factor, linear)"
# where ex == factor*x + rest and `linear` tells whether ex is linear in x.
println("testLinearFactors")
@test showLinearFactor(:(10), :x) == "(10, 0, true)"
@test showLinearFactor(:(20.0), :x) == "(20.0, 0, true)"
@test showLinearFactor(:(x), :x) == "(0, 1, true)"
@test showLinearFactor(:(x.y.z), :(x.y.z)) == "(0, 1, true)"
@test showLinearFactor(:(y), :x) == "(:y, 0, true)"
@test showLinearFactor(:(x + y), :x) == "(:y, 1, true)"
@test showLinearFactor(:(x + y + x + y), :x) == "(:(y + y), 2, true)"
@test showLinearFactor(:(x - y), :x) == "(:(-y), 1, true)"
@test showLinearFactor(:(y - x), :x) == "(:y, -1, true)"
@test showLinearFactor(:(-x + y), :x) == "(:y, -1, true)"
@test showLinearFactor(:(x - y - z), :x) == "(:(-y - z), 1, true)"
@test showLinearFactor(:(x - y - x), :x) == "(:(-y), 0, true)"
@test showLinearFactor(:(x - y + x), :x) == "(:(-y), 2, true)"
@test showLinearFactor(:(x * y), :x) == "(0, :y, true)"
@test showLinearFactor(:(x * x), :x) == "(0, 0, false)"
@test showLinearFactor(:(2 * x), :x) == "(0, 2, true)"
@test showLinearFactor(:(x * y * z), :x) == "(0, :(z * y), true)"
@test showLinearFactor(:(x / y), :x) == "(0, :(1 / y), true)"
@test showLinearFactor(:(y / x), :x) == "(:(y / 0), NaN, false)"
@test showLinearFactor(:(x / y / z), :x) == "(0, :((1 / y) / z), true)"
@test showLinearFactor(:(y \ x), :x) == "(0, :(1 / y), true)"
@test showLinearFactor(:(sin(x)), :x) == "(:(sin(x)), 0, false)"
@test showLinearFactor(:(sin(x) + x), :x) == "(:(sin(x)), 1, false)"
@test showLinearFactor(:(sin(y)), :x) == "(:(sin(y)), 0, true)"
# if-expressions: rest and factor become branch-wise if-expressions
@test showLinearFactor(:(if cond; x else y end), :x) == "(:(if cond\n 0\n else\n y\n end), :(if cond\n 1\n else\n 0\n end), true)"
@test showLinearFactor(:(if cond; x elseif cond2; y else z end), :x) == "(:(if cond\n 0\n else\n if cond2\n y\n else\n z\n end\n end), :(if cond\n 1\n else\n 0\n end), true)"
@test showLinearFactor(:(if cond; x+1 else 2x+2 end), :x) == "(:(if cond\n 1\n else\n 2\n end), :(if cond\n 1\n else\n 2\n end), true)"
@test showLinearFactor(:(x[5]), :x) == "(:(x[5]), 0, false)"
# equations (ex is lhs = rhs): treated as lhs - rhs
@test showLinearFactor(:(x = y), :x) == "(:(-y), 1, true)"
@test showLinearFactor(:(x + 1 = y + 2), :x) == "(:(1 - (y + 2)), 1, true)"
@test showLinearFactor(:(der(x) + x), :(der(x))) == "(:x, 1, true)"
@test showLinearFactor(:(R*i = u), :i) == "(:(-u), :R, true)"
println()
end
"""
    showSolveEquation(ex, x)

Print equation `ex`, the unknown `x`, and the result of
`solveEquation(ex, x)`; return the result rendered as a `String`.
"""
function showSolveEquation(ex, x)
    Base.remove_linenums!(ex)
    ex = removeBlock(ex)
    print(rpad(string(ex), 20))
    print(rpad(string(x), 10))
    factor = solveEquation(ex, x)
    Base.remove_linenums!.(factor)
    factor = string(removeBlock(factor))
    println(factor)
    return factor  # already a String; the former trailing string(factor) was redundant
end
function testSolveEquations()
# Check that solveEquation isolates each unknown of R*i = u.
println("testSolveEquations")
@test showSolveEquation(:(R*i = u), :i) == "(:(i = u / R), true)"
@test showSolveEquation(:(R*i = u), :u) == "(:(u = R * i), true)"
@test showSolveEquation(:(R*i = u), :R) == "(:(R = u / i), true)"
println()
end
"""
    showGetCoefficients(ex)

Print equation `ex` and the result of `getCoefficients(ex)`;
return the result rendered as a `String`.
"""
function showGetCoefficients(ex)
    Base.remove_linenums!(ex)
    ex = removeBlock(ex)
    print(rpad(string(ex), 20))
    factor = getCoefficients(ex)
    Base.remove_linenums!(factor)
    factor = string(removeBlock(factor))
    println(factor)
    return factor  # already a String; the former trailing string(factor) was redundant
end
function testGetCoefficients()
# Check getCoefficients: expected string is "(variables, coefficients, rest, linear)".
println("testGetCoefficients")
@test showGetCoefficients(:(v1 = 0)) == "(Union{Expr, Symbol}[:v1], Any[1], 0, true)"
@test showGetCoefficients(:(v1 = 10)) == "(Union{Expr, Symbol}[:v1], Any[1], -10, true)"
@test showGetCoefficients(:(v1 = v2)) == "(Union{Expr, Symbol}[:v1, :v2], Any[1, -1], 0, true)"
@test showGetCoefficients(:(v1 = v2 + sin(v3))) == "(Union{Expr, Symbol}[:v1, :v2, :v3], Any[1, -1], :(-(sin(v3))), false)"
@test showGetCoefficients(:(v1 = -v2)) == "(Union{Expr, Symbol}[:v1, :v2], Any[1, 1], 0, true)"
@test showGetCoefficients(:(R*i = u)) == "(Union{Expr, Symbol}[:R, :i, :u], Any[:i], :(-u), false)"
println()
end
@testset "Symbolic" begin
@testset "TestFindIncidence" begin
testFindIncidence()
end
@testset "TestLinearFactors" begin
testLinearFactors()
end
@testset "TestSolveEquations" begin
testSolveEquations()
end
@testset "TestGetCoefficients" begin
testGetCoefficients()
end
end
end | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 6321 | # License for this file: MIT (expat)
# Copyright 2017-2020, DLR Institute of System Dynamics and Control
"""
module TestTearing
Module to test ModiaBase/src/Tearing.jl
# Main developer
[Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en)
"""
module TestTearing
using Test
using ModiaBase
println("... Test Tearing.jl")
# Return true if equation `e` may be solved for variable `v`, i.e. `v`
# occurs in the solvable-incidence list `Gsolvable[e]` of that equation.
function isSolvable(Gsolvable, e::Int, v::Int)
    return v ∈ Gsolvable[e]
end
@testset "\nTest Tearing.jl" begin
@testset "... Test equation system 1" begin
G = Any[[1,2,3,4,5], [1,2,3,4,8], [4,1,2], [3,2,1], [2,1,7]]
es = [2,3,5,1]
vs = [1,3,7,8]
td = ModiaBase.TearingSetup(G, 8)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [3,8]
@test eSolved == [2,5]
@test vSolved == [1,7]
@test eResidue == [3,1]
end
@testset "... Test equation system 2" begin
#=
eSolved = [2,5]
vSolved = [1,7]
eResidue = [3,1]
vTear = [3,8]
ignored variables: 2,4,5,6
known : 2,3,4,5,6,8
unknown: 1,7
[1,2,3,4,8] is solved for 1
[2,1,7] is solved for 7
=#
#=
unknowns: x1dd, x2dd, x3dd, x4d, x5
1: 0 = x1dd - x2dd
2: 0 = x1dd + x2dd - x3dd + x6ddd
3: 0 = x1d + x3dd - x4d
4: 0 = 2*x1dd + x2dd + x3dd + x4d + x6ddd
5: 0 = 3*x1dd + 2*x2dd + x5
Variables
1: x1dd
2: x2dd
3: x3dd
4: x4d
5: x5
6: x1d
7: x6ddd
vTear = [2]
eSolved = [1,2,3,5]
vSolved = [1,3,4,5]
eResidue = [4]
=#
G = Any[[1,2],
[1,2,3,7],
[6,3,4],
[1,2,3,4,7],
[1,2,5]]
es = [1,2,3,4,5]
vs = [1,2,3,4,5]
td2 = TearingSetup(G, 7)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td2, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [2]
@test eSolved == [1,2,3,5]
@test vSolved == [1,3,4,5]
@test eResidue == [4]
end
@testset "... Test equation system 3" begin
G = Any[ [1,2,4,7],
[6,3,4],
[2,5],
[4,5],
[1,2,3,7]]
es = [1,2,3,4,5]
vs = [1,2,3,4,5]
td3 = TearingSetup(G, 7)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td3, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [5]
@test eSolved == [4,3,1,2]
@test vSolved == [4,2,1,3]
@test eResidue == [5]
end
@testset "... Test equation system 4" begin
#=
Same as test2, but indices changed
unknowns: z1, z2, z4, z6, z7
1: 0 = f1(z2,z6,z7)
2: 0 = f2(z1,z7)
3: 0 = f3(z1,z2,z3,z7)
4: 0 = f4(z6,z3,z4)
5: 0 = f5(z1,z2,z3,z4,z7)
6: 0 = f6(z1,z2,z5)
7: 0 = f7(z4,z5,z6)
Variables
1: z1 (x1dd)
2: z2 (x6ddd)
3: z3 (x3dd)
4: z4 (x4d)
5: z5 (x5)
6: z6 (x1d)
7: z7 (x2dd)
=#
G = Any[[2,6,7],
[1,2],
[1,2,3,7],
[6,3,4],
[1,2,3,4,7],
[1,2,5],
[4,5,6]]
es = [2,3,4,5,6]
vs = [1,3,4,5,7]
td4 = TearingSetup(G, 7)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td4, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [7]
@test eSolved == [2,3,4,6]
@test vSolved == [1,3,4,5]
@test eResidue == [5]
end
@testset "... Test equation system 5" begin
#=
Check "marked" flag
unknowns: z1, z2, z3, z4, z5, z6
1: 0 = f1(z1,z6)
2: 0 = f2(z2,z1)
3: 0 = f3(z3,z2)
4: 0 = f4(z4,z3)
5: 0 = f5(z5,z4)
6: 0 = f6(z6,z1)
=#
G = Any[[1,6],
[2,1],
[3,2],
[4,3],
[5,4],
[6,5]]
es = [1,2,3,4,5,6]
vs = [1,2,3,4,5,6]
td5 = TearingSetup(G, 6)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td5, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [6]
@test eSolved == [1,2,3,4,5]
@test vSolved == [1,2,3,4,5]
@test eResidue == [6]
end
@testset "... Test equation system 6" begin
#=
Check predefined equations
phi1 = phi2
w1 = der(phi1)
w2 = der(phi2)
der(phi1) = der(phi2)
unknowns: 1:w1, 2:w2, 3:der(phi1), 4:der(phi2)
1: 0 = f1(w1,der(phi1))
2: 0 = f2(w2,der(phi2))
3: 0 = f3(der(phi1), der(phi2))
=#
G = Vector{Int}[[1,3],
[2,4],
[3,4]]
es = [1,2,3]
vs = [1,2,3,4]
td = TearingSetup(G, 4)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [4]
@test eSolved == [3,1,2]
@test vSolved == [3,1,2]
@test eResidue == Int[]
es = [1,2]
vs = Vector{Int}[[4], [1,2]]
td = TearingSetup(G, 4)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td, (e,v) -> isSolvable(G,e,v), es, vs; eSolvedFixed=[3], vSolvedFixed=[3])
@test vTear == [2]
@test eSolved == [2,3,1]
@test vSolved == [4,3,1]
@test eResidue == Int[]
end
@testset "... Test equation systems 7a and 7b" begin
neq = 10
G = fill(fill(0, 0), neq)
for i in 1:neq
G[i] = i == 1 ? [1,neq] : [i,i - 1]
end
#G = [[i, (i==1 ? neq : i-1)] for i = 1:neq]
es = collect(1:neq)
vs = es
td5 = TearingSetup(G, neq)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td5, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [neq]
for i in 1:neq
G[i] = i == neq ? [1,neq] : [neq - i + 1,neq - i]
end
#println("G = ")
#display(G)
#G = [[i, (i==1 ? neq : i-1)] for i = neq:-1:1]
es = collect(1:neq)
vs = es
td5 = TearingSetup(G, neq)
(eSolved, vSolved, eResidue, vTear) = tearEquations!(td5, (e,v) -> isSolvable(G,e,v), es, vs)
@test vTear == [1]
end
end
end # module
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | code | 293 | module Runtests
using Test
# Top-level ModiaBase test suite: each include runs one self-contained test module.
@testset "Test ModiaBase" begin
include("TestSymbolic.jl")
include("TestBLTandPantelides.jl")
include("TestDifferentiate.jl")
include("TestTearing.jl")
include("TestLinearIntegerEquations.jl")
include("TestNonlinearEquations.jl")
end
end # module Runtests
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 2743 | # ModiaBase
[](https://modiasim.github.io/ModiaBase.jl/stable/)
[](https://github.com/ModiaSim/ModiaBase.jl/blob/master/LICENSE.md)
ModiaBase is part of [ModiaSim](https://modiasim.github.io/docs/). It is usually used via [Modia](https://github.com/ModiaSim/Modia.jl).
The [ModiaBase documentation](https://modiasim.github.io/ModiaBase.jl/stable/) provides details of the algorithms and how to use them.
ModiaBase provides basic algorithms and functionality that is needed for
equation-based modeling to transform a (potentially high-index) Differential-Algebraic Equation system (DAE),
to an Ordinary Differential Equation system in state space form (ODE).
It is used by [Modia](https://github.com/ModiaSim/Modia.jl),
but can also be utilized in another context. Especially the following functionality is provided:
- Simplify linear Integer equations (many equations of object-oriented models are linear Integer equations and can be pre-processed exactly)
- to remove alias variables and equations,
- to remove redundant equations,
- to provide definite values for variables that can have arbitrary values if this makes sense,
- to make state constraints structurally visible.
- Find a variable assignment of an equation system, in order
to transform the equation system in a directed graph that can be further
processed.
- Find the strong components in a directed graph (with the algorithm of Tarjan)
to determine algebraic equation systems that must be solved together.
- Sort an equation system (= transform to Block Lower Triangular form),
to determine the order in which the equations have to be evaluated.
- Reduce the dimension of algebraic equation systems by tearing.
- Find equations that need to be differentiated one or more times (with the algorithm of Pantelides)
in order that the DAE can be transformed to an ODE.
- Analytically differentiate the found equations.
- Statically select ODE states and transform to ODE form
(hereby identifying linear equation systems that must be solved during simulation).
## Installation
Typically, a user installs [Modia](https://github.com/ModiaSim/Modia.jl) and does not need
to install ModiaBase separately. If needed, ModiaBase is installed with (Julia 1.7 is required):
```julia
julia> ]add ModiaBase
```
## Main Developers
- [Hilding Elmqvist](mailto:[email protected]), [Mogram](http://www.mogram.net/).
- [Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en).
License: MIT (expat)
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 5457 | # Data Structures
In this chapter the **basic data structures** are summarized and
shortly described that are used in package ModiaBase.
## 1. Bi-partite Graph
The *bi-partite Graph* `G` of a DAE system defines the functional dependency of the equations ``e_i`` from
time-varying variables ``v_j``. `G` is also a sparse representation of the incidence matrix of the DAE system.
Example:
```julia
# Bi-partite graph of low pass filter
G = Vector{Int}[ [1,2,4], # equation 1 depends on variables 1,2,4
[1,7], # equation 2 depends on variables 1,7
[3,4],
[3,7],
[6,7],
[2] ]
```
In ModiaBase, only potential states and unknown variables are described with `G`.
Dependency on parameters (= constant quantities) is not shown.
The number of variables is usually larger as the number of equations, because both
the potential states and their derivatives are stored in `G`.
## 2. Assignment Vector
The *assignment vector* `assign` defines for all equations and variables the unique variable ``v_j`` that
is solved from equation ``e_i``. Example:
```julia
assign = [2,6,3,1,0,5,4] # Variable 1 is solved from equation 2
# Variable 2 is solved from equation 6
# ...
# Variable 5 is not solved from any equation
```
The *inverted assignment vector* `invAssign` defines the unique equation ``e_i`` that
is solved for the unique variable ``v_j``. This vector can be directly computed
from vector `assign`. Example:
```julia
(invAssign, unAssigned) = invertAssign(assign)
# invAssign = [4,1,3,7,6,2,0] # Equation 1 is solved for variable 4
# Equation 2 is solved for variable 1
# ...
# Equation 7 is not solved for any variable
```
## 3. Block Lower Triangular Form
The *Block Lower Triangular form* `blt` of an equation system describes the
sorted set of equations, in order to solve for the unknown variables.
With vector `invAssign` (see subsection 2 above) the information is provided
for which variable the respective equation is solved.
Example:
```julia
blt = [ [6], # Solve first equation 6
[3,4,2,1], # Afterwards solve equations 3,4,2,1 (they form an algebraic loop)
[5] ] # Finally solve equation 5
invAssign = [4, 1, 3, 7, 6, 2, 0]
```
The meaning is:
1. Equation 6 is solved for variable 2.
2. Equations 3,4,2,1 are solved simultaneously for variables 3, 7, 1, 4.
3. Equation 5 is solved for variable 6.
## 4. Variable Association Vector
The derivative relationship between variables is described with
the *variable association vector* `Avar` and its
inverted vector `invAvar`:
```math
\begin{aligned}
Avar_j &= \left\{ \begin{array}{rl}
k & \text{\textbf{if}}~ \dot{v}_j \equiv v_k \\
0 & \text{\textbf{if}}~ v_j \text{ is not a differentiated variable}
\end{array}\right. \\
invAvar_j &= \left\{ \begin{array}{rl}
k & \text{\textbf{if}}~ v_j \equiv \dot{v}_k \\
0 & \text{\textbf{if}}~ v_j \text{ is not a differentiated variable}
\end{array}\right.
\end{aligned}
```
Example:
The following derivative relationships between variables `v1,v2,v3,v4,v5`
```julia
1. v1
2. v2 = der(v1)
3. v3 = der(v2)
4. v4
5. v5 = der(v4)
```
are expressed by the following variable association vector and its inverted form:
```julia
Avar = [2,3,0,5,0] # The derivative of variable 1 is variable 2
# The derivative of variable 2 is variable 3
# ...
invAvar = [0,1,2,0,4] # Variable 1 is not a derivative
# Variable 2 is the derivative of variable 1
# ...
# Variable 5 is the derivative of variable 4
```
## 5. Equation Association Vector
The derivative relationship between equations is described with
the *equation association vector* `Bequ` and its
inverted vector `invBequ`:
```math
\begin{aligned}
Bequ_i &= \left\{ \begin{array}{rl}
k & \text{\textbf{if}}~ \dot{e}_i \equiv e_k \\
0 & \text{\textbf{if}}~ \dot{e}_i \text{ does not exist}
\end{array}\right. \\
invBequ_i &= \left\{ \begin{array}{rl}
k & \text{\textbf{if}}~ e_i \equiv \dot{e}_k \\
0 & \text{\textbf{if}}~ e_i \text{ is not a differentiated equation}
\end{array}\right.
\end{aligned}
```
Example:
The following derivative relationships between equations `e1,e2,e3,e4,e5`
```julia
1. e1
2. e2 = der(e1)
3. e3 = der(e2)
4. e4
5. e5 = der(e4)
```
are expressed by the following equation association vector and its inverted form:
```julia
Bequ = [2,3,0,5,0] # The derivative of equation 1 is equation 2
# The derivative of equation 2 is equation 3
# ...
invBequ = [0,1,2,0,4] # Equation 1 is not a differentiated equation
# Equation 2 is the derivative of equation 1
# ...
# Equation 5 is the derivative of equation 4
```
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 1106 | # Equation Reduction
This section provides functions to **reduce the dimensions of
systems of equations**.
## Linear Integer Equations
Many equations of object-oriented models are **linear Integer equations**
(such as all equations deduced from connections, or, say defining a potential difference)
and can be pre-processed exactly to simplify the equations, for example elimination of
alias variables, or variables that are identically to zero). Furthermore,
(consistently) redundant or (consistently) overdetermined equations can be
removed. Finally, hidden state constraints can be made explicit in order
that a structural algorithm (such as the algorithm of Pantelides) can process state constraints.
```@meta
CurrentModule = ModiaBase
```
```@docs
simplifyLinearIntegerEquations!
printSimplifiedLinearIntegerEquations
```
## Algebraic Systems
**Algebraic equation systems** are reduced by selecting a subset of the
variables as iteration variables and computing the remaining variables in
a forward sequence.
```@meta
CurrentModule = ModiaBase
```
```@docs
TearingSetup
tearEquations!
```
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 438 | # Equation Sorting
This section provides functions to **sort systems of equations**.
## Main Functions
```@meta
CurrentModule = ModiaBase.BLTandPantelides
```
```@docs
matching
BLT
```
## Utility Functions
```@meta
CurrentModule = ModiaBase.BLTandPantelidesUtilities
```
```@docs
invertDer
invertAssign
buildExtendedSystem
buildFullIncidence
createNames
printList
printAssignedEquations
printSortedEquations
printUnassigned
```
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 215 | # Nonlinear Equations
This section provides functions to **solve nonlinear equations systems**.
## Main Functions
```@meta
CurrentModule = ModiaBase.NonlinearEquations
```
```@docs
solveNonlinearEquations!
```
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 538 | # Transformation to ODE System
This section provides utility functions to **transform DAE to ODE systems**. In particular,
- to find equations that need to be differentiated, in order to transform a
Differential Algebraic Equations (DAEs) agebraically to
Ordinary Differential Equations (ODEs),
- to differentiate relevant equations analytically,
## Main Functions
```@meta
CurrentModule = ModiaBase.BLTandPantelides
```
```@docs
pantelides!
```
```@meta
CurrentModule = ModiaBase.Differentiate
```
```@docs
derivative
```
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 7711 | # Tutorial
This chapter contains a short tutorial about the data structures and
functions provided by package [ModiaBase](https://github.com/ModiaSim/ModiaBase.jl).
## 1. Regular DAEs (Index Zero DAEs)
In this subsection functions are demonstrated that can be used to
transform regular DAEs to ODEs.
The transformations are explained with the
following simple electrical circuit (a low pass filter where the
voltage source is modelled with an inner resistor):

### 1.1 Bi-Partite Graph
In a first step, the structural information of the low pass filter model is provided as incidence matrix

- Every **column** corresponds to one time-varying **variable**.
Parameters, so variables with constant values, are not shown.
- Every **row** corresponds to one **equation**.
- A cell is marked (here in *blue*), if a time-varying variable is
present in one equation. Variables that are appearing differentiated,
such as `C.v`, are not marked because in a first analysis phase, these potential
state variables are treated as known.
The matrix above is called the **incidence matrix** or the
**bi-partite graph** of the circuit. In ModiaBase, this matrix is represented
as vector `G` of Integer vectors:
```julia
# Bi-partite graph of low pass filter
G = [ [25,27], # equation 1 depends on variables 25,27
[6,23],
[4,10],
...,
[27] ]
```
This can be also made more explicit (and a bit more efficient storage)
by defining the incidence matrix as:
```julia
# Bi-partite graph of low pass filter
G = Vector{Int}[ [25,27], # equation 1 depends on variables 25,27
[6,23],
[4,10],
...,
[27] ]
```
### 1.2 Linear Integer Equations
Object-oriented models consist of a lot of linear Integer equations, especially due to the connect-statements.
The linear integer equations of `G` are identified and the corresponding linear factors
are determined. With function [`simplifyLinearIntegerEquations!`](@ref) this information is used to simplify
the equations by transforming the linear Integer equations with a fraction-free (exact) Gaussian elimination to
a special normalized form and then perform the following simplifications:
- Equations of the form `v = 0` are removed and `v` is replaced by β0β at all places where
`v` occurs, and these equations are simplified.
- Equations of the form `v1 = v2` and `v1 = -v2` are removed, `v1` is replaced by `v2` (or `-v2`)
at all places where `v1` occurs (so called *alias-variables*), and these equations are simplified.
- *Redundant equations* are removed.
- Variables that appear *only* in the linear Integer equations (and in no other equations) are set to zero, if
they can be *arbitrarily selected*. For example, if an electrical circuit is not
grounded, then one of the electrical potentials is arbitrarily set to zero.
- State constraints are made structurally visible.
After applying [`simplifyLinearIntegerEquations!`](@ref) to the low pass filter circuit,
the incidence matrix is simplified to

```julia
# Bi-partite graph of simplified low pass filter
G = Vector{Int}[ [1,2,4],
[1,7],
[3,4],
[3,7],
[6,7],
[2] ]
# Eliminated variables
R.i = -(V.p.i)
ground.p.v = 0
R.p.i = -(V.p.i)
R.n.v = C.v
V.n.i = -(V.p.i)
V.n.v = 0
V.p.v = Ri.n.v
Ri.p.i = V.p.i
C.n.v = 0
C.p.v = C.v
Ri.p.v = R.p.v
C.n.i = V.p.i
V.i = V.p.i
R.n.i = V.p.i
C.p.i = -(V.p.i)
ground.p.i = 0
C.i = -(V.p.i)
Ri.i = V.p.i
V.v = Ri.n.v
Ri.n.i = -(V.p.i)
```
### 1.3 Assignment
In a follow-up step, an assignment is made (also called matching), to associate
one variable uniquely with one equation:

- *Red* marks show the assigned variables.
- *Blue* marks show if a variable is part of the respective equation
The assignment is computed with function [`ModiaBase.matching`](@ref)
returning a vector `assign`:
```julia
using ModiaBase
vActive = fill(true,7)
vActive[5] = false # state C.v is known
assign = matching(G, 7, vActive)
# assign = [2,6,3,1,0,5,4]
```
The meaning of vector `assign` is that
- Variable 1 is solved from equation 2,
- Variable 2 is solved from equation 6,
- etc.
### 1.4 Sorting
In a follow-up step, equations are sorted and algebraic loops determined
(= Block Lower Triangular transformation):

- *Red* marks show the assigned variables.
- *Blue* marks show if a variable is part of the respective equation
- A *grey* area marks an algebraic loop.
The sorting is computed with function [`ModiaBase.BLT`](@ref):
```julia
using ModiaBase
blt = BLT(G, assign)
#=
blt = [ [6],
[3,4,2,1],
[5] ]
=#
```
The meaning is for example that the second BLT block consists of
equations 3,4,2,1 and these equations form an algebraic loop.
### 1.5 Reducing sizes of equation systems
In a follow-up step, the sizes of equation systems are reduced by
variable substitution (= tearing). Applying [`ModiaBase.tearEquations!`](@ref) to the
low pass filter circuit, reduces the dimension of BLT block 2 from size 4 to size 1
resulting in the following equation system:
```julia
# iteration variables (inputs): C.i
# residual variables (outputs): residual
R.v := R.R*C.i
Ri.v := -Ri.R*C.i
R.p.v := Ri.v + V.v
residual := R.v - R.p.v + C.v
```
### 1.6 Generation of AST
In a final step, the AST (Abstract Syntax Tree) of the model is
generated. Hereby, it is determined that the equation system of section 1.4 and 1.5
is linear in the iteration variable (`C.i`) and an AST is generated
to build-up a linear equation system `A*C.i = b` and solve this system numerically
with an LU decomposition whenever the AST is called (if the equation system has size 1,
a simple division is used instead of calling a linear equation solver). Applying
`Modia.getSortedAndSolvedAST` results basically in a function
`getDerivatives` that can be solved with the many ODE integrators of
[DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl):
```julia
function getDerivatives(_der_x, _x, _m, _time)::Nothing
_m.time = ModiaLang.getValue(_time)
_m.nGetDerivatives += 1
instantiatedModel = _m
_p = _m.evaluatedParameters
_leq_mode = nothing
time = _time
var"C.v" = _x[1]
var"V.v" = (_p[:V])[:V]
begin
local var"C.i", var"R.v", var"Ri.v", var"R.p.v"
_leq_mode = _m.linearEquations[1]
_leq_mode.mode = -2
while ModiaBase.LinearEquationsIteration!(_leq_mode, _m.isInitial, _m.time, _m.timer)
var"C.i" = _leq_mode.vTear_value[1]
var"R.v" = (_p[:R])[:R] * var"C.i"
var"Ri.v" = (_p[:Ri])[:R] * -var"C.i"
var"R.p.v" = var"Ri.v" + var"V.v"
_leq_mode.residual_value[1] = (var"R.v" + -1var"R.p.v") + var"C.v"
end
_leq_mode = nothing
end
var"der(C.v)" = var"C.i" / (_p[:C])[:C]
_der_x[1] = var"der(C.v)"
if _m.storeResult
ModiaLang.addToResult!(_m, _der_x, time, var"R.v", var"R.p.v", var"Ri.v", var"C.i", var"V.v")
end
return nothing
end
```
## 2. Singular DAEs (Higher Index DAEs)
xxx
| ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.11.1 | 37e87a7e1dc621c63032c73ceea050b5fb4f1c6f | docs | 8998 | # ModiaBase.jl Documentation
[ModiaBase](https://github.com/ModiaSim/ModiaBase.jl) provides functions to support the transformation of a
Differential Algebraic Equation system (DAE)
```math
0 = f_{dae}(\frac{dx_{dae}}{dt},x_{dae},w,t) \tag{1}
```
to an explicit Ordinary Differential Equation system (ODE)
```math
\begin{aligned}
\frac{dx_{ode}}{dt} &= f_{x}(x_{ode},t) \\
(w, x_{dae}) &= f_{w}(x_{ode},t)
\end{aligned} \tag{2}
```
where ``x=x(t), w=w(t)`` are vector valued functions of the scalar
variable ``t`` (= usually time). ``x_{dae}`` is the DAE state vector,
``x_{ode}`` is the ODE state vector - a subset of ``x_{dae}``, and
``w`` are purely algebraic variables that do not appear differentiated in the DAE.
The equations are hereby represented as a vector of Julia expressions,
that is of an Abstract Syntax Tree (AST).
These functions are used by package [Modia](https://github.com/ModiaSim/Modia.jl),
but can also be utilized in another context. Especially the following functionality is provided:
- Simplify linear Integer equations (many equations of object-oriented models are linear Integer equations and can be pre-processed exactly)
- to remove alias variables and equations,
- to remove redundant equations,
- to provide definite values for variables that can have arbitrary values if this makes sense,
- to make state constraints structurally visible.
- Find a variable assignment of an equation system, in order
to transform the equation system in a directed graph that can be further
processed.
- Find the strong components in a directed graph (with the algorithm of Tarjan)
to determine algebraic equation systems that must be solved together.
- Sort an equation system (= transform to Block Lower Triangular form),
to determine the order in which the equations have to be evaluated.
- Reduce the dimension of algebraic equation systems by tearing.
- Find equations that need to be differentiated one or more times (with the algorithm of Pantelides)
in order that the DAE can be transformed to an ODE.
- Analytically differentiate the found equations.
- Statically select ODE states and transform to ODE form
(hereby identifying linear equation systems that must be solved during simulation).
Transformation from a DAE to an ODE form is (currently) performed if no nonlinear-algebraic equations
appear and the ODE-states can be statically selected.
Array variables and array equations are kept (they are not "flattened" in single elements).
However, DAE forms that require to differentiate array equations, are not yet supported.
The following extensions are planned (internal prototypes are available):
- Full support of array equations.
- If transformation to an ODE is not possible with the algorithms above,
transformation to a special index 1 DAE, that
can be simulated with standard DAE solvers (such as Sundials IDA).
## Installation
The package is registered and is installed with (Julia 1.7 is required):
```julia
julia> ]add ModiaBase
```
## Release Notes
### Version 0.11.1
- Included function `solveNonlinearEquations!` to solve nonlinear equation systems `F(x) = 0`
with `length(F) <= length(x)` by global Gauss-Newton method with error oriented convergence criterion
and adaptive trust region strategy. Optionally Broyden's 'good' Jacobian rank-1 updates are used.
In case of underdetermined and/or rank-deficient equation system, a least squares solution
is computed such that the norm of the scaled solution vector is minimal.
- Removed the manifest.toml file.
### Version 0.11.0
- Moved ModiaBase.Symbolic.makeDerVar to Modia (because makeDerVar needs FloatType for
generating type-stable code and FloatType is available in Modia but not in ModiaBase).
### Version 0.10.0
**Non-backwards** compatible changes
- EquationAndStateInfo.jl and StateSelection.jl moved to Modia (ModiaLang is merged into Modia), because
the AST generation in these files depends on details of CodeGeneration.jl of Modia/ModiaLang.
- TestLinearEquations.jl also moved to Modia/ModiaLang.
### Version 0.9.2
- Minor (efficiency) improvement of linear equation system if iteration variables are SVectors.
### Version 0.9.1
- Update of Manifest.toml file
### Version 0.9.0
Non-backwards compatible improvements
- Parameter values in the code are now type cast to the type of the parameter value from the
`@instantiateModel(..)` call. The benefit is that access of parameter values in the code is type stable
and operations with the parameter value are more efficient and at run-time no memory is allocated.
Existing models can no longer be simulated, if parameter values provided via `simulate!(.., merge=xx)` are not
type compatible to their definition. For example, an error is thrown if the `@instantiateModel(..)` uses a Float64 value and the
`simulate!(.., merge=xx)` uses a `Measurement{Float64}` value for the same parameter.
Other improvements
- Hierarchical names in function calls supported (e.g. `a.b.c.fc(..)`).
- Functions can return multiple values, e.g. `(tau1,tau2) = generalizedForces(derw1, derw2)`.
- Large speedup of symbolic transformation, if function depends on many input (and output) arguments
(includes new operator `implicitDependency(..)`).
- Support for StaticArrays variables (the StaticArrays feature is kept in the generated AST).
- Support for Array variables (especially of state and tearing variables)
where the dimension can change after @instantiateModel(..)
- Included DAE-Mode in solution of linear equation system (if DAE integrator is used and all unknowns of a linear
equation system are part of the DAE states, solve the linear equation system during continuous integration
via DAE solver (= usually large simulation speed-up, for larger linear equation systems)
- Improved code generation of linear equation systems lead to more efficient solution of linear equation systems.
Bug fixes
- Do no longer expand the unit macro in the AST, such as `u"N"`, because otherwise `logCode=true` results in wrong code
(previously, a `u"N"` definition in the model was displayed in the code as `N` which is not correct Julia code).
### Version 0.8.1
- Update Project.toml, Manifest.toml, README.md
### Version 0.8.0
- Require Julia 1.7
- Upgrade Manifest.toml to version 2.0
- Update Project.toml/Manifest.toml
### Version 0.7.8
- Tests of TestDifferentiate.jl corrected to comply with DiffRules > 1.0
- Scaling introduced to improve numerics when constructing A-matrix of linear equation system.
### Version 0.7.7
- Bug fixed when selecting RecursiveFactorization.jl
### Version 0.7.6
- Fixed bug in StateSelection.jl: If unitless=true, no unit is associated with the tearing variable.
- Solve linear equation systems optionally with [RecursiveFactorization.jl](https://github.com/YingboMa/RecursiveFactorization.jl)
instead of the default `lu!(..)` and `ldiv!(..)`.
- Project.toml: Changed DiffRules from "~1.0" to "1", since issue with "1.2.1"
(leading to an error in runtests) seems to be fixed.
- Project.toml: Added version 1 of MonteCarloMeasurements.
- Updated used packages.
- Tutorial slightly improved.
### Version 0.7.5
- Added a restriction, so that DiffRules 1.0.2 is used, instead of 1.2.1 (which leads to an error in the test suite).
### Version 0.7.4
- showCodeWithoutComments(code): Bug corrected to only remove comments and not other code
(ModiaLang.@instantiateModel(..., logCode=true, ...) gave wrong output).
- Used packages updated
### Version 0.7.3
- Speed improvements for structural and symbolic algorithms.
- Added support for state events, time events and synchronous operators
(positive(), Clock(), after(), pre(), previous(), hold(), initial(), terminal())
- Added support for mixed linear equation systems having Real and Boolean unknowns.
- Simplified code for linear equation systems (while-loop instead of for-loop).
- Added TimerOutputs @timeit instrumentation to the solution of linear equation systems.
### Version 0.7.2
- Support of parameters as hierarchical named tuples.
- Support of array comprehensions.
- Support of array end (e.g. A[3:end])
- If one equation cannot be solved for one unknown (e.g. since function call),
try to solve it as linear equation system.
- If variables with init values are explicitly solved for, print warning message
only if log = true (in TinyModia.simulate! an error occurs, if the init value
cannot be respected).
### Version 0.7.1
- Due to version conflicts, added version 0.17 of DataStructures in compat.
### Version 0.7.0
- Initial version, based on code developed for Modia 0.6 and ModiaMath 0.6.
## Main developers
- [Hilding Elmqvist](mailto:[email protected]), [Mogram](http://www.mogram.net/).
- [Martin Otter](https://rmc.dlr.de/sr/en/staff/martin.otter/),
[DLR - Institute of System Dynamics and Control](https://www.dlr.de/sr/en) | ModiaBase | https://github.com/ModiaSim/ModiaBase.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | code | 660 | using Freenect
using Documenter
# Build the Documenter.jl site for Freenect.jl.
makedocs(;
    modules=[Freenect],
    authors="dmillard <[email protected]> and contributors",
    repo="https://github.com/dmillard/Freenect.jl/blob/{commit}{path}#L{line}",
    sitename="Freenect.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI; locally, plain file links are easier to browse.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://dmillard.github.io/Freenect.jl",
        assets=String[],
    ),
    # Sidebar order of the rendered documentation pages.
    pages=[
        "Home" => "index.md",
        "installation.md",
        "getting_started.md",
        "displaying_images.md",
        "reference.md"
    ],
)

# Publish the built site; docs track the `main` development branch.
deploydocs(;
    repo="github.com/dmillard/Freenect.jl",
    devbranch="main",
)
| Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | code | 156 | module Freenect
using libfreenect_jll
using DocStringExtensions
include("./export_utils.jl")
include("./freenect_sync.jl")
include("./pointcloud.jl")
end | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
"""
    @exported_enum name values

Define `@enum name values` and `export` both the enum type and every one of
its instances, so users of the module get the symbols without qualification.
"""
macro exported_enum(name, values)
    # Build an `export` expression per enum member; `LineNumberNode`s inside
    # the `begin ... end` block are passed through untouched.
    export_sym(ex::Expr) = Expr(:export, ex.args[1])
    export_sym(ex::Symbol) = Expr(:export, ex)
    export_sym(ex::LineNumberNode) = ex
    esc(quote
        @enum($name, $values)
        export $name
        $(export_sym.(values.args)...)
    end)
end
"""
    @exported_cfun proto
    @exported_cfun begin "docs..." proto end

Generate and `export` a thin Julia wrapper for a `freenect_sync` C function.
`proto` has the form `name(arg1::CType1, ...)::ReturnCType`; the generated
wrapper `ccall`s `freenect_<name>` in `libfreenect_sync`. A docstring may
optionally precede the prototype inside a `begin ... end` block.
"""
macro exported_cfun(proto)
    # A block argument means a docstring precedes the prototype (the pair is
    # parsed as a doc macrocall inside the block).
    hasdocs = proto.head == :block
    if hasdocs
        docs = proto.args[2]
        # Detach the prototype (last argument of the doc macrocall); the
        # generated definition is re-attached to `docs` further below.
        proto = pop!(proto.args[2].args)
    end
    nameargs, returntype = proto.args
    name = nameargs.args[1]
    args = nameargs.args[2:end]
    argname(ex::Expr)::Symbol = ex.args[1]
    argtype(ex::Expr)::Union{Symbol,Expr} = ex.args[2]
    # Wrapper body: forward the arguments to `freenect_<name>` via ccall.
    defn = quote
        function $name($(argname.(args)...))
            ccall(
                ($(QuoteNode(Symbol("freenect_", name))), libfreenect_jll.libfreenect_sync),
                $returntype,
                ($(argtype.(args)...),),
                $(argname.(args)...)
            )
        end
    end
    if hasdocs
        # Re-attach the generated definition so the docstring documents it.
        push!(docs.args, defn.args[2])
        defn = docs
    end
    return esc(quote
        $defn
        export $name
    end)
end
| Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | code | 7949 | """
Available modes for the LED on the Kinect.
"""
@exported_enum LEDMode begin
    off = 0
    green = 1
    red = 2
    yellow = 3
    blink_green = 4
    # Value 5 is skipped to match the libfreenect C enum.
    blink_red_yellow = 6
end

"""
Enumeration of available resolutions.
Not all available resolutions are actually supported for all video formats.
Frame modes may not perfectly match resolutions. For instance,
`resolution_medium` is 640x488 for the IR camera.
"""
@exported_enum Resolution begin
    resolution_low = 0 # QVGA - 320 x 240
    resolution_medium = 1 # VGA - 640 x 480
    resolution_high = 2 # SXGA - 1280x1024
end

# (rows, cols) image dimensions for each resolution, used by the high-level
# wrappers below to shape the arrays they return.
const resolution_to_dims = Dict{Resolution,NTuple{2,Int}}(
    resolution_low => (240, 320),
    resolution_medium => (480, 640),
    resolution_high => (1024, 1280)
)
"""
See <http://openkinect.org/wiki/Protocol_Documentation#RGB_Camera> for more information.
"""
@exported_enum VideoFormat begin
video_rgb = 0 # Decompressed RGB mode (demosaicing done by libfreenect)
video_bayer = 1 # Bayer compressed mode (raw information from camera)
video_ir_8bit = 2 # 8-bit IR mode
video_ir_10bit = 3 # 10-bit IR mode
video_ir_10bit_packed = 4 # 10-bit packed IR mode
video_yuv_rgb = 5 # YUV RGB mode
video_yuv_raw = 6 # YUV Raw mode
end
const video_format_to_channels = Dict{VideoFormat,Int}(
video_rgb => 3,
video_ir_8bit => 1,
# I haven't used the other modes enough to wrap them. Contributions welcome!
)
"""
See <http://openkinect.org/wiki/Protocol_Documentation#RGB_Camera> for more information.
"""
@exported_enum DepthFormat begin
depth_11bit = 0 # 11 bit depth information in one uint16_t/pixel
depth_10bit = 1 # 10 bit depth information in one uint16_t/pixel
depth_11bit_packed = 2 # 11 bit packed depth information
depth_10bit_packed = 3 # 10 bit packed depth information
depth_registered = 4 # processed depth data in mm, aligned to 640x480 rgb
depth_mm = 5 # depth to each pixel in mm, but left unaligned to rgb image
end
const wrappable_depth_formats = Set{DepthFormat}([
depth_11bit,
depth_10bit,
depth_registered,
depth_mm,
# I haven't used the other modes enough to wrap them. Contributions welcome!
])
"""
Possible status codes returned in [`RawTiltState`](@ref).
"""
@exported_enum TiltStatusCode begin
tilt_status_stopped = 0x00
tilt_status_limit = 0x01
tilt_status_moving = 0x04
end
"""
$(TYPEDEF)
$(TYPEDFIELDS)
This data is currently uninterpreted and only provided raw.
"""
struct RawTiltState
accelerometer_x::Cshort
accelerometer_y::Cshort
accelerometer_z::Cshort
tilt_angle::Cchar
tilt_status::Cchar
end
import Base.copy
copy(s::RawTiltState) = RawTiltState(s.accelerometer_x, s.accelerometer_y, s.accelerometer_z, s.tilt_angle, s.tilt_status)
export RawTiltState
# These are just a thin wrapper around the ccall
# Raw, C-flavoured variants: the caller supplies Ref outputs plus integer
# enum values and receives the libfreenect status code back.
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Synchronous video function, starts the runloop if it isn't running
    The returned buffer is valid until this function is called again, after
    which the buffer must not be used again. Make a copy if the data is
    required.
    """
    sync_get_video_with_res(video::Ref{Ptr{Cvoid}}, timestamp::Ref{Cuint}, index::Cint, res::Cint, fmt::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Does the exact same as [`sync_get_video_with_res`](@ref), with a default
    resolution of `resolution_medium`.
    """
    sync_get_video(video::Ref{Ptr{Cvoid}}, timestamp::Ref{Cuint}, index::Cint, fmt::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Synchronous depth function, starts the runloop if it isn't running
    In the raw pointer version, the returned buffer is valid until this
    function is called again, after which the buffer must not be used again.
    Make a copy if the data is required. The version returning an array does
    not have this limitation.
    """
    sync_get_depth_with_res(depth::Ref{Ptr{Cvoid}}, timestamp::Ref{Cuint}, index::Cint, res::Cint, fmt::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Does the exact same as [`sync_get_depth_with_res`](@ref), with a default
    resolution of `resolution_medium`.
    """
    sync_get_depth(depth::Ref{Ptr{Cvoid}}, timestamp::Ref{Cuint}, index::Cint, fmt::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Tilt the kinect to `angle`. Starts the runloop if it isn't running.
    """
    sync_set_tilt_degs(angle::Cint, index::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Tilt state function, starts the runloop if it isn't running.
    The returned pointer is only safe until the next call to this function.
    """
    sync_get_tilt_state(state::Ref{Ptr{RawTiltState}}, index::Cint)::Cint
end
@exported_cfun begin
    """
    $(TYPEDSIGNATURES)
    Set the LED to the given mode, starts the runloop if it isn't running.
    """
    sync_set_led(led::Cint, index::Cint)::Cint
end
# These should be safe to call and use the return values
"""
$(TYPEDSIGNATURES)
Synchronous video function, starts the runloop if it isn't running.
The returned array is copied before returning and is safe to store.
"""
function sync_get_video_with_res(index::Integer, res::Resolution, fmt::VideoFormat)
    # Only formats with a known channel count can be decoded here.
    if fmt ∉ keys(video_format_to_channels)
        error(
            "Video format \"$fmt\" not in wrapped types $(keys(video_format_to_channels)). " *
            "Use the more verbose version of sync_get_video_with_res and decode manually."
        )
    end
    timestamp = Ref{Cuint}()
    unsafe_video = Ref{Ptr{Cvoid}}(C_NULL)
    status = sync_get_video_with_res(unsafe_video, timestamp, index, res, fmt)
    # A nonzero status (or a NULL buffer) means libfreenect failed to deliver
    # a frame; wrapping a NULL pointer below would crash the process.
    if status != 0 || unsafe_video[] == C_NULL
        error("freenect_sync_get_video_with_res failed with status $status")
    end
    rows, cols = resolution_to_dims[res]
    channels = video_format_to_channels[fmt]
    # Wrap the row-major C buffer as (channels, cols, rows) and permute to
    # (channels, rows, cols) before copying out of library-owned memory.
    wrapped_video = unsafe_wrap(Array, convert(Ptr{UInt8}, unsafe_video[]), (channels, cols, rows))
    wrapped_video_transpose = PermutedDimsArray(wrapped_video, [1, 3, 2])
    if channels == 1
        # Single-channel (IR) formats drop the degenerate channel axis.
        wrapped_video_transpose = view(wrapped_video_transpose, 1, :, :)
    end
    return copy(wrapped_video_transpose), Int(timestamp[])
end
"""
$(TYPEDSIGNATURES)
Synchronous video function with resolution `resolution_medium`, starts the
runloop if it isn't running.
The returned array is copied before returning and is safe to store.
"""
sync_get_video(index::Integer, fmt::VideoFormat) = sync_get_video_with_res(index, resolution_medium, fmt)
"""
$(TYPEDSIGNATURES)
Synchronous depth function, starts the runloop if it isn't running.
The returned array is copied before returning and is safe to store.
"""
function sync_get_depth_with_res(index::Integer, res::Resolution, fmt::DepthFormat)
if fmt β wrappable_depth_formats
error(
"Depth format \"$fmt\" not in wrapped types $(wrappable_depth_formats). " *
"Use the more verbose version of sync_get_depth_with_res and decode manually."
)
end
timestamp = Ref{Cuint}()
unsafe_depth = Ref{Ptr{Cvoid}}(C_NULL)
sync_get_depth_with_res(unsafe_depth, timestamp, index, res, fmt)
rows, cols = resolution_to_dims[res]
wrapped_depth = unsafe_wrap(Array, convert(Ptr{UInt16}, unsafe_depth[]), (cols, rows))
wrapped_depth_transpose = transpose(wrapped_depth)
return copy(wrapped_depth_transpose), Int(timestamp[])
end
"""
$(TYPEDSIGNATURES)
Synchronous depth function with resolution `resolution_medium`, starts the
runloop if it isn't running.
The returned array is copied before returning and is safe to store.
"""
sync_get_depth(index::Integer, fmt::DepthFormat) = sync_get_depth_with_res(index, resolution_medium, fmt)
"""
$(TYPEDSIGNATURES)
Tilt state function, starts the runloop if it isn't running.
The returned `RawTiltState` struct is safe to store.
"""
function sync_get_tilt_state(index::Integer)
state = Ref{Ptr{RawTiltState}}(C_NULL)
sync_get_tilt_state(state, index)
return copy(unsafe_load(state[]))
end | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | code | 1218 | function compute_world_X_depth()
# These values are rather undocumented and come from the source of the
# glpclview example in libfreenect.
fx = 594.21
fy = 591.04
a = -0.0030711
b = 3.3309495
cx = 339.5
cy = 242.7
W = [1 / fx 0 0 -cx / fx
0 -1 / fy 0 cy / fy
0 0 0 -1
0 0 a b]
P = [ 0. 0. -1. 0.
-1. 0. 0. 0.
0. 1. 0. 0.
0. 0. 0. 1.]
return P * W
end
const world_X_depth = compute_world_X_depth()
"""
$(TYPEDSIGNATURES)
Helper for getting a pointcloud from the camera at `index`.
The resulting point cloud is relative to the camera, with X forward, Y left,
and Z up.
This function uses a fixed homography matrix - you may have better results
for your own hardware by calibrating your Kinect.
"""
function sync_get_pointcloud(index::Integer)
depth, timestamp = sync_get_depth(index, depth_11bit)
cloud = zeros((3, 480, 640))
for i β 1:480, j β 1:640
uvw = Float64[j - 1, i - 1, depth[i, j], 1.0]
xyzw = world_X_depth * uvw
cloud[:, i, j] .= xyzw[1:3] ./ xyzw[4]
end
return cloud, timestamp
end
export sync_get_pointcloud | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | code | 89 | using Freenect
using Test
@testset "Freenect.jl" begin
# Write your tests here.
end
| Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 955 | # Freenect.jl
[](https://dmillard.github.io/Freenect.jl/stable)
[](https://dmillard.github.io/Freenect.jl/dev)
[](https://github.com/dmillard/Freenect.jl/actions)
[](https://codecov.io/gh/dmillard/Freenect.jl)
Freenect.jl is a wrapper around the [libfreenect](https://github.com/OpenKinect/libfreenect) open source Microsoft Kinect driver.
This package only supports Kinect v1, which comes from the XBox 360 era.
For installation and usage please visit the [documentation](https://dmillard.github.io/Freenect.jl/dev).
## Acknowledgements
The development of this software was supported in part by the NASA Space Technology Research Fellowship, grant number 80NSSC19K1182.
| Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 1290 | # Displaying Images
Freenect.jl doesn't come with any image-specific utilities out of the box, it
only returns arrays of data.
Here are some examples of using the [JuliaImages](https://juliaimages.org)
suite to display data from the Kinect.
Before running these examples, be sure to install the relevant packages:
```julia
using Pkg
Pkg.add("Images")
Pkg.add("ImageView")
```
## RGB Image
```julia
using Freenect, Images, ImageView
image, timestamp = sync_get_video(0, video_rgb)
imshow(colorview(RGB, image ./ 255))
```

## Infrared Image
```julia
using Freenect, Images, ImageView
image, timestamp = sync_get_video(0, video_ir_8bit)
imshow(colorview(Gray, image ./ 255))
```

## Depth Image
These images often appear washed out when directly visualized, and anything
below the minimum range appears in white.
```julia
using Freenect, Images, ImageView
depth, timestamp = sync_get_depth(0, depth_11bit)
imshow(colorview(Gray, depth ./ 2^11))
```

## Point Cloud Image
While direct XYZ to RGB isn't the cleanest visualizer, it works in a pinch.
```julia
using Freenect, Images, ImageView
cloud, timestamp = sync_get_pointcloud(0)
imshow(colorview(RGB, cloud))
```
 | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 893 | # Getting Started
The key functions are [`sync_get_video`](@ref), [`sync_get_depth`](@ref), and
[`sync_get_pointcloud`](@ref). Each of these has a memory-safe Julia wrapper
which will return an array.
To display the returned array as an image, refer to [Displaying
Images](displaying_images.md).
Sometimes `libfreenect` doesn't connect to the Kinect immediately. I find it
helpful to try to set the LED until it makes a connection.
## Quickstart Example
```julia
using Freenect
kinect_idx = 0
while sync_set_led(green, kinect_idx) != 0
sleep(0.5)
end
sync_set_led(blink_red_yellow, kinect_idx)
sleep(1)
sync_set_tilt_degs(10, kinect_idx)
sleep(1)
sync_set_tilt_degs(0, kinect_idx)
sleep(1)
image, image_timestamp = sync_get_video(kinect_idx, video_rgb)
depth, depth_timestamp = sync_get_depth(kinect_idx, depth_11bit)
cloud, cloud_timestamp = sync_get_pointcloud(kinect_idx)
``` | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 328 | # Freenect.jl
Freenect.jl is a wrapper around the libfreenect open source Microsoft Kinect driver.
This package only supports Kinect v1, which comes from the XBox 360 era.
## Acknowledgements
The development of this software was supported in part by the NASA Space Technology Research Fellowship, grant number 80NSSC19K1182. | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 347 | # Installation
`libfreenect` is a userspace driver, and is included in the Julia
[Yggdrasil](https://github.com/JuliaPackaging/Yggdrasil) package tree as
`libfreenect_jll`. The upshot of this is that you do _not_ need `libfreenect`
installed on your system to use Freenect.jl, and can just install with
```julia
using Pkg
Pkg.add("Freenect")
``` | Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.2 | f3fd84bbb7693624a92ecd86f5b41eb4a6c83847 | docs | 105 | ```@meta
CurrentModule = Freenect
```
# Reference
```@index
```
```@autodocs
Modules = [Freenect]
```
| Freenect | https://github.com/dmillard/Freenect.jl.git |
|
[
"MIT"
] | 0.1.6 | a749a3ab2d986ce3f981af4da17f904ae0c4478f | code | 4835 | module MLJTSVDInterface
import TSVD
import MLJModelInterface
using Random: MersenneTwister, AbstractRNG, GLOBAL_RNG
const PKG = "TSVD"
const MMI = MLJModelInterface
# Hyperparameter container for the truncated-SVD transformer; `@mlj_model`
# also generates the keyword constructor.
MMI.@mlj_model mutable struct TSVDTransformer <: MLJModelInterface.Unsupervised
    nvals::Int = 2        # output dimension (number of singular values kept)
    maxiter::Int = 1000   # maximum number of iterations for the solver
    rng::Union{Int, AbstractRNG} = GLOBAL_RNG  # integer seed or RNG instance
end
# Learned parameters produced by `fit` and consumed by `transform`.
struct TSVDTransformerResult
    singular_values::Vector{Float64}  # estimated singular values
    components::Matrix{Float64}       # right singular vectors, one per column
    is_table::Bool                    # whether training data was a table (vs a matrix)
end
# Coerce tabular input to a matrix; pass matrices through unchanged.
as_matrix(X) = MMI.matrix(X)
as_matrix(X::AbstractMatrix) = X
# Normalise the `rng` hyperparameter: an integer is treated as a seed for a
# fresh MersenneTwister, while any AbstractRNG is used as-is.
_get_rng(seed::Int) = MersenneTwister(seed)
_get_rng(rng) = rng
"""
    MMI.fit(transformer::TSVDTransformer, verbosity, Xuser)

Compute the truncated SVD of `Xuser` (a table or an `AbstractMatrix`) and
return `(fitresult, cache, report)` as required by the MLJ model API.
"""
function MMI.fit(transformer::TSVDTransformer, verbosity, Xuser)
    X = as_matrix(Xuser)
    rng = _get_rng(transformer.rng)
    # Truncated SVD via Lanczos bidiagonalization; supplying an explicitly
    # seeded start vector makes the iteration reproducible for a fixed rng.
    U, s, V = TSVD.tsvd(
        X,
        transformer.nvals;
        maxiter=transformer.maxiter,
        initvec = convert(Vector{float(eltype(X))}, randn(rng, size(X, 1)))
    )
    # Record whether the user passed a table so `transform` can return the
    # same container flavour it was given.
    is_table = !(Xuser isa AbstractArray)
    fitresult = TSVDTransformerResult(s, V, is_table)
    cache = nothing
    return fitresult, cache, NamedTuple()
end
# for returning user-friendly form of the learned parameters:
function MMI.fitted_params(::TSVDTransformer, fitresult)
    return (
        singular_values = fitresult.singular_values,
        components = fitresult.components,
        is_table = fitresult.is_table,
    )
end
# Project observations onto the fitted right singular vectors, returning a
# table when the training data was a table and a matrix otherwise.
function MMI.transform(::TSVDTransformer, fitresult, Xuser)
    projected = as_matrix(Xuser) * fitresult.components
    return fitresult.is_table ? MMI.table(projected) : projected
end
## META DATA

# Package-level metadata consumed by the MLJ model registry.
MMI.metadata_pkg(
    TSVDTransformer,
    name="$PKG",
    uuid="9449cd9e-2762-5aa3-a617-5413e99d722e",
    url="https://github.com/JuliaLinearAlgebra/TSVD.jl",
    is_pure_julia=true,
    license="MIT",
    is_wrapper=false
)

# Model-level trait declarations: supported input/output scitypes, display
# name, and load path.
MMI.metadata_model(
    TSVDTransformer,
    input_scitype = Union{MMI.Table(MMI.Continuous),AbstractMatrix{MMI.Continuous}},
    output_scitype = Union{MMI.Table(MMI.Continuous),AbstractMatrix{MMI.Continuous}},
    human_name = "truncated SVD transformer",
    docstring = "Truncated SVD dimensionality reduction", # brief description
    path = "MLJTSVDInterface.TSVDTransformer"
)
"""
$(MMI.doc_header(TSVDTransformer))
This model performs linear dimension reduction. It differs from regular principal
component analysis in that data is not centered, so that sparsity, if present, can be
preserved during the computation. Text analysis is a common application.
The truncated SVD is computed by Lanczos bidiagonalization. The Lanczos vectors are
partially orthogonalized as described in R. M. Larsen, *Lanczos bidiagonalization with
partial reorthogonalization*, Department of Computer Science, Aarhus University, Technical
report, DAIMI PB-357, September 1998.
# Training data
In MLJ or MLJBase, bind an instance `model` to data with
mach = machine(model, X, y)
Here:
- `X` is any table of input features (eg, a `DataFrame`) whose columns are of scitype
`Continuous`; check the column scitypes with `schema(X)`; alternatively, `X` is any
`AbstractMatrix` with `Continuous` elements; check the scitype with `scitype(X)`.
Train the machine using `fit!(mach, rows=...)`.
# Operations
- `transform(mach, Xnew)`: transform (project) observations in `Xnew` into their
lower-dimensional representations; `Xnew` should have the same scitype as `X`
above, and the object returned is a table or matrix according to the type of `X`.
# Hyper-parameters
- `nvals=2`: The output dimension (number of singular values)
- `maxiter=1000`: The maximum number of iterations.
- `rng=Random.GLOBAL_RNG`: The random number generator to use, either an `Int` seed, or an
`AbstractRNG`.
# Fitted parameters
The fields of `fitted_params(mach)` are:
- `singular_values`: The estimated singular values, stored as a vector.
- `components`: The estimated component vectors, stored as a matrix.
- `is_table`: Whether or not `transform` returns a table or matrix.
# Examples
With tabular input:
```julia
using MLJ
SVD = @load TSVDTransformer pkg=TSVD
X, _ = @load_iris # `X`, a table
svd = SVD(nvals=3)
mach = machine(svd, X) |> fit!
(; singular_values, components) = fitted_params(mach)
Xsmall = transform(mach, X) # a table
to_matrix(x) = hcat(values(x)...)
@assert sum(round.((to_matrix(Xsmall) * components') - to_matrix(X))) == 0
```
With sparse matrix input:
```julia
using MLJ
using SparseArrays
SVD = @load TSVDTransformer pkg=TSVD
# sparse matrix with 10 rows (observations):
I = rand(1:10, 100)
J = rand(1:10^6, 100)
K = rand(100)
X = sparse(I, J, K, 10, 10^6)
svd = SVD(nvals=4)
mach = machine(svd, X) |> fit!
Xsmall = transform(mach, X) # matrix with 10 rows but only 4 columns
```
"""
TSVDTransformer
end # module
| MLJTSVDInterface | https://github.com/JuliaAI/MLJTSVDInterface.jl.git |
|
[
"MIT"
] | 0.1.6 | a749a3ab2d986ce3f981af4da17f904ae0c4478f | code | 2299 | using MLJTSVDInterface
using Test
using TSVD
using MLJBase
using SparseArrays
using StableRNGs # for RNGs stable across all julia versions
using MLJTestInterface
const rng = StableRNGs.StableRNG(123)
@testset "tsvd transformer" begin
n = 10
p = 20
prob_nonzero = 0.5
# test with a sparse matrix
X_sparse = sprand(rng, n, p, prob_nonzero)
# use defaults - transform into an n x 2 dense matrix
model = TSVDTransformer(rng=42)
mach = machine(model, X_sparse)
fit!(mach, verbosity=0)
X_transformed = transform(mach, X_sparse)
# also do the raw transformation with TSVD library
U, s, V = tsvd(X_sparse, 2)
@test size(X_transformed) == (10, 2)
@test isapprox(s, fitted_params(mach).singular_values)
@test size(V) == size(fitted_params(mach).components)
# test with a dense matrix
X_dense = rand(rng, n, p)
mach = machine(model, X_dense)
fit!(mach, verbosity=0)
X_transformed = transform(mach, X_dense)
# also do the raw transformation with TSVD library
U, s, V = tsvd(X_dense, 2)
@test size(X_transformed) == (10, 2)
@test isapprox(s, fitted_params(mach).singular_values)
@test size(V) == size(fitted_params(mach).components)
# test tables
X, _ = make_regression(100, 5)
mach = machine(model, X)
fit!(mach, verbosity=0)
X_transformed = transform(mach, X)
@test length(keys(X_transformed)) == 2
# test with default RNG
model = TSVDTransformer()
mach = machine(model, X_sparse)
fit!(mach, verbosity=0)
X_transformed = transform(mach, X_sparse)
# also do the raw transformation with TSVD library
U, s, V = tsvd(X_sparse, 2)
@test size(X_transformed) == (10, 2)
@test isapprox(s, fitted_params(mach).singular_values)
@test size(V) == size(fitted_params(mach).components)
end
@testset "generic interface tests" begin
for data in first.([
MLJTestInterface.make_regression(),
MLJTestInterface.make_regression(row_table=true),
])
failures, summary = MLJTestInterface.test(
[TSVDTransformer,],
data,
mod=@__MODULE__,
verbosity=2, # bump to debug
throw=false, # set to true to debug
)
@test isempty(failures)
end
end
| MLJTSVDInterface | https://github.com/JuliaAI/MLJTSVDInterface.jl.git |
|
[
"MIT"
] | 0.1.6 | a749a3ab2d986ce3f981af4da17f904ae0c4478f | docs | 573 | # MLJTSVDInterface.jl
Repository implementing the [MLJ](https://alan-turing-institute.github.io/MLJ.jl/dev/) model interface for models provided by
[TSVD.jl](https://github.com/JuliaLinearAlgebra/TSVD.jl).
| Linux | Coverage |
| :------------ | :------- |
| [](https://github.com/JuliaAI/MLJTSVDInterface.jl/actions) | [](https://codecov.io/github/JuliaAI/MLJTSVDInterface.jl?branch=master) |
| MLJTSVDInterface | https://github.com/JuliaAI/MLJTSVDInterface.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 646 | using MetidaNCA
using Documenter, Weave, PrettyTables, CSV, DataFrames
#using DocumenterLaTeX
# Generates the validation report before the docs build (weave side effects).
include("validation.jl")
#v_out_path = joinpath(dirname(@__FILE__), "src", "validation_report.md")
# Build the documentation site for MetidaNCA.
makedocs(
        modules = [MetidaNCA],
        sitename = "MetidaNCA.jl",
        authors = "Vladimir Arnautov",
        pages = [
            "Home" => "index.md",
            "Examples" => "examples.md",
            "Details" => "details.md",
            "Parameter list" => "parameters.md",
            "API" => "api.md"
            ],
        )
# Publish to gh-pages; `devbranch = "main"` tracks the default branch.
deploydocs(repo = "github.com/PharmCat/MetidaNCA.jl.git", devbranch = "main", forcepush = true
)
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 12308 | refdict = Dict(
:Cmax => [
190.869
261.177
105.345
208.542
169.334
154.648
153.254
138.327
167.347
125.482
],
:Tmax => [
1
1
1.5
1
4
2.5
2.5
4
3
2
],
:Cdose => [
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
],
:Clast => [
112.846
85.241
67.901
97.625
110.778
69.501
58.051
74.437
93.44
42.191
],
:AUClast => [
9585.4218
10112.176
5396.5498
9317.8358
9561.26
6966.598
7029.5735
7110.6745
8315.0803
5620.8945
],
:AUMClast => [
333582.48
298701.39
186032.06
313955.9
315181.56
226977.06
219797.71
240526.05
277613.98
154893.06
],
:AUCall => [
9585.4218
10112.1760
5396.5498
9317.8358
9561.2600
6966.5980
7029.5735
7110.6745
8315.0803
5620.8945
],
:Rsq => [
0.78607696
0.99276359
0.81358898
0.91885869
0.85335995
0.95011904
0.97031231
0.94796904
0.94753789
0.88092269
],
:ARsq => [
0.71476928
0.99035145
0.77630678
0.83771737
0.82891994
0.92517856
0.96041642
0.92195356
0.92130684
0.86391165
],
:Kel => [
0.0033847439
0.014106315
0.0032914304
0.0076953442
0.0068133279
0.0076922807
0.012458956
0.0089300798
0.0056458649
0.017189737
],
:HL => [
204.78571
49.137367
210.59148
90.073577
101.73401
90.109450057666
55.634451
77.619371
122.77077
40.323315
],
:Clast_pred => [
117.30578
82.53669
66.931057
100.76793
105.29832
71.939942
61.172702
75.604277
93.761762
38.810857
],
:AUCinf => [
42925.019
16154.93
26026.183
22004.078
25820.275
16001.76
11688.953
15446.21
24865.246
8075.3242
],
:AUCpct => [
77.669383
37.405019
79.26492
57.6540502829908
62.969953
56.463551
39.861391
53.964925
66.559429
30.394194
],
:MRTlast => [
34.801023
29.538786
34.472406
33.69408
32.964438
32.58076
31.267574
33.826053
33.386807
27.556657
],
:MRTinf => [
293.16224
71.937917
305.04073
130.69968
149.96684
128.24114
79.498252
114.8571
176.97811
58.746446
],
:Clinf => [
0.0023296437
0.0061900608
0.0038422846
0.0045446122
0.0038729255
0.0062493127
0.0085550864
0.0064740799
0.0040216775
0.012383404
],
:Vzinf => [
0.68827768
0.43881487
1.1673601
0.59056646
0.56843374
0.8124135
0.68666158
0.72497447
0.71232266
0.72039519
],
)
################################################################################
refdict2 = Dict(
:Cmax => [
190.869
261.177
105.345
208.542
169.334
154.648
153.254
138.327
167.347
125.482
],
:Tmax => [
1
1
1.5
1
4
2.5
2.5
4
3
2
],
:Cdose => [
121.239
62.222
49.849
52.421
0
57.882
19.95
22.724
105.438
13.634
],
:Clast => [
112.846
85.241
67.901
97.625
110.778
69.501
58.051
74.437
93.44
42.191
],
:AUClast => [
9566.59680869131
10054.28647805950
5392.45721941379
9297.09633445033
9519.18087436122
6948.98562111745
6988.77263241364
7058.81896352039
8302.36808633358
5486.83888944199
],
:AUMCtau => [
5477.20423544297
8367.57088170951
3455.34643479800
6014.64604481587
6609.78830163090
5064.72384740413
4976.96365993911
2863.00517022791
5386.88322025614
4713.47970846693
],
:AUCall => [
9566.59680869131
10054.28647805950
5392.45721941379
9297.09633445033
9519.18087436122
6948.98562111745
6988.77263241364
7058.81896352039
8302.36808633358
5486.83888944199
],
:Rsq => [
0.786076957
0.992763591
0.81358898
0.918858685
0.853359952
0.95011904
0.970312315
0.94796904
0.947537895
0.88092269
],
:ARsq => [
0.714769276
0.990351454
0.776306776
0.83771737
0.828919944
0.92517856
0.96041642
0.92195356
0.921306842
0.863911645
],
# LZint
:LZint => [
5.00848559255328
5.42889759540296
4.44064607555325
5.16688496904739
5.14735707974283
4.82967584017057
5.01074587961482
4.96847859724365
4.94725938794774
4.89636108788302
],
:Kel => [
0.00338474394000776
0.01410631494324980
0.00329143037249282
0.00769534422298109
0.00681332791154901
0.00769228066663777
0.01245895597676470
0.00893007980967252
0.00564586491870971
0.01718973683041960
],
:HL => [
204.785706938398
49.137367437811
210.591476080649
90.073577019460
101.734011566509
90.109450057666
55.634451382012
77.619371308325
122.770769499451
40.323315440951
],
:Clast_pred => [
117.3057799
82.53668981
66.93105694
100.7679335
105.2983206
71.93994201
61.17270231
75.60427664
93.76176158
38.81085735
],
:AUCinf => [
42906.1941313004
16097.0411126277
26022.0900281352
21983.3384532182
25778.1957695968
15984.1473646863
11648.1518057779
15394.3547690766
24852.5337997128
7941.2685538530
],
:AUCpct => [
77.7034598328254
37.5395365663056
79.2773861992505
57.7084419901233
63.0727419426760
56.5257660444885
40.0010169085628
54.1467046238288
66.5934743183831
30.9072744205350
],
:MRTtauinf => [
299.791671096989
74.654997085457
305.919973652938
143.538421744963
173.022067431888
124.653434795141
92.735873637166
175.461862330056
178.810514188399
69.516339753006
],
:Cltau => [
0.078847213948573
0.054590500813083
0.132511872732088
0.074823364534525
0.076283206573122
0.089747243392665
0.092646906460213
0.130442680913677
0.081991954283052
0.103060243120434
],
:Vztau => [
23.2948829648816
3.8699335037324
40.2596615257358
9.7231991664617
11.1961742577834
11.6671826317919
7.4361693413954
14.6071125559694
14.5224789228203
5.9954520617241
],
:AUCtau => [
1268.27563070553
1831.82052757491
754.64936037981
1336.48093242129
1310.90451610924
1114.24035123260
1079.36685444479
766.62024499617
1219.63186357018
970.30626915116
],
)
################################################################################
# 3 Linear Trapezoidal with Linear Interpolation, Dose 120, Dosetime 0.0, tau 12
refdict3 = Dict(
:Cmax => [190.869
261.177
105.345
208.542
169.334
154.648
153.254
138.327
167.347
125.482],
:Tmax => [1
1
1.5
1
4
2.5
2.5
4
3
2],
:Cdose => [0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0],
:Clast => [112.846
85.241
67.901
97.625
110.778
69.501
58.051
74.437
93.44
42.191],
:AUClast => [9585.4218
10112.176
5396.5498
9317.8358
9561.26
6966.598
7029.5735
7110.6745
8315.0803
5620.8945],
:AUMCtau => [9984.8168
14630.0690
6024.4953
10299.7210
11466.1230
8467.3568
9003.0193
6457.0058
10095.8180
8367.3005],
:AUCall => [9585.4218
10112.1760
5396.5498
9317.8358
9561.2600
6966.5980
7029.5735
7110.6745
8315.0803
5620.8945],
:Rsq => [0.78607696
0.99276359
0.81358898
0.91885869
0.86367664
0.95011904
0.97031231
0.94796904
0.94753789
0.87969895],
:ARsq => [0.71476928
0.99035145
0.77630678
0.83771737
0.84420187
0.92517856
0.96041642
0.92195356
0.92130684
0.86766884],
# LZint
:LZint => [5.0084856
5.4288976
4.4406461
5.166885
5.1496027
4.8296758
5.0107459
4.9684786
4.9472594
4.8651403],
:Kel => [0.0033847439
0.0141063150
0.0032914304
0.0076953442
0.0068579883
0.0076922807
0.0124589560
0.0089300798
0.0056458649
0.0165437520],
:HL => [204.785706938398
49.1373674378108
210.591476080649
90.0735770194602
101.071502239954
90.109450057666
55.6344513820121
77.6193713083247
122.770769499451
41.8978220179993],
:Clast_pred => [117.30578
82.53669
66.931057
100.76793
105.19623
71.939942
61.172702
75.604277
93.761762
39.408841],
:AUCinf => [42925.019
16154.93
26026.183
22004.078
25714.393
16001.76
11688.953
15446.21
24865.246
8171.1624],
:AUCpct => [77.669383
37.405019
79.26492
57.6540502829908
62.817478
56.463551
39.861391
53.964925
66.559429
31.210589],
:MRTtauinf => [302.40303
75.590599
312.72083
148.34069
172.0933
130.19061
91.908297
161.57402
176.30461
70.260736],
#Cltau, CLss
:Cltau => [0.07185191
0.050414459
0.12240579
0.070132959
0.06902661
0.085106504
0.083532913
0.10859036
0.073251565
0.092756742],
:Vztau => [21.228167
3.5738929
37.18924
9.113687
10.06514
11.063884
6.7046479
12.160066
12.974374
5.6067536],
:AUCtau => [1670.1018
2380.2695
980.34575
1711.0358
1738.46
1409.998
1436.5595
1105.0705
1638.1903
1293.7065],
)
refdict4 = Dict(
:Cmax => [
190.869
261.177
105.345
208.542
169.334
154.648
153.254
138.327
167.347
125.482
],
:Tmax => [
1
1
1.5
1
4
2.5
2.5
4
3
2
],
:Cdose => [
0
0
0
0
0
0
0
0
0
0
],
:Clast => [
112.846
85.241
67.901
97.625
110.778
69.501
58.051
74.437
93.44
42.191
],
:AUClast => [
9572.8582
10054.0367665966
5391.5322
9296.2179
9518.6531
6948.5757
6987.0645
7064.7816
8298.9634
5485.6538
],
:AUMCtau => [
9973.8062
14631.1197073321
6022.9286
10307.954
11473.081
8471.0956
8982.0378
6271.7444
10040.829690586
8361.7894
],
:AUCall => [
9572.8582
10054.0367665966
5391.5322
9296.2179
9518.6531
6948.5757
6987.0645
7064.7816
8298.9634
5485.6538
],
:Rsq => [
0.78607696
0.99276359
0.81358898
0.91885869
0.85335995
0.95011904
0.97031231
0.94796904
0.94753789
0.88092269
],
:ARsq => [
0.71476928
0.99035145
0.77630678
0.83771737
0.82891994
0.92517856
0.96041642
0.92195356
0.92130684
0.86391165
],
# LZint
:LZint => [
5.0084856
5.4288976
4.4406461
5.166885
5.1473571
4.8296758
5.0107459
4.9684786
4.9472594
4.8963611
],
:Kel => [
0.003384744
0.014106315
0.00329143
0.007695344
0.006813328
0.007692281
0.012458956
0.00893008
0.005645865
0.017189737
],
:HL => [
204.78571
49.137367
210.59148
90.073577
101.73401
90.109450057666
55.634451
77.619371
122.77077
40.323315
],
:Clast_pred => [
117.30578
82.53669
66.931057
100.76793
105.29832
71.939942
61.172702
75.604277
93.761762
38.810857
],
:AUCinf => [
42912.456
16096.791
26021.165
21982.4599914207
25777.668
15983.737
11646.444
15400.317
24849.129
7940.0834
],
:AUCpct => [
77.692122
37.540119
79.280204
57.710748
63.074033
56.527216
40.006884
54.12574
66.602599
30.911888
],
:MRTtauinf => [
302.63508
75.323724
313.06798
148.31081
172.5577
130.22554
91.866692
164.91799
176.98523
68.167555
],
:Cltau => [
0.071927102
0.050429351
0.12256044
0.070184147
0.069035447
0.0852177496596485
0.08379761
0.11110872
0.073575577
0.092819834
],
:Vztau => [
21.250382
3.5749486
37.236223
9.1203389
10.132412
11.078346
6.7258934
12.442074
13.031764
5.399724
],
:AUCtau => [
1668.3558
2379.5666
979.10878
1709.7878
1738.2375
1408.1573
1432.0218
1080.0233
1630.976
1292.8271
],
)
################################################################################
urefdict = Dict{Symbol, Float64}(
#:N_Samples => 5,
#:Dose => 100,
:Rsq => 0.90549162,
:ARsq => 0.81098324, #Rsq_adjusted
#:Corr_XY => -0.95157323,
#:No_points_lambda_z => 3,
:Kel => 0.13445441, #Lambda_z
#:Lambda_z_intercept => 0.79280975,
#:Lambda_z_lower => 4,
#:Lambda_z_upper => 15,
:HL => 5.1552579, #HL_Lambda_z
#:Span => 2.1337439,
#:Tlag => 0,
:Tmax => 1.5, #Tmax_Rate
:Maxrate => 4, #Max_Rate
#:Mid_Pt_last => 15,
:Rlast => 0.33333333, #Rate_last
#:Rate_last_pred => 0.2940497,
:AUClast => 17.125, #AURC_last
#:AURC_last_D => 0.17125,
:Vol => 11, #Vol_UR
:AR => 16, #Amount_Recovered
:Prec => 16, #Percent_Recovered
:AUCall => 17.125, #AURC_all
:AUCinf => 19.604155, #AURC_INF_obs
:AUCpct => 12.646069, #AURC_%Extrap_obs
#:AURC_INF_pred => 19.311984,
#:AURC_%Extrap_pred => 11.324493,
)
pdrefdict = Dict{Symbol, Float64}(
#:N_Samples => 5,
#:Dose => 100,
:Rsq => 0.90549162,
:ARsq => 0.81098324, #Rsq_adjusted
#:Corr_XY => -0.95157323,
#:No_points_lambda_z => 3,
:Kel => 0.13445441, #Lambda_z
#:Lambda_z_intercept => 0.79280975,
#:Lambda_z_lower => 4,
#:Lambda_z_upper => 15,
:HL => 5.1552579, #HL_Lambda_z
#:Span => 2.1337439,
#:Tlag => 0,
:Tmax => 1.5, #Tmax_Rate
:Maxrate => 4, #Max_Rate
#:Mid_Pt_last => 15,
:Rlast => 0.33333333, #Rate_last
#:Rate_last_pred => 0.2940497,
:AUClast => 17.125, #AURC_last
#:AURC_last_D => 0.17125,
:Vol => 11, #Vol_UR
:AR => 16, #Amount_Recovered
:Prec => 16, #Percent_Recovered
:AUCall => 17.125, #AURC_all
:AUCinf => 19.604155, #AURC_INF_obs
:AUCpct => 12.646069, #AURC_%Extrap_obs
#:AURC_INF_pred => 19.311984,
#:AURC_%Extrap_pred => 11.324493,
)
# NOTE(review): this literal rebinds `pdrefdict`, shadowing the identically
# named Dict defined immediately above (which duplicates `urefdict`); the
# earlier definition is dead code — confirm which one is intended.
pdrefdict = Dict{Symbol, Float64}(
#N_Samples => 13,
:Tmax => 5.0,
:Rmax => 8.0 ,
:AUCABL => 7.3857143 ,
:AUCBBL => 8.7357143 ,
:AUCATH => 13.959524 ,
:AUCBTH => 1.8095238 ,
:AUCBTW => 6.926190 ,
:TABL => 3.4809524 ,
:TBBL => 5.5190476 ,
:TATH => 5.7619048 ,
:TBTH => 3.2380952 ,
:AUCNETB => -1.35 ,
:AUCNETT => 12.15 ,
:TIMEBTW => 2.2809524,
) | MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1297 |
# Render the validation report (validation_report.jmd -> PDF via pandoc/xelatex).
#weave(joinpath(dirname(pathof(MetidaNCA)), "..", "docs", "validation_report.jmd");
weave(joinpath(dirname(@__FILE__), "validation_report.jmd");
#doctype = "pandoc2pdf",
doctype = "pandoc2pdf",
#out_path = joinpath(dirname(pathof(MetidaNCA)), "..", "docs", "src"),
out_path = joinpath(dirname(@__FILE__), "src"),
pandoc_options=["--toc", "-V colorlinks=true" , "-V linkcolor=blue", "-V urlcolor=red",
"-V toccolor=gray", "--number-sections"])
# Remove LaTeX build artifacts left next to the generated report.
rm(joinpath(dirname(@__FILE__), "src", "validation_report.aux"); force=true)
rm(joinpath(dirname(@__FILE__), "src", "validation_report.log"); force=true)
rm(joinpath(dirname(@__FILE__), "src", "validation_report.out"); force=true)
#=
using Documenter, MetidaNCA, Weave, PrettyTables, CSV, DataFrames
weave(joinpath(dirname(@__FILE__), "validation_report.jmd");
#doctype = "pandoc2pdf",
doctype = "pandoc2pdf",
out_path = ppath,
pandoc_options=["--toc", "-V colorlinks=true" , "-V linkcolor=blue", "-V urlcolor=red", "-V toccolor=gray"])
mainfont: romanuni.ttf
sansfont: NotoSans-Regular.ttf
monofont: NotoSansMono-Regular.ttf
mathfont: texgyredejavu-math.otf
- name: Install TeXlive and Pandoc
run: sudo apt-get install pandoc texlive texlive-publishers texlive-science latexmk texlive-xetex texlive-latex-base texlive-fonts-recommended
=#
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1389 | # Metida
# Copyright Β© 2019-2020 Vladimir Arnautov aka PharmCat <[email protected]>
module MetidaNCA
using RecipesBase
import Base: length, length, push!, resize!
import MetidaBase
import MetidaBase: Tables, StatsBase, PrecompileTools,
PrettyTables,
AbstractIdData,
AbstractSubject,
DataSet,
AbstractSubjectResult,
AbstractResultData,
isnanormissing,
getid,
getdata,
metida_table, metida_table_, MetidaTable,
uniqueidlist,
indsdict!,
subset
using MetidaBase.Requires
export pkimport, upkimport, pdimport, nca!, nca, DoseTime, ElimRange, LimitRule, NoPageSort,
auc_sparse,
setdosetime!, setkelauto!, setkelrange!, applylimitrule!, setbl!, setth!,
pkplot,
getkeldata, getkelauto, getkelrange, getdosetime, getbl, getth, subset,
metida_table,
PKSubject, UPKSubject, PDSubject, NCAResult
function __init__()
@require Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" begin
savefig = Plots.savefig
end
end
const LOG2 = log(2)
include("types.jl")
include("setkelauto.jl")
include("setkelrange.jl")
include("setdosetime.jl")
include("getkeldata.jl")
include("applylimitrule.jl")
include("show.jl")
include("import.jl")
include("nca.jl")
include("plots.jl")
include("metidatable.jl")
include("setblth.jl")
include("timefilter.jl")
include("sparse.jl")
include("atomic.jl")
include("precompile.jl")
end # module
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 3201 | #Subject
"""
applylimitrule!(data::Union{PKSubject, PDSubject}, rule::LimitRule)
Apply rule to PK subject .
* STEP 1 (NaN step): replace all `NaN` and `missing` values with nan keyword value (if `nan` not NaN);
* STEP 2 (LLOQ step): replace values below `lloq` with `btmax` value if this value befor Tmax or with atmax if this value after Tmax (if `lloq` not NaN);
* STEP 3 (remove NaN): `rm` == true, then remove all `NaN` and `missing` values.
"""
function applylimitrule!(data::Union{PKSubject, PDSubject}, rule::LimitRule)
applylimitrule!(data.time, data.obs, rule)
data
end
"""
applylimitrule!(f::Function, data::DataSet{T}, rule::LimitRule) where T <: Union{PKSubject, PDSubject}
Apply if `f(subj)` return `true`.
"""
function applylimitrule!(f::Function, data::DataSet{T}, rule::LimitRule) where T <: Union{PKSubject, PDSubject}
for i in data
if f(i) applylimitrule!(i, rule) end
end
data
end
#DS ind Int
"""
    applylimitrule!(data::DataSet{T}, rule::LimitRule, ind::Int) where T <: Union{PKSubject, PDSubject}

Apply `rule` to the subject at index `ind`; return the dataset.
"""
function applylimitrule!(data::DataSet{T}, rule::LimitRule, ind::Int) where T <: Union{PKSubject, PDSubject}
    applylimitrule!(data[ind], rule)
    return data
end
#DS iter Int
"""
    applylimitrule!(data::DataSet{T}, rule::LimitRule, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: Union{PKSubject, PDSubject}

Apply `rule` to the subjects at indices `inds`; return the dataset.
"""
function applylimitrule!(data::DataSet{T}, rule::LimitRule, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: Union{PKSubject, PDSubject}
    foreach(ix -> applylimitrule!(data[ix], rule), inds)
    return data
end
#DS all
"""
    applylimitrule!(data::DataSet{T}, rule::LimitRule) where T <: Union{PKSubject, PDSubject}

Apply `rule` to every subject in the dataset; return the dataset.
"""
function applylimitrule!(data::DataSet{T}, rule::LimitRule) where T <: Union{PKSubject, PDSubject}
    for subj in data
        applylimitrule!(subj, rule)
    end
    return data
end
#DS Dict
"""
    applylimitrule!(data::DataSet{T}, rule::LimitRule, sort::Dict) where T <: Union{PKSubject, PDSubject}

Apply `rule` to every subject whose id dictionary contains all `sort`
key/value pairs; return the dataset.
"""
function applylimitrule!(data::DataSet{T}, rule::LimitRule, sort::Dict) where T <: Union{PKSubject, PDSubject}
    for subj in data
        issubset(sort, subj.id) && applylimitrule!(subj, rule)
    end
    return data
end
"""
applylimitrule!(time, obs, rule::LimitRule)
"""
function applylimitrule!(time, obs, rule::LimitRule)
if validobsn(time, obs) == 0 return Float64[], Float64[] end
cmax, tmax, tmaxn = ctmax(time, obs)
#NaN Rule
obsn = length(obs)
if !isnan(rule.nan)
for i = 1:obsn
if isnanormissing(obs[i])
obs[i] = rule.nan
end
end
end
#LLOQ rule
if !isnan(rule.lloq)
for i = 1:obsn
if !isnanormissing(obs[i]) && obs[i] <= rule.lloq
if i <= tmaxn
obs[i] = rule.btmax
else
obs[i] = rule.atmax
end
end
end
end
#NaN Remove rule
if rule.rm
inds = findall(isnanormissing, obs)
deleteat!(time, inds)
deleteat!(obs, inds)
end
time, obs
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1529 |
"""
cmax(time::AbstractVector, obs::AbstractVector)
Return Cmax
"""
function cmax(time::AbstractVector, obs::AbstractVector)
length(time) == length(obs) || error("length(time) != length(obs)")
cmax, tmax, tmaxn = ctmax(time, obs)
cmax
end
"""
tmax(time::AbstractVector, obs::AbstractVector)
Return Tmax
"""
function tmax(time::AbstractVector, obs::AbstractVector)
length(time) == length(obs) || error("length(time) != length(obs)")
cmax, tmax, tmaxn = ctmax(time, obs)
tmax
end
"""
auc(time::AbstractVector, obs::AbstractVector; calcm = :lint)
Return AUC. All concentration points included in calculation.
* `calcm` - AUC/AUMC calculation method:
- `:lint` - linear trapezoidal;
- `:logt` - log-trapezoidal after Tmax;
- `:luld` - linar up log down;
- `:luldt` - linear up log down after Tmax;
!!! note
This function doesn't contain `NaN`, `missing` or dosing time checks.
"""
function auc(time::AbstractVector, obs::AbstractVector; calcm = :lint)
length(time) == length(obs) || error("length(time) != length(obs)")
length(time) >= 2 || error("length(time) >= 2")
auc_ = 0.0
if calcm == :lint
@inbounds for i = 1:(length(time) - 1)
auc_ += linauc(time[i], time[i + 1], obs[i], obs[i+1])
end
else
cmax, tmax, tmaxn = ctmax(time, obs)
@inbounds for i = 1:(length(time) - 1)
auc_ += aucpart(time[i], time[i + 1], obs[i], obs[i + 1], calcm, i >= tmaxn)
end
end
auc_
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 257 | """
    getkeldata(data::T) where T <: PKSubject

Return the elimination (kel) fit data stored for the PK subject.
"""
function getkeldata(data::T) where T <: PKSubject
    data.keldata
end
"""
getkeldata(data::T) where T <: PKSubject
"""
function getkeldata(ncar::T) where T <: NCAResult
getkeldata(ncar.data)
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 13833 | # ΠΠ°ΠΏΠΎΠ»Π½ΡΠ΅Ρ ΡΠ»ΠΎΠ²Π°ΡΡ d ΠΈΠ½Π΄Π΅ΠΊΡΠ°ΠΌΠΈ ΠΈΠ½Π΄ΠΈΠ²ΠΈΠ΄ΡΠ°Π»ΡΠ½ΡΡ
Π·Π½Π°ΡΠ΅Π½ΠΈΠΉ
# Return the values of `v` that occur more than once.
# (The original comprehension shadowed the argument `v` with the count value.)
nonunique(v) = [val for (val, cnt) in StatsBase.countmap(v) if cnt > 1]
# Coerce one scalar value to a float, never throwing:
# * `missing`        -> `NaN`
# * `AbstractFloat`  -> returned unchanged
# * `AbstractString` -> `tryparse` as Float64; on failure warn (if `warn`) and return `NaN`
# * anything else    -> `float(...)`; on failure return `NaN` silently
function floatparse(data, warn)
    ismissing(data) && return NaN
    isa(data, AbstractFloat) && return data
    if isa(data, AbstractString)
        parsed = tryparse(Float64, data)
        if parsed === nothing
            warn && @warn "Value $data parsed as `NaN`"
            return NaN
        end
        return parsed
    end
    try
        return float(data)
    catch
        return NaN
    end
end
#=
function floatparse(data::AbstractVector)
v = Vector{Float64}(undef, length(data))
@inbounds for i = 1:length(data)
if !isa(data[i], AbstractFloat) && !ismissing(data[i])
if isa(data[i], AbstractString)
try
v[i] = parse(Float64, data[i])
catch
v[i] = NaN
end
else
try
v[i] = float(data[i])
catch
v[i] = NaN
end
end
elseif ismissing(data[i])
v[i] = NaN
else
v[i] = data[i]
end
end
identity.(v)
end
=#
#=
Check element type of time column
Check element type of observation / concentration column
return new arrays
=#
# Validate/normalize time and concentration columns; return new arrays.
# Time must be numeric; concentrations are coerced to floats where needed.
function checkvalues(timevals_sp, concvals_sp; warn = true)
    # `identity.(x)` re-collects the data with the narrowest element type
    # (e.g. Vector{Any} of Ints becomes Vector{Int}).
    timevals_sp_ = identity.(timevals_sp)
    eltype(timevals_sp_) <: Number || error("Some time values not a number ($(eltype(timevals_sp_))))!")
    if !(eltype(concvals_sp) <: Union{Number, Missing})
        # Non-numeric concentrations: coerce each element via `floatparse`.
        warn && @warn "Some concentration values maybe not a number, try to fix."
        concvals_sp_ = floatparse.(concvals_sp, warn)
    elseif eltype(concvals_sp) <: Integer
        warn && @warn "Concentration values transformed to float."
        concvals_sp_ = float.(concvals_sp)
    else
        concvals_sp_ = identity.(concvals_sp)
    end
    timevals_sp_, concvals_sp_
end
"""
pkimport(data, time, conc, sort;
kelauto = true,
elimrange = ElimRange(),
dosetime = DoseTime(),
limitrule::Union{Nothing, LimitRule} = nothing,
warn = true,
kwargs...)
Import PK data from table `data`.
* `time` - time column;
* `conc` - concentration column;
* `sort` - subject sorting columns.
keywords:
* `kelauto` - if `true` auto range settings, if `false` used `kelstart`/`kelend` from `elimrange`;
* `elimrange` - set elimination range settings;
* `dosetime` - set dose and dose time, by default dosetime = 0, dose is `NaN`;
* `limitrule` - apply limitrule to subject;
* `warn` - false for warnings supress.
!!! note
If time column have non-unique values - last pair time-concentration will be used.
See also: [`ElimRange`](@ref), [`DoseTime`](@ref), [`LimitRule`](@ref).
"""
function pkimport(data, time, conc, sort; kelauto = true, elimrange = ElimRange(), dosetime = nothing, limitrule::Union{Nothing, LimitRule} = nothing, warn = true, kwargs...)
if isa(sort, String) sort = [Symbol(sort)] end
if isa(sort, Symbol) sort = [sort] end
Tables.istable(data) || error("Data not a table!")
cols = Tables.columns(data)
cdata = Tuple(Tables.getcolumn(cols, y) for y in sort)
d = Dict{Tuple{eltype.(cdata)...}, Vector{Int}}()
indsdict!(d, cdata)
timec = Tables.getcolumn(data, time)
concc = Tables.getcolumn(data, conc)
if isnothing(dosetime) dosetime = DoseTime(NaN, zero(eltype(timec)), NaN) end
any(isnanormissing, timec) && error("Some time values is NaN or Missing!")
sdata = Vector{PKSubject}(undef, length(d))
i = one(Int)
@inbounds for (k, v) in d
timevals = view(timec, v)
concvals = view(concc, v)
if !allunique(timevals)
nuv = nonunique(timevals)
warn && @warn "Not all time values is unique ($nuv), last observation used! ($k)"
nuvinds = findall(x -> x == first(nuv), timevals)
resize!(nuvinds, length(nuvinds) - 1)
if length(nuv) > 1
for cnt = 2:length(nuv)
nuvinds_ = findall(x -> x == nuv[cnt], timevals)
resize!(nuvinds_, length(nuvinds_) - 1)
append!(nuvinds, nuvinds_)
end
end
sort!(nuvinds)
deleteat!(v, nuvinds)
timevals = view(timec, v)
concvals = view(concc, v)
end
sp = sortperm(timevals)
timevals_spv = view(timevals, sp)
concvals_spv = view(concvals, sp)
timevals_sp, concvals_sp = checkvalues(timevals_spv, concvals_spv; warn = warn)
sdata[i] = PKSubject(timevals_sp, concvals_sp, kelauto, elimrange, dosetime, Dict(sort .=> k))
i += one(Int)
end
ds = DataSet(identity.(sdata))
if !isnothing(limitrule)
applylimitrule!(ds, limitrule)
end
ds
end
"""
pkimport(data, time, conc;
warn = true,
kwargs...)
Import PK data from tabular data `data`, `time` - time column, `conc` - concentration column.
"""
function pkimport(data, time, conc; warn = true, kwargs...)
timevals_sp, concvals_sp = checkvalues(Tables.getcolumn(data, time), Tables.getcolumn(data, conc); warn = warn)
pkimport(timevals_sp, concvals_sp; warn = warn, kwargs...)
end
"""
pkimport(time, conc;
kelauto = true,
elimrange = ElimRange(),
dosetime = DoseTime(),
id = Dict{Symbol, Any}(),
limitrule::Union{Nothing, LimitRule} = nothing,
warn = true,
kwargs...)
Import PK data from time vector `time` and concentration vector `conc`.
"""
function pkimport(time, conc; kelauto = true, elimrange = ElimRange(), dosetime = nothing, id = Dict{Symbol, Any}(), limitrule::Union{Nothing, LimitRule} = nothing, warn = true, kwargs...)
timevals_sp, concvals_sp = checkvalues(time, conc, warn = warn)
if isnothing(dosetime) dosetime = DoseTime(NaN, zero(eltype(timevals_sp)), NaN) end
pks = PKSubject(timevals_sp, concvals_sp, kelauto, elimrange, dosetime, id)
if !isnothing(limitrule)
applylimitrule!(pks, limitrule)
end
pks
end
# Keyword-style front-end: dispatch to the positional `pkimport` methods,
# with or without subject sorting columns.
function pkimport(data; time, conc, sort = nothing, kwargs...)
    isnothing(sort) && return pkimport(data, time, conc; kwargs...)
    pkimport(data, time, conc, sort; kwargs...)
end
"""
upkimport(data, stime, etime, conc, vol, sort; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime())
Import urine PK data from table `data`.
* `stime` - start time column;
* `etime` - end time column;
* `conc` - concentration column;
* `vol` - volume column;
* `sort` - subject sorting columns.
"""
function upkimport(data, stime, etime, conc, vol, sort; kelauto = true, elimrange = ElimRange(), dosetime = nothing)
if isa(sort, String) sort = [Symbol(sort)] end
if isa(sort, Symbol) sort = [sort] end
cols = Tables.columns(data)
cdata = Tuple(Tables.getcolumn(cols, y) for y in sort)
d = Dict{Tuple{eltype.(cdata)...}, Vector{Int}}()
indsdict!(d, cdata)
tnames = Symbol.(names(data))
isa(stime, Symbol) || stime in tnames || error("column Start Time ($stime) not found")
isa(etime, Symbol) || etime in tnames || error("column End Time ($etime) not found")
isa(conc, Symbol) || conc in tnames || error("column Concentration ($conc) not found")
isa(vol, Symbol) || vol in tnames || error("column Volume ($vol) not found")
stimec = Tables.getcolumn(data, stime)
etimec = Tables.getcolumn(data, etime)
concc = Tables.getcolumn(data, conc)
volc = Tables.getcolumn(data, vol)
if isnothing(dosetime) dosetime = DoseTime(NaN, zero(promote_type(eltype(stimec), eltype(etimec))), NaN) end
any(isnanormissing, stimec) && error("Some Start Time values is NaN or Missing!")
any(isnanormissing, etimec) && error("Some End Time values is NaN or Missing!")
sdata = Vector{UPKSubject}(undef, length(d))
i = one(Int)
@inbounds for (k, v) in d
stimevals = view(stimec, v)
etimevals = view(etimec, v)
concvals = view(concc, v)
volvals = view(volc, v)
sdata[i] = upkimport(stimevals, etimevals, concvals, volvals; kelauto = kelauto, elimrange = elimrange, dosetime = dosetime, id = Dict(sort .=> k))
i += one(Int)
end
return DataSet(identity.(sdata))
end
"""
upkimport(data, stime, etime, conc, vol; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime())
Import single urine PK data from table `data`.
* `stime` - start time column;
* `etime` - end time column;
* `conc` - concentration column;
* `vol` - volume column.
"""
function upkimport(data, stime, etime, conc, vol; kelauto = true, elimrange = ElimRange(), dosetime = nothing)
upkimport(Tables.getcolumn(data, stime), Tables.getcolumn(data, etime), Tables.getcolumn(data, conc), Tables.getcolumn(data, vol); kelauto = kelauto, elimrange = elimrange, dosetime = dosetime)
end
"""
upkimport(stime, etime, conc, vol; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime())
Import urine PK data from time vectors:
* `stime` - start times;
* `etime` - end times;
* `conc` - concentrations;
* `vol` - volumes.
"""
function upkimport(stime, etime, conc, vol; kelauto = true, elimrange = ElimRange(), dosetime = nothing, id = Dict{Symbol, Any}())
any(isnanormissing, stime) && error("Some Start Time values is NaN or Missing!")
any(isnanormissing, etime) && error("Some End Time values is NaN or Missing!")
timeranges = collect(zip(stime, etime))
sp = sortperm(stime)
timevals_sp = timeranges[sp]
concvals_sp = conc[sp]
volvals_sp = vol[sp]
time_type = promote_type(typeof(zero(eltype(stime))), typeof(zero(eltype(stime))))
zerotime = zero(time_type)
if isnothing(dosetime)
dosetime = DoseTime(NaN, zerotime, NaN*zerotime)
else
if !(time_type <: typeof(dosetime.time)) && !(time_type <: Real)
@warn "Type of dose time can be wrong... try to fix it"
dosetime = DoseTime(dosetime.dose, dosetime.time*oneunit(time_type), dosetime.tau)
end
end
if length(timevals_sp) > 1
for c = 2:length(timevals_sp)
timevals_sp[c][1] == timevals_sp[c-1][2] || error("Start time ($(timevals_sp[c][1])) for observation $c not equal End time ($(timevals_sp[c-1][2])) for observation $(c-1)!")
end
end
UPKSubject(timevals_sp, concvals_sp, volvals_sp, kelauto, elimrange, dosetime, id)
end
"""
pdimport(data, time, obs, sort;
bl = 0,
th = 0,
limitrule::Union{Nothing, LimitRule} = nothing,
warn = true)
Import pharmackodynamic data from table:
* `data` - data table;
* `time` - observation time;
* `obs` - observation value;
* `sort` - sorting columns.
Keywords:
* `bl` - baseline;
* `th` - threshold;
* `limitrule` - limit rule;
* `warn` - warning supress if `false`.
"""
function pdimport(data, time, obs, sort; bl = 0, th = 0, limitrule::Union{Nothing, LimitRule} = nothing, warn = true)
    # Accept a single column name as String or Symbol and normalize to a vector.
    if isa(sort, String) sort = [Symbol(sort)] end
    if isa(sort, Symbol) sort = [sort] end
    Tables.istable(data) || error("Data not a table!")
    cols = Tables.columns(data)
    # Group row indices by the tuple of sorting-column values (one group = one subject).
    cdata = Tuple(Tables.getcolumn(cols, y) for y in sort)
    d = Dict{Tuple{eltype.(cdata)...}, Vector{Int}}()
    indsdict!(d, cdata)
    timec = Tables.getcolumn(data, time)
    obsc = Tables.getcolumn(data, obs)
    any(isnanormissing, timec) && error("Some time values is NaN or Missing!")
    sdata = Vector{PDSubject}(undef, length(d))
    i = one(Int)
    @inbounds for (k, v) in d
        timevals = timec[v]
        obsvals = obsc[v]
        # Duplicate time points: keep only the LAST observation of each repeated time.
        if !allunique(timevals)
            nuv = nonunique(timevals)
            warn && @warn "Not all time values is unique ($nuv), last observation used! ($k)"
            # findall returns occurrences in order; resize! drops the last index so
            # the final occurrence survives deletion.
            nuvinds = findall(x -> x == first(nuv), timevals)
            resize!(nuvinds, length(nuvinds) - 1)
            if length(nuv) > 1
                for cnt = 2:length(nuv)
                    nuvinds_ = findall(x -> x == nuv[cnt], timevals)
                    resize!(nuvinds_, length(nuvinds_) - 1)
                    append!(nuvinds, nuvinds_)
                end
            end
            sort!(nuvinds)
            deleteat!(v, nuvinds)
            timevals = view(timec, v)
            obsvals = view(obsc, v)
        end
        # Sort each subject's observations by time before construction.
        sp = sortperm(timevals)
        timevals_spv = view(timevals, sp)
        obsvals_spv = view(obsvals, sp)
        timevals_sp, obsvals_sp = checkvalues(timevals_spv, obsvals_spv, warn = warn)
        sdata[i] = PDSubject(timevals_sp, obsvals_sp, bl, th, Dict(sort .=> k))
        i += one(Int)
    end
    # `identity.` narrows the eltype of the subject vector to the concrete type.
    ds = DataSet(identity.(sdata))
    if !isnothing(limitrule)
        applylimitrule!(ds, limitrule)
    end
    ds
end
"""
pdimport(data, time, obs;
warn = true,
kwargs...)
Import PD data from tabular data `data`, `time` - time column, `obs` - observations column.
"""
function pdimport(data, time, obs; warn = true, kwargs...)
    # Table convenience method without sorting columns: extract and validate the
    # two columns, then delegate to the vector-based `pdimport`.
    timevals_sp, obsvals_sp = checkvalues(Tables.getcolumn(data, time), Tables.getcolumn(data, obs), warn = warn)
    pdimport(timevals_sp, obsvals_sp; kwargs...)
end
"""
pdimport(time, obs;
bl = 0,
th = 0,
id = Dict{Symbol, Any}(),
warn = true)
Import PD data from time vector `time` and observations vector `obs`.
"""
function pdimport(time, obs; bl = 0, th = 0, id = Dict{Symbol, Any}(), warn = true)
    # Validate on copies so the caller's vectors are never mutated.
    tvals, ovals = checkvalues(copy(time), copy(obs), warn = warn)
    return PDSubject(tvals, ovals, bl, th, id)
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1052 | #=
function Base.append!(t::MetidaTable, t2::MetidaTable)
if !(names(t) β names(t2)) error("Names for t not in t2") end
for n in names(t)
append!(t.table[n], t2.table[n])
end
t
end
=#
# Convert a DataSet of PK subjects to a table: one column per id key plus
# :time and :obs, with subjects stacked vertically (one row per observation).
function MetidaBase.metida_table_(obj::DataSet{T}) where T <: PKSubject
    # Union of id keys across all subjects (subjects may carry different id sets).
    idset = Set(keys(first(obj).id))
    if length(obj) > 1
        for i = 2:length(obj)
            union!(idset, Set(keys(obj[i].id)))
        end
    end
    mt1 = metida_table_((fill(getid(obj, 1, c), length(obj[1])) for c in idset)...; names = idset)
    # deepcopy: the first subject's vectors become table columns that are appended to below.
    mt2 = metida_table_(deepcopy(obj[1].time), deepcopy(obj[1].obs); names = [:time, :obs])
    mtm = merge(mt1, mt2)
    if length(obj) > 1
        for i = 2:length(obj)
            mt1 = metida_table_((fill(getid(obj, i, c), length(obj[i])) for c in idset)...; names = idset)
            mt2 = metida_table_(obj[i].time, obj[i].obs; names = [:time, :obs])
            amtm = merge(mt1, mt2)
            for n in keys(mtm)
                append!(mtm[n], amtm[n])
            end
        end
    end
    mtm
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 43631 | # Pharmacokinetics
# Makoid C, Vuchetich J, Banakar V. 1996-1999. Basic Pharmacokinetics.
function validobsn(time::Vector{<:Number}, obs::Vector)
    # Number of paired points where both time and observation are present
    # (neither NaN nor missing).
    if length(time) != length(obs) error("Vector length `time` not equal `obs`") end
    cnt = 0
    @inbounds for j in eachindex(time)
        if !isnanormissing(time[j]) && !isnanormissing(obs[j])
            cnt += 1
        end
    end
    return cnt
end
function firstobs(time::Vector{T}, obs::Vector, dosetime) where T <: Number
    # Index of the first non-missing observation at or after the dose time.
    @inbounds for idx in eachindex(time)
        time[idx] >= dosetime && !isnanormissing(obs[idx]) && return idx
    end
    error("Observations not found")
end
function firstobs(time::Vector{<:Tuple}, obs, vol, dosetime)
    # Urine variant: times are (start, end) tuples; both concentration and
    # volume must be present and the interval must start at/after the dose time.
    @inbounds for idx in eachindex(time)
        if time[idx][1] >= dosetime && !isnanormissing(obs[idx]) && !isnanormissing(vol[idx])
            return idx
        end
    end
    error("Observations not found")
end
function ctaumin(time::AbstractVector, obs::AbstractVector, taulastp::Int)
    # Minimum concentration within the dosing interval (first `taulastp` points),
    # skipping NaN/missing values.
    firstidx = 0
    cmin = NaN
    for j = 1:taulastp
        if !isnanormissing(obs[j])
            firstidx = j
            cmin = obs[j]
            break
        end
    end
    length(obs) == 1 && return cmin
    @inbounds for j = firstidx:taulastp
        if !isnanormissing(obs[j]) && obs[j] < cmin
            cmin = obs[j]
        end
    end
    return cmin
end
function ctmax(time::AbstractVector, obs::AbstractVector{T}, taulastp) where T
    # Cmax/Tmax within the first `taulastp` points of the profile.
    # Returns (cmax, tmax, index of cmax).
    cmax = obs[1]
    tmaxn = 1
    if length(obs) == 1 return cmax, first(time), tmaxn end
    # FIX: error message previously had an unbalanced parenthesis ("length(obs").
    taulastp > length(obs) && error("taulastp > length(obs)")
    @inbounds for i = 2:taulastp
        if !isnanormissing(obs[i]) && obs[i] > cmax
            cmax = obs[i]
            tmaxn = i
        end
    end
    return cmax, time[tmaxn], tmaxn
end
function ctmax(time::AbstractVector, obs::AbstractVector)
    # Cmax/Tmax over the whole profile, starting at the first non-missing value.
    start = findfirst(!isnanormissing, obs)
    peak = obs[start]
    peakn = start
    start == length(obs) && return peak, time[start], peakn
    @inbounds for j = start + 1:length(obs)
        if !isnanormissing(obs[j]) && obs[j] > peak
            peak = obs[j]
            peakn = j
        end
    end
    return peak, time[peakn], peakn
end
function ctmax(data::PKSubject)
    # Cmax/Tmax for a subject: starts at the first valid observation after the
    # dose time; when tau is set, only points inside the dosing interval count.
    fobs = firstobs(data.time, data.obs, data.dosetime.time)
    if data.dosetime.tau > 0
        taulastp = findlast(x -> x <= data.dosetime.time + data.dosetime.tau, data.time)
    else
        taulastp = length(data.obs)
    end
    cmax = data.obs[fobs]
    tmaxn = fobs
    if length(data.obs) - fobs == 0 return cmax, data.time[fobs], tmaxn end
    @inbounds for i = fobs + 1:length(data.obs)
        if !isnanormissing(data.obs[i]) && data.obs[i] > cmax
            cmax = data.obs[i]
            tmaxn = i
        end
    end
    return cmax, data.time[tmaxn], tmaxn
end
# Log-scale time interpolation: time at which concentration equals `cx` on the
# log-linear segment between (t₁, c₁) and (t₂, c₂).
# FIX: distinct subscripted parameter names restored (source was garbled into
# duplicate identifiers).
function logtpredict(c₁, c₂, cx, t₁, t₂)
    return log(cx/c₁)/log(c₂/c₁)*(t₂-t₁)+t₁
end
# Log-scale concentration interpolation at time `tx` between (t₁, c₁) and (t₂, c₂).
# `oneunit` strips/restores physical units so `log` receives a plain number.
# FIX: distinct subscripted parameter names restored (source was garbled).
function logcpredict(t₁, t₂, tx, c₁::T, c₂::T) where T
    return exp(log(c₁/oneunit(c₁)) + (tx-t₁)/(t₂-t₁)*(log(c₂/oneunit(c₂)) - log(c₁/oneunit(c₁))))*oneunit(c₁)
end
#Linear trapezoidal auc
# Linear trapezoidal AUC of one segment.
# FIX: distinct subscripted parameter names restored (source was garbled).
function linauc(t₁, t₂, c₁, c₂)
    return (t₂-t₁)*(c₁+c₂)/2
end
#Linear trapezoidal aumc
# Linear trapezoidal AUMC (first moment) of one segment.
# FIX: distinct subscripted parameter names restored (source was garbled).
function linaumc(t₁, t₂, c₁, c₂)
    return (t₂-t₁)*(t₁*c₁+t₂*c₂)/2
end
#Log trapezoidal auc
# Log trapezoidal AUC of one segment (valid for c₁, c₂ > 0, c₁ ≠ c₂).
# FIX: distinct subscripted parameter names restored (source was garbled).
function logauc(t₁, t₂, c₁, c₂)
    return (t₂-t₁)*(c₂-c₁)/log(c₂/c₁)
end
#Log trapezoidal aumc
# Log trapezoidal AUMC (first moment) of one segment (valid for c₁, c₂ > 0, c₁ ≠ c₂).
# FIX: distinct subscripted parameter names restored (source was garbled).
function logaumc(t₁, t₂, c₁, c₂)
    return (t₂-t₁) * (t₂*c₂-t₁*c₁) / log(c₂/c₁) - (t₂-t₁)^2 * (c₂-c₁) / log(c₂/c₁)^2
end
#Intrapolation
#linear prediction bx from ax, a1 < ax < a2
# Linear interpolation of b at ax on the segment (a₁, b₁)-(a₂, b₂).
# FIX: distinct subscripted parameter names restored (source was garbled).
function linpredict(a₁, a₂, ax, b₁, b₂)
    return (ax - a₁) / (a₂ - a₁)*(b₂ - b₁) + b₁
end
function slope(x::AbstractVector{X}, y::AbstractVector{Y}; funk::Function = identity) where X where Y
    # Ordinary least-squares fit of funk(y) on x.
    # Returns (slope, intercept, r², adjusted r², n); adjusted r² is NaN when n == 2.
    XYT = promote_type(X, Y)
    length(x) == length(y) || throw(ArgumentError("Unequal vector length!"))
    n = length(x)
    n < 2 && throw(ArgumentError("n < 2!"))
    sxy = zero(XYT)
    sx  = zero(X)
    sy  = zero(Y)
    sxx = zero(X)
    syy = zero(Y)
    @inbounds for k in eachindex(x)
        xv = x[k]
        yv = funk(y[k])
        sxy += xv * yv
        sx  += xv
        sy  += yv
        sxx += xv^2
        syy += yv^2
    end
    den = n * sxx - sx^2
    a   = (n * sxy - sx * sy) / den
    b   = (sy * sxx - sx * sxy) / den
    r2  = (n * sxy - sx * sy)^2 / (den * (n * syy - sy^2))
    ar  = n > 2 ? 1 - (1 - r2) * (n - 1) / (n - 2) : NaN
    return a, b, r2, ar, n
end
#=
function logslope(x, y)
if length(x) != length(y) throw(ArgumentError("Unequal vector length!")) end
n = length(x)
if n < 2 throw(ArgumentError("n < 2!")) end
Ξ£xy = zero(Float64)
Ξ£x = zero(Float64)
Ξ£y = zero(Float64)
Ξ£x2 = zero(Float64)
Ξ£y2 = zero(Float64)
@inbounds for i = 1:n
xi = x[i]
yi = log(y[i])
Ξ£xy += xi * yi
Ξ£x += xi
Ξ£y += yi
Ξ£x2 += xi^2
Ξ£y2 += yi^2
end
a = (n * Ξ£xy - Ξ£x * Ξ£y)/(n * Ξ£x2 - Ξ£x^2)
b = (Ξ£y * Ξ£x2 - Ξ£x * Ξ£xy)/(n * Ξ£x2 - Ξ£x^2)
r2 = (n * Ξ£xy - Ξ£x * Ξ£y)^2/((n * Ξ£x2 - Ξ£x^2)*(n * Ξ£y2 - Ξ£y^2))
n > 2 ? ar = 1 - (1 - r2)*(n - 1)/(n - 2) : ar = NaN
return a, b, r2, ar, n
end #end slope
=#
#---------------------------------------------------------------------------
# Segment AUC using the method selected by `calcm`:
# :lint - always linear; :logt - log after Tmax; :luld - log when declining;
# :luldt - log when declining after Tmax. Falls back to linear otherwise.
# FIX: distinct subscripted parameter names restored (source was garbled).
function aucpart(t₁, t₂, c₁, c₂, calcm, aftertmax)
    if calcm == :lint || c₁ <= zero(c₁) && c₂ <= zero(c₂)
        auc = linauc(t₁, t₂, c₁, c₂)
    elseif calcm == :logt && aftertmax && c₁ > zero(c₁) && c₂ > zero(c₂)
        auc = logauc(t₁, t₂, c₁, c₂)
    elseif calcm == :luld && c₁ > c₂ > zero(c₂)
        auc = logauc(t₁, t₂, c₁, c₂)
    elseif calcm == :luldt && aftertmax && c₁ > c₂ > zero(c₂)
        auc = logauc(t₁, t₂, c₁, c₂)
    else
        auc = linauc(t₁, t₂, c₁, c₂)
    end
    return auc
end
# Segment AUMC using the method selected by `calcm` (same rules as `aucpart`).
# FIX: distinct subscripted parameter names restored (source was garbled).
function aumcpart(t₁, t₂, c₁, c₂, calcm, aftertmax)
    if calcm == :lint || c₁ <= zero(c₁) && c₂ <= zero(c₂)
        aumc = linaumc(t₁, t₂, c₁, c₂)
    elseif calcm == :logt && aftertmax && c₁ > zero(c₁) && c₂ > zero(c₂)
        aumc = logaumc(t₁, t₂, c₁, c₂)
    elseif calcm == :luld && c₁ > c₂ > zero(c₂)
        aumc = logaumc(t₁, t₂, c₁, c₂)
    elseif calcm == :luldt && aftertmax && c₁ > c₂ > zero(c₂)
        aumc = logaumc(t₁, t₂, c₁, c₂)
    else
        aumc = linaumc(t₁, t₂, c₁, c₂)
    end
    return aumc
end
#---------------------------------------------------------------------------
# Concentration at time `tx` between (t₁, c₁) and (t₂, c₂), using the
# interpolation method `intpm` (same selection rules as `aucpart`).
# FIX: distinct subscripted parameter names restored (source was garbled).
function interpolate(t₁, t₂, tx, c₁, c₂, intpm, aftertmax)
    if intpm == :lint || c₁ <= zero(c₁) || c₂ <= zero(c₂)
        c = linpredict(t₁, t₂, tx, c₁, c₂)
    elseif intpm == :logt && aftertmax && c₁ > zero(c₁) && c₂ > zero(c₂)
        c = logcpredict(t₁, t₂, tx, c₁, c₂)
    elseif intpm == :luld && c₁ > c₂ > zero(c₂)
        c = logcpredict(t₁, t₂, tx, c₁, c₂)
    elseif intpm == :luldt && aftertmax && c₁ > c₂ > zero(c₂)
        c = logcpredict(t₁, t₂, tx, c₁, c₂)
    else
        c = linpredict(t₁, t₂, tx, c₁, c₂)
    end
    return c
end
# Time interpolation
# Time at which concentration equals `cx` between (t₁, c₁) and (t₂, c₂),
# using the interpolation method `intpm` (same selection rules as `interpolate`).
# FIX: distinct subscripted parameter names restored (source was garbled).
function tinterpolate(c₁, c₂, cx, t₁, t₂, intpm, aftertmax)
    if intpm == :lint || c₁ <= zero(c₁) || c₂ <= zero(c₂)
        t = linpredict(c₁, c₂, cx, t₁, t₂)
    elseif intpm == :logt && aftertmax && c₁ > zero(c₁) && c₂ > zero(c₂)
        t = logtpredict(c₁, c₂, cx, t₁, t₂)
    elseif intpm == :luld && c₁ > c₂ > zero(c₂)
        t = logtpredict(c₁, c₂, cx, t₁, t₂)
    elseif intpm == :luldt && aftertmax && c₁ > c₂ > zero(c₂)
        t = logtpredict(c₁, c₂, cx, t₁, t₂)
    else
        t = linpredict(c₁, c₂, cx, t₁, t₂)
    end
    return t
end
################################################################################
# STEPS
################################################################################
# 1
# Make observation vector and time vector: no points before the dose time and no points after the last non-missing concentration
function step_1_filterpksubj(_time::AbstractVector{T}, _obs, dosetime) where T
    # Trim the profile to [first observation at/after dose time, last non-missing obs].
    fobs = firstobs(_time, _obs, dosetime)
    li = findlast(!isnanormissing, _obs)
    n = li - fobs + 1
    obstype = typeof(zero(eltype(_obs)))
    # Placeholder for missing observations inside the trimmed range.
    nanobst = NaN * zero(eltype(_obs))
    time = Vector{T}(undef, n)
    obs = Vector{obstype}(undef, n)
    # Positions (within the trimmed vectors) of NaN/missing values, to be interpolated later.
    inds = Vector{Int}(undef, 0)
    ii = 0
    for i = fobs:li
        ii += 1
        time[ii] = _time[i]
        if isnanormissing(_obs[i])
            obs[ii] = nanobst
            push!(inds, ii)
        else
            obs[ii] = _obs[i]
        end
    end
    time, obs, inds # return time, obs, and NaN or Missing value list
end
#2
# Replace each NaN/missing observation (at positions `inds`) with a value
# interpolated from its nearest valid neighbours. All values are computed
# first and written afterwards, so one interpolation never uses another.
# FIX: neighbour indices i₁/i₂ restored (source was garbled into one name).
function step_2_interpolate!(time, obs::AbstractVector{T}, inds, tmaxn, intpm) where T
    if length(inds) > 0
        vals = Vector{T}(undef, length(inds))
        for i = 1:length(inds)
            aftertmax = inds[i] > tmaxn
            i₁ = findlast(!isnanormissing, obs[1:inds[i] - 1])
            i₂ = findfirst(!isnanormissing, obs[inds[i] + 1:end]) + inds[i]
            vals[i] = interpolate(time[i₁], time[i₂], time[inds[i]], obs[i₁], obs[i₂], intpm, aftertmax)
        end
        for i = 1:length(inds)
            obs[inds[i]] = vals[i]
        end
    end
end
# 3
# Elimination, TlastN, Tlast
# Terminal elimination (λz) regression. Fills `keldata` with candidate
# log-linear fits and returns (keldata, excluded time values, Tlast index, Tlast).
# FIX: the `∉` operator was garbled in the source; restored in the filter! calls.
function step_3_elim!(result, data, adm, tmaxn, time_cp::AbstractVector{T}, obs_cp::AbstractVector{O}, time, keldata) where T where O
    resize!(keldata)
    obsnum = length(time_cp)
    # data.kelrange.kelexcl - indexes; excltime - time values
    excltime = time[data.kelrange.kelexcl]
    # Strip units so that log/regression work on plain numbers.
    r_time_cp = reinterpret(typeof(one(T)), time_cp)
    r_obs_cp = reinterpret(typeof(one(O)), obs_cp)
    tlastn = findlast(x-> x > zero(x), r_obs_cp)
    tlast = r_time_cp[tlastn]
    if data.kelauto
        # Automatic range: try every start point after Tmax (from Tmax for :iv),
        # keeping each fit with a negative slope; the best fit is chosen later
        # by adjusted r².
        if (adm != :iv && obsnum - tmaxn > 2) || (adm == :iv && obsnum - tmaxn > 1)
            if adm == :iv
                stimep = tmaxn
            else
                stimep = tmaxn + 1
            end
            timep = collect(stimep:obsnum) # time points (and conc) - indexes for time vector from start to end
            # Exclude all indexes in data.kelrange.kelexcl
            if length(data.kelrange.kelexcl) > 0
                exclinds = findall(x -> x in excltime, time_cp)
                filter!(x-> x ∉ exclinds, timep)
            end
            # find all concentrations <= 0 - indexes
            zcinds = findall(x -> x <= zero(O), obs_cp)
            # exclude concentrations <= 0 from time vector
            filter!(x-> x ∉ zcinds, timep)
            if length(timep) > 2
                logconc = log.(r_obs_cp)
                for i = length(timep)-2:-1:1
                    timepv = view(timep, i:length(timep))
                    sl = slope(view(r_time_cp, timepv), view(logconc, timepv))
                    if sl[1] < 0
                        push!(keldata, time_cp[timep[i]], time_cp[timep[end]], sl[1], sl[2], sl[3], sl[4], sl[5])
                    end
                end
            end
        end
    else
        # Manual range: user-supplied kelstart/kelend, minus exclusions and c <= 0.
        stimep = findfirst(x -> x >= time[data.kelrange.kelstart], time_cp)
        etimep = findlast(x -> x <= time[data.kelrange.kelend], time_cp)
        timep = collect(stimep:etimep)
        if length(data.kelrange.kelexcl) > 0
            @inbounds for i in data.kelrange.kelexcl
                filter!(x-> x ∉ findall(x -> x in excltime, time_cp), timep)
            end
        end
        zcinds = findall(x -> x <= 0, obs_cp)
        filter!(x-> x ∉ zcinds, timep)
        if length(timep) > 1
            sl = slope(view(r_time_cp, timep), view(r_obs_cp, timep), funk = log)
            push!(keldata, time_cp[stimep], time_cp[etimep], sl[1], sl[2], sl[3], sl[4], sl[5])
        end
    end
    # keldata - New KelData, excltime excluded time values, t last N and tlast
    keldata, excltime, tlastn, tlast
end
# 6
function step_6_areas(time_cp::AbstractVector{T}, obs_cp::AbstractVector{O}, calcm, tmaxn, tlastn) where T where O
    # Per-segment AUC/AUMC plus the cumulative AUClast/AUMClast (up to Tlast)
    # and AUCall (over every observation).
    obsnum = length(time_cp)
    # Element types carry the physical units of time*conc and time^2*conc.
    auctype = typeof(zero(eltype(time_cp))*zero(eltype(obs_cp)))
    aumctype = typeof(zero(eltype(time_cp))^2*zero(eltype(obs_cp)))
    aucpartl = Array{auctype, 1}(undef, obsnum - 1)
    aumcpartl = Array{aumctype, 1}(undef, obsnum - 1)
    #Calculate all AUC/AUMC part based on data
    for i = 1:(obsnum - 1)
        aucpartl[i] = aucpart(time_cp[i], time_cp[i + 1], obs_cp[i], obs_cp[i + 1], calcm, i >= tmaxn)
        aumcpartl[i] = aumcpart(time_cp[i], time_cp[i + 1], obs_cp[i], obs_cp[i + 1], calcm, i >= tmaxn)
    end
    #-----------------------------------------------------------------------
    #-----------------------------------------------------------------------
    auclast = zero(auctype)
    aumclast = zero(aumctype)
    @inbounds for i = 1:tlastn-1
        auclast += aucpartl[i]
        aumclast += aumcpartl[i]
    end
    # Zero area means no calculable segments up to Tlast - report NaN instead.
    if auclast == zero(auctype) auclast = NaN * zero(auctype) end
    if aumclast == zero(aumctype) aumclast = NaN * zero(aumctype) end
    aucall = auclast
    # AUCall additionally covers segments after Tlast (trailing zeros/BLQ points).
    if tlastn < length(time_cp)
        @inbounds for i = tlastn:obsnum-1
            aucall += aucpartl[i]
        end
    end
    aucpartl, aumcpartl, auclast, aumclast, aucall
end
"""
nca(args...; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime(), kwargs...)
nca(data, time, conc, sort; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime(), kwargs...)
nca(data, time, conc; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime(), kwargs...)
nca(time, conc; kelauto = true, elimrange = ElimRange(), dosetime = DoseTime(), kwargs...)
Import data and perform NCA analysis.
Syntax simillar to [`pkimport`](@ref)
Applicable `kwargs` see [`nca!`](@ref).
See also: [`ElimRange`](@ref), [`DoseTime`](@ref), [`LimitRule`](@ref).
"""
function nca(args...; type::Symbol = :bps, bl = 0, th = 0, kelauto = true, elimrange = ElimRange(), dosetime = DoseTime(), limitrule::Union{Nothing, LimitRule} = nothing, kwargs...)
    # Dispatch import by matrix type: :bps - blood/plasma/serum, :ur - urine, :pd - pharmacodynamics.
    if !(type in (:bps, :ur, :pd)) error("Unknown type") end
    if type == :bps
        pki = pkimport(args...; kelauto = kelauto, elimrange = elimrange, dosetime = dosetime, limitrule = limitrule, kwargs...)
    elseif type == :ur
        pki = upkimport(args...; kelauto = kelauto, elimrange = elimrange, dosetime = dosetime, kwargs...)
    elseif type == :pd
        pki = pdimport(args...; th = th, bl = bl, limitrule = limitrule, kwargs...)
    end
    # Run the analysis on the freshly imported data; remaining kwargs are forwarded.
    nca!(pki; kwargs...)
end
"""
nca!(data::DataSet{Subj}; adm = :ev, calcm = :lint, intpm = nothing, verbose = 0, warn = true, io::IO = stdout, modify! = identity) where Subj <: AbstractSubject
Non-compartmental (NCA) analysis of PK/PD data.
"""
function nca!(data::DataSet{Subj}; kwargs...) where Subj <: AbstractSubject
    # Apply subject-level `nca!` to every subject; `map` over a DataSet
    # yields a DataSet of per-subject results.
    map(x -> nca!(x; kwargs...), data)
end
"""
nca!(data::PKSubject{T,O}; adm = :ev, calcm = :lint, intpm = nothing, partials = nothing, prtext = :err, verbose = 0, warn = true, io::IO = stdout, modify! = nothing) where T where O
* `adm` - administration:
- `:ev` - extra vascular;
- `:iv` - intravascular bolus;
* `calcm` - AUC/AUMC calculation method:
- `:lint` - linear trapezoidal;
- `:luld` - linear up log down;
- `:luldt` - linear up log down after Tmax;
- `:logt` - log-trapezoidal after Tmax (Not Recommended);
* `intpm` - interpolation method:
- `:lint` - linear trapezoidal;
- `:luld` - linear up log down;
- `:luldt` - linear up log down after Tmax;
- `:logt` - log-trapezoidal after Tmax;
* `partials` - calculate partial AUC for a vector of time intervals;
* `prtext` - extrapolation rule for partial AUC (`:err` (default) - throw an error if end time > last observation time; `:last` - no extrapolation; `:extr` - use extrapolation if `Kel` is calculated, or `NaN` if there is no `Kel`);
* `verbose` - print to `io`, 1: partial areas table, 2: 1, and results;
* `warn` - show warnings;
* `io` - output stream;
* `modify!` - function to modify output parameters; `modify!(ncaresult)` is called if defined.
Results:
* Cmax
* Tmax
* Cdose
* Tlag
* Clast
* AUClast
* AUMClast
* AUCall
* Rsq
* ARsq
* Kel
* HL
* LZint
* NpLZ
* Clast_pred
* AUCinf
* AUCinf_pred
* AUMCinf
* AUMCinf_pred
* AUCpct
* MRTlast
* MRTinf
* MRTinf_pred
* Cllast
* Clinf
* Vzlast
* Vzinf
* Vssinf
Steady-state parameters (tau used):
* AUCtau
* AUMCtau
* Ctau
* Cavg
* Ctaumin
* Accind
* Fluc
* Fluctau
* Swing
* Swingtau
* MRTtauinf
* Cltau
* Vztau
`partials` is a vector of vectors, tuples or pairs. Example: `partials = [(1,2), (3,4)]`, `partials = [[1,2], (3,4)]`
"""
function nca!(data::PKSubject{T, O}; adm = :ev, calcm = :lint, intpm = nothing, partials = nothing, prtext = :err, verbose = 0, warn = true, io::IO = stdout, modify! = identity) where T where O
    # Result values may mix plain numbers and unitful quantities; promote once.
    ptype = promote_type(Float64, T, O)
    result = Dict{Symbol, ptype}()
    # Interpolation method defaults to the AUC calculation method.
    if isnothing(intpm) intpm = calcm end
    options = Dict(:type => :bps, :adm => adm, :calcm => calcm, :intpm => intpm, :verbose => verbose, :warn => warn, :modify! => modify!)
    if verbose > 0
        println(io, " Non-compartmental Pharmacokinetic Analysis")
        if length(data.id) > 0
            print(io, " Subject: ")
            for (k,v) in data.id
                print(io, "$(k) => $(v); ")
            end
            println(io, "")
        end
        println(io, " Settings:")
        println(io, " Method: $(calcm); Dose: $(data.dosetime.dose); Dose time: $(data.dosetime.time)")
        if data.dosetime.tau > 0
            println(io, " Tau: $(data.dosetime.tau)")
        end
    end
    ################################################################################
    # STEP 1 FILTER ALL BEFORE DOSETIME AND ALL NAN OR MISSING VALUES
    if validobsn(data.time, data.obs) == 0 return NCAResult(data, options, result) end
    time_cp, obs_cp, einds = step_1_filterpksubj(data.time, data.obs, data.dosetime.time)
    if length(obs_cp) < 2
        return NCAResult(data, options, result)
    end
    ################################################################################
    # STEP 2 - CMAX TMAX FOR TAU RANGE Clast Tlast; interpolate NaN and missings
    result[:Obsnum] = validobsn(time_cp, obs_cp)
    # If TAU set, calculates start and end timepoints for AUCtau
    if data.dosetime.tau > zero(typeof(data.dosetime.tau))
        taulastp = findlast(x -> x <= data.dosetime.time + data.dosetime.tau, time_cp)
        result[:Ctaumin] = ctaumin(time_cp, obs_cp, taulastp)
    else
        taulastp = length(obs_cp)
    end
    result[:Cmax], result[:Tmax], tmaxn = ctmax(time_cp, obs_cp, taulastp)
    step_2_interpolate!(time_cp, obs_cp, einds, tmaxn, intpm)
    ################################################################################
    # STEP 3
    # Elimination, add interpolated inds to elimination exclusion
    # Get last concentration
    if length(einds) > 0
        for ei in einds
            if ei ∉ data.kelrange.kelexcl push!(data.kelrange.kelexcl, ei) end
        end
        sort!(data.kelrange.kelexcl)
        # If the manual range touches an excluded point, fall back to automatic selection.
        if data.kelrange.kelstart in data.kelrange.kelexcl || data.kelrange.kelend in data.kelrange.kelexcl
            data.kelauto = true
        end
    end
    keldata, excltime, tlastn, tlast = step_3_elim!(result, data, adm, tmaxn, time_cp, obs_cp, data.time, data.keldata)
    # C last and T last
    result[:Tlast] = time_cp[tlastn]
    result[:Clast] = obs_cp[tlastn]
    ################################################################################
    # STEP 4
    # Shift the time axis so that dose time is zero.
    if data.dosetime.time > zero(T)
        time_cp .-= data.dosetime.time
    end
    ################################################################################
    # STEP 5
    # Dose concentration
    # Dosetime is first point
    cdoseins = zero(Int)
    if iszero(first(time_cp))
        result[:Cdose] = first(obs_cp)
    # Dosetime before first point
    else
        if adm == :iv
            # IV bolus: back-extrapolate log-linearly when the profile is declining.
            if first(obs_cp) > obs_cp[2] > zero(O)
                result[:Cdose] = logcpredict(first(time_cp), time_cp[2], 0, first(obs_cp), obs_cp[2])
            else
                result[:Cdose] = first(obs_cp)
            end
        else
            if data.dosetime.tau > zero(typeof(data.dosetime.tau))
                result[:Cdose] = result[:Ctaumin]
            else
                result[:Cdose] = zero(O)
            end
        end
        cdoseins = 1
        pushfirst!(time_cp, zero(T))
        pushfirst!(obs_cp, result[:Cdose])
        # if time-zero point added some points shoud be shifted
        taulastp += 1
        tmaxn += 1
        tlastn += 1
    end
    ################################################################################
    # STEP 6
    # Areas
    aucpartl, aumcpartl, auclast, aumclast, aucall = step_6_areas(time_cp, obs_cp, calcm, tmaxn, tlastn)
    result[:AUClast] = auclast
    result[:AUMClast] = aumclast
    result[:AUCall] = aucall
    ################################################################################
    # STEP 7
    # Other parameters
    #---------------------------------------------------------------------------
    result[:MRTlast] = result[:AUMClast] / result[:AUClast]
    #---------------------------------------------------------------------------
    if data.dosetime.dose > zero(data.dosetime.dose)
        result[:Cllast] = data.dosetime.dose / result[:AUClast]
        result[:Dose] = data.dosetime.dose
    end
    #-----------------------------------------------------------------------
    #-----------------------------------------------------------------------
    # Tlag: time of the last zero observation before absorption starts.
    tlagn = findfirst(!iszero, obs_cp)
    if isnothing(tlagn)
        result[:Tlag] = NaN*zero(T)
    elseif tlagn > 1
        result[:Tlag] = time_cp[tlagn-1]
    else
        result[:Tlag] = zero(T)
    end
    if length(data.keldata) > 0
        # Best elimination fit selected by maximum adjusted r².
        result[:ARsq], rsqn = findmax(keldata.ar)
        result[:Rsq] = keldata.r[rsqn]
        result[:Kel] = abs(keldata.a[rsqn]) / oneunit(T)
        result[:LZ] = keldata.a[rsqn]
        result[:LZint] = keldata.b[rsqn]
        result[:Rsqn] = rsqn
        result[:NpLZ] = keldata.n[rsqn]
        result[:Clast_pred] = exp(result[:LZint] + result[:LZ] * tlast) * oneunit(O)
        result[:HL] = LOG2 / result[:Kel]
        result[:AUCinf] = result[:AUClast] + result[:Clast] / result[:Kel]
        result[:AUCinf_pred] = result[:AUClast] + result[:Clast_pred] / result[:Kel]
        result[:AUCpct] = (result[:AUCinf] - result[:AUClast]) / result[:AUCinf] * 100
        result[:AUMCinf] = result[:AUMClast] + result[:Tlast] * result[:Clast] / result[:Kel] + result[:Clast] / result[:Kel] ^ 2
        result[:MRTinf] = result[:AUMCinf] / result[:AUCinf]
        if data.dosetime.dose > zero(data.dosetime.dose)
            result[:Vzlast] = data.dosetime.dose / result[:AUClast] / result[:Kel]
            result[:Vzinf] = data.dosetime.dose / result[:AUCinf] / result[:Kel]
            result[:Clinf] = data.dosetime.dose / result[:AUCinf]
            result[:Vssinf] = result[:Clinf] * result[:MRTinf]
        end
    else
        result[:Kel] = NaN / oneunit(T)
    end
    ################################################################################
    # STEP 8
    # Steady-state parameters
    if data.dosetime.tau > zero(data.dosetime.tau)
        eaucpartl = zero(T)*zero(O)
        eaumcpartl = zero(T)^2*zero(O)
        if time_cp[taulastp] < data.dosetime.tau < time_cp[end]
            result[:Ctau] = interpolate(time_cp[taulastp], time_cp[taulastp + 1], data.dosetime.tau, obs_cp[taulastp], obs_cp[taulastp + 1], intpm, true)
            eaucpartl = aucpart(time_cp[taulastp], data.dosetime.tau, obs_cp[taulastp], result[:Ctau], calcm, true)
            eaumcpartl = aumcpart(time_cp[taulastp], data.dosetime.tau, obs_cp[taulastp], result[:Ctau], calcm, true)
            #remoove part after tau
        elseif data.dosetime.tau > time_cp[end] && result[:Kel] !== NaN
            # NOTE(review): `!== NaN` only detects a Float64 NaN; if Kel carries
            # units this branch is always taken - confirm intended behavior.
            #extrapolation
            result[:Ctau] = exp(result[:LZint] + result[:LZ] * (data.dosetime.tau + data.dosetime.time))
            eaucpartl = aucpart(time_cp[end], data.dosetime.tau, obs_cp[end], result[:Ctau], calcm, true)
            eaumcpartl = aumcpart(time_cp[end], data.dosetime.tau, obs_cp[end], result[:Ctau], calcm, true)
        else
            result[:Ctau] = obs_cp[taulastp]
        end
        auctau = eaucpartl
        aumctau = eaumcpartl
        @inbounds for i = 1:taulastp-1
            auctau += aucpartl[i]
            aumctau += aumcpartl[i]
        end
        result[:AUCtau] = auctau
        result[:AUMCtau] = aumctau
        result[:Cavg] = result[:AUCtau]/data.dosetime.tau
        if result[:Ctaumin] != 0
            result[:Swing] = (result[:Cmax] - result[:Ctaumin])/result[:Ctaumin]
        end
        if result[:Ctau] != 0
            result[:Swingtau] = (result[:Cmax] - result[:Ctau])/result[:Ctau]
        end
        result[:Fluc] = (result[:Cmax] - result[:Ctaumin])/result[:Cavg] * 100
        result[:Fluctau] = (result[:Cmax] - result[:Ctau])/result[:Cavg] * 100
        #If Kel calculated
        result[:Cltau] = data.dosetime.dose / result[:AUCtau]
        if !isnan(result[:Kel])
            result[:Accind] = 1 / (1 - (exp(-result[:Kel] * data.dosetime.tau)))
            result[:MRTtauinf] = (result[:AUMCtau] + data.dosetime.tau * (result[:AUCinf] - result[:AUCtau])) / result[:AUCtau]
            result[:Vztau] = data.dosetime.dose / result[:AUCtau] / result[:Kel]
            result[:Vsstau] = result[:Cltau] * result[:MRTtauinf]
        end
    end
    #partials
    if !isnothing(partials)
        for prt in partials
            stime = prt[1]
            etime = prt[2]
            if stime < data.dosetime.time error("Start time can't be less than dose time!") end
            # FIX: this guard previously tested `stime` again; it must test `etime`.
            if etime < data.dosetime.time error("End time can't be less than dose time!") end
            if etime <= stime error("End time can't be less or equal start time!") end
            suffix = "_"*string(stime)*"_"*string(etime)
            stime = stime - data.dosetime.time
            etime = etime - data.dosetime.time
            if etime > last(time_cp) && prtext == :err error("End time can't be greater than last time point ($(last(time_cp)))! Use keyword `prtext=:last` or `prtext=:extr`...") end
            #first point
            firstp = findfirst(x -> x >= stime, time_cp)
            #last point
            lastp = findlast(x -> x <= etime, time_cp)
            firstpart = zero(T)*zero(O)
            lastpart = zero(T)*zero(O)
            if stime < time_cp[firstp]
                firstpartc = interpolate(time_cp[firstp - 1], time_cp[firstp], stime, obs_cp[firstp - 1], obs_cp[firstp], intpm, stime > result[:Tmax])
                firstpart += aucpart(stime, time_cp[firstp], firstpartc, obs_cp[firstp], calcm, stime > result[:Tmax])
            end
            if etime > time_cp[lastp] && etime < last(time_cp) # if last time > etime -> interpolation
                lastpartc = interpolate(time_cp[lastp], time_cp[lastp + 1], etime, obs_cp[lastp], obs_cp[lastp + 1], intpm, time_cp[lastp] > result[:Tmax])
                lastpart += aucpart(time_cp[lastp], etime, obs_cp[lastp], lastpartc, calcm, time_cp[lastp] > result[:Tmax])
            elseif etime >= time_cp[lastp] && prtext == :last
                lastpartc = zero(O)
            elseif etime > time_cp[lastp] && prtext == :extr && !isnan(result[:Kel])
                lastpartc = exp(result[:LZint] + result[:LZ] * etime)
                lastpart += aucpart(time_cp[lastp], etime, obs_cp[end], lastpartc, calcm, time_cp[lastp] > result[:Tmax])
            else
                lastpartc = NaN
                lastpart += lastpartc
            end
            aucpartial = zero(T)*zero(O)
            if firstp != lastp
                aucpartn = lastp - firstp
                for i = 1:aucpartn
                    aucpartial += aucpart(time_cp[firstp + i - 1], time_cp[firstp + i], obs_cp[firstp + i - 1], obs_cp[firstp + i], calcm, time_cp[firstp + i - 1] >= result[:Tmax])
                end
            end
            aucpartial += firstpart + lastpart
            result[Symbol("AUC"*suffix)] = aucpartial
            if verbose > 2
                println(io, " Partial $(prt[1]) - $(prt[2]); from point $firstp to $lastp")
                if stime < time_cp[firstp]
                    println(io, " Interpolation values: first = $firstpartc, last = $lastpartc")
                end
                if etime > time_cp[lastp]
                    println(io, " Interpolation parts: first = $firstpart, last = $lastpart")
                end
            end
        end
    end
    ################################################################################
    # Verbose output
    if verbose > 0
        aucpartlsum = similar(aucpartl)
        aumcpartlsum = similar(aumcpartl)
        @inbounds for i = 1:length(aucpartl)
            aucpartlsum[i] = sum(view(aucpartl, 1:i))
            aumcpartlsum[i] = sum(view(aumcpartl, 1:i))
        end
        if data.dosetime.time > 0
            time_cp .+= data.dosetime.time
        end
        hnames = [:Time, :Concentrtion, :AUC, :AUC_cum, :AUMC, :AUMC_cum, :Info]
        mx = metida_table(time_cp, obs_cp, pushfirst!(aucpartl, 0.0), pushfirst!(aucpartlsum, 0.0), pushfirst!(aumcpartl, 0.0), pushfirst!(aumcpartlsum, 0.0), fill("", length(obs_cp));
        names = hnames)
        if cdoseins > 0
            mx[1, 7] = "D*"
        else
            mx[1, 7] = "D"
        end
        # NOTE(review): `rsqn` is only defined when keldata is non-empty; the
        # `!isnan(Kel)` guard covers that path - confirm for unitful Kel.
        if !isnan(result[:Kel])
            @inbounds for i = 1:length(time_cp)
                if time_cp[i] >= keldata.s[rsqn] && time_cp[i] <= keldata.e[rsqn]
                    if length(data.kelrange.kelexcl) > 0
                        if time_cp[i] in excltime
                            mx[i, 7] = "Excl"
                        else
                            mx[i, 7] = "E"
                        end
                    else
                        mx[i, 7] = "E"
                    end
                end
                if i in einds
                    mx[i, 7] *= "@"
                end
            end
        end
        hnames = (["Time" "Conc." "AUC" "AUC" "AUMC" "AUMC" "Info"],
        ["" "" "" "(cum.)" "" "(cum.)" ""])
        PrettyTables.pretty_table(io, mx; tf = PrettyTables.tf_compact, header = hnames, formatters = PrettyTables.ft_printf("%3.4g"))
        println(io, "")
        println(io, " Cdose: $(result[:Cdose]), Dose time: $(data.dosetime.time)")
        if isnan(result[:Kel])
            println(io, " Elimination not calculated")
        else
            println(io, " Kel start: $(keldata.s[rsqn]); end: $(keldata.e[rsqn])")
        end
        if length(einds) > 0
            println(io, " @ - Interpolated points ($(length(einds)))")
        end
        println(io, "")
        if data.dosetime.tau < time_cp[end] && data.dosetime.tau > 0
            println(io, " Tau + dosetime is less then end time. Interpolation used.")
            println(io, " Ctau: $(result[:Ctau])")
            println(io, " AUC final part: $(eaucpartl)")
            println(io, " AUMC final part: $(eaumcpartl)")
            println(io, "")
        end
        if verbose > 1
            println(io, " Results:")
            PrettyTables.pretty_table(io, result; tf = PrettyTables.tf_compact, header = ["Parameter", "Value"], formatters = PrettyTables.ft_printf("%4.6g"))
        end
    end
    ################################################################################
    ncares = NCAResult(data, options, result)
    modify!(ncares)
    #-----------------------------------------------------------------------
    return ncares
end
function maxconc(subj::T) where T <: PKSubject
    # Largest observed concentration in the subject profile.
    return maximum(subj.obs)
end
function minconc(subj::T, pos = false) where T <: PKSubject
    # Smallest concentration; with `pos = true` only strictly positive
    # values are considered (lazy filter, no intermediate allocation).
    if pos
        return minimum(Iterators.filter(x -> x > zero(x), subj.obs))
    end
    return minimum(subj.obs)
end
# Excretion rate for each urine collection interval: conc*vol/(t_end - t_start).
function exrate(time::AbstractVector{Tuple{S, E}}, conc::AbstractVector{C}, vol::AbstractVector{V}) where S where E where C where V
    T = promote_type(S, E)
    # FIX: previously raised `error("")` with an empty, useless message.
    length(time) == length(conc) == length(vol) || error("Unequal vector lengths!")
    er = Vector{typeof(oneunit(C)*oneunit(V)/oneunit(T))}(undef, length(time))
    @inbounds for i = 1:length(time)
        er[i] = conc[i]*vol[i]/(time[i][2] - time[i][1])
    end
    er
end
function step_1_filterupksubj(time, obs, vol, dosetime)
    # Urine data filter: keep only intervals at/after the dose time where both
    # concentration and volume are present. Two passes: count, then collect.
    fobs = firstobs(time, obs, vol, dosetime)
    ni = 0
    @inbounds for i = fobs:length(obs)
        if !isnanormissing(obs[i]) && !isnanormissing(vol[i])
            ni += 1
        end
    end
    inds = Vector{Int}(undef, ni)
    ni = 1
    @inbounds for i = fobs:length(obs)
        if !isnanormissing(obs[i]) && !isnanormissing(vol[i])
            inds[ni] = i
            ni += 1
        end
    end
    time_cp = time[inds]
    obs_cp = obs[inds]
    vol_cp = vol[inds]
    time_cp, obs_cp, vol_cp
end
"""
nca!(data::UPKSubject{T, O, VOL, V}; adm = :ev, calcm = :lint, intpm = nothing, verbose = 0, warn = true, io::IO = stdout, modify! = identity) where T where O where VOL where V
Non-compartmental (NCA) analysis of pharmacokinetic for urine data.
Results:
* AUCall
* AUClast
* Rlast
* Maxrate
* Tmax
* AR
* Vol
* Prec
* ARsq
* Rsq
* Kel
* LZ
* LZint
* Rsqn
* HL
* AUCinf
"""
function nca!(data::UPKSubject{Tuple{S, E}, O, VOL, V}; adm = :ev, calcm = :lint, intpm = nothing, verbose = 0, warn = true, io::IO = stdout, modify! = identity, kwargs...) where S where E where O where VOL where V
ptype = promote_type(Float64, S, E, O, VOL)
ttype = promote_type(S, E)
result = Dict{Symbol, ptype}()
options = Dict(:type => :urine, :adm => adm, :calcm => calcm, :intpm => intpm, :verbose => verbose, :warn => warn, :modify! => modify!)
if verbose > 0
println(io, " Non-compartmental Pharmacokinetic Analysis")
println(io, " Matrix: urine")
if length(data.id) > 0
print(io, " Subject: ")
for (k,v) in data.id
print(io, "$(k) => $(v); ")
end
println(io, "")
end
println(io, " Settings:")
println(io, " Method: $(calcm); Dose: $(data.dosetime.dose); Dose time: $(data.dosetime.time)")
end
if isnothing(intpm) intpm = calcm end
time, obs, vol = step_1_filterupksubj(data.time, data.obs, data.vol, data.dosetime.time)
mtime = map(x-> (x[1]+x[2])/2, time)
exr = exrate(time, obs, vol)
result[:AR] = data.obs' * data.vol
result[:Vol] = sum(vol)
if time[1][1] > data.dosetime.time
pushfirst!(mtime, 0)
else
pushfirst!(mtime, time[1][1])
end
pushfirst!(obs, zero(O))
pushfirst!(vol, zero(VOL))
pushfirst!(exr, zero(eltype(exr)))
result[:Maxrate], result[:Tmax], tmaxn = ctmax(mtime, exr)
if data.dosetime.dose > zero(data.dosetime.dose)
result[:Prec] = result[:AR]/data.dosetime.dose * 100
end
obsnum = length(exr)
lastobs = length(exr)
for i = length(exr):-1:1
if exr[i] > zero(eltype(exr))
break
else
lastobs = i
end
end
# STEP 3
# Elimination
keldata, excltime = step_3_elim!(result, data, adm, tmaxn, mtime, exr, data.time, data.keldata)
#result[:Kel]
#result[:HL]
aucpartl = Vector{typeof(zero(eltype(exr))*zero(ttype))}(undef, obsnum - 1)
#Calculate all AUC part based on data
for i = 1:(obsnum - 1)
aucpartl[i] = aucpart(mtime[i], mtime[i + 1], exr[i], exr[i + 1], calcm, i >= tmaxn)
end
result[:AUCall] = sum(aucpartl)
result[:AUClast] = sum(aucpartl[1:lastobs-1])
result[:Rlast] = exr[end]
if length(data.keldata) > 0
result[:ARsq], rsqn = findmax(keldata.ar)
result[:Rsq] = keldata.r[rsqn]
result[:Kel] = abs(keldata.a[rsqn]) / oneunit(ttype)
result[:LZ] = keldata.a[rsqn]
result[:LZint] = keldata.b[rsqn]
result[:Rsqn] = rsqn
result[:NpLZ] = keldata.n[rsqn]
result[:HL] = LOG2 / result[:Kel]
result[:AUCinf] = result[:AUClast] + result[:Rlast] / result[:Kel]
result[:AUCpct] = (result[:AUCinf] - result[:AUClast]) / result[:AUCinf] * 100
end
ncares = NCAResult(data, options, result)
modify!(ncares)
#-----------------------------------------------------------------------
return ncares
end
"""
    auctspl(c1, c2, t1, t2, sl, calcm)

Split the AUC of the segment (`t1`,`c1`)–(`t2`,`c2`) against the horizontal
level `sl` (a baseline or threshold). Return `(auca, aucb, ta, tb)`:
area above / area below the level and the time spent above / below it.
When the segment crosses the level the crossing time is interpolated
with `tinterpolate` and both sub-segments are integrated with `aucpart`.
"""
function auctspl(c1, c2, t1, t2, sl, calcm)
    slu = sl*oneunit(c1)
    if c1 >= slu && c2 >= slu
        # Whole segment at or above the level.
        auca = aucpart(t1, t2, c1, c2, calcm, true) - (t2 - t1)*slu
        aucb = zero(auca)
        ta = t2 - t1
        tb = zero(ta)
    elseif c1 <= slu && c2 <= slu
        # Whole segment at or below the level.
        aucb = (t2 - t1)*slu - aucpart(t1, t2, c1, c2, calcm, true)
        auca = zero(aucb)
        tb = t2 - t1
        ta = zero(tb)
    else
        # Segment crosses the level: split at the interpolated crossing time.
        tint = tinterpolate(c1, c2, slu, t1, t2, calcm, true)
        l = aucpart(t1, tint, c1, slu, calcm, true)
        r = aucpart(tint, t2, slu, c2, calcm, true)
        if c1 > slu && c2 < slu
            auca = l - (tint - t1)*slu
            aucb = (t2 - tint)*slu - r
            ta = tint - t1
            tb = t2 - tint
        elseif c1 < slu && c2 > slu
            auca = r - (t2 - tint)*slu
            aucb = (tint - t1)*slu - l
            ta = t2 - tint
            tb = tint - t1
        else
            # Should be unreachable: the outer branches handle equality.
            # Replaced the original bare `error("!!")` with a diagnostic message.
            error("auctspl: unreachable state (c1 = $c1, c2 = $c2, level = $slu)")
        end
    end
    auca, aucb, ta, tb
end
"""
    auctblth(c1, c2, t1, t2, bl, th, calcm)

Split one segment's AUC against both the baseline `bl` and the threshold
`th` (via `auctspl`). Also return `aucbtw`, the AUC between the two levels:
the band area on whichever side of the baseline the threshold lies.
"""
function auctblth(c1, c2, t1, t2, bl, th, calcm)
    aucabl, aucbbl, tabl, tbbl = auctspl(c1, c2, t1, t2, bl, calcm)
    aucath, aucbth, tath, tbth = auctspl(c1, c2, t1, t2, th, calcm)
    aucbtw = th > bl ? aucabl - aucath : aucbbl - aucbth
    aucabl, aucbbl, tabl, tbbl, aucath, aucbth, tath, tbth, aucbtw
end
"""
nca!(data::PDSubject{T,O}; calcm = :lint, intpm = nothing, verbose = 0, warn = true, io::IO = stdout, modify! = identity, kwargs...) where T where O
Non-compartmental (NCA) analysis of pharmacodynamic data.
Results:
* Rmax - max responce;
* Tmax - time for maximum responce;
* AUCABL - AUC above baseline;
* AUCBBL - AUC below baseline;
* AUCATH - AUC above threshold;
* AUCBTH - AUC below threshold;
* AUCNETB - AUCABL - AUCBBL;
* AUCNETT - AUCATH - AUCBTH;
* TABL - time above baseline;
* TBBL - time below baseline;
* TATH - time above threshold;
* TBTH - time below threshold;
* AUCBTW - AUC between baseline and threshold;
"""
function nca!(data::PDSubject{T,O}; calcm = :lint, intpm = nothing, verbose = 0, warn = true, io::IO = stdout, modify! = identity, kwargs...) where T where O
ptype = promote_type(Float64, T, O)
result = Dict{Symbol, ptype}()
if isnothing(intpm) intpm = calcm end
options = Dict(:type => :pd, :calcm => calcm, :intpm => intpm, :verbose => verbose, :warn => warn, :modify! => modify!)
if verbose > 0
println(io, " Pharmacodynamic Analysis")
if length(data.id) > 0
print(io, " Subject: ")
for (k,v) in data.id
print(io, "$(k) => $(v); ")
end
println(io, "")
end
println(io, " Settings:")
println(io, " Method: $(calcm);")
end
auctype = promote_type(eltype(data.time), eltype(data.obs))
################################################################################
# STEP 1 FILTER ALL BEFORE DOSETIME AND ALL NAN OR MISSING VALUES
if validobsn(data.time, data.obs) == 0 return NCAResult(data, options, result) end
time_cp, obs_cp, einds = step_1_filterpksubj(data.time, data.obs, first(data.time))
if length(obs_cp) < 2
return NCAResult(data, options, result)
end
################################################################################
result[:Obsnum] = obsnum = length(obs_cp)
result[:Rmax], result[:Tmax], tmaxn = ctmax(time_cp, obs_cp, length(obs_cp))
# ALL NAN AND MISSING VALUES LINEAR INTERPOLATED
step_2_interpolate!(time_cp, obs_cp, einds, 1, :lint)
aucpartabl = Array{ptype, 1}(undef, obsnum - 1)
aucpartbbl = Array{ptype, 1}(undef, obsnum - 1)
aucpartath = Array{ptype, 1}(undef, obsnum - 1)
aucpartbth = Array{ptype, 1}(undef, obsnum - 1)
tpartabl = Array{ptype, 1}(undef, obsnum - 1)
tpartbbl = Array{ptype, 1}(undef, obsnum - 1)
tpartath = Array{ptype, 1}(undef, obsnum - 1)
tpartbth = Array{ptype, 1}(undef, obsnum - 1)
aucpartbtw = Array{ptype, 1}(undef, obsnum - 1)
for i = 1:(obsnum - 1)
aucpartabl[i], aucpartbbl[i], tpartabl[i], tpartbbl[i], aucpartath[i], aucpartbth[i], tpartath[i], tpartbth[i], aucpartbtw[i] = auctblth( obs_cp[i], obs_cp[i + 1], time_cp[i], time_cp[i + 1], data.bl, data.th, calcm)
end
result[:AUCABL] = sum(aucpartabl)
result[:AUCBBL] = sum(aucpartbbl)
result[:AUCATH] = sum(aucpartath)
result[:AUCBTH] = sum(aucpartbth)
result[:TABL] = sum(tpartabl)
result[:TBBL] = sum(tpartbbl)
result[:TATH] = sum(tpartath)
result[:TBTH] = sum(tpartbth)
result[:AUCBTW] = sum(aucpartbtw)
result[:AUCNETB] = result[:AUCABL] - result[:AUCBBL]
result[:AUCNETT] = result[:AUCATH] - result[:AUCBTH]
if data.th > data.bl
result[:TIMEBTW] = result[:TBTH] - result[:TBBL]
else
result[:TIMEBTW] = result[:TBBL] - result[:TBTH]
end
# Verbose output
if verbose > 0
hnames = [:Time, :Observation, :AUCABL, :AUCBBL, :AUCATH, :AUCBTH]
mx = metida_table(collect(time_cp),
collect(obs_cp),
pushfirst!(aucpartabl, zero(first(aucpartabl))),
pushfirst!(aucpartbbl, zero(first(aucpartbbl))),
pushfirst!(aucpartath, zero(first(aucpartath))),
pushfirst!(aucpartbth, zero(first(aucpartbth)));
names = hnames)
hnames = (["Time" "Obs." "AUCABL" "AUCBBL" "AUCATH" "AUCBTH"],
["" "" "" "" "" "" ""])
PrettyTables.pretty_table(io, mx; tf = PrettyTables.tf_compact, header = hnames, formatters = PrettyTables.ft_printf("%3.4g"))
println(io, "")
if verbose > 1
println(io, " Results:")
PrettyTables.pretty_table(io, result; tf = PrettyTables.tf_compact, header = ["Parameter", "Value"], formatters = PrettyTables.ft_printf("%4.6g"))
end
end
ncares = NCAResult(data, options, result)
modify!(ncares)
#-----------------------------------------------------------------------
return ncares
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 16245 | struct NoPageSort end
# Predefined plot styles for the first 20 series:
# (linestyle, linecolor, markershape, markercolor) tuples; solid lines for
# series 1-10, dotted lines for series 11-20. Used by `plotstyle(n)`.
const PKPLOTSTYLE = (
(:solid, :blue, :circle, :blue),
(:solid, :red, :utriangle, :red),
(:solid, :green, :diamond, :green),
(:solid, :magenta, :pentagon, :magenta),
(:solid, :purple, :heptagon, :purple),
(:solid, :indigo, :octagon, :indigo),
(:solid, :gold, :star, :gold),
(:solid, :yellow, :rect, :yellow),
(:solid, :gray, :xcross, :gray),
(:solid, :cyan, :cross, :cyan),
(:dot, :blue, :utriangle, :blue),
(:dot, :red, :circle, :red),
(:dot, :green, :rect, :green),
(:dot, :yellow, :diamond, :yellow),
(:dot, :gray, :cross, :gray),
(:dot, :cyan, :xcross, :cyan),
(:dot, :gold, :pentagon, :gold),
(:dot, :magenta, :star, :magenta),
(:dot, :purple, :octagon, :purple),
(:dot, :indigo, :heptagon, :indigo),
)
"""
    plotstyle(n)

Return a `(linestyle, linecolor, markershape, markercolor)` tuple for series
number `n`. Series 1–20 use the predefined `PKPLOTSTYLE` palette; larger
numbers cycle through auxiliary style tuples. `n > 1900` falls back to
`:auto` styling and `nothing` is treated as `1`.
"""
function plotstyle(n)
    if isnothing(n) n = 1 end
    if n >= 1 && n <= 20
        return PKPLOTSTYLE[n]
    elseif n > 1900
        return (:auto, :auto, :auto, :auto)
    end
    n -= 20
    linestyle = (:dash, :dashdot, :dashdotdot)
    linecolor = (:yellow, :gray, :cyan, :gold, :magenta, :purple, :indigo)
    markershape = (:star4, :star6, :star7, :star8, :rect, :star5, :diamond, :hexagon, :cross, :xcross, :utriangle, :dtriangle, :rtriangle, :ltriangle, :pentagon, :heptagon, :octagon, :vline, :hline)
    markercolor = (:gray, :gold, :magenta, :purple, :indigo)
    # `mod` (unlike `%`) always yields a non-negative remainder, so indices
    # stay in range even for non-positive `n`; identical to `%` for n > 0.
    (linestyle[mod(n, 3) + 1], linecolor[mod(n, 7) + 1], markershape[mod(n, 19) + 1], markercolor[mod(n, 5) + 1])
end
# Plots.jl user-recipe types (each generates a `pkplot`/`pkelimpplot`/... pair
# of plotting functions; the recipes are defined below).
@userplot PKPlot
@userplot PKElimpPlot
@userplot PKElimpDrop
@userplot PdHLine
"""
    luceil(x)

Round `x` up to its leading decimal unit: the nearest multiple of
`10^floor(log10(x))`, with the power clamped at zero for `x < 1`.
Used to pick "nice" axis tick steps.
"""
function luceil(x)
    pw = Int(floor(log10(x)))
    pw < 0 && (pw = 0)
    base = 10^pw
    return ceil(x / base) * base
end
# Main concentration-time series recipe.
# `lcd`/`tcd` control y/x tick generation: a Real gives that many "nice"
# intervals (via `luceil`), a StepRange is used verbatim, `:all` puts a tick
# at every data value, and `:auto` leaves ticks to Plots.jl.
@recipe function f(subj::PKPlot; lcd = :auto, tcd = :auto)
    x, y = subj.args
    if isa(lcd, Real)
        # NaN observations are ignored when sizing the y tick step.
        lc = luceil(maximum(x->isnan(x) ? -Inf : x, y)/lcd)
        yt = 0:lc:lc*lcd
    elseif isa(lcd, StepRange)
        yt = lcd
    elseif lcd == :all
        yt = y
    end
    if isa(tcd, Real)
        tc = luceil(maximum(x)/tcd)
        xt = 0:tc:tc*tcd
    elseif isa(tcd, StepRange)
        xt = tcd
    elseif tcd == :all
        xt = x
    end
    widen --> true
    seriestype --> :line
    xguide --> "Time"
    link --> :both
    legend --> true
    grid --> true
    gridstyle --> :auto
    #ticks := [nothing :auto nothing]
    #xlims --> (minimum(x), maximum(x)*1.1)
    #ylims --> (0, maximum(y)*1.1)
    # NOTE(review): a Symbol value other than :auto/:all would leave yt/xt
    # undefined here — confirm only the documented values are ever passed.
    if !isa(lcd, Symbol) || lcd != :auto
        yticks --> yt
    end
    if !isa(tcd, Symbol) || tcd != :auto
        xticks --> xt
    end
    seriescolor --> :blue
    markershape --> :circle
    markersize --> 3
    markercolor --> :match
    markerstrokealpha --> 0
    (x, y)
end
# Elimination-fit overlay: plain line, no markers, no legend entry.
@recipe function f(subj::PKElimpPlot)
    x, y = subj.args
    seriestype --> :line
    legend --> false
    markersize --> 0
    markerstrokealpha --> 0
    (x, y)
end
# Excluded elimination points: red x-cross scatter markers.
@recipe function f(subj::PKElimpDrop)
    x, y = subj.args
    seriestype --> :scatter
    legend --> false
    markersize --> 4
    markercolor --> :red
    markershape --> :xcross
    (x, y)
end
# Horizontal reference line at level `y` (baseline/threshold markers).
@recipe function f(subj::PdHLine)
    x, y = subj.args
    seriestype --> :straightline
    (x, [y, y])
end
# Text label from ID
# Build a "key = value; " label string from an id dictionary `d`;
# `ld` optionally maps id keys to display labels. `nothing` yields "".
function plotlabel(d, ld = nothing)
    isnothing(d) && return ""
    buf = IOBuffer()
    for (k, v) in d
        lbl = isnothing(ld) ? k : get(ld, k, k)
        print(buf, lbl, " = ", v, "; ")
    end
    return String(take!(buf))
end
"""
pkplot(subj; ls = false, elim = false, xticksn = :auto, yticksn = :auto, kwargs...)
Plot for subject
* `ls` - concentration in log scale;
* `elim` - draw elimination curve;
* `xticksn` - number of ticks on x axis;
* `yticksn` - number of ticks on y axis;
*Other keywords:*
* `plotstyle` - predefined plot style from PKPLOTSTYLE;
* `drawbl` (`false`) - draw baseline, only for PDSubject;
* `drawth` (`false`) - draw threshold, only for PDSubject;
"""
function pkplot(subj::AbstractSubject; ls = false, elim = false, xticksn = :auto, yticksn = :auto, kwargs...)
time = subj.time
obs = subj.obs
kwargs = Dict{Symbol, Any}(kwargs)
k = keys(kwargs)
if !(:plotstyle in k)
kwargs[:linestyle], kwargs[:linecolor], kwargs[:markershape], kwargs[:markercolor] = PKPLOTSTYLE[1]
else
kwargs[:linestyle], kwargs[:linecolor], kwargs[:markershape], kwargs[:markercolor] = kwargs[:plotstyle]
end
if !(:drawbl in k)
kwargs[:drawbl] = false
end
if !(:drawth in k)
kwargs[:drawth] = false
end
if !(:title in k)
kwargs[:title] = plotlabel(subj.id)
end
if !(:legend in k)
kwargs[:legend] = true
end
if !(:ylabel in k)
kwargs[:ylabel] = "Concentration"
end
if !(:xlims in k)
kwargs[:xlims] = (minimum(subj.time), maximum(subj.time)*1.1)
end
if :yscale in k
if kwargs[:yscale] in [:ln, :log, :log2, :log10]
ls = false
if !(:minorticks in k) kwargs[:minorticks] = true end
inds = findall(x-> x > 0, subj.obs)
time = subj.time[inds]
obs = subj.obs[inds]
if !(:yticks in k)
if kwargs[:yscale] == :log10
b = 10
elseif kwargs[:yscale] == :log2
b = 2
elseif kwargs[:yscale] == :ln || kwargs[:yscale] == :log
b = β―
end
t = collect(floor(log(b, minimum(obs))):ceil(log(b, maximum(obs))))
pushfirst!(t, first(t) - 1)
kwargs[:yticks] = b .^ t
end
if !(:ylims in k)
kwargs[:ylims] = (minimum(obs)*0.5, maximum(obs)*2.)
else
if kwargs[:ylims][1] <= 0
kwargs[:ylims] = (minimum(obs)/b, kwargs[:ylims][2])
end
end
end
else
if !(:ylims in k)
kwargs[:ylims] = (minimum(subj.obs), maximum(subj.obs)*1.15)
end
end
if ls == true
inds = findall(x-> x > 0, subj.obs)
time = subj.time[inds]
obs = log.(subj.obs[inds])
if (:ylims in k)
kwargs[:ylims] = (0, log(kwargs[:ylims][2]))
end
end
p = pkplot(time, obs; lcd = yticksn, tcd = xticksn, kwargs...)
if elim
if length(subj.keldata) > 0
arsq, rsqn = findmax(subj.keldata.ar)
lz = subj.keldata.a[rsqn]
lzint = subj.keldata.b[rsqn]
ts = subj.keldata.s[rsqn]
te = subj.keldata.e[rsqn]
if ls true
x = [ts, te]
y = [lzint + lz * x[1], lzint + lz * x[2]]
else
x = collect(ts:(te-ts)/100:te)
y = exp.(lzint .+ lz .* x)
end
pkelimpplot!(p, x, y; title = kwargs[:title]*"\n($(round(lzint, sigdigits = 4)) + $(round(lz, sigdigits = 4)) * Time; aRΒ² = $(round(arsq, sigdigits = 4))) ")
if length(subj.kelrange.kelexcl) > 0
pkelimpdrop!(p, time[subj.kelrange.kelexcl], obs[subj.kelrange.kelexcl])
end
end
end
if isa(subj, PDSubject)
if kwargs[:drawth] == true
pdhline!(p, [minimum(subj.time), maximum(subj.time)], getth(subj), lc = :blue, label = "TH")
end
if kwargs[:drawbl] == true
pdhline!(p, [minimum(subj.time), maximum(subj.time)], getbl(subj), lc = :red, label = "BL")
end
end
return p
end
"""
    pkplot!(subj; ls = false, elim = false, xticksn = :auto, yticksn = :auto, kwargs...)

Add the subject's concentration-time series to the current plot.
Keyword handling mirrors `pkplot(subj)`.

NOTE(review): `elim` is accepted for signature symmetry but is not used in
this mutating variant.
"""
function pkplot!(subj; ls = false, elim = false, xticksn = :auto, yticksn = :auto, kwargs...)
    time = subj.time
    obs = subj.obs
    kwargs = Dict{Symbol, Any}(kwargs)
    k = keys(kwargs)
    # Series style: explicit :plotstyle tuple or the first palette entry.
    if !(:plotstyle in k)
        kwargs[:linestyle], kwargs[:linecolor], kwargs[:markershape], kwargs[:markercolor] = PKPLOTSTYLE[1]
    else
        kwargs[:linestyle], kwargs[:linecolor], kwargs[:markershape], kwargs[:markercolor] = kwargs[:plotstyle]
    end
    if !(:legend in k)
        kwargs[:legend] = true
    end
    if !(:xlims in k)
        kwargs[:xlims] = (minimum(subj.time), maximum(subj.time)*1.1)
    end
    if :yscale in k
        if kwargs[:yscale] in [:ln, :log, :log2, :log10]
            # A log-scaled axis supersedes manual log transformation.
            ls = false
            if !(:minorticks in k) kwargs[:minorticks] = true end
            # Only positive observations can appear on a log axis.
            inds = findall(x-> x > 0, subj.obs)
            time = subj.time[inds]
            obs = subj.obs[inds]
            if !(:yticks in k)
                if kwargs[:yscale] == :log10
                    b = 10
                elseif kwargs[:yscale] == :log2
                    b = 2
                elseif kwargs[:yscale] == :ln || kwargs[:yscale] == :log
                    b = ℯ   # Euler's number (was mojibake "β―" in the damaged source)
                end
                t = collect(floor(log(b, minimum(obs))):ceil(log(b, maximum(obs))))
                pushfirst!(t, first(t) - 1)
                kwargs[:yticks] = b .^ t
            end
            if !(:ylims in k)
                kwargs[:ylims] = (minimum(obs)*0.5, maximum(obs)*2.)
            else
                # A non-positive lower limit is invalid on a log axis.
                if kwargs[:ylims][1] <= 0
                    kwargs[:ylims] = (minimum(obs)/b, kwargs[:ylims][2])
                end
            end
        end
    else
        if !(:ylims in k)
            kwargs[:ylims] = (minimum(subj.obs), maximum(subj.obs)*1.15)
        end
    end
    # Manual log transform of the data (linear axis, log values).
    if ls == true
        inds = findall(x-> x > 0, subj.obs)
        time = subj.time[inds]
        obs = log.(subj.obs[inds])
        if (:ylims in k)
            kwargs[:ylims] = (0, log(kwargs[:ylims][2]))
        end
    end
    p = pkplot!(time, obs; lcd = yticksn, tcd = xticksn, kwargs...)
    return p
end
# Draw one "page": every subject whose id matches `id` on a single plot.
# `ulist` is the list of unique type-sort id dictionaries: a subject's series
# style and legend label are chosen by which `ulist` entry its id contains.
# Mojibake repairs: `⊆` and `∈` were both rendered as "β" in the damaged
# source, which did not parse.
function pageplot(data, id, ulist; kwargs...)
    kwargs = Dict{Symbol, Any}(kwargs)
    k = keys(kwargs)
    if !(:title in k)
        kwargs[:title] = plotlabel(id, kwargs[:ldict])
    end
    fst = true
    p = nothing
    # Numbers of ulist entries that already received a legend label.
    labvec = Vector{Int}(undef, 0)
    # Make subdata by ID
    isnothing(id) ? subdata = data : subdata = subset(data, id)
    # Y lims
    if !(:ylims in k) && length(subdata) > 1
        ysc = :yscale in k
        ylmin = findmin(x->minconc(x, ysc), getdata(subdata))[1]
        ylmax = findmax(x->maxconc(x), getdata(subdata))[1]*1.15
        if ysc
            ylmax *= 5
        end
        kwargs[:ylims] = (ylmin, ylmax)
    end
    # Plotting subdata
    # Elimination overlays only make sense for a single-subject page.
    if length(subdata) > 1 kwargs[:elim] = false end
    for subj in subdata
        if !isnothing(ulist)
            # First ulist entry fully contained in this subject's id.
            num = findfirst(x-> x ⊆ subj.id, ulist)
            if !isnothing(num)
                style = plotstyle(num)
                if num ∈ labvec
                    # Label already emitted for this group: suppress duplicates.
                    kwargs[:label] = nothing
                else
                    kwargs[:label] = plotlabel(ulist[num], kwargs[:ldict])
                    push!(labvec, num)
                end
            else
                style = plotstyle(1)
            end
        else
            style = plotstyle(1)
        end
        if fst
            p = pkplot(subj; plotstyle = style, kwargs...)
            fst = false
        else
            pkplot!(subj; plotstyle = style, kwargs...)
        end
    end
    p
end
"""
pkplot(data::DataSet{T};
typesort::Union{Nothing, Symbol, AbstractVector{Symbol}} = nothing,
pagesort::Union{Nothing, Symbol, AbstractVector{Symbol}, NoPageSort} = nothing,
filter::Union{Nothing, Dict{Symbol}} = nothing,
uylims::Bool = false,
ldict = nothing,
savepath::Union{Nothing, AbstractString} = nothing,
namepref::Union{Nothing, AbstractString} = nothing,
onlyplots = false,
kwargs...) where T <: AbstractSubject
PK plot for subject set.
* `typesort` - sort on page by this id key;
* `pagesort` - different pages by this id key;
* `filter` - use only subjects if filter β subject id;
* `uylims` - same ylims for all dataset;
* `ldict` - Dict with labels for replace;
* `savepath` - path for plot saving;
* `namepref` - name prefix for saving files.
* `onlyplots` - if `true` return only vetor of plots;
Use `pagesort = MetidaNCA.NoPageSort()` to prevent page plotting (return single plot).
Return vector of pairs: `Page ID` => `Plot`.
"""
function pkplot(data::DataSet{T};
typesort::Union{Nothing, Symbol, AbstractVector{Symbol}} = nothing,
pagesort::Union{Nothing, Symbol, AbstractVector{Symbol}, NoPageSort} = nothing,
filter::Union{Nothing, Dict{Symbol}} = nothing,
uylims::Bool = false,
ldict = nothing,
savepath::Union{Nothing, AbstractString} = nothing,
namepref::Union{Nothing, AbstractString} = nothing,
onlyplots = false,
kwargs...) where T <: AbstractSubject
kwargs = Dict{Symbol, Any}(kwargs)
k = keys(kwargs)
if !(:ls in k)
kwargs[:ls] = false
end
if !(:elim in k)
kwargs[:elim] = false
end
if !(:drawbl in k)
kwargs[:drawbl] = false
end
if !(:drawth in k)
kwargs[:drawth] = false
end
if uylims && !(:ylims in k)
kwargs[:ylims] = (findmin(x -> minconc(x), getdata(data))[1], findmax(x -> maxconc(x), getdata(data))[1]*1.15)
end
if !isnothing(filter) data = subset(data, filter) end
if !isnothing(typesort)
if isa(typesort, Symbol) typesort = [typesort] end
typelist = uniqueidlist(data, typesort)
else
typelist = nothing
if !(:legend in k)
kwargs[:legend] = false
end
end
p = []
if isnothing(typesort) && isnothing(pagesort)
printtitle = false
if !(:title in k)
printtitle = true
end
for subj in data
if printtitle
kwargs[:title] = plotlabel(subj.id, ldict)
end
if !(:legend in k)
kwargs[:legend] = false
end
push!(p, subj.id => pkplot(subj; kwargs...))
end
elseif !isnothing(typesort) && isnothing(pagesort)
printtitle = false
if !(:title in k)
printtitle = true
end
for subj in data
if printtitle
kwargs[:title] = plotlabel(subj.id, ldict)
end
if !(:legend in k)
kwargs[:legend] = false
end
push!(p, subj.id => pageplot(data, subj.id, typelist; ldict, kwargs...))
end
elseif !isnothing(pagesort) && !isa(pagesort, NoPageSort)
if isa(pagesort, Symbol) pagesort = [pagesort] end
pagelist = uniqueidlist(data, pagesort)
for id in pagelist
push!(p, id => pageplot(data, id, typelist; ldict, kwargs...))
end
else
if !(:title in k) && !isnothing(filter)
kwargs[:title] = plotlabel(filter)
end
push!(p, pageplot(data, nothing, typelist; ldict, kwargs...))
end
if !isnothing(savepath)
if @isdefined savefig
if isfile(savepath)
error("File found on this path...")
elseif !isdir(savepath)
mkpath(savepath)
end
if isnothing(namepref) namepref = "plot" end
for i = 1:length(p)
if isa(p[i], Pair)
savefig(p[i][2], joinpath(savepath, namepref*"_$(i).png"))
else
savefig(p[i], joinpath(savepath, namepref*"_$(i).png"))
end
end
else
@warn "savefig not defined, install Plots.jl for plot writing... plots NOT saved..."
end
end
if isa(pagesort, NoPageSort)
return p[1]
end
if onlyplots return getindex.(p, 2) end
return p
end
"""
pkplot(data::DataSet{T}; kwargs...) where T <: NCAResult
"""
function pkplot(data::DataSet{T}; kwargs...) where T <: NCAResult
ds = map(x-> x.data, data)
pkplot(ds; kwargs...)
end
"""
pkplot(data::NCAResult; kwargs...)
"""
function pkplot(data::NCAResult; kwargs...)
pkplot(data.data; kwargs...)
end | MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 272 |
# Precompile a minimal import→NCA workflow so the common call path is
# compiled at package precompilation time (reduces first-call latency).
PrecompileTools.@compile_workload begin
    data = metida_table([0.,1.,2.,3.,4.,2.,1.,0.], [0.,1.,2.,3.,4.,5.,6.,7.], names = (:conc, :time))
    pki = pkimport(data, :time, :conc; dosetime = MetidaNCA.DoseTime(dose = 100, time = 0, tau = 5.5))
    pkr = nca!(pki)
end
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 2998 | # BL
# Subject
"""
    setbl!(data::T, bl) where T <: PDSubject

Set `baseline` for pd subject `data`. `bl` must not be `NaN` or `missing`.
"""
function setbl!(data::T, bl) where T <: PDSubject
    # Reject unusable baseline values early.
    if isnanormissing(bl)
        error("Baseline can't be NaN or missing.")
    end
    data.bl = bl
    return data
end
#DS ind Int
"""
    setbl!(data::DataSet{T}, bl, ind::Int) where T <: PDSubject

Set baseline for subject `ind` in `data`.
"""
setbl!(data::DataSet{T}, bl, ind::Int) where T <: PDSubject = (setbl!(data[ind], bl); data)
#DS iter Int
"""
    setbl!(data::DataSet{T}, bl, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}})

Set baseline for all subjects at indexes `inds` in `data`.
"""
function setbl!(data::DataSet{T}, bl, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: PDSubject
    foreach(i -> setbl!(data[i], bl), inds)
    return data
end
#DS all
"""
    setbl!(data::DataSet{T}, bl) where T <: PDSubject

Set `baseline` for all subjects in `data`.
"""
function setbl!(data::DataSet{T}, bl) where T <: PDSubject
    foreach(i -> setbl!(data[i], bl), 1:length(data))
    return data
end
#DS Dict
"""
    setbl!(data::DataSet{T}, bl, sort::Dict) where T <: PDSubject

Set `baseline` only for subjects which `sort` ⊆ `id` is `true`.
"""
function setbl!(data::DataSet{T}, bl, sort::Dict; kelauto = false) where T <: PDSubject
    # NOTE(review): `kelauto` is accepted but unused (apparently copied from
    # the kel-range setters); kept for backward compatibility.
    for i = 1:length(data)
        # "⊆" was mojibake ("β") in the damaged source: select subjects whose
        # id dictionary contains every `sort` pair.
        if sort ⊆ data[i].id setbl!(data[i], bl) end
    end
    data
end
#GET subj
"""
    getbl(data::T) where T <: PDSubject

Return the baseline value of pd subject `data`.
"""
getbl(data::T) where T <: PDSubject = data.bl
################################################################################
# TH
# Subject
"""
    setth!(data::T, th) where T <: PDSubject

Set `threshold` for subject `data`. `th` must not be `NaN` or `missing`.
"""
function setth!(data::T, th) where T <: PDSubject
    # Reject unusable threshold values early.
    if isnanormissing(th)
        error("Threshold can't be NaN or missing.")
    end
    data.th = th
    return data
end
#DS ind Int
"""
    setth!(data::DataSet{T}, th, ind::Int) where T <: PDSubject

Set threshold for subject `ind` in `data`.
"""
setth!(data::DataSet{T}, th, ind::Int) where T <: PDSubject = (setth!(data[ind], th); data)
#DS iter Int
"""
    setth!(data::DataSet{T}, th, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}})

Set threshold for all subjects at indexes `inds` in `data`.
"""
function setth!(data::DataSet{T}, th, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: PDSubject
    foreach(i -> setth!(data[i], th), inds)
    return data
end
#DS all
"""
    setth!(data::DataSet{T}, th) where T <: PDSubject

Set `threshold` for all subjects in `data`.
"""
function setth!(data::DataSet{T}, th) where T <: PDSubject
    foreach(i -> setth!(data[i], th), 1:length(data))
    return data
end
#DS Dict
"""
    setth!(data::DataSet{T}, th, sort::Dict) where T <: PDSubject

Set `threshold` only for subjects which `sort` ⊆ `id` is `true`.
"""
function setth!(data::DataSet{T}, th, sort::Dict; kelauto = false) where T <: PDSubject
    # NOTE(review): `kelauto` is accepted but unused; kept for compatibility.
    for i = 1:length(data)
        # "⊆" was mojibake ("β") in the damaged source.
        if sort ⊆ data[i].id setth!(data[i], th) end
    end
    data
end
#GET subj
"""
    getth(data::T) where T <: PDSubject

Return the threshold value of pd subject `data`.
"""
getth(data::T) where T <: PDSubject = data.th
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1751 | #Subject
"""
setdosetime!(data::T, dosetime::DoseTime) where T <: PKSubject
Set dose time `dosetime` for subject `data`.
"""
function setdosetime!(data::PKSubject, dosetime::DoseTime)
data.dosetime = dosetime
data
end
#DS ind Int
"""
    setdosetime!(data::DataSet{T}, dosetime::DoseTime, ind::Int) where T <: PKSubject

Set dose time for subject `ind` in `data`.

* `ind` - index in DataSet.
"""
setdosetime!(data::DataSet{<:PKSubject}, dosetime::DoseTime, ind::Int) = (setdosetime!(data[ind], dosetime); data)
#DS iter Int
"""
    setdosetime!(data::DataSet{T}, dosetime::DoseTime, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: PKSubject

Set dose time for all subjects at indexes `inds` in `data`.

* `inds` - indexes in DataSet.
"""
function setdosetime!(data::DataSet{<:PKSubject}, dosetime::DoseTime, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}})
    foreach(i -> setdosetime!(data[i], dosetime), inds)
    return data
end
#DS all
"""
    setdosetime!(data::DataSet{T}, dosetime::DoseTime) where T <: PKSubject

Set dose time `dosetime` for all subjects in DataSet.
"""
function setdosetime!(data::DataSet{<:PKSubject}, dosetime::DoseTime)
    foreach(i -> setdosetime!(data[i], dosetime), 1:length(data))
    return data
end
#DS Dict
"""
    setdosetime!(data::DataSet{T}, dosetime::DoseTime, sort::Dict) where T <: PKSubject

Set dose time `dosetime` for subjects if `sort` ⊆ subject's `id`.
"""
function setdosetime!(data::DataSet{<:PKSubject}, dosetime::DoseTime, sort::Dict)
    for i = 1:length(data)
        # "⊆" was mojibake ("β") in the damaged source.
        if sort ⊆ data[i].id setdosetime!(data[i], dosetime) end
    end
    data
end
#GET subj
"""
    getdosetime(data::T) where T <: PKSubject

Return dosetime of subject `data`.
"""
getdosetime(data::PKSubject) = data.dosetime
#Subject
#DS ind Int
#DS iter Int
#DS all
#DS Dict
#GET subj
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1751 | #Subject
"""
setkelauto!(data::T, kelauto::Bool) where T <: PKSubject
Set range for elimination parameters calculation for subject.
* `data` - PK subject;
* `kelauto` - value.
"""
function setkelauto!(data::PKSubject, kelauto::Bool)
if !kelauto
if !(data.kelrange.kelend > data.kelrange.kelstart > 0) error("Start point: $(data.kelrange.kelstart) end point: $(data.kelrange.kelend), check that data.kelrange.kelend > data.kelrange.kelstart > 0") end
end
data.kelauto = kelauto
data
end
#DS ind Int
"""
    setkelauto!(data::DataSet{T}, kelauto::Bool, ind::Int) where T <: PKSubject

Set automatic elimination-range selection for subject `ind` in `data`.
"""
setkelauto!(data::DataSet{<: PKSubject}, kelauto::Bool, ind::Int) = (setkelauto!(data[ind], kelauto); data)
#DS iter Int
"""
    setkelauto!(data::DataSet{T}, kelauto::Bool, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}) where T <: PKSubject

Set automatic elimination-range selection for subjects at indexes `inds`.
"""
function setkelauto!(data::DataSet{<: PKSubject}, kelauto::Bool, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}})
    foreach(i -> setkelauto!(data[i], kelauto), inds)
    return data
end
#DS all
"""
    setkelauto!(data::DataSet{T}, kelauto::Bool) where T <: PKSubject

Set automatic elimination-range selection for all subjects in `data`.
"""
function setkelauto!(data::DataSet{<: PKSubject}, kelauto::Bool)
    foreach(i -> setkelauto!(data[i], kelauto), 1:length(data))
    return data
end
#DS Dict
"""
    setkelauto!(data::DataSet{T}, kelauto::Bool, sort::Dict) where T <: PKSubject

Set automatic elimination-range selection only for subjects which
`sort` ⊆ `id` is `true`.
"""
function setkelauto!(data::DataSet{<: PKSubject}, kelauto::Bool, sort::Dict)
    for i = 1:length(data)
        # "⊆" was mojibake ("β") in the damaged source.
        if sort ⊆ data[i].id setkelauto!(data[i], kelauto) end
    end
    data
end
#GET subj
"""
    getkelauto(data::T) where T <: PKSubject

Return the automatic elimination-range flag of subject `data`.
"""
getkelauto(data::PKSubject) = data.kelauto
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1964 | #Subject
"""
setkelrange!(data::T, range::ElimRange{:point}; kelauto = false) where T <: PKSubject
Set `range` for subject `data`. Set `kelauto` if possible.
"""
function setkelrange!(data::T, range::ElimRange{:point}; kelauto = false) where T <: PKSubject
if range.kelend > length(data) throw(ArgumentError("Kel endpoint out of range")) end
data.kelrange = range
setkelauto!(data, kelauto)
data
end
#DS ind Int
"""
    setkelrange!(data::DataSet{T}, range::ElimRange{:point}, ind::Int; kelauto = false) where T <: PKSubject

Set `range` for subject `ind` in `data`.
"""
setkelrange!(data::DataSet{T}, range::ElimRange{:point}, ind::Int; kelauto = false) where T <: PKSubject = (setkelrange!(data[ind], range; kelauto = kelauto); data)
#DS iter Int
"""
    setkelrange!(data::DataSet{T}, range::ElimRange{:point}, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}; kelauto = false)

Set `range` for subjects at indexes `inds` in `data`.
"""
function setkelrange!(data::DataSet{T}, range::ElimRange{:point}, inds::Union{Vector{Int}, UnitRange{Int}, Tuple{Vararg{Int}}}; kelauto = false) where T <: PKSubject
    foreach(i -> setkelrange!(data[i], range; kelauto = kelauto), inds)
    return data
end
#DS all
"""
    setkelrange!(data::DataSet{T}, range::ElimRange{:point}; kelauto = false) where T <: PKSubject

Set `range` for all subjects in `data`.
"""
function setkelrange!(data::DataSet{T}, range::ElimRange{:point}; kelauto = false) where T <: PKSubject
    foreach(i -> setkelrange!(data[i], range; kelauto = kelauto), 1:length(data))
    return data
end
#DS Dict
"""
    setkelrange!(data::DataSet{T}, range::ElimRange{:point}, sort::Dict; kelauto = false) where T <: PKSubject

Set `range` only for subjects which `sort` ⊆ `id` is `true`.
"""
function setkelrange!(data::DataSet{T}, range::ElimRange{:point}, sort::Dict; kelauto = false) where T <: PKSubject
    for i = 1:length(data)
        # "⊆" was mojibake ("β") in the damaged source.
        if sort ⊆ data[i].id setkelrange!(data[i], range; kelauto = kelauto) end
    end
    data
end
#GET subj
"""
    getkelrange(data::T) where T <: PKSubject

Return the elimination range of subject `data`.
"""
getkelrange(data::T) where T <: PKSubject = data.kelrange
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 3861 |
# One-line summary of a dose event.
function Base.show(io::IO, obj::DoseTime)
    print(io, "Dose - ", obj.dose, "; Time - ", obj.time, "; Tau - ", obj.tau)
end
# One-line summary of an elimination range with optional exclusions.
function Base.show(io::IO, obj::ElimRange)
    print(io, "Elimination range: $(obj.kelstart) - $(obj.kelend) ")
    if isempty(obj.kelexcl)
        print(io, "No exclusion.")
    else
        print(io, "Exclusions: ", join(obj.kelexcl, ", "), ".")
    end
end
# Tabular display of all candidate elimination fits.
function Base.show(io::IO, obj::KelData)
    println(io, "Elimination table:")
    # Header fixes: "Strat time" typo and mojibake "rΒ²" → "r²"
    # (display-only strings).
    header = ["Start time", "End time", "a", "b", "r²", "Adjusted r²", "N"]
    mt = metida_table(obj.s, obj.e, obj.a, obj.b, obj.r, obj.ar, obj.n; names = Tuple(Symbol.(header)))
    PrettyTables.pretty_table(io, mt; tf = PrettyTables.tf_compact, header = header)
end
# PK Subject
# Multi-line display: id pairs, observation count, dose, elimination range,
# and a compact time/concentration table.
function Base.show(io::IO, obj::PKSubject)
    println(io, "  Pharmacokinetic subject")
    if length(obj.id) > 0
        print(io, "ID: ")
        for (k, v) in obj.id
            print(io, "$k => $v;")
        end
        println(io, "")
    end
    println(io, "Observations: $(length(obj)); ", obj.dosetime)
    println(io, obj.kelrange)
    PrettyTables.pretty_table(io, metida_table(obj.time, obj.obs; names = (:Time, :Concentration)); tf = PrettyTables.tf_compact)
end
# Urine PK subject display: interval start/end, concentration and volume.
function Base.show(io::IO, obj::UPKSubject)
    println(io, "  Pharmacokinetic subject (urine)")
    println(io, "Observations: $(length(obj)); ", obj.dosetime)
    println(io, obj.kelrange)
    PrettyTables.pretty_table(io, metida_table(getindex.(obj.time, 1), getindex.(obj.time, 2), obj.obs, obj.vol); tf = PrettyTables.tf_compact, header = ["Start time", "End time", "Concentration", "Volume"])
end
# PD subject display: id pairs, observation count, time/observation table.
function Base.show(io::IO, obj::PDSubject)
    println(io, "  Pharmacodynamics subject")
    if length(obj.id) > 0
        print(io, "ID: ")
        for (k, v) in obj.id
            print(io, "$k => $v;")
        end
        println(io, "")
    end
    println(io, "Observations: $(length(obj)); ")
    PrettyTables.pretty_table(io, metida_table(obj.time, obj.obs; names = (:Time, :Observation)); tf = PrettyTables.tf_compact)
end
# Human-readable subject-kind label used by DataSet `show`.
subject_type_str(subj::Type{<:PKSubject}) = "Pharmacokinetics subject"
# Human-readable subject-kind label used by DataSet `show`.
subject_type_str(subj::Type{<:UPKSubject}) = "Pharmacokinetics subject (urine)"
# Human-readable subject-kind label used by DataSet `show`.
subject_type_str(subj::Type{<:PDSubject}) = "Pharmacodynamics subject"
# DataSet display: kind, length, and the id pairs of up to 20 subjects.
function Base.show(io::IO, obj::DataSet{ST}) where ST <: AbstractSubject
    println(io, "DataSet: $(subject_type_str(ST))")
    println(io, "Length: $(length(obj))")
    # Cap the listing at 20 subjects to keep output readable.
    lo = min(length(obj), 20)
    for i = 1:lo
        print(io, "Subject $(i): ")
        if length(obj[i].id) > 0
            for (k, v) in obj[i].id
                print(io, "$k => $v, ")
            end
            println(io, "")
        else
            println(io, "-")
        end
    end
    if lo < length(obj)
        printstyled(io, "$(length(obj) - lo) subjects omitted... \n"; color = :blue)
    end
end
# Render a single NCA result as a two-column Parameter/Value table.
function Base.show(io::IO, res::T) where T <: NCAResult
    println(io, " PK/PD subject NCA result")
    PrettyTables.pretty_table(io, res.result; tf = PrettyTables.tf_compact, header = ["Parameter", "Value"])
end
# Listing of an NCA-result data set; mirrors the subject DataSet listing,
# but the id pairs live under the wrapped subject (element.data.id).
function Base.show(io::IO, ds::DataSet{Res}) where Res <: NCAResult
    println(io, "DataSet: PK/PD NCA result")
    println(io, "Length: $(length(ds))")
    shown = min(length(ds), 20)
    for i = 1:shown
        print(io, "Subject $(i): ")
        ids = ds[i].data.id
        if isempty(ids)
            println(io, "-")
        else
            for p in ids
                print(io, "$(first(p)) => $(last(p)), ")
            end
            println(io, "")
        end
    end
    if shown < length(ds)
        printstyled(io, "$(length(ds) - shown) subjects omitted... \n"; color = :blue)
    end
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 891 | """
auc_sparse(time, obs)
AUC for sparse data.
```math
w_1 = (t_2 - t_1) / 2
```
```math
w_j = (t_{j+1} - t_{j-1}) / 2 (2 \\leq j \\leq J - 1)
```
```math
w_J = (t_J - t_{J-1}) / 2
```
```math
AUC = \\sum_{j=1}^J \\mu_j w_j
```
where ``\\mu_j`` is the mean drug concentration at time ``t_j``.
"""
function auc_sparse(time::AbstractVector, obs::AbstractVector)
    npt = length(time)
    # Input validation: at least two strictly increasing time points,
    # and one observation per time point.
    npt < 2 && error("length(time) < 2")
    npt != length(obs) && error("length(time) != length(obs)")
    for i = 1:npt - 1
        time[i+1] <= time[i] && error("Unsorted observations!")
    end
    # Trapezoidal-style weights: half-interval at both ends, centered
    # half-spans in between (the inner loop is empty when npt == 2).
    w = Vector{Float64}(undef, npt)
    w[1] = (time[2] - time[1]) / 2
    w[end] = (time[end] - time[end-1]) / 2
    for j = 2:npt - 1
        w[j] = (time[j+1] - time[j-1]) / 2
    end
    # AUC = weighted sum of the mean concentrations.
    return obs' * w
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 1320 |
"""
    timefilter(subj::PKSubject, time::AbstractRange)

Return a copy of `subj` keeping only observations whose time value is an
element of `time` (range membership). Previously computed elimination data
is cleared; elimination settings are reset when they fall outside `time`.
"""
function timefilter(subj::PKSubject, time::AbstractRange)
    result = deepcopy(subj)
    # Collect indices of observations whose time point is not in `time`.
    drop = Int[]
    for i in eachindex(result.time)
        (result.time[i] in time) || push!(drop, i)
    end
    deleteat!(result.time, drop)
    deleteat!(result.obs, drop)
    # Stored elimination fits are no longer valid after filtering.
    resize!(result.keldata, 0)
    # NOTE(review): kelrange stores point indices, yet they are compared with
    # time values here — confirm this is intended.
    if !(result.kelrange.kelstart in time) || !(result.kelrange.kelend in time) || any(x -> !(x in time), result.kelrange.kelexcl)
        result.kelrange = ElimRange()
        result.kelauto = true
    end
    result
end
"""
    timefilter(subj::PKSubject, time::Tuple{<:Number, <:Number})

Make deepcopy of subj and remove all observations < time[1] or > time[2]. Then resize keldata to 0.
If any of points in elimination range not in min/max time, then elimination settings reset.
"""
function timefilter(subj::PKSubject, time::Tuple{<:Number, <:Number})
    # BUG FIX: this method previously delegated to
    # `timefilter(subj, LinRange(time[1], time[2], 2))`. `in(x, r::AbstractRange)`
    # tests membership among the range's *elements*, and a 2-point LinRange
    # contains only its endpoints, so every interior observation was dropped.
    # Filter by the closed interval [time[1], time[2]] instead, as documented.
    lo, hi = time
    subj_ = deepcopy(subj)
    inds = Int[]
    for n in eachindex(subj_.time)
        if !(lo <= subj_.time[n] <= hi) push!(inds, n) end
    end
    deleteat!(subj_.time, inds)
    deleteat!(subj_.obs, inds)
    # Stored elimination fits are no longer valid after filtering.
    resize!(subj_.keldata, 0)
    # Reset elimination settings when the stored range/exclusions fall outside
    # the window (mirrors the AbstractRange method's logic).
    if !(lo <= subj_.kelrange.kelstart <= hi) || !(lo <= subj_.kelrange.kelend <= hi) || any(x -> !(lo <= x <= hi), subj_.kelrange.kelexcl)
        subj_.kelrange = ElimRange()
        subj_.kelauto = true
    end
    subj_
end
"""
    timefilter(data::DataSet{<: PKSubject}, time)

Build a new `DataSet` by applying `timefilter` to every subject in `data`.
"""
function timefilter(data::DataSet{<: PKSubject}, time)
    src = getdata(data)
    out = similar(src)
    for i in eachindex(src)
        out[i] = timefilter(src[i], time)
    end
    DataSet(out)
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 9013 |
# Elimination data
# Columnar container of candidate terminal-elimination regressions.
# Each index i describes one fit over points s[i]..e[i]; a/b are the regression
# coefficients, r/ar are fit statistics (NOTE(review): presumably r² and
# adjusted r² — confirm), n[i] is the number of points used in the fit.
struct KelData{S<:Number, E<:Number}
    s::Vector{S}        # fit range start points
    e::Vector{E}        # fit range end points
    a::Vector{Float64}  # regression coefficient (intercept — TODO confirm)
    b::Vector{Float64}  # regression coefficient (slope — TODO confirm)
    r::Vector{Float64}  # fit statistic
    ar::Vector{Float64} # fit statistic
    n::Vector{Int}      # number of points in each fit
    # Inner constructor: A and B are accepted unconstrained; a and b are
    # converted to Vector{Float64} by the field declarations via `new`.
    function KelData(s::Vector{S}, e::Vector{E}, a::Vector{A}, b::Vector{B}, r, ar, n)::KelData where S <: Number where E <: Number where A where B
        new{S, E}(s, e, a, b, r, ar, n)::KelData
    end
    # Empty container with Float64 range bounds.
    function KelData()::KelData
        KelData(Float64[], Float64[], Float64[], Float64[], Float64[], Float64[], Int[])
    end
end
# Drop all stored elimination fits (resize every column to zero length).
resize!(keldata::KelData) = resize!(keldata, 0)
# Resize every column of the elimination-data container to `i` elements
# and return the container.
function resize!(keldata::KelData, i::Int)
    for col in (keldata.s, keldata.e, keldata.a, keldata.b, keldata.r, keldata.ar, keldata.n)
        resize!(col, i)
    end
    keldata
end
# Append one candidate elimination fit (range s..e, coefficients a/b,
# statistics r/ar, point count n) to every column of `keldata`.
# FIX: return the collection itself, per the Base `push!` convention —
# previously the method returned the result of the last inner push!
# (the `keldata.n` vector).
function Base.push!(keldata::KelData, s, e, a, b, r, ar, n)
    push!(keldata.s, s)
    push!(keldata.e, e)
    push!(keldata.a, a)
    push!(keldata.b, b)
    push!(keldata.r, r)
    push!(keldata.ar, ar)
    push!(keldata.n, n)
    keldata
end
# Number of stored elimination fits (all columns share this length).
Base.length(keldata::KelData) = length(keldata.a)
# Elimination settings
"""
    ElimRange(kelstart::Int, kelend::Int, kelexcl::Vector{Int})::ElimRange

Elimination settings for PK subject.

* `kelstart` - start point;
* `kelend` - end point;
* `kelexcl` - excluded points (not time value).

Defaults of 0 / empty (via the keyword constructor) denote "not set".
"""
mutable struct ElimRange{Symbol}
    # NOTE(review): the type parameter is literally named `Symbol` (shadowing
    # Base.Symbol inside this definition); every instance is ElimRange{:point}.
    kelstart::Int        # index of the first fit point (0 = unset)
    kelend::Int          # index of the last fit point (0 = unset)
    kelexcl::Vector{Int} # point indices excluded from the fit
    function ElimRange(kelstart::Int, kelend::Int, kelexcl::Vector{Int}; time = false)::ElimRange
        # NOTE(review): the `time` keyword is accepted but never used here —
        # confirm whether a time-based variant was intended.
        if kelstart > kelend throw(ArgumentError("Kel start > kel end")) end
        if kelstart < 0 throw(ArgumentError("Kel start point < 0")) end
        if kelend < 0 throw(ArgumentError("Kel endpoint < 0")) end
        if any(x -> x < 0, kelexcl) throw(ArgumentError("Exclude point < 0")) end
        if kelstart in kelexcl || kelend in kelexcl throw(ArgumentError("Kel start or kel end in exclusion")) end
        new{:point}(kelstart, kelend, kelexcl)::ElimRange
    end
    # Keyword convenience constructor; defaults denote "unset / automatic".
    function ElimRange(;kelstart = 0, kelend = 0, kelexcl = Int[])
        ElimRange(kelstart, kelend, kelexcl)
    end
end
# Dose settings
"""
    DoseTime(dose::D, time::T, tau::TAU) where D <: Number where T <: Number where TAU <: Number

Dose settings.

* `dose` - dose;
* `time` - dose time;
* `tau` - tau (τ);

Dose time set 0 by default.
"""
struct DoseTime{D <: Number, T <: Number, TAU <: Number}
    dose::D  # administered dose (NaN = not set, per keyword default)
    time::T  # time of dose administration; must be >= 0
    tau::TAU # τ (NaN = not set, per keyword default)
    function DoseTime(dose::D, time::T, tau::TAU) where D <: Number where T <: Number where TAU <: Number
        if time < zero(T) throw(ArgumentError("Dose time can't be less zero!")) end
        new{D, T, TAU}(dose, time, tau)::DoseTime
    end
    # Keyword constructor; defaults: dose and tau unknown (NaN), time 0.0.
    function DoseTime(;dose = NaN, time = 0.0, tau = NaN)
        DoseTime(dose, time, tau)
    end
    #=
    function DoseTime(dose)
        DoseTime(dose, 0, NaN)
    end
    function DoseTime(dose, time)
        DoseTime(dose, time, NaN)
    end
    =#
end
# PK subject
"""
    PKSubject(time::Vector{T}, conc::Vector{O}, kelauto::Bool, kelrange::ElimRange, dosetime::DoseTime, keldata::KelData, sort::Dict{Symbol, V} = Dict{Symbol, Any}()) where T <: Number where O <: Union{Number, Missing} where V

Pharmacokinetic subject.

Fields:

* time::Vector{T} - time values;
* obs::Vector{O} - observations;
* kelauto::Bool - automatic elimination-range selection flag;
* kelrange::ElimRange - elimination range settings;
* dosetime::DoseTime - dose settings;
* keldata::KelData - elimination fit data;
* id::Dict{Symbol, V} - subject identifiers.
"""
mutable struct PKSubject{T <: Number, O <: Union{Number, Missing}, V <: Any} <: AbstractSubject
    time::Vector{T}      # observation time points
    obs::Vector{O}       # observed values; may contain missing
    kelauto::Bool        # true: elimination range is selected automatically
    kelrange::ElimRange  # elimination range settings
    dosetime::DoseTime   # dose information
    keldata::KelData     # elimination fit data (cleared by `timefilter`)
    id::Dict{Symbol, V}  # identification key => value pairs
    function PKSubject(time::Vector{T}, conc::Vector{O}, kelauto::Bool, kelrange::ElimRange, dosetime::DoseTime, keldata::KelData, sort::Dict{Symbol, V} = Dict{Symbol, Any}()) where T <: Number where O <: Union{Number, Missing} where V
        new{T, O, V}(time, conc, kelauto, kelrange, dosetime, keldata, sort)::PKSubject
    end
    # Convenience constructor: starts with an empty KelData whose range bound
    # type matches the time eltype.
    function PKSubject(time::Vector{T}, conc::Vector{O}, kelauto::Bool, kelrange::ElimRange, dosetime::DoseTime, sort::Dict{Symbol, V}) where T where O where V
        PKSubject(time, conc, kelauto, kelrange, dosetime, KelData(T[], T[], Float64[], Float64[], Float64[], Float64[], Int[]), sort)
    end
    #=
    function PKSubject(time::Vector, conc::Vector, sort::Dict)
        PKSubject(time, conc, true, ElimRange(), DoseTime(NaN, 0), KelData(), sort)
    end
    function PKSubject(time::Vector, conc::Vector, kelauto::Bool, kelrange, dosetime; sort = Dict())
        PKSubject(time, conc, kelauto, kelrange, dosetime, KelData(), sort)
    end
    function PKSubject(time::Vector, conc::Vector; sort = Dict())
        PKSubject(time, conc, true, ElimRange(), DoseTime(NaN, 0), KelData(), sort)
    end
    =#
end
# Number of observations in a subject = number of time points.
Base.length(obj::AbstractSubject) = length(obj.time)
"""
    NCAResult(subject::T, options, result::Dict{Symbol, U}) where T <: AbstractSubject where U

NCA result.

Fields:

* data::T - the source subject;
* options::Dict{Symbol} - options used for the calculation;
* result::Dict{Symbol, U} - computed parameters keyed by symbol.
"""
struct NCAResult{T, U} <: AbstractSubjectResult{T}
    data::T                 # source subject the result was computed from
    options::Dict{Symbol}   # calculation options (abstract value type)
    result::Dict{Symbol, U} # parameter => value
    function NCAResult(subject::T, options, result::Dict{Symbol, U}) where T <: AbstractSubject where U
        new{T, U}(subject, options, result)
    end
    #=
    function NCAResult(subject::T, method, result) where T <: AbstractSubject
        NCAResult(subject, method, result, Dict())
    end
    =#
end
"""
    LimitRule(lloq::T, btmax, atmax, nan, rm::Bool) where T <: Real

    LimitRule(;lloq = NaN, btmax = NaN, atmax = NaN, nan = NaN, rm::Bool = false)

* `lloq` - LLOQ - low limit of quantification;
* `btmax` - value for points before Tmax;
* `atmax` - values for points after Tmax;
* `nan` - values for replacing `NaN`;
* `rm` - if `true`, remove all `NaN` points.

Rule for PK subject.

* STEP 1 (NaN step): replace all `NaN` and `missing` values with nan keyword value (if `nan` not NaN);
* STEP 2 (LLOQ step): replace values below `lloq` with `btmax` value if this value before Tmax or with atmax if this value after Tmax (if `lloq` not NaN);
* STEP 3 (remove NaN): `rm` == true, then remove all `NaN` and `missing` values.

See also: [`applylimitrule!`](@ref)
"""
struct LimitRule{T<:Real}
    lloq::T        # lower limit of quantification (NaN disables STEP 2)
    btmax::Float64 # replacement for sub-LLOQ values before Tmax
    atmax::Float64 # replacement for sub-LLOQ values after Tmax
    nan::Float64   # replacement for NaN/missing values (NaN disables STEP 1)
    rm::Bool       # remove remaining NaN/missing observations when true
    function LimitRule(lloq::T, btmax, atmax, nan, rm::Bool) where T <: Real
        new{T}(lloq, btmax, atmax, nan, rm)::LimitRule
    end
    # Keyword constructor; all steps disabled by default.
    function LimitRule(;nan = NaN, lloq = NaN, btmax = NaN, atmax = NaN, rm::Bool = false)
        LimitRule(lloq, btmax, atmax, nan, rm)
    end
end
#=
function isapplicable(lr::LimitRule)
!isnan(lr.lloq) || !isnan(lr.nan) || lr.rm ? true : false
end
=#
#Urine PK subject
# Pharmacokinetic subject for urine data: each observation belongs to a
# collection interval stored as a (start, end) tuple, with the collected
# volume in `vol` (see the corresponding `Base.show` table headers:
# Start time / End time / Concentration / Volume).
mutable struct UPKSubject{T <: Tuple{Number, Number}, O <: Union{Number, Missing}, VOL <: Union{Number, Missing}, V <: Any} <: AbstractSubject
    time::Vector{T}     # (start, end) of each collection interval
    obs::Vector{O}      # observed value per interval; may contain missing
    vol::Vector{VOL}    # collected volume per interval; may contain missing
    kelauto::Bool       # true: elimination range is selected automatically
    kelrange::ElimRange # elimination range settings
    dosetime::DoseTime  # dose information
    keldata::KelData    # elimination fit data
    id::Dict{Symbol, V} # identification key => value pairs
    function UPKSubject(time::Vector{T}, conc::Vector{O}, vol::Vector{VOL}, kelauto::Bool, kelrange::ElimRange, dosetime::DoseTime, keldata::KelData, id::Dict{Symbol, V} = Dict{Symbol, Any}()) where T <: Tuple{Number, Number} where O <: Union{Number, Missing} where VOL <: Union{Number, Missing} where V
        new{T, O, VOL, V}(time, conc, vol, kelauto, kelrange, dosetime, keldata, id)
    end
    # Convenience constructor: starts with an empty KelData whose range bound
    # type is the promotion of the interval start/end types.
    function UPKSubject(time::AbstractVector{Tuple{S,E}}, conc::Vector, vol::Vector, kelauto::Bool, kelrange::ElimRange, dosetime::DoseTime, id::Dict{Symbol, V}) where V where S where E
        ttype = promote_type(S, E)
        UPKSubject(time, conc, vol, kelauto, kelrange, dosetime, KelData(ttype[], ttype[], Float64[], Float64[], Float64[], Float64[], Int[]), id)
    end
end
# PD subject
# Pharmacodynamics subject: response observations over time plus the `bl`
# (baseline) and `th` (threshold) levels read by getbl/getth and the PD
# parameters (AUCABL/AUCATH etc.).
mutable struct PDSubject{T <: Number, O <: Union{Number, Missing}, V <: Any} <: AbstractSubject
    time::Vector{T}     # observation time points
    obs::Vector{O}      # observed response values; may contain missing
    bl::Float64         # baseline level
    th::Float64         # threshold level
    id::Dict{Symbol, V} # identification key => value pairs
    function PDSubject(time::Vector{T}, conc::Vector{O}, bl, th, sort::Dict{Symbol, V} = Dict{Symbol, Any}()) where T <: Number where O <: Union{Number, Missing} where V
        new{T, O, V}(time, conc, bl, th, sort)::PDSubject
    end
    # NOTE(review): this fallback has the same argument list as the method
    # above; it is only selected when `time`/`conc` eltypes do not satisfy the
    # stricter method's bounds, and then it calls itself (infinite recursion).
    # Confirm whether it can be removed.
    function PDSubject(time::Vector, conc::Vector, bl, th, sort::Dict{Symbol, V}) where V
        PDSubject(time, conc, bl, th, sort)
    end
end
# Accessor for a subject's time vector.
gettime(subj::AbstractSubject) = getfield(subj, :time)
# Accessor for a subject's observation vector.
getobs(subj::AbstractSubject) = getfield(subj, :obs)
# Immutable bundle of settings passed through an NCA computation.
struct NCAOptions
    adm::Symbol       # administration route code (NOTE(review): confirm allowed values)
    calcm::Symbol     # calculation method, e.g. :luld appears in the tests
    intpm::Symbol     # interpolation method (NOTE(review): confirm semantics)
    verbose::Int      # verbosity level
    warn::Bool        # emit warnings when true
    io::IO            # output stream for verbose/warning messages
    modify!::Function # user hook applied during calculation (NOTE(review): confirm signature)
end
#=
function gettime(subj::PKSubject)
end
function getobs(subj::PKSubject)
end
function gettime(subj::UPKSubject)
end
function getobs(subj::UPKSubject)
end
=#
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
|
[
"MIT"
] | 0.5.13 | e7a74076e469eb9611b204bab0cc9e5c69453894 | code | 3015 |
# FIX: the approximate-comparison operator `≈` had been corrupted to `β`
# (mojibake) throughout this testset; `@test a ≈ b atol=...` is the intended
# isapprox form (`atol` only pairs with `≈`). Restored below.
@testset " #6 Pharmacodynamics data; Linear-trapezoidal rule " begin
    io = IOBuffer();
    pd = MetidaNCA.pdimport(pddata, :time, :obs, :subj; bl = 1.5, th = 5.0)
    @test_nowarn MetidaNCA.pkplot(pd)
    @test_nowarn MetidaNCA.pkplot(pd[1], drawth = true, drawbl = true)
    pd_res = MetidaNCA.nca!(pd[1])
    pd_rds = MetidaNCA.nca!(pd)
    # Time above/below baseline and above/below threshold must partition
    # the whole observation span.
    @test last(pd[1].time) - first(pd[1].time) == pd_res[:TABL] + pd_res[:TBBL] == pd_res[:TATH] + pd_res[:TBTH]
    nca_res = MetidaNCA.nca(pddata, :time, :obs, :subj)[1]
    pd_res = MetidaNCA.nca(pddata, :time, :obs, :subj, type = :pd, bl = 0.0, th = 0.0)[1]
    @test pd_res[:AUCABL] == pd_res[:AUCATH] == nca_res[:AUClast]
    @test MetidaNCA.getbl(pd[1]) ≈ 1.5
    @test MetidaNCA.getth(pd[1]) ≈ 5.0
    # setbl!/setth! setters: scalar, by index, by index vector, by id dict.
    MetidaNCA.setbl!(pd, 2)
    MetidaNCA.setth!(pd, 6)
    @test MetidaNCA.getbl(pd[1]) ≈ 2.0
    @test MetidaNCA.getth(pd[1]) ≈ 6.0
    MetidaNCA.setbl!(pd, 3, 1)
    MetidaNCA.setth!(pd, 4, 1)
    @test MetidaNCA.getbl(pd[1]) ≈ 3.0
    @test MetidaNCA.getth(pd[1]) ≈ 4.0
    MetidaNCA.setbl!(pd, 2, [1])
    MetidaNCA.setth!(pd, 1, [1])
    @test MetidaNCA.getbl(pd[1]) ≈ 2.0
    @test MetidaNCA.getth(pd[1]) ≈ 1.0
    MetidaNCA.setbl!(pd, 0, Dict(:subj => 1))
    MetidaNCA.setth!(pd, 0, Dict(:subj => 1))
    @test MetidaNCA.getbl(pd[1]) ≈ 0.0
    @test MetidaNCA.getth(pd[1]) ≈ 0.0
    @test_throws ErrorException MetidaNCA.setbl!(pd, NaN)
    @test_throws ErrorException MetidaNCA.setth!(pd, NaN)
    pd = MetidaNCA.pdimport(pddata, :time, :obs; bl = 3.0, th = 1.5, id = Dict(:subj => 1))
    pd_rds = MetidaNCA.nca!(pd)
    @test pd_rds[:Tmax] ≈ 5.0 atol=1E-6
    @test pd_rds[:Rmax] ≈ 8.0 atol=1E-6
    @test pd_rds[:AUCABL] ≈ 7.3857143 atol=1E-6
    @test pd_rds[:AUCBBL] ≈ 8.7357143 atol=1E-6
    @test pd_rds[:AUCATH] ≈ 13.959524 atol=1E-6
    @test pd_rds[:AUCBTH] ≈ 1.8095238 atol=1E-6
    @test pd_rds[:AUCBTW] ≈ 6.926190 atol=1E-6
    @test pd_rds[:TABL] ≈ 3.4809524 atol=1E-6
    @test pd_rds[:TBBL] ≈ 5.5190476 atol=1E-6
    @test pd_rds[:TATH] ≈ 5.7619048 atol=1E-6
    @test pd_rds[:TBTH] ≈ 3.2380952 atol=1E-6
    @test pd_rds[:AUCNETB] ≈ -1.35 atol=1E-2
    @test pd_rds[:AUCNETT] ≈ 12.15 atol=1E-2
    @test pd_rds[:TIMEBTW] ≈ 2.2809524 atol=1E-6
    # Swapping bl and th should mirror the above/below parameters.
    pd = MetidaNCA.pdimport(pddata, :time, :obs; bl = 1.5, th = 3.0, id = Dict(:subj => 1))
    pd_rds = MetidaNCA.nca!(pd)
    @test pd_rds[:AUCATH] ≈ 7.3857143 atol=1E-6
    @test pd_rds[:AUCBTH] ≈ 8.7357143 atol=1E-6
    @test pd_rds[:AUCABL] ≈ 13.959524 atol=1E-6
    @test pd_rds[:AUCBBL] ≈ 1.8095238 atol=1E-6
    @test pd_rds[:AUCBTW] ≈ 6.5738095 atol=1E-6
    @test pd_rds[:TATH] ≈ 3.4809524 atol=1E-6
    @test pd_rds[:TBTH] ≈ 5.5190476 atol=1E-6
    @test pd_rds[:TABL] ≈ 5.7619048 atol=1E-6
    @test pd_rds[:TBBL] ≈ 3.2380952 atol=1E-6
    @test pd_rds[:AUCNETT] ≈ -1.35 atol=1E-2
    @test pd_rds[:AUCNETB] ≈ 12.15 atol=1E-2
    @test pd_rds[:TIMEBTW] ≈ 2.2809524 atol=1E-6
    #
    pd_rds = MetidaNCA.nca!(pd; calcm = :luld)
end
| MetidaNCA | https://github.com/PharmCat/MetidaNCA.jl.git |
Subsets and Splits