licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 11001 | # We provide an explicit extension package for CUDA
# since the pullback kernel profits a lot from
# parallel reductions, which are relatively straightforwardly
# expressed using while loops.
# However KernelAbstractions currently does not play nicely
# with while loops, see e.g. here:
# https://github.com/JuliaGPU/KernelAbstractions.jl/issues/330
module DiffPointRasterisationCUDAExt
using DiffPointRasterisation, CUDA
using ArgCheck
using FillArrays
using StaticArrays
const CuOrFillArray{T,N} = Union{CuArray{T,N},FillArrays.AbstractFill{T,N}}
const CuOrFillVector{T} = CuOrFillArray{T,1}
# Pullback (adjoint) kernel for batched point rasterisation.
#
# Thread layout (fixed by the launch code in `raster_pullback!` below):
#   threadIdx().x : always 1 (points_per_workgroup == 1)
#   threadIdx().y : pose (batch) index within the workgroup
#   threadIdx().z : neighbor-voxel index, one of the 2^N_out voxels a
#                   point is interpolated into
# Dynamic shared memory holds per-thread partial results that are combined
# with tree reductions; cross-block accumulation uses atomic adds into the
# global output arrays.
function raster_pullback_kernel!(
    ::Type{T},
    ds_dout,
    points::AbstractVector{<:StaticVector{N_in}},
    rotations::AbstractVector{<:StaticMatrix{N_out,N_in,TR}},
    translations::AbstractVector{<:StaticVector{N_out,TT}},
    out_weights,
    point_weights,
    shifts,
    scale,
    # outputs:
    ds_dpoints,
    ds_drotation,
    ds_dtranslation,
    ds_dout_weight,
    ds_dpoint_weight,
) where {T,TR,TT,N_in,N_out}
    n_voxel = blockDim().z
    points_per_workgroup = blockDim().x
    batchsize_per_workgroup = blockDim().y
    # @assert points_per_workgroup == 1
    # @assert n_voxel == 2^N_out
    # @assert threadIdx().x == 1
    n_threads_per_workgroup = n_voxel * batchsize_per_workgroup
    s = threadIdx().z
    b = threadIdx().y
    # linear thread id within the workgroup
    thread = (b - 1) * n_voxel + s
    neighbor_voxel_id = (blockIdx().z - 1) * n_voxel + s
    point_idx = (blockIdx().x - 1) * points_per_workgroup + threadIdx().x
    batch_idx = (blockIdx().y - 1) * batchsize_per_workgroup + b
    # The batch may not fill the last block completely; excess threads only
    # contribute zeros to the shared-memory reductions below.
    in_batch = batch_idx <= length(rotations)
    # Shared scratch: d(s)/d(rotated point) per (dim, voxel, pose-in-block).
    dimension1 = (N_out, n_voxel, batchsize_per_workgroup)
    ds_dpoint_rot_shared = CuDynamicSharedArray(T, dimension1)
    offset = sizeof(T) * prod(dimension1)
    # Shared scratch: d(s)/d(point) per (dim, pose-in-block).
    dimension2 = (N_in, batchsize_per_workgroup)
    ds_dpoint_shared = CuDynamicSharedArray(T, dimension2, offset)
    # Shared scratch: d(s)/d(point weight) per (voxel, pose-in-block).
    dimension3 = (n_voxel, batchsize_per_workgroup)
    offset += sizeof(T) * prod(dimension2)
    ds_dpoint_weight_shared = CuDynamicSharedArray(T, dimension3, offset)
    rotation = @inbounds in_batch ? rotations[batch_idx] : @SMatrix zeros(TR, N_in, N_in)
    point = @inbounds points[point_idx]
    point_weight = @inbounds point_weights[point_idx]
    if in_batch
        translation = @inbounds translations[batch_idx]
        out_weight = @inbounds out_weights[batch_idx]
        shift = @inbounds shifts[neighbor_voxel_id]
        origin = (-@SVector ones(TT, N_out)) - translation
        coord_reference_voxel, deltas = DiffPointRasterisation.reference_coordinate_and_deltas(
            point, rotation, origin, scale
        )
        voxel_idx = CartesianIndex(
            CartesianIndex(Tuple(coord_reference_voxel)) + CartesianIndex(shift), batch_idx
        )
        ds_dweight_local = zero(T)
        if voxel_idx in CartesianIndices(ds_dout)
            @inbounds ds_dweight_local = DiffPointRasterisation.voxel_weight(
                deltas, shift, ds_dout[voxel_idx]
            )
            factor = ds_dout[voxel_idx] * out_weight * point_weight
            ds_dcoord_part = SVector(
                factor .* ntuple(
                    n -> DiffPointRasterisation.interpolation_weight(
                        n, N_out, deltas, shift
                    ),
                    Val(N_out),
                ),
            )
            @inbounds ds_dpoint_rot_shared[:, s, b] .= ds_dcoord_part .* scale
        else
            # Contribution fell outside the grid: no gradient from this voxel.
            @inbounds ds_dpoint_rot_shared[:, s, b] .= zero(T)
        end
        @inbounds ds_dpoint_weight_shared[s, b] = ds_dweight_local * out_weight
        ds_dout_weight_local = ds_dweight_local * point_weight
        @inbounds CUDA.@atomic ds_dout_weight[batch_idx] += ds_dout_weight_local
    else
        # Out-of-batch threads contribute zeros so reductions stay correct.
        @inbounds ds_dpoint_weight_shared[s, b] = zero(T)
        @inbounds ds_dpoint_rot_shared[:, s, b] .= zero(T)
    end
    # parallel summation of ds_dpoint_rot_shared over neighboring-voxel dimension
    # for a given thread-local batch index
    stride = 1
    @inbounds while stride < n_voxel
        sync_threads()
        idx = 2 * stride * (s - 1) + 1
        if idx <= n_voxel
            dim = 1
            while dim <= N_out
                other_val_p = if idx + stride <= n_voxel
                    ds_dpoint_rot_shared[dim, idx + stride, b]
                else
                    zero(T)
                end
                ds_dpoint_rot_shared[dim, idx, b] += other_val_p
                dim += 1
            end
        end
        stride *= 2
    end
    sync_threads()
    if in_batch
        # After the reduction the summed value lives at voxel slot 1; threads
        # s = 1..N_out each handle one output dimension.
        dim = s
        if dim <= N_out
            coef = ds_dpoint_rot_shared[dim, 1, b]
            @inbounds CUDA.@atomic ds_dtranslation[dim, batch_idx] += coef
            j = 1
            while j <= N_in
                val = coef * point[j]
                @inbounds CUDA.@atomic ds_drotation[dim, j, batch_idx] += val
                j += 1
            end
        end
    end
    # chain rule through the rotation, per thread-local batch index:
    # ds/d(point) = rotationᵀ * ds/d(rotated point)
    dim = s
    while dim <= N_in
        val = zero(T)
        j = 1
        while j <= N_out
            @inbounds val += rotation[j, dim] * ds_dpoint_rot_shared[j, 1, b]
            j += 1
        end
        @inbounds ds_dpoint_shared[dim, b] = val
        dim += n_voxel
    end
    # parallel summation of ds_dpoint_shared over batch dimension
    stride = 1
    @inbounds while stride < batchsize_per_workgroup
        sync_threads()
        idx = 2 * stride * (b - 1) + 1
        if idx <= batchsize_per_workgroup
            dim = s
            while dim <= N_in
                other_val_p = if idx + stride <= batchsize_per_workgroup
                    ds_dpoint_shared[dim, idx + stride]
                else
                    zero(T)
                end
                ds_dpoint_shared[dim, idx] += other_val_p
                dim += n_voxel
            end
        end
        stride *= 2
    end
    # parallel summation of ds_dpoint_weight_shared over voxel and batch dimension
    stride = 1
    @inbounds while stride < n_threads_per_workgroup
        sync_threads()
        idx = 2 * stride * (thread - 1) + 1
        if idx <= n_threads_per_workgroup
            other_val_w = if idx + stride <= n_threads_per_workgroup
                ds_dpoint_weight_shared[idx + stride]
            else
                zero(T)
            end
            ds_dpoint_weight_shared[idx] += other_val_w
        end
        stride *= 2
    end
    sync_threads()
    dim = thread
    while dim <= N_in
        val = ds_dpoint_shared[dim, 1]
        # batch might be split across blocks, so need atomic add
        @inbounds CUDA.@atomic ds_dpoints[dim, point_idx] += val
        dim += n_threads_per_workgroup
    end
    if thread == 1
        val_w = ds_dpoint_weight_shared[1, 1]
        # batch might be split across blocks, so need atomic add
        @inbounds CUDA.@atomic ds_dpoint_weight[point_idx] += val_w
    end
    return nothing
end
# single image
# The single-pose pullback is not implemented on the GPU; fail loudly rather
# than silently falling back to a slow or incorrect path.
# NOTE(review): this extends the *local* name `raster_pullback!`, not
# `DiffPointRasterisation.raster_pullback!` like the batched method below —
# confirm this is intentional.
function raster_pullback!(
    ds_dout::CuArray{<:Number,N_out},
    points::AbstractVector{<:StaticVector{N_in,<:Number}},
    rotation::StaticMatrix{N_out,N_in,<:Number},
    translation::StaticVector{N_out,<:Number},
    background::Number,
    out_weight::Number,
    point_weight::CuOrFillVector{<:Number},
    ds_dpoints::AbstractMatrix{<:Number},
    ds_dpoint_weight::AbstractVector{<:Number};
    kwargs...,
) where {N_in,N_out}
    msg = "Not implemented: raster_pullback! for single image not implemented on GPU. Consider using CPU arrays"
    return error(msg)
end
# batch of images
# Launch `raster_pullback_kernel!` over (points × poses × 2^N_out neighbor
# voxels) work items and return the accumulated input sensitivities as a
# named tuple.
function DiffPointRasterisation.raster_pullback!(
    ds_dout::CuArray{<:Number,N_out_p1},
    points::CuVector{<:StaticVector{N_in,<:Number}},
    rotation::CuVector{<:StaticMatrix{N_out,N_in,<:Number}},
    translation::CuVector{<:StaticVector{N_out,<:Number}},
    background::CuOrFillVector{<:Number},
    out_weight::CuOrFillVector{<:Number},
    point_weight::CuOrFillVector{<:Number},
    ds_dpoints::CuMatrix{TP},
    ds_drotation::CuArray{TR,3},
    ds_dtranslation::CuMatrix{TT},
    ds_dbackground::CuVector{<:Number},
    ds_dout_weight::CuVector{OW},
    ds_dpoint_weight::CuVector{PW},
) where {N_in,N_out,N_out_p1,TP<:Number,TR<:Number,TT<:Number,OW<:Number,PW<:Number}
    # Common accumulation type for the kernel's shared-memory buffers.
    T = promote_type(eltype(ds_dout), TP, TR, TT, OW, PW)
    batch_axis = axes(ds_dout, N_out_p1)
    @argcheck N_out == N_out_p1 - 1
    @argcheck batch_axis ==
        axes(rotation, 1) ==
        axes(translation, 1) ==
        axes(background, 1) ==
        axes(out_weight, 1)
    @argcheck batch_axis ==
        axes(ds_drotation, 3) ==
        axes(ds_dtranslation, 2) ==
        axes(ds_dbackground, 1) ==
        axes(ds_dout_weight, 1)
    # NOTE(review): duplicate of the first check above; harmless but redundant.
    @argcheck N_out == N_out_p1 - 1
    n_points = length(points)
    @argcheck length(ds_dpoint_weight) == n_points
    batch_size = length(batch_axis)
    # The background sensitivity is simply ds_dout summed over spatial dims.
    ds_dbackground = vec(
        sum!(reshape(ds_dbackground, ntuple(_ -> 1, Val(N_out))..., batch_size), ds_dout)
    )
    # Half the grid size per dimension (same convention as the forward pass).
    scale = SVector{N_out,T}(size(ds_dout)[1:(end - 1)]) / T(2)
    shifts = DiffPointRasterisation.voxel_shifts(Val(N_out))
    # Zero all accumulators; the kernel adds into them atomically.
    ds_dpoints = fill!(ds_dpoints, zero(TP))
    ds_drotation = fill!(ds_drotation, zero(TR))
    ds_dtranslation = fill!(ds_dtranslation, zero(TT))
    ds_dout_weight = fill!(ds_dout_weight, zero(OW))
    ds_dpoint_weight = fill!(ds_dpoint_weight, zero(PW))
    args = (
        T,
        ds_dout,
        points,
        rotation,
        translation,
        out_weight,
        point_weight,
        shifts,
        scale,
        ds_dpoints,
        ds_drotation,
        ds_dtranslation,
        ds_dout_weight,
        ds_dpoint_weight,
    )
    ndrange = (n_points, batch_size, 2^N_out)
    # One point per workgroup in x, 2^N_out voxel threads in z, and as many
    # poses in y as the thread budget allows.
    workgroup_size(threads) = (1, min(threads ÷ (2^N_out), batch_size), 2^N_out)
    # Dynamic shared memory required by raster_pullback_kernel! (three
    # scratch arrays; see the kernel for the layout).
    function shmem(threads)
        _, bs_p_wg, n_voxel = workgroup_size(threads)
        return ((N_out + 1) * n_voxel + N_in) * bs_p_wg * sizeof(T)
        # ((N_out + 1) * threads + N_in * bs_p_wg) * sizeof(T)
    end
    # Query the occupancy-optimal launch configuration, then launch.
    let kernel = @cuda launch = false raster_pullback_kernel!(args...)
        config = CUDA.launch_configuration(kernel.fun; shmem)
        workgroup_sz = workgroup_size(config.threads)
        blocks = cld.(ndrange, workgroup_sz)
        kernel(args...; threads=workgroup_sz, blocks=blocks, shmem=shmem(config.threads))
    end
    return (;
        points=ds_dpoints,
        rotation=ds_drotation,
        translation=ds_dtranslation,
        background=ds_dbackground,
        out_weight=ds_dout_weight,
        point_weight=ds_dpoint_weight,
    )
end
# On the GPU the point gradient is accumulated atomically into one buffer,
# so no per-thread copies are needed (`batch_size` is accepted but unused).
function DiffPointRasterisation.default_ds_dpoints_batched(
    points::CuVector{<:AbstractVector{TP}}, N_in, batch_size
) where {TP<:Number}
    n_points = length(points)
    return similar(points, TP, (N_in, n_points))
end
# One weight-gradient entry per point, accumulated atomically on the GPU
# (`batch_size` is accepted but unused).
function DiffPointRasterisation.default_ds_dpoint_weight_batched(
    points::CuVector{<:AbstractVector{<:Number}}, T, batch_size
)
    n_points = length(points)
    return similar(points, T, n_points)
end
end # module | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 3040 | module DiffPointRasterisationChainRulesCoreExt
using DiffPointRasterisation, ChainRulesCore, StaticArrays
# single image
# rrule for `raster` with a single pose and statically sized points.
function ChainRulesCore.rrule(
    ::typeof(DiffPointRasterisation.raster),
    grid_size,
    points::AbstractVector{<:StaticVector{N_in,T}},
    rotation::AbstractMatrix{<:Number},
    translation::AbstractVector{<:Number},
    optional_args...,
) where {N_in,T<:Number}
    out = raster(grid_size, points, rotation, translation, optional_args...)
    function raster_pullback(ds_dout)
        out_pb = raster_pullback!(
            unthunk(ds_dout), points, rotation, translation, optional_args...
        )
        # Reinterpret the (N_in, n_points) sensitivity matrix as a vector of
        # static vectors, matching the primal `points` layout.
        ds_dpoints = reinterpret(reshape, SVector{N_in,T}, out_pb.points)
        # Tangent order: (function, grid_size, points, remaining args...).
        return NoTangent(),
        NoTangent(), ds_dpoints,
        values(out_pb)[2:(3 + length(optional_args))]...
    end
    return out, raster_pullback
end
# Fallback for non-static point vectors: promote the points to statically
# sized vectors and reuse the static-array rrule above.
function ChainRulesCore.rrule(
    f::typeof(DiffPointRasterisation.raster),
    grid_size,
    points::AbstractVector{<:AbstractVector{<:Number}},
    rotation::AbstractMatrix{<:Number},
    translation::AbstractVector{<:Number},
    optional_args...,
)
    sized_points = DiffPointRasterisation.inner_to_sized(points)
    return ChainRulesCore.rrule(
        f, grid_size, sized_points, rotation, translation, optional_args...
    )
end
# batch of images
# rrule for `raster` with one pose per batch element (statically sized args).
function ChainRulesCore.rrule(
    ::typeof(DiffPointRasterisation.raster),
    grid_size,
    points::AbstractVector{<:StaticVector{N_in,TP}},
    rotation::AbstractVector{<:StaticMatrix{N_out,N_in,TR}},
    translation::AbstractVector{<:StaticVector{N_out,TT}},
    optional_args...,
) where {N_in,N_out,TP<:Number,TR<:Number,TT<:Number}
    out = raster(grid_size, points, rotation, translation, optional_args...)
    function raster_pullback(ds_dout)
        out_pb = raster_pullback!(
            unthunk(ds_dout), points, rotation, translation, optional_args...
        )
        # Reinterpret the flat sensitivity arrays as vectors of static
        # arrays, matching the primal argument layouts.
        ds_dpoints = reinterpret(reshape, SVector{N_in,TP}, out_pb.points)
        L = N_out * N_in
        ds_drotation = reinterpret(
            reshape, SMatrix{N_out,N_in,TR,L}, reshape(out_pb.rotation, L, :)
        )
        ds_dtranslation = reinterpret(reshape, SVector{N_out,TT}, out_pb.translation)
        # Tangent order: (function, grid_size, points, rotation, translation,
        # remaining optional args...).
        return NoTangent(),
        NoTangent(), ds_dpoints, ds_drotation, ds_dtranslation,
        values(out_pb)[4:(3 + length(optional_args))]...
    end
    return out, raster_pullback
end
# Fallback for non-static batched arguments: promote every inner array to a
# statically sized one, then reuse the static-array rrule above.
function ChainRulesCore.rrule(
    f::typeof(DiffPointRasterisation.raster),
    grid_size,
    points::AbstractVector{<:AbstractVector{<:Number}},
    rotation::AbstractVector{<:AbstractMatrix{<:Number}},
    translation::AbstractVector{<:AbstractVector{<:Number}},
    optional_args...,
)
    sized = DiffPointRasterisation.inner_to_sized
    return ChainRulesCore.rrule(
        f,
        grid_size,
        sized(points),
        sized(rotation),
        sized(translation),
        optional_args...,
    )
end
end # module DiffPointRasterisationChainRulesCoreExt | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 318 | module DiffPointRasterisation
using ArgCheck
using Atomix
using ChunkSplitters
using FillArrays
using KernelAbstractions
using SimpleUnPack
using StaticArrays
using TestItems
include("util.jl")
include("raster.jl")
include("raster_pullback.jl")
include("interface.jl")
export raster, raster!, raster_pullback!
end
| DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 20059 | """
raster(grid_size, points, rotation, translation, [background, out_weight])
Interpolate points (multi-) linearly into an Nd-array of size `grid_size`.
Before `points` are interpolated into the array, each point ``p`` is first
transformed according to
```math
\\hat{p} = R p + t
```
with `rotation` ``R`` and `translation` ``t``.
Points ``\\hat{p}`` that fall into the N-dimensional hypercube
with edges spanning from (-1, 1) in each dimension, are interpolated
into the output array.
The total weight of each point (`out_weight * point_weight`) is
distributed onto the 2^N nearest pixels/voxels of the output array
(according to the closeness of the voxel center to the coordinates
of point ``\\hat{p}``) via N-linear interpolation.
# Arguments
- `grid_size`: Tuple of integers defining the output dimensions
- `points::AbstractVector{<:AbstractVector}`: A vector of same length
vectors representing points
- `rotation`: Either a single matrix(-like object) or a vector of such,
that linearly transform(s) `points` before rasterisation.
- `translation`: Either a single vector or a vector of such, that
translates `points` *after* `rotation`. If `rotation` includes a
projection, `translation` thus needs to have the same length as
`rotation * points[i]`.
- `background`: Either a single number or a vector of such.
- `out_weight`: Either a single number or a vector (one per image) of such.
- `point_weight`: A vector of numbers (one per point).
`rotation`, `translation`, `background` and `out_weight` can have an
additional "batch" dimension (by providing them as vectors of single
parameters. The length of these vectors must be the same for all four
arguments).
In this case, the output array will have dimensionality +1 with an
additional axis on last position corresponding to the number of
elements in the batch.
See [Raster a single point cloud to a batch of poses](@ref) for more
details.
See also: [`raster!`](@ref)
"""
function raster end
"""
raster!(out, points, rotation, translation, [background, out_weight, point_weight])
Interpolate points (multi-) linearly into the Nd-array `out`.
In-place version of [`raster`](@ref). See there for details.
"""
function raster! end
###############################################
# Step 1: Allocate output
###############################################
# Allocate the output array (eltype = promotion of all input eltypes, with a
# trailing batch axis for batched input) and delegate to `raster!`.
function raster(grid_size::Tuple, args...)
    eltypes = deep_eltype.(args)
    T = promote_type(eltypes...)
    points = args[1]
    rotation = args[2]
    if isa(rotation, AbstractMatrix)
        # non-batched: a single pose, output has exactly `grid_size` dims
        out = similar(points, T, grid_size)
    else
        # batched: one pose per element, trailing batch axis on the output.
        # Explicit error instead of `@assert` so invalid input fails even
        # when assertions are compiled out.
        isa(rotation, AbstractVector{<:AbstractMatrix}) || throw(
            ArgumentError(
                "rotation must be an AbstractMatrix or a vector of matrices, got $(typeof(rotation))",
            ),
        )
        batch_size = length(rotation)
        out = similar(points, T, (grid_size..., batch_size))
    end
    return raster!(out, args...)
end
"""
    deep_eltype(x)

Recursively unwrap nested array types and return the innermost element type.
For non-array values or types, return the type itself.
"""
function deep_eltype(el)
    return deep_eltype(typeof(el))
end
function deep_eltype(t::Type)
    return t
end
function deep_eltype(t::Type{<:AbstractArray})
    return deep_eltype(eltype(t))
end
###############################################
# Step 2: Fill default arguments if necessary
###############################################
# Fill in missing trailing arguments one at a time:
# 3 args (points, rotation, translation) -> add default background,
# 4 args -> add default out_weight, 5 args -> add default point_weight.
@inline raster!(out::AbstractArray{<:Number}, args::Vararg{Any,3}) =
    raster!(out, args..., default_background(args[2]))
@inline raster!(out::AbstractArray{<:Number}, args::Vararg{Any,4}) =
    raster!(out, args..., default_out_weight(args[2]))
@inline raster!(out::AbstractArray{<:Number}, args::Vararg{Any,5}) =
    raster!(out, args..., default_point_weight(args[1]))
###############################################
# Step 3: Convenience interface for single image:
# Convert arguments for single image to
# length-1 vec of arguments
###############################################
# Single-image convenience method: wrap the per-pose parameters in length-1
# static vectors, rasterise as a batch of one, and strip the singleton batch
# axis from the result again.
function raster!(
    out::AbstractArray{<:Number},
    points::AbstractVector{<:AbstractVector{<:Number}},
    rotation::AbstractMatrix{<:Number},
    translation::AbstractVector{<:Number},
    background::Number,
    weight::Number,
    point_weight::AbstractVector{<:Number},
)
    return drop_last_dim(
        raster!(
            append_singleton_dim(out),
            points,
            @SVector([rotation]),
            @SVector([translation]),
            @SVector([background]),
            @SVector([weight]),
            point_weight,
        ),
    )
end
###############################################
# Step 4: Convert arguments to canonical form,
# i.e. vectors of statically sized arrays
###############################################
# Canonicalise all six vector arguments to vectors of statically sized
# arrays, then dispatch to the actual implementation.
function raster!(out::AbstractArray{<:Number}, args::Vararg{AbstractVector,6})
    sized_args = map(inner_to_sized, args)
    return raster!(out, sized_args...)
end
###############################################
# Step 5: Error on inconsistent dimensions
###############################################
# if N_out_rot == N_out_trans this should not be called
# because the actual implementation specializes on N_out
# This method is only reached when the statically known dimensions of the
# arguments are mutually inconsistent (the consistent case is handled by the
# specialized implementation), so its sole job is to raise an informative
# error. Delegating to the shared `error_dimensions` helper keeps the
# messages consistent with the `raster_pullback!` error methods below.
function raster!(
    ::AbstractArray{<:Number,N_out},
    ::AbstractVector{<:StaticVector{N_in,<:Number}},
    ::AbstractVector{<:StaticMatrix{N_out_rot,N_in_rot,<:Number}},
    ::AbstractVector{<:StaticVector{N_out_trans,<:Number}},
    ::AbstractVector{<:Number},
    ::AbstractVector{<:Number},
    ::AbstractVector{<:Number},
) where {N_in,N_out,N_in_rot,N_out_rot,N_out_trans}
    return error_dimensions(N_in, N_out, N_in_rot, N_out_rot, N_out_trans)
end
# now similar for pullback
"""
raster_pullback!(
ds_dout, args...;
[points, rotation, translation, background, out_weight, point_weight]
)
Pullback for [`raster`](@ref) / [`raster!`](@ref).
Take as input `ds_dout` the sensitivity of some quantity (`s` for "scalar")
to the *output* `out` of the function `out = raster(grid_size, args...)`
(or `out = raster!(out, args...)`), as well as
the exact same arguments `args` that were passed to `raster`/`raster!`, and
return the sensitivities of `s` to the *inputs* `args` of the function
`raster`/`raster!`.
Optionally, pre-allocated output arrays for each input sensitivity can be
specified as keyword arguments with the name of the original argument to
`raster` as key, and a nd-array as value, where the n-th dimension is the
batch dimension.
For example to provide a pre-allocated array for the sensitivity of `s` to
the `translation` argument of `raster`, do:
`sensitivities = raster_pullback!(ds_dout, args...; translation = [zeros(2) for _ in 1:8])`
for 2-dimensional points and a batch size of 8.
See also [Raster a single point cloud to a batch of poses](@ref)
"""
function raster_pullback! end
###############################################
# Step 1: Fill default arguments if necessary
###############################################
# Fill in missing trailing arguments one at a time (mirrors `raster!`):
# 3 args -> default background, 4 -> default out_weight, 5 -> default
# point_weight. (Renamed `ds_out` -> `ds_dout` in the first method for
# consistency with every other method in this file.)
@inline raster_pullback!(ds_dout::AbstractArray{<:Number}, args::Vararg{Any,3}; kwargs...) =
    raster_pullback!(ds_dout, args..., default_background(args[2]); kwargs...)
@inline raster_pullback!(ds_dout::AbstractArray{<:Number}, args::Vararg{Any,4}; kwargs...) =
    raster_pullback!(ds_dout, args..., default_out_weight(args[2]); kwargs...)
@inline raster_pullback!(ds_dout::AbstractArray{<:Number}, args::Vararg{Any,5}; kwargs...) =
    raster_pullback!(ds_dout, args..., default_point_weight(args[1]); kwargs...)
###############################################
# Step 2: Convert arguments to canonical form,
# i.e. vectors of statically sized arrays
###############################################
# single image
# Canonicalise the array arguments to statically sized equivalents, then
# re-dispatch onto the single-image implementation.
function raster_pullback!(
    ds_dout::AbstractArray{<:Number},
    points::AbstractVector{<:AbstractVector{<:Number}},
    rotation::AbstractMatrix{<:Number},
    translation::AbstractVector{<:Number},
    background::Number,
    out_weight::Number,
    point_weight::AbstractVector{<:Number};
    kwargs...,
)
    return raster_pullback!(
        ds_dout,
        inner_to_sized(points),
        to_sized(rotation),
        to_sized(translation),
        background,
        out_weight,
        point_weight;
        kwargs...,
    )
end
# batch of images
# Canonicalise all six vector arguments to vectors of statically sized
# arrays, then re-dispatch.
function raster_pullback!(
    ds_dout::AbstractArray{<:Number}, args::Vararg{AbstractVector,6}; kwargs...
)
    sized_args = map(inner_to_sized, args)
    return raster_pullback!(ds_dout, sized_args...; kwargs...)
end
###############################################
# Step 3: Allocate output
###############################################
# single image
# Allocate default buffers for the `points` and `point_weight` sensitivities
# (unless supplied as keyword arguments), then forward everything to the
# positional-only implementation.
function raster_pullback!(
    ds_dout::AbstractArray{<:Number,N_out},
    inp_points::AbstractVector{<:StaticVector{N_in,TP}},
    inp_rotation::StaticMatrix{N_out,N_in,<:Number},
    inp_translation::StaticVector{N_out,<:Number},
    inp_background::Number,
    inp_out_weight::Number,
    inp_point_weight::AbstractVector{PW};
    points::AbstractMatrix{TP}=default_ds_dpoints_single(inp_points, N_in),
    point_weight::AbstractVector{PW}=similar(inp_points, PW),
    kwargs...,
) where {N_in,N_out,TP<:Number,PW<:Number}
    return raster_pullback!(
        ds_dout,
        inp_points,
        inp_rotation,
        inp_translation,
        inp_background,
        inp_out_weight,
        inp_point_weight,
        points,
        point_weight;
        kwargs...,
    )
end
# batch of images
# Allocate any output-sensitivity buffers not supplied as keyword arguments,
# then call the positional-only implementation. The default `points` and
# `point_weight` buffers carry an extra trailing dimension of size
# `min(batch_size, Threads.nthreads())` (see `default_ds_dpoints_batched`).
function raster_pullback!(
    ds_dout::AbstractArray{<:Number},
    inp_points::AbstractVector{<:StaticVector{N_in,TP}},
    inp_rotation::AbstractVector{<:StaticMatrix{N_out,N_in,TR}},
    inp_translation::AbstractVector{<:StaticVector{N_out,TT}},
    inp_background::AbstractVector{TB},
    inp_out_weight::AbstractVector{OW},
    inp_point_weight::AbstractVector{PW};
    points::AbstractArray{TP}=default_ds_dpoints_batched(
        inp_points, N_in, length(inp_rotation)
    ),
    rotation::AbstractArray{TR,3}=similar(
        inp_points, TR, (N_out, N_in, length(inp_rotation))
    ),
    translation::AbstractMatrix{TT}=similar(
        inp_points, TT, (N_out, length(inp_translation))
    ),
    background::AbstractVector{TB}=similar(inp_points, TB, (length(inp_background))),
    out_weight::AbstractVector{OW}=similar(inp_points, OW, (length(inp_out_weight))),
    point_weight::AbstractArray{PW}=default_ds_dpoint_weight_batched(
        inp_points, PW, length(inp_rotation)
    ),
) where {N_in,N_out,TP<:Number,TR<:Number,TT<:Number,TB<:Number,OW<:Number,PW<:Number}
    return raster_pullback!(
        ds_dout,
        inp_points,
        inp_rotation,
        inp_translation,
        inp_background,
        inp_out_weight,
        inp_point_weight,
        points,
        rotation,
        translation,
        background,
        out_weight,
        point_weight,
    )
end
###############################################
# Step 4: Error on inconsistent dimensions
###############################################
# single image
# Only reached when the statically known dimensions are inconsistent (the
# consistent case is handled by the specialized implementation); exists
# purely to produce an informative error.
function raster_pullback!(
    ::AbstractArray{<:Number,N_out},
    ::AbstractVector{<:StaticVector{N_in,<:Number}},
    ::StaticMatrix{N_out_rot,N_in_rot,<:Number},
    ::StaticVector{N_out_trans,<:Number},
    ::Number,
    ::Number,
    ::AbstractVector{<:Number},
    ::AbstractMatrix{<:Number},
    ::AbstractVector{<:Number};
    kwargs...,
) where {N_in,N_out,N_in_rot,N_out_rot,N_out_trans}
    return error_dimensions(N_in, N_out, N_in_rot, N_out_rot, N_out_trans)
end
# batch of images
# Batched counterpart of the error method above; note the output array has
# one extra (batch) dimension, hence `N_out_p1 - 1`.
function raster_pullback!(
    ::AbstractArray{<:Number,N_out_p1},
    ::AbstractVector{<:StaticVector{N_in,<:Number}},
    ::AbstractVector{<:StaticMatrix{N_out_rot,N_in_rot,<:Number}},
    ::AbstractVector{<:StaticVector{N_out_trans,<:Number}},
    ::AbstractVector{<:Number},
    ::AbstractVector{<:Number},
    ::AbstractVector{<:Number},
    ::AbstractArray{<:Number},
    ::AbstractArray{<:Number,3},
    ::AbstractMatrix{<:Number},
    ::AbstractVector{<:Number},
    ::AbstractVector{<:Number},
    ::AbstractArray{<:Number},
) where {N_in,N_out_p1,N_in_rot,N_out_rot,N_out_trans}
    return error_dimensions(N_in, N_out_p1 - 1, N_in_rot, N_out_rot, N_out_trans)
end
"""
    error_dimensions(N_in, N_out, N_in_rot, N_out_rot, N_out_trans)

Throw an error describing which statically known dimensions of the
rasterisation arguments disagree. Always throws; the final branch is only
reached if the dimensions are in fact consistent, which indicates a
dispatch bug. (Fixes the "dimentsion" typo in the error messages.)
"""
function error_dimensions(N_in, N_out, N_in_rot, N_out_rot, N_out_trans)
    if N_out_trans != N_out
        error(
            "Dimension of translation (got $N_out_trans) and output dimension (got $N_out) must agree!",
        )
    end
    if N_out_rot != N_out
        error(
            "Row dimension of rotation (got $N_out_rot) and output dimension (got $N_out) must agree!",
        )
    end
    if N_in_rot != N_in
        error(
            "Column dimension of rotation (got $N_in_rot) and points (got $N_in) must agree!",
        )
    end
    return error("Dispatch error. Should not arrive here. Please file a bug.")
end
# Default background: zero. A scalar for a single pose; a lazy zero-vector
# (one entry per pose) for batched input.
default_background(rotation::AbstractMatrix, T=eltype(rotation)) = zero(T)
function default_background(
    rotation::AbstractVector{<:AbstractMatrix}, T=eltype(eltype(rotation))
)
    batch_size = length(rotation)
    return Zeros(T, batch_size)
end
function default_background(rotation::AbstractArray{_T,3} where {_T}, T=eltype(rotation))
    batch_size = size(rotation, 3)
    return Zeros(T, batch_size)
end
# Default output weight: one. A scalar for a single pose; a lazy unit vector
# (one entry per pose) for batched input.
default_out_weight(rotation::AbstractMatrix, T=eltype(rotation)) = one(T)
function default_out_weight(
    rotation::AbstractVector{<:AbstractMatrix}, T=eltype(eltype(rotation))
)
    batch_size = length(rotation)
    return Ones(T, batch_size)
end
function default_out_weight(rotation::AbstractArray{_T,3} where {_T}, T=eltype(rotation))
    batch_size = size(rotation, 3)
    return Ones(T, batch_size)
end
# Default per-point weight: a lazy unit weight for every point.
function default_point_weight(points::AbstractVector{<:AbstractVector{T}}) where {T<:Number}
    n_points = length(points)
    return Ones(T, n_points)
end
# Buffer for d(s)/d(points) in the single-image pullback:
# an (N_in, n_points) matrix with the points' element type.
function default_ds_dpoints_single(
    points::AbstractVector{<:AbstractVector{TP}}, N_in
) where {TP<:Number}
    n_points = length(points)
    return similar(points, TP, (N_in, n_points))
end
# Buffer for d(s)/d(points) in the batched pullback: one (N_in, n_points)
# slice per accumulation buffer, capped at the number of Julia threads.
function default_ds_dpoints_batched(
    points::AbstractVector{<:AbstractVector{TP}}, N_in, batch_size
) where {TP<:Number}
    n_buffers = min(batch_size, Threads.nthreads())
    return similar(points, TP, (N_in, length(points), n_buffers))
end
# Buffer for d(s)/d(point_weight) in the batched pullback: one length
# n_points column per accumulation buffer, capped at the number of threads.
function default_ds_dpoint_weight_batched(
    points::AbstractVector{<:AbstractVector{<:Number}}, T, batch_size
)
    n_buffers = min(batch_size, Threads.nthreads())
    return similar(points, T, (length(points), n_buffers))
end
@testitem "raster interface" begin
include("../test/data.jl")
@testset "no projection" begin
local out
@testset "canonical arguments (vec of staticarray)" begin
out = raster(
D.grid_size_3d,
D.points_static,
D.rotations_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "reinterpret nd-array as vec-of-array" begin
@test out ≈ raster(
D.grid_size_3d,
D.points_reinterp,
D.rotations_reinterp,
D.translations_3d_reinterp,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "point as non-static vector" begin
@test out ≈ raster(
D.grid_size_3d,
D.points,
D.rotations_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "rotation as non-static matrix" begin
@test out ≈ raster(
D.grid_size_3d,
D.points_static,
D.rotations,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "translation as non-static vector" begin
@test out ≈ raster(
D.grid_size_3d,
D.points_static,
D.rotations_static,
D.translations_3d,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "all as non-static array" begin
@test out ≈ raster(
D.grid_size_3d,
D.points,
D.rotations,
D.translations_3d,
D.backgrounds,
D.weights,
D.point_weights,
)
end
out = raster(
D.grid_size_3d,
D.points_static,
D.rotations_static,
D.translations_3d_static,
zeros(D.batch_size),
ones(D.batch_size),
ones(length(D.points_static)),
)
@testset "default argmuments canonical" begin
@test out ≈ raster(
D.grid_size_3d,
D.points_static,
D.rotations_static,
D.translations_3d_static,
)
end
@testset "default arguments all as non-static array" begin
@test out ≈ raster(D.grid_size_3d, D.points, D.rotations, D.translations_3d)
end
end
@testset "projection" begin
local out
@testset "canonical arguments (vec of staticarray)" begin
out = raster(
D.grid_size_2d,
D.points_static,
D.projections_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "reinterpret nd-array as vec-of-array" begin
@test out ≈ raster(
D.grid_size_2d,
D.points_reinterp,
D.projections_reinterp,
D.translations_2d_reinterp,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "point as non-static vector" begin
@test out ≈ raster(
D.grid_size_2d,
D.points,
D.projections_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "projection as non-static matrix" begin
@test out ≈ raster(
D.grid_size_2d,
D.points_static,
D.projections,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "translation as non-static vector" begin
@test out ≈ raster(
D.grid_size_2d,
D.points_static,
D.projections_static,
D.translations_2d,
D.backgrounds,
D.weights,
D.point_weights,
)
end
@testset "all as non-static array" begin
@test out ≈ raster(
D.grid_size_2d,
D.points_static,
D.projections,
D.translations_2d,
D.backgrounds,
D.weights,
D.point_weights,
)
end
out = raster(
D.grid_size_2d,
D.points_static,
D.projections_static,
D.translations_2d_static,
zeros(D.batch_size),
ones(D.batch_size),
ones(length(D.points_static)),
)
@testset "default argmuments canonical" begin
@test out ≈ raster(
D.grid_size_2d,
D.points_static,
D.projections_static,
D.translations_2d_static,
)
end
@testset "default arguments all as non-static array" begin
@test out ≈ raster(D.grid_size_2d, D.points, D.projections, D.translations_2d)
end
end
end | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 12012 | ###############################################
# Step 6: Actual implementation
###############################################
# Actual batched implementation: validates shapes, initialises the output
# with the per-pose background, and launches the KernelAbstractions kernel
# with one work item per (neighbor voxel, point, pose) triple.
function raster!(
    out::AbstractArray{T,N_out_p1},
    points::AbstractVector{<:StaticVector{N_in,<:Number}},
    rotation::AbstractVector{<:StaticMatrix{N_out,N_in,<:Number}},
    translation::AbstractVector{<:StaticVector{N_out,<:Number}},
    background::AbstractVector{<:Number},
    out_weight::AbstractVector{<:Number},
    point_weight::AbstractVector{<:Number},
) where {T<:Number,N_in,N_out,N_out_p1}
    @argcheck N_out == N_out_p1 - 1 DimensionMismatch
    out_batch_dim = ndims(out)
    batch_size = size(out, out_batch_dim)
    @argcheck batch_size ==
        length(rotation) ==
        length(translation) ==
        length(background) ==
        length(out_weight) DimensionMismatch
    n_points = length(points)
    @argcheck length(point_weight) == n_points
    # Half the grid size per dimension: maps the (-1, 1) hypercube onto
    # pixel/voxel coordinates.
    scale = SVector{N_out,T}(size(out)[1:(end - 1)]) / T(2)
    shifts = voxel_shifts(Val(N_out))
    # Initialise every pose's slice with its background value.
    out .= reshape(background, ntuple(_ -> 1, Val(N_out))..., length(background))
    args = (out, points, rotation, translation, out_weight, point_weight, shifts, scale)
    backend = get_backend(out)
    ndrange = (2^N_out, n_points, batch_size)
    workgroup_size = 1024
    raster_kernel!(backend, workgroup_size, ndrange)(args...)
    return out
end
# KernelAbstractions kernel: scatter one point's interpolation weight into
# one of its 2^N_out neighboring voxels for one pose.
@kernel function raster_kernel!(
    out::AbstractArray{T},
    points,
    rotations,
    translations::AbstractVector{<:StaticVector{N_out}},
    out_weights,
    point_weights,
    shifts,
    scale,
) where {T,N_out}
    neighbor_voxel_id, point_idx, batch_idx = @index(Global, NTuple)
    point = @inbounds points[point_idx]
    rotation = @inbounds rotations[batch_idx]
    translation = @inbounds translations[batch_idx]
    # Total weight of this point in this pose.
    weight = @inbounds out_weights[batch_idx] * point_weights[point_idx]
    shift = @inbounds shifts[neighbor_voxel_id]
    # Lower corner of the (-1, 1) hypercube, shifted by the pose translation.
    origin = (-@SVector ones(T, N_out)) - translation
    coord_reference_voxel, deltas = reference_coordinate_and_deltas(
        point, rotation, origin, scale
    )
    voxel_idx = CartesianIndex(
        CartesianIndex(Tuple(coord_reference_voxel)) + CartesianIndex(shift), batch_idx
    )
    # Contributions outside the grid are silently dropped.
    if voxel_idx in CartesianIndices(out)
        val = voxel_weight(deltas, shift, weight)
        # Several points may hit the same voxel -> atomic accumulation.
        @inbounds Atomix.@atomic out[voxel_idx] += val
    end
end
"""
reference_coordinate_and_deltas(point, rotation, origin, scale)
Return
- The cartesian coordinate of the voxel of an N-dimensional rectangular
grid that is the one closest to the origin, out of the 2^N voxels that are next
neighbours of the (N-dimensional) `point`
- A Nx2 array containing coordinate-wise distances of the `scale`d `point` to the
voxel that is
* closest to the origin (out of the 2^N next neighbors) in the first column
* furthest from the origin (out of the 2^N next neighbors) in the second column.
The grid is implicitely assumed to discretize the hypercube ranging from (-1, 1)
in each dimension.
Before `point` is discretized into this grid, it is first translated by
`-origin` and then scaled by `scale`.
"""
@inline function reference_coordinate_and_deltas(
point::AbstractVector{T}, rotation, origin, scale
) where {T}
projected_point = rotation * point
# coordinate of transformed point in output coordinate system
# which is defined by the (integer) coordinates of the pixels/voxels
# in the output array.
coord = (projected_point - origin) .* scale
# round to **lower** integer (note the -1/2) coordinate ("upper left" if this were a matrix)
coord_reference_voxel = round.(Int, coord .- T(0.5), RoundUp)
# distance to lower integer coordinate (distance from "lower left" neighboring pixel
# in units of fractional pixels):
deltas_lower = coord - (coord_reference_voxel .- T(0.5))
# distances to lower (first column) and upper (second column) integer coordinates
deltas = [deltas_lower one(T) .- deltas_lower]
return coord_reference_voxel, deltas
end
"""
    voxel_weight(deltas, shift, point_weight)

Multilinear interpolation weight that a point contributes to the voxel reached
by `shift` from its reference ("lower") voxel: the product over all dimensions
of the appropriate column of `deltas`, times `point_weight`.
"""
@inline function voxel_weight(deltas, shift::NTuple{N,Int}, point_weight) where {N}
    # For each dimension pick the delta column selected by the shift
    # (mod1: shift 0 -> column 2, shift 1 -> column 1) and multiply them up.
    # Computing the product directly avoids building an intermediate SVector
    # of CartesianIndex plus an array view as the previous version did.
    val = prod(ntuple(dim -> @inbounds(deltas[dim, mod1(shift[dim], 2)]), Val(N)))
    return val * point_weight
end
@testitem "raster correctness" begin
using Rotations
grid_size = (5, 5)
points_single_center = [zeros(2)]
points_single_1pix_right = [[0.0, 0.4]]
points_single_1pix_up = [[-0.4, 0.0]]
points_single_1pix_left = [[0.0, -0.4]]
points_single_1pix_down = [[0.4, 0.0]]
points_single_halfpix_down = [[0.2, 0.0]]
points_single_halfpix_down_and_right = [[0.2, 0.2]]
points_four_cross = reduce(
vcat,
[
points_single_1pix_right,
points_single_1pix_up,
points_single_1pix_left,
points_single_1pix_down,
],
)
no_rotation = Float64[1; 0;; 0; 1]
rotation_90_deg = Float64[0; 1;; -1; 0]
no_translation = zeros(2)
translation_halfpix_right = [0.0, 0.2]
translation_1pix_down = [0.4, 0.0]
zero_background = 0.0
out_weight = 4.0
# -------- interpolations ---------
out = raster(
grid_size,
points_single_center,
no_rotation,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 0 0 0
0 0 4 0 0
0 0 0 0 0
0 0 0 0 0
]
out = raster(
grid_size,
points_single_1pix_right,
no_rotation,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 0 0 0
0 0 0 4 0
0 0 0 0 0
0 0 0 0 0
]
out = raster(
grid_size,
points_single_halfpix_down,
no_rotation,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 0 0 0
0 0 2 0 0
0 0 2 0 0
0 0 0 0 0
]
out = raster(
grid_size,
points_single_halfpix_down_and_right,
no_rotation,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 0 0 0
0 0 1 1 0
0 0 1 1 0
0 0 0 0 0
]
# -------- translations ---------
out = raster(
grid_size,
points_four_cross,
no_rotation,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 4 0 0
0 4 0 4 0
0 0 4 0 0
0 0 0 0 0
]
out = raster(
grid_size,
points_four_cross,
no_rotation,
translation_halfpix_right,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 2 2 0
0 2 2 2 2
0 0 2 2 0
0 0 0 0 0
]
out = raster(
grid_size,
points_four_cross,
no_rotation,
translation_1pix_down,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 0 0 0
0 0 4 0 0
0 4 0 4 0
0 0 4 0 0
]
# -------- rotations ---------
out = raster(
grid_size,
points_single_1pix_right,
rotation_90_deg,
no_translation,
zero_background,
out_weight,
)
@test out ≈ [
0 0 0 0 0
0 0 4 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
]
# -------- point weights ---------
out = raster(
grid_size,
points_four_cross,
no_rotation,
no_translation,
zero_background,
1.0,
[1.0, 2.0, 3.0, 4.0],
)
@test out ≈ [
0 0 0 0 0
0 0 2 0 0
0 3 0 1 0
0 0 4 0 0
0 0 0 0 0
]
out = raster(
grid_size,
points_four_cross,
no_rotation,
translation_halfpix_right,
zero_background,
2.0,
[1.0, 2.0, 3.0, 4.0],
)
@test out ≈ [
0 0 0 0 0
0 0 2 2 0
0 3 3 1 1
0 0 4 4 0
0 0 0 0 0
]
end
@testitem "raster inference and allocations" begin
using BenchmarkTools, CUDA, StaticArrays
include("../test/data.jl")
# check type stability
# single image
@inferred DiffPointRasterisation.raster(
D.grid_size_3d, D.points_static, D.rotation, D.translation_3d
)
@inferred DiffPointRasterisation.raster(
D.grid_size_2d, D.points_static, D.projection, D.translation_2d
)
# batched canonical
@inferred DiffPointRasterisation.raster(
D.grid_size_3d, D.points_static, D.rotations_static, D.translations_3d_static
)
@inferred DiffPointRasterisation.raster(
D.grid_size_2d, D.points_static, D.projections_static, D.translations_2d_static
)
# batched reinterpret reshape
@inferred DiffPointRasterisation.raster(
D.grid_size_3d, D.points_reinterp, D.rotations_reinterp, D.translations_3d_reinterp
)
@inferred DiffPointRasterisation.raster(
D.grid_size_2d,
D.points_reinterp,
D.projections_reinterp,
D.translations_2d_reinterp,
)
if CUDA.functional()
# single image
@inferred DiffPointRasterisation.raster(
D.grid_size_3d, cu(D.points_static), cu(D.rotation), cu(D.translation_3d)
)
@inferred DiffPointRasterisation.raster(
D.grid_size_2d, cu(D.points_static), cu(D.projection), cu(D.translation_2d)
)
# batched
@inferred DiffPointRasterisation.raster(
D.grid_size_3d,
cu(D.points_static),
cu(D.rotations_static),
cu(D.translations_3d_static),
)
@inferred DiffPointRasterisation.raster(
D.grid_size_2d,
cu(D.points_static),
cu(D.projections_static),
cu(D.translations_2d_static),
)
end
# Ideally the sinlge image (non batched) case would be allocation-free.
# The switch to KernelAbstractions made this allocating.
# set test to broken for now.
out_3d = Array{Float64,3}(undef, D.grid_size_3d...)
out_2d = Array{Float64,2}(undef, D.grid_size_2d...)
allocations = @ballocated DiffPointRasterisation.raster!(
$out_3d, $D.points_static, $D.rotation, $D.translation_3d
) evals = 1 samples = 1
@test allocations == 0 broken = true
allocations = @ballocated DiffPointRasterisation.raster!(
$out_2d, $D.points_static, $D.projection, $D.translation_2d
) evals = 1 samples = 1
@test allocations == 0 broken = true
end
@testitem "raster batched consistency" begin
include("../test/data.jl")
# raster
out_3d = zeros(D.grid_size_3d..., D.batch_size)
out_3d_batched = zeros(D.grid_size_3d..., D.batch_size)
for (out_i, args...) in zip(
eachslice(out_3d; dims=4), D.rotations, D.translations_3d, D.backgrounds, D.weights
)
raster!(out_i, D.more_points, args..., D.more_point_weights)
end
DiffPointRasterisation.raster!(
out_3d_batched,
D.more_points,
D.rotations,
D.translations_3d,
D.backgrounds,
D.weights,
D.more_point_weights,
)
# raster_project
out_2d = zeros(D.grid_size_2d..., D.batch_size)
out_2d_batched = zeros(D.grid_size_2d..., D.batch_size)
for (out_i, args...) in zip(
eachslice(out_2d; dims=3),
D.projections,
D.translations_2d,
D.backgrounds,
D.weights,
)
DiffPointRasterisation.raster!(out_i, D.more_points, args..., D.more_point_weights)
end
DiffPointRasterisation.raster!(
out_2d_batched,
D.more_points,
D.projections,
D.translations_2d,
D.backgrounds,
D.weights,
D.more_point_weights,
)
@test out_2d_batched ≈ out_2d
end | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 11615 | # single image
"""
    raster_pullback!(ds_dout, points, rotation, translation, background,
                     out_weight, point_weight, ds_dpoints, ds_dpoint_weight;
                     accumulate_ds_dpoints=false)

Pullback (reverse-mode derivative) of `raster` for a single image: given the
gradient `ds_dout` of a scalar loss with respect to the rasterised output,
compute the gradients with respect to all inputs.

`ds_dpoints` and `ds_dpoint_weight` are pre-allocated output buffers; they are
zeroed first unless `accumulate_ds_dpoints=true` (used by the batched method,
where several images accumulate into shared per-thread buffers).
"""
function raster_pullback!(
    ds_dout::AbstractArray{<:Number,N_out},
    points::AbstractVector{<:StaticVector{N_in,<:Number}},
    rotation::StaticMatrix{N_out,N_in,TR},
    translation::StaticVector{N_out,TT},
    background::Number,
    out_weight::OW,
    point_weight::AbstractVector{<:Number},
    ds_dpoints::AbstractMatrix{TP},
    ds_dpoint_weight::AbstractVector{PW};
    accumulate_ds_dpoints=false,
) where {N_in,N_out,TP<:Number,TR<:Number,TT<:Number,OW<:Number,PW<:Number}
    T = promote_type(eltype(ds_dout), TP, TR, TT, OW, PW)
    @argcheck size(ds_dpoints, 1) == N_in
    @argcheck length(point_weight) ==
        length(points) ==
        length(ds_dpoint_weight) ==
        size(ds_dpoints, 2)
    # The strategy followed here is to redo some of the calculations
    # made in the forward pass instead of caching them in the forward
    # pass and reusing them here.
    if !accumulate_ds_dpoints
        fill!(ds_dpoints, zero(TP))
        fill!(ds_dpoint_weight, zero(PW))
    end
    # reconstruct the same grid geometry as the forward pass
    origin = (-@SVector ones(TT, N_out)) - translation
    scale = SVector{N_out,T}(size(ds_dout)) / 2
    shifts = voxel_shifts(Val(N_out))
    all_density_idxs = CartesianIndices(ds_dout)
    # initialize some output for accumulation
    ds_dtranslation = @SVector zeros(TT, N_out)
    ds_drotation = @SMatrix zeros(TR, N_out, N_in)
    ds_dout_weight = zero(OW)
    # loop over points
    for (pt_idx, point) in enumerate(points)
        point = SVector{N_in,TP}(point)
        point_weight_i = point_weight[pt_idx]
        coord_reference_voxel, deltas = reference_coordinate_and_deltas(
            point, rotation, origin, scale
        )
        ds_dcoord = @SVector zeros(T, N_out)
        ds_dpoint_weight_i = zero(PW)
        # loop over voxels that are affected by point
        for shift in shifts
            voxel_idx = CartesianIndex(Tuple(coord_reference_voxel)) + CartesianIndex(shift)
            (voxel_idx in all_density_idxs) || continue
            ds_dout_i = ds_dout[voxel_idx]
            # gradient w.r.t. the combined (out * point) weight of this voxel
            ds_dweight = voxel_weight(deltas, shift, ds_dout_i)
            ds_dout_weight += ds_dweight * point_weight_i
            ds_dpoint_weight_i += ds_dweight * out_weight
            factor = ds_dout_i * out_weight * point_weight_i
            # loop over dimensions of point
            # chain rule: d(out)/d(coord) via the interpolation weights
            ds_dcoord += SVector(
                factor .*
                ntuple(n -> interpolation_weight(n, N_out, deltas, shift), Val(N_out)),
            )
        end
        # chain rule through coord = ((rotation * point) - origin) .* scale
        scaled = ds_dcoord .* scale
        ds_dtranslation += scaled
        ds_drotation += scaled * point'
        ds_dpoint = rotation' * scaled
        @view(ds_dpoints[:, pt_idx]) .+= ds_dpoint
        ds_dpoint_weight[pt_idx] += ds_dpoint_weight_i
    end
    # background enters every voxel additively, so its gradient is the sum
    return (;
        points=ds_dpoints,
        rotation=ds_drotation,
        translation=ds_dtranslation,
        background=sum(ds_dout),
        out_weight=ds_dout_weight,
        point_weight=ds_dpoint_weight,
    )
end
# batch of images
"""
    raster_pullback!(ds_dout, points, rotation, translation, background,
                     out_weight, point_weight, ds_dpoints, ds_drotation,
                     ds_dtranslation, ds_dbackground, ds_dout_weight,
                     ds_dpoint_weight)

Batched pullback of `raster`: dispatches one single-image pullback per batch
element across threads. Point(-weight) gradients are accumulated into one
buffer slice per chunk (trailing dimension of `ds_dpoints` /
`ds_dpoint_weight`) to avoid write races, and summed over slices at the end.
"""
function raster_pullback!(
    ds_dout::AbstractArray{<:Number,N_out_p1},
    points::AbstractVector{<:StaticVector{N_in,<:Number}},
    rotation::AbstractVector{<:StaticMatrix{N_out,N_in,<:Number}},
    translation::AbstractVector{<:StaticVector{N_out,<:Number}},
    background::AbstractVector{<:Number},
    out_weight::AbstractVector{<:Number},
    point_weight::AbstractVector{<:Number},
    ds_dpoints::AbstractArray{<:Number,3},
    ds_drotation::AbstractArray{<:Number,3},
    ds_dtranslation::AbstractMatrix{<:Number},
    ds_dbackground::AbstractVector{<:Number},
    ds_dout_weight::AbstractVector{<:Number},
    ds_dpoint_weight::AbstractMatrix{<:Number},
) where {N_in,N_out,N_out_p1}
    batch_axis = axes(ds_dout, N_out_p1)
    @argcheck N_out == N_out_p1 - 1
    @argcheck batch_axis ==
        axes(rotation, 1) ==
        axes(translation, 1) ==
        axes(background, 1) ==
        axes(out_weight, 1)
    @argcheck batch_axis ==
        axes(ds_drotation, 3) ==
        axes(ds_dtranslation, 2) ==
        axes(ds_dbackground, 1) ==
        axes(ds_dout_weight, 1)
    fill!(ds_dpoints, zero(eltype(ds_dpoints)))
    fill!(ds_dpoint_weight, zero(eltype(ds_dpoint_weight)))
    n_threads = size(ds_dpoints, 3)
    # NOTE(review): `chunks` is assumed to come from ChunkSplitters — confirm import
    Threads.@threads for (idxs, ichunk) in chunks(batch_axis, n_threads)
        for i in idxs
            args_i = (
                selectdim(ds_dout, N_out_p1, i),
                points,
                rotation[i],
                translation[i],
                background[i],
                out_weight[i],
                point_weight,
            )
            # accumulate point gradients into this chunk's private slice
            result_i = raster_pullback!(
                args_i...,
                view(ds_dpoints, :, :, ichunk),
                view(ds_dpoint_weight, :, ichunk);
                accumulate_ds_dpoints=true,
            )
            # per-image gradients go straight to their batch slot
            ds_drotation[:, :, i] .= result_i.rotation
            ds_dtranslation[:, i] = result_i.translation
            ds_dbackground[i] = result_i.background
            ds_dout_weight[i] = result_i.out_weight
        end
    end
    # reduce the per-chunk accumulators over the chunk dimension
    return (;
        points=dropdims(sum(ds_dpoints; dims=3); dims=3),
        rotation=ds_drotation,
        translation=ds_dtranslation,
        background=ds_dbackground,
        out_weight=ds_dout_weight,
        point_weight=dropdims(sum(ds_dpoint_weight; dims=2); dims=2),
    )
end
"""
    interpolation_weight(dim, n_dims, deltas, shift)

Derivative of the multilinear interpolation weight of the voxel selected by
`shift` with respect to the (scaled) point coordinate along dimension `dim`.
"""
function interpolation_weight(dim, n_dims, deltas, shift)
    T = eltype(deltas)
    # the delta along `dim` enters linearly: slope +1 toward the upper
    # neighbor (shift == 1), -1 toward the lower one
    weight = @inbounds (shift[dim] == 1 ? one(T) : -one(T))
    # multiply in the interpolation factors of all remaining dimensions
    @inbounds for d in 1:n_dims
        d == dim && continue
        weight *= deltas[d, mod1(shift[d], 2)]
    end
    return weight
end
@testitem "raster_pullback! inference and allocations" begin
using BenchmarkTools, CUDA, Adapt
include("../test/data.jl")
ds_dout_3d = randn(D.grid_size_3d)
ds_dout_3d_batched = randn(D.grid_size_3d..., D.batch_size)
ds_dout_2d = randn(D.grid_size_2d)
ds_dout_2d_batched = randn(D.grid_size_2d..., D.batch_size)
ds_dpoints = similar(D.points_array)
ds_dpoints_batched = similar(
D.points_array, (size(D.points_array)..., Threads.nthreads())
)
ds_drotations = similar(D.rotations_array)
ds_dprojections = similar(D.projections_array)
ds_dtranslations_3d = similar(D.translations_3d_array)
ds_dtranslations_2d = similar(D.translations_2d_array)
ds_dbackgrounds = similar(D.backgrounds)
ds_dweights = similar(D.weights)
ds_dpoint_weights = similar(D.point_weights)
ds_dpoint_weights_batched = similar(
D.point_weights, (size(D.point_weights)..., Threads.nthreads())
)
args_batched_3d = (
ds_dout_3d_batched,
D.points_static,
D.rotations_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.point_weights,
ds_dpoints_batched,
ds_drotations,
ds_dtranslations_3d,
ds_dbackgrounds,
ds_dweights,
ds_dpoint_weights_batched,
)
args_batched_2d = (
ds_dout_2d_batched,
D.points_static,
D.projections_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.point_weights,
ds_dpoints_batched,
ds_dprojections,
ds_dtranslations_2d,
ds_dbackgrounds,
ds_dweights,
ds_dpoint_weights_batched,
)
function to_cuda(args)
args_cu = adapt(CuArray, args)
args_cu = Base.setindex(args_cu, args_cu[8][:, :, 1], 8) # ds_dpoint without batch dim
return args_cu = Base.setindex(args_cu, args_cu[13][:, 1], 13) # ds_dpoint_weight without batch dim
end
# check type stability
# single image
@inferred DiffPointRasterisation.raster_pullback!(
ds_dout_3d,
D.points_static,
D.rotation,
D.translation_3d,
D.background,
D.weight,
D.point_weights,
ds_dpoints,
ds_dpoint_weights,
)
@inferred DiffPointRasterisation.raster_pullback!(
ds_dout_2d,
D.points_static,
D.projection,
D.translation_2d,
D.background,
D.weight,
D.point_weights,
ds_dpoints,
ds_dpoint_weights,
)
# batched
@inferred DiffPointRasterisation.raster_pullback!(args_batched_3d...)
@inferred DiffPointRasterisation.raster_pullback!(args_batched_2d...)
if CUDA.functional()
cu_args_3d = to_cuda(args_batched_3d)
@inferred DiffPointRasterisation.raster_pullback!(cu_args_3d...)
cu_args_2d = to_cuda(args_batched_2d)
@inferred DiffPointRasterisation.raster_pullback!(cu_args_2d...)
end
# check that single-imge pullback is allocation-free
allocations = @ballocated DiffPointRasterisation.raster_pullback!(
$ds_dout_3d,
$(D.points_static),
$(D.rotation),
$(D.translation_3d),
$(D.background),
$(D.weight),
$(D.point_weights),
$ds_dpoints,
$ds_dpoint_weights,
) evals = 1 samples = 1
@test allocations == 0
end
@testitem "raster_pullback! threaded" begin
include("../test/data.jl")
ds_dout = randn(D.grid_size_3d..., D.batch_size)
ds_dargs_threaded = DiffPointRasterisation.raster_pullback!(
ds_dout,
D.more_points,
D.rotations,
D.translations_3d,
D.backgrounds,
D.weights,
D.more_point_weights,
)
ds_dpoints = Matrix{Float64}[]
ds_dpoint_weight = Vector{Float64}[]
for i in 1:(D.batch_size)
ds_dargs_i = @views raster_pullback!(
ds_dout[:, :, :, i],
D.more_points,
D.rotations[i],
D.translations_3d[i],
D.backgrounds[i],
D.weights[i],
D.more_point_weights,
)
push!(ds_dpoints, ds_dargs_i.points)
push!(ds_dpoint_weight, ds_dargs_i.point_weight)
@views begin
@test ds_dargs_threaded.rotation[:, :, i] ≈ ds_dargs_i.rotation
@test ds_dargs_threaded.translation[:, i] ≈ ds_dargs_i.translation
@test ds_dargs_threaded.background[i] ≈ ds_dargs_i.background
@test ds_dargs_threaded.out_weight[i] ≈ ds_dargs_i.out_weight
end
end
@test ds_dargs_threaded.points ≈ sum(ds_dpoints)
@test ds_dargs_threaded.point_weight ≈ sum(ds_dpoint_weight)
ds_dout = zeros(D.grid_size_2d..., D.batch_size)
ds_dargs_threaded = DiffPointRasterisation.raster_pullback!(
ds_dout,
D.more_points,
D.projections,
D.translations_2d,
D.backgrounds,
D.weights,
D.more_point_weights,
)
ds_dpoints = Matrix{Float64}[]
ds_dpoint_weight = Vector{Float64}[]
for i in 1:(D.batch_size)
ds_dargs_i = @views raster_pullback!(
ds_dout[:, :, i],
D.more_points,
D.projections[i],
D.translations_2d[i],
D.backgrounds[i],
D.weights[i],
D.more_point_weights,
)
push!(ds_dpoints, ds_dargs_i.points)
push!(ds_dpoint_weight, ds_dargs_i.point_weight)
@views begin
@test ds_dargs_threaded.rotation[:, :, i] ≈ ds_dargs_i.rotation
@test ds_dargs_threaded.translation[:, i] ≈ ds_dargs_i.translation
@test ds_dargs_threaded.background[i] ≈ ds_dargs_i.background
@test ds_dargs_threaded.out_weight[i] ≈ ds_dargs_i.out_weight
end
end
@test ds_dargs_threaded.points ≈ sum(ds_dpoints)
@test ds_dargs_threaded.point_weight ≈ sum(ds_dpoint_weight)
end | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 3425 |
"""
digitstuple(k, Val(N))
Return a N-tuple containing the bit-representation of k
"""
digitstuple(k, ::Val{N}, int_type=Int64) where {N} =
ntuple(i -> int_type(k >> (i - 1) % 2), N)
@testitem "digitstuple" begin
@test DiffPointRasterisation.digitstuple(5, Val(3)) == (1, 0, 1)
@test DiffPointRasterisation.digitstuple(2, Val(2)) == (0, 1)
@test DiffPointRasterisation.digitstuple(2, Val(4)) == (0, 1, 0, 0)
end
"""
voxel_shifts(Val(N), [int_type])
Enumerate nearest neighbor coordinate shifts with respect
to "upper left" voxel.
For a N-dimensional voxel grid, return a 2^N-tuple of N-tuples,
where each element of the outer tuple is a cartesian coordinate
shift from the "upper left" voxel.
"""
voxel_shifts(::Val{N}, int_type=Int64) where {N} =
ntuple(k -> digitstuple(k - 1, Val(N), int_type), Val(2^N))
@testitem "voxel_shifts" begin
@inferred DiffPointRasterisation.voxel_shifts(Val(4))
@test DiffPointRasterisation.voxel_shifts(Val(1)) == ((0,), (1,))
@test DiffPointRasterisation.voxel_shifts(Val(2)) == ((0, 0), (1, 0), (0, 1), (1, 1))
@test DiffPointRasterisation.voxel_shifts(Val(3)) == (
(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(1, 1, 0),
(0, 0, 1),
(1, 0, 1),
(0, 1, 1),
(1, 1, 1),
)
end
# Lift a plain array into a `SizedArray`, making its size available in the
# type domain; already statically sized arrays pass through unchanged.
to_sized(arr::StaticArray{<:Any,<:Number}) = arr
to_sized(arr::AbstractArray{T}) where {T<:Number} = SizedArray{Tuple{size(arr)...},T}(arr)
# Convert the elements of a vector-of-arrays to statically sized arrays,
# assuming all elements share the size of the first one — TODO confirm that
# callers guarantee uniform element sizes. Vectors of scalars or of
# already-static arrays are returned unchanged.
inner_to_sized(v::AbstractVector{<:Number}) = v
inner_to_sized(v::AbstractVector{<:StaticArray}) = v
function inner_to_sized(v::AbstractVector{<:AbstractArray{<:Number}})
    # lift the element size to the type domain, then dispatch on it
    return inner_to_sized(v, Val(size(v[1])))
end
function inner_to_sized(
    v::AbstractVector{<:AbstractArray{T}}, ::Val{sz}
) where {sz,T<:Number}
    return SizedArray{Tuple{sz...},T}.(v)
end
@testitem "inner_to_sized" begin
using StaticArrays
@testset "vector" begin
inp = randn(3)
@inferred DiffPointRasterisation.inner_to_sized(inp)
out = DiffPointRasterisation.inner_to_sized(inp)
@test out === inp
end
@testset "vec of dynamic vec" begin
inp = [randn(3) for _ in 1:5]
out = DiffPointRasterisation.inner_to_sized(inp)
@test out == inp
@test out isa Vector{<:StaticVector{3}}
end
@testset "vec of static vec" begin
inp = [@SVector randn(3) for _ in 1:5]
@inferred DiffPointRasterisation.inner_to_sized(inp)
out = DiffPointRasterisation.inner_to_sized(inp)
@test out === inp
@test out isa Vector{<:StaticVector{3}}
end
@testset "vec of dynamic matrix" begin
inp = [randn(3, 2) for _ in 1:5]
out = DiffPointRasterisation.inner_to_sized(inp)
@test out == inp
@test out isa Vector{<:StaticMatrix{3,2}}
end
end
# Add a trailing dimension of size 1 (a scalar becomes a 1-element vector) ...
@inline append_singleton_dim(x) = reshape(x, (size(x)..., 1))
@inline append_singleton_dim(x::Number) = [x]
# ... and remove the trailing dimension again (it must have size 1).
@inline drop_last_dim(x) = dropdims(x; dims=ndims(x))
@testitem "append drop dim" begin
using BenchmarkTools
a = randn(2, 3, 4)
a2 = DiffPointRasterisation.drop_last_dim(
DiffPointRasterisation.append_singleton_dim(a)
)
@test a2 === a broken = true
allocations = @ballocated DiffPointRasterisation.drop_last_dim(
DiffPointRasterisation.append_singleton_dim($a)
) evals = 1 samples = 1
@test allocations == 0 broken = true
end | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 1914 |
@testitem "ChainRules single" begin
using ChainRulesTestUtils, ChainRulesCore
include("data.jl")
test_rrule(
raster,
D.grid_size_3d,
D.points_static,
D.rotation ⊢ D.rotation_tangent,
D.translation_3d,
D.background,
D.weight,
D.point_weights,
)
# default arguments
test_rrule(
raster,
D.grid_size_3d,
D.points_static,
D.rotation ⊢ D.rotation_tangent,
D.translation_3d,
)
test_rrule(
raster,
D.grid_size_2d,
D.points_static,
D.projection ⊢ D.projection_tangent,
D.translation_2d,
D.background,
D.weight,
D.point_weights,
)
# default arguments
test_rrule(
raster,
D.grid_size_2d,
D.points_static,
D.projection ⊢ D.projection_tangent,
D.translation_2d,
)
end
@testitem "ChainRules batch" begin
using ChainRulesTestUtils
include("data.jl")
test_rrule(
raster,
D.grid_size_3d,
D.points_static,
D.rotations_static ⊢ D.rotation_tangents_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
# default arguments
test_rrule(
raster,
D.grid_size_3d,
D.points_static,
D.rotations_static ⊢ D.rotation_tangents_static,
D.translations_3d_static,
)
test_rrule(
raster,
D.grid_size_2d,
D.points_static,
D.projections_static ⊢ D.projection_tangents_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.point_weights,
)
# default arguments
test_rrule(
raster,
D.grid_size_2d,
D.points_static,
D.projections_static ⊢ D.projection_tangents_static,
D.translations_2d_static,
)
end | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 2849 |
@testitem "CUDA forward" begin
using Adapt, CUDA
CUDA.allowscalar(false)
include("data.jl")
include("util.jl")
cuda_available = CUDA.functional()
# no projection
args = (
D.grid_size_3d,
D.more_points,
D.rotations_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.more_point_weights,
)
@test cuda_cpu_agree(raster, args...) skip = !cuda_available
# default arguments
args = (D.grid_size_3d, D.more_points, D.rotations_static, D.translations_3d_static)
@test cuda_cpu_agree(raster, args...) skip = !cuda_available
# projection
args = (
D.grid_size_2d,
D.more_points,
D.projections_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.more_point_weights,
)
@test cuda_cpu_agree(raster, args...) skip = !cuda_available
end
@testitem "CUDA backward" begin
using Adapt, CUDA
CUDA.allowscalar(false)
include("data.jl")
include("util.jl")
cuda_available = CUDA.functional()
# no projection
ds_dout_3d = randn(D.grid_size_3d..., D.batch_size)
args = (
ds_dout_3d,
D.more_points,
D.rotations_static,
D.translations_3d_static,
D.backgrounds,
D.weights,
D.more_point_weights,
)
@test cuda_cpu_agree(raster_pullback!, args...) skip = !cuda_available
# default arguments
args = (ds_dout_3d, D.more_points, D.rotations_static, D.translations_3d_static)
@test cuda_cpu_agree(raster_pullback!, args...) skip = !cuda_available
# projection
ds_dout_2d = randn(D.grid_size_2d..., D.batch_size)
args = (
ds_dout_2d,
D.more_points,
D.projections_static,
D.translations_2d_static,
D.backgrounds,
D.weights,
D.more_point_weights,
)
@test cuda_cpu_agree(raster_pullback!, args...) skip = !cuda_available
end
# The follwing currently fails.
# Not sure whether test_rrule is supposed to play nicely with CUDA.
# @testitem "CUDA ChainRules" begin
# using Adapt, CUDA, ChainRulesTestUtils
# include("data.jl")
# include("util.jl")
# c(a) = adapt(CuArray, a)
# if CUDA.functional()
# ds_dout_3d = CUDA.randn(Float64, D.grid_size_3d..., D.batch_size)
# args = (D.grid_size_3d, c(D.points), c(D.rotations) ⊢ c(D.rotation_tangents), c(D.translations_3d), c(D.backgrounds), c(D.weights))
# test_rrule(raster, args...; output_tangent=ds_dout_3d)
#
# ds_dout_2d = CUDA.randn(Float64, D.grid_size_2d..., D.batch_size)
# args = (D.grid_size_2d, c(D.points), c(D.rotations) ⊢ c(D.rotation_tangents), c(D.translations_2d), c(D.backgrounds), c(D.weights))
# test_rrule(raster, args...; output_tangent=ds_dout_2d)
# end
# end
| DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 3212 | module D
using Rotations, StaticArrays
function batch_size_for_test()
    # Pick a batch size that exceeds the thread count and, when running
    # multithreaded, is not a multiple of it — so chunked code paths are
    # exercised with uneven chunks.
    n_threads = Threads.nthreads()
    batch = n_threads + 1
    while (n_threads > 1) && (batch % n_threads == 0)
        batch += 1
    end
    return batch
end
# orthographic 3d -> 2d projection matrix (drops the third coordinate)
const P = @SMatrix Float64[
    1 0 0
    0 1 0
]
const grid_size_3d = (8, 8, 8)
const grid_size_2d = (8, 8)
const batch_size = batch_size_for_test()
# point clouds in several equivalent layouts (vec-of-vec, static, flat
# array, reinterpret-of-array) to exercise all accepted input formats
const points = [0.4 * randn(3) for _ in 1:10]
const points_static = SVector{3}.(points)
const points_array = Matrix{Float64}(undef, 3, length(points))
eachcol(points_array) .= points
const points_reinterp = reinterpret(reshape, SVector{3,Float64}, points_array)
const more_points = [0.4 * @SVector randn(3) for _ in 1:100_000]
# rotations (and their tangents for ChainRules tests), again in all layouts
const rotation = rand(RotMatrix3{Float64})
const rotations_static = rand(RotMatrix3{Float64}, batch_size)::Vector{<:StaticMatrix}
const rotations = (Array.(rotations_static))::Vector{Matrix{Float64}}
const rotations_array = Array{Float64,3}(undef, 3, 3, batch_size)
eachslice(rotations_array; dims=3) .= rotations
const rotations_reinterp = reinterpret(
    reshape, SMatrix{3,3,Float64,9}, reshape(rotations_array, 9, :)
)
const rotation_tangent = Array(rand(RotMatrix3))
const rotation_tangents_static =
    rand(RotMatrix3{Float64}, batch_size)::Vector{<:StaticMatrix}
const rotation_tangents = (Array.(rotation_tangents_static))::Vector{Matrix{Float64}}
# projections = orthographic projection composed with a random rotation
const projection = P * rand(RotMatrix3)
const projections_static = Ref(P) .* rand(RotMatrix3{Float64}, batch_size)
const projections = (Array.(projections_static))::Vector{Matrix{Float64}}
const projections_array = Array{Float64,3}(undef, 2, 3, batch_size)
eachslice(projections_array; dims=3) .= projections
const projections_reinterp = reinterpret(
    reshape, SMatrix{2,3,Float64,6}, reshape(projections_array, 6, :)
)
const projection_tangent = Array(P * rand(RotMatrix3))
const projection_tangents_static = Ref(P) .* rand(RotMatrix3{Float64}, batch_size)
const projection_tangents = (Array.(projection_tangents_static))::Vector{Matrix{Float64}}
# translations in 3d (volume) and 2d (projected) output space
const translation_3d = 0.1 * @SVector randn(3)
const translation_2d = 0.1 * @SVector randn(2)
const translations_3d_static = [0.1 * @SVector randn(3) for _ in 1:batch_size]
const translations_3d = (Array.(translations_3d_static))::Vector{Vector{Float64}}
const translations_3d_array = Matrix{Float64}(undef, 3, batch_size)
eachcol(translations_3d_array) .= translations_3d
const translations_3d_reinterp = reinterpret(
    reshape, SVector{3,Float64}, translations_3d_array
)
const translations_2d_static = [0.1 * @SVector randn(2) for _ in 1:batch_size]
const translations_2d = (Array.(translations_2d_static))::Vector{Vector{Float64}}
const translations_2d_array = Matrix{Float64}(undef, 2, batch_size)
eachcol(translations_2d_array) .= translations_2d
const translations_2d_reinterp = reinterpret(
    reshape, SVector{2,Float64}, translations_2d_array
)
# scalar background/weight values and their batched counterparts
const background = 0.1
const backgrounds = collect(1:1.0:batch_size)
const weight = rand()
const weights = 10 .* rand(batch_size)
# per-point weights, normalized to sum to 1
const point_weights = let
    w = rand(length(points))
    w ./ sum(w)
end
const more_point_weights = let
    w = rand(length(more_points))
    w ./ sum(w)
end
end # module D | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 206 | using TestItemRunner: @run_package_tests, @testitem
@testitem "Aqua.test_all" begin
import Aqua
Aqua.test_all(DiffPointRasterisation)
end
@run_package_tests # filter=ti-> occursin("CUDA", ti.name) | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | code | 1019 | function run_cuda(f, args...)
cu_args = adapt(CuArray, args)
return f(cu_args...)
end
# Run `f` once on the given CPU inputs and once on their GPU copies, and
# check that both results agree.
function cuda_cpu_agree(f, args...)
    expected = f(args...)
    actual = run_cuda(f, args...)
    return is_approx_equal(actual, expected)
end
# Compare a (possibly GPU-resident) array against a CPU reference array.
is_approx_equal(actual::AbstractArray, expected::AbstractArray) =
    isapprox(Array(actual), expected)
"""
    is_approx_equal(actual::NamedTuple, expected::NamedTuple)

Field-wise approximate comparison of two named tuples, moving `actual` to
host memory first (via `adapt`). Returns `true` when every field of
`expected` approximately matches the corresponding field of `actual`;
throws an `ErrorException` describing the first mismatching field otherwise.
"""
function is_approx_equal(actual::NamedTuple, expected::NamedTuple)
    actual_cpu = adapt(Array, actual)
    for prop in propertynames(expected)
        try
            actual_elem = getproperty(actual_cpu, prop)
            expected_elem = getproperty(expected, prop)
            if !(actual_elem ≈ expected_elem)
                # Fix: previously a raw String was thrown (non-Exception),
                # followed by an unreachable `return false`. `error` raises a
                # proper ErrorException with the same message.
                error(
                    "Values differ:\nActual: $(string(actual_elem)) \nExpected: $(string(expected_elem))",
                )
            end
        catch
            # Add context about which field failed, then propagate.
            println("Error while trying to compare element $(string(prop))")
            rethrow()
        end
    end
    return true
end
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | docs | 7933 | # DiffPointRasterisation
*Differentiable rasterisation of point clouds in julia*
[](https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://microscopic-image-analysis.github.io/DiffPointRasterisation.jl/dev)
[](https://github.com/JuliaTesting/Aqua.jl)

## About
This package provides a rasterisation routine for arbitrary-dimensional point cloud data that is fully (auto-)differentiable.
The implementation uses multiple threads on CPU or GPU hardware if available.
The roots of this package are in single-particle 3d reconstruction from tomographic data, and as such it comes with the following ins and outs:
- Currently only a single "channel" (e.g gray scale) is supported
- If data is projected to a lower dimension (meaning the dimensionality of the output grid is lower than the dimensionality of the points)
- Projections are always orthographic (currently no support for perspective projection)
- no "z-blending": The contribution of each point to its output voxel is independent
of the point's coordinate along the projection axis.
## Rasterisation interface
The interface consists of a single function `raster` that accepts a point cloud (as a vector of m-dimensional vectors) and pose/projection parameters (as well as optional weight and background parameters), and returns an n-dimensional (n <= m) array into which the points are rasterized, each point by default with a weight of 1 that is multi-linearly interpolated into the neighboring grid cells.
#### Simple Example
```julia-repl
julia> using DiffPointRasterisation, LinearAlgebra
julia> grid_size = (5, 5) # 2d grid with 5 x 5 pixels
(5, 5)
julia> rotation, translation = I(2), zeros(2) # pose parameters
(Bool[1 0; 0 1], [0.0, 0.0])
```
```julia-repl
julia> raster(grid_size, [zeros(2)], rotation, translation) # single point at center
5×5 Matrix{Float64}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
```julia-repl
julia> raster(grid_size, [[0.2, 0.0]], rotation, translation) # single point half a pixel below center
5×5 Matrix{Float64}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.5 0.0 0.0
0.0 0.0 0.5 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
```julia-repl
julia> raster(grid_size, [[0.2, -0.2]], I(2), zeros(2)) # single point half a pixel below and left of center
5×5 Matrix{Float64}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.25 0.25 0.0 0.0
0.0 0.25 0.25 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
## Differentiability
Both, an explicit function that calculates derivatives of `raster`, as well as an integration to common automatic differentiation libraries in julia are provided.
### Automatic differentiation libraries
This package provides rules for reverse-mode automatic differentiation libraries that are based on the [ChainRules.jl](https://juliadiff.org/ChainRulesCore.jl/dev/#ChainRules-roll-out-status) ecosystem. So using `raster(args...)` in a program that uses any of the ChainRules-based reverse-mode autodiff libraries should just work™. Gradients with respect to all parameters (except `grid_size`) are supported:
#### Example using Zygote.jl
we define a simple scalar "loss" that we want to differentiate:
```julia-repl
julia> using Zygote
julia> target_image = rand(grid_size...)
5×5 Matrix{Float64}:
0.345889 0.032283 0.589178 0.0625972 0.310929
0.404836 0.573265 0.350633 0.0417926 0.895955
0.174528 0.127555 0.0906833 0.639844 0.832502
0.189836 0.360597 0.243664 0.825484 0.667319
0.672631 0.520593 0.341701 0.101026 0.182172
julia> loss(params...) = sum((target_image .- raster(grid_size, params...)).^2)
loss (generic function with 1 method)
```
some input parameters to `raster`:
```julia-repl
julia> points = [2 * rand(2) .- 1 for _ in 1:5] # 5 random points
5-element Vector{Vector{Float64}}:
[0.8457397177007744, 0.3482756109584688]
[-0.6028188536164718, -0.612801322279686]
[-0.47141692007256464, 0.6098964840013308]
[-0.74526926786903, 0.6480225109030409]
[-0.4044384373422192, -0.13171854413805173]
julia> rotation = [ # explicit matrix for rotation (to satisfy Zygote)
1.0 0.0
0.0 1.0
]
2×2 Matrix{Float64}:
1.0 0.0
0.0 1.0
```
and let Zygote calculate the gradient of `loss` with respect to those parameters
```julia-repl
julia> d_points, d_rotation, d_translation = Zygote.gradient(loss, points, rotation, translation);
julia> d_points
5-element Vector{StaticArraysCore.SVector{2, Float64}}:
[-2.7703628931165025, 3.973371400200988]
[-0.70462225282373, 1.0317734946448016]
[-1.7117138793471494, -3.235178706903591]
[-2.0683933141077886, -0.6732149105779637]
[2.6278388385655904, 1.585621066861592]
julia> d_rotation
2×2 reshape(::StaticArraysCore.SMatrix{2, 2, Float64, 4}, 2, 2) with eltype Float64:
-0.632605 -3.26353
4.12402 -1.86668
julia> d_translation
2-element StaticArraysCore.SVector{2, Float64} with indices SOneTo(2):
-4.62725350082958
2.6823723442258274
```
### Explicit interface
The explicit interface for calculating derivatives of `raster` with respect to its arguments again consists of a single function called `raster_pullback!`:
The function `raster_pullback!(ds_dout, raster_args...)` takes as input the sensitivity of some scalar quantity to the output of `raster(grid_size, raster_args...)`, `ds_dout`, and returns the sensitivity of said quantity to the *input arguments* `raster_args` of `raster` (hence the name pullback).
#### Example
We can repeat the above calculation using the explicit interface.
To do that, we first have to calculate the sensitivity of the scalar output of `loss` to the output of `raster`. This could be done using an autodiff library, but here we do it manually:
```julia-repl
julia> ds_dout = 2 .* (target_image .- raster(grid_size, points, rotation, translation)) # gradient of loss with respect to output of raster
5×5 Matrix{Float64}:
0.152276 -0.417335 1.16347 -0.700428 -0.63595
0.285167 0.033845 -0.625258 -0.801198 0.760124
0.349055 0.25511 0.181367 1.27969 1.665
0.379672 0.721194 0.487329 1.65097 1.33464
1.34526 1.04119 0.454354 -1.3402 0.364343
```
Then we can feed that into `raster_pullback!` together with the same arguments that were fed to `raster`:
```julia-repl
julia> ds_din = raster_pullback!(ds_dout, points, rotation, translation);
```
We see that the result is the same as obtained via Zygote:
```julia-repl
julia> ds_din.points
2×5 Matrix{Float64}:
2.77036 0.704622 1.71171 2.06839 -2.62784
-3.97337 -1.03177 3.23518 0.673215 -1.58562
julia> ds_din.rotation
2×2 StaticArraysCore.SMatrix{2, 2, Float64, 4} with indices SOneTo(2)×SOneTo(2):
0.632605 3.26353
-4.12402 1.86668
julia> ds_din.translation
2-element StaticArraysCore.SVector{2, Float64} with indices SOneTo(2):
4.62725350082958
-2.6823723442258274
```
## Timings
**Points**|**Images**|**Pixel**|**Mode**|**Fwd time CPU**|**Fwd time CPUx8\***|**Fwd time GPU**|**Bwd time CPU**|**Bwd time CPUx8\***|**Bwd time GPU\*\***
:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:
10⁴|64|128²|3D → 2D|341 ms|73 ms|15 ms|37 ms|10 ms|1 ms
10⁴|64|1024²|3D → 2D|387 ms|101 ms|16 ms|78 ms|24 ms|2 ms
10⁵|64|128²|3D → 2D|3313 ms|741 ms|153 ms|374 ms|117 ms|9 ms
10⁵|64|1024²|3D → 2D|3499 ms|821 ms|154 ms|469 ms|173 ms|10 ms
10⁵|1|1024³|3D → 3D|493 ms|420 ms|24 ms|265 ms|269 ms|17 ms
\* 8 julia threads on 4 hardware threads with hyperthreading
\*\* 1 Nvidia HGX A100 GPU | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | docs | 254 | # API Documentation
```@meta
CurrentModule = DiffPointRasterisation
```
## Exported functions
```@autodocs
Modules = [DiffPointRasterisation]
Private = false
```
## Private functions
```@autodocs
Modules = [DiffPointRasterisation]
Public = false
``` | DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | docs | 1992 | # Raster a single point cloud to a batch of poses
To make best use of the hardware it is advantageous to raster a batch of poses at once.
On GPU hardware this is currently also the only supported mode.
To raster a single point cloud to a batch of `n` images, all parameters except the point cloud should be provided as `n`-vectors.
This is a more flexible interface than the often used array with trailing batch dimension, since it allows to pass in a batch of parameters that have a more structured type than a simple array (e.g. a vector of `Rotation` objects from [Rotations.jl](https://github.com/JuliaGeometry/Rotations.jl)).
## Array with trailing batch dim to vec of array
If you have data in the array with trailing batch dimension format, it is straightforward (and quite cheap) to reinterpret it as a batch-vector of single parameters:
```julia-repl
julia> matrices = randn(2, 2, 3) # batch of 3 2x2-matrices as 3d-array
2×2×3 Array{Float64, 3}:
[:, :, 1] =
-0.947072 1.10155
0.328925 0.0957267
[:, :, 2] =
-1.14336 1.71218
0.277723 0.436665
[:, :, 3] =
-0.114541 -0.769275
0.321084 -0.215008
julia> using StaticArrays
julia> vec_of_matrices = reinterpret(reshape, SMatrix{2, 2, Float64, 4}, reshape(matrices, 4, :))
3-element reinterpret(reshape, SMatrix{2, 2, Float64, 4}, ::Matrix{Float64}) with eltype SMatrix{2, 2, Float64, 4}:
[-0.947072487060636 1.1015531033643386; 0.3289251820481776 0.0957267306067441]
[-1.143363316882325 1.712179045069409; 0.27772320359678004 0.4366650562384542]
[-0.11454148373779363 -0.7692750798350269; 0.32108447348937047 -0.21500805160408776]
```
## Pre-allocation for batched pullback
[`raster_pullback!`](@ref) can be optionally provided with pre-allocated arrays for its output.
For these arrays the expected format is actually in the nd-array with trailing batch dimension format.
The rationale behind this is that the algorithm works better on continuous blocks of memory, since atomic operations are required.
| DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.2.2 | 2fdf8dec8ac5fbf4a19487210c6d3bb8dff95459 | docs | 1877 | # DiffPointRasterisation
*Differentiable rasterisation of point clouds in julia*
DiffPointRasterisation.jl provides a rasterisation routine for arbitrary-dimensional point cloud data that is fully (auto-)differentiable.
The implementation uses multiple threads on CPU or GPU hardware if available.
## Rasterisation interface
The interface consists of a single function [`raster`](@ref) that accepts a point cloud (as a vector of m-dimensional vectors) and pose/projection parameters (as well as optional weight and background parameters), and returns an n-dimensional (n <= m) array into which the points are rasterized, each point by default with a weight of 1 that is multi-linearly interpolated into the neighboring grid cells.
## Differentiability
Both, an explicit function that calculates derivatives of `raster`, as well as an integration to common automatic differentiation libraries in julia are provided.
### Automatic differentiation libraries
Rules for reverse-mode automatic differentiation libraries that are based on the [ChainRules.jl](https://juliadiff.org/ChainRulesCore.jl/dev/#ChainRules-roll-out-status) ecosystem are provided via an extension package. So using `raster(args...)` in a program that uses any of the ChainRules-based reverse-mode autodiff libraries should just work™. Gradients with respect to all parameters (except `grid_size`) are supported.
### Explicit interface
The explicit interface for calculating derivatives of `raster` with respect to its arguments again consists of a single function called [`raster_pullback!`](@ref):
The function `raster_pullback!(ds_dout, raster_args...)` takes as input the sensitivity of some scalar quantity to the output of `raster(grid_size, raster_args...)`, `ds_dout`, and returns the sensitivity of said quantity to the *input arguments* `raster_args` of `raster` (hence the name pullback).
| DiffPointRasterisation | https://github.com/microscopic-image-analysis/DiffPointRasterisation.jl.git |
|
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | code | 558 | using NicePipes
using Documenter
# Build the HTML documentation for NicePipes.jl.
makedocs(;
    modules=[NicePipes],
    authors="Simeon Schaub <[email protected]> and contributors",
    repo="https://github.com/simeonschaub/NicePipes.jl/blob/{commit}{path}#L{line}",
    sitename="NicePipes.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI so local builds stay browsable from disk.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://simeonschaub.github.io/NicePipes.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Publish the generated docs to gh-pages.
deploydocs(;
    repo="github.com/simeonschaub/NicePipes.jl",
)
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
|
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | code | 2084 | module NicePipes
# Binary artifacts (grep_jll/sed_jll) require Julia >= 1.3. On older versions
# fall back to the system's `grep`/`sed`, mimicking the jll launcher interface
# used below (a function that passes the executable name/path to `f`).
if VERSION < v"1.3.0-rc4"
    @warn "Can't use binary artifacts, using your system's `grep` and `sed`."
    grep(f) = f("grep")
    sed(f) = f("sed")
else
    using grep_jll, sed_jll
end
"""
    ShPipe{T,C}

A value `val` paired with an external command launcher `cmd` (jll-style
function) and its arguments `args`. Displaying a `ShPipe` pipes the `show`
output of `val` through the command (see `Base.show(::IO, ::ShPipe)`).
"""
struct ShPipe{T,C}
    val::T
    cmd::C
    args::Cmd
end
"""
    _open(f, cmds, args...; kwargs...)

Like `Base.open(f, cmds, ...)`, but does not throw when the process exits with
a non-zero code, and always returns the `Process` object instead of `f`'s
return value (callers inspect `exitcode` themselves). If `f` throws, the
process is killed and the exception is rethrown; the process' stdin is closed
in all cases so the child sees EOF.
"""
function _open(f::Function, cmds::Base.AbstractCmd, args...; kwargs...)
    P = open(cmds, args...; kwargs...)
    try
        # Note: f's return value is intentionally discarded (the original
        # bound it to an unused local).
        f(P)
    catch
        kill(P)
        rethrow()
    finally
        close(P.in)
    end
    wait(P)
    return P
end
# Render a `ShPipe`: feed the wrapped value's REPL representation to the
# external command's stdin and stream the command's output to `io_out`.
function Base.show(io_out::IO, x::ShPipe)
    x.cmd() do cmd
        p = _open(`$cmd $(x.args)`, "w", io_out) do io_in
            # Write the value exactly as the REPL would display it.
            show(io_in, MIME("text/plain"), x.val)
        end
        if x.cmd === grep && p.exitcode == 1
            # grep exits with status 1 when no lines matched.
            println(io_out, "No matches found!")
        elseif p.exitcode != 0
            print(io_out, "Command $(p.cmd) failed with exit code $(p.exitcode)")
        elseif x.cmd === grep
            # delete additional newline (ANSI cursor-up escape sequence)
            print(io_out, "\033[1A")
        end
    end
    return nothing
end
"""
    ShPipeEndpoint{C}

The right-hand side of a pipe expression such as `x | @grep pattern`: stores
the command launcher and its arguments, waiting to be applied to a value.
"""
struct ShPipeEndpoint{C}
    cmd::C
    args::Cmd
end
"""
    @p_cmd "program arguments"

Split a command string at its last whitespace into program and arguments and
build a `ShPipeEndpoint` from them.
"""
macro p_cmd(s)
    cmd, args = match(r"^(.*)\s(.*)$", s).captures
    # Fix: the closing parenthesis was previously misplaced —
    # `:(ShPipeEndpoint(f->f($cmd)), @cmd($args))` built a
    # (ShPipeEndpoint, Cmd) tuple and invoked the two-field constructor with
    # a single argument. Pass both the launcher and the parsed Cmd.
    return :(ShPipeEndpoint(f -> f($cmd), @cmd($args)))
end
# Applying an endpoint to a value produces a lazy `ShPipe`, so both
# `val |> @grep pat` and `val | @grep pat` work.
(endpoint::ShPipeEndpoint)(val) = ShPipe(val, endpoint.cmd, endpoint.args)
Base.:|(val, endpoint::ShPipeEndpoint) = val |> endpoint
"""
    @special_command cmd

Define and export a convenience macro `@cmd` (e.g. `@grep`, `@sed`) that
joins its arguments into a `Cmd` and returns a `ShPipeEndpoint` wrapping the
corresponding launcher function (`grep`/`sed` defined above).
"""
macro special_command(cmd)
    return quote
        export $(Symbol('@', cmd))
        macro $cmd(args...)
            args = map(args) do arg
                # interpret raw_str as raw string
                if Meta.isexpr(arg, :macrocall) && arg.args[1] === Symbol("@raw_str")
                    arg = arg.args[3]
                end
                # Re-quote plain strings so they survive the round trip
                # through @cmd parsing below.
                return arg isa String ? string('"', arg, '"') : arg
            end
            args = join(args, ' ')
            # `$$cmd` splices the launcher function into the generated body.
            return :(ShPipeEndpoint($$cmd, @cmd($args)))
        end
    end |> esc
end
# Define and export the `@grep` and `@sed` pipe endpoints.
@special_command grep
@special_command sed
end
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
|
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | code | 751 | using NicePipes
using Test
# Platform-dependent line ending used when normalizing `show` output.
@static if Sys.iswindows()
    const LE = "\r\n"
else
    const LE = "\n"
end
"""
    test_show(x, expected)

Render `x` with `show` into a buffer, strip the trailing newline plus
cursor-up escape sequence that NicePipes emits after grep output, and
`@test` that the result equals `expected`.
"""
function test_show(x, expected)
    buf = IOBuffer()
    show(buf, x)
    rendered = String(take!(buf))
    rendered = replace(rendered, LE * "\e[1A" => "")
    @test rendered == expected
end
@testset "NicePipes.jl" begin
    a = ["foo", "bar"]
    # Expected `show` output of `a`: header line plus one line per element.
    show_a = [
        "2-element $(Vector{String}):",
        " \"foo\"",
        " \"bar\"",
    ]
    # grep keeps only the matching line.
    test_show((a | @grep foo), show_a[2])
    # -v inverts the match, -i makes it case-insensitive.
    test_show((a | @grep -iv FoO), join(show_a[[1, 3]], LE))
    # No matching lines: special "No matches found!" message.
    test_show((3 | @grep 4), "No matches found!\n")
    # sed delete command drops the matching line.
    test_show((a | @sed "/foo/d"), join(show_a[[1, 3]], LE))
    # sed substitution with backreferences (raw string avoids escaping).
    test_show((a | @sed raw"s/f\(o\+\)/b\1/g"), show_a[1] * LE * " \"boo\"" * LE * show_a[3])
end
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
|
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | docs | 1702 | # NicePipes
[](https://github.com/simeonschaub/NicePipes.jl/actions)
[](https://codecov.io/gh/simeonschaub/NicePipes.jl)
[](https://simeonschaub.github.io/NicePipes.jl/stable)
[](https://simeonschaub.github.io/NicePipes.jl/dev)
[](https://juliahub.com/ui/Packages/NicePipes/tVmGC)
Pipe REPL `show` output into unix tools:
```julia
julia> using NicePipes
julia> methods(+) | @grep BigFloat
[15] +(c::BigInt, x::BigFloat) in Base.MPFR at mpfr.jl:414
[22] +(a::BigFloat, b::BigFloat, c::BigFloat, d::BigFloat, e::BigFloat) in Base.MPFR at mpfr.jl:564
[23] +(a::BigFloat, b::BigFloat, c::BigFloat, d::BigFloat) in Base.MPFR at mpfr.jl:557
[24] +(a::BigFloat, b::BigFloat, c::BigFloat) in Base.MPFR at mpfr.jl:551
[25] +(x::BigFloat, c::BigInt) in Base.MPFR at mpfr.jl:410
[26] +(x::BigFloat, y::BigFloat) in Base.MPFR at mpfr.jl:379
[27] +(x::BigFloat, c::Union{UInt16, UInt32, UInt64, UInt8}) in Base.MPFR at mpfr.jl:386
[28] +(x::BigFloat, c::Union{Int16, Int32, Int64, Int8}) in Base.MPFR at mpfr.jl:394
[29] +(x::BigFloat, c::Union{Float16, Float32, Float64}) in Base.MPFR at mpfr.jl:402
[61] +(c::Union{UInt16, UInt32, UInt64, UInt8}, x::BigFloat) in Base.MPFR at mpfr.jl:390
[62] +(c::Union{Int16, Int32, Int64, Int8}, x::BigFloat) in Base.MPFR at mpfr.jl:398
[63] +(c::Union{Float16, Float32, Float64}, x::BigFloat) in Base.MPFR at mpfr.jl:406
```
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
|
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | docs | 107 | ```@meta
CurrentModule = NicePipes
```
# NicePipes
```@index
```
```@autodocs
Modules = [NicePipes]
```
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 820 | using Documenter, DynamicNLPModels
# Page layout shared by both documentation builds below.
const _PAGES = [
    "Introduction" => "index.md",
    "Quick Start"=>"guide.md",
    "API Manual" => "api.md"
]
# First build: PDF output via the dockerized LaTeX backend.
makedocs(
    sitename = "DynamicNLPModels",
    authors = "David Cole, Sungho Shin, Francois Pacaud",
    format = Documenter.LaTeX(platform="docker"),
    pages = _PAGES
)
# Second build: HTML site. `clean = false` presumably keeps the output of
# the LaTeX run above from being wiped — verify against Documenter docs.
makedocs(
    sitename = "DynamicNLPModels",
    modules = [DynamicNLPModels],
    authors = "David Cole, Sungho Shin, Francois Pacaud",
    format = Documenter.HTML(
        prettyurls = get(ENV, "CI", nothing) == "true",
        sidebar_sitename = true,
        collapselevel = 1,
    ),
    pages = _PAGES,
    clean = false,
)
# Publish to gh-pages; PR previews enabled.
deploydocs(
    repo = "github.com/MadNLP/DynamicNLPModels.jl.git",
    target = "build",
    devbranch = "main",
    devurl = "dev",
    push_preview = true,
)
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 2952 | using DynamicNLPModels, Random, LinearAlgebra, SparseArrays
using MadNLP, QuadraticModels, MadNLPGPU, CUDA, NLPModels
# Extend MadNLP functions
# MadNLP dense-Jacobian callback: for the dense LQ model the Jacobian is the
# constant matrix stored in `nlp.data.A`, so we just copy it into `jac`.
function MadNLP.jac_dense!(nlp::DenseLQDynamicModel{T, V, M1, M2, M3}, x, jac) where {T, V, M1<: AbstractMatrix, M2 <: AbstractMatrix, M3 <: AbstractMatrix}
    NLPModels.increment!(nlp, :neval_jac)
    J = nlp.data.A
    copyto!(jac, J)
end
# MadNLP dense-Hessian callback: the Hessian is the constant matrix stored in
# `nlp.data.H` (the objective weight is not applied here — presumably safe
# because the problem is solved with obj_weight = 1; verify if reused).
function MadNLP.hess_dense!(nlp::DenseLQDynamicModel{T, V, M1, M2, M3}, x, w1l, hess; obj_weight = 1.0) where {T, V, M1<: AbstractMatrix, M2 <: AbstractMatrix, M3 <: AbstractMatrix}
    NLPModels.increment!(nlp, :neval_hess)
    H = nlp.data.H
    copyto!(hess, H)
end
# Time horizon
N = 3
# Generate a seeded random LTI system; Q, R, and A are made symmetric
# positive definite via X * X' + I.
Random.seed!(10)
Q_rand = Random.rand(2, 2)
Q = Q_rand * Q_rand' + I
R_rand = Random.rand(1, 1)
R = R_rand * R_rand' + I
A_rand = rand(2, 2)
A = A_rand * A_rand' + I
B = rand(2, 1)
# generate upper and lower bounds
sl = rand(2)
ul = fill(-15.0, 1)
su = sl .+ 4
uu = ul .+ 10
s0 = sl .+ 2
# Define K matrix for numerical stability of condensed problem
K = - [1.41175 2.47819;] # found from MatrixEquations.jl; ared(A, B, 1, 1)
# Build the dense (condensed) and sparse LQ dynamic models; K is only needed
# for the condensed formulation.
lq_dense = DenseLQDynamicModel(s0, A, B, Q, R, N; K = K, sl = sl, su = su, ul = ul, uu = uu)
lq_sparse = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, su = su, ul = ul, uu = uu)
# Solve the dense problem
dense_options = Dict{Symbol, Any}(
    :kkt_system => MadNLP.DENSE_CONDENSED_KKT_SYSTEM,
    :linear_solver=> LapackCPUSolver,
    :max_iter=> 50,
    :jacobian_constant=>true,
    :hessian_constant=>true,
    :lapack_algorithm=>MadNLP.CHOLESKY
)
d_ips = MadNLP.InteriorPointSolver(lq_dense, option_dict = dense_options)
sol_ref_dense = MadNLP.optimize!(d_ips)
# Solve the sparse problem
sparse_options = Dict{Symbol, Any}(
    :max_iter=>50,
    :jacobian_constant=>true,
    :hessian_constant=>true,
)
s_ips = MadNLP.InteriorPointSolver(lq_sparse, option_dict = sparse_options)
sol_ref_sparse = MadNLP.optimize!(s_ips)
# Solve the dense problem on the GPU
gpu_options = Dict{Symbol, Any}(
    :kkt_system=>MadNLP.DENSE_CONDENSED_KKT_SYSTEM,
    :linear_solver=>LapackGPUSolver,
    :max_iter=>50,
    :jacobian_constant=>true,
    :hessian_constant=>true,
    :lapack_algorithm=>MadNLP.CHOLESKY
)
gpu_ips = MadNLPGPU.CuInteriorPointSolver(lq_dense, option_dict = gpu_options)
sol_ref_gpu = MadNLP.optimize!(gpu_ips)
# All three solves should agree; print states and inputs for comparison.
println("States from dense problem on CPU are  ", get_s(sol_ref_dense, lq_dense))
println("States from dense problem on GPU are  ", get_s(sol_ref_gpu, lq_dense))
println("States from sparse problem on CPU are ", get_s(sol_ref_sparse, lq_sparse))
println()
println("Inputs from dense problem on CPU are  ", get_u(sol_ref_dense, lq_dense))
println("Inputs from dense problem on GPU are  ", get_u(sol_ref_gpu, lq_dense))
println("Inputs from sparse problem on CPU are ", get_u(sol_ref_sparse, lq_sparse))
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 542 | module DynamicNLPModels
import NLPModels
import QuadraticModels
import LinearAlgebra
import SparseArrays
import LinearOperators
import CUDA
import CUDA: CUBLAS
import SparseArrays: SparseMatrixCSC
# Public API: problem data, model constructors, and solution accessors.
export LQDynamicData, SparseLQDynamicModel, DenseLQDynamicModel
export get_u, get_s, get_jacobian, add_jtsj!, reset_s0!
# Implementation files (src/LinearQuadratic/).
include(joinpath("LinearQuadratic", "LinearQuadratic.jl"))
include(joinpath("LinearQuadratic", "sparse.jl"))
include(joinpath("LinearQuadratic", "dense.jl"))
include(joinpath("LinearQuadratic", "tools.jl"))
end # module
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 12791 | abstract type AbstractLQDynData{T, V} end
@doc raw"""
    LQDynamicData{T,V,M,MK} <: AbstractLQDynData{T,V}

A struct to represent the features of the optimization problem

```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, ..., N-1\\
&\; s^l \le s \le s^u \\
&\; u^l \le u \le u^u \\
&\; s_0 = s0
\end{aligned}
```

---

Attributes include:
- `s0`: initial state of system
- `A` : constraint matrix for system states
- `B` : constraint matrix for system inputs
- `Q` : objective function matrix for system states from 0:(N-1)
- `R` : objective function matrix for system inputs from 0:(N-1)
- `N` : number of time steps
- `Qf`: objective function matrix for system state at time N
- `S` : objective function matrix for system states and inputs
- `ns`: number of state variables
- `nu`: number of input variables
- `E` : constraint matrix for state variables
- `F` : constraint matrix for input variables
- `K` : feedback gain matrix
- `w` : constant term for dynamic constraints
- `sl`: vector of lower bounds on state variables
- `su`: vector of upper bounds on state variables
- `ul`: vector of lower bounds on input variables
- `uu`: vector of upper bounds on input variables
- `gl`: vector of lower bounds on constraints
- `gu`: vector of upper bounds on constraints

see also `LQDynamicData(s0, A, B, Q, R, N; ...)`
"""
struct LQDynamicData{T, V, M, MK} <: AbstractLQDynData{T, V}
    s0::V
    A::M
    B::M
    Q::M
    R::M
    N::Int
    Qf::M
    S::M
    ns::Int
    nu::Int
    E::M
    F::M
    K::MK
    w::V
    sl::V
    su::V
    ul::V
    uu::V
    gl::V
    gu::V
end
@doc raw"""
    LQDynamicData(s0, A, B, Q, R, N; ...) -> LQDynamicData{T, V, M, MK}

A constructor for building an object of type `LQDynamicData` for the optimization problem

```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```

---

- `s0`: initial state of system
- `A` : constraint matrix for system states
- `B` : constraint matrix for system inputs
- `Q` : objective function matrix for system states from 0:(N-1)
- `R` : objective function matrix for system inputs from 0:(N-1)
- `N` : number of time steps

The following attributes of the `LQDynamicData` type are detected automatically from the length of s0 and size of R

- `ns`: number of state variables
- `nu`: number of input variables

The following keyword arguments are also accepted

- `Qf = Q`: objective function matrix for system state at time N; dimensions must be ns x ns
- `S = nothing`: objective function matrix for system state and inputs
- `E = zeros(eltype(Q), 0, ns)` : constraint matrix for state variables
- `F = zeros(eltype(Q), 0, nu)` : constraint matrix for input variables
- `K = nothing` : feedback gain matrix
- `w = zeros(eltype(Q), ns * N)` : constant term for dynamic constraints
- `sl = fill(-Inf, ns)`: vector of lower bounds on state variables
- `su = fill(Inf, ns)` : vector of upper bounds on state variables
- `ul = fill(-Inf, nu)`: vector of lower bounds on input variables
- `uu = fill(Inf, nu)` : vector of upper bounds on input variables
- `gl = fill(-Inf, size(E, 1))` : vector of lower bounds on constraints
- `gu = fill(Inf, size(E, 1))` : vector of upper bounds on constraints

Throws an error when any argument dimensions or bounds are inconsistent.
"""
function LQDynamicData(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    # Consistency fix: size from E (as for gl) rather than F; E and F are
    # required to have the same number of rows anyway (checked below).
    gu::V = (similar(s0, size(E, 1)) .= Inf),
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    # --- objective matrices ---
    if size(Q, 1) != size(Q, 2)
        error("Q matrix is not square")
    end
    # Bug fix: this check previously compared size(R, 1) with itself, so a
    # non-square R was never rejected.
    if size(R, 1) != size(R, 2)
        error("R matrix is not square")
    end

    # --- dynamics dimensions ---
    if size(A, 2) != length(s0)
        error("Number of columns of A are not equal to the number of states")
    end
    if size(B, 2) != size(R, 1)
        error("Number of columns of B are not equal to the number of inputs")
    end
    if length(s0) != size(Q, 1)
        error("size of Q is not consistent with length of s0")
    end

    # --- variable bounds ---
    if !all(sl .<= su)
        error("lower bound(s) on s is > upper bound(s)")
    end
    if !all(ul .<= uu)
        error("lower bound(s) on u is > upper bound(s)")
    end
    if !all(sl .<= s0) || !all(s0 .<= su)
        error("s0 is not within the given upper and lower bounds")
    end

    # --- algebraic constraints gl <= E s + F u <= gu ---
    if size(E, 1) != size(F, 1)
        error("E and F have different numbers of rows")
    end
    if !all(gl .<= gu)
        error("lower bound(s) on Es + Fu is > upper bound(s)")
    end
    if size(E, 2) != size(Q, 1)
        error("Dimensions of E are not the same as number of states")
    end
    if size(F, 2) != size(R, 1)
        error("Dimensions of F are not the same as the number of inputs")
    end
    if length(gl) != size(E, 1)
        error("Dimensions of gl do not match E and F")
    end
    if length(gu) != size(E, 1)
        error("Dimensions of gu do not match E and F")
    end

    # --- cross terms and feedback gain ---
    if size(S, 1) != size(Q, 1) || size(S, 2) != size(R, 1)
        error("Dimensions of S do not match dimensions of Q and R")
    end
    # Idiomatic identity check (was `K != nothing`).
    if K !== nothing
        if size(K, 1) != size(R, 1) || size(K, 2) != size(Q, 1)
            error("Dimensions of K do not match number of states and inputs")
        end
    end
    # w holds one ns-length constant term per time step (message previously
    # said "ns" only; the redundant Int(...) casts are also dropped).
    if length(w) != length(s0) * N
        error("Dimensions of w do not match ns * N")
    end

    ns = size(Q, 1)
    nu = size(R, 1)

    LQDynamicData{T, V, M, MK}(
        s0,
        A,
        B,
        Q,
        R,
        N,
        Qf,
        S,
        ns,
        nu,
        E,
        F,
        K,
        w,
        sl,
        su,
        ul,
        uu,
        gl,
        gu,
    )
end
# Common supertype for the sparse and dense LQ dynamic models; both behave as
# quadratic models from QuadraticModels.jl.
abstract type AbstractDynamicModel{T, V} <: QuadraticModels.AbstractQuadraticModel{T, V} end

"""
    SparseLQDynamicModel{T, V, M1, M2, M3, MK} <: AbstractDynamicModel{T, V}

Sparse formulation of the LQ dynamic optimization problem. Bundles the NLP
metadata and evaluation counters, the quadratic-program data (`QPData`), and
the original problem description (`LQDynamicData`).
"""
struct SparseLQDynamicModel{T, V, M1, M2, M3, MK} <: AbstractDynamicModel{T, V}
    meta::NLPModels.NLPModelMeta{T, V}
    counters::NLPModels.Counters
    data::QuadraticModels.QPData{T, V, M1, M2}
    dynamic_data::LQDynamicData{T, V, M3, MK}
end
"""
    DenseLQDynamicBlocks{T, V, M}

Struct containing block matrices used for creating and resetting the `DenseLQDynamicModel`. A and B matrices are given in part by
Jerez, Kerrigan, and Constantinides in section 4 of "A sparse and condensed QP formulation for predictive control of LTI systems"
(doi:10.1016/j.automatica.2012.03.010). States are eliminated by the equation ``x = Ax_0 + Bu + \\hat{A}w`` where ``x = [x_0^T, x_1^T, ..., x_N^T]``
and ``u = [u_0^T, u_1^T, ..., u_{N-1}^T]``

---

- `A` : block A matrix given by Jerez et al. with ``n_s(N + 1)`` rows and ns columns
- `B` : block B matrix given by Jerez et al. with ``n_s(N)`` rows and nu columns
- `Aw` : length ``n_s(N + 1)`` vector corresponding to the linear term of the dynamic constraints
- `h` : ``n_u(N) \\times n_s`` matrix for building the linear term of the objective function. Just needs to be
multiplied by `s0`.
- `h01`: ns x ns matrix for building the constant term fo the objective function. This can be found by
taking ``s_0^T`` `h01` ``s_0``
- `h02`: similar to `h01`, but one side is multiplied by `Aw` rather than by `As0`. This will just
be multiplied by `s0` once
- `h_constant` : linear term in the objective function that arises from `Aw`. Not a function of `s0`
- `h0_constant`: constant term in the objective function that arises from `Aw`. Not a function of `s0`
- `d` : length ``n_c(N)`` term for the constraint bounds corresponding to `E` and `F`. Must be multiplied by `s0` and
subtracted from `gl` and `gu`. Equal to the blocks (E + FK) A (see Jerez et al.)
- `dw` : length ``n_c(N)`` term for the constraint bounds that arises from `w`. Equal to the blocks (E + FK) Aw
- `KA` : size ``n_u(N)`` x ns matrix. Needs to be multiplied by `s0` and subtracted from `ul` and `uu` to update
the algebraic constraints corresponding to the input bounds
- `KAw`: similar to `KA`, but it is multiplied by Aw rather than A

See also `reset_s0!`
"""
# Mutable so its fields can be reassigned when the model is reset for a new
# initial state (see `reset_s0!`).
mutable struct DenseLQDynamicBlocks{T, V, M}
    A::M
    B::M
    Aw::V # Aw = block_matrix_A * w (result is a Vector; block_matrix A is like block_B, but with I instead of B)
    h::M # h = (QB + SKB + K^T R K B + K^T S^T B)^T A + (S + K^T R)^T A
    h01::M # h01 = A^T((Q + KTRK + KTST + SK))A where Q, K, R, S, and A are block matrices just needs to be multiplied by s0 on each side
    h02::V # h02 = wT block_matrix_AT (Q + KTRK + KTSK + SK) A; just needs to be multiplied by s0 on right
    h_constant::V # h_constant = BT (Q + KTRK + SK + KTST) block_matrix_A w + (RK + ST)B block_matrix_A w
    h0_constant::T # h0_constant = wT block_matrix_AT (Q + KTRK + KTSK + SK) block_matrix_A w
    d::M # d = (E + FK) A
    dw::V # dw = (E + FK) block_matrix_A w - constant term to be subtracted from d
    KA::M
    KAw::V
end
"""
    DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK} <: AbstractDynamicModel{T, V}

Dense (condensed) formulation of the LQ dynamic optimization problem. In
addition to the NLP metadata, counters, QP data, and the original
`LQDynamicData`, it stores the `DenseLQDynamicBlocks` used to build and
reset the condensed problem.
"""
struct DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK} <: AbstractDynamicModel{T, V}
    meta::NLPModels.NLPModelMeta{T, V}
    counters::NLPModels.Counters
    data::QuadraticModels.QPData{T, V, M1, M2}
    dynamic_data::LQDynamicData{T, V, M3, MK}
    blocks::DenseLQDynamicBlocks{T, V, M4}
end
"""
    LQJacobianOperator{T, M, A}

Struct for storing the implicit Jacobian matrix. All data for the Jacobian can be stored
in the first `nu` columns of each Jacobian block. This struct contains the needed data and storage arrays for
calculating ``Jx``, ``J^T x``, and ``J^T \\Sigma J``. ``Jx`` and ``J^T x`` are performed through extensions
to `LinearAlgebra.mul!()`.

---

Attributes
- `truncated_jac1`: Jacobian blocks (first `nu` columns) corresponding to the ``Ex + Fu`` constraints
- `truncated_jac2`: Jacobian blocks (first `nu` columns) corresponding to state variable bounds
- `truncated_jac3`: Jacobian blocks (first `nu` columns) corresponding to input variable bounds
- `N`  : number of time steps
- `nu` : number of inputs
- `nc` : number of algebraic constraints of the form gl <= Es + Fu <= gu
- `nsc`: number of bounded state variables
- `nuc`: number of bounded input variables (if `K` is defined)
- `SJ1`: placeholder for storing data when calculating `ΣJ`
- `SJ2`: placeholder for storing data when calculating `ΣJ`
- `SJ3`: placeholder for storing data when calculating `ΣJ`
- `H_sub_block`: placeholder for storing data when adding `J^T ΣJ` to the Hessian
"""
struct LQJacobianOperator{T, M, A} <: LinearOperators.AbstractLinearOperator{T}
    truncated_jac1::A # tensor of Jacobian blocks corresponding Ex + Fu constraints
    truncated_jac2::A # tensor of Jacobian blocks corresponding to state variable limits
    truncated_jac3::A # tensor of Jacobian blocks corresponding to input variable limits
    N::Int  # number of time steps
    nu::Int # number of inputs
    nc::Int # number of inequality constraints
    nsc::Int # number of state variables that are constrained
    nuc::Int # number of input variables that are constrained
    # Storage tensors for building Jx and J^Tx
    x1::A
    x2::A
    x3::A
    y::A
    # Storage tensors for building J^TΣJ
    SJ1::M
    SJ2::M
    SJ3::M
    # Storage block for adding J^TΣJ to H
    H_sub_block::M
end
# Allocate a `dim1 × dim2 × dim3` array shaped like `mat` and zero-fill it.
function _init_similar(mat, dim1::Number, dim2::Number, dim3::Number, T::DataType)
    return fill!(similar(mat, dim1, dim2, dim3), zero(T))
end
# Allocate a `dim1 × dim2` matrix shaped like `mat` and zero-fill it.
# `T` defaults to the element type of `mat`.
function _init_similar(mat, dim1::Number, dim2::Number, T = eltype(mat))
    zeroed = similar(mat, dim1, dim2)
    fill!(zeroed, zero(T))
    return zeroed
end
# Allocate a length-`dim1` vector shaped like `mat` and zero-fill it.
# `T` defaults to the element type of `mat`.
function _init_similar(mat, dim1::Number, T = eltype(mat))
    return fill!(similar(mat, dim1), zero(T))
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 36525 | @doc raw"""
DenseLQDynamicModel(dnlp::LQDynamicData; implicit = false) -> DenseLQDynamicModel
DenseLQDynamicModel(s0, A, B, Q, R, N; implicit = false ...) -> DenseLQDynamicModel
A constructor for building a `DenseLQDynamicModel <: QuadraticModels.AbstractQuadraticModel`
Input data is for the problem of the form
```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```
---
Data is converted to the form
```math
\begin{aligned}
\min &\; \frac{1}{2} z^T H z \\
\textrm{s.t.} &\; \textrm{lcon} \le Jz \le \textrm{ucon}\\
&\; \textrm{lvar} \le z \le \textrm{uvar}
\end{aligned}
```
Resulting `H`, `J`, `h`, and `h0` matrices are stored within `QuadraticModels.QPData` as `H`, `A`, `c`, and `c0` attributes respectively
If `K` is defined, then `u` variables are replaced by `v` variables. The bounds on `u` are transformed into algebraic constraints,
and `u` can be queried by `get_u` and `get_s` within `DynamicNLPModels.jl`
Keyword argument `implicit = false` determines how the Jacobian is stored within the `QPData`. If `implicit = false`, the full, dense
Jacobian matrix is stored. If `implicit = true`, only the first `nu` columns of the Jacobian are stored with the Linear Operator `LQJacobianOperator`.
"""
# Dispatch on `implicit` to build either the fully dense Jacobian model or the
# implicit (LQJacobianOperator-backed) model from an existing `LQDynamicData`.
#
# Fix: the original `where` clause declared a static parameter
# `MK <: Union{Nothing, AbstractMatrix{T}}` that never appears in the
# signature, so it could never be bound — it was dead and is removed here.
function DenseLQDynamicModel(
    dnlp::LQDynamicData{T, V, M};
    implicit::Bool = false,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}}
    if implicit
        _build_implicit_dense_lq_dynamic_model(dnlp)
    else
        _build_dense_lq_dynamic_model(dnlp)
    end
end
# Convenience constructor: assembles an `LQDynamicData` from the raw problem
# matrices (with optional cost/constraint/feedback data supplied as keywords)
# and forwards to `DenseLQDynamicModel(dnlp; implicit)`.
# Unspecified bounds default to +/-Inf, i.e. unconstrained.
function DenseLQDynamicModel(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    gu::V = (similar(s0, size(F, 1)) .= Inf),
    implicit = false,
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    dnlp = LQDynamicData(
        s0,
        A,
        B,
        Q,
        R,
        N;
        Qf = Qf,
        S = S,
        E = E,
        F = F,
        K = K,
        w = w,
        sl = sl,
        su = su,
        ul = ul,
        uu = uu,
        gl = gl,
        gu = gu,
    )
    DenseLQDynamicModel(dnlp; implicit = implicit)
end
# Builds the condensed (dense) QP for an `LQDynamicData` without a feedback
# gain (`K === nothing`). States are eliminated through the propagation
# blocks (state trajectory = block_A * s0 + block_B * u + block_Aw), so the
# decision vector is the stacked inputs `u` (length `nu * N`). Finite state
# bounds become extra algebraic-constraint rows of `J` below the E/F rows.
function _build_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # Mask of state entries carrying at least one finite bound.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)
    H = H_blocks.H
    c0 = H_blocks.c0
    # Cache the objective blocks so the condensed data can be refreshed cheaply
    # when `s0` is redefined.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant
    G = _init_similar(Q, nc * N, nu, T)
    J = _init_similar(Q, nc * N + num_real_bounds_s * N, nu * N, T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    _set_J1_dense!(J, G, N)
    As0 = _init_similar(s0, ns * (N + 1), T)
    LinearAlgebra.mul!(As0, block_A, s0)
    # Input bounds stay as simple variable bounds (no K, so u is the variable).
    lvar = repeat(ul, N)
    uvar = repeat(uu, N)
    # Convert state variable constraints to algebraic constraints
    offset_s = N * nc
    if num_real_bounds_s == length(sl)
        # Fast path: every state entry is bounded, so no masking is needed.
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            J[
                (offset_s + 1 + (i - 1) * ns):(offset_s + ns * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(block_B[1:(ns * (N - i + 1)), :])
        end
    else
        # Keep only the rows of each block that correspond to bounded states.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            for j = 1:(N - i + 1)
                J[
                    (offset_s + 1 + (i + j - 2) * num_real_bounds_s):(offset_s + (i + j - 1) * num_real_bounds_s),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(block_B[(1 + (j - 1) * ns):(j * ns), :][bool_vec_s, :])
            end
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    # Shift the state bounds by the constant response (A^i s0 + accumulated w).
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    lcon = _init_similar(s0, length(dl) + length(lcon2), T)
    ucon = _init_similar(s0, length(du) + length(ucon2), T)
    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du
    if length(lcon2) > 0
        lcon[(1 + length(dl)):(length(dl) + num_real_bounds_s * N)] = lcon2
        ucon[(1 + length(du)):(length(du) + num_real_bounds_s * N)] = ucon2
    end
    nvar = nu * N
    nnzj = size(J, 1) * size(J, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)
    ncon = size(J, 1)
    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Builds the condensed (dense) QP for an `LQDynamicData` with a feedback gain
# `K` (u = K s + v). The decision variables become the stacked `v`, so the
# original input bounds on `u` can no longer be variable bounds and are
# converted into algebraic constraints, appended after the E/F rows and the
# state-bound rows.
function _build_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix{T}}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    block_KAw = dense_blocks.KAw
    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)
    H = H_blocks.H
    c0 = H_blocks.c0
    # Cache objective blocks for cheap updates when `s0` is redefined.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant
    # Masks of the state/input entries that carry at least one finite bound.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)
    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds_u = sum(bool_vec_u)
    G = _init_similar(Q, nc * N, nu, T)
    J = _init_similar(Q, (nc + num_real_bounds_s + num_real_bounds_u) * N, nu * N, T)
    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    KAs0_bounds = _init_similar(s0, num_real_bounds_u * N, T)
    KBI = _init_similar(Q, nu * N, nu, T)
    KAs0 = _init_similar(s0, nu * N, T)
    KAs0_block = _init_similar(s0, nu, T)
    KB = _init_similar(Q, nu, nu, T)
    I_mat = _init_similar(Q, nu, nu, T)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    _set_J1_dense!(J, G, N)
    LinearAlgebra.mul!(As0, block_A, s0)
    # Convert state variable constraints to algebraic constraints
    offset_s = nc * N
    if num_real_bounds_s == length(sl)
        # Fast path: every state entry is bounded, so no masking is needed.
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            J[
                (offset_s + 1 + (i - 1) * ns):(offset_s + ns * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(block_B[1:(ns * (N - i + 1)), :])
        end
    else
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] =
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            for j = 1:(N - i + 1)
                J[
                    (offset_s + 1 + (i + j - 2) * num_real_bounds_s):(offset_s + (i + j - 1) * num_real_bounds_s),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(block_B[(1 + (j - 1) * ns):(j * ns), :][bool_vec_s, :])
            end
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end
    # Convert bounds on u to algebraic constraints
    # (u = K s + v, so u depends linearly on v through K * block_B; the first
    # block is the identity because v_0 enters u_0 directly).
    for i = 1:N
        if i == 1
            KB = I_mat
        else
            B_row_range = (1 + (i - 2) * ns):((i - 1) * ns)
            B_sub_block = view(block_B, B_row_range, :)
            LinearAlgebra.mul!(KB, K, B_sub_block)
        end
        KBI[(1 + nu * (i - 1)):(nu * i), :] = KB
        LinearAlgebra.mul!(KAs0_block, K, As0[(1 + ns * (i - 1)):(ns * i)])
        KAs0[(1 + nu * (i - 1)):(nu * i)] = KAs0_block
    end
    offset_u = nc * N + num_real_bounds_s * N
    if num_real_bounds_u == length(ul)
        KAs0_bounds .= KAs0 .+ block_KAw
        for i = 1:N
            J[
                (offset_u + 1 + (i - 1) * nu):(offset_u + nu * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(KBI[1:(nu * (N - i + 1)), :])
        end
    else
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_u):(i * num_real_bounds_u)
            KAs0_bounds[row_range] =
                KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .+
                block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
            for j = 1:(N - i + 1)
                J[
                    (offset_u + 1 + (i + j - 2) * num_real_bounds_u):(offset_u + (i + j - 1) * num_real_bounds_u),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(KBI[(1 + (j - 1) * nu):(j * nu), :][bool_vec_u, :])
            end
        end
        ul = ul[bool_vec_u]
        uu = uu[bool_vec_u]
    end
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)
    # Shift bounds by the constant responses of the states and of u = K s + v.
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, KAs0_bounds, lcon3)
    LinearAlgebra.axpy!(-1, KAs0_bounds, ucon3)
    lcon = _init_similar(s0, size(J, 1), T)
    ucon = _init_similar(s0, size(J, 1), T)
    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du
    if length(lcon2) > 0
        lcon[(length(dl) + 1):(length(dl) + length(lcon2))] = lcon2
        ucon[(length(du) + 1):(length(du) + length(ucon2))] = ucon2
    end
    if length(lcon3) > 0
        lcon[(length(dl) + length(lcon2) + 1):(length(dl) + length(lcon2) + length(
            lcon3,
        ))] = lcon3
        ucon[(length(du) + length(ucon2) + 1):(length(du) + length(ucon2) + length(
            ucon3,
        ))] = ucon3
    end
    nvar = nu * N
    nnzj = size(J, 1) * size(J, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)
    ncon = size(J, 1)
    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c
    # Note: no lvar/uvar here — the variables are v, which are unbounded; the
    # original u bounds live in the constraint rows built above.
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Builds the condensed QP without a feedback gain, storing the Jacobian
# implicitly: only the first `nu` columns of each block row are kept inside an
# `LQJacobianOperator` (Jac1 for E/F rows, Jac2 for state bounds; Jac3 is
# empty since there is no K and u bounds remain variable bounds).
function _build_implicit_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    nvar = nu * N
    # Mask of state entries carrying at least one finite bound.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)
    G = _init_similar(Q, nc * N, nu, T)
    # Jacobian block tensors (one nu-column slab per time step).
    Jac1 = _init_similar(Q, nc, nu, N, T)
    Jac2 = _init_similar(Q, num_real_bounds_s, nu, N, T)
    Jac3 = _init_similar(Q, 0, nu, N, T)
    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    c = _init_similar(s0, nvar, T)
    x0 = _init_similar(s0, nvar, T)
    lcon = _init_similar(s0, nc * N + num_real_bounds_s * N, T)
    ucon = _init_similar(s0, nc * N + num_real_bounds_s * N, T)
    # Workspace tensors handed to the LQJacobianOperator.
    x1 = _init_similar(Q, nc, 1, N, T)
    x2 = _init_similar(Q, num_real_bounds_s, 1, N, T)
    x3 = _init_similar(Q, 0, 1, N, T)
    y = _init_similar(Q, nu, 1, N, T)
    SJ1 = _init_similar(Q, nc, nu, T)
    SJ2 = _init_similar(Q, num_real_bounds_s, nu, T)
    SJ3 = _init_similar(Q, 0, nu, T)
    H_sub_block = _init_similar(Q, nu, nu, T)
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)
    H = H_blocks.H
    c0 = H_blocks.c0
    # Cache objective blocks for cheap updates when `s0` is redefined.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant
    dl = repeat(gl, N)
    du = repeat(gu, N)
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    for i = 1:N
        Jac1[:, :, i] = @view G[(1 + nc * (i - 1)):(nc * i), :]
    end
    LinearAlgebra.mul!(As0, block_A, s0)
    # Input bounds stay as variable bounds (no K, so u is the variable).
    lvar = repeat(ul, N)
    uvar = repeat(uu, N)
    # Convert state variable constraints to algebraic constraints
    if num_real_bounds_s == length(sl)
        # Fast path: every state entry is bounded, so no masking is needed.
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            Jac2[:, :, i] = @view block_B[(1 + ns * (i - 1)):(ns * i), :]
        end
    else
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            Jac2[:, :, i] = @view block_B[(1 + (i - 1) * ns):(i * ns), :][bool_vec_s, :]
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    # Shift the state bounds by the constant response (A^i s0 + accumulated w).
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du
    if length(lcon2) > 0
        lcon[(1 + length(dl)):(length(dl) + num_real_bounds_s * N)] = lcon2
        ucon[(1 + length(du)):(length(du) + num_real_bounds_s * N)] = ucon2
    end
    ncon = (nc + num_real_bounds_s) * N
    nnzj = ncon * size(H, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)
    J = LQJacobianOperator{T, M, AbstractArray{T}}(
        Jac1,
        Jac2,
        Jac3,
        N,
        nu,
        nc,
        num_real_bounds_s,
        0,
        x1,
        x2,
        x3,
        y,
        SJ1,
        SJ2,
        SJ3,
        H_sub_block,
    )
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = x0,
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Builds the condensed QP with a feedback gain `K` (u = K s + v), storing the
# Jacobian implicitly via `LQJacobianOperator`: Jac1 holds the E/F blocks,
# Jac2 the state-bound blocks, and Jac3 the input-bound blocks (u bounds must
# become constraints since the variables are v).
function _build_implicit_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix{T}}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    block_KAw = dense_blocks.KAw
    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)
    H = H_blocks.H
    c0 = H_blocks.c0
    # Cache objective blocks for cheap updates when `s0` is redefined.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant
    # Masks of the state/input entries that carry at least one finite bound.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)
    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds_u = sum(bool_vec_u)
    G = _init_similar(Q, nc * N, nu, T)
    # Jacobian block tensors (one nu-column slab per time step).
    Jac1 = _init_similar(Q, nc, nu, N, T)
    Jac2 = _init_similar(Q, num_real_bounds_s, nu, N, T)
    Jac3 = _init_similar(Q, num_real_bounds_u, nu, N, T)
    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    KAs0_bounds = _init_similar(s0, num_real_bounds_u * N, T)
    KBI = _init_similar(Q, nu * N, nu, T)
    KAs0 = _init_similar(s0, nu * N, T)
    KAs0_block = _init_similar(s0, nu, T)
    KB = _init_similar(Q, nu, nu, T)
    lcon = _init_similar(s0, (nc + num_real_bounds_s + num_real_bounds_u) * N, T)
    ucon = _init_similar(s0, (nc + num_real_bounds_s + num_real_bounds_u) * N, T)
    I_mat = _init_similar(Q, nu, nu, T)
    # Workspace tensors handed to the LQJacobianOperator.
    x1 = _init_similar(Q, nc, 1, N, T)
    x2 = _init_similar(Q, num_real_bounds_s, 1, N, T)
    x3 = _init_similar(Q, num_real_bounds_u, 1, N, T)
    y = _init_similar(Q, nu, 1, N, T)
    SJ1 = _init_similar(Q, nc, nu, T)
    SJ2 = _init_similar(Q, num_real_bounds_s, nu, T)
    SJ3 = _init_similar(Q, num_real_bounds_u, nu, T)
    H_sub_block = _init_similar(Q, nu, nu, T)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    for i = 1:N
        Jac1[:, :, i] = @view G[(1 + nc * (i - 1)):(nc * i), :]
    end
    LinearAlgebra.mul!(As0, block_A, s0)
    # Convert state variable constraints to algebraic constraints
    offset_s = nc * N
    if num_real_bounds_s == length(sl)
        # Fast path: every state entry is bounded, so no masking is needed.
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            Jac2[:, :, i] = @view block_B[(1 + ns * (i - 1)):(ns * i), :]
        end
    else
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            Jac2[:, :, i] = @view block_B[(1 + (i - 1) * ns):(i * ns), :][bool_vec_s, :]
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end
    # Convert bounds on u to algebraic constraints
    # (u = K s + v depends on v through K * block_B; first block is identity).
    for i = 1:N
        if i == 1
            KB = I_mat
        else
            B_row_range = (1 + (i - 2) * ns):((i - 1) * ns)
            B_sub_block = view(block_B, B_row_range, :)
            LinearAlgebra.mul!(KB, K, B_sub_block)
        end
        KBI[(1 + nu * (i - 1)):(nu * i), :] = KB
        LinearAlgebra.mul!(KAs0_block, K, As0[(1 + ns * (i - 1)):(ns * i)])
        KAs0[(1 + nu * (i - 1)):(nu * i)] = KAs0_block
    end
    offset_u = nc * N + num_real_bounds_s * N
    if num_real_bounds_u == length(ul)
        KAs0_bounds .= KAs0 .+ block_KAw
        for i = 1:N
            Jac3[:, :, i] = @view KBI[(1 + (i - 1) * nu):(i * nu), :]
        end
    else
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_u):(i * num_real_bounds_u)
            KAs0_bounds[row_range] =
                KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .+
                block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
            Jac3[:, :, i] = @view KBI[(1 + (i - 1) * nu):(i * nu), :][bool_vec_u, :]
        end
        ul = ul[bool_vec_u]
        uu = uu[bool_vec_u]
    end
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)
    # Shift bounds by the constant responses of the states and of u = K s + v.
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, KAs0_bounds, lcon3)
    LinearAlgebra.axpy!(-1, KAs0_bounds, ucon3)
    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du
    if length(lcon2) > 0
        lcon[(length(dl) + 1):(length(dl) + length(lcon2))] = lcon2
        ucon[(length(du) + 1):(length(du) + length(ucon2))] = ucon2
    end
    if length(lcon3) > 0
        lcon[(length(dl) + length(lcon2) + 1):(length(dl) + length(lcon2) + length(
            lcon3,
        ))] = lcon3
        ucon[(length(du) + length(ucon2) + 1):(length(du) + length(ucon2) + length(
            ucon3,
        ))] = ucon3
    end
    nvar = nu * N
    ncon = (nc + num_real_bounds_s + num_real_bounds_u) * N
    # (H is square, so size(H, 1) == size(H, 2).)
    nnzj = ncon * size(H, 1)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)
    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c
    J = LQJacobianOperator{T, M, AbstractArray{T}}(
        Jac1,
        Jac2,
        Jac3,
        N,
        nu,
        nc,
        num_real_bounds_s,
        num_real_bounds_u,
        x1,
        x2,
        x3,
        y,
        SJ1,
        SJ2,
        SJ3,
        H_sub_block,
    )
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Builds the stacked propagation blocks used by the condensed formulation.
# With A_k = A + B K: `block_A` stacks A_k^0 (= I) through A_k^N, `block_B`
# stacks A_k^(i-1) * B for i = 1..N, and `block_Aw` accumulates the effect of
# the disturbances `w` through those powers. `block_KA`/`block_KAw` hold K
# applied to the corresponding blocks; `block_d`/`block_dw` and the objective
# blocks are only allocated here (they are filled by `_set_G_blocks!` /
# `_build_H_blocks`). Returns a `DenseLQDynamicBlocks`.
#
# Fix: use identity comparison `K === nothing` instead of the `==` anti-pattern.
# NOTE(review): the explicit writes into rows `ns+1:2ns` of `block_A`/`block_B`
# before the loop appear to assume `N >= 2` — confirm callers never pass N = 1.
function _build_block_matrices(
    A::M,
    B::M,
    K,
    N,
    w::V,
    nc,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}}
    ns = size(A, 2)
    nu = size(B, 2)
    # Without a feedback gain, treat K as zero so that A + BK == A below.
    if K === nothing
        K = _init_similar(A, nu, ns, T)
    end
    # Define block matrices
    block_A = _init_similar(A, ns * (N + 1), ns, T)
    block_B = _init_similar(B, ns * N, nu, T)
    block_Aw = _init_similar(w, ns * (N + 1), T)
    block_h = _init_similar(A, nu * N, ns, T)
    block_h01 = _init_similar(A, ns, ns, T)
    block_h02 = _init_similar(w, ns, T)
    h_const = _init_similar(w, nu * N, T)
    h0_const = T(0)
    block_d = _init_similar(A, nc * N, ns, T)
    block_dw = _init_similar(w, nc * N, T)
    block_KA = _init_similar(A, nu * N, ns, T)
    block_KAw = _init_similar(w, nu * N, T)
    A_k = copy(A)
    BK = _init_similar(A, ns, ns, T)
    KA = _init_similar(A, nu, ns, T)
    KAw = _init_similar(w, nu, T)
    Aw = _init_similar(A, ns, T)
    AB_klast = _init_similar(A, size(B, 1), size(B, 2), T)
    AB_k = _init_similar(A, size(B, 1), size(B, 2), T)
    block_B[1:ns, :] = B
    # Top block of block_A is the identity (A_k^0).
    block_A[LinearAlgebra.diagind(block_A)] .= T(1)
    # A_k = A + B K (closed-loop transition matrix).
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, A_k)
    A_klast = copy(A_k)
    A_knext = copy(A_k)
    block_A[(ns + 1):(ns * 2), :] = A_k
    LinearAlgebra.mul!(AB_k, A_k, B, 1, 0)
    block_B[(1 + ns):(2 * ns), :] = AB_k
    AB_klast = copy(AB_k)
    # Fill the A and B matrices
    LinearAlgebra.mul!(KA, K, A_k)
    block_KA[1:nu, :] .= K
    block_KA[(1 + nu):(2 * nu), :] .= KA
    for i = 2:(N - 1)
        # Advance one power: A_k^i and A_k^(i-1) * B.
        LinearAlgebra.mul!(AB_k, A_k, AB_klast)
        LinearAlgebra.mul!(A_knext, A_k, A_klast)
        block_A[(ns * i + 1):(ns * (i + 1)), :] = A_knext
        block_B[(1 + (i) * ns):((i + 1) * ns), :] = AB_k
        LinearAlgebra.mul!(KA, K, A_knext)
        block_KA[(1 + nu * i):(nu * (i + 1)), :] .= KA
        AB_klast = copy(AB_k)
        A_klast = copy(A_knext)
    end
    LinearAlgebra.mul!(A_knext, A_k, A_klast)
    block_A[(ns * N + 1):(ns * (N + 1)), :] = A_knext
    # Accumulate the disturbance response: block_Aw[j] = sum_i A_k^(i-1) w_(j-i).
    for i = 1:N
        A_view = @view block_A[(1 + (i - 1) * ns):(i * ns), :]
        for j = (i + 1):(N + 1)
            LinearAlgebra.mul!(Aw, A_view, w[(1 + (j - i - 1) * ns):((j - i) * ns)])
            block_Aw[(1 + (j - 1) * ns):(j * ns)] .+= Aw
        end
    end
    # Apply K to each disturbance-response block.
    for i = 1:N
        Aw_view = @view block_Aw[(1 + (i - 1) * ns):(i * ns)]
        LinearAlgebra.mul!(KAw, K, Aw_view)
        block_KAw[(1 + (i - 1) * nu):(i * nu)] .= KAw
    end
    DenseLQDynamicBlocks{T, V, M}(
        block_A,
        block_B,
        block_Aw,
        block_h,
        block_h01,
        block_h02,
        h_const,
        h0_const,
        block_d,
        block_dw,
        block_KA,
        block_KAw,
    )
end
# Builds the condensed Hessian `H` (over the stacked inputs) and the linear /
# constant objective pieces. Returns a named tuple with `H`, the linear term
# `c = block_h * s0 + h_constant`, the constant `c0`, and the cached blocks
# (`block_h`, `block_h01`, `block_h02`, `h_constant`, `h0_constant`) that let
# callers refresh `c`/`c0` cheaply when `s0` changes.
#
# Fix: use identity comparison `K === nothing` instead of the `==` anti-pattern.
function _build_H_blocks(
    Q,
    R,
    block_A::M,
    block_B::M,
    Aw,
    S,
    Qf,
    K,
    s0,
    N,
) where {T, M <: AbstractMatrix{T}}
    ns = size(Q, 1)
    nu = size(R, 1)
    # Without a feedback gain, treat K as zero so the K-dependent terms vanish.
    if K === nothing
        K = _init_similar(Q, nu, ns, T)
    end
    H = _init_similar(block_A, nu * N, nu * N, T)
    # block_h01, block_h02, and block_h are stored in DenseLQDynamicBlocks to provide quick updates when redefining s0
    # block_h01 = A^T((Q + KTRK + 2 * SK))A where Q, K, R, S, and A are block matrices
    # block_h02 = A^T((Q + KTRK + 2 * SK))block_matrix_A w
    # block_h   = (QB + SKB + K^T R K B + K^T S^T B)^T A + (S + K^T R)^T A
    block_h01 = _init_similar(Q, ns, ns, T)
    block_h02 = _init_similar(s0, ns, T)
    block_h = _init_similar(block_A, nu * N, ns, T)
    h_constant = _init_similar(s0, nu * N, T)
    h0_constant = T(0)
    # quad term refers to the summation of Q, K^T RK, SK, and K^T S^T that is left and right multiplied by B in the Hessian
    quad_term = _init_similar(Q, ns, ns, T)
    quad_term_B = _init_similar(block_B, size(block_B, 1), size(block_B, 2), T)
    QfB = _init_similar(block_B, size(block_B, 1), size(block_B, 2), T)
    quad_term_AB = _init_similar(block_A, ns, nu, T)
    QfAB = _init_similar(block_A, ns, nu, T)
    RK_STB = _init_similar(block_B, nu, nu, T)
    BQB = _init_similar(block_B, nu, nu, T)
    BQfB = _init_similar(block_B, nu, nu, T)
    SK = _init_similar(Q, ns, ns, T)
    RK = _init_similar(Q, nu, ns, T)
    KTRK = _init_similar(Q, ns, ns, T)
    RK_ST = _init_similar(Q, nu, ns, T)
    QB_block_vec = _init_similar(quad_term_B, ns * (N + 1), nu, T)
    h = _init_similar(s0, nu * N, T)
    BTQA = _init_similar(Q, nu, ns, T)
    RK_STA = _init_similar(Q, nu, ns, T)
    BTQAw = _init_similar(s0, nu, T)
    RK_STAw = _init_similar(s0, nu, T)
    QA = _init_similar(Q, ns, ns, T)
    KTRKA = _init_similar(Q, ns, ns, T)
    SKA = _init_similar(Q, ns, ns, T)
    KTSTA = _init_similar(Q, ns, ns, T)
    QAw = _init_similar(s0, ns, T)
    KTRKAw = _init_similar(s0, ns, T)
    SKAw = _init_similar(s0, ns, T)
    KTSTAw = _init_similar(s0, ns, T)
    AQAs0 = _init_similar(s0, ns, T)
    # quad_term = Q + SK + (SK)^T + K^T R K
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(RK, R, K)
    LinearAlgebra.mul!(KTRK, K', RK)
    LinearAlgebra.axpy!(1.0, Q, quad_term)
    LinearAlgebra.axpy!(1.0, SK, quad_term)
    # axpy!(1.0, SK', quad_term) includes scalar operations because of the adjoint
    # .+= is more efficient with adjoint
    quad_term .+= SK'
    LinearAlgebra.axpy!(1.0, KTRK, quad_term)
    # RK_ST = R K + S^T (cross term between inputs and states).
    LinearAlgebra.copyto!(RK_ST, RK)
    RK_ST .+= S'
    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)
        LinearAlgebra.mul!(quad_term_AB, quad_term, B_sub_block)
        LinearAlgebra.mul!(QfAB, Qf, B_sub_block)
        quad_term_B[(1 + (i - 1) * ns):(i * ns), :] = quad_term_AB
        QfB[(1 + (i - 1) * ns):(i * ns), :] = QfAB
        # Accumulate B^T (quad term) B contributions along the block diagonals;
        # the last stage uses Qf instead of the running quad term.
        for j = 1:(N + 1 - i)
            right_block = block_B[(1 + (j - 1 + i - 1) * ns):((j + i - 1) * ns), :]
            LinearAlgebra.mul!(BQB, quad_term_AB', right_block)
            LinearAlgebra.mul!(BQfB, QfAB', right_block)
            for k = 1:(N - j - i + 2)
                row_range = (1 + nu * (k + (j - 1) - 1)):(nu * (k + (j - 1)))
                col_range = (1 + nu * (k - 1)):(nu * k)
                if k == N - j - i + 2
                    view(H, row_range, col_range) .+= BQfB
                else
                    view(H, row_range, col_range) .+= BQB
                end
            end
        end
        # Sub-diagonal cross terms (R K + S^T) B, and the R block on the diagonal.
        LinearAlgebra.mul!(RK_STB, RK_ST, B_sub_block)
        for m = 1:(N - i)
            row_range = (1 + nu * (m - 1 + i)):(nu * (m + i))
            col_range = (1 + nu * (m - 1)):(nu * m)
            view(H, row_range, col_range) .+= RK_STB
        end
        view(H, (1 + nu * (i - 1)):(nu * i), (1 + nu * (i - 1)):(nu * i)) .+= R
    end
    for i = 1:N
        # quad_term_B = QB + SKB + KTRKB + KTSTB = (Q + SK + KTRK + KTST) B
        fill!(QB_block_vec, T(0))
        rows_QB = 1:(ns * (N - i))
        rows_QfB = (1 + ns * (N - i)):(ns * (N - i + 1))
        QB_block_vec[(1 + ns * i):(ns * N), :] = quad_term_B[rows_QB, :]
        QB_block_vec[(1 + ns * N):(ns * (N + 1)), :] = QfB[rows_QfB, :]
        LinearAlgebra.mul!(BTQA, QB_block_vec', block_A)
        LinearAlgebra.mul!(RK_STA, RK_ST, block_A[(ns * (i - 1) + 1):(ns * i), :])
        LinearAlgebra.mul!(BTQAw, QB_block_vec', Aw)
        LinearAlgebra.mul!(RK_STAw, RK_ST, Aw[(1 + ns * (i - 1)):(ns * i)])
        h_view = @view block_h[(1 + nu * (i - 1)):(nu * i), :]
        LinearAlgebra.axpy!(1, BTQA, h_view)
        LinearAlgebra.axpy!(1, RK_STA, h_view)
        h_constant_view = @view h_constant[(1 + nu * (i - 1)):(nu * i)]
        LinearAlgebra.axpy!(1, BTQAw, h_constant_view)
        LinearAlgebra.axpy!(1, RK_STAw, h_constant_view)
        # Accumulate the s0-quadratic (block_h01), s0-linear (block_h02), and
        # constant (h0_constant) objective pieces for this stage.
        A_view = @view block_A[(1 + ns * (i - 1)):(ns * i), :]
        Aw_view = @view Aw[(1 + ns * (i - 1)):(ns * i)]
        LinearAlgebra.mul!(QA, Q, A_view)
        LinearAlgebra.mul!(KTRKA, KTRK, A_view)
        LinearAlgebra.mul!(SKA, SK, A_view)
        LinearAlgebra.mul!(KTSTA, SK', A_view)
        LinearAlgebra.mul!(QAw, Q, Aw_view)
        LinearAlgebra.mul!(KTRKAw, KTRK, Aw_view)
        LinearAlgebra.mul!(SKAw, SK, Aw_view)
        LinearAlgebra.mul!(KTSTAw, SK', Aw_view)
        LinearAlgebra.mul!(block_h01, A_view', QA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', KTRKA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', SKA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', KTSTA, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', QAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', KTRKAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', SKAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', KTSTAw, 1, 1)
        h0_constant += LinearAlgebra.dot(Aw_view, QAw)
        h0_constant += LinearAlgebra.dot(Aw_view, KTRKAw)
        h0_constant += LinearAlgebra.dot(Aw_view, SKAw)
        h0_constant += LinearAlgebra.dot(Aw_view, KTSTAw)
    end
    # Terminal-stage contribution uses Qf.
    A_view = @view block_A[(1 + ns * N):(ns * (N + 1)), :]
    Aw_view = @view Aw[(1 + ns * N):(ns * (N + 1))]
    LinearAlgebra.mul!(QA, Qf, A_view)
    LinearAlgebra.mul!(block_h01, A_view', QA, 1, 1)
    LinearAlgebra.mul!(QAw, Qf, Aw_view)
    LinearAlgebra.mul!(block_h02, A_view', QAw, 1, 1)
    h0_constant += LinearAlgebra.dot(Aw_view, QAw)
    # Assemble the linear and constant objective terms for the current s0.
    LinearAlgebra.mul!(h, block_h, s0)
    LinearAlgebra.mul!(AQAs0, block_h01, s0)
    h0 = LinearAlgebra.dot(AQAs0, s0)
    h0 += h0_constant
    h0 += LinearAlgebra.dot(block_h02, s0) * T(2)
    h += h_constant
    return (
        H = H,
        c = h,
        c0 = h0 / T(2),
        block_h = block_h,
        block_h01 = block_h01,
        block_h02 = block_h02,
        h_constant = h_constant,
        h0_constant = h0_constant / T(2),
    )
end
# Fills `G` (the first `nu` columns of the E/F constraint Jacobian), the
# constant blocks `block_d` (E applied to the powers of A) and `block_dw`
# (E applied to the disturbance response), and shifts the bounds `dl`/`du`
# in place by the constant part `block_d * s0 + block_dw`.
# This method handles the case without a feedback gain (`K === nothing`).
function _set_G_blocks!(
    G,
    dl,
    du,
    block_B::M,
    block_A::M,
    block_d::M,
    block_Aw,
    block_dw,
    s0,
    E,
    F,
    K::MK,
    N,
) where {T, M <: AbstractMatrix{T}, MK <: Nothing}
    ns = size(E, 2)
    nu = size(F, 2)
    nc = size(E, 1)

    # Stage-0 block of G is simply F (u enters directly at the first stage).
    G[1:nc, :] = F

    EB = _init_similar(block_B, nc, nu, T)
    EA = _init_similar(block_B, nc, ns, T)
    d = _init_similar(block_dw, nc, T)

    for k = 1:N
        state_rows = (1 + ns * (k - 1)):(ns * k)
        con_rows = (1 + nc * (k - 1)):(nc * k)

        if k < N
            # Sub-diagonal block of G: E times the k-th block row of B.
            LinearAlgebra.mul!(EB, E, view(block_B, state_rows, :))
            G[(1 + nc * k):(nc * (k + 1)), :] = EB
        end

        # Constant constraint contributions from the free state response.
        LinearAlgebra.mul!(EA, E, @view block_A[state_rows, :])
        LinearAlgebra.mul!(d, E, @view block_Aw[state_rows])
        block_d[con_rows, :] .= EA
        block_dw[con_rows] .= d
    end

    # Fold the constant response into the bounds: dl .-= block_d * s0 + block_dw.
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    LinearAlgebra.axpy!(-1, block_dw, dl)
    LinearAlgebra.axpy!(-1, block_dw, du)
end
# Fills `G`, `block_d`, and `block_dw`, and shifts `dl`/`du` in place, for the
# case with a feedback gain `K`. With u = K s + v the effective state
# coefficient in the constraints is E + F K, which replaces E everywhere.
function _set_G_blocks!(
    G,
    dl,
    du,
    block_B,
    block_A,
    block_d,
    block_Aw,
    block_dw,
    s0,
    E,
    F,
    K::MK,
    N,
) where {T, MK <: AbstractMatrix{T}}
    ns = size(E, 2)
    nu = size(F, 2)
    nc = size(E, 1)

    # Stage-0 block of G is F (v enters directly through F at the first stage).
    G[1:nc, :] = F

    EpFK = _init_similar(E, nc, ns, T)
    EpFKA = _init_similar(E, nc, ns, T)
    FK = _init_similar(E, nc, ns, T)
    EB = _init_similar(E, nc, nu, T)
    d = _init_similar(s0, nc, T)

    # EpFK = E + F K.
    LinearAlgebra.copyto!(EpFK, E)
    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1.0, FK, EpFK)

    for k = 1:N
        state_rows = (1 + ns * (k - 1)):(ns * k)
        con_rows = (1 + nc * (k - 1)):(nc * k)

        if k < N
            # Sub-diagonal block of G: (E + F K) times the k-th block row of B.
            LinearAlgebra.mul!(EB, EpFK, view(block_B, state_rows, :))
            G[(1 + nc * k):(nc * (k + 1)), :] = EB
        end

        # Constant constraint contributions from the free state response.
        LinearAlgebra.mul!(EpFKA, EpFK, @view block_A[state_rows, :])
        LinearAlgebra.mul!(d, EpFK, @view block_Aw[state_rows])
        block_d[con_rows, :] .= EpFKA
        block_dw[con_rows] .= d
    end

    # Fold the constant response into the bounds: dl .-= block_d * s0 + block_dw.
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    LinearAlgebra.axpy!(-1, block_dw, dl)
    LinearAlgebra.axpy!(-1, block_dw, du)
end
# Copies the block-Toeplitz column `G` into the top-left section of the dense
# Jacobian `J1`: column block `i` receives the top `(N - i + 1)` row blocks of
# `G`, shifted down by `(i - 1)` block rows.
function _set_J1_dense!(J1, G, N)
    # Only used for explicit Jacobian, not implicit Jacobian
    nu = size(G, 2)
    # Fix: exact integer division instead of `Int(size(G, 1) / N)`, which
    # round-trips through floating point and can lose precision / throw
    # `InexactError` for large dimensions.
    nc = div(size(G, 1), N)
    for i = 1:N
        col_range = (1 + nu * (i - 1)):(nu * i)
        J1[(1 + nc * (i - 1)):(nc * N), col_range] = G[1:((N - i + 1) * nc), :]
    end
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 42029 | @doc raw"""
SparseLQDynamicModel(dnlp::LQDynamicData) -> SparseLQDynamicModel
SparseLQDynamicModel(s0, A, B, Q, R, N; ...) -> SparseLQDynamicModel
A constructor for building a `SparseLQDynamicModel <: QuadraticModels.AbstractQuadraticModel`
Input data is for the problem of the form
```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```
---
Data is converted to the form
```math
\begin{aligned}
\min &\; \frac{1}{2} z^T H z \\
\textrm{s.t.} &\; \textrm{lcon} \le Jz \le \textrm{ucon}\\
&\; \textrm{lvar} \le z \le \textrm{uvar}
\end{aligned}
```
Resulting `H` and `J` matrices are stored as `QuadraticModels.QPData` within the `SparseLQDynamicModel` struct and
variable and constraint limits are stored within `NLPModels.NLPModelMeta`
If `K` is defined, then `u` variables are replaced by `v` variables, and `u` can be queried by `get_u` and `get_s` within `DynamicNLPModels.jl`
"""
# Primary constructor: delegates straight to the internal builder, which
# dispatches on whether the problem data is dense or sparse and on whether a
# feedback matrix `K` is present.
# `MK` is bound in the argument type so the static parameter is determinable
# from the call (the previous `LQDynamicData{T, V, M}` signature left `MK`
# undetermined).
function SparseLQDynamicModel(
    dnlp::LQDynamicData{T, V, M, MK},
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    _build_sparse_lq_dynamic_model(dnlp)
end
# Keyword-argument convenience constructor: assembles an `LQDynamicData` from
# the individual problem matrices/vectors and forwards to
# `SparseLQDynamicModel(dnlp)`.
#
# Defaults: no cross term (`S` zero), no algebraic constraints (`E`, `F` with
# zero rows), no feedback (`K = nothing`), zero disturbance `w`, and all
# bounds infinite. `gl`/`gu` are both sized by `size(E, 1)` (the number of
# algebraic constraint rows, which must equal `size(F, 1)`).
function SparseLQDynamicModel(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    gu::V = (similar(s0, size(E, 1)) .= Inf),
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    dnlp = LQDynamicData(
        s0,
        A,
        B,
        Q,
        R,
        N;
        Qf = Qf,
        S = S,
        E = E,
        F = F,
        K = K,
        w = w,
        sl = sl,
        su = su,
        ul = ul,
        uu = uu,
        gl = gl,
        gu = gu,
    )
    SparseLQDynamicModel(dnlp)
end
# Build a SparseLQDynamicModel from dense problem data when no feedback
# matrix `K` is given. The QP Hessian `H` and constraint Jacobian `J` are
# assembled directly in CSC form (colptr/rowval/nzval buffers) and wrapped in
# SparseMatrixCSC, avoiding dense intermediates.
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    # Unpack the dynamic problem data.
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)  # algebraic constraint rows per time step
    # Preallocate CSC buffers for H (dense Q/R/S blocks) and J (dynamics and
    # algebraic constraint rows). Sizes assume fully dense blocks; explicit
    # zeros are pruned with dropzeros! below.
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(Int, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    H_nzval = zeros(T, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(Int, N * (ns^2 + ns * nu + ns) + N * (nc * ns + nc * nu))
    J_nzval = zeros(T, N * (ns^2 + ns * nu + ns) + N * (nc * ns + nc * nu))
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, Q, R, N; Qf = Qf, S = S)
    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )
    _set_sparse_J!(J_colptr, J_rowval, J_nzval, A, B, E, F, K, N)
    J = SparseArrays.SparseMatrixCSC(
        (nc + ns) * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )
    SparseArrays.dropzeros!(H)
    SparseArrays.dropzeros!(J)
    c0 = zero(T)
    nvar = ns * (N + 1) + nu * N
    c = _init_similar(s0, nvar, T)
    lvar = _init_similar(s0, nvar, T)
    uvar = _init_similar(s0, nvar, T)
    # Pin the initial state by setting equal lower/upper variable bounds.
    lvar[1:ns] = s0
    uvar[1:ns] = s0
    lcon = _init_similar(s0, ns * N + N * nc, T)
    ucon = _init_similar(s0, ns * N + N * nc, T)
    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)
    # Dynamics rows are equalities: A s_i + B u_i - s_{i+1} = -w_i.
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w
    # State bounds for stages 1..N and algebraic constraint bounds per stage.
    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su
        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end
    # Input bounds occupy the trailing nu * N entries of the variable vector.
    for j = 1:N
        lvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = ul
        uvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = uu
    end
    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build a SparseLQDynamicModel from dense problem data when a feedback matrix
# `K` is supplied. Inputs are reparametrized as u_i = K s_i + v_i, so the QP
# is posed in `v`; input bounds become extra algebraic constraint rows
# ul <= K s_i + v_i <= uu, kept only where at least one bound is finite.
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # A bound row is "real" (kept) if either side is finite.
    bool_vec = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds = sum(bool_vec)
    # Transform u variables to v variables
    new_Q = _init_similar(Q, size(Q, 1), size(Q, 2), T)
    new_S = _init_similar(S, size(S, 1), size(S, 2), T)
    new_A = _init_similar(A, size(A, 1), size(A, 2), T)
    new_E = _init_similar(E, size(E, 1), size(E, 2), T)
    KTR = _init_similar(Q, size(K, 2), size(R, 2), T)
    SK = _init_similar(Q, size(S, 1), size(K, 2), T)
    KTRK = _init_similar(Q, size(K, 2), size(K, 2), T)
    BK = _init_similar(Q, size(B, 1), size(K, 2), T)
    FK = _init_similar(Q, size(F, 1), size(K, 2), T)
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(Int, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    H_nzval = zeros(T, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(
        Int,
        N * (ns^2 + ns * nu + ns) +
        N * (nc * ns + nc * nu) +
        N * (ns * num_real_bounds + num_real_bounds),
    )
    J_nzval = zeros(
        T,
        N * (ns^2 + ns * nu + ns) +
        N * (nc * ns + nc * nu) +
        N * (ns * num_real_bounds + num_real_bounds),
    )
    # Substituting u = K s + v yields the modified data:
    #   new_S = S + K'R,  new_Q = Q + SK + (SK)' + K'RK,
    #   new_A = A + BK,   new_E = E + FK.
    LinearAlgebra.copyto!(new_Q, Q)
    LinearAlgebra.copyto!(new_S, S)
    LinearAlgebra.copyto!(new_A, A)
    LinearAlgebra.copyto!(new_E, E)
    LinearAlgebra.mul!(KTR, K', R)
    LinearAlgebra.axpy!(1, KTR, new_S)
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(KTRK, KTR, K)
    LinearAlgebra.axpy!(1, SK, new_Q)
    LinearAlgebra.axpy!(1, SK', new_Q)
    LinearAlgebra.axpy!(1, KTRK, new_Q)
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, new_A)
    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1, FK, new_E)
    # Get H and J matrices from new matrices
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, new_Q, R, N; Qf = Qf, S = new_S)
    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )
    _set_sparse_J!(
        J_colptr,
        J_rowval,
        J_nzval,
        new_A,
        B,
        new_E,
        F,
        K,
        bool_vec,
        N,
        num_real_bounds,
    )
    J = SparseArrays.SparseMatrixCSC(
        ns * N + nc * N + num_real_bounds * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )
    SparseArrays.dropzeros!(H)
    SparseArrays.dropzeros!(J)
    # Remove algebraic constraints if u variable is unbounded on both upper and lower ends
    lcon3 = _init_similar(ul, nu * N, T)
    ucon3 = _init_similar(ul, nu * N, T)
    ul = ul[bool_vec]
    uu = uu[bool_vec]
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)
    nvar = ns * (N + 1) + nu * N
    # Variable bounds on v are infinite; the original u bounds were converted
    # to constraint rows above.
    lvar = similar(s0, nvar)
    fill!(lvar, -Inf)
    uvar = similar(s0, nvar)
    fill!(uvar, Inf)
    # Pin the initial state via equal lower/upper bounds.
    lvar[1:ns] = s0
    uvar[1:ns] = s0
    lcon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    ucon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    # Dynamics rows are equalities: new_A s_i + B v_i - s_{i+1} = -w_i.
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w
    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)
    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su
        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end
    # Input-bound rows come after the dynamics and algebraic constraint rows.
    if length(lcon3) > 0
        lcon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = lcon3
        ucon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = ucon3
    end
    c0 = zero(T)
    c = _init_similar(s0, nvar, T)
    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build a SparseLQDynamicModel from sparse (SparseMatrixCSC) problem data when
# no feedback matrix `K` is given. The CSC buffer sizes are exact: they are
# computed from the nnz counts of the input matrices rather than assuming
# dense blocks.
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: SparseMatrixCSC{T}, MK <: Nothing}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # Prune stored zeros first so the nnz-based buffer sizes below are tight.
    SparseArrays.dropzeros!(A)
    SparseArrays.dropzeros!(B)
    SparseArrays.dropzeros!(Q)
    SparseArrays.dropzeros!(R)
    SparseArrays.dropzeros!(Qf)
    SparseArrays.dropzeros!(E)
    SparseArrays.dropzeros!(F)
    SparseArrays.dropzeros!(S)
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(
        Int,
        length(Q.rowval) * N +
        length(R.rowval) * N +
        2 * length(S.rowval) * N +
        length(Qf.rowval),
    )
    H_nzval = zeros(
        T,
        length(Q.nzval) * N +
        length(R.nzval) * N +
        2 * length(S.nzval) * N +
        length(Qf.nzval),
    )
    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    # ns * N extra entries account for the -I blocks in the dynamics rows.
    J_rowval = zeros(
        Int,
        length(A.rowval) * N +
        length(B.rowval) * N +
        length(E.rowval) * N +
        length(F.rowval) * N +
        ns * N,
    )
    J_nzval = zeros(
        T,
        length(A.nzval) * N +
        length(B.nzval) * N +
        length(E.nzval) * N +
        length(F.nzval) * N +
        ns * N,
    )
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, Q, R, N; Qf = Qf, S = S)
    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )
    _set_sparse_J!(J_colptr, J_rowval, J_nzval, A, B, E, F, K, N)
    J = SparseArrays.SparseMatrixCSC(
        (nc + ns) * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )
    c0 = zero(T)
    nvar = ns * (N + 1) + nu * N
    c = _init_similar(s0, nvar, T)
    lvar = _init_similar(s0, nvar, T)
    uvar = _init_similar(s0, nvar, T)
    # Pin the initial state via equal lower/upper variable bounds.
    lvar[1:ns] = s0
    uvar[1:ns] = s0
    lcon = _init_similar(s0, ns * N + N * nc, T)
    ucon = _init_similar(s0, ns * N + N * nc, T)
    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)
    # Dynamics rows are equalities: A s_i + B u_i - s_{i+1} = -w_i.
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w
    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su
        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end
    # Input bounds occupy the trailing nu * N entries of the variable vector.
    for j = 1:N
        lvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = ul
        uvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = uu
    end
    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build a SparseLQDynamicModel from sparse (SparseMatrixCSC) problem data when
# a sparse feedback matrix `K` is supplied. Inputs are reparametrized as
# u_i = K s_i + v_i; input bounds become constraint rows
# ul <= K s_i + v_i <= uu (kept only where at least one bound is finite).
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: SparseMatrixCSC{T}, MK <: SparseMatrixCSC{T}}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # Prune stored zeros first so nnz-based buffer sizes below are tight.
    SparseArrays.dropzeros!(A)
    SparseArrays.dropzeros!(B)
    SparseArrays.dropzeros!(Q)
    SparseArrays.dropzeros!(R)
    SparseArrays.dropzeros!(Qf)
    SparseArrays.dropzeros!(E)
    SparseArrays.dropzeros!(F)
    SparseArrays.dropzeros!(S)
    SparseArrays.dropzeros!(K)
    # A bound row is "real" (kept) if either side is finite.
    bool_vec = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds = sum(bool_vec)
    # Transform u variables to v variables
    new_Q = _init_similar(Q, size(Q, 1), size(Q, 2), T)
    new_S = _init_similar(S, size(S, 1), size(S, 2), T)
    new_A = _init_similar(A, size(A, 1), size(A, 2), T)
    new_E = _init_similar(E, size(E, 1), size(E, 2), T)
    KTR = _init_similar(Q, size(K, 2), size(R, 2), T)
    SK = _init_similar(Q, size(S, 1), size(K, 2), T)
    KTRK = _init_similar(Q, size(K, 2), size(K, 2), T)
    BK = _init_similar(Q, size(B, 1), size(K, 2), T)
    FK = _init_similar(Q, size(F, 1), size(K, 2), T)
    # NOTE(review): these H buffers are allocated again below (after new_S is
    # formed and pruned) and the versions here are never used — candidate for
    # removal.
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(
        Int,
        length(Q.rowval) * N +
        length(R.rowval) * N +
        2 * length(S.rowval) * N +
        length(Qf.rowval),
    )
    H_nzval = zeros(
        T,
        length(Q.nzval) * N +
        length(R.nzval) * N +
        2 * length(S.nzval) * N +
        length(Qf.nzval),
    )
    # Substituting u = K s + v yields the modified data:
    #   new_S = S + K'R,  new_Q = Q + SK + (SK)' + K'RK,
    #   new_A = A + BK,   new_E = E + FK.
    LinearAlgebra.copyto!(new_Q, Q)
    LinearAlgebra.copyto!(new_S, S)
    LinearAlgebra.copyto!(new_A, A)
    LinearAlgebra.copyto!(new_E, E)
    LinearAlgebra.mul!(KTR, K', R)
    LinearAlgebra.axpy!(1, KTR, new_S)
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(KTRK, KTR, K)
    LinearAlgebra.axpy!(1, SK, new_Q)
    LinearAlgebra.axpy!(1, SK', new_Q)
    LinearAlgebra.axpy!(1, KTRK, new_Q)
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, new_A)
    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1, FK, new_E)
    SparseArrays.dropzeros!(new_Q)
    SparseArrays.dropzeros!(new_A)
    SparseArrays.dropzeros!(new_E)
    SparseArrays.dropzeros!(new_S)
    # Rows of K corresponding to kept input bounds.
    K_sparse = K[bool_vec, :]
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(
        Int,
        length(Q.rowval) * N +
        length(R.rowval) * N +
        2 * length(new_S.rowval) * N +
        length(Qf.rowval),
    )
    H_nzval = zeros(
        T,
        length(Q.nzval) * N +
        length(R.nzval) * N +
        2 * length(new_S.nzval) * N +
        length(Qf.nzval),
    )
    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    # ns * N entries for the -I dynamics blocks; K_sparse and identity entries
    # for the kept input-bound rows.
    J_rowval = zeros(
        Int,
        length(new_A.rowval) * N +
        length(B.rowval) * N +
        length(new_E.rowval) * N +
        length(F.rowval) * N +
        ns * N +
        length(K_sparse.rowval) * N +
        num_real_bounds * N,
    )
    J_nzval = zeros(
        T,
        length(new_A.nzval) * N +
        length(B.nzval) * N +
        length(new_E.nzval) * N +
        length(F.nzval) * N +
        ns * N +
        length(K_sparse.nzval) * N +
        num_real_bounds * N,
    )
    # Get H and J matrices from new matrices
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, new_Q, R, N; Qf = Qf, S = new_S)
    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )
    _set_sparse_J!(
        J_colptr,
        J_rowval,
        J_nzval,
        new_A,
        B,
        new_E,
        F,
        K,
        bool_vec,
        N,
        num_real_bounds,
    )
    J = SparseArrays.SparseMatrixCSC(
        ns * N + nc * N + num_real_bounds * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )
    # Remove algebraic constraints if u variable is unbounded on both upper and lower ends
    lcon3 = _init_similar(ul, nu * N, T)
    ucon3 = _init_similar(ul, nu * N, T)
    ul = ul[bool_vec]
    uu = uu[bool_vec]
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)
    nvar = ns * (N + 1) + nu * N
    # Variable bounds on v are infinite; original u bounds were converted to
    # constraint rows above.
    lvar = similar(s0, nvar)
    fill!(lvar, -Inf)
    uvar = similar(s0, nvar)
    fill!(uvar, Inf)
    # Pin the initial state via equal lower/upper bounds.
    lvar[1:ns] = s0
    uvar[1:ns] = s0
    lcon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    ucon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)
    # Dynamics rows are equalities: new_A s_i + B v_i - s_{i+1} = -w_i.
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w
    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su
        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end
    # Input-bound rows come after the dynamics and algebraic constraint rows.
    if length(lcon3) > 0
        lcon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = lcon3
        ucon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = ucon3
    end
    c0 = zero(T)
    c = _init_similar(s0, nvar, T)
    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
#set the data needed to build a SparseArrays.SparseMatrixCSC matrix. H_colptr, H_rowval, and H_nzval
#are set so that they can be passed to SparseMatrixCSC() to obtain the `H` matrix such that
# z^T H z = sum_{i=1}^{N-1} s_i^T Q s + sum_{i=1}^{N-1} u^T R u + s_N^T Qf s_n .
# Dense-input method: fills the CSC buffers for H assuming every Q, R, S, Qf
# block is fully dense. Column layout: state columns for stages 0..N-1 (Q
# block above an S' block), then the stage-N state columns (Qf), then input
# columns (S block above an R block).
function _set_sparse_H!(
    H_colptr,
    H_rowval,
    H_nzval,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = zeros(T, size(Q, 1), size(R, 1)),
) where {T, M <: AbstractMatrix{T}}
    ns = size(Q, 1)
    nu = size(R, 1)
    # State columns for stages 0..N-1: each column j of stage i holds
    # Q[:, j] followed by S[j, :] (the S' contribution in the input rows).
    for i = 1:N
        for j = 1:ns
            H_nzval[(1 + (i - 1) * (ns^2 + nu * ns) + (j - 1) * (ns + nu)):(ns * j + nu * (j - 1) + (i - 1) * (ns^2 + nu * ns))] =
                @view Q[:, j]
            H_nzval[(1 + (i - 1) * (ns^2 + nu * ns) + j * ns + (j - 1) * nu):((i - 1) * (ns^2 + nu * ns) + j * (ns + nu))] =
                @view S[j, :]
            H_rowval[(1 + (i - 1) * (ns^2 + nu * ns) + (j - 1) * ns + (j - 1) * nu):(ns * j + nu * (j - 1) + (i - 1) * (ns^2 + nu * ns))] =
                (1 + (i - 1) * ns):(ns * i)
            H_rowval[(1 + (i - 1) * (ns^2 + nu * ns) + j * ns + (j - 1) * nu):((i - 1) * (ns^2 + nu * ns) + j * (ns + nu))] =
                (1 + (N + 1) * ns + nu * (i - 1)):((N + 1) * ns + nu * i)
            H_colptr[((i - 1) * ns + j)] =
                1 + (ns + nu) * (j - 1) + (i - 1) * (ns * nu + ns * ns)
        end
    end
    # Stage-N state columns: Qf only.
    for j = 1:ns
        H_nzval[(1 + N * (ns^2 + nu * ns) + (j - 1) * ns):(ns * j + N * (ns^2 + nu * ns))] =
            @view Qf[:, j]
        H_rowval[(1 + N * (ns^2 + nu * ns) + (j - 1) * ns):(ns * j + N * (ns^2 + nu * ns))] =
            (1 + N * ns):((N + 1) * ns)
        H_colptr[(N * ns + j)] = 1 + ns * (j - 1) + N * (ns * nu + ns * ns)
    end
    # Input columns: each column j of stage i holds S[:, j] (state rows)
    # followed by R[:, j] (input rows).
    offset = ns^2 * (N + 1) + ns * nu * N
    for i = 1:N
        for j = 1:nu
            H_nzval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * (nu + ns)):(offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns)] =
                @view S[:, j]
            H_nzval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns):(offset + (i - 1) * (nu^2 + ns * nu) + j * (ns + nu))] =
                @view R[:, j]
            H_rowval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * (nu + ns)):(offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns)] =
                (1 + (i - 1) * ns):(i * ns)
            H_rowval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns):(offset + (i - 1) * (nu^2 + ns * nu) + j * (ns + nu))] =
                (1 + (N + 1) * ns + (i - 1) * nu):((N + 1) * ns + i * nu)
            H_colptr[(N + 1) * ns + (i - 1) * nu + j] =
                1 + offset + (ns + nu) * (j - 1) + (nu^2 + ns * nu) * (i - 1)
        end
    end
    # Terminate the column pointer array.
    H_colptr[ns * (N + 1) + nu * N + 1] = length(H_nzval) + 1
end
# Sparse-input method: fills the CSC buffers for H by splicing the stored
# entries (colptr/rowval/nzval) of Q, R, S, Qf directly, so only structural
# nonzeros are written. Column layout matches the dense method: state columns
# for stages 0..N-1 (Q above S'), stage-N state columns (Qf), then input
# columns (S above R).
function _set_sparse_H!(
    H_colptr,
    H_rowval,
    H_nzval,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = spzeros(T, size(Q, 1), size(R, 1)),
) where {T, M <: SparseMatrixCSC{T}}
    # Materialize S' once so its columns can be copied per state column.
    ST = SparseArrays.sparse(S')
    ns = size(Q, 1)
    nu = size(R, 1)
    H_colptr[1] = 1
    # State columns for stages 0..N-1: Q column entries, then S' column
    # entries shifted into the input rows.
    for i = 1:N
        for j = 1:ns
            Q_offset = length(Q.colptr[j]:(Q.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (i - 1) + j]):(H_colptr[ns * (i - 1) + j] + Q_offset - 1)] =
                Q.nzval[Q.colptr[j]:(Q.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (i - 1) + j]):(H_colptr[ns * (i - 1) + j] + Q_offset - 1)] =
                Q.rowval[Q.colptr[j]:(Q.colptr[j + 1] - 1)] .+ ns * (i - 1)
            ST_offset = length(ST.colptr[j]:(ST.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (i - 1) + j] + Q_offset):(H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset - 1)] =
                ST.nzval[ST.colptr[j]:(ST.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (i - 1) + j] + Q_offset):(H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset - 1)] =
                ST.rowval[ST.colptr[j]:(ST.colptr[j + 1] - 1)] .+
                (nu * (i - 1) + ns * (N + 1))
            H_colptr[ns * (i - 1) + j + 1] =
                H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset
        end
    end
    # Stage-N state columns: Qf entries only.
    for j = 1:ns
        Qf_offset = length(Qf.colptr[j]:(Qf.colptr[j + 1] - 1))
        H_nzval[(H_colptr[N * ns + j]):(H_colptr[N * ns + j] + Qf_offset - 1)] =
            Qf.nzval[Qf.colptr[j]:(Qf.colptr[j + 1] - 1)]
        H_rowval[(H_colptr[N * ns + j]):(H_colptr[N * ns + j] + Qf_offset - 1)] =
            Qf.rowval[Qf.colptr[j]:(Qf.colptr[j + 1] - 1)] .+ (ns * N)
        H_colptr[ns * N + j + 1] = H_colptr[ns * N + j] + Qf_offset
    end
    # Input columns: S column entries (state rows) then R column entries
    # (input rows). The final loop iteration also writes the terminating
    # colptr entry.
    for i = 1:N
        for j = 1:nu
            S_offset = length(S.colptr[j]:(S.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j]):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset - 1)] =
                S.nzval[S.colptr[j]:(S.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j]):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset - 1)] =
                S.rowval[S.colptr[j]:(S.colptr[j + 1] - 1)] .+ ((i - 1) * ns)
            R_offset = length(R.colptr[j]:(R.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset - 1)] =
                R.nzval[R.colptr[j]:(R.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset - 1)] =
                R.rowval[R.colptr[j]:(R.colptr[j + 1] - 1)] .+ ((i - 1) * nu + ns * (N + 1))
            H_colptr[ns * (N + 1) + nu * (i - 1) + j + 1] =
                H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset
        end
    end
end
# set the data needed to build a SparseArrays.SparseMatrixCSC matrix. J_colptr, J_rowval, and J_nzval
# are set so that they can be passed to SparseMatrixCSC() to obtain the Jacobian, `J`. The Jacobian
# contains the data for the following constraints:
# As_i + Bu_i = s_{i + 1}
# gl <= Es_i + Fu_i <= gu
# If `K` is defined, then this matrix also contains the constraints
# ul <= K s_i + v_i <= uu
# Dense-input method with a feedback matrix `K`: fills the CSC buffers for J
# assuming dense blocks. Row layout: dynamics rows (ns * N), algebraic
# constraint rows (nc * N), then kept input-bound rows (nb * N). `bool_vec`
# marks which input bounds are kept; `nb = sum(bool_vec)`.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A,
    B,
    E,
    F,
    K::MK,
    bool_vec,
    N,
    nb,
) where {T, MK <: AbstractMatrix{T}}
    # nb = num_real_bounds
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    # NOTE(review): I_mat appears unused below (the identity entries are
    # written directly as T(1)) — candidate for removal.
    I_mat = _init_similar(A, nu, nu)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    # Set the first block column of A, E, and K
    for j = 1:ns
        J_nzval[(1 + (j - 1) * (ns + nc + nb)):((j - 1) * (nc + nb) + j * ns)] =
            @view A[:, j]
        J_nzval[(1 + (j - 1) * (nc + nb) + j * ns):(j * (ns + nc) + (j - 1) * nb)] =
            @view E[:, j]
        J_nzval[(1 + j * (ns + nc) + (j - 1) * nb):(j * (ns + nc + nb))] =
            @view K[:, j][bool_vec]
        J_rowval[(1 + (j - 1) * (ns + nc + nb)):((j - 1) * (nc + nb) + j * ns)] = 1:ns
        J_rowval[(1 + (j - 1) * (nc + nb) + j * ns):(j * (ns + nc) + (j - 1) * nb)] =
            (1 + ns * N):(nc + ns * N)
        J_rowval[(1 + j * (ns + nc) + (j - 1) * nb):(j * (ns + nc + nb))] =
            (1 + (ns + nc) * N):((ns + nc) * N + nb)
        J_colptr[j] = 1 + (j - 1) * (ns + nc + nb)
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        offset = (i - 1) * ns * (ns + nc + nb) + (i - 2) * ns
        for j = 1:ns
            J_nzval[1 + offset + (j - 1) * (ns + nc + nb + 1)] = T(-1)
            J_nzval[(1 + offset + (j - 1) * (ns + nc + nb) + j):(offset + j * ns + (j - 1) * (nc + nb) + j)] =
                @view A[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * (nc + nb) + j):(offset + j * (ns + nc) + (j - 1) * nb + j)] =
                @view E[:, j]
            J_nzval[(1 + offset + j * (ns + nc) + (j - 1) * nb + j):(offset + j * (ns + nc + nb) + j)] =
                @view K[:, j][bool_vec]
            J_rowval[1 + offset + (j - 1) * (ns + nc + nb + 1)] = ns * (i - 2) + j
            J_rowval[(1 + offset + (j - 1) * (ns + nc + nb) + j):(offset + j * ns + (j - 1) * (nc + nb) + j)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * (nc + nb) + j):(offset + j * (ns + nc) + (j - 1) * nb + j)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_rowval[(1 + offset + j * (ns + nc) + (j - 1) * nb + j):(offset + j * (ns + nc + nb) + j)] =
                (1 + N * (ns + nc) + (i - 1) * nb):(N * (ns + nc) + i * nb)
            J_colptr[(i - 1) * ns + j] = 1 + (j - 1) * (ns + nc + nb + 1) + offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[j + ns * (ns + nc + nb + 1) * N - ns] = T(-1)
        J_rowval[j + ns * (ns + nc + nb + 1) * N - ns] = j + (N - 1) * ns
        J_colptr[ns * N + j] = 1 + ns * (ns + nc + nb + 1) * N - ns + (j - 1)
    end
    # Set the remaining block columns corresponding to inputs: B, F, I
    # (the identity entry is only written for inputs with a kept bound;
    # bool_offset tracks how many have been written so far in this stage).
    nscol_offset = N * (ns^2 + nc * ns + nb * ns + ns)
    for i = 1:N
        offset = (i - 1) * (nu * ns + nu * nc + nb) + nscol_offset
        bool_offset = 0
        for j = 1:nu
            J_nzval[(1 + offset + (j - 1) * (ns + nc) + bool_offset):(offset + j * ns + (j - 1) * nc + bool_offset)] =
                @view B[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc + bool_offset):(offset + j * (ns + nc) + bool_offset)] =
                @view F[:, j]
            if bool_vec[j]
                J_nzval[1 + offset + j * (ns + nc) + bool_offset] = T(1)
                J_rowval[1 + offset + j * (ns + nc) + bool_offset] =
                    (N * (ns + nc) + (i - 1) * nb + 1 + (bool_offset))
            end
            J_rowval[(1 + offset + (j - 1) * (ns + nc) + bool_offset):(offset + j * ns + (j - 1) * nc + bool_offset)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc + bool_offset):(offset + j * (ns + nc) + bool_offset)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(ns * (N + 1) + (i - 1) * nu + j)] =
                1 + offset + (j - 1) * (ns + nc) + bool_offset
            bool_offset += bool_vec[j]
        end
    end
    # Terminate the column pointer array.
    J_colptr[ns * (N + 1) + nu * N + 1] = length(J_nzval) + 1
end
# Dense-input method without a feedback matrix (`K === nothing`): fills the
# CSC buffers for J assuming dense blocks. Row layout: dynamics rows (ns * N)
# followed by algebraic constraint rows (nc * N); there are no input-bound
# rows in this case.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E,
    F,
    K::MK,
    N,
) where {T, M <: AbstractMatrix{T}, MK <: Nothing}
    # nb = num_real_bounds
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    # Set the first block column of A, E, and K
    for j = 1:ns
        J_nzval[(1 + (j - 1) * (ns + nc)):((j - 1) * nc + j * ns)] = @view A[:, j]
        J_nzval[(1 + (j - 1) * nc + j * ns):(j * (ns + nc))] = @view E[:, j]
        J_rowval[(1 + (j - 1) * (ns + nc)):((j - 1) * nc + j * ns)] = 1:ns
        J_rowval[(1 + (j - 1) * nc + j * ns):(j * (ns + nc))] = (1 + ns * N):(nc + ns * N)
        J_colptr[j] = 1 + (j - 1) * (ns + nc)
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        offset = (i - 1) * ns * (ns + nc) + (i - 2) * ns
        for j = 1:ns
            J_nzval[1 + offset + (j - 1) * (ns + nc + 1)] = T(-1)
            J_nzval[(1 + offset + (j - 1) * (ns + nc) + j):(offset + j * ns + (j - 1) * nc + j)] =
                @view A[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc + j):(offset + j * (ns + nc) + j)] =
                @view E[:, j]
            J_rowval[1 + offset + (j - 1) * (ns + nc + 1)] = ns * (i - 2) + j
            J_rowval[(1 + offset + (j - 1) * (ns + nc) + j):(offset + j * ns + (j - 1) * nc + j)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc + j):(offset + j * (ns + nc) + j)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(i - 1) * ns + j] = 1 + (j - 1) * (ns + nc + 1) + offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[j + ns * (ns + nc + 1) * N - ns] = T(-1)
        J_rowval[j + ns * (ns + nc + 1) * N - ns] = j + (N - 1) * ns
        J_colptr[ns * N + j] = 1 + ns * (ns + nc + 1) * N - ns + (j - 1)
    end
    # Set the remaining block columns corresponding to inputs: B, F
    nscol_offset = N * (ns^2 + nc * ns + ns)
    for i = 1:N
        offset = (i - 1) * (nu * ns + nu * nc) + nscol_offset
        for j = 1:nu
            J_nzval[(1 + offset + (j - 1) * (ns + nc)):(offset + j * ns + (j - 1) * nc)] =
                @view B[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc):(offset + j * (ns + nc))] =
                @view F[:, j]
            J_rowval[(1 + offset + (j - 1) * (ns + nc)):(offset + j * ns + (j - 1) * nc)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc):(offset + j * (ns + nc))] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(ns * (N + 1) + (i - 1) * nu + j)] = 1 + offset + (j - 1) * (ns + nc)
        end
    end
    # Terminate the column pointer array.
    J_colptr[ns * (N + 1) + nu * N + 1] = length(J_nzval) + 1
end
# Sparse-input method with a sparse feedback matrix `K`: fills the CSC
# buffers for J by splicing the stored entries of A, B, E, F, K directly.
# Row layout: dynamics rows (ns * N), algebraic constraint rows (nc * N),
# then kept input-bound rows (nb * N). `bool_vec` marks kept input bounds.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E::M,
    F::M,
    K::MK,
    bool_vec,
    N,
    nb,
) where {T, M <: SparseMatrixCSC{T}, MK <: SparseMatrixCSC{T}}
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    # KI holds the rows of the nu-by-nu identity for kept input bounds;
    # K_sparse holds the corresponding rows of K.
    I_mat = _init_similar(K, nu, nu)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    KI = I_mat[bool_vec, :]
    K_sparse = K[bool_vec, :]
    J_colptr[1] = 1
    # Set the first block column of A, E, and K
    for j = 1:ns
        A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N)
        K_offset = length(K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1))
        (
            J_nzval[(J_colptr[j] + A_offset + E_offset):(J_colptr[j] + A_offset + E_offset + K_offset - 1)] =
                K_sparse.nzval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)]
        )
        (
            J_rowval[(J_colptr[j] + A_offset + E_offset):(J_colptr[j] + A_offset + E_offset + K_offset - 1)] =
                K_sparse.rowval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)] .+
                ((ns + nc) * N)
        )
        J_colptr[j + 1] = J_colptr[j] + A_offset + E_offset + K_offset
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        for j = 1:ns
            J_nzval[J_colptr[j + (i - 1) * ns]] = T(-1)
            J_rowval[J_colptr[j + (i - 1) * ns]] = ns * (i - 2) + j
            A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
            J_nzval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            K_offset = length(K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset + K_offset - 1)] =
                    K_sparse.nzval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset + K_offset - 1)] =
                    K_sparse.rowval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)] .+
                    ((ns + nc) * N + nb * (i - 1))
            )
            J_colptr[ns * (i - 1) + j + 1] =
                J_colptr[ns * (i - 1) + j] + 1 + A_offset + E_offset + K_offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[J_colptr[ns * N + j]] = T(-1)
        J_rowval[J_colptr[ns * N + j]] = ns * (N - 1) + j
        J_colptr[ns * N + j + 1] = J_colptr[ns * N + j] + 1
    end
    # Set the remaining block columns corresponding to inputs: B, F, I
    for i = 1:N
        offset = ns * (N + 1) + nu * (i - 1)
        for j = 1:nu
            B_offset = length(B.colptr[j]:(B.colptr[j + 1] - 1))
            J_nzval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.nzval[B.colptr[j]:(B.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.rowval[B.colptr[j]:(B.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            F_offset = length(F.colptr[j]:(F.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.nzval[F.colptr[j]:(F.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.rowval[F.colptr[j]:(F.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            KI_offset = length(KI.colptr[j]:(KI.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset + F_offset):(J_colptr[offset + j] + B_offset + F_offset + KI_offset - 1)] =
                    KI.nzval[KI.colptr[j]:(KI.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset + F_offset):(J_colptr[offset + j] + B_offset + F_offset + KI_offset - 1)] =
                    KI.rowval[KI.colptr[j]:(KI.colptr[j + 1] - 1)] .+
                    ((ns + nc) * N + nb * (i - 1))
            )
            J_colptr[offset + j + 1] =
                J_colptr[offset + j] + B_offset + F_offset + KI_offset
        end
    end
end
# Sparse-input method without a feedback matrix (`K === nothing`): fills the
# CSC buffers for J by splicing the stored entries of A, B, E, F directly.
# Row layout: dynamics rows (ns * N) followed by algebraic constraint rows
# (nc * N); there are no input-bound rows in this case.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E::M,
    F::M,
    K::MK,
    N,
) where {T, M <: SparseMatrixCSC{T}, MK <: Nothing}
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    J_colptr[1] = 1
    # Set the first block column of A, E, and K
    for j = 1:ns
        A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N)
        J_colptr[j + 1] = J_colptr[j] + A_offset + E_offset
    end
    # Set the remaining block columns corresponding to states: -I, A, E
    for i = 2:N
        for j = 1:ns
            J_nzval[J_colptr[j + (i - 1) * ns]] = T(-1)
            J_rowval[J_colptr[j + (i - 1) * ns]] = ns * (i - 2) + j
            A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
            J_nzval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            J_colptr[ns * (i - 1) + j + 1] =
                J_colptr[ns * (i - 1) + j] + 1 + A_offset + E_offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[J_colptr[ns * N + j]] = T(-1)
        J_rowval[J_colptr[ns * N + j]] = ns * (N - 1) + j
        J_colptr[ns * N + j + 1] = J_colptr[ns * N + j] + 1
    end
    # Set the remaining block columns corresponding to inputs: B, F
    for i = 1:N
        offset = ns * (N + 1) + nu * (i - 1)
        for j = 1:nu
            B_offset = length(B.colptr[j]:(B.colptr[j + 1] - 1))
            J_nzval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.nzval[B.colptr[j]:(B.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.rowval[B.colptr[j]:(B.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            F_offset = length(F.colptr[j]:(F.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.nzval[F.colptr[j]:(F.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.rowval[F.colptr[j]:(F.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            J_colptr[offset + j + 1] = J_colptr[offset + j] + B_offset + F_offset
        end
    end
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 30656 | """
get_u(solution_ref, lqdm::SparseLQDynamicModel) -> u <: vector
get_u(solution_ref, lqdm::DenseLQDynamicModel) -> u <: vector
Query the solution `u` from the solver. If `K = nothing`, the solution for `u` is queried from `solution_ref.solution`
If `K <: AbstractMatrix`, `solution_ref.solution` returns `v`, and `get_u` solves for `u` using the `K` matrix (and the `A` and `B` matrices if `lqdm <: DenseLQDynamicModel`)
"""
function get_u(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: AbstractMatrix{T},
}
    # Sparse model with a feedback matrix K: the solver returns states `s`
    # followed by the auxiliary inputs `v`; recover u_i = K * s_i + v_i.
    solution = solver_status.solution
    ns = lqdm.dynamic_data.ns
    nu = lqdm.dynamic_data.nu
    N = lqdm.dynamic_data.N
    K = lqdm.dynamic_data.K

    u = zeros(T, nu * N)
    # Scratch buffer for K * s_i, hoisted out of the loop so it is
    # allocated once instead of once per time step.
    Ks = zeros(T, size(K, 1))

    for i = 1:N
        start_v = (i - 1) * nu + 1
        end_v = i * nu
        start_s = (i - 1) * ns + 1
        end_s = i * ns

        # Non-allocating views into the stacked solution vector: the first
        # ns * (N + 1) entries are states, the rest are the inputs v.
        s = view(solution, start_s:end_s)
        v = view(solution, (ns * (N + 1) + start_v):(ns * (N + 1) + end_v))

        LinearAlgebra.mul!(Ks, K, s)
        LinearAlgebra.axpy!(1, v, Ks)
        u[start_v:end_v] = Ks
    end
    return u
end
function get_u(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: AbstractMatrix{T},
}
    # Dense model with feedback matrix K: the solver only returns the
    # auxiliary inputs v. Reconstruct the states via the condensed blocks
    # (s = block_A * s0 + block_B * v + block_Aw), then u_i = v_i + K * s_i.
    dnlp = lqdm.dynamic_data
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    K = dnlp.K
    block_A = lqdm.blocks.A
    block_B = lqdm.blocks.B
    block_Aw = lqdm.blocks.Aw
    v = solver_status.solution
    As0 = zeros(T, ns * (N + 1))
    Bv = zeros(T, ns)
    s = zeros(T, ns * (N + 1))
    # Accumulate the input contribution: block i of B maps v_j into the
    # state at step i + j (lower block-triangular structure).
    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)
        for j = 1:(N - i + 1)
            v_sub_vec = v[(1 + nu * (j - 1)):(nu * j)]
            LinearAlgebra.mul!(Bv, B_sub_block, v_sub_vec)
            s[(1 + ns * (i + j - 1)):(ns * (i + j))] .+= Bv
        end
    end
    # Add the free response from s0 and the disturbance term.
    LinearAlgebra.mul!(As0, block_A, dnlp.s0)
    LinearAlgebra.axpy!(1, As0, s)
    LinearAlgebra.axpy!(1, block_Aw, s)
    Ks = _init_similar(dnlp.s0, size(K, 1), T)
    u = copy(v)
    for i = 1:N
        LinearAlgebra.mul!(Ks, K, s[(1 + ns * (i - 1)):(ns * i)])
        u[(1 + nu * (i - 1)):(nu * i)] .+= Ks
    end
    return u
end
function get_u(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: Nothing,
}
    # With no feedback matrix K, the inputs are stored verbatim after the
    # (N + 1) state blocks in the stacked solution vector.
    dnlp = lqdm.dynamic_data
    first_input = dnlp.ns * (dnlp.N + 1) + 1
    return solver_status.solution[first_input:end]
end
function get_u(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: Nothing,
}
    # Without K, the dense model's decision vector is exactly u; return a
    # defensive copy so callers cannot mutate the solver's solution.
    return copy(solver_status.solution)
end
"""
get_s(solution_ref, lqdm::SparseLQDynamicModel) -> s <: vector
get_s(solution_ref, lqdm::DenseLQDynamicModel) -> s <: vector
Query the solution `s` from the solver. If `lqdm <: SparseLQDynamicModel`, the solution is queried directly from `solution_ref.solution`
If `lqdm <: DenseLQDynamicModel`, then `solution_ref.solution` returns `u` (if `K = nothing`) or `v` (if `K <: AbstractMatrix`), and `s` is found from
transforming `u` or `v` into `s` using the `A`, `B`, and `K` matrices.
"""
function get_s(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix},
}
    # The sparse formulation keeps all (N + 1) state blocks at the front of
    # the stacked solution vector, for both the K and no-K cases.
    dnlp = lqdm.dynamic_data
    return solver_status.solution[1:(dnlp.ns * (dnlp.N + 1))]
end
function get_s(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix},
}
    # Dense model: states were condensed out, so rebuild them from the
    # solver's decision vector v (or u) as
    # s = block_A * s0 + block_B * v + block_Aw.
    dnlp = lqdm.dynamic_data
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    block_A = lqdm.blocks.A
    block_B = lqdm.blocks.B
    block_Aw = lqdm.blocks.Aw
    v = solver_status.solution
    As0 = zeros(T, ns * (N + 1))
    Bv = zeros(T, ns)
    s = zeros(T, ns * (N + 1))
    # block_B is lower block-triangular in effect: block i applied to v_j
    # contributes to the state at step i + j.
    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)
        for j = 1:(N - i + 1)
            v_sub_vec = v[(1 + nu * (j - 1)):(nu * j)]
            LinearAlgebra.mul!(Bv, B_sub_block, v_sub_vec)
            s[(1 + ns * (i + j - 1)):(ns * (i + j))] .+= Bv
        end
    end
    # Add the free response from the initial state and the disturbance term.
    LinearAlgebra.mul!(As0, block_A, dnlp.s0)
    LinearAlgebra.axpy!(1, As0, s)
    LinearAlgebra.axpy!(1, block_Aw, s)
    return s
end
# Auto-generate a getter (get_A, get_s0, ...) for every field of
# LQDynamicData, plus forwarding methods for both model wrappers, and
# export each generated function.
for field in fieldnames(LQDynamicData)
    method = Symbol("get_", field)
    @eval begin
        @doc """
            $($method)(LQDynamicData)
            $($method)(SparseLQDynamicModel)
            $($method)(DenseLQDynamicModel)
        Return the value of $($(QuoteNode(field))) from `LQDynamicData` or `SparseLQDynamicModel.dynamic_data` or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData) = getproperty(dyn_data, $(QuoteNode(field)))
    end
    # The wrappers forward to the embedded dynamic_data.
    @eval $method(dyn_model::SparseLQDynamicModel) = $method(dyn_model.dynamic_data)
    @eval $method(dyn_model::DenseLQDynamicModel) = $method(dyn_model.dynamic_data)
    @eval export $method
end
# Auto-generate element setters (set_A!, set_Q!, ...) for the matrix-valued
# fields of LQDynamicData, with forwarding methods for both model wrappers.
# NOTE(review): these mutate dynamic_data only; condensed model blocks are
# not rebuilt — confirm callers rebuild where needed.
for field in [:A, :B, :Q, :R, :Qf, :E, :F, :S, :K]
    method = Symbol("set_", field, "!")
    @eval begin
        @doc """
            $($method)(LQDynamicData, row, col, val)
            $($method)(SparseLQDynamicModel, row, col, val)
            $($method)(DenseLQDynamicModel, row, col, val)
        Set the value of entry $($(QuoteNode(field)))[row, col] to val for `LQDynamicData`, `SparseLQDynamicModel.dynamic_data`, or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData, row, col, val) = (dyn_data.$field[row, col] = val)
    end
    @eval $method(dyn_model::SparseLQDynamicModel, row, col, val) =
        (dyn_model.dynamic_data.$field[row, col] = val)
    @eval $method(dyn_model::DenseLQDynamicModel, row, col, val) =
        (dyn_model.dynamic_data.$field[row, col] = val)
    @eval export $method
end
# Auto-generate element setters (set_s0!, set_sl!, ...) for the
# vector-valued fields of LQDynamicData, with forwarding methods for both
# model wrappers.
for field in [:s0, :sl, :su, :ul, :uu, :gl, :gu]
    method = Symbol("set_", field, "!")
    @eval begin
        @doc """
            $($method)(LQDynamicData, index, val)
            $($method)(SparseLQDynamicModel, index, val)
            $($method)(DenseLQDynamicModel, index, val)
        Set the value of entry $($(QuoteNode(field)))[index] to val for `LQDynamicData`, `SparseLQDynamicModel.dynamic_data`, or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData, index, val) = (dyn_data.$field[index] = val)
    end
    @eval $method(dyn_model::SparseLQDynamicModel, index, val) =
        (dyn_model.dynamic_data.$field[index] = val)
    @eval $method(dyn_model::DenseLQDynamicModel, index, val) =
        (dyn_model.dynamic_data.$field[index] = val)
    @eval export $method
end
function fill_structure!(S::SparseMatrixCSC, rows, cols)
    # Record the (row, column) coordinate of every stored entry of `S`,
    # walking the CSC storage column by column. `rows`/`cols` must be at
    # least nnz(S) long.
    idx = 1
    @inbounds for col = 1:size(S, 2)
        for k in nzrange(S, col)
            rows[idx] = S.rowval[k]
            cols[idx] = col
            idx += 1
        end
    end
end
function fill_coord!(S::SparseMatrixCSC, vals, obj_weight)
    # Copy the stored values of `S` into `vals` in CSC (column-major)
    # order, scaled by `obj_weight` — same ordering as fill_structure!.
    idx = 1
    @inbounds for col = 1:size(S, 2)
        for k in nzrange(S, col)
            vals[idx] = obj_weight * S.nzval[k]
            idx += 1
        end
    end
end
function NLPModels.hess_structure!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    # The Hessian sparsity pattern is exactly the stored pattern of H.
    fill_structure!(qp.data.H, rows, cols)
    return rows, cols
end
function NLPModels.hess_structure!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    # Dense Hessian: report every entry of the lower triangle, walking
    # column by column (column-major order).
    k = 1
    n = qp.meta.nvar
    for col = 1:n, row = col:n
        rows[k] = row
        cols[k] = col
        k += 1
    end
    return rows, cols
end
function NLPModels.hess_coord!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector{T},
    vals::AbstractVector{T};
    obj_weight::Real = one(eltype(x)),
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    # H is constant (quadratic objective): ignore x, just scale stored values.
    NLPModels.increment!(qp, :neval_hess)
    fill_coord!(qp.data.H, vals, obj_weight)
    return vals
end
function NLPModels.hess_coord!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector{T},
    vals::AbstractVector{T};
    obj_weight::Real = one(eltype(x)),
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    # Values of the lower triangle of H scaled by obj_weight, emitted in
    # the same column-major order as hess_structure!. H is constant, so x
    # is unused.
    NLPModels.increment!(qp, :neval_hess)
    k = 1
    n = qp.meta.nvar
    for col = 1:n, row = col:n
        vals[k] = obj_weight * qp.data.H[row, col]
        k += 1
    end
    return vals
end
# The Hessian of an LQ problem is independent of the Lagrange multipliers
# `y`, so the constrained variants simply forward to the objective-only
# methods above.
function NLPModels.hess_coord!(
    qp::SparseLQDynamicModel,
    x::AbstractVector,
    y::AbstractVector,
    vals::AbstractVector;
    obj_weight::Real = one(eltype(x)),
)
    return NLPModels.hess_coord!(qp, x, vals, obj_weight = obj_weight)
end

function NLPModels.hess_coord!(
    qp::DenseLQDynamicModel,
    x::AbstractVector,
    y::AbstractVector,
    vals::AbstractVector;
    obj_weight::Real = one(eltype(x)),
)
    return NLPModels.hess_coord!(qp, x, vals, obj_weight = obj_weight)
end
function NLPModels.jac_structure!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    # Jacobian sparsity is exactly the stored pattern of the constraint matrix A.
    fill_structure!(qp.data.A, rows, cols)
    return rows, cols
end
function NLPModels.jac_structure!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    # Dense Jacobian: every (row, col) pair, column-major.
    k = 1
    for col = 1:(qp.meta.nvar), row = 1:(qp.meta.ncon)
        rows[k] = row
        cols[k] = col
        k += 1
    end
    return rows, cols
end
function NLPModels.jac_coord!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector,
    vals::AbstractVector,
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    # Constraints are linear, so the Jacobian is constant; x is unused.
    NLPModels.increment!(qp, :neval_jac)
    fill_coord!(qp.data.A, vals, one(T))
    return vals
end
function NLPModels.jac_coord!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector,
    vals::AbstractVector,
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    # Dense constant Jacobian: emit all entries in the same column-major
    # order as jac_structure!. x is unused (linear constraints).
    NLPModels.increment!(qp, :neval_jac)
    k = 1
    for col = 1:(qp.meta.nvar), row = 1:(qp.meta.ncon)
        vals[k] = qp.data.A[row, col]
        k += 1
    end
    return vals
end
function _dnlp_unsafe_wrap(
    tensor::A,
    dims::Tuple,
    shift = 1,
) where {T, A <: AbstractArray{T}}
    # Reinterpret a contiguous slab of `tensor`'s memory, starting at
    # linear index `shift`, as a Matrix of size `dims` without copying.
    # The caller must keep `tensor` alive while the wrapper is used.
    p = pointer(tensor, shift)
    return unsafe_wrap(Matrix{T}, p, dims)
end
function _dnlp_unsafe_wrap(
    tensor::A,
    dims::Tuple,
    shift = 1,
) where {T, A <: CUDA.CuArray{T, 3, CUDA.Mem.DeviceBuffer}}
    # CUDA specialization: wrap a slab of device memory (starting at linear
    # index `shift`) as a 2-D CuArray without copying. The caller must keep
    # `tensor` rooted while the wrapper is in use.
    return unsafe_wrap(
        CUDA.CuArray{T, 2, CUDA.Mem.DeviceBuffer},
        pointer(tensor, shift),
        dims,
    )
end
function LinearAlgebra.mul!(
    y::V,
    Jac::LQJacobianOperator{T, M, A},
    x::V,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # y = Jac * x for the implicit Jacobian. The Jacobian is stored as
    # three stacks of nu-column blocks: J1 (algebraic constraints, nc
    # rows), J2 (state bounds, nsc rows), J3 (input bounds, nuc rows);
    # block i acts on input block x_j and contributes to output block
    # i + j - 1 within each constraint group.
    fill!(y, zero(T))
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    for i = 1:N
        # i-th block of each stack, wrapped as a matrix without copying.
        sub_B1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
        sub_B2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
        sub_B3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
        for j = 1:(N - i + 1)
            sub_x = view(x, (1 + (j - 1) * nu):(j * nu))
            # 5-arg mul! accumulates: y_block += B * sub_x.
            LinearAlgebra.mul!(
                view(y, (1 + nc * (j + i - 2)):(nc * (j + i - 1))),
                sub_B1,
                sub_x,
                1,
                1,
            )
            LinearAlgebra.mul!(
                view(y, (1 + nc * N + nsc * (j + i - 2)):(nc * N + nsc * (j + i - 1))),
                sub_B2,
                sub_x,
                1,
                1,
            )
            LinearAlgebra.mul!(
                view(
                    y,
                    (1 + nc * N + nsc * N + nuc * (j + i - 2)):(nc * N + nsc * N + nuc * (j + i - 1)),
                ),
                sub_B3,
                sub_x,
                1,
                1,
            )
        end
    end
end
function LinearAlgebra.mul!(
    x::V,
    Jac::LQJacobianOperator{T, M, A},
    y::V,
) where {
    T,
    V <: CUDA.CuArray{T, 1, CUDA.Mem.DeviceBuffer},
    M <: AbstractMatrix{T},
    A <: AbstractArray{T},
}
    # GPU forward product using strided-batched GEMMs over the 3-D block
    # stacks instead of the per-block CPU loop.
    # NOTE(review): in this method `x` is the OUTPUT and `y` the input —
    # opposite naming from the CPU method above.
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    # Preallocated device scratch buffers stored on the operator.
    x1 = Jac.x1
    x2 = Jac.x2
    x3 = Jac.x3
    y1 = Jac.y
    fill!(x1, zero(T))
    fill!(x2, zero(T))
    fill!(x3, zero(T))
    for i = 1:N
        # Broadcast input block i into the batched right-hand side.
        y1 .= y[(1 + (i - 1) * nu):(i * nu)]
        x1_view = view(x1, :, :, i:N)
        x2_view = view(x2, :, :, i:N)
        x3_view = view(x3, :, :, i:N)
        J1_view = view(J1, :, :, 1:(N - i + 1))
        J2_view = view(J2, :, :, 1:(N - i + 1))
        J3_view = view(J3, :, :, 1:(N - i + 1))
        y1_view = view(y1, :, :, i:N)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J1_view, y1_view, 1, x1_view)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J2_view, y1_view, 1, x2_view)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J3_view, y1_view, 1, x3_view)
    end
    # Flatten the per-step results back into the stacked output vector.
    x[1:(nc * N)] .= reshape(x1, nc * N)
    x[(1 + nc * N):((nc + nsc) * N)] .= reshape(x2, nsc * N)
    x[(1 + (nc + nsc) * N):((nc + nsc + nuc) * N)] .= reshape(x3, nuc * N)
end
function LinearAlgebra.mul!(
    y::V,
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
    x::V,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # y = Jac' * x: transposed action of the implicit Jacobian, summing
    # each constraint group's contribution into the input blocks of y.
    fill!(y, zero(T))
    jac_op = get_jacobian(Jac)
    J1 = jac_op.truncated_jac1
    J2 = jac_op.truncated_jac2
    J3 = jac_op.truncated_jac3
    N = jac_op.N
    nu = jac_op.nu
    nc = jac_op.nc
    nsc = jac_op.nsc
    nuc = jac_op.nuc
    for i = 1:N
        # i-th block of each stack, wrapped as a matrix without copying.
        sub_B1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
        sub_B2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
        sub_B3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
        for j = 1:(N - i + 1)
            # Slices of x for the three stacked constraint groups.
            x1 = view(x, (1 + (j + i - 2) * nc):((j + i - 1) * nc))
            x2 = view(x, (1 + nc * N + (j + i - 2) * nsc):(nc * N + (j + i - 1) * nsc))
            x3 = view(
                x,
                (1 + nc * N + nsc * N + (j + i - 2) * nuc):(nc * N + nsc * N + (j + i - 1) * nuc),
            )
            # 5-arg mul! accumulates: y_j += B' * x_block.
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B1', x1, 1, 1)
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B2', x2, 1, 1)
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B3', x3, 1, 1)
        end
    end
end
function LinearAlgebra.mul!(
    y::V,
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
    x::V,
) where {
    T,
    V <: CUDA.CuArray{T, 1, CUDA.Mem.DeviceBuffer},
    M <: AbstractMatrix{T},
    A <: AbstractArray{T},
}
    # GPU transposed product y = Jac' * x using strided-batched GEMMs with
    # the 'T' (transpose) flag on the Jacobian blocks.
    fill!(y, zero(T))
    jac_op = get_jacobian(Jac)
    J1 = jac_op.truncated_jac1
    J2 = jac_op.truncated_jac2
    J3 = jac_op.truncated_jac3
    N = jac_op.N
    nu = jac_op.nu
    nc = jac_op.nc
    nsc = jac_op.nsc
    nuc = jac_op.nuc
    # Preallocated device scratch buffers stored on the operator.
    x1 = jac_op.x1
    x2 = jac_op.x2
    x3 = jac_op.x3
    y1 = jac_op.y
    # Split the stacked input into the three constraint groups.
    x1 .= reshape(x[1:(nc * N)], (nc, 1, N))
    x2 .= reshape(x[(1 + nc * N):((nc + nsc) * N)], (nsc, 1, N))
    x3 .= reshape(x[(1 + (nc + nsc) * N):((nc + nsc + nuc) * N)], (nuc, 1, N))
    for i = 1:N
        fill!(y1, zero(T))
        y1_view = view(y1, :, :, 1:(N - i + 1))
        x1_view = view(x1, :, :, i:N)
        x2_view = view(x2, :, :, i:N)
        x3_view = view(x3, :, :, i:N)
        J1_view = view(J1, :, :, 1:(N - i + 1))
        J2_view = view(J2, :, :, 1:(N - i + 1))
        J3_view = view(J3, :, :, 1:(N - i + 1))
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J1_view, x1_view, 1, y1_view)
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J2_view, x2_view, 1, y1_view)
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J3_view, x3_view, 1, y1_view)
        # Reduce the batched partial products into output block i.
        view(y, (1 + (i - 1) * nu):(i * nu)) .= sum(y1_view, dims = (2, 3))
    end
end
"""
get_jacobian(lqdm::DenseLQDynamicModel) -> LQJacobianOperator
    get_jacobian(Jac::AdjointLinearOperator{T, LQJacobianOperator}) -> LQJacobianOperator
Gets the `LQJacobianOperator` from `DenseLQDynamicModel` (if the `QPData` contains an `LQJacobianOperator`)
or returns the `LQJacobianOperator` from the adjoint of the `LQJacobianOperator`
"""
function get_jacobian(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {T, V, M1, M2, M3, M4, MK}
    # The (possibly implicit) constraint Jacobian is stored as the QP data's A.
    return lqdm.data.A
end
function get_jacobian(
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # The adjoint of an adjoint recovers the underlying LQJacobianOperator.
    return Jac'
end
function Base.length(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Total number of stored entries across the three truncated Jacobian
    # block stacks.
    blocks = (Jac.truncated_jac1, Jac.truncated_jac2, Jac.truncated_jac3)
    return sum(length, blocks)
end
function Base.size(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Rows: the three constraint groups stacked; columns: shared input width.
    nrow =
        size(Jac.truncated_jac1, 1) +
        size(Jac.truncated_jac2, 1) +
        size(Jac.truncated_jac3, 1)
    return (nrow, size(Jac.truncated_jac1, 2))
end
function Base.eltype(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Element type of the operator.
    # Fixed: widened `A <: AbstractMatrix{T}` to `AbstractArray{T}` — the
    # truncated Jacobian storage is three-dimensional (see the mul!
    # methods), so the tighter bound prevented this method from dispatching.
    return T
end
function Base.isreal(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # True when all three truncated Jacobian stacks hold real values.
    # Fixed: widened `A <: AbstractMatrix{T}` to `AbstractArray{T}` to
    # match the 3-D storage assumed by the other operator methods.
    return isreal(Jac.truncated_jac1) &&
           isreal(Jac.truncated_jac2) &&
           isreal(Jac.truncated_jac3)
end
function Base.show(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Display only the first Jacobian stack as a summary of the operator.
    # Fixed: widened `A <: AbstractMatrix{T}` to `AbstractArray{T}` to
    # match the 3-D storage assumed by the other operator methods.
    show(Jac.truncated_jac1)
end
function Base.display(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Display only the first Jacobian stack as a summary of the operator.
    # Fixed: widened `A <: AbstractMatrix{T}` to `AbstractArray{T}` to
    # match the 3-D storage assumed by the other operator methods.
    display(Jac.truncated_jac1)
end
"""
LinearOperators.reset!(Jac::LQJacobianOperator{T, V, M})
Resets the values of attributes `SJ1`, `SJ2`, and `SJ3` to zero
"""
function LinearOperators.reset!(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Zero the Σ * J scratch products used by add_jtsj!.
    # Fixed: widened `A <: AbstractMatrix{T}` to `AbstractArray{T}` — the
    # operator's storage is three-dimensional, so the tighter bound
    # prevented this method from ever dispatching.
    fill!(Jac.SJ1, T(0))
    fill!(Jac.SJ2, T(0))
    fill!(Jac.SJ3, T(0))
end
function NLPModels.jac_op(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    x::V,
) where {T, V <: AbstractVector{T}, M1, M2 <: LQJacobianOperator, M3, M4, MK}
    # The stored Jacobian is already a linear operator; `x` is unused
    # because the constraints are linear.
    return lqdm.data.A
end
"""
add_jtsj!(H::M, Jac::LQJacobianOperator{T, V, M}, Σ::V, alpha::Number = 1, beta::Number = 1)
Generates `Jac' Σ Jac` and adds it to the matrix `H`.
`alpha` and `beta` are scalar multipliers such that `beta H + alpha Jac' Σ Jac` is stored in `H`, overwriting the existing value of `H`
"""
function add_jtsj!(
    H::M,
    Jac::LQJacobianOperator{T, M, A},
    Σ::V,
    alpha::Number = 1,
    beta::Number = 1,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    # Preallocated scratch for Σ-scaled left blocks (zeroed via reset!).
    ΣJ1 = Jac.SJ1
    ΣJ2 = Jac.SJ2
    ΣJ3 = Jac.SJ3
    # H <- beta * H, then accumulate alpha * J' Σ J block by block.
    LinearAlgebra.lmul!(beta, H)
    for i = 1:N
        left_block1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
        left_block2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
        left_block3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
        for j = 1:(N + 1 - i)
            # Σ slices for the three stacked constraint groups.
            Σ_range1 = (1 + (N - j) * nc):((N - j + 1) * nc)
            Σ_range2 = (1 + nc * N + (N - j) * nsc):(nc * N + (N - j + 1) * nsc)
            Σ_range3 =
                (1 + (nc + nsc) * N + (N - j) * nuc):((nc + nsc) * N + (N - j + 1) * nuc)
            # Row-scale the left blocks by the Σ diagonal.
            ΣJ1 .= left_block1 .* view(Σ, Σ_range1)
            ΣJ2 .= left_block2 .* view(Σ, Σ_range2)
            ΣJ3 .= left_block3 .* view(Σ, Σ_range3)
            for k = 1:(N - j - i + 2)
                right_block1 =
                    _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (k + i - 2) * (nc * nu)))
                right_block2 =
                    _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (k + i - 2) * (nsc * nu)))
                right_block3 =
                    _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (k + i - 2) * (nuc * nu)))
                row_range = (1 + nu * (N - i - j + 1)):(nu * (N - i - j + 2))
                col_range = (1 + nu * (N - i - k - j + 2)):(nu * (N - i - k - j + 3))
                # 5-arg mul!: H[rows, cols] += alpha * (ΣJ)' * right_block.
                LinearAlgebra.mul!(
                    view(H, row_range, col_range),
                    ΣJ1',
                    right_block1,
                    alpha,
                    1,
                )
                LinearAlgebra.mul!(
                    view(H, row_range, col_range),
                    ΣJ2',
                    right_block2,
                    alpha,
                    1,
                )
                LinearAlgebra.mul!(
                    view(H, row_range, col_range),
                    ΣJ3',
                    right_block3,
                    alpha,
                    1,
                )
            end
        end
    end
end
function add_jtsj!(
    H::M,
    Jac::LQJacobianOperator{T, M, A},
    Σ::V,
    alpha::Number = 1,
    beta::Number = 1,
) where {T, V <: CUDA.CuVector, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # GPU variant: identical block recursion to the CPU method, but the
    # Jacobian stacks are indexed as 3-D arrays and each block product goes
    # through a preallocated device buffer (views of H cannot be used as
    # GEMM outputs here).
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    ΣJ1 = Jac.SJ1
    ΣJ2 = Jac.SJ2
    ΣJ3 = Jac.SJ3
    H_sub_block = Jac.H_sub_block
    # H <- beta * H, then accumulate alpha * J' Σ J block by block.
    LinearAlgebra.lmul!(beta, H)
    for i = 1:N
        left_block1 = view(J1, :, :, i)
        left_block2 = view(J2, :, :, i)
        left_block3 = view(J3, :, :, i)
        for j = 1:(N + 1 - i)
            Σ_range1 = (1 + (N - j) * nc):((N - j + 1) * nc)
            Σ_range2 = (1 + nc * N + (N - j) * nsc):(nc * N + (N - j + 1) * nsc)
            Σ_range3 =
                (1 + (nc + nsc) * N + (N - j) * nuc):((nc + nsc) * N + (N - j + 1) * nuc)
            # Row-scale the left blocks by the Σ diagonal.
            ΣJ1 .= left_block1 .* view(Σ, Σ_range1)
            ΣJ2 .= left_block2 .* view(Σ, Σ_range2)
            ΣJ3 .= left_block3 .* view(Σ, Σ_range3)
            for k = 1:(N - j - i + 2)
                right_block1 = view(J1, :, :, (k + i - 1))
                right_block2 = view(J2, :, :, (k + i - 1))
                right_block3 = view(J3, :, :, (k + i - 1))
                row_range = (1 + nu * (N - i - j + 1)):(nu * (N - i - j + 2))
                col_range = (1 + nu * (N - i - k - j + 2)):(nu * (N - i - k - j + 3))
                LinearAlgebra.mul!(H_sub_block, ΣJ1', right_block1)
                H[row_range, col_range] .+= alpha .* H_sub_block
                LinearAlgebra.mul!(H_sub_block, ΣJ2', right_block2)
                H[row_range, col_range] .+= alpha .* H_sub_block
                LinearAlgebra.mul!(H_sub_block, ΣJ3', right_block3)
                H[row_range, col_range] .+= alpha .* H_sub_block
            end
        end
    end
end
"""
reset_s0!(lqdm::SparseLQDynamicModel, s0)
reset_s0!(lqdm::DenseLQDynamicModel, s0)
Resets `s0` within `lqdm.dynamic_data`. For a `SparseLQDynamicModel`, this updates the variable bounds which fix the value of `s0`.
For a `DenseLQDynamicModel`, also resets the constraint bounds on the Jacobian and resets the linear and constant terms within the
objective function (i.e., `lqdm.data.c` and `lqdm.data.c0`). This provides a way to update the model after each sample period.
"""
function reset_s0!(
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, MK}
    # The sparse formulation fixes the initial state via equal lower/upper
    # variable bounds on the first ns variables; update those bounds and
    # the stored s0 together.
    ns = lqdm.dynamic_data.ns
    lqdm.dynamic_data.s0 .= s0
    lqdm.meta.lvar[1:ns] .= s0
    lqdm.meta.uvar[1:ns] .= s0
end
function reset_s0!(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, M4, MK <: Nothing}
    # Dense model without feedback: update s0 and refresh everything that
    # depends on it — constraint bounds (E/F rows and state-bound rows),
    # the objective's linear term c, and its constant term c0.
    dnlp = lqdm.dynamic_data
    dense_blocks = lqdm.blocks
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F  # fixed: previously read dnlp.E into F
    ul = dnlp.ul
    uu = dnlp.uu
    sl = dnlp.sl
    su = dnlp.su
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # Get matrices for multiplying by s0
    block_A = dense_blocks.A
    block_Aw = dense_blocks.Aw
    block_h = dense_blocks.h
    block_h0 = dense_blocks.h01
    block_d = dense_blocks.d
    block_dw = dense_blocks.dw
    block_h02 = dense_blocks.h02
    h_constant = dense_blocks.h_constant
    h0_constant = dense_blocks.h0_constant
    lcon = lqdm.meta.lcon
    ucon = lqdm.meta.ucon
    # Reset s0
    lqdm.dynamic_data.s0 .= s0
    As0 = _init_similar(s0, ns * (N + 1), T)
    Qs0 = _init_similar(s0, ns, T)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    # Keep only the states that actually carry a finite bound.
    bool_vec_s = (sl .!= -Inf .|| su .!= Inf)
    nsc = sum(bool_vec_s)
    sl = sl[bool_vec_s]
    su = su[bool_vec_s]
    # d bounds become g - d * s0.
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    # Reset constraint bounds corresponding to E and F matrices
    lcon[1:(nc * N)] .= dl
    ucon[1:(nc * N)] .= du
    lcon[1:(nc * N)] .-= block_dw
    ucon[1:(nc * N)] .-= block_dw
    LinearAlgebra.mul!(As0, block_A, s0)
    # reset linear term
    LinearAlgebra.mul!(lqdm.data.c, block_h, s0)
    lqdm.data.c += h_constant
    # reset constant term
    LinearAlgebra.mul!(Qs0, block_h0, s0)
    lqdm.data.c0 = LinearAlgebra.dot(s0, Qs0) / T(2)
    lqdm.data.c0 += h0_constant
    lqdm.data.c0 += LinearAlgebra.dot(s0, block_h02)
    for i = 1:N
        # Reset bounds on constraints from state variable bounds
        lcon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            sl .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + ns * i):((i + 1) * ns)][bool_vec_s]
        ucon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            su .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + ns * i):((i + 1) * ns)][bool_vec_s]
    end
end
function reset_s0!(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, M4, MK <: AbstractMatrix{T}}
    # Dense model with feedback K: in addition to the no-K case, the input
    # bounds become constraints on K * s + v and also depend on s0.
    dnlp = lqdm.dynamic_data
    dense_blocks = lqdm.blocks
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F  # fixed: previously read dnlp.E into F
    K = dnlp.K
    ul = dnlp.ul
    uu = dnlp.uu
    sl = dnlp.sl
    su = dnlp.su
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # Get matrices for multiplying by s0
    block_A = dense_blocks.A
    block_Aw = dense_blocks.Aw
    block_h = dense_blocks.h
    block_h0 = dense_blocks.h01
    block_d = dense_blocks.d
    block_dw = dense_blocks.dw
    block_KA = dense_blocks.KA
    block_KAw = dense_blocks.KAw
    block_h02 = dense_blocks.h02
    h_constant = dense_blocks.h_constant
    h0_constant = dense_blocks.h0_constant
    lcon = lqdm.meta.lcon
    ucon = lqdm.meta.ucon
    # Reset s0
    lqdm.dynamic_data.s0 .= s0
    # NOTE: removed a stray `lqdm.data.c0 += dot(s0, block_h02)` that
    # appeared here — c0 is fully reassigned below, so the early increment
    # was dead work.
    As0 = _init_similar(s0, ns * (N + 1), T)
    Qs0 = _init_similar(s0, ns, T)
    KAs0 = _init_similar(s0, nu * N, T)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    # Keep only the states/inputs that actually carry a finite bound.
    bool_vec_s = (sl .!= -Inf .|| su .!= Inf)
    nsc = sum(bool_vec_s)
    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    nuc = sum(bool_vec_u)
    sl = sl[bool_vec_s]
    su = su[bool_vec_s]
    ul = ul[bool_vec_u]
    uu = uu[bool_vec_u]
    # d bounds become g - d * s0.
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    # Reset constraint bounds corresponding to E and F matrices
    lcon[1:(nc * N)] .= dl
    ucon[1:(nc * N)] .= du
    lcon[1:(nc * N)] .-= block_dw
    ucon[1:(nc * N)] .-= block_dw
    LinearAlgebra.mul!(As0, block_A, s0)
    LinearAlgebra.mul!(KAs0, block_KA, s0)
    # reset linear term
    LinearAlgebra.mul!(lqdm.data.c, block_h, s0)
    lqdm.data.c += h_constant
    # reset constant term
    LinearAlgebra.mul!(Qs0, block_h0, s0)
    lqdm.data.c0 = LinearAlgebra.dot(s0, Qs0) / T(2)
    lqdm.data.c0 += h0_constant
    lqdm.data.c0 += LinearAlgebra.dot(s0, block_h02)
    for i = 1:N
        # Reset bounds on constraints from state variable bounds
        lcon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            sl .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + i * ns):((i + 1) * ns)][bool_vec_s]
        ucon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            su .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + i * ns):((i + 1) * ns)][bool_vec_s]
        # Reset bounds on constraints from input variable bounds
        lcon[(1 + (nc + nsc) * N + nuc * (i - 1)):((nc + nsc) * N + nuc * i)] .=
            ul .- KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .-
            block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
        ucon[(1 + (nc + nsc) * N + nuc * (i - 1)):((nc + nsc) * N + nuc * i)] .=
            uu .- KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .-
            block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
    end
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 6953 | function test_mul(lq_dense, lq_dense_imp)
    # Check that the implicit Jacobian operator reproduces the explicit
    # dense Jacobian for both J * x and J' * y.
    # NOTE: the sequence of rand() calls after the seed is part of the
    # test's behavior; do not reorder.
    dnlp = lq_dense.dynamic_data
    N = dnlp.N
    nu = dnlp.nu
    J = get_jacobian(lq_dense)
    J_imp = get_jacobian(lq_dense_imp)
    Random.seed!(10)
    x = rand(nu * N)
    y = rand(size(J, 1))
    # Mirror the inputs onto the implicit model's storage (possibly GPU).
    x_imp = similar(lq_dense_imp.dynamic_data.s0, length(x))
    y_imp = similar(lq_dense_imp.dynamic_data.s0, length(y))
    LinearAlgebra.copyto!(x_imp, x)
    LinearAlgebra.copyto!(y_imp, y)
    LinearAlgebra.mul!(y, J, x)
    LinearAlgebra.mul!(y_imp, J_imp, x_imp)
    @test y ≈ Vector(y_imp) atol = 1e-14
    x = rand(nu * N)
    y = rand(size(J, 1))
    x_imp = similar(lq_dense_imp.dynamic_data.s0, length(x))
    y_imp = similar(lq_dense_imp.dynamic_data.s0, length(y))
    LinearAlgebra.copyto!(x_imp, x)
    LinearAlgebra.copyto!(y_imp, y)
    LinearAlgebra.mul!(x, J', y)
    LinearAlgebra.mul!(x_imp, J_imp', y_imp)
    @test x ≈ Vector(x_imp) atol = 1e-14
end
function test_add_jtsj(lq_dense, lq_dense_imp)
    # Check that the implicit add_jtsj! matches the explicit J' Σ J
    # product. Only the lower triangle is compared because add_jtsj! fills
    # only the lower triangle.
    dnlp = lq_dense.dynamic_data
    N = dnlp.N
    nu = dnlp.nu
    H = zeros(nu * N, nu * N)
    Random.seed!(10)
    J = get_jacobian(lq_dense)
    J_imp = get_jacobian(lq_dense_imp)
    ΣJ = similar(J); fill!(ΣJ, 0)
    x = rand(size(J, 1))
    H_imp = similar(lq_dense_imp.data.H, nu * N, nu * N); fill!(H_imp, 0)
    x_imp = similar(lq_dense_imp.dynamic_data.s0, length(x));
    LinearAlgebra.copyto!(x_imp, x)
    # Reference: H = J' * Diagonal(x) * J computed explicitly.
    LinearAlgebra.mul!(ΣJ, Diagonal(x), J)
    LinearAlgebra.mul!(H, J', ΣJ)
    add_jtsj!(H_imp, J_imp, x_imp)
    @test LowerTriangular(Array(H_imp)) ≈ LowerTriangular(H) atol = 1e-10
end
function dynamic_data_to_CUDA(dnlp::LQDynamicData)
    # Return an LQDynamicData whose array fields live on the GPU as
    # Float64 CuArrays of matching shape.
    to_gpu(x) = begin
        g = CuArray{Float64}(undef, size(x))
        LinearAlgebra.copyto!(g, x)
        g
    end

    s0c = to_gpu(dnlp.s0)
    Ac = to_gpu(dnlp.A)
    Bc = to_gpu(dnlp.B)
    Qc = to_gpu(dnlp.Q)
    Rc = to_gpu(dnlp.R)
    Sc = to_gpu(dnlp.S)
    Ec = to_gpu(dnlp.E)
    Fc = to_gpu(dnlp.F)
    wc = to_gpu(dnlp.w)
    Qfc = to_gpu(dnlp.Qf)
    glc = to_gpu(dnlp.gl)
    guc = to_gpu(dnlp.gu)
    ulc = to_gpu(dnlp.ul)
    uuc = to_gpu(dnlp.uu)
    slc = to_gpu(dnlp.sl)
    suc = to_gpu(dnlp.su)

    # K is optional; only copy it to the device when present.
    Kc = dnlp.K === nothing ? nothing : to_gpu(dnlp.K)

    return LQDynamicData(s0c, Ac, Bc, Qc, Rc, dnlp.N; Qf = Qfc, S = Sc,
        E = Ec, F = Fc, K = Kc, sl = slc, su = suc, ul = ulc, uu = uuc,
        gl = glc, gu = guc, w = wc,
    )
end
function test_sparse_support(lqdm)
    # Building the sparse model from sparse inputs must give the same H
    # and A as the model built from dense inputs.
    d = lqdm.dynamic_data
    (lqdm_sparse_data = SparseLQDynamicModel(d.s0, sparse(d.A), sparse(d.B), sparse(d.Q), sparse(d.R), d.N;
        sl = d.sl, ul = d.ul, su = d.su, uu = d.uu, Qf = sparse(d.Qf), K = (d.K == nothing ? nothing : sparse(d.K)),
        S = sparse(d.S), E = sparse(d.E), F = sparse(d.F), gl = d.gl, gu = d.gu))

    @test lqdm.data.H ≈ lqdm_sparse_data.data.H atol = 1e-10
    @test lqdm.data.A ≈ lqdm_sparse_data.data.A atol = 1e-10
end
function test_dense_reset_s0(dnlp, lq_dense, new_s0)
    # Resetting s0 in place must be equivalent to rebuilding the dense
    # model from scratch with the new initial state.
    # Note: dnlp.s0 is mutated here before building the reference model.
    lq_dense_test = DenseLQDynamicModel(dnlp)
    dnlp.s0 .= new_s0
    lq_dense_new_s0 = DenseLQDynamicModel(dnlp)
    reset_s0!(lq_dense_test, new_s0)
    @test lq_dense_test.data.H ≈ lq_dense_new_s0.data.H atol = 1e-10
    @test lq_dense_test.data.A ≈ lq_dense_new_s0.data.A atol = 1e-10
    @test lq_dense_test.data.c ≈ lq_dense_new_s0.data.c atol = 1e-10
    @test lq_dense_test.data.c0 ≈ lq_dense_new_s0.data.c0 atol = 1e-10
    @test lq_dense_test.meta.lcon ≈ lq_dense_new_s0.meta.lcon atol = 1e-8
    @test lq_dense_test.meta.ucon ≈ lq_dense_new_s0.meta.ucon atol = 1e-8
    @test lq_dense_test.dynamic_data.s0 == lq_dense_new_s0.dynamic_data.s0
end
function test_sparse_reset_s0(dnlp, lq_sparse, new_s0)
    # reset_s0! must propagate the new initial state into dynamic_data.
    reset_s0!(lq_sparse, new_s0)
    @test lq_sparse.dynamic_data.s0 == new_s0
end
function runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
    # Cross-check all four model constructions against the JuMP reference:
    # objectives, solutions, get_u/get_s accessors, sparse-input support,
    # implicit (and optionally CUDA) Jacobian products, and s0 resets.
    optimize!(model)
    solution_ref_sparse = madnlp(lq_sparse, max_iter=100)
    solution_ref_dense = madnlp(lq_dense, max_iter=100)
    solution_ref_sparse_from_data = madnlp(lq_sparse_from_data, max_iter=100)
    solution_ref_dense_from_data = madnlp(lq_dense_from_data, max_iter=100)
    @test objective_value(model) ≈ solution_ref_sparse.objective atol = 1e-7
    @test objective_value(model) ≈ solution_ref_dense.objective atol = 1e-5
    @test objective_value(model) ≈ solution_ref_sparse_from_data.objective atol = 1e-7
    @test objective_value(model) ≈ solution_ref_dense_from_data.objective atol = 1e-5
    # The dense models' decision vector is only the inputs.
    @test solution_ref_sparse.solution[(ns * (N + 1) + 1):(ns * (N + 1) + nu*N)] ≈ solution_ref_dense.solution atol = 1e-5
    @test solution_ref_sparse_from_data.solution[(ns * (N + 1) + 1):(ns * (N + 1) + nu*N)] ≈ solution_ref_dense_from_data.solution atol = 1e-5
    # Test get_u and get_s functions with no K matrix
    s_values = value.(all_variables(model)[1:(ns * (N + 1))])
    u_values = value.(all_variables(model)[(1 + ns * (N + 1)):(ns * (N + 1) + nu * N)])
    @test s_values ≈ get_s(solution_ref_sparse, lq_sparse) atol = 1e-6
    @test u_values ≈ get_u(solution_ref_sparse, lq_sparse) atol = 1e-6
    @test s_values ≈ get_s(solution_ref_dense, lq_dense) atol = 2e-5
    @test u_values ≈ get_u(solution_ref_dense, lq_dense) atol = 2e-5
    test_sparse_support(lq_sparse)
    # Implicit-Jacobian models: CPU always, GPU only when available.
    lq_dense_imp = DenseLQDynamicModel(dnlp; implicit = true)
    imp_test_set = []
    push!(imp_test_set, lq_dense_imp)
    if CUDA.has_cuda_gpu()
        dnlp_cuda = dynamic_data_to_CUDA(dnlp)
        lq_dense_cuda = DenseLQDynamicModel(dnlp_cuda; implicit=true)
        push!(imp_test_set, lq_dense_cuda)
    end
    @testset "Test mul and add_jtsj!" for lq_imp in imp_test_set
        test_mul(lq_dense, lq_imp)
        test_add_jtsj(lq_dense, lq_imp)
    end
    new_s0 = copy(dnlp.s0) .+ .5
    test_dense_reset_s0(dnlp, lq_dense, new_s0)
    new_s0 = copy(dnlp.s0) .+ 1
    test_sparse_reset_s0(dnlp, lq_sparse, new_s0)
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 13163 | using Test, DynamicNLPModels, MadNLP, Random, JuMP, LinearAlgebra, SparseArrays, CUDA
include("sparse_lq_test.jl")
include("functions.jl")
# Problem dimensions for the test cases below.
N = 3 # number of time steps
ns = 2 # number of states
nu = 1 # number of inputs
# generate random Q, R, A, and B matrices
# (M * M' + I makes Q, R, A symmetric positive definite)
Random.seed!(10)
Q_rand = Random.rand(ns, ns)
Q = Q_rand * Q_rand' + I
R_rand = Random.rand(nu,nu)
R = R_rand * R_rand' + I
A_rand = rand(ns, ns)
A = A_rand * A_rand' + I
B = rand(ns, nu)
# generate upper and lower bounds
sl = rand(ns)
ul = fill(-15.0, nu)
su = sl .+ 4
uu = ul .+ 10
s0 = sl .+ 2
# Variants with one unbounded state, for the partial-bounds edge case.
su_with_inf = copy(su)
sl_with_inf = copy(sl)
su_with_inf[1] = Inf
sl_with_inf[1] = -Inf
# Terminal cost, constraint matrices E/F with bounds gl/gu, cross term S,
# disturbance w (length ns * N, one block per time step), and feedback gain K.
Qf_rand = Random.rand(ns,ns)
Qf = Qf_rand * Qf_rand' + I
E = rand(3, ns)
F = rand(3, nu)
gl = fill(-5.0, 3)
gu = fill(15.0, 3)
S = rand(ns, nu)
w = rand(0.0:.0001:.25, ns * N)
K = rand(nu, ns)
# Each scenario below builds the JuMP reference model and the four
# DynamicNLPModels formulations from identical data, then runs the shared
# test battery via runtests (defined in functions.jl / test helpers).
# Test with no bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with lower bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with upper bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, su = su, uu = uu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; su = su, uu = uu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; su = su, uu = uu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; su = su, uu = uu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with upper and lower bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl=sl, ul=ul, su = su, uu = uu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl=sl, ul=ul, su = su, uu = uu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with Qf matrix
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with E and F matrix bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test edge case where one state is unbounded, other(s) is bounded
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# The dense Jacobian only carries rows for E/F constraints plus states that
# actually have a finite bound.
@test size(lq_dense.data.A, 1) == size(E, 1) * 3 + sum(su_with_inf .!= Inf .|| sl_with_inf .!= -Inf) * N
# Test S matrix case
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case without S
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case with S
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case with S and with partial bounds on u
# Switch to two inputs and regenerate the input-dependent data.
nu = 2 # number of inputs
# generate random Q, R, A, and B matrices
Random.seed!(3)
R_rand = Random.rand(nu,nu)
R = R_rand * transpose(R_rand) + I
B = rand(ns, nu)
# generate upper and lower bounds
ul = fill(-20.0, nu)
uu = ul .+ 30
ul_with_inf = copy(ul)
uu_with_inf = copy(uu)
uu_with_inf[1] = Inf
ul_with_inf[1] = -Inf
F = rand(3, nu)
S = rand(ns, nu)
K = rand(nu, ns)
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K with no bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test get_* and set_* functions
# Each accessor is checked on the raw data, the sparse model, and the dense
# model; setters write a single entry and the getter must reflect it.
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
@test get_A(dnlp) == A
@test get_A(lq_sparse) == A
@test get_A(lq_dense) == A
rand_val = rand()
Qtest = copy(Q)
Qtest[1, 2] = rand_val
set_Q!(dnlp, 1,2, rand_val)
@test get_Q(dnlp) == Qtest
Qtest[1, 1] = rand_val
set_Q!(lq_sparse, 1, 1, rand_val)
@test get_Q(lq_sparse) == Qtest
Qtest[2, 1] = rand_val
set_Q!(lq_dense, 2, 1, rand_val)
@test get_Q(lq_dense) == Qtest
rand_val = rand()
gltest = copy(gl)
gltest[1] = rand_val
set_gl!(dnlp, 1, rand_val)
@test get_gl(dnlp) == gltest
gltest[2] = rand_val
set_gl!(lq_sparse, 2, rand_val)
@test get_gl(lq_sparse) == gltest
gltest[3] = rand_val
set_gl!(lq_dense, 3, rand_val)
@test get_gl(lq_dense) == gltest
# Test non-default vector/matrix on GenericArrays
# Build Float32 data wrapped in Test.GenericArray to verify the constructors
# are generic over AbstractArray types and preserve the storage type.
s0 = randn(Float32,2)
A = randn(Float32,2,2)
B = randn(Float32,2,2)
Q = randn(Float32,2,2)
R = randn(Float32,2,2)
S = randn(Float32,2,2)
K = randn(Float32,2,2)
E = randn(Float32,2,2)
F = randn(Float32,2,2)
gl = randn(Float32,2)
gu = gl .+ 2
sl = s0 .- 1
su = s0 .+ 1
ul = randn(Float32,2)
uu = ul .+ 2
w = Float32.(rand(0.0:.0001:.25, ns * 10))
s0 = Test.GenericArray(s0)
A = Test.GenericArray(A)
B = Test.GenericArray(B)
Q = Test.GenericArray(Q)
R = Test.GenericArray(R)
S = Test.GenericArray(S)
K = Test.GenericArray(K)
E = Test.GenericArray(E)
F = Test.GenericArray(F)
gl = Test.GenericArray(gl)
gu = Test.GenericArray(gu)
sl = Test.GenericArray(sl)
su = Test.GenericArray(su)
ul = Test.GenericArray(ul)
uu = Test.GenericArray(uu)
w = Test.GenericArray(w)
# The last type parameter reflects K: `Nothing` without a feedback gain,
# a matrix type with one.
@test (DenseLQDynamicModel(s0, A, B, Q, R, 10; S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
DenseLQDynamicModel{Float32, GenericArray{Float32, 1}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, Nothing})
@test (DenseLQDynamicModel(s0, A, B, Q, R, 10; K = K, S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
DenseLQDynamicModel{Float32, GenericArray{Float32, 1}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}})
@test (SparseLQDynamicModel(s0, A, B, Q, R, 10; S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
SparseLQDynamicModel{Float32, GenericArray{Float32, 1}, SparseMatrixCSC{Float32, Int64}, SparseMatrixCSC{Float32, Int64}, GenericArray{Float32, 2}, Nothing})
@test (SparseLQDynamicModel(s0, A, B, Q, R, 10; K = K, S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
SparseLQDynamicModel{Float32, GenericArray{Float32, 1}, SparseMatrixCSC{Float32, Int64}, SparseMatrixCSC{Float32, Int64}, GenericArray{Float32, 2}, GenericArray{Float32, 2}})
# Test LQJacobianOperator APIs
# The implicit Jacobian stores three truncated blocks; length/size/isreal/eltype
# must be consistent with those underlying pieces.
lq_dense_imp = DenseLQDynamicModel(dnlp; implicit=true)
@test length(get_jacobian(lq_dense_imp)) == (length(get_jacobian(lq_dense_imp).truncated_jac1)
+ length(get_jacobian(lq_dense_imp).truncated_jac2) + length(get_jacobian(lq_dense_imp).truncated_jac3))
(@test size(get_jacobian(lq_dense_imp)) == (size(get_jacobian(lq_dense_imp).truncated_jac1, 1) + size(get_jacobian(lq_dense_imp).truncated_jac2, 1)
+ size(get_jacobian(lq_dense_imp).truncated_jac3, 1), size(get_jacobian(lq_dense_imp).truncated_jac1, 2)))
@test isreal(get_jacobian(lq_dense_imp)) == isreal(get_jacobian(lq_dense_imp).truncated_jac1)
@test eltype(get_jacobian(lq_dense_imp)) == eltype(get_jacobian(lq_dense_imp).truncated_jac1)
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | code | 3816 | """
build_QP_JuMP_model(Q,R,A,B,N;...) -> JuMP.Model(...)
Return a `JuMP.jl` Model for the quadratic problem
min 1/2 ( sum_{i=1}^{N-1} s_i^T Q s + sum_{i=1}^{N-1} u^T R u + s_N^T Qf s_n )
s.t. s_{i+1} = As_i + Bu_i for i = 0,..., N-1
Optional Arguments
- `Qf = []`: matrix multiplied by s_N in objective function (defaults to Q if not given)
- `c = zeros(N*size(Q,1) + N*size(R,1))`: linear term added to objective function, c^T z
- `sl = fill(-Inf, size(Q,1))`: lower bound on state variables
- `su = fill(Inf, size(Q,1))`: upper bound on state variables
- `ul = fill(-Inf, size(R,1))`: lower bound on input variables
- `uu = fill(Inf, size(R,1))`: upper bound on input variables
- `s0 = []`: initial state of the first state variables
"""
function build_QP_JuMP_model(
    Q, R, A, B, N;
    s0 = zeros(size(Q, 1)),
    sl = [],
    su = [],
    ul = [],
    uu = [],
    Qf = Q,
    E = [],
    F = [],
    gl = [],
    gu = [],
    S = zeros(size(Q, 1), size(R, 1)),
    K = zeros(size(R, 1), size(Q, 1)),
    # w covers all N time steps (N blocks of length ns). The previous default
    # zeros(size(Q, 1)) was too short: the dynamics constraints index
    # w[s1 + t * ns] for t up to N - 1, which raised a BoundsError for N > 1.
    w = zeros(size(Q, 1) * N)
)

    ns = size(Q, 1) # number of states
    nu = size(R, 1) # number of inputs

    NS = 1:ns # set of states
    NU = 1:nu # set of inputs

    model = Model(MadNLP.Optimizer) # define model

    @variable(model, s[NS, 0:N])       # states s_0, ..., s_N
    @variable(model, u[NU, 0:(N-1)])   # inputs u_0, ..., u_{N-1}

    # With a feedback gain K, inputs are reparameterized as u_t = K s_t + v_t.
    if !iszero(K)
        @variable(model, v[NU, 0:(N-1)])
    end

    # Simple bounds on states/inputs; infinite bounds are skipped so that no
    # redundant constraints are added.
    if length(sl) > 0
        for i in NS, j in 0:N
            if sl[i] != -Inf
                @constraint(model, s[i, j] >= sl[i])
            end
        end
    end
    if length(su) > 0
        for i in NS, j in 0:N
            if su[i] != Inf
                @constraint(model, s[i, j] <= su[i])
            end
        end
    end
    if length(ul) > 0
        for i in NU, j in 0:(N-1)
            if ul[i] != -Inf
                @constraint(model, u[i, j] >= ul[i])
            end
        end
    end
    if length(uu) > 0
        for i in NU, j in 0:(N-1)
            if uu[i] != Inf
                @constraint(model, u[i, j] <= uu[i])
            end
        end
    end

    # Fix the initial state.
    if length(s0) > 0
        for i in NS
            JuMP.fix(s[i, 0], s0[i])
        end
    end

    # Dynamics: s_{t+1} = A s_t + B u_t + w_t, with w stored as N consecutive
    # blocks of length ns (w[s1 + t * ns] is component s1 at time t).
    for s1 in NS, t in 0:(N-1)
        @constraint(
            model,
            s[s1, t+1] ==
            sum(A[s1, s2] * s[s2, t] for s2 in NS) +
            sum(B[s1, u1] * u[u1, t] for u1 in NU) +
            w[s1+t*ns]
        )
    end

    # Feedback-gain coupling: u_t = K s_t + v_t.
    if !iszero(K)
        for u1 in NU
            @constraint(model, [t in 0:(N-1)], u[u1, t] == v[u1, t] + sum(K[u1, s1] * s[s1, t] for s1 in NS))
        end
    end

    # General inequality constraints gl <= E s_t + F u_t <= gu for each row.
    if length(E) > 0
        for i in 1:size(E, 1)
            @constraint(model, [t in 0:(N-1)], gl[i] <= sum(E[i, s1] * s[s1, t] for s1 in NS) + sum(F[i, u1] * u[u1, t] for u1 in NU))
            @constraint(model, [t in 0:(N-1)], gu[i] >= sum(E[i, s1] * s[s1, t] for s1 in NS) + sum(F[i, u1] * u[u1, t] for u1 in NU))
        end
    end

    # Quadratic objective: stage cost (Q, R, cross term S) over t = 0..N-1
    # plus terminal cost with Qf at t = N.
    @objective(model, Min,
        sum(1 / 2 * Q[s1, s2] * s[s1, t] * s[s2, t] for s1 in NS, s2 in NS, t in 0:(N-1)) +
        sum(1 / 2 * R[u1, u2] * u[u1, t] * u[u2, t] for t in 0:(N-1), u1 in NU, u2 in NU) +
        sum(1 / 2 * Qf[s1, s2] * s[s1, N] * s[s2, N] for s1 in NS, s2 in NS) +
        sum(S[s1, u1] * s[s1, t] * u[u1, t] for s1 in NS, u1 in NU, t in 0:(N-1))
    )

    return model
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs | 4786 | # DynamicNLPModels.jl
| **Documentation** | **Build Status** | **Coverage** |
|:-----------------:|:----------------:|:----------------:|
| [](https://madnlp.github.io/DynamicNLPModels.jl/dev) | [](https://github.com/MadNLP/DynamicNLPModels.jl/actions) | [](https://codecov.io/gh/MadNLP/DynamicNLPModels.jl) |
DynamicNLPModels.jl is a package for [Julia](https://julialang.org/) designed for representing linear [model predictive control (MPC)](https://en.wikipedia.org/wiki/Model_predictive_control) problems. It includes an API for building a model from user defined data and querying solutions.
## Installation
To install this package, please use
```julia
using Pkg
Pkg.add(url="https://github.com/MadNLP/DynamicNLPModels.jl.git")
```
or
```julia
pkg> add https://github.com/MadNLP/DynamicNLPModels.jl.git
```
## Overview
DynamicNLPModels.jl can construct both sparse and condensed formulations for MPC problems based on user defined data. We use the methods discussed by [Jerez et al.](https://doi.org/10.1016/j.automatica.2012.03.010) to eliminate the states and condense the problem. DynamicNLPModels.jl constructs models that are subtypes of `AbstractNLPModel` from [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) enabling both the sparse and condensed models to be solved with a variety of different solver packages in Julia. DynamicNLPModels was designed in part with the goal of solving linear MPC problems on the GPU. This can be done within [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl) using [MadNLPGPU.jl](https://github.com/MadNLP/MadNLP.jl/tree/master/lib/MadNLPGPU).
The general sparse formulation used within DynamicNLPModels.jl is
$$\begin{align*}
\min_{s, u, v} \quad & s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} \quad & s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
& u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
& g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
& s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
& u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
& s_0 = \bar{s}
\end{align*}$$
where $s_i$ are the states, $u_i$ are the inputs, $N$ is the time horizon, $\bar{s}$ are the initial states, and $Q$, $R$, $A$, and $B$ are user defined data. The matrices $Q_f$, $S$, $K$, $E$, and $F$ and the vectors $w$, $g^l$, $g^u$, $s^l$, $s^u$, $u^l$, and $u^u$ are optional data. $v_t$ is only needed in the condensed formulation, and it arises when $K$ is defined by the user to ensure numerical stability of the condensed problem.
The condensed formulation used within DynamicNLPModels.jl is
$$\begin{align*}
\min_{\boldsymbol{v}} \quad & \frac{1}{2} \boldsymbol{v}^\top \boldsymbol{H} \boldsymbol{v} + \boldsymbol{h}^\top \boldsymbol{v} + \boldsymbol{h}_0\\
\textrm{s.t.} \quad & d^l \le \boldsymbol{J} \boldsymbol{v} \le d^u.
\end{align*}$$
## Getting Started
DynamicNLPModels.jl takes user defined data to form a `SparseLQDyanmicModel` or a `DenseLQDynamicModel`. The user can first create an object containing the `LQDynamicData`, or they can pass the data directly to the `SparseLQDynamicModel` or `DenseLQDynamicModel` constructors.
```julia
using DynamicNLPModels, Random, LinearAlgebra
Q = 1.5 * Matrix(I, (3, 3))
R = 2.0 * Matrix(I, (2, 2))
A = rand(3, 3)
B = rand(3, 2)
N = 5
s0 = [1.0, 2.0, 3.0]
lqdd = LQDynamicData(s0, A, B, Q, R, N; kwargs...)
sparse_lqdm = SparseLQDynamicModel(lqdd)
dense_lqdm = DenseLQDynamicModel(lqdd)
# or
sparse_lqdm = SparseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
dense_lqdm = DenseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
Optional data (such as $s^l$, $s^u$, $S$, or $Q_f$) can be passed as key word arguments. The models `sparse_lqdm` or `dense_lqdm` can be solved by different solvers such as MadNLP.jl or Ipopt (Ipopt requires the extension NLPModelsIpopt.jl). An example script under `\examples` shows how the dense problem can be solved on a GPU using MadNLPGPU.jl.
DynamicNLPModels.jl also includes an API for querying solutions and reseting data. Solutions can be queried using `get_u(solver_ref, dynamic_model)` and `get_s(solver_ref, dynamic_model)`. The problem can be reset with a new $s_0$ by calling `reset_s0!(dynamic_model, s0)`.
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs | 59 | # API Manual
```@autodocs
Modules = [DynamicNLPModels]
```
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs | 7295 |
# Getting Started
DynamicNLPModels.jl takes user defined data to construct a linear MPC problem of the form
```math
\begin{aligned}
\min_{s, u, v} &\; s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} &\;s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
&\; u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s_0 = \bar{s}.
\end{aligned}
```
This data is stored within the struct `LQDynamicData`, which can be created by passing the data `s0`, `A`, `B`, `Q`, `R` and `N` to the constructor as in the example below.
```julia
using DynamicNLPModels, Random, LinearAlgebra
Q = 1.5 * Matrix(I, (3, 3))
R = 2.0 * Matrix(I, (2, 2))
A = rand(3, 3)
B = rand(3, 2)
N = 5
s0 = [1.0, 2.0, 3.0]
lqdd = LQDynamicData(s0, A, B, Q, R, N; kwargs...)
```
`LQDynamicData` contains the following fields. All fields after `R` are keyword arguments:
* `ns`: number of states (determined from size of `Q`)
* `nu`: number of inputs (determined from size of `R`)
* `N` : number of time steps
* `s0`: a vector of initial states
* `A` : matrix that is multiplied by the states that corresponds to the dynamics of the problem. Number of columns is equal to `ns`
* `B` : matrix that is multiplied by the inputs that corresonds to the dynamics of the problem. Number of columns is equal to `nu`
* `Q` : objective function matrix for system states from ``0, 1, \cdots, (N - 1)``
* `R` : objective function matrix for system inputs from ``0, 1, \cdots, (N - 1)``
* `Qf`: objective function matrix for system states at time ``N``
* `S` : objective function matrix for system states and inputs
* `E` : constraint matrix multiplied by system states. Number of columns is equal to `ns`
* `F` : constraint matrix multiplied by system inputs. Number of columns is equal to `nu`
* `K` : feedback gain matrix. Used to ensure numerical stability of the condensed problem. Not necessary within the sparse problem
* `w` : constant term within dynamic constraints. At this time, this is the only data that is time varying. This vector must be length `ns` * `N`, where each set of `ns` entries corresponds to that time (i.e., entries `1:ns` correspond to time ``0``, entries `(ns + 1):(2 * ns)` corresond to time ``1``, etc.)
* `sl` : lower bounds on state variables
* `su` : upper bounds on state variables
* `ul` : lower bounds on input variables
* `uu` : upper bounds on input variables
* `gl` : lower bounds on the constraints ``Es_i + Fu_i``
* `gu` : upper bounds on the constraints ``Es_i + Fu_i``
## `SparseLQDynamicModel`
A `SparseLQDynamicModel` can be created by either passing `LQDynamicData` to the constructor or passing the data itself, where the same keyword options exist which can be used for `LQDynamicData`.
```julia
sparse_lqdm = SparseLQDynamicModel(lqdd)
# or
sparse_lqdm = SparseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
The `SparseLQDynamicModel` contains four fields:
* `dynamic_data` which contains the `LQDynamicData`
* `data` which is the `QPData` from [QuadraticModels.jl](https://github.com/JuliaSmoothOptimizers/QuadraticModels.jl). This object also contains the following data:
- `H` which is the Hessian of the linear MPC problem
- `A` which is the Jacobian of the linear MPC problem such that ``\textrm{lcon} \le A z \le \textrm{ucon}``
- `c` which is the linear term of a quadratic objective function
- `c0` which is the constant term of a quadratic objective function
* `meta` which contains the `NLPModelMeta` for the problem from NLPModels.jl
* `counters` which is the `Counters` object from NLPModels.jl
!!! note
The `SparseLQDynamicModel` requires that all matrices in the `LQDynamicData` be the same type. It is recommended that the user be aware of how to most efficiently store their data in the `Q`, `R`, `A`, and `B` matrices as this impacts how efficiently the `SparseLQDynamicModel` is constructed. When `Q`, `R`, `A`, and `B` are sparse, building the `SparseLQDynamicModel` is much faster when these are passed as sparse rather than dense matrices.
## `DenseLQDynamicModel`
The `DenseLQDynamicModel` eliminates the states within the linear MPC problem to build an equivalent optimization problem that is only a function of the inputs. This can be particularly useful when the number of states is large compared to the number of inputs.
A `DenseLQDynamicModel` can be created by either passing `LQDynamicData` to the constructor or passing the data itself, where the same keyword options exist which can be used for `LQDynamicData`.
```julia
dense_lqdm = DenseLQDynamicModel(lqdd)
# or
dense_lqdm = DenseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
The `DenseLQDynamicModel` contains five fields:
* `dynamic_data` which contains the `LQDynamicData`
* `data` which is the `QPData` from [QuadraticModels.jl](https://github.com/JuliaSmoothOptimizers/QuadraticModels.jl). This object also contains the following data:
- `H` which is the Hessian of the condensed linear MPC problem
- `A` which is the Jacobian of the condensed linear MPC problem such that ``\textrm{lcon} \le A z \le \textrm{ucon}``
- `c` which is the linear term of the condensed linear MPC problem
- `c0` which is the constant term of the condensed linear MPC problem
* `meta` which contains the `NLPModelMeta` for the problem from NLPModels.jl
* `counters` which is the `Counters` object from NLPModels.jl
* `blocks` which contains the data needed to condense the model and then to update the condensed model when `s0` is reset.
The `DenseLQDynamicModel` is formed from dense matrices, and this dense system can be solved on a GPU using MadNLP.jl and MadNLPGPU.jl. For an example script for performing this, please see the [examples directory](https://github.com/MadNLP/DynamicNLPModels.jl/tree/main/examples) of the main repository.
## API functions
An API has been created for working with `LQDynamicData` and the sparse and dense models. All functions can be seen in the API Manual section. However, we give a short overview of these functions here.
* `reset_s0!(LQDynamicModel, new_s0)`: resets the model in place with a new `s0` value. This could be called after each sampling period in MPC to reset the model with a new measured value
* `get_s(solver_ref, LQDynamicModel)`: returns the optimal solution for the states from a given solver reference
* `get_u(solver_ref, LQDynamicModel)`: returns the optimal solution for the inputs from a given solver reference; when `K` is defined, the solver reference contains the optimal ``v`` values rather than optimal ``u`` values, and this function converts ``v`` to ``u`` and returns the ``u`` values
* `get_*`: returns the data of `*` where `*` is an object within `LQDynamicData`
* `set_*!`: sets the value within the data of `*` for a given entry to a user defined value
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs | 3275 | # Introduction
[DynamicNLPModels.jl](https://github.com/MadNLP/DynamicNLPModels.jl) is a package for [Julia](https://julialang.org/) designed for representing linear [model predictive control (MPC)](https://en.wikipedia.org/wiki/Model_predictive_control) problems. It includes an API for building a model from user defined data and querying solutions.
!!! note
This documentation is also available in [PDF format](DynamicNLPModels.pdf).
## Installation
To install this package, please use
```julia
using Pkg
Pkg.add(url="https://github.com/MadNLP/DynamicNLPModels.jl.git")
```
or
```julia
pkg> add https://github.com/MadNLP/DynamicNLPModels.jl.git
```
## Overview
DynamicNLPModels.jl can construct both sparse and condensed formulations for MPC problems based on user defined data. We use the methods discussed by [Jerez et al.](https://doi.org/10.1016/j.automatica.2012.03.010) to eliminate the states and condense the problem. DynamicNLPModels.jl constructs models that are subtypes of `AbstractNLPModel` from [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) enabling both the sparse and condensed models to be solved with a variety of different solver packages in Julia. DynamicNLPModels was designed in part with the goal of solving linear MPC problems on the GPU. This can be done within [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl) using [MadNLPGPU.jl](https://github.com/MadNLP/MadNLP.jl/tree/master/lib/MadNLPGPU).
The general sparse formulation used within DynamicNLPModels.jl is
```math
\begin{aligned}
\min_{s, u, v} &\; s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} &\;s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
&\; u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s_0 = \bar{s}
\end{aligned}
```
where ``s_i`` are the states, ``u_i`` are the inputs, ``N`` is the time horizon, ``\bar{s}`` are the initial states, and ``Q``, ``R``, ``A``, and ``B`` are user defined data. The matrices ``Q_f``, ``S``, ``K``, ``E``, and ``F`` and the vectors ``w``, ``g^l``, ``g^u``, ``s^l``, ``s^u``, ``u^l``, and ``u^u`` are optional data. ``v_t`` is only needed in the condensed formulation, and it arises when ``K`` is defined by the user to ensure numerical stability of the condensed problem.
The condensed formulation used within DynamicNLPModels.jl is
```math
\begin{aligned}
\min_{\boldsymbol{v}} &\;\; \frac{1}{2} \boldsymbol{v}^\top \boldsymbol{H} \boldsymbol{v} + \boldsymbol{h}^\top \boldsymbol{v} + \boldsymbol{h}_0\\
\textrm{s.t.} &\; d^l \le \boldsymbol{J} \boldsymbol{v} \le d^u.
\end{aligned}
```
# Bug reports and support
This package is new and still undergoing some development. If you encounter a bug, please report it through Github's [issue tracker](https://github.com/MadNLP/DynamicNLPModels.jl/issues). | DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 79 | using Documenter, TopologyPreprocessing
# Build the documentation site. The sitename had been left at the Documenter.jl
# template placeholder ("My Documentation"); name it after the package instead.
makedocs(sitename = "TopologyPreprocessing.jl")
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 20085 | using Eirene
using Pipe
#%%
# ==============================
# ======== Tested code ========
#%%
# ================================
# ======== Untested code ========
"""
get_barcodes(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)
Calls Eirene.barcode for 'dim' in range from `min_dim` up to 'max_dim' and
stack the resulting Arrays into a vector.
The returned value is a Vector of Arrays{Float64,2}. Each array is of
different size, because of different number of detected cycles. First column
of each array contains birth step, second column contains death step.
Arrays in returned vector correspond to Betti curve dimensions form range
`min_dim` up to 'max_dim'.
"""
function get_barcodes(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1, sorted::Bool = false)
    collected = Matrix{Float64}[]
    for dim in min_dim:max_dim
        bars = barcode(results_eirene, dim = dim)
        # Eirene returns an empty array when no cycles exist in this dimension;
        # substitute a zero matrix shaped like the previous dimension's result.
        if isempty(bars) && dim > 1
            bars = zeros(size(collected[dim-1]))
        end
        # Optionally order bars by birth step (first column).
        if sorted
            bars = bars[sortperm(bars[:, 1]), :]
        end
        push!(collected, bars)
    end
    return collected
end
"""
plot_barcodes(barcodes; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true kwargs...)
Creates a plot for set of barcodesstored in `barcodes` and return the
handler to the plot.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
"""
function plot_barcodes!(barcodes::Vector, plot_ref;
    min_dim::Integer = 1,
    barcodes_labels::Bool = true,
    default_labels::Bool = true,
    sort_by_birth::Bool = false,
    alpha::Float64 = 1.0,
    kwargs...)#; plot_size = (width=1200, height=800),
    # Draws each barcode as a horizontal segment (birth -> death) onto `plot_ref`.
    # Bars of successive dimensions are stacked bottom-to-top, one y slot per bar.
    # TODO add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
    # TODO add ordering of bars to firstly birth time, then death time
    # TODO dims should be given as range, not a single min dim
    # barcodes = all_barcodes_geom
    max_dim = size(barcodes, 1) - (1 - min_dim) # TODO not sure if this is correct solution
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    # Honour an explicit line width from kwargs; default to 8 otherwise.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 8
    end
    colors_set = get_bettis_color_palete(min_dim = min_dim)
    # Vertical layout: cumulative bar counts give each dimension its own y range,
    # stacked directly above the previous dimension's bars.
    dims_indices = 1:length(min_dim:max_dim)
    all_sizes = [size(barcodes[k], 1) for k = dims_indices]
    ranges_sums = vcat(0, [sum(all_sizes[1:k]) for k = dims_indices])
    y_val_ranges = [ranges_sums[k]+1:ranges_sums[k+1] for k = dims_indices]
    if sort_by_birth
        # NOTE(review): this mutates the caller's `barcodes` vector.
        sort_barcodes!(barcodes, min_dim, max_dim)
    end
    total_cycles = sum([size(barcodes[k], 1) for (k, dim) in enumerate(min_dim:max_dim)])
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    for (p, dim) = enumerate(min_dim:max_dim)# 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
        # @info p, dim
        args = (lc = colors_set[p], linewidth = lw)
        # b = barcodes[p][1:1:(end-1),:]
        # Work on a copy so the infinity replacement below does not touch input.
        b = barcodes[p][:, :]
        # Replace infinite death times with 1 so that the bar can be drawn.
        all_infs = findall(x -> isinf(x), b)
        for k in all_infs
            b[k] = 1
        end
        # if dim == 0
        #     b = sort(b, dims = 1)
        # end
        total_bars = size(b, 1)
        y_vals = [[k, k] for k in y_val_ranges[p]]
        lc = colors_set[p]
        for k = 1:total_bars
            # TODO change label to empty one
            plot!(b[k, :], y_vals[k]; label = "", lc = lc, alpha = alpha)#; args...)
        end
        if false && betti_labels
            # NOTE(review): `betti_labels` is undefined in this scope, but the
            # `false &&` short-circuit prevents this branch from ever evaluating.
            label = "β$(dim)"
        end
        # plot!(label=label)
    end
    # display(plot_ref)
    # Legend: honour an explicit kwarg, otherwise label every dimension.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        all_labels = reshape(["β$(k)" for k in 1:max_dim], (1, max_dim))
        plot!(label = all_labels)
        plot!(legend = true)
    end
    # Axis labels: explicit kwargs win; otherwise optional defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Birth/Death")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Cycle")
    end
    # ylims!(0, 2*total_cycles)
    ylims!(0, total_cycles + 2)
    return plot_ref
end
# Non-mutating entry point: builds a fresh plot and delegates to plot_barcodes!.
function plot_barcodes(barcodes::Vector; kwargs...)
    new_plot = plot(; kwargs...)
    return plot_barcodes!(barcodes, new_plot; kwargs...)
end
# Sort each dimension's barcode rows by birth time (first column), in place.
#
# BUG FIX: the original wrote the sorted matrices into a shallow copy and then
# rebound the *local* variable (`barcodes = sorted_barcodes`), so the caller's
# `barcodes` was never modified despite the `!` in the name. Assigning into the
# passed vector makes the mutation actually happen.
function sort_barcodes!(barcodes, min_dim, max_dim)
    for (dim_index, dim) in enumerate(min_dim:max_dim)
        permutation = sortperm(barcodes[dim_index][:, 1])
        barcodes[dim_index] = barcodes[dim_index][permutation, :]
    end
    return barcodes
end
# TODO This has to be imported from other file
# function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
# """
# function get_bettis_color_palete()
#
# Generates vector with colours used for Betti plots. Designed for Betti plots consistency.
# """
# # TODO what does the number in the function below is used for?
#
# if use_set == 1
# cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
# if min_dim == 0
# colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
# else
# colors_set = RGB[]
# end
# colors_set = vcat(
# colors_set,
# [
# RGB(255 / 256, 206 / 256, 0 / 256),
# RGB(248 / 256, 23 / 256, 0 / 256),
# RGB(97 / 256, 169 / 256, 255 / 256),
# RGB(163 / 256, 0 / 256, 185 / 256),
# RGB(33 / 256, 96 / 256, 45 / 256),
# RGB(4 / 256, 0 / 256, 199 / 256),
# RGB(135 / 256, 88 / 256, 0 / 256),
# ],
# cur_colors,
# )
# else
# use_set == 2
# cur_colors = get_color_palette(:auto, 1)
# cur_colors3 = get_color_palette(:lightrainbow, 1)
# cur_colors2 = get_color_palette(:cyclic1, 1)
# if min_dim == 0
# # colors_set = [cur_colors[3], cur_colors[5], [:red], cur_colors[1]] #cur_colors[7],
# colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
# else
# colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
# # colors_set = [cur_colors[5], [:red], cur_colors[1], cur_colors[14]]
# end
# # for c = [collect(11:25);]
# # push!(colors_set, cur_colors2[c])
# # end
# colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
# end
#
# return colors_set
# end
# Per-dimension death/birth ratios: one vector of ratios per dimension 1:max_dim.
function get_birth_death_ratio(barcodes; max_dim::Integer = 3)
    return [barcodes[dim][:, 2] ./ barcodes[dim][:, 1] for dim in 1:max_dim]
end
# Per-dimension lifetimes (death minus birth), one vector per dimension 1:max_dim.
function get_barcode_lifetime(barcodes; max_dim::Integer = 3)
    return [barcodes[dim][:, 2] .- barcodes[dim][:, 1] for dim in 1:max_dim]
end
#%%
"""
get_barcode_max_lifetime(lifetimes, min_dim, max_dim)
Returns the maximal life times of barcode for all dimensions.
"""
function get_barcode_max_lifetime(lifetimes)
    # One row per dimension; column vector (n×1 matrix) of maximal lifetimes.
    maxima = zeros(length(lifetimes), 1)
    for (idx, lifetime) in enumerate(lifetimes)
        maxima[idx] = findmax(lifetime)[1]
    end
    return maxima
end
"""
boxplot_birth_death(areas_matrix, min_dim::Integer, max_dim::Integer)
Plots the boxplot of area under betti curves.
"""
function boxplot_birth_death(birth_death_ratio_π, min_dim::Integer, max_dim::Integer)
    # NOTE: `min_dim`/`max_dim` are part of the interface but unused here;
    # one box (plus a dot overlay) is drawn per entry of the input.
    handle = StatsPlots.boxplot()
    palette = get_bettis_color_palete()
    for idx in 1:size(birth_death_ratio_π, 1)
        StatsPlots.boxplot!(handle, birth_death_ratio_π[idx], labels = "β$(idx)", color = palette[idx])
        StatsPlots.dotplot!(handle, birth_death_ratio_π[idx], color = palette[idx])
    end
    return handle
end
"""
boxplot_birth_death(areas_matrix, min_dim::Integer, max_dim::Integer)
Plots the boxplot of area under betti curves.
"""
function boxplot_lifetime(barcode_lifetime, min_dim::Integer, max_dim::Integer)
    # NOTE: `min_dim`/`max_dim` are part of the interface but unused here;
    # one box (plus a dot overlay) is drawn per entry of the input.
    handle = StatsPlots.boxplot()
    palette = get_bettis_color_palete()
    for idx in 1:size(barcode_lifetime, 1)
        StatsPlots.boxplot!(handle, barcode_lifetime[idx], labels = "β$(idx)", color = palette[idx])
        StatsPlots.dotplot!(handle, barcode_lifetime[idx], color = palette[idx])
    end
    return handle
end
#%%
"""
get_barcode_max_db_ratios(lifetimes, min_dim, max_dim)
Returns the maximal life times of barcode for all dimensions.
"""
function get_barcode_max_db_ratios(db_ratos)
    # One row per dimension; column vector (n×1 matrix) of maximal ratios.
    maxima = zeros(length(db_ratos), 1)
    for (idx, ratios) in enumerate(db_ratos)
        maxima[idx] = findmax(ratios)[1]
    end
    return maxima
end
"""
get_normalised_barcodes(barcodes::Vector, betti_numbers::Array)
Returns the barcodes which values are within [0,1] range.
1. get corresponding bettis
2. get max number of steps
3. divide all values by total number of steps.
"""
# Scale barcode values into [0, 1] by dividing by the total number of
# filtration steps, read off the accompanying Betti curves.
#
# BUG FIX: the original tested `typeof(betti_numbers) == Vector`, which is
# always false — `typeof` returns a concrete type such as `Vector{Float64}`,
# never the UnionAll `Vector` — so the vector branch was unreachable and a
# vector of Betti matrices produced a wrong step count (the vector length).
function get_normalised_barcodes(barcodes, betti_numbers::Array)
    if betti_numbers isa Vector
        # Vector of per-dimension Betti matrices: step count = rows of the first.
        total_steps = size(betti_numbers[1], 1)
    else
        # Single Betti matrix: step count = its row count.
        total_steps = size(betti_numbers, 1)
    end
    return barcodes ./ total_steps
end
"""
get_normalised_barcodes_collection(barcodes_collection, bettis_collection)
Applies get_normalised_barcodes to the collection of barcodes and corresponding
betti curves.
"""
# Apply get_normalised_barcodes element-wise to paired collections.
#
# BUG FIX: the original threw `BoundsError(barcodes_collection,
# bettis_collection, "...")` — BoundsError has no 3-argument constructor, so
# the throw itself raised a MethodError. DimensionMismatch carries the message
# and matches the actual error condition (unequal collection lengths).
function get_normalised_barcodes_collection(barcodes_collection, bettis_collection)
    if size(barcodes_collection, 1) != size(bettis_collection, 1)
        throw(DimensionMismatch("Both collections must have same number of elements"))
    end
    total_collections = size(barcodes_collection, 1)
    normed_collection = deepcopy(barcodes_collection)
    for k = 1:total_collections
        normed_collection[k] = get_normalised_barcodes(barcodes_collection[k], bettis_collection[k])
    end
    return normed_collection
end
"""
plot_bd_diagram(barcodes;
dims::Range,
use_js::Bool=false,
kwargs...)
Creates a birth/death diagram from `barcodes` and returns the handlers to
the plots.
By default, dims is set to range '1:length(barcodes)', which plots all of
the diagrams. If set to an integer, plots only 1 dimension.
If 'use_js' is set to true, plotly backend is used for plotting.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
TODO dims are defined as if the dimensions always starts at 1- this has to be changed
"""
function plot_bd_diagram(barcodes; dims = 1:length(barcodes),
    use_js::Bool = false,
    class_sizes = [],
    class_labels = [],
    normilised_diagonal::Bool = true,
    alpha=0.4,
    kwargs...)
    # Scatter of (birth, death) pairs per dimension, with the diagonal drawn.
    # TODO max min should be ready to use from input data- might be better to have better structures as an inupt
    # max_dim = size(barcodes, 1)
    # min_dim = findmin(dims)[1]
    min_dim = dims[1]
    max_dim = dims[end]
    if max_dim > length(barcodes)
        throw(DimensionMismatch("Can not have dims range larger than barcodes length"))
    end
    # all_dims = min_dim:max_dim
    # if findmax(dims)[1] > max_dim
    #     throw(DomainError(
    #         min_dim,
    #         "\'dims\' must be less than maximal dimension in \'bettis\'",
    #     ))
    # end
    # lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    # if !isnothing(lw_pos)
    #     lw = kwargs[lw_pos]
    # else
    #     lw = 2
    # end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = min_dim)
    # Backend selection: interactive plotly when requested, GR otherwise.
    if use_js
        plotly()
    else
        gr()
    end
    plot_ref = plot(; xlims = (0, 1), ylims = (0, 1), kwargs...)
    # Add diagonal
    if normilised_diagonal
        max_coord = 1
    else
        # Largest finite birth/death value over the requested dimensions.
        max_x = max([k for k in vcat([barcodes[d][:,1] for (d, dim) in dims|> enumerate]...) if !isinf(k) ]...)
        max_y = max([k for k in vcat([barcodes[d][:,2] for (d, dim) in dims|> enumerate]...) if !isinf(k) ]...)
        max_coord = max(max_x, max_y)
    end
    scaling_factor = 1.05
    min_val = -0.05
    plot!([0, scaling_factor*max_coord], [0, scaling_factor*max_coord], label = "")
    xlims!(min_val, scaling_factor*max_coord)
    ylims!(min_val, scaling_factor*max_coord)
    for (p, dim) in enumerate(dims)
        # colors_set[p]
        # NOTE(review): indexes barcodes by enumeration position `p`, not by
        # `dim` — assumes barcodes[1] corresponds to dims[1]; confirm with callers.
        my_vec = barcodes[p]
        # TODO class size is not a default and basic bd diagram property- should be factored out to other function
        if class_labels != [] && class_sizes != []
            labels = ["class/size $(class_labels[k])/$(class_sizes[class_labels[k]])" for k in 1:size(class_labels, 1)]
        elseif class_sizes == []
            labels = ["class: $(k)" for k in 1:size(my_vec, 1)]
        else
            labels = ["class/size $(k)/$(class_sizes[k])" for k in 1:size(my_vec, 1)]
        end
        args = (color = colors_set[p],
            # linewidth = lw,
            label = "β$(dim)",
            aspect_ratio = 1,
            size = (600, 600),
            legend = :bottomright,
            framestyle=:origin,
            alpha=alpha,
            # hover = labels,
            kwargs...)
        if class_labels != []
            # NOTE(review): the next line is a no-op comparison whose result is
            # discarded; possibly an `&&` with the `if` condition was intended.
            class_sizes != []
            for x = class_labels
                plot!(my_vec[Int(x), 1], my_vec[Int(x), 2], seriestype = :scatter; args...)
            end
        end
        plot!(my_vec[:, 1], my_vec[:, 2], seriestype = :scatter; args...)
    end
    xlabel!("birth")
    ylabel!("death")
    return plot_ref
end
#
"""
plot_all_bd_diagrams(barcodes_collection;
min_dim::Integer=1,
betti_labels::Bool=true,
default_labels::Bool=true,
all_legend=false,
my_alpha=0.12,
aspect_ratio=1,
kwargs...)
Creates a set of birth/death diagrams from `barcodes_collection`
and returns a dictionary with the handlers to the plots.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
"""
function plot_all_bd_diagrams(barcodes_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    all_legend = false,
    my_alpha = 0.12,
    aspect_ratio = 1,
    base_w = 600,
    base_h = 600,
    kwargs...)
    # One scatter figure per dimension; each element of the collection adds a
    # semi-transparent layer of (birth, death) points. Returns Dict keyed "β<d>".
    total_dims = size(barcodes_collection[1], 1)
    # Optional overrides pulled from kwargs (line width, title).
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    title_pos = findfirst(x -> x == :title, keys(kwargs))
    if !isnothing(title_pos)
        my_title = kwargs[title_pos]
    else
        my_title = "Birth death diagram"
    end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = min_dim)
    plot_dict = Dict()
    for b = 1:total_dims
        args = (lc = colors_set[b],
            linewidth = lw,
            label = false,
            aspect_ratio = aspect_ratio,
            size = (base_w, base_h),
            kwargs...)
        plot_dict["β$(b)"] = scatter(; xlims = (0, 1), ylims = (0, 1), dpi = 300, args...)
        for bars = barcodes_collection
            barcode = bars[b]
            scatter!(barcode[:, 1], barcode[:, 2],
                markeralpha = my_alpha,
                markercolor = colors_set[b],
                dpi = 300)
        end
        plot!(legend = all_legend)
        plot!(title = (my_title * ", β$(b)"))
        # legend_pos = findfirst(x -> x == :legend, keys(kwargs))
        # if !isnothing(legend_pos)
        #     plot!(legend = kwargs[legend_pos])
        # else
        #     plot!(legend = betti_labels)
        # end
        # Axis labels: explicit kwargs win; otherwise optional defaults.
        x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
        y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
        if !isnothing(x_pos)
            xlabel!(kwargs[x_pos])
        elseif default_labels
            xlabel!("Birth")
        end
        if !isnothing(y_pos)
            ylabel!(kwargs[y_pos])
        elseif default_labels
            ylabel!("Death")
        end
    end
    return plot_dict
end
## ===-
# Simpler plotting
"""
plot_bd_diagram(barcodes;
dims::Range,
use_js::Bool=false,
kwargs...)
Creates a birth/death diagram from `barcodes` and returns the handlers to
the plots.
By default, dims is set to range '1:length(barcodes)', which plots all of
the diagrams. If set to an integer, plots only 1 dimension.
If 'use_js' is set to true, plotly backend is used for plotting.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
TODO dims are defined as if the dimensions always starts at 1- this has to be changed
"""
function plot_simple_bd_diagram(barcodes; dims = 1:length(barcodes), max_bd = 0, use_js::Bool = false,
    kwargs...)
    # Minimal birth/death scatter: one point cloud per dimension plus a diagonal.
    # TODO max min should be ready to use from input data- might be better to have better structures as an inupt
    max_dim = size(barcodes, 1)
    min_dim = findmin(dims)[1]
    # all_dims = min_dim:max_dim
    if findmax(dims)[1] > max_dim
        throw(DomainError(
            min_dim,
            "\'dims\' must be less than maximal dimension in \'bettis\'",
        ))
    end
    # Honour an explicit line width from kwargs; default to 2 otherwise.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = 1)
    # Backend selection: interactive plotly when requested, GR otherwise.
    if use_js
        plotly()
    else
        gr()
    end
    plot_ref = plot(; kwargs...)
    for (p, dim) in dims |> enumerate
        # colors_set[p]
        my_vec = barcodes[p]
        args = (color = colors_set[p],
            linewidth = lw,
            aspect_ratio = 1,
            size = (600, 600),
            legend = :bottomright,
            kwargs...)
        # scatter!(my_vec[:, 1], my_vec[:, 2], args...)
        plot!(my_vec[:, 1], my_vec[:, 2], seriestype = :scatter; args...)
    end
    # Add diagonal
    # Use the caller-supplied bound when positive, otherwise derive from data.
    if max_bd > 0
        max_x = max_bd
        max_y = max_bd
        plot!([0, max_y], [0, max_y], label = "")
    else
        all_births = vcat([barcodes[d][:, 1] for d in dims]...)
        all_deaths = vcat([barcodes[d][:, 2] for d in dims]...)
        max_x = findmax(all_births)[1]
        max_y = findmax(all_deaths)[1]
        plot!([0, max_y], [0, max_y], label = "")
    end
    return plot_ref
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 28399 | # ==============================
# ======== Tested code ========
using Eirene
using Plots
using StatsPlots
#%%
"""
get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)
Calls Eirene.betticurve for 'dim' in range from `min_dim` up to 'max_dim' and
stack the resulting Arrays into a vector.
The returned value is a Vector of Arrays{Float64,2}. Each array is of size
(n,2), where n is the maximal number of steps taken to compute Betti curve of dimensions
ranging form `min_dim` to `max_dim`. First column of each array contains numbered steps.
Second column are the Betti curve values for corresponding step.
Arrays in returned vector correspond to Betti curve dimensions form range
`min_dim` up to 'max_dim'.
"""
function get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    curves = Matrix{Float64}[]
    for dim in min_dim:max_dim
        current = betticurve(results_eirene, dim = dim)
        # Eirene yields an empty array when no cycles exist in this dimension;
        # substitute zeros shaped like the previous dimension's curve.
        if isempty(current) && dim > 1
            current = zeros(size(curves[dim-1]))
        end
        push!(curves, current)
    end
    return curves
end
# TODO add get_bettis_from_matrix, to wrap C= eirene...; get bettis
#%%
"""
normalise_bettis(bettis::Vector)
normalise_bettis(bettis::Array)
Normalise the number of steps for Betti curves. 'bettis' can be either vector of
arrays (each array contain Betti curve of different dimension) or an array
containing Betti curve of a single dimension.
"""
function normalise_bettis(bettis::Vector)
    @debug "Vector version"
    # Shallow copy: the matrices inside remain shared with the input, so the
    # in-place column scaling below also mutates the caller's arrays.
    norm_bettis = copy(bettis)
    @debug "norm_bettis size :" size(norm_bettis)[1][1]
    max_dim = size(norm_bettis)[1]
    @debug "typeof(max_dim) :" typeof(max_dim[1])
    for d = 1:(max_dim)
        if !isempty(norm_bettis[d])
            # Rescale the step column (column 1) so it ends at 1.0.
            norm_bettis[d][:, 1] /= findmax(norm_bettis[d][:, 1])[1]
        end
    end
    return norm_bettis
end
#%%
function normalise_bettis(bettis::Array)
    @debug "Array version"
    # Work on a copy so the caller's matrix is untouched.
    scaled = copy(bettis)
    @debug "norm_bettis size :" size(scaled)
    if !isempty(scaled)
        # Rescale the step column (column 1) so it ends at 1.0.
        scaled[:, 1] /= findmax(scaled[:, 1])[1]
    end
    return scaled
end
#%%
# function vectorize_bettis(betti_curves::Array{Matrix{Float64,2}})
"""
vectorize_bettis(betti_curves::Matrix{Float64})
Reshapes the 'betti_curves' from type Array{Matrices{Float64,2}} into
Matrix{Float64}.
The resulting matrix size is (n, k), where 'n' is equal to the number of
rows in each matrix, 'k' is equal to the number of matrices.
TODO: Change the name- it takse vector and returns a matrix.
TODO: get bettis could have an arguent betti_type which would determine resulting type
"""
function vectorize_bettis(betti_curves::Vector{Array{Float64,2}})
    # Take the value column (column 2) of every curve and glue them side by side.
    value_columns = [curve[:, 2] for curve in betti_curves]
    return hcat(value_columns...)
end
#%%
@deprecate vectorize_bettis(eirene_results::Dict, maxdim::Integer, mindim::Integer) vectorize_bettis(betti_curves)
# ===
#%%
"""
get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)
Takes the eirene result and computes Betti curves for dimensions in range
'mindim:maxdim'. Every Betti curve is stored in successive column of the
resulting array.
TODO: this should return a matrix, where first col are indices and rest are B values (1st col is missing now)
"""
function get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    # Compute the per-dimension curves, then stack their value columns.
    return vectorize_bettis(get_bettis(results_eirene, max_dim, min_dim = min_dim))
end
# ==
#%%
"""
plot_bettis(bettis; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true kwargs...)
Creates a plot for set of betti numbers stored in `bettis` and return the
handler to the plot.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
TODO: min_dim is not included in all_dims variable
TODO: add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
"""
function plot_bettis(bettis::Vector;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    kwargs...)#; plot_size = (width=1200, height=800),
    # One curve per dimension from a vector of (step, value) matrices.
    max_dim = size(bettis, 1)
    all_dims = 1:max_dim
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension.",
        ))
    end
    # Honour an explicit line width from kwargs; default to 2 otherwise.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # Create iterator for all loops
    all_iterations = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    if use_edge_density
        # NOTE(review): rescales the step column IN PLACE, mutating the caller's
        # matrices in `bettis`.
        for p = all_iterations
            max_step = findmax(bettis[p][:, 1])[1]
            bettis[p][:, 1] ./= max_step
        end
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = all_iterations
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "β$(p)")
        end
        plot!(bettis[index][:, 1], bettis[index][:, 2]; args...)
    end
    # Legend: explicit kwarg wins; otherwise follows betti_labels.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win; otherwise optional defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
"""
plot_bettis(bettis::Array; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true kwargs...)
Creates a plot for set of betti numbers stored in `bettis` and return the
handler to the plot.
'kwargs' are plot parameters
Some of the possible 'kwargs' are:
- title::String
- legend:Bool
- size::Tuple{T, T} where {T::Number}
- lw::Integer or linewidth:Integer
(for more, see plots documentation):
TODO: add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
"""
function plot_bettis(bettis::Array;
    # min_dim::Integer = 1,
    dims_range=1:size(bettis,2),
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    # Plots columns of a steps×dims matrix of Betti values; x axis is either
    # normalised to [0, 1] or the raw step count.
    max_dim = dims_range[end]
    min_dim = dims_range[1]
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    total_steps = size(bettis, 1)
    if normalised
        x_vals = range(0, stop=1, length=total_steps)
    else
        x_vals = range(0, stop=total_steps)
    end
    # Honour an explicit line width from kwargs; default to 2 otherwise.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # if use_edge_density
    #     # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    #     for (index, p) in enumerate(min_dim:max_dim)
    #         max_step = findmax(bettis[:, 1])[1]
    #         bettis[p][:, 1] ./=max_step
    #     end
    # end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "β$(p)")
        end
        # NOTE(review): columns are indexed by enumeration position `index`,
        # assuming column 1 corresponds to `min_dim` — confirm with callers.
        plot!(x_vals, bettis[:, index]; args...)
    end
    # Legend: explicit kwarg wins; otherwise follows betti_labels.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win; otherwise optional defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
# ======= Untested code
# TODO add default kwargs paring function -> parse_kwargs()
"""
plot_all_bettis ...
"""
function plot_all_bettis(bettis_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    # Overlays the Betti curves of every element of the collection with fixed
    # low alpha, one colour per dimension; y axis capped at the global maximum.
    total_dims = size(bettis_collection[1],2)
    # Honour an explicit line width from kwargs; default to 2 otherwise.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    max_y_val = find_max_betti(bettis_collection)
    plot_ref = plot(; kwargs...)
    for b = 1:total_dims
        args = (lc = colors_set[b], linewidth = lw, alpha=0.12,label=false, ylims=(0,max_y_val))
        for bettis = bettis_collection
            betti_vals = bettis[:,b]
            total_steps = size(bettis, 1)
            # Normalise each curve's x axis to [0, 1].
            x_vals = range(0, stop=1, length=total_steps)
            plot!(x_vals, betti_vals; args...)
        end
        # my_label = "β$(b)"
        # betti_vals = results_d["bettis_collection"][:hc][end]
        # x_vals = range(0, stop=1, length=size(betti_vals, 1))
        # plot!(x_vals, betti_vals; lc = colors_set[b], linewidth = 1, alpha=0.1,label=my_label, ylims=(0,max_y_val))
    end
    plot!(legend=true)
    # Legend: explicit kwarg wins; otherwise follows betti_labels.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win; otherwise optional defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    return plot_ref
end
"""
find_max_betti(bettis_collection::Array)
Returns the highest Betti curve value from all dimensions.
"""
# Return the largest value found across all Betti curves in the collection.
#
# FIX: removed the unreachable branch `if typeof(bettis_collection) == Vector`.
# `typeof` returns a concrete type (e.g. `Vector{Matrix{Float64}}`), which is
# never `==` to the UnionAll `Vector`, so that branch could not execute; the
# loop below already handles both a plain matrix and a vector of matrices,
# since `findmax` of each element yields its maximum.
function find_max_betti(bettis_collection::Array)
    max_y_val = 0
    for betti_set in bettis_collection
        local_max = findmax(betti_set)[1]
        if local_max > max_y_val
            max_y_val = local_max
        end
    end
    return max_y_val
end
# ======= Untested code == end
#%%
"""
printready_plot_bettis(kwargs)
Creates a plot using 'plot_bettis' with arguments which were tested to be very
good for using them in prints. Used arguments are:
"""
# Placeholder: intended to wrap plot_bettis with print-ready defaults; not yet
# implemented, always returns nothing.
printready_plot_bettis(kwargs) = nothing
#%%
"""
function get_bettis_color_palete()
Generates vector with colours used for Betti plots. Designed for Betti plots consistency.
"""
function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
    # Builds the colour sequence shared by the Betti/barcode plots so that the
    # same dimension always gets the same colour. `use_set == 1` (default) uses
    # a hand-picked RGB list followed by a gray ramp; any other value derives
    # colours from Plots' predefined palettes.
    if use_set == 1
        # Grayscale tail appended after the hand-picked colours, for plots with
        # many dimensions.
        cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
        if min_dim == 0
            # Dimension 0 gets a dedicated green as the first colour.
            colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
        else
            # FIX: typed empty vector (`RGB[]`) instead of untyped `[]`, so the
            # vcat below stays concretely typed (matches the commented-out
            # reference copy of this function elsewhere in the package).
            colors_set = RGB[]
        end
        max_RGB = 256
        colors_set = vcat(
            colors_set,
            [
                RGB(255 / max_RGB, 206 / max_RGB, 0 / max_RGB),
                RGB(248 / max_RGB, 23 / max_RGB, 0 / max_RGB),
                RGB(97 / max_RGB, 169 / max_RGB, 255 / max_RGB),
                RGB(163 / max_RGB, 0 / max_RGB, 185 / max_RGB),
                RGB(33 / max_RGB, 96 / max_RGB, 45 / max_RGB),
                RGB(4 / max_RGB, 0 / max_RGB, 199 / max_RGB),
                RGB(135 / max_RGB, 88 / max_RGB, 0 / max_RGB),
            ],
            cur_colors,
        )
    else
        # FIX: removed the stray no-op expression `use_set == 2` the original
        # evaluated and discarded here (it had no effect on behaviour).
        cur_colors = get_color_palette(:auto, 1)
        cur_colors3 = get_color_palette(:lightrainbow, 1)
        cur_colors2 = get_color_palette(:cyclic1, 1)
        if min_dim == 0
            # colors_set = [cur_colors[3], cur_colors[5], [:red], cur_colors[1]] #cur_colors[7],
            colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
        else
            colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
            # colors_set = [cur_colors[5], [:red], cur_colors[1], cur_colors[14]]
        end
        colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
    end
    return colors_set
end
# ==============================
# ======= Untested code =======
# using Measures
# using Plots.PlotMeasures
#
# # Source: https://github.com/JuliaPlots/Plots.jl/issues/897
# function setdefaultplottingparams(;upscale=2)
# #8x upscaling in resolution
# fntsm = Plots.font("sans-serif", pointsize=round(12.0*upscale))
# fntlg = Plots.font("sans-serif", pointsize=round(18.0*upscale))
# default(titlefont=fntlg, guidefont=fntlg, tickfont=fntsm, legendfont=fntsm)
# default(size=(800*upscale,600*upscale)) #Plot canvas size
# default(dpi=500) #Only for PyPlot - presently broken
# end
#%%
"""
plot_bettis_collection(bettis_collection, bett_num; step=1, show_plt=true, R=0., G=0.4, B=1.0)
PLots collection of Betti curves of rank 'bett-num'. Every successive plot has
lower opacity than predecessor. 'step' defines step between collection elements
that are ploted. By default, plot is displayed after carteation. This can be
disabled by setting 'show_plt' to false.
Color of the plot can be set with 'R', 'G', 'B' parameters.
"""
function plot_bettis_collection(bettis_collection,
    bett_num,
    max_rank;
    step = 1,
    show_plt = true,
    R = 0.0,
    G = 0.4,
    B = 1.0)
    # Overlays the `bett_num`-th Betti curve of every collection element; later
    # elements are drawn with decreasing opacity (same RGB, ramped alpha).
    step > 0 || error("Steps should be natural number!")
    bettis_total = size(bettis_collection, 1)
    # RGBA rows: fixed (R, G, B), alpha decreasing linearly across elements.
    colors_set = zeros(Float64, bettis_total, 4)
    colors_set[:, 1] .= R
    colors_set[:, 2] .= G
    colors_set[:, 3] .= B
    max_betti = get_max_betti_from_collection(bettis_collection)
    @info "max_betti" max_betti
    x = 0
    y = bettis_total * 0.1
    va_range = collect(range(bettis_total + x, y, length = bettis_total))
    colors_set[:, 4] .= va_range / findmax(va_range)[1]
    rgba_set = RGBA[]
    for k = 1:size(colors_set, 1)
        push!(
            rgba_set,
            RGBA(colors_set[k, 1], colors_set[k, 2], colors_set[k, 3], colors_set[k, 4]),
        )
    end
    plt_reference = plot(1, title = "Betti curves collection, rank $(bett_num)", label = "")
    for b = 1:step:bettis_total
        betti = bettis_collection[b]
        # Normalise the x axis of each curve to (0, 1].
        x_vals_1 = (1:size(betti[:, bett_num], 1)) / size(betti[:, bett_num], 1)
        plot!(x_vals_1, betti[:, bett_num], lc = rgba_set[b], label = "rank=$(max_rank-b)")
        plot!(ylim = (0, max_betti))
    end
    xlabel!("Normalised steps")
    ylabel!("Number of cycles")
    plot!(legend = true)
    show_plt && display(plt_reference)
    return plt_reference
end
#%%
"""
get_max_bettis(bettis)
Returns the maximal bettis of Betti curves for all dimensions.
"""
# Column-wise maxima of the Betti value matrix (one maximum per dimension),
# returned as a 1×n matrix.
function get_max_bettis(bettis)
    return maximum(bettis, dims = 1)
end
# TODO change name
# TODO check what for dim is used, change to min dim
# TODO change name
# TODO check what for dim is used, change to min dim
function get_max_betti_from_collection(bettis_collection; dim = 1)
    # `dim` is kept for interface compatibility but is currently unused.
    best = 0
    for betti in bettis_collection
        best = max(best, findmax(betti)[1])
    end
    return best
end
#%%
"""
plot_and_save_bettis(eirene_results, plot_title::String,
results_path::String; extension = ".png",
data_size::String="", do_save=true,
extend_title=true, do_normalise=true, max_dim=3,
legend_on=true)
Plot Betti curves from 0 up to `max_dim` using `eirene_results` from Eirene library and
returns handler for figure. Optionally, if `do_save` is set, saves the figure
or if `do_normalise` is set, sets the steps range to be normalised to the
horizontal axis maximal value.
"""
function plot_and_save_bettis(bettis,
plot_title::String,
results_path::String;
file_name = "",
extension = ".png",
do_save = true,
do_normalise = true,
min_dim = 0,
max_dim = 3,
legend_on = true,
kwargs...)
bettis = get_bettis(eirene_results, max_dim)
if do_normalise
bettis = normalise_bettis(bettis)
end
plot_ref =
plot_bettis(bettis, plot_title, legend_on = legend_on, min_dim = min_dim, kwargs...)
if do_save
if isempty(file_name)
file_name = plot_title * extension
elseif isempty(findall(x -> x == extension[2:end], split(file_name, ".")))
#check for the extension in file name
file_name *= extension
end
save_figure_with_params(
plot_ref,
results_path;
extension = extension,
prefix = split(file_name, ".")[1],
)
end
return plot_ref
end
# TODO merge functions for getting betti curves
# Original function returns 2 different types of betti curves. If no default
# value parameters is given, it returns vector of matrices. If num of steps is
# given, then it return matrix maxdim x numsteps.
# """
# bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, mindim=1)
#
# Takes the `matr` and computes Betti curves up to `maxdim`. Return matrix only
# with betti curve values
#
#
# Function taken from: https://github.com/alexyarosh/hyperbolic
# """
#%%
# NOTE(review): the replacement expressions below reference free variables
# (`results_eirene`, `bettis`, `kwargs`, ...) that are only meaningful at the
# call site; these @deprecate bindings mainly point users at the newer API
# rather than providing drop-in rewrites.
@deprecate bettis_eirene(matr, maxdim; mintime = -Inf, maxtime = Inf, numofsteps = Inf, mindim = 1) get_bettis(results_eirene, max_dim; min_dim = 1)
@deprecate get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis(results_eirene, max_dim; min_dim = 1)
@deprecate get_bettis_from_image2(img_name;file_path = "",plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true)
@deprecate plot_and_save_bettis2(eirene_results, plot_title::String, results_path::String; file_name = "", extension = ".png", data_size::String = "", do_save = true, extend_title = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true) plot_and_save_bettis(bettis, plot_title::String, results_path::String; file_name = "", extension = ".png", do_save = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true, kwargs...)
@deprecate get_and_plot_bettis(eirene_results; max_dim = 3, min_dim = 1, plot_title = "", legend_on = false) get_bettis(results_eirene, max_dim; min_dim = 1)
#%%
"""
lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
Takes ordered matrix 'input_matrix' and reduces the resolution of values in the
matrix into 'total_bins' bins.
"""
function lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
new_ordered_matrix = zeros(size(ordered_matrix))
max_val = findmax(ordered_matrix)[1]
min_val = findmin(ordered_matrix)[1]
bin_step = max_val ÷ total_bins
old_bins = min_val:bin_step:max_val
for bin = 1:total_bins
@debug "First step threshold is $(old_bins[bin])"
indices = findall(x -> (x >= old_bins[bin]), ordered_matrix)
new_ordered_matrix[indices] .= bin - 1
end
@debug "Max_val in new matrix is " findmax(new_ordered_matrix)
@debug "And should be " total_bins - 1
return new_ordered_matrix
end
#%%
"""
average_bettis(bettis_matrix; up_factor=8)
Takes the average values of betti curves stored in 'bettis_matrix'.
'bettis_matrix' consist of different simulations(first index of the matrix),
different ranks (third index of the matrix). Second index of the matrices
(saples) may vary accross many simulations and for this reason, all betti curves
are upsampled by a factor of 'upsample_factor' and then the average for every
dimension is computed.
"""
function average_bettis(bettis_matrix::Matrix; up_factor = 8)
bettis_matrix_backup = copy(bettis_matrix)
simulations = size(bettis_matrix, 1)
dimensions = size(bettis_matrix[1], 1)
max_samples = 0
for k = 1:simulations
# global max_samples
current_len = length(bettis_matrix[k][1][:, 1])
if max_samples < current_len
max_samples = current_len
end
end
bettis_size = size(bettis_matrix)
total_upsamples = (max_samples - 1) * up_factor + 1
x_resampled = range(0, 1, step = total_upsamples)
avg_bettis = zeros(total_upsamples, dimensions)
std_bettis = copy(avg_bettis)
resampled_bettis = zeros(simulations, total_upsamples, dimensions)
# resample betti curves
for simulation = 1:simulations, betti = 1:dimensions
resampled_bettis[simulation, :, betti] =
upsample_vector2(bettis_matrix[simulation][betti][:, 2], total_upsamples)
end
# average and std Betti
for dimension = 1:dimensions
avg_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
std_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
end
return avg_bettis, std_bettis
end
#%%
"""
    upsample_vector2(input_vector, total_upsamples)

Resample `input_vector` to exactly `total_upsamples` samples by fitting a
spline (Dierckx's `Spline1D`) on a normalised [0, 1] grid and evaluating it
on the denser grid.
"""
function upsample_vector2(input_vector, total_upsamples)
    n_intervals = size(input_vector, 1) - 1
    grid_original = range(0, 1, length = n_intervals + 1)
    spline = Spline1D(grid_original, input_vector)
    grid_dense = range(0, 1, length = total_upsamples)
    return spline(grid_dense)
end
#%%
"""
upsample_vector(input_vector; upsample_factor::Int=8)
Takes an 'input_vector' and returns a vector which has 'upsample_factor' many
times more samples. New samples are interpolated with 'spl' function from
'Dierckx' package.
"""
function upsample_vector(input_vector; upsample_factor::Int = 8)
total_orig_samples = size(input_vector, 1) - 1
total_samples = upsample_factor * total_orig_samples + 1
x_vals = range(0, 1, length = total_orig_samples + 1)
spl = Spline1D(x_vals, input_vector)
x_upsampled = range(0, 1, length = total_samples)
y_upsampled = spl(x_upsampled)
# ref = plot(range(0, 1, length=total_orig_samples), input_vector);
# plot!(x_vals, y_upsampled);
# display(ref)
return y_upsampled
end
# =========--=======-========-==========-=======-
# From bettis areas
# Area under Betti curve functions
#%%
"""
get_area_under_betti_curve(betti_curves, min_dim, max_dim)
Computes the area under Betti curves stored in 'betti_curves', where each row is
a Betti curve and each column is a value.
"""
function get_area_under_betti_curve(betti_curves::Union{Matrix{Float64}, Array{Array{Float64,2}}};do_normalised::Bool=false)
#TODO check this part
if size(betti_curves,2) < 2
bettis_vector = vectorize_bettis(betti_curves)
else
bettis_vector = betti_curves
end
# @info sum(bettis_vector, dims=1)
bettis_area = sum(bettis_vector, dims=1)
if do_normalised
total_steps = size(bettis_vector,1)
bettis_area ./= total_steps
end
# @info bettis_area
return bettis_area
end
# NOTE(review): `C`, `min_dim` and `max_dim` are dropped by this deprecation;
# the replacement expression expects the caller to supply `betti_curves`.
@deprecate get_area_under_betti_curve(C, min_dim, max_dim) get_area_under_betti_curve(betti_curves; do_normalised=false)
#%%
"""
get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
Computes topology of every matrix in dataset, computes Betti curves for dimensions
min_dim up to max_dim and returns vector (or matrix) of areas under Betti curves.
"""
function get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
areas_vector = Array[]
for data = dataset
@info "Computing topology."
C = eirene(data, maxdim=max_dim,)
matrix_bettis = get_bettis(C,max_dim, min_dim=min_dim)
push!(areas_vector, get_area_under_betti_curve(matrix_bettis))
end
if return_matrix
return vcat([areas_vector[k] for k=1:10]...)
else
return areas_vector
end
end
#%%
"""
get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
Plots the boxplot of area under betti curves.
"""
function get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
bplot = StatsPlots.boxplot()
data_colors = get_bettis_color_palete()
for (index, value) in enumerate(min_dim:max_dim)
StatsPlots.boxplot!(bplot, areas_matrix[:,index], labels="β$(value)", color=data_colors[value])
end
return bplot
end
# function get_bettis_collection_from_matrices(ordered_matrices_collection; max_dim::Int=3, min_dim::Int=1)
# bettis_collection = Array[]
#
# for matrix = ordered_matrices_collection
# @debug "Computing Bettis..."
# eirene_geom = eirene(matrix,maxdim=max_B_dim,model="vr")
#
# bettis = reshape_bettis(get_bettis(eirene_geom, max_B_dim))
# push!(bettis_collection, bettis)
# end
#
# return bettis_collection
# end
#
# #%%
# TODO find what are the alternative functions for the functions below
# @deprecate get_dataset_topology(dataset; min_dim::Integer=1, max_dim::Integer=3, get_curves::Bool=true, get_areas::Bool=true, get_persistence_diagrams::Bool=true, do_normalise::Bool=true)
# @deprecate get_bettis_collection(ordered_matrices_collection; max_B_dim=3)
# @deprecate reshape_bettis(bettis)
# @deprecate print_hmap_with_bettis(ordered_matrices_collection, bettis_collection, plot_data::PlottingData)
# @deprecate make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti)
# @deprecate matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
# @deprecate multiscale_matrix_testing(sample_space_dims = 3, maxsim = 5, min_B_dim = 1, max_B_dim = 3, size_start = 10, size_step = 5, size_stop = 50; do_random = true, control_saving = false, perform_eavl = false)
# @deprecate plot_betti_numbers(betti_numbers, edge_density, title="Geometric matrix"; stop=0.6)
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 23592 | using LinearAlgebra
import Plots.plot as plot
# using Plots
using Random
include("PlottingWrappers.jl")
include("PointsSubstitution.jl")
"""
function expand_matrix(input_matrix, expansion_size, last_components
Takes 'input_matrix' (an ordering matrix used for creating cliques) and and adds
2×'expansion_size' number of rows. 'last_components' are the values in original
matrix that are added last to the clique.
Results may be be plotted with same funtion with sufix "with_plot".
"""
function expand_matrix(input_matrix, expansion_size, last_components)
new_comp = last_components
matrix_size = size(input_matrix, 1)
for mat_sizes = matrix_size:2:(matrix_size+2expansion_size)
input_matrix, new_comp = add_step_to_matrix(input_matrix, new_comp)
end
return input_matrix
end
"""
    expand_matrix_with_plot(args...)

Call `expand_matrix(args...)`, display a heatmap of the expanded matrix and
return the matrix together with the plot handle.
"""
function expand_matrix_with_plot(args...)
    # Bug fix: this function previously called itself (infinite recursion)
    # instead of `expand_matrix`, and referenced an undefined `matrix_size`.
    input_matrix = expand_matrix(args...)
    matrix_size = size(input_matrix, 1)
    expand_plt_ref = plot_square_heatmap(input_matrix, 1, matrix_size;
        plt_title="Original, size:$(matrix_size)",
        color_palete=:lightrainbow)
    display(expand_plt_ref)
    return input_matrix, expand_plt_ref
end
# Shuffle matrix entries
"""
function shuffle_matrix(input_matrix, shuffles; do_plot=false)
Takes symmetric 'input_matrix' and randomly swaps rows 'shuffles' many times.
Results may be plotted by setting 'do_plot=true'.
"""
function shuffle_matrix(input_matrix, total_shuffles)
matrix_size = size(input_matrix, 1)
rows = randcycle(matrix_size)
shuffled_ord_mat = copy(input_matrix)
for k = 1:total_shuffles
# global shuffled_ord_mat, rows
srcs, trgts = rand(rows, 2)
swap_rows!(shuffled_ord_mat, srcs, trgts)
end
return shuffled_ord_mat
end
"""
    shuffle_matrix_with_plotting(args...)

Call `shuffle_matrix(args...)`, display a heatmap of the shuffled matrix and
return the shuffled matrix together with the plot handle.
"""
function shuffle_matrix_with_plotting(args...)
    shuffled_ord_mat = shuffle_matrix(args...)
    # Bug fix: this used to test an undefined `do_plot`, reference an
    # undefined `matrix_size` and return the (undefined) `input_matrix`;
    # `shuff_plt_ref` could also be returned without ever being assigned.
    matrix_size = size(shuffled_ord_mat, 1)
    shuff_plt_ref = plot_square_heatmap(shuffled_ord_mat, 1, matrix_size;
        plt_title="Shuffled, size:$(matrix_size)",
        color_palete=:lightrainbow)
    display(shuff_plt_ref)
    return shuffled_ord_mat, shuff_plt_ref
end
"""
function organize_shuff_matrix(input_matrix; do_plots=false)
Reorganizes 'input_matrix' so that values highest values in a row are positioned
next to the diagonal.
Results may be plotted by setting 'do_plot=true'.
"""
function organize_shuff_matrix(input_matrix)
unscrambled_matrix = copy(input_matrix)
matrix_size = size(input_matrix, 1)
for k = matrix_size:-2:2
max_row_val = findmax(unscrambled_matrix[k, :])[2]
# put to the previous last position
swap_rows!(unscrambled_matrix, max_row_val, k - 1)
# skip 1 row and work on next one
end
return unscrambled_matrix
end
"""
    organize_shuff_matrix_with_plotting(input_matrix)

Call `organize_shuff_matrix`, display a heatmap of the result and return the
reorganised matrix together with the plot handle.
"""
function organize_shuff_matrix_with_plotting(input_matrix)
    unscrambled_matrix = organize_shuff_matrix(input_matrix)
    # Bug fix: `matrix_size` was undefined in the original plot title.
    matrix_size = size(unscrambled_matrix, 1)
    reorganized_plt_ref = plot_square_heatmap(unscrambled_matrix, 1, matrix_size;
        plt_title="unscrambled_matrix, size:$(matrix_size)",
        color_palete=:lightrainbow
    )
    display(reorganized_plt_ref)
    return unscrambled_matrix, reorganized_plt_ref
end
"""
function order_max_vals_near_diagonal(input_matrix; do_plots=false, direction=:descending)
Orders values in 'input_matrix' so that values next to diagonal are descending
(by default).
TODO- not working- Optionally, ascending order can be used by setting 'direction' to
':ascending'.
Results may be plotted by setting 'do_plot=true'.
"""
function order_max_vals_near_diagonal(input_matrix; direction=:descending)
# Find max values next to the diagonal
matrix_size = size(input_matrix, 1)
if direction == :descending
# ordering_function = findmax
new_ord_value = -1
iteration_values = matrix_size:-2:2
# iteration_values = 2:2:matrix_size
elseif direction == :ascending
# ordering_function = findmin
new_ord_value = findmax(input_matrix)[1] * 2
iteration_values = 2:2:matrix_size
else
# @error "Unknow ordering was given"
throw("Unknow ordering was given")
end
reordered_matrix = copy(input_matrix)
row_indices = 1:2:matrix_size
col_indices = 2:2:matrix_size
coord_set = [CartesianIndex(row_indices[k], col_indices[k]) for k = 1:matrix_size÷2]
diag_max_values = reordered_matrix[coord_set]
for k = iteration_values
max_val, max_ind = findmax(diag_max_values)
(direction == :descending) ? (position = floor(k ÷ 2)) : (position = floor(k ÷ 2))
diag_max_values[max_ind] = diag_max_values[position]
diag_max_values[position] = new_ord_value
max_ind *= 2
swap_rows!(reordered_matrix, k, max_ind)
swap_rows!(reordered_matrix, k - 1, max_ind - 1)
end
return reordered_matrix
end
"""
    order_max_vals_near_diagonal_with_plotting(input_matrix; kwargs...)

Call `order_max_vals_near_diagonal`, display a heatmap of the reordered
matrix and return the reordered matrix together with the plot handle.
"""
function order_max_vals_near_diagonal_with_plotting(input_matrix; kwargs...)
    reordered_matrix = order_max_vals_near_diagonal(input_matrix; kwargs...)
    # Bug fix: `matrix_size` was undefined in the original plot title.
    matrix_size = size(reordered_matrix, 1)
    reorganized_plt_ref = plot_square_heatmap(reordered_matrix, 1, matrix_size;
        plt_title="reordered_matrix, size:$(matrix_size)",
        color_palete=:lightrainbow)
    display(reorganized_plt_ref)
    return reordered_matrix, reorganized_plt_ref
end
"""
function fine_tune_matrix(input_matrix; do_plots=false)
Check if velues next to the maximal values are organized in descending order.
"""
function fine_tune_matrix(input_matrix)
# Find max values next to the diagonal
matrix_size = size(input_matrix, 1)
fine_tune_matrix = copy(input_matrix)
# if direction == :descending
# # ordering_function = findmax
# new_ord_value = -1
# iteration_values = matrix_size:-2:2
# # iteration_values = 2:2:matrix_size
#
# elseif direction == :ascending
# # ordering_function = findmin
# new_ord_value = findmax(input_matrix)[1]*2
# iteration_values = 2:2:matrix_size
# else
# # @error "Unknow ordering was given"
# throw("Unknow ordering was given")
# end
for k = 2:2:matrix_size-1
if fine_tune_matrix[k-1, k+1] > fine_tune_matrix[k, k+1]
swap_rows!(fine_tune_matrix, k, k - 1)
end
end
return fine_tune_matrix
end
"""
    fine_tune_matrix_with_ploting(input_matrix)

Call `fine_tune_matrix`, display a heatmap of the result and return the
tuned matrix together with the plot handle.
"""
function fine_tune_matrix_with_ploting(input_matrix)
    # Bug fix: the local result used to shadow the `fine_tune_matrix`
    # function itself (causing an UndefVarError at the call), and the plot
    # referenced undefined `reordered_matrix`/`matrix_size` variables.
    tuned_matrix = fine_tune_matrix(input_matrix)
    matrix_size = size(tuned_matrix, 1)
    fine_tuned_plt_ref = plot_square_heatmap(tuned_matrix, 1, matrix_size;
        plt_title="fine_tuned, size:$(matrix_size)",
        color_palete=:lightrainbow)
    display(fine_tuned_plt_ref)
    return tuned_matrix, fine_tuned_plt_ref
end
# TODO separate plotting from processing
"""
    order_max_vals_by_row_avg(input_matrix; do_plots=false)

Reorder the rows of `input_matrix` according to their average value and
return the sorted matrix together with a heatmap handle of the result
(displayed when `do_plots` is set).
"""
function order_max_vals_by_row_avg(input_matrix; do_plots = false)
    matrix_size = size(input_matrix, 1)
    # Row average
    row_avg = reshape(mean(input_matrix, dims = 1), (matrix_size, 1))
    # Using the function below, because sortperm is not working on Array{Float64,2}
    sorted_rows_indexes = [findall(x -> x == sort(row_avg, dims = 1)[k], row_avg)[1][1] for k = 1:matrix_size]
    matrix_indices = collect(range(1, matrix_size))
    # Create a list of indices whose corresponding values are ordered
    sorted_indices = sort!([1:matrix_size;],
        by = i -> (sorted_rows_indexes[i], matrix_indices[i]))
    sorted_matrix = copy(input_matrix)
    for k = 1:matrix_size÷2
        max_ind = sorted_indices[k]
        sorted_indices[k] = k
        sorted_indices[max_ind] = max_ind
        swap_rows!(sorted_matrix, k, max_ind)
    end
    # Bug fix: every plot (and the return value) previously referenced an
    # undefined `reordered_matrix`, so this function always raised
    # UndefVarError; the computed `sorted_matrix` is used instead. The unused
    # side-by-side comparison plot was removed as dead code.
    reorganized_plt_ref = plot_square_heatmap(sorted_matrix, 1, matrix_size;
        plt_title = "reordered_matrix, size:$(matrix_size)",
        color_palete = :lightrainbow)
    if do_plots
        display(reorganized_plt_ref)
    end
    return sorted_matrix, reorganized_plt_ref
end
"""
    order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots=false, direction=:descending)

Reorder `input_matrix` pair-of-rows by pair-of-rows: for each odd row `k`,
the globally largest remaining value is swapped (via `swap_rows`) next to
the diagonal, followed by its row's second-largest partner. Optionally plots
every intermediate step (`do_all_plots`) or just the final result
(`do_final_plot`). Returns the reordered matrix and the plot handle (an
empty vector when no final plot was requested).

NOTE(review): the keyword `direction` is currently unused.
"""
function order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots = false, direction=:descending)
    # Find max values next to the diagonal
    matrix_size = size(input_matrix,1)
    reordered_matrix = copy(input_matrix)
    reorganized_plt_ref = []
    # for every row in matrix
    for k = 1:2:matrix_size-1
        # global reordered_matrix
        # TODO separate plotting from processing
        reorganized_plt_ref_pt0 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        # Largest value of the not-yet-fixed trailing submatrix.
        max_val, max_ind = findmax(reordered_matrix[k:end, k:end])
        # Take the smaller coordinate
        # (max_ind[1] < max_ind[2]) ? (target_row = max_ind[1]) : (target_row = max_ind[2])
        target_row = max_ind[1]+k-1
        reordered_matrix = swap_rows(reordered_matrix, k, target_row)
        # TODO separate plotting from processing
        reorganized_plt_ref_pt1 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        # The largest partner value in row k is placed directly below.
        val, second_target = findmax(reordered_matrix[k,k:end])
        second_target = second_target+k-1
        reordered_matrix = swap_rows(reordered_matrix, k+1, second_target)
        # end
        #
        #
        if do_all_plots
            # TODO separate plotting from processing
            reorganized_plt_ref_pt2 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                    plt_title = "reordered_matrix, size:$(matrix_size)",
                                    color_palete=:lightrainbow)
            reorganized_plt_ref = plot(reorganized_plt_ref_pt0, reorganized_plt_ref_pt1, reorganized_plt_ref_pt2, layout=(1,3), size=(1400,400))
            display(reorganized_plt_ref)
        end
    end
    if do_final_plot
        # TODO separate plotting from processing
        reorganized_plt_ref = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        # display(reorganized_plt_ref)
    else
        reorganized_plt_ref=[]
    end
    return reordered_matrix, reorganized_plt_ref
end
"""
function get_key_for_value(d::Dict, target_value)
Returns key of the dictionary which corresponds to the given target value.
"""
function get_key_for_value(d::Dict, target_value)
for (key, value) in d
if value == target_value
return key
end
end
end
"""
    order_max_vals_near_diagonal3(input_matrix, ordering; direction=:descending)

Variant of `order_max_vals_near_diagonal2` that also tracks region labels:
`ordering` maps a region name to its current row index. Returns the
reordered matrix together with a new `Dict` mapping each moved region to its
new row index.

NOTE(review): the keyword `direction` is currently unused, and progress is
reported with `println` side effects.
"""
function order_max_vals_near_diagonal3(input_matrix, ordering; direction=:descending)
    # Find max values next to the diagonal
    matrix_size = size(input_matrix,1)
    reordered_matrix = deepcopy(input_matrix)
    new_ordering = Dict()
    # for every row in matrix
    for m = 1:2:matrix_size-1
        # global reordered_matrix
        max_val, max_ind = findmax(reordered_matrix[m:end, m:end])
        # Take the smaller coordinate
        first_target = max_ind[1]+m-1
        reordered_matrix = swap_rows(reordered_matrix, m, first_target)
        # check for duplicates
        val, second_target = findmax(reordered_matrix[m,m:end])
        second_target = second_target+m-1
        if first_target == second_target
            @debug "are same"
            # Avoid swapping a row with itself by falling back one position.
            second_target -= 1
        end
        reordered_matrix = swap_rows(reordered_matrix, m+1, second_target)
        # find key which initially had region = first_target
        region1 = get_key_for_value(ordering, first_target)
        region2 = get_key_for_value(ordering, second_target)
        if region1 in keys(new_ordering)
            @warn "repeated"
        end
        if region2 in keys(new_ordering)
            @warn "repeated2"
        end
        new_ordering[region1] = m
        new_ordering[region2] = m +1
        println("Replaced $(region1) fom $(ordering[region1]) to $(m)")
        println("Replaced $(region2) fom $(ordering[region2]) to $(m+1)")
    end
    return reordered_matrix, new_ordering
end
##
# """
# matrix_poling!(input_matrix; method = "avg_pooling")
#
# Takes a matrix and changes it's values to the same value, according to 'method'.
# Possible methods are:
# - 'max_pooling'- finds maximal value and replaces all values with the maximal
# value.
# - 'avg_pooling'- changes values to the average value
# - 'gauss_pooling'- uses gausian kernel as weights to the values in the matrix
# """
# function matrix_poling!(input_matrix::Array; method::String = "max_pooling")
# if method == "max_pooling"
# max_val = findmax(input_matrix)[1]
# input_matrix .= max_val
# end
# return input_matrix
# end
# matrix_poling(mat[1:3,1:3]; method = "gauss_pooling")
"""
    matrix_poling(input_matrix::Array; method="avg_pooling", kernel_size=3, gauss_sigma=1)

Return a pooled copy of `input_matrix` according to `method`:
- "max_pooling": every entry becomes the matrix maximum;
- "avg_pooling": every entry becomes the floored mean value;
- "gauss_pooling": the matrix is smoothed with a Gaussian kernel of width
  `gauss_sigma` (uses `Kernel.gaussian`/`imfilter`) and floored.
Any other `method` returns the copy untouched. `kernel_size` is currently
unused and kept for interface compatibility.
"""
function matrix_poling(input_matrix::Array; method = "avg_pooling", kernel_size = 3, gauss_sigma = 1)
    pooled = copy(input_matrix)
    if method == "max_pooling"
        pooled .= findmax(pooled)[1]
    elseif method == "avg_pooling"
        pooled .= floor(Int, mean(pooled))
    elseif method == "gauss_pooling"
        @debug "Gauss pooling"
        smoothing_kernel = Kernel.gaussian(gauss_sigma)
        pooled = imfilter(pooled, smoothing_kernel)
        pooled .= floor.(Int, pooled)
    end
    return pooled
end
"""
    subsample_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling")

Pool the symmetric `square_matrix` in patches of `subsamp_size` (see
`reorganize_matrix` for the available `method`s). Even patch sizes larger
than 2 get an additional 2×2 pooling pass. Throws an error when the input
matrix is not symmetric.
"""
function subsample_matrix(square_matrix::Array; subsamp_size::Int = 2, method = "max_pooling")
    if !issymmetric(square_matrix)
        # Bug fix: the message used to claim the matrix was "not square",
        # although symmetry is what is checked here.
        error("Input matrix is not symmetric")
    end
    if subsamp_size == 2
        # Bug fix: reorganize_matrix used to be called twice here with the
        # first result discarded; a single call suffices.
        return reorganize_matrix(square_matrix; subsamp_size = subsamp_size, method = method)
    elseif subsamp_size % 2 == 0
        new_matrix = reorganize_matrix(square_matrix; subsamp_size = subsamp_size, method = method)
        return reorganize_matrix(new_matrix; subsamp_size = 2, method = method)
    else
        return reorganize_matrix(square_matrix; subsamp_size = subsamp_size, method = method)
    end
end
#= Should the result be overlapping or not? Options:
- filter it as a whole image, return diagonal
- filter subimages- the problem is that htere will be edge effect at every border of cells
- filter subimages and assign midde value to whole patch
- filter whole upper diagonal matrix
Plot gaussian kernel
Should we care about
=#
"""
    reorganize_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling", overlap::Int=0, gauss_sigma=1)

Pool the upper triangle of `square_matrix` and mirror the result back onto
the lower triangle, keeping the diagonal unchanged. `method` selects between
patchwise "max_pooling"/"avg_pooling" (patch width `subsamp_size`, patches
may `overlap`), a global "gauss_pooling" smoothing of the flipped upper
triangle, and a per-entry "row_pooling" that mixes each entry with a
Gaussian-weighted window along its row and column.
"""
function reorganize_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling", overlap::Int=0,gauss_sigma=1)
    if method == "gauss_pooling"
        (subsamp_size >= 3) || error("Can not do gaussian pooling for area smaller than 3x3")
    end
    @debug method
    # Subsample upper half
    square_matrix2 = Float64.(copy(square_matrix))
    total_rows, total_cols = size(square_matrix)
    size_mismatch_flag = false
    # if total_rows%2 != 0
    #     total_rows -= 1
    # end
    # if total_cols%2 != 0
    #     total_cols -= 1
    # end
    if method == "gauss_pooling"
        # Keep only the strict upper triangle; the rest starts from zero.
        square_matrix2 = zeros(Int,size(square_matrix))
        square_matrix2[1:end-1,2:end] = UpperTriangular(square_matrix[1:end-1,2:end])
        # flip matrix
        do_matrix_flip = true
        if do_matrix_flip
            # Mirror the matrix through its center, then shift by one row/
            # column so the flipped copy lines up with the lower triangle.
            square_matrix3 = zeros(Float64,size(square_matrix))
            for row in 0:total_rows-1
                for col in 0:total_cols-1
                    square_matrix3[row+1,col+1] = square_matrix[end-row,end-col]
                end
            end
            square_matrix3[1:end-1,:] = square_matrix3[2:end,:]
            square_matrix3[:,2:end] = square_matrix3[:,1:end-1]
        else
            square_matrix3 = copy(square_matrix2)
            square_matrix3[1:end-1,:] = square_matrix2[2:end,:]
            square_matrix3[:,1:end-1] = square_matrix2[:,2:end]
        end
        # Fill the lower triangle (incl. diagonal) with the flipped data so
        # the Gaussian filter sees no artificial zero edge at the diagonal.
        for row in 1:total_rows
            for col in 1:row
                square_matrix2[row,col] = square_matrix3[row,col]
            end
        end
        filtering_kernel = Kernel.gaussian(gauss_sigma)
        square_matrix2 = imfilter(square_matrix2, filtering_kernel)
        # NOTE(review): `Int.(floor.(Int, ...))` converts twice; a single
        # `floor.(Int, ...)` would suffice.
        square_matrix2 .= Int.(floor.(Int,square_matrix2))
    elseif method == "row_pooling"
        # Discrete Gaussian weights of length `len` centred at `μ`.
        function gauss_func(σ,len;μ=0)
            maxv = len÷2
            minv= -len÷2
            if len%2 == 0
                maxv-=1
            end
            x = collect(minv:1:maxv)
            return exp.(-(((x.-μ)./σ)./2).^2)./(σ*sqrt(2π))
        end
        # Take 'subsamp_size'in total in horizontal and in vertical line from
        # current matrix element
        # subsamp_size = 5
        val_range = subsamp_size÷2
        r = (subsamp_size÷2)*2
        # total_rows = 266
        # total_cols = 266
        # row = 3
        for row = 1:1:(total_rows-1)
            for col = (row+1):1:total_cols
                # Shrink/extend the window near the matrix corners so it
                # stays inside the strict upper triangle.
                if row < r && col <= r
                    row_range = row - 1
                    col_range = val_range + (val_range-row_range÷2)
                else
                    row_range = val_range
                end
                if row > total_rows-r && col >= total_cols-r
                    col_range = total_cols - row -1
                    row_range = val_range + (val_range-col_range)
                else
                    col_range = val_range
                end
                r_beg = row - row_range
                r_end = row + row_range
                c_beg = col - col_range
                c_end = col + col_range
                # if r_beg < 1 && r_end > total_rows
                if r_beg < 1
                    r_end += abs(r_beg)+1
                    r_beg = 1
                end
                if r_end > col
                    r_beg -= abs(r_end-col)
                    if r_beg <1
                        r_beg=1
                    end
                    r_end = col-1
                end
                # end # if both
                # if c_beg < row+1 && c_end > total_cols
                if c_beg < row+1
                    c_end += abs(c_beg-(row+1))
                    c_beg = row+1
                end
                if c_end > total_cols
                    c_beg -= abs(total_rows-c_end)
                    c_end = total_cols
                end
                vrange = r_beg:r_end
                # Accumulate the Gaussian-weighted column and row windows on
                # top of the original value; the catch guards against any
                # remaining out-of-bounds window.
                try
                    square_matrix2[row,col] += sum(
                        square_matrix[vrange,col]
                        .* gauss_func(gauss_sigma,length(vrange))
                    )
                    vrange = c_beg:c_end
                    square_matrix2[row,col] += sum(
                        square_matrix[row,c_beg:c_end] .*
                        gauss_func(gauss_sigma,length(vrange))
                    )
                catch e
                    @error "Failed to compute row pooling"
                    @error "row" row
                    @error "col" col
                    square_matrix2[row,col] = 0
                    break
                    # error(e)
                end
            end # for col
        end # for rows
    else
        # Patchwise pooling via `matrix_poling`; patches that would run past
        # the matrix edge are skipped.
        step = subsamp_size-overlap
        for row = 1:step:(total_rows-2)
            for col = (row+subsamp_size):step:total_cols
                r_beg = row
                r_end = row+subsamp_size-1
                c_beg = col
                c_end = col+subsamp_size-1
                if r_end > total_rows || c_end > total_cols
                    size_mismatch_flag = true
                    continue
                end
                square_matrix2[r_beg:r_end,c_beg:c_end] =
                    matrix_poling(square_matrix[r_beg:r_end,c_beg:c_end]; method=method,kernel_size=subsamp_size, gauss_sigma=gauss_sigma)
            end # for col
            size_mismatch_flag && continue
        end # for rows
    end # if method
    # Copy over lower half
    for row in 2:total_rows
        for col in 1:row-1
            square_matrix2[row,col] = square_matrix2[col,row]
        end
    end
    # keep same values on diagonal
    for row in 1:total_rows
        square_matrix2[row,row] = square_matrix[row,row]
    end
    return square_matrix2
end
"""
    pool_matrix(square_matrix::Array; method="max_pooling")

Non-mutating wrapper around `pool_matrix!`: pool a copy of `square_matrix`
with the given `method` and return it, leaving the input untouched.
"""
function pool_matrix(square_matrix::Array; method = "max_pooling")
    out_matrix = copy(square_matrix)
    # Bug fix: the in-place pooling used to be applied to the *input* matrix,
    # so the unmodified copy was returned (and the caller's matrix mutated).
    pool_matrix!(out_matrix; method = method)
    return out_matrix
end
"""
add_random_patch(input_matrix; patch_size=1, total_patches=1, locations)
Takes a matrix and replaces some values with random values. Returns a new matrix
with replaced values and indicies where replacement took place.
Values can be
replaced by setting 'patch_size' to values bigger than 1. If the input matrix
is symmetric, then output matrix will be symmetric as well (values from above
diagnoal will be copied over values from below diagonal).
"""
function add_random_patch(input_matrix::Matrix; patch_size=1, total_patches=1, locations=CartesianIndex(0))
total_rows, total_cols = size(input_matrix)
max_row = total_rows-patch_size+1
max_col = total_cols-patch_size+1
output_matrix = copy(input_matrix)
max_val = findmax(output_matrix)[1]
min_val = findmin(output_matrix)[1]
matrix_type = typeof(output_matrix[1])
if patch_size>total_rows || patch_size>total_cols
error(DimensionMismatch,": Patch size is bigger than the matrix!")
end
# ===
issymmetric(input_matrix) ? (symmetrize_matrix = true) : (symmetrize_matrix = false)
if locations == CartesianIndex(0)
@debug "Locations were not specified- random locations will be used"
if symmetrize_matrix
possible_indices = findall(x->true,UpperTriangular(output_matrix))
possible_indices = possible_indices[findall(x->x[1]<=x[2], possible_indices)]
possible_indices = possible_indices[findall(x->x[1]<=max_row, possible_indices)]
possible_indices = possible_indices[findall(x->x[2]<=max_col, possible_indices)]
else
possible_indices = possible_indices = findall(x->true,output_matrix)
end
tartget_indices = possible_indices[randcycle(length(possible_indices))]
else
wrong_indices = findall(x->x[1]>max_row || x[2]>max_col, locations)
if isempty(wrong_indices)
tartget_indices = locations
total_patches = size(locations)[1]
else
error(DimensionMismatch,": Given indices are bigger than the matrix dimensions!")
end
end
changed_indices = CartesianIndex[]
for replacement=1:total_patches
row = tartget_indices[replacement][1]
col = tartget_indices[replacement][2]
r_range = row:row+patch_size-1
c_range = col:col+patch_size-1
for ind in CartesianIndices((r_range,c_range))
push!(changed_indices,ind)
end
new_rand_matrix = floor.(matrix_type, rand(patch_size,patch_size) .* (max_val-min_val+1) .+ min_val)
output_matrix[r_range,c_range] .= new_rand_matrix
end
if symmetrize_matrix
# Inverse second column
changed_indices2 = [changed_indices changed_indices]
for ind = 1:size(changed_indices)[1]
c_ind = changed_indices2[ind,2]
changed_indices2[ind,2] = CartesianIndex(c_ind[2],c_ind[1])
end
# Copy over lower half
for row in 2:total_rows
for col in 1:row-1
output_matrix[row,col] = output_matrix[col,row]
end
end
@debug "Returned symmetric matrix" output_matrix
return output_matrix, changed_indices2
else
return output_matrix, changed_indices
end
end
"""
    scramble_matrix(in_matrix::Array; k::Int=2, max_iterations=-1)

Return a copy of `in_matrix` in which the entries of k×k patches lying above
the diagonal have been locally permuted. At most `max_iterations` patches
are scrambled; a non-positive value allows up to `n*(n-1)/2` patches, where
`n` is the number of columns.
"""
function scramble_matrix(in_matrix::Array; k::Int = 2, max_iterations = -1)
    scrambled = copy(in_matrix)
    n_rows, n_cols = size(in_matrix)
    budget = max_iterations < 1 ? (n_cols * (n_cols - 1)) / 2 : max_iterations
    used = 0
    for patch_row = 1:k:n_rows-k
        for patch_col = n_cols:-k:patch_row+1
            # The patch touching the diagonal in this band is left as-is.
            patch_row == patch_col - k + 1 && continue
            cells = collect(CartesianIndices((patch_row:patch_row+k-1, patch_col-k+1:patch_col)))
            # Fill the patch with a random permutation of its own entries.
            scrambled[cells] .= in_matrix[shuffle(cells)]
            used += 1
            used >= budget && break
        end
        used >= budget && break
    end
    return scrambled
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 15396 | using LinearAlgebra
using StatsBase
"""
shift_to_non_negative(matrix::Array)
Returns a matrix in which values are non-negative. This is done by finding the
minimal value in the input matrix and adding its absolute value to the matrix
elements.
"""
function shift_to_non_negative(matrix::Array)
min_val = findmin(matrix)[1]
if min_val < 0
return matrix .-= min_val
else
return matrix
end
end
"""
    normalize_to_01(matrix::Array; use_factor=false, norm_factor=256)

Return a copy of `matrix` rescaled to the range [0, 1]. Values are first
shifted so the minimum becomes zero, then divided by the resulting maximum,
or — when `use_factor` is true — by `norm_factor` (a warning is issued if
shifted values exceed `norm_factor`).
"""
function normalize_to_01(matrix::Array; use_factor = false, norm_factor = 256)
    shifted = copy(matrix)
    # Both original branches reduce to the same operation:
    # move the minimum of the matrix to zero.
    shifted .-= minimum(shifted)
    top = maximum(shifted)
    if use_factor
        if top > norm_factor
            @warn "Maximal values exceed \'norm_factor\'."
        end
        return shifted ./ norm_factor
    end
    return shifted ./ top
end
# function symmetrize_image(image)
"""
    diagonal_symmetrize(image::Matrix; below_over_upper::Bool=false)

Return a square, diagonally symmetric copy of `image`. By default the entries
above the diagonal are mirrored onto the entries below it; setting
`below_over_upper=true` mirrors the sub-diagonal entries onto the upper
triangle instead.

Non-square inputs are first cropped to the top-left `k`×`k` corner, where
`k = min(rows, cols)`.
"""
function diagonal_symmetrize(image::Matrix; below_over_upper::Bool = false)
    n = minimum(size(image))
    sym = copy(image[1:n, 1:n])
    # Walk every strictly-upper-triangle position and mirror in the
    # requested direction.
    for col = 1:n, row = 1:(col - 1)
        if below_over_upper
            sym[row, col] = sym[col, row]
        else
            sym[col, row] = sym[row, col]
        end
    end
    # The crop above guarantees a square result; fail loudly if it ever isn't.
    try
        checksquare(sym)
    catch err
        if isa(err, DimensionMismatch)
            @error "Resulting matrix is not a square matrix"
            throw(err)
        end
    end
    return sym
end
# =====
# matrix ordering
"""
function get_ordered_matrix(in_matrix::Matrix;
assign_same_values::Bool = false,
force_symmetry::Bool = false,
small_dist_grouping::Bool = false,
min_dist::Number = 1e-16,
total_dist_groups::Int = 0,
ordering_start::Int=1)
Takes a @input_matrix and returns ordered form of this matrix.
The ordered form is a matrix which elements represent ordering from smallest to
highest values in @input_matrix.
If @input_matrix is symmetric, then ordering happens only on the upper triangle;
the lower triangle is filled in symmetrically from the values above the diagonal.
When @assign_same_values is set to true, every group of entries sharing the
same value is assigned the same ordering number.
Symmetric ordering can be forced with the @force_symmetry parameter.
By setting 'small_dist_grouping' to true, all values whose difference is
smaller than 'min_dist' will be assigned the same order number.
# Examples
```julia-repl
julia> a = [0 11 12;
11 0 13;
12 13 0];
julia> get_ordered_matrix(a)
3×3 Array{Int64,2}:
0 1 2
1 0 3
2 3 0
```
```julia-repl
julia> b = [38 37 36 30;
37 34 30 32;
36 30 31 30;
30 32 30 29]
julia> get_ordered_matrix(b; assign_same_values=false)
4×4 Array{Int64,2}:
0 6 5 2
6 0 1 4
5 1 0 3
2 4 3 0
julia> get_ordered_matrix(b; assign_same_values=true)
4×4 Array{Int64,2}:
0 4 3 1
4 0 1 2
3 1 0 1
1 2 1 0
```
"""
function get_ordered_matrix(in_matrix::Matrix;
    assign_same_values::Bool = false,
    force_symmetry::Bool = false,
    small_dist_grouping::Bool = false,
    min_dist::Number = 1e-16,
    total_dist_groups::Int = 0,
    ordering_start::Int=1)
    # TODO Symmetry must be forced for matrix in which there are NaN elements- needs
    # to be further investigated
    # TODO not working for negative only values
    # TODO check for square matrix
    # ==
    mat_size = size(in_matrix)
    # Ordered result is always an Int matrix of the same shape.
    ord_mat = zeros(Int, mat_size)
    # Decide which entries participate in the ordering: for (forced) symmetric
    # input only the strict upper triangle, otherwise every entry.
    if issymmetric(in_matrix) || force_symmetry
        matrix_indices =
            generate_indices(mat_size, symmetry_order = true, include_diagonal = false)
        do_symmetry = true
    else
        matrix_indices = generate_indices(mat_size, symmetry_order = false)
        do_symmetry = false
    end
    total_elements = length(matrix_indices)
    # Collect vector of indices
    all_ind_collected = arr_to_vec(matrix_indices)
    # Sort the index positions by the corresponding values in `in_matrix`
    # (ties broken by the indices themselves, so the result is deterministic).
    # TODO Cant this be done with sortperm? in_matrix > UpperTriangular |> sortperm
    index_sorting = sort_indices_by_values(in_matrix, all_ind_collected)
    ordering_number = ordering_start
    # Assign ordering numbers from the smallest value upwards.
    for k = 1:total_elements
        # global ordering_number
        next_sorted_pos = index_sorting[k]
        mat_ind = matrix_indices[next_sorted_pos]
        if assign_same_values && k != 1
            prev_sorted_pos = index_sorting[k-1]
            prev_mat_ind = matrix_indices[prev_sorted_pos]
            # Reuse the previous ordering number when this entry has the same
            # value as the previous one (or is within `min_dist` of it when
            # `small_dist_grouping` is enabled).
            cond1 = in_matrix[prev_mat_ind] == in_matrix[mat_ind]
            cond2 = small_dist_grouping
            cond3 = abs(in_matrix[prev_mat_ind] - in_matrix[mat_ind]) < min_dist
            if cond1 || (cond2 && cond3)
                ordering_number -= 1
            end
        end
        # Write the number (and its mirror position when symmetric).
        set_values!(ord_mat, mat_ind, ordering_number; do_symmetry = do_symmetry)
        ordering_number += 1
        # else
        # set_values!(ord_mat, mat_ind, ordering_number; do_symmetry=do_symmetry)
        # ordering_number+=1
        # end
    end
    return ord_mat
end
# TODO this one has to be specified for 3 dim matrix
"""
    get_ordered_matrix(input_array::Array{Any,3}; do_slices=true, dims=0)

Compute the ordered form of a 3-dimensional array. When `do_slices` is true,
the ordering is applied independently to every 2-D slice taken along
dimension `dims` (which must be 1, 2 or 3); otherwise the whole array is
ordered at once.

Throws `DomainError` when `dims` is out of range.
"""
function get_ordered_matrix(input_array::Array{Any,3}; do_slices = true, dims = 0)
    arr_size = size(input_array)
    out_arr = zeros(Int, arr_size)
    if do_slices
        # Validate `dims`. The original comparisons were inverted and
        # rejected every valid dimension.
        if dims < 1
            throw(DomainError(dims, "Given dimension must be positive value."))
        elseif dims > length(arr_size)
            throw(DomainError(dims, "Given dimension is greater than total size of array."))
        elseif dims > 3
            throw(DomainError(dims, "Given dimension must not be greater than 3."))
        end
        for dim = 1:arr_size[dims]
            if dims == 1
                out_arr[dim, :, :] = get_ordered_matrix(input_array[dim, :, :])
            elseif dims == 2
                out_arr[:, dim, :] = get_ordered_matrix(input_array[:, dim, :])
            elseif dims == 3
                out_arr[:, :, dim] = get_ordered_matrix(input_array[:, :, dim])
            end
        end
    else
        out_arr = get_ordered_matrix(input_array)
    end
    # The original fell off the `if` and returned `nothing` in the slicing
    # branch; return the computed result explicitly.
    return out_arr
end
"""
    get_ordered_matrix(input_array::Array)

Generic fallback: return an array of the same shape in which every entry is
replaced by its rank (1 = smallest) in the value ordering of `input_array`,
with ties broken by position.
"""
function get_ordered_matrix(input_array::Array)
    out_array = copy(input_array)
    total_elements = length(input_array)
    # Collect all Cartesian indices (column-major order).
    all_ind_collected = collect(reshape(generate_indices(size(input_array)), total_elements))
    # Gather the values once; the original re-indexed the whole array on
    # every comparison inside `sort!`, costing O(n) per comparison.
    gathered_values = input_array[all_ind_collected]
    # Positions sorted by (value, index) — deterministic tie-breaking.
    index_sorting = sort!(
        [1:total_elements;],
        by = i -> (gathered_values[i], all_ind_collected[i]),
    )
    for k = 1:total_elements
        # Linear position k in the sorted order receives rank k.
        out_array[index_sorting[k]] = k
    end
    return out_array
end
# Care must be taken so that values from 'input_matrix' are within distance
# groups, otherwise error is thrown.
"""
    group_distances(input_matrix::Array, total_dist_groups::Int)

Quantize the values of `input_matrix` into `total_dist_groups` groups.
The matrix is first normalized to [0, 1]; every value is then replaced by
the upper boundary of the group it falls into, so the result contains at
most `total_dist_groups` distinct values.

Throws `DomainError` when there are more groups than matrix entries.
"""
function group_distances(input_matrix::Array, total_dist_groups::Int)
    normed_matrix = normalize_to_01(input_matrix)
    target_matrix = copy(normed_matrix)
    # `length` generalizes the original `h, w = size(...)`, which failed for
    # arrays that are not two-dimensional.
    if length(input_matrix) < total_dist_groups
        throw(DomainError("Total number of groups exceed total number of entries in input matrix"))
    end
    total_borders = total_dist_groups + 1
    # Evenly spaced group boundaries on [0, 1].
    range_val = collect(range(0, 1, length = total_borders))
    for k = 2:total_borders
        # Values on a shared boundary end up in the later group because
        # subsequent iterations overwrite them.
        indices = findall(x -> x >= range_val[k-1] && x <= range_val[k], normed_matrix)
        target_matrix[indices] .= range_val[k]
    end
    # (Removed a dead `unique(target_matrix)` whose result was discarded.)
    return target_matrix
end
"""
    generate_indices(matrix_size::Tuple; symmetry_order::Bool=false, include_diagonal::Bool=true)

Return a vector of all Cartesian indices of an array of size `matrix_size`,
in column-major order. When `symmetry_order` is true, only indices on or
above the diagonal (first coordinate <= second coordinate) are kept;
`include_diagonal=false` additionally drops the diagonal indices.
"""
function generate_indices(matrix_size::Tuple;
    symmetry_order::Bool = false,
    include_diagonal::Bool = true)
    # Every position of the array, traversed column-major.
    all_positions = CartesianIndices(matrix_size)
    if symmetry_order
        # Keep the upper triangle only.
        selected = [pos for pos in all_positions if pos[1] <= pos[2]]
    else
        selected = vec(collect(all_positions))
    end
    include_diagonal || filter!(pos -> pos[1] != pos[2], selected)
    return selected
end
"""
    generate_indices(matrix_size::Int; symmetry_order::Bool=false, include_diagonal::Bool=true)

Convenience method: generate indices for a square matrix with
`matrix_size` rows and columns. See the `Tuple` method for the meaning
of the keyword arguments.
"""
function generate_indices(matrix_size::Int;
    symmetry_order::Bool = false,
    include_diagonal::Bool = true)
    # Delegate to the Tuple method for a square shape.
    square_size = (matrix_size, matrix_size)
    return generate_indices(square_size;
        symmetry_order = symmetry_order,
        include_diagonal = include_diagonal)
end
"""
    arr_to_vec(some_array::Array)

Return the elements of `some_array` as a freshly allocated `Vector`,
in column-major order.
"""
function arr_to_vec(some_array::Array)
    return collect(vec(some_array))
end
function cartesianInd_to_vec(some_array::CartesianIndices)
    # Flatten the Cartesian index grid into a plain vector (column-major).
    return collect(vec(some_array))
end
"""
    sort_indices_by_values(values_matrix::T, index_vector) where {T<:VecOrMat}

Return the permutation (a `Vector{Int}` of positions into `index_vector`)
that sorts `index_vector` by the corresponding values in `values_matrix`,
with ties broken by the indices themselves.

Throws `TypeError` when `index_vector` is not a `Vector`.
"""
function sort_indices_by_values(values_matrix::T, index_vector) where {T<:VecOrMat}
    if !isa(index_vector, Vector)
        throw(TypeError(
            :sort_indices_by_values,
            "\'index_vector\' must be a vector, otherwise an ordering list can no be created!",
            Vector,
            typeof(index_vector),
        ))
    end
    total_elements = length(index_vector)
    # Gather the values once; the original re-indexed the whole matrix on
    # every comparison inside `sort!`, costing O(n) work per comparison.
    gathered_values = values_matrix[index_vector]
    return sort!(
        [1:total_elements;],
        by = i -> (gathered_values[i], index_vector[i]),
    )
end
"""
    set_values!(input_matrix::Matrix, position::CartesianIndex, target_value::Number; do_symmetry=false)

Write `target_value` into `input_matrix` at `position`. When `do_symmetry`
is true, the transposed position receives the same value. Returns the
mutated matrix.
"""
function set_values!(input_matrix::Matrix,
    position::CartesianIndex,
    target_value::Number;
    do_symmetry::Bool = false)
    row, col = position[1], position[2]
    input_matrix[row, col] = target_value
    do_symmetry && (input_matrix[col, row] = target_value)
    return input_matrix
end
# matrix ordering
# =====
function get_high_dim_ordered_matrix(input_matrix)
    # Order every 2-D slice along the first dimension independently.
    dims = size(input_matrix)
    ordered = zeros(Int, dims)
    for idx = 1:dims[1]
        ordered[idx, :, :] = get_ordered_matrix(input_matrix[idx, :, :])
    end
    return ordered
end
"""
    reduce_arrs_to_min_len(arrs)

Takes a collection of arrays whose first dimensions differ and returns a copy
in which every array is truncated (along dimension 1) to the shortest length
found in the input; rows beyond that length are discarded.
"""
function reduce_arrs_to_min_len(arrs::Array)
    new_arr = copy(arrs)
    simulation = size(new_arr, 1)
    # Nothing to truncate for an empty collection.
    simulation == 0 && return new_arr
    # Shortest first-dimension length across all arrays. Kept as an Int;
    # the original used an `Inf` sentinel, making `min_size` type-unstable,
    # and left a dead `min_size = Inf` before the return.
    min_size = minimum(size(new_arr[m], 1) for m = 1:simulation)
    @debug "Truncating arrays to" min_size
    for m = 1:simulation
        new_arr[m] = new_arr[m][1:min_size, :]
    end
    return new_arr
end
"""
    increase_arrs_to_max_len(arrs)

Takes a collection of arrays whose first dimensions differ and returns a copy
in which every array is zero-padded (along dimension 1) up to the longest
length found in the input.
"""
function increase_arrs_to_max_len(arrs)
    new_arr = copy(arrs)
    simulation = size(new_arr, 1)
    simulation == 0 && return new_arr
    # Longest first-dimension length across all arrays.
    max_size = maximum(size(new_arr[m], 1) for m = 1:simulation)
    @debug "Padding arrays to" max_size
    for m = 1:simulation
        # Generalized: the original hard-coded `zeros(Int, max_size, 3)`,
        # which failed for arrays that are not Int-valued or not exactly
        # 3 columns wide.
        padded = zeros(eltype(new_arr[m]), max_size, size(new_arr[m], 2))
        padded[1:size(arrs[m], 1), :] = new_arr[m][:, :]
        new_arr[m] = padded
    end
    return new_arr
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 5883 | using Distances
using Random
# export generate_random_point_cloud,
# generate_geometric_matrix,
# generate_shuffled_matrix,
# generate_random_matrix,
# generate_matrix_ordering,
# # generate_set_of_graphs,
# # plot_betti_numbers,
# # save_matrix_to_file;
"""
    generate_random_point_cloud(number_of_points=12, dimensions=2)

Return a `dimensions` × `number_of_points` matrix of uniform random values
in [0, 1); every column is one point and every row one coordinate axis.
"""
function generate_random_point_cloud(number_of_points = 12, dimensions = 2)
    return rand(Float64, dimensions, number_of_points)
end
"""
Return a matrix which stores the pairwise Euclidean distances between every
point (column) of the @random_points matrix.

Relies on the third-party Distances.jl package (`pairwise`, `Euclidean`).
"""
function generate_geometric_matrix(random_points)
    # dims=2 treats columns as observations (points).
    geometric_matrix = Distances.pairwise(Euclidean(), random_points, dims=2)
    return geometric_matrix
end
"""
Returns an ordered geometric matrix generated by sampling the 'dims'-dimensional
unit cube with 'total_points' samples.

NOTE(review): not implemented — the body is empty and the function currently
returns `nothing`.
"""
function get_ordered_geom_matrix(dims::Integer, total_points::Integer)
    # TODO to be completed
end
"""
Returns a symmetric matrix with randomly permuted values from the @input_matrix.

NOTE(review): `rand!(indicies_collection, indicies_collection)` resamples the
collection *with replacement* while overwriting it in place, so the result is
not a true permutation of the index list — confirm whether `shuffle!` was
intended.
NOTE(review): symmetry of the output relies on @input_matrix itself being
symmetric; this is assumed, not checked.
"""
function generate_shuffled_matrix(input_matrix)
    matrix_size = size(input_matrix,1)
    # Positions of all positive entries (for a distance-like matrix this is
    # every off-diagonal element).
    indicies_collection = findall(x->x>0, input_matrix)
    # In-place resampling of the index list (with replacement — see note above).
    rand!(indicies_collection, indicies_collection)
    shuffeled_matrix = copy(input_matrix)
    # Swap the elements
    n=1
    for k in 1:matrix_size
        for m in k+1:matrix_size
            a = indicies_collection[n][1]
            b = indicies_collection[n][2]
            # Exchange entry (k,m) with the randomly drawn entry (a,b),
            # mirroring both writes to keep the result symmetric.
            shuffeled_matrix[k,m] = input_matrix[a,b]
            shuffeled_matrix[m,k] = input_matrix[b,a]
            shuffeled_matrix[a,b] = input_matrix[k,m]
            shuffeled_matrix[b,a] = input_matrix[m,k]
            n +=1
        end
    end
    return shuffeled_matrix
end
"""
    generate_random_matrix(matrix_size)

Return a `matrix_size` × `matrix_size` symmetric matrix with zeros on the
diagonal and independent uniform random values in [0, 1) mirrored across it.
"""
function generate_random_matrix(matrix_size)
    # One random draw per strictly-upper-triangle entry.
    draws = rand(div(matrix_size^2 - matrix_size, 2))
    output = zeros(matrix_size, matrix_size)
    next = 1
    for row = 1:matrix_size, col = (row + 1):matrix_size
        output[row, col] = draws[next]
        output[col, row] = draws[next]
        next += 1
    end
    return output
end
# function generate_set_of_graphs(matrix_size, matrix_ordering)
# """
# Returns set of graphs generated from the @matrix_ordering. In every succesive
# graph, single connection between points is added.
#
# NOTE: the function does not take the coordinates of the numbered vertices.
# """
# vetrices = matrix_size
# edges = matrix_ordering
# num_of_edges = size(edges)[2]
#
# set_of_graphs = [a=Graph(vetrices) for a=1:num_of_edges]
# edges_counter = zeros(Int, num_of_edges)
# edge_density = zeros(num_of_edges)
#
# k=1
# for k in range(1,stop=num_of_edges)~
# add_edge!(set_of_graphs[k], edges[1,k], edges[2,k]);
# edges_counter[k] = ne(set_of_graphs[k])
# edge_density[k] = edges_counter[k]/binomial(matrix_size,2)
#
# if k<num_of_edges # if is used to eliminate copying at last iteration
# set_of_graphs[k+1] = copy(set_of_graphs[k])
# end
# end
# return set_of_graphs, edge_density
# end
# function save_matrix_to_file(matrix, filename)
# """
# Saves given @matrix to the csv file with the name @filename. If there is no path
# added to the @filename, then file saved is in local folder.
# """
# open(filename, "w") do io
# writedlm(io, matrix, ',')
# end
# end
# =====
# Copied form Julia learning repo
"""
Returns the ordering of the @geometric_matrix entries as a [2 x n(n-1)/2]
matrix of (row, column) pairs. If @ascending is true the pairs are listed
from the lowest value to the highest, otherwise from the highest to the
lowest.

NOTE(review): picked entries are zeroed so they are not selected twice; with
`ascending=false` (the `findmin` branch) those zeros — and the zero diagonal —
immediately become the minima again, so the descending branch looks broken.
Confirm intended behavior before relying on it.
"""
function generate_matrix_ordering(geometric_matrix, ascending = true)
    matrix_size = size(geometric_matrix, 2)
    elemnts_above_diagonal = Int((matrix_size^2-matrix_size)/2)
    matrix_ordering = zeros(Int, 2,elemnts_above_diagonal)
    # Work on a copy: entries are destructively zeroed below.
    A = copy(geometric_matrix)
    (ascending) ? (method=findmax) : (method=findmin)
    for element in 1:elemnts_above_diagonal
        # Find the current extreme value and its position.
        minimal_value = method(A)
        # Get the coordinates (only 2 dimensions, because it is distance matrix)
        matrix_ordering[1,element] = Int(minimal_value[2][1])
        matrix_ordering[2,element] = Int(minimal_value[2][2])
        #
        # # Zero minval in A (above and below diagonal) so next minval can be found
        A[matrix_ordering[1,element], matrix_ordering[2,element]] = 0.0
        A[matrix_ordering[2,element], matrix_ordering[1,element]] = 0.0
    end
    # change from min to max order to the max to min order (? necessary ?)
    if ascending
        matrix_ordering = matrix_ordering[:,end:-1:1]
    end
    return matrix_ordering
end
"""
    function get_geometric_matrix(points, dimensions; save_as_file=false)

Creates a point cloud with 'points' points sampled from the
'dimensions'-dimensional Euclidean unit cube and computes the pairwise
distances between the points. The distance matrix may be saved to a CSV
file by setting 'save_as_file' to 'true'.

NOTE(review): `writedlm` belongs to the DelimitedFiles stdlib, which is not
imported in this file — confirm it is loaded elsewhere before using
`save_as_file=true`.
"""
function get_geometric_matrix(points, dimensions; save_as_file=false)
    point_cloud = generate_random_point_cloud(points,dimensions)
    geom_mat = generate_geometric_matrix(point_cloud)
    if save_as_file
        # File lands in the current working directory.
        open("geometric_matrix_points$(points)_dims$(dimensions).csv", "w") do io
            writedlm(io, geom_mat, ',')
        end
    end
    return geom_mat
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 7899 | import Plots.plot as plot
import Plots.plot! as plot!
import Plots.heatmap as heatmap
import Plots.@layout as @layout
# include("TopologyStructures.jl")
"""
    plot_square_heatmap(matrix, tick_step, tick_end;
                        plt_title="", yflip_matrix=true, plot_params=..., color_palete=:lightrainbow, add_labels=true)

Takes a matrix and plots it as a heatmap with axis ticks every 'tick_step'
up to 'tick_end'. Returns the handle of the heatmap plot.

# Keywords
- 'plt_title': title placed above the heatmap.
- 'yflip_matrix': when true (default) flips the y axis so row 1 is at the top.
- 'plot_params': named tuple with size/dpi/margins forwarded to Plots.
- 'color_palete': color gradient used by the heatmap.
- 'add_labels': when true, labels both axes with "Matrix index".
"""
function plot_square_heatmap(matrix, tick_step, tick_end;
    plt_title="", yflip_matrix=true,
    plot_params= (dpi=300,
        size=(900,800),
        lw=1,
        thickness_scaling=1,
        top_margin= 0,
        left_margin=[0 0],
        bottom_margin= 0
    ),
    color_palete=:lightrainbow,
    add_labels=true)
    heat_map = heatmap(matrix, color=color_palete,
        title=plt_title,
        size=plot_params.size, dpi=plot_params.dpi,
        ticks=0:tick_step:tick_end);
    # Flip so that matrix row 1 appears at the top, like printed matrices.
    yflip_matrix && plot!( yflip = true,);
    if add_labels
        xlabel!("Matrix index")
        ylabel!("Matrix index")
    end
    return heat_map
end
#%%
"""
    row_plot(bd_plots::Dict; base_h=800, base_w=800, kwargs...)

Plots all the plots from the input dictionary 'bd_plots' in a single row
('layout=(1,n)', where 'n' is the total number of plots). Plots are looked
up under the keys "β1", "β2", ..., "βn".

By default the resulting figure is 'base_h' high and 'n * base_w' wide
(both default to 800, not 600 as previously documented).

NOTE(review): the margin defaults use 'mm', which comes from Measures.jl
(re-exported by Plots) — assumed to be in scope here.
"""
function row_plot(bd_plots::Dict;base_h = 800, base_w = 800,
    top_margin= 10mm,
    left_margin=[10mm 10mm],
    bottom_margin= 10mm,
    kwargs...)
    total_dims = length(bd_plots)
    all_keys = keys(bd_plots)
    # Gather the plots in β1..βn order.
    all_plts = tuple()
    for k = 1:total_dims
        all_plts = (all_plts..., bd_plots["β$(k)"])
    end
    nice_plot = plot(all_plts...,
        layout=(1,total_dims),
        size=(total_dims*base_w,base_h),
        # left_margin=left_margin,
        # top_margin=top_margin,
        # bottom_margin=bottom_margin,
        thickness_scaling=2,
        margin=2mm,
        kwargs...)
    return nice_plot
end
#%%
"""
    plotimg(matrix_to_plot, cut_off=false)

Display a matrix as a grayscale image. The values of the input matrix are
first shifted to be non-negative (when needed) and then normalized to the
range [0, 1].

If @cut_off is true, values above 256 are clamped to 256 before
normalization; otherwise values are normalized to the maximal value.

Relies on `colorview`/`Gray` from the Images.jl ecosystem.
"""
function plotimg(matrix_to_plot, cut_off = false)
    matrix_type = typeof(matrix_to_plot)
    min_val = findmin(matrix_to_plot)[1]
    int_types_arr = [Matrix{UInt8}; Matrix{UInt16}; Matrix{UInt32};
                     Matrix{UInt64}; Matrix{UInt128}; Matrix{Int8};
                     Matrix{Int16}; Matrix{Int32}; Matrix{Int64};
                     Matrix{Int128}]
    float_types_arr = [Matrix{Float16}; Matrix{Float32}; Matrix{Float64}]
    if min_val < 0
        matrix_to_plot = shift_to_non_negative(matrix_to_plot)
    end
    max_val = findmax(matrix_to_plot)[1]
    if max_val > 256 && cut_off
        # Fix: assigning a scalar to a vector of positions requires
        # broadcasting (`.=`); the plain `=` raised an error.
        matrix_to_plot[findall(x -> x > 256, matrix_to_plot)] .= 256
    end
    if in(matrix_type, int_types_arr)
        matrix_to_plot = normalize_to_01(matrix_to_plot)
    elseif in(matrix_type, float_types_arr)
        # Fix: `normalize_to_01` takes only keyword arguments; the original
        # positional call `normalize_to_01(matrix_to_plot, max_val)` was a
        # MethodError.
        matrix_to_plot = normalize_to_01(matrix_to_plot; use_factor = true, norm_factor = max_val)
    end
    return colorview(Gray, matrix_to_plot)
end
#%%
"""
    plot_image_analysis(plots_set; description::NamedTuple, original_img, kwargs...)

Takes a set of plots and puts them in a 2-column layout. If 'description' is
given, adds an entry with the data-processing description. If 'original_img'
is given, it is displayed next to the description field. 'kwargs' are plot
properties.

NOTE(review): this function appears to be unfinished experimental scaffolding
and is not callable as written — see the inline notes below.
"""
function plot_image_analysis(plots_set; description::NamedTuple, original_img, kwargs...)
    # NOTE(review): `kwargs.keys()` is not Julia syntax — should be `keys(kwargs)`.
    kwarg_keys = kwargs.keys()
    # NOTE(review): `original` is undefined; presumably `original_img` was meant.
    (!isempty(original)) ? (orig_img_flag = true) : (orig_img_flag = false)
    (!isempty(description)) ? (desc_flag = true) : (desc_flag = false)
    l = @layout [a{0.2w} [grid(3,3) b{0.2h}]]
    total_plot_sets = 7
    total_cols = 2
    total_rows = ceil(Int,total_plot_sets/total_cols)
    if orig_img_flag || desc_flag
        total_rows +=1
    end
    height_unit = 1/total_rows
    # NOTE(review): `matrix` below is never used.
    matrix = [1 2 3;
        4 5 6;
        7 8 9]
    # NOTE(review): `l` is rebound twice below; only the last layout matters.
    l = @layout [a{0.4w,} b{0.4w,};
        # grid(1,4);
        # grid(1,4);
        # grid(1,4);
        # grid(1,4);
    ]
    # [grid(2,2) grid(2,2)]]
    # [a [grid(4,2) b]]]
    data = [rand(10, 4), rand(11, 4)]
    l = @layout [a{0.4w} b
        c d e f
        c d e f
        c d e f
        c d e f
        c d e f]
    ref = plot(grid=false,
        axis=false,
        layout = l,
        legend = false,
        # seriestype = [:scatter :path],
        dpi=300,
        size=(900,1200),
    )
    ref.series_list
    p2 = plot!(ref.subplots[18],rand(10, 1),seriestype = :scatter,axis=true,grid=true, title="")
    p2 = plot!(ref.subplots[21],rand(10, 10),seriestype = :heatmap, legend=true, xlabel="index", ylabel="index")
    # NOTE(review): Julia arrays are 1-based — `subplots[0]` throws BoundsError.
    annotate!(ref.subplots[0], 0, 0, "my text", :red)
    # NOTE(review): `p1` is undefined; this line has no effect even if it were.
    p1.subplots
    # color scheme
end
# TODO add depreciation for this function
# """
# get_all_plots_from_set(orig_matrix::TopologyMatrixSet; name_prefix="")
#
# Takes a collection of matrix computed for topological analysis and creates set
# of their heatmaps and related Betti curves.
#
# """
# function get_all_plots_from_set(orig_matrix::TopologyMatrixSet; name_prefix="")
# # ===
# # Get heatmaps
# original_heatmaps_set = TopologyMatrixHeatmapsSet(orig_matrix)
# # patched_heatmaps_set = TopologyMatrixHeatmapsSet(patched_matrix)
#
# # ===
# # Get Betti plots
# original_bettis = TopologyMatrixBettisSet(orig_matrix)
# original_bettis_plots = TopologyMatrixBettisPlots(original_bettis)
# # patched_bettis_plots = TopologyMatrixBettisPlots(patched_bettis)
#
# mat_size = size(orig_matrix.ordered_matrix,1)
# common_plots_set = Any[]
# for k = 1:size(orig_matrix.description_vector,1)
# matrix_type = orig_matrix.description_vector[k]
#
#
# # ===
# # Common plot
# common_plot1 = plot(original_heatmaps_set.heatmap_plots_set[k],
# original_bettis_plots.betti_plots_set[k],
# layout=(1,2), size=(800,400))
# plot!(common_plot1, title = matrix_type*"_r$(orig_matrix.ranks_collection[k])")
# # met_par.do_dsiplay && display(common_plot1)
#
# push!(common_plots_set, common_plot1)
# end
#
# # load image
# file_path = orig_matrix.params.img_path*orig_matrix.params.file_name
# if isfile(file_path)
# img1_gray = Gray.(load(file_path))
# additional_plot = plot(img1_gray, legend = false);
# else
# # TODO Change empty plot for plot with properties
# additional_plot = plot(legend = false);
# end
#
# parameters_list_plot = plot()
# first_plot = plot(additional_plot, parameters_list_plot)
#
# plt_size = size(common_plots_set,1)
#
# all_plot1 = plot(additional_plot,
# common_plots_set[1], # original matrix
# common_plots_set[2], # original reordered- highest values located next to diagonal
# common_plots_set[3], # max pooling of values in subsquares, original matrirx
# common_plots_set[4], # max pooling of values in subsquares, reorganized matrix
# common_plots_set[5], # renumbered max pooling of values in subsquares, reorganized matrix
# common_plots_set[6], # renumbered max pooling of original matrix
# common_plots_set[7], # reordered renumbered max pooling of original matrix
# layout=(plt_size÷2+1,2), size=(1200*2,plt_size÷2*400))
# return all_plot1
# end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 7271 | # Set of functions
#
# After matrices generation, Betti curves will be generated
# For better optimization, operations on indices should be done first
# TODO Check if the threshold of values is applied here and if it has some
# consequence on results
using Eirene
using Random
include("MatrixToolbox.jl")
# Container for the parameters and bookkeeping of a matrix-substitution
# experiment (matrix size, dimensionality, substituted source/target points
# and the processing steps).
struct PlottingData
    mat_size::Int64            # size of the (square) matrix
    dim::Int                   # dimensionality of the underlying point cloud
    src_pts_number::Int        # number of source points to copy rows from
    trgt_pts_number::Int       # number of target points per source
    src_points::Vector{Int}    # indices of the source points
    trgt_points::Array{Int}    # indices of the target points (per source)
    targets::Vector{Int}       # processing steps (cumulative target counts)
    # Constructor for input data; placeholders are used for the point sets.
    function PlottingData(mat_size::Int, dim::Int, src_pts_number::Int,
        trgt_pts_number::Int)
        steps_set = [0]
        src_points = [0]
        trgt_points = [0]
        new(mat_size::Int, dim::Int, src_pts_number::Int,
            trgt_pts_number::Int, src_points,
            trgt_points, steps_set)
    end
    # Full constructor; `trgt_sptep` sets the stride of the processing steps
    # (0 means a single step covering all targets).
    function PlottingData(mat_size::Int, dim::Int, src_pts_number::Int,
        trgt_pts_number::Int, src_points::Vector{Int},
        trgt_points::Array{Int}, trgt_sptep::Int)
        if trgt_sptep == 0
            steps_set = [trgt_pts_number]
        else
            steps_set = collect(1:trgt_sptep:trgt_pts_number)
            # Make sure that all points are used
            isempty(findall(x->x==trgt_pts_number, steps_set)) && push!(steps_set, trgt_pts_number)
        end
        new(mat_size::Int, dim::Int, src_pts_number::Int,
            trgt_pts_number::Int, src_points::Vector{Int},
            trgt_points::Array{Int}, steps_set)
    end
end
"""
    get_replacing_points(mat_size, src_pts_number, trgt_pts_number)

Draw `src_pts_number` distinct source indices and, for each, `trgt_pts_number`
distinct target indices from `1:mat_size` (all mutually distinct). Returns
`(src_points, trgt_points)` where `trgt_points` is a
`trgt_pts_number × src_pts_number` matrix with one column per source.
"""
function get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
    # Every source plus its targets must fit into the available indices.
    needed = src_pts_number + src_pts_number * trgt_pts_number
    needed > mat_size && error("Too many points to substitute!")
    # A random permutation guarantees all drawn indices are distinct.
    drawn = randcycle(mat_size)
    src_points = drawn[1:src_pts_number]
    trgt_points = reshape(drawn[(src_pts_number + 1):needed],
        trgt_pts_number, src_pts_number)
    return src_points, trgt_points
end
# ===
"""
    replace_matrix_rows(matrix, srcs, trgts)

Return a copy of `matrix` in which the rows (and, symmetrically, columns)
at `trgts` are replaced by the row(s) taken at `srcs`.
"""
function replace_matrix_rows(matrix, srcs, trgts)
    # Fix: the original referenced the undefined names `pt_src` and
    # `trgt_points` instead of the actual parameters.
    replacement_row = get_row(matrix, srcs)
    new_matrix = set_row(matrix, trgts, replacement_row)
    return new_matrix
end
function get_row(matrix, pt_src)
    # Copy of the `pt_src`-th row, with the row dimension dropped.
    return collect(selectdim(matrix, 1, pt_src))
end
# Overwrite row and column `pt_trgt` of `matrix` with `replacement_row`,
# zeroing its diagonal entry first. Returns the mutated matrix.
#
# NOTE(review): this also mutates the caller's `replacement_row`
# (its `pt_trgt`-th entry is set to 0). When called repeatedly via the
# multi-target `set_row` below, those zeros accumulate across targets —
# confirm whether that accumulation is intentional.
function set_row!(matrix::Array, pt_trgt::Int, replacement_row::Array)
    @debug "set_row! func"
    replacement_row[pt_trgt] = 0
    matrix[pt_trgt, :] .= replacement_row
    matrix[:, pt_trgt] .= replacement_row
    return matrix
end
# Non-mutating variant for a single target: works on a copy of `matrix`
# (but see the note above — `replacement_row` is still mutated).
function set_row(matrix::Array, pt_trgt::Int, replacement_row::Array)
    @debug "set_row func"
    new_matrix = copy(matrix)
    return set_row!(new_matrix, pt_trgt, replacement_row)
end
# Non-mutating variant for several targets, applied sequentially.
function set_row(matrix::Array, pt_trgt::Array, replacement_row::Array)
    @debug "set_row func"
    new_matrix = copy(matrix)
    for point in pt_trgt
        new_matrix = set_row!(new_matrix, point, replacement_row)
    end
    return new_matrix
end
# Reorder `matr` by swapping every row whose first-column value matches a
# source point's first-column value towards the end of the matrix.
# Returns the reorganized copy.
function matrix_organization(matr, src_points, trgt_points)
    working_matrix = copy(matr)
    mat_size = size(matr, 1)
    src_number = size(src_points,1)
    # NOTE(review): `swapping_sources` is never used.
    swapping_sources = Any[]
    step = 0
    matrix_indices = CartesianIndices((1:mat_size, 1:mat_size))
    matrix_indices = findall(x->x[1]<x[2], matrix_indices)
    sorted_values = matr[matrix_indices]
    # NOTE(review): `ordered_indices` is computed but never used, and it
    # indexes `sorted_values` (length n(n-1)/2) with only 1:mat_size positions.
    ordered_indices = sort!([1:mat_size;],
        by=i->(sorted_values[i],matrix_indices[i]))
    # NOTE(review): this `sort` result is discarded.
    sort(matr[:,1])
    # matr[src_points[src_pt],1]
    for src_pt = 1:src_number
        # find all rows whose first-column value equals the source's
        target_set = findall(x-> x==matr[src_points[src_pt],1], matr[:,1])
        # swap all equivalents towards the end of the matrix
        for tragt = target_set
            swap_rows!(working_matrix, tragt, mat_size-step)
            step +=1
        end
    end
    return working_matrix
end
# matrix = ordered_matrices_collection[15]
# Swap rows (and, to preserve symmetry, columns) `src_row_num` and
# `trgt_row_num` of `matrix` in place.
function swap_rows!(matrix, src_row_num, trgt_row_num)
    # Back up both rows before any mutation.
    src_backup = copy(matrix[src_row_num,:])
    trgt_backup = copy(matrix[trgt_row_num,:])
    # First exchange the four entries at the crossings of the two rows and
    # columns so that the diagonal entries travel with their rows.
    matrix[src_row_num, trgt_row_num] = matrix[trgt_row_num, trgt_row_num]
    matrix[trgt_row_num, src_row_num] = matrix[src_row_num,src_row_num]
    matrix[src_row_num,src_row_num] = trgt_backup[src_row_num]
    matrix[trgt_row_num, trgt_row_num] = src_backup[trgt_row_num]
    # Then swap the (already adjusted) rows...
    src_backup = copy(matrix[src_row_num,:])
    matrix[src_row_num,:] .= matrix[trgt_row_num, :]
    matrix[trgt_row_num, :] .= src_backup
    # ...and mirror them onto the corresponding columns to keep symmetry.
    matrix[:, src_row_num] = matrix[src_row_num,:]
    matrix[:, trgt_row_num] = matrix[trgt_row_num,:]
end
function swap_rows(matrix, src_row_num, trgt_row_num)
    # Non-mutating variant: operate on a copy and return it.
    swapped = copy(matrix)
    swap_rows!(swapped, src_row_num, trgt_row_num)
    return swapped
end
# Run the full matrix-ordering analysis pipeline for the experiment described
# by `test_data`: generate a distance matrix, substitute points, order the
# matrices, compute Betti curves and plot heatmaps with the curves.
#
# NOTE(review): the default `generation_function=get_geom_matrix` is not
# defined in this file — possibly `get_geometric_matrix` was meant; confirm.
# NOTE(review): `plotting_data` is computed but not returned.
function ordering_matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
    mat_size = test_data.mat_size
    dim = test_data.dim
    src_pts_number = test_data.src_pts_number
    trgt_pts_number = test_data.trgt_pts_number
    trgt_steps = 0
    # Draw the points to substitute and build the matrix collections.
    src_points, trgt_points = get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
    distance_matrix = generation_function(mat_size, dim)
    distance_matrices_collection = get_dist_mat_collection(distance_matrix, src_points, trgt_points, trgt_steps)
    ordered_matrices_collection = get_ordered_set(distance_matrices_collection)
    bettis_collection = get_bettis_collection(ordered_matrices_collection)
    plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
    plotting_data = print_hmap_with_bettis(ordered_matrices_collection,
        bettis_collection, plot_data)
    return distance_matrices_collection, ordered_matrices_collection, bettis_collection, plot_data
end
# =================================
# Matrix modification functions
"""
    make_matrix_steps!(input_matrix, step_number; step_size=2)

Flatten, in place, the off-diagonal band formed by rows
`step_number:step_number+step_size-1` and all preceding columns of
`input_matrix` to the smallest value found in that band, mirroring the
change to keep the matrix symmetric.
"""
function make_matrix_steps!(input_matrix, step_number; step_size = 2)
    step_rows = step_number:(step_number + step_size - 1)
    leading_cols = 1:(step_number - 1)
    lowest = minimum(input_matrix[step_rows, leading_cols])
    input_matrix[step_rows, leading_cols] .= lowest
    input_matrix[leading_cols, step_rows] .= lowest
end
"""
    function add_step_to_matrix(input_matrix, last_components)

Takes a symmetric matrix 'input_matrix' and appends 2 columns and 2 rows such
that the resulting geometric object structure is bigger by 1 dimension.
'last_components' determines which entries in the matrix are used for closing
high dimensional simplices; it must be a collection of CartesianIndex values.

Returns the enlarged matrix together with the updated component list.
"""
function add_step_to_matrix(input_matrix, last_components)
    matrix_size = size(input_matrix,1)
    new_matrix = zeros(Int,matrix_size +2, matrix_size+2)
    # Embed the original matrix in the top-left corner.
    new_matrix[1:matrix_size,1:matrix_size] .= input_matrix
    min_closing_component = findmin(input_matrix[last_components])[1]
    # Two consecutive value ranges fill the new rows/columns.
    new_row1 = range(min_closing_component, length=matrix_size)
    new_row2 = range(findmax(new_row1)[1]+1, length=matrix_size)
    last = 2
    new_matrix[matrix_size+1,1:end-last] = new_row1
    new_matrix[matrix_size+2,1:end-last] = new_row2
    new_matrix[1:end-last,matrix_size+1] = new_row1
    new_matrix[1:end-last,matrix_size+2] = new_row2
    # Adjust last components: push the closing entries above every value
    # already present in the enlarged matrix.
    max_new_matrix = findmax(new_matrix)[1]
    new_matrix[last_components].=input_matrix[last_components].+(max_new_matrix-min_closing_component+1)
    # The bottom-right pair gets the two largest values overall.
    new_matrix[end-1,end ] = findmax(new_matrix)[1]+1
    new_matrix[end, end-1] = findmax(new_matrix)[1]
    # Position (CartesianIndex) of the maximal entry, despite the name.
    new_max_val = findmax(new_matrix)[2]
    new_component = copy(last_components)
    push!(new_component, new_max_val)
    push!(new_component, CartesianIndex(new_max_val[2], new_max_val[1]))
    return new_matrix, new_component
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 1230 | module TopologyPreprocessing
# MatrixOrganization.jl
export matrix_poling,
subsample_matrix,
add_random_patch
# MatrixProcessing.jl
export shift_to_non_negative,
normalize_to_01,
diagonal_symmetrize,
group_distances,
generate_indices,
reduce_arrs_to_min_len,
increase_arrs_to_max_len,
get_ordered_matrix,
group_distances,
generate_indices,
arr_to_vec,
cartesianInd_to_vec,
sort_indices_by_values,
set_values!
# BettiCurves.jl
export get_bettis,
normalise_bettis,
get_vectorized_bettis,
plot_bettis,
get_bettis_color_palete
# BarCodes.jl
export get_barcodes,
plot_barcodes,
plot_barcodes!,
get_birth_death_ratio,
get_barcode_lifetime,
get_barcode_max_lifetime,
boxplot_birth_death,
boxplot_lifetime,
get_barcode_max_db_ratios
include("MatrixOrganization.jl")
include("MatrixProcessing.jl")
include("BettiCurves.jl")
include("Barcodes.jl")
end # module
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 4597 | # # ======================
# # Example usage
# using CSV
# using Plots
#
# file_name = "sts_for_VP_test.csv"
# csv_matrix = CSV.read(file_name)[:,2:end]
# almost_not_csv_matrix = Matrix(csv_matrix)
#
# file_name2 = "spikes.csv"
# spikes = load_csv_file_to_array(file_name2)
#
# file_name3 = "th_chunks.csv"
# th_chunks = load_csv_file_to_array(file_name3)
#
# sts = generate_spike_matrix(spikes; th_chunks=th_chunks)
# VPd = [get_selfdist(s; n_chan=32, cost=60., dt=0.01) for s in sts]
#
#
# plot_set = Any[]
# for matrix in VPd
# push!(plot_set, heatmap(matrix, color=:Reds_9, colorbar=false, yflip = true))
# end
#
# plot(plot_set[1], plot_set[2], plot_set[3],
# plot_set[4], plot_set[5], plot_set[6],
# plot_set[7], plot_set[8], plot_set[9],
# layout=(1,9), size=(9*1200,1100), legend=false, colorbar=false)
"""
	get_selfdist(st_inp; n_chan=32, cost=60., dt=0.01)

Compute the matrix of pairwise Victor-Purpura spike distances between all
channels of a spike recording. Function copied from the Mikolaj SETCOmodel.

Inputs:
- `st_inp`: [2 x N] array; row 1 holds the index of the neuron generating each
  spike, row 2 the spike time (N = total number of spikes).
- `n_chan`: number of neurons (default: 32).
- `cost`: cost parameter for the VP spike distance, in ms (default: 60 ms).
- `dt`: simulation timestep, in ms (default: 0.01 ms -> 100 kHz).

Output:
- `pc`: [n_chan x n_chan] matrix of pairwise VP spike distances.
"""
function get_selfdist(st_inp; n_chan=32, cost=60., dt=0.01)
    # Split the flat spike list into one spike-time vector per channel.
    trains = Any[]
    for ch in 1:n_chan
        push!(trains, st_inp[2, findall(x -> x == ch, st_inp[1, :])])
    end

    # Pairwise VP distance; the cost is rescaled by the simulation timestep.
    pc = zeros(n_chan, n_chan)
    scaled_cost = dt / cost
    for a in 1:n_chan, b in 1:n_chan
        pc[a, b] = spkd(trains[a], trains[b], scaled_cost)
    end
    return pc
end
# TODO Add test with MATLAB code run for some spike train and compare with
# results from this
"""
	spkd(s1, s2, cost)

Victor-Purpura spike distance between two spike-time vectors.

Fast implementation (faster than the neo & elephant python packages); a direct
port of http://www-users.med.cornell.edu/~jdvicto/pubalgor.html. The code was
translated Fortran -> Matlab -> Python -> Julia and verified against the
MATLAB implementation; all credit goes to the authors of the original code.

Input:
- `s1`, `s2`: pair of vectors of spike times (same units as `cost`).
- `cost`: cost parameter for computing the Victor-Purpura spike distance.

Output:
- the VP spike distance.
"""
function spkd(s1, s2, cost)
    n1 = length(s1)
    n2 = length(s2)

    # Degenerate costs have closed-form answers: with zero cost only
    # insertion/deletion counts matter; with infinite cost no spike can be
    # shifted, so every spike is inserted/deleted.
    if cost == 0
        return abs(n1 - n2)
    elseif cost == Inf
        return n1 + n2
    end

    # Dynamic-programming table; the first row/column hold the cost of
    # inserting the leading spikes of either train.
    scr = zeros(n1 + 1, n2 + 1)
    scr[:, 1] = 0:n1
    scr[1, :] = 0:n2
    for i = 2:(n1 + 1), j = 2:(n2 + 1)
        drop_s1 = scr[i-1, j] + 1
        drop_s2 = scr[i, j-1] + 1
        shift = scr[i-1, j-1] + cost * abs(s1[i-1] - s2[j-1])
        scr[i, j] = min(drop_s1, drop_s2, shift)
    end
    return scr[end, end]
end
"""
	generate_spike_matrix(spikes; th_chunks=[[0,0]], val_range=20:52)

Generates matrices of the form [2 x N] with spike times and indices of
neurons: N is the number of spikes found, row 1 holds the neuron (column)
index, row 2 the spike time (row index). Each resulting matrix is time sorted.

The input may be split into fragments by setting 'th_chunks' to a matrix whose
row k gives the start index `th_chunks[k,1]` and end index `th_chunks[k,2]` of
the k'th fragment; one matrix per fragment is returned.
"""
function generate_spike_matrix(spikes; th_chunks=[[0,0]], val_range=20:52)
    if th_chunks == [[0,0]]
        # Default sentinel: use one chunk covering all rows of `spikes`.
        # Fixed: the original mutated the default value in place
        # (`th_chunks[1,1] = 1`), which tried to store an Int into a
        # Vector{Vector{Int}} and crashed; build a fresh 1x2 matrix instead.
        th_chunks = [1 size(spikes, 1)]
    end

    spike_train_simplified = Any[]
    for i = 1:size(th_chunks, 1)
        # Select the rows of this chunk, restricted to the channel columns.
        syllable_spikes = spikes[th_chunks[i, 1]:th_chunks[i, 2], val_range]

        # Locate every spike (entries equal to 1).
        all_spikes = findall(x -> x == 1, syllable_spikes)

        # Convert to a [2 x N] matrix: row 1 = neuron (column) index,
        # row 2 = spike time (row index).
        total_spikes = length(all_spikes)
        sorted_spikes = zeros(Int, 2, total_spikes)
        for k = 1:total_spikes
            sorted_spikes[1, k] = all_spikes[k][2]
            sorted_spikes[2, k] = all_spikes[k][1]
        end

        # Sort the spikes chronologically before storing.
        push!(spike_train_simplified, sorted_spikes[:, sortperm(sorted_spikes[2, :])])
    end
    return spike_train_simplified
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 5963 | # Module taken from: https://github.com/alexyarosh/hyperbolic
using Plots
using Eirene
using Ripserer
using Statistics
# compute the persistent betti numbers of the Vietoris-Rips complex given by the distance matrix `matr`
# if mintime, maxtime and numofsteps are specified -- returns a `numofsteps x maxdim` array
# if either of the keyword arguments is not specified or is set to Inf, returns `maxdim` arrays for method=:eirene, or error for :ripser
# function bettis(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, method=:ripser)
# if (method == :ripser) || (method == :Ripser)
# if VERSION < v"0.7.0"
# error("Ripser requires at least Julia v 0.7.0")
# end
# return bettis_ripser(matr, maxdim, mintime=mintime, maxtime=maxtime, numofsteps=numofsteps)
# elseif (method == :eirene) || (method == :Eirene)
# return bettis_eirene(matr, maxdim, mintime=mintime, maxtime=maxtime, numofsteps=numofsteps)
# else
# error("Method $(method) is not supported. Supported methods are: method=:eirene, method=:ripser")
# end
# end
"""
	bettis_ripser(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)

Compute persistent Betti curves of the Vietoris-Rips complex of the distance
matrix `matr` with Ripser, sampled on `numofsteps` equal steps of the
filtration interval [mintime, maxtime]. All three range parameters must be
given explicitly. Returns a `numofsteps x maxdim` array.
"""
function bettis_ripser(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)
    # All three range parameters are mandatory: the ±Inf sentinel defaults
    # cannot be used to size the output array below.
    # Fixed: the original guard compared `maxtime` and `numofsteps` against
    # -Inf, which never matches their +Inf defaults, so defaulted calls
    # slipped past the check and crashed later at `zeros(Inf, ...)`.
    if !isfinite(mintime) || !isfinite(maxtime) || !isfinite(numofsteps)
        error("To use Ripser, specify parameters mintime, maxtime, numofsteps")
    end

    # Persistence intervals up to `maxdim`, truncated at `maxtime`.
    r = ripser(matr, dim_max = maxdim, threshold = maxtime)

    int_length = maxtime - mintime
    step_length = int_length / numofsteps

    # Turn each persistence interval into +1 increments of the Betti curve
    # over the filtration steps the interval spans.
    betts = zeros(numofsteps, maxdim)
    for dim = 1:maxdim
        ints = r[dim+1]
        for intl in ints
            st = Int(ceil((intl[1] - mintime) / step_length))
            if intl[2] == Inf
                # Features that never die persist to the last step.
                fin = numofsteps
            else
                fin = Int(ceil((intl[2] - mintime) / step_length))
            end
            betts[st:fin, dim] = map(x -> x + 1, betts[st:fin, dim])
        end
    end
    return betts
end
#
# # Original function returns 2 different types of betti curves. If no default
# # value parameters is given, it returns vector of matrices. If num of steps is
# # given, then it return matrix maxdim x numsteps.
# function bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)
# c = eirene(matr, minrad = mintime, maxrad= maxtime, numrad= numofsteps, maxdim=maxdim)
#
# int_length = maxtime-mintime
# step_length= int_length/numofsteps
#
# if (mintime == -Inf) || (maxtime == Inf) || (numofsteps == Inf)
# # return [betticurve(c, dim=maxdim) for d=1:maxdim]
# return hcat([betticurve(c, dim=d)[:,2] for d=1:maxdim]...)
# end
#
# betts = zeros(numofsteps, maxdim)
# # For every dimension compute betti curve
# for dim=1:maxdim
# bet = betticurve(c, dim=dim)
#
# #for every element in betti curve return betti value if index is positive
# for i=1:size(bet,1)
# b = bet[i,:]
# ind = Int(ceil((b[1]-mintime)/step_length))
# if ind > 0
# betts[ind,dim]=b[2]
# else
# betts[1,dim]=b[2]
# end
# end
# end
# return betts
# end
# average betti numbers over arrs
# assuming arrs is an array of arrays, where each arrs[j] is the same size
# Average Betti numbers over `arrs`, assumed to be an array of equally sized
# arrays (one per run); entry (i, d) of the result is the mean over runs of
# step i in dimension d. A multi-column input is returned unchanged.
function average_bettis(arrs; maxdim=-1)
    # Already a multi-column array, not a collection of runs: nothing to do.
    if size(arrs, 2) > 1
        return arrs
    end
    dims_to_use = maxdim == -1 ? size(arrs[1], 2) : maxdim
    steps = size(arrs[1], 1)
    averaged = zeros(steps, dims_to_use)
    for step = 1:steps, dim = 1:dims_to_use
        averaged[step, dim] = mean([arrs[run][step, dim] for run = 1:length(arrs)])
    end
    return averaged
end
# compute standard deviation of betti numbers over arrays in arrs
# assuming arrs is an array of arrays, where each arrs[j] is the same size
# Standard deviation of Betti numbers over `arrs`, assumed to be an array of
# equally sized arrays (one per run). A multi-column input is not a collection
# of runs, so a zero array of matching shape is returned.
function std_bettis(arrs; maxdim=-1)
    dims_to_use = maxdim == -1 ? size(arrs[1], 2) : maxdim
    steps = size(arrs[1], 1)
    deviations = zeros(steps, dims_to_use)
    if size(arrs, 2) > 1
        return deviations
    end
    for step = 1:steps, dim = 1:dims_to_use
        deviations[step, dim] = std([arrs[run][step, dim] for run = 1:length(arrs)])
    end
    return deviations
end
# plot average curves at values `xval`, with averages given by `means` and standard deviations given by `std`
# Plot average curves `means` at `xvals`; when `ribbon` is set, shade ±`stds`
# around each mean curve.
function plot_averages(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)
    if !ribbon
        return plot(xvals, means, labels=label, linestyle=linestyle, c=color)
    end
    return plot(xvals, means, ribbon=stds, fillalpha=.3, labels=label, linestyle=linestyle, color=color)
end
# In-place variant of `plot_averages`: adds the curves to the current plot.
function plot_averages!(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)
    if !ribbon
        return plot!(xvals, means, labels=label, linestyle=linestyle, c=color)
    end
    return plot!(xvals, means, ribbon=stds, fillalpha=.3, labels=label, linestyle=linestyle, color=color)
end
# Load saved Betti data from `filename` and return the first stored array.
# NOTE(review): dictionary iteration order is unspecified; this assumes the
# file holds exactly one entry.
function load_bettis(filename)
    saved = load(filename)
    for (_, stored_matrix) in saved
        return stored_matrix
    end
end
# plot average curves at values `xval`, given that the bettis numbers are saved in `file`
# Plot average curves at `xvals` for dimension `dim`, with the Betti data read
# from `file`; when `ribbon` is set, shade ± one standard deviation.
function plot_averages(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)
    betti_data = load_bettis(file)
    avg_curve = average_bettis(betti_data)[:, dim]
    if !ribbon
        return plot(xvals, avg_curve, labels=label, linestyle=linestyle, c=color)
    end
    deviation = std_bettis(betti_data)[:, dim]
    return plot(xvals, avg_curve, ribbon=deviation, fillalpha=.3, labels=label, linestyle=linestyle, c=color)
end
# In-place variant of the file-based `plot_averages`: adds the curves to the
# current plot instead of creating a new one.
function plot_averages!(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)
    betti_data = load_bettis(file)
    avg_curve = average_bettis(betti_data)[:, dim]
    if !ribbon
        return plot!(xvals, avg_curve, labels=label, linestyle=linestyle, c=color)
    end
    deviation = std_bettis(betti_data)[:, dim]
    return plot!(xvals, avg_curve, ribbon=deviation, fillalpha=.3, labels=label, linestyle=linestyle, c=color)
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 48032 | # ==============================
# ======== Tested code ========
using Eirene
using Plots
using StatsPlots
#%%
function get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    """
    get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)

    Calls Eirene.betticurve for 'dim' in range from `min_dim` up to 'max_dim' and
    stacks the resulting Arrays into a vector.

    The returned value is a Vector of Arrays{Float64,2}. Each array is of size
    (n,2), where n is the maximal number of steps taken to compute the Betti
    curves of dimensions ranging from `min_dim` to `max_dim`. The first column
    of each array contains numbered steps, the second the Betti values.
    """
    bettis = Matrix{Float64}[]
    for d = min_dim:max_dim
        result = betticurve(results_eirene, dim = d)
        # Eirene returns an empty array when no cycles exist in dimension `d`;
        # substitute a zero curve shaped like the previous one so all returned
        # curves share the same size.
        # Fixed: the original indexed `bettis[d-1]`, which is out of bounds
        # whenever `min_dim != 1` (e.g. `bettis[0]` for min_dim = 0).
        if isempty(result) && !isempty(bettis)
            result = zeros(size(bettis[end]))
        end
        push!(bettis, result)
    end
    return bettis
end
# TODO add get_bettis_from_matrix, to wrap C= eirene...; get bettis
#%%
function normalise_bettis(bettis::Vector)
    """
    normalise_bettis(bettis::Vector)
    normalise_bettis(bettis::Array)

    Normalise the number of steps for Betti curves. 'bettis' can be either a
    vector of arrays (each array containing a Betti curve of one dimension) or
    an array containing a Betti curve of a single dimension.

    The first column (filtration steps) of every curve is divided by its
    maximum, mapping the step axis onto (0, 1].
    """
    @debug "Vector version"
    # Copy every curve individually: `copy(bettis)` alone is shallow, and the
    # in-place division below previously mutated the caller's matrices even
    # though the function name carries no `!`.
    norm_bettis = [copy(b) for b in bettis]
    max_dim = size(norm_bettis)[1]
    for d = 1:(max_dim)
        if !isempty(norm_bettis[d])
            norm_bettis[d][:, 1] /= findmax(norm_bettis[d][:, 1])[1]
        end
    end
    return norm_bettis
end
#%%
function normalise_bettis(bettis::Array)
    """
    Array method: normalise the step column (first column) of a single Betti
    curve so the step axis maps onto (0, 1].
    """
    @debug "Array version"
    scaled = copy(bettis)
    @debug "norm_bettis size :" size(scaled)
    if !isempty(scaled)
        scaled[:, 1] /= findmax(scaled[:, 1])[1]
    end
    return scaled
end
#%%
# function vectorize_bettis(betti_curves::Array{Matrix{Float64,2}})
function vectorize_bettis(betti_curves::Vector{Array{Float64,2}})
    """
    vectorize_bettis(betti_curves::Vector{Array{Float64,2}})

    Stack the value column (second column) of every Betti curve into a single
    Matrix{Float64}: one row per filtration step, one column per curve.
    """
    value_columns = [curve[:, 2] for curve in betti_curves]
    return hcat(value_columns...)
end
#%%
@deprecate vectorize_bettis(eirene_results::Dict, maxdim::Integer, mindim::Integer) vectorize_bettis(betti_curves)
# ===
#%%
function get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    """
    get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)

    Compute the Betti curves for dimensions `min_dim:max_dim` from an Eirene
    result and return them as a single matrix with one column per dimension.
    """
    curves = get_bettis(results_eirene, max_dim, min_dim = min_dim)
    return vectorize_bettis(curves)
end
# ==
#%%
function plot_bettis(bettis::Vector;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_bettis(bettis; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true, kwargs...)

    Creates a plot for the set of Betti curves stored in `bettis` and returns
    the handler to the plot.

    Keyword arguments:
    - `min_dim`: dimension of the first curve in `bettis` (used for labels and colours),
    - `use_edge_density`: normalise every curve's step axis to [0, 1],
    - `betti_labels`: label each curve "βp",
    - `default_labels`: add default axis labels when none are supplied.

    'kwargs' are plot parameters; some of the possible 'kwargs' are:
    - title::String
    - legend:Bool
    - size::Tuple{T, T} where {T::Number}
    - lw::Integer or linewidth:Integer
    (for more, see the Plots documentation).
    """
    max_dim = size(bettis, 1)
    all_dims = 1:max_dim
    # Fail fast when the requested minimal dimension exceeds what is stored.
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    # Line width: honour an explicit :lw/:linewidth kwarg, default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end

    # Create iterator for all loops
    all_iterations = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0

    if use_edge_density
        # Normalise every curve's step column to [0, 1].
        # NOTE(review): this division mutates the caller's arrays in place.
        for p = all_iterations
            max_step = findmax(bettis[p][:, 1])[1]
            bettis[p][:, 1] ./= max_step
        end
    end

    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # Draw one curve per dimension, colour-coded consistently across plots.
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "β$(p)")
        end
        plot!(bettis[index][:, 1], bettis[index][:, 2]; args...)
    end
    # Legend: an explicit kwarg wins; otherwise shown iff curves are labelled.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # Keep at least 3 units on the y axis so flat curves remain readable.
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
function plot_bettis(bettis::Array;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_bettis(bettis::Array; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true, kwargs...)

    Creates a plot for the Betti curves stored in `bettis` (a matrix with one
    row per filtration step and one column per dimension, as produced by
    `get_vectorized_bettis`) and returns the handler to the plot.

    'kwargs' are plot parameters; some of the possible 'kwargs' are:
    - title::String
    - legend:Bool
    - size::Tuple{T, T} where {T::Number}
    - lw::Integer or linewidth:Integer
    (for more, see the Plots documentation).
    """
    # NOTE(review): this formula assumes the column count equals
    # min_dim + dims + 1 — confirm it matches how the matrix was built.
    max_dim = size(bettis, 2)-1-min_dim
    all_dims = 1:max_dim
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    total_steps = size(bettis, 1)
    # X axis: normalised to [0, 1] or raw step numbers.
    # NOTE(review): the non-normalised range 0:total_steps has one element
    # more than the curves have rows, which will make `plot!` fail — confirm.
    if normalised
        x_vals = range(0, stop=1, length=total_steps)
    else
        x_vals = range(0, stop=total_steps)
    end
    # Line width: honour an explicit :lw/:linewidth kwarg, default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end

    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # Draw one curve per dimension, colour-coded consistently across plots.
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "β$(p)")
        end
        plot!(x_vals, bettis[:, index]; args...)
    end
    # Legend: an explicit kwarg wins; otherwise shown iff curves are labelled.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # Keep at least 3 units on the y axis so flat curves remain readable.
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
# ======= Untested code
# TODO add default kwargs paring function -> parse_kwargs()
function plot_all_bettis(bettis_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_all_bettis(bettis_collection; min_dim=1, betti_labels=true, default_labels=true, normalised=true, kwargs...)

    Overlay every Betti-curve matrix from `bettis_collection` in one plot,
    drawing each dimension in its standard colour at low opacity so the spread
    across simulations is visible. Returns the plot handler.
    """
    total_dims = size(bettis_collection[1],2)
    # Line width: honour an explicit :lw/:linewidth kwarg, default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    # Shared y-limit so all overlaid runs use the same scale.
    max_y_val = find_max_betti(bettis_collection)

    plot_ref = plot(; kwargs...)
    for b = 1:total_dims
        # Low alpha so overlapping runs show up as density.
        args = (lc = colors_set[b], linewidth = lw, alpha=0.12,label=false, ylims=(0,max_y_val))
        for bettis = bettis_collection
            betti_vals = bettis[:,b]
            total_steps = size(bettis, 1)
            # Step axis normalised to [0, 1] for every run.
            x_vals = range(0, stop=1, length=total_steps)
            plot!(x_vals, betti_vals; args...)
        end
    end
    plot!(legend=true)
    # Legend: an explicit kwarg wins; otherwise controlled by `betti_labels`.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Axis labels: explicit kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    return plot_ref
end
function find_max_betti(bettis_collection::Array)
    """
    find_max_betti(bettis_collection::Array)

    Returns the highest Betti curve value found in any array of the collection,
    across all dimensions.
    """
    # NOTE(review): `typeof(x) == Vector` is never true (`typeof` returns a
    # concrete type such as Vector{Matrix{Float64}}, not the UnionAll
    # `Vector`), so this branch is dead; `x isa Vector` was probably intended.
    # Left unchanged because activating it would alter results
    # (`vectorize_bettis` keeps only the value column).
    if typeof(bettis_collection) == Vector
        bettis_collection = vectorize_bettis(bettis_collection)
    end

    max_y_val = 0
    for betti_set in bettis_collection
        local_max = findmax(betti_set)[1]
        if local_max > max_y_val
            max_y_val = local_max
        end
    end
    return max_y_val
end
# ======= Untested code == end
#%%
function printready_plot_bettis(kwargs)
    """
    printready_plot_bettis(kwargs)

    Placeholder for a print-quality variant of `plot_bettis`; currently a
    no-op that always returns `nothing`.
    """
    return nothing
end
#%%
function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
    """
    get_bettis_color_palete(; min_dim=1, use_set=1)

    Generates the vector of colours used for Betti plots, keeping colouring
    consistent across all plots. `use_set` selects one of two predefined
    palettes; when `min_dim == 0` an extra colour is prepended for the β₀
    curve.
    """
    if use_set == 1
        # Greyscale tail appended after the hand-picked leading colours.
        cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
        if min_dim == 0
            colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
        else
            colors_set = []
        end
        max_RGB = 256
        colors_set = vcat(
            colors_set,
            [
                RGB(255 / max_RGB, 206 / max_RGB, 0 / max_RGB),
                RGB(248 / max_RGB, 23 / max_RGB, 0 / max_RGB),
                RGB(97 / max_RGB, 169 / max_RGB, 255 / max_RGB),
                RGB(163 / max_RGB, 0 / max_RGB, 185 / max_RGB),
                RGB(33 / max_RGB, 96 / max_RGB, 45 / max_RGB),
                RGB(4 / max_RGB, 0 / max_RGB, 199 / max_RGB),
                RGB(135 / max_RGB, 88 / max_RGB, 0 / max_RGB),
            ],
            cur_colors,
        )
    elseif use_set == 2
        # Fixed: this was `else` followed by a dangling `use_set == 2`
        # comparison (a no-op), so any value other than 1 silently selected
        # this palette; invalid values now raise an error below.
        cur_colors = get_color_palette(:auto, 1)
        cur_colors3 = get_color_palette(:lightrainbow, 1)
        cur_colors2 = get_color_palette(:cyclic1, 1)
        if min_dim == 0
            colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]]
        else
            colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]]
        end
        colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
    else
        throw(ArgumentError("use_set must be 1 or 2, got $use_set"))
    end

    return colors_set
end
# ==============================
# ======= Untested code =======
# using Measures
# using Plots.PlotMeasures
#
# # Source: https://github.com/JuliaPlots/Plots.jl/issues/897
# function setdefaultplottingparams(;upscale=2)
# #8x upscaling in resolution
# fntsm = Plots.font("sans-serif", pointsize=round(12.0*upscale))
# fntlg = Plots.font("sans-serif", pointsize=round(18.0*upscale))
# default(titlefont=fntlg, guidefont=fntlg, tickfont=fntsm, legendfont=fntsm)
# default(size=(800*upscale,600*upscale)) #Plot canvas size
# default(dpi=500) #Only for PyPlot - presently broken
# end
#%%
function plot_bettis_collection(bettis_collection,
    bett_num,
    max_rank;
    step = 1,
    show_plt = true,
    R = 0.0,
    G = 0.4,
    B = 1.0)
    """
    plot_bettis_collection(bettis_collection, bett_num, max_rank; step=1, show_plt=true, R=0., G=0.4, B=1.0)

    Plots a collection of Betti curves of rank 'bett_num'. Every successive
    curve has lower opacity than its predecessor. 'step' defines the stride
    between collection elements that are plotted. By default the plot is
    displayed after creation; this can be disabled by setting 'show_plt' to
    false.

    Colour of the plot can be set with the 'R', 'G', 'B' parameters.
    """
    step > 0 || error("Steps should be natural number!")
    bettis_total = size(bettis_collection, 1)
    # One RGBA row per curve: fixed hue, alpha decreasing with position.
    colors_set = zeros(Float64, bettis_total, 4)
    colors_set[:, 1] .= R
    colors_set[:, 2] .= G
    colors_set[:, 3] .= B
    max_betti = get_max_betti_from_collection(bettis_collection)
    @info "max_betti" max_betti
    x = 0
    y = bettis_total * 0.1
    # Linearly decreasing alpha values, normalised so the largest is 1.
    va_range = collect(range(bettis_total + x, y, length = bettis_total))
    colors_set[:, 4] .= va_range / findmax(va_range)[1]
    rgba_set = RGBA[]
    for k = 1:size(colors_set, 1)
        push!(
            rgba_set,
            RGBA(colors_set[k, 1], colors_set[k, 2], colors_set[k, 3], colors_set[k, 4]),
        )
    end

    plt_reference = plot(1, title = "Betti curves collection, rank $(bett_num)", label = "")
    for b = 1:step:bettis_total
        betti = bettis_collection[b]
        # Normalise the step axis of each curve to (0, 1].
        x_vals_1 = (1:size(betti[:, bett_num], 1)) / size(betti[:, bett_num], 1)
        plot!(x_vals_1, betti[:, bett_num], lc = rgba_set[b], label = "rank=$(max_rank-b)")
        plot!(ylim = (0, max_betti))
    end
    xlabel!("Normalised steps")
    ylabel!("Number of cycles")
    plot!(legend = true)

    show_plt && display(plt_reference)
    return plt_reference
end
#%%
function get_max_bettis(bettis)
    """
    get_max_bettis(bettis)

    Return the column-wise maxima of the Betti curves, i.e. the largest Betti
    value reached in every dimension.
    """
    return findmax(bettis, dims = 1)[1]
end
# TODO change name
# TODO check what for dim is used, change to min dim
# Return the largest single value found in any array of the collection.
# (`dim` is accepted for interface compatibility but is not used.)
function get_max_betti_from_collection(bettis_collection; dim = 1)
    highest = 0
    for curve in bettis_collection
        candidate = findmax(curve)[1]
        if candidate > highest
            highest = candidate
        end
    end
    return highest
end
#%%
function plot_and_save_bettis(bettis,
    plot_title::String,
    results_path::String;
    file_name = "",
    extension = ".png",
    do_save = true,
    do_normalise = true,
    min_dim = 0,
    max_dim = 3,
    legend_on = true,
    kwargs...)
    """
    plot_and_save_bettis(bettis, plot_title::String, results_path::String;
                            file_name="", extension=".png", do_save=true,
                            do_normalise=true, min_dim=0, max_dim=3,
                            legend_on=true, kwargs...)

    Plot the Betti curves given in `bettis` and return the figure handler.
    Optionally, if `do_save` is set, saves the figure under `results_path`; if
    `do_normalise` is set, the step axis is normalised before plotting.
    (`max_dim` is kept for interface compatibility.)
    """
    # Fixed: the original re-derived `bettis = get_bettis(eirene_results, max_dim)`
    # from an undefined variable `eirene_results`; the curves are already
    # supplied as the first argument.
    if do_normalise
        bettis = normalise_bettis(bettis)
    end
    # Fixed: `plot_bettis` takes the curves as its only positional argument;
    # the title and legend flag are forwarded as plot keywords.
    plot_ref = plot_bettis(bettis; min_dim = min_dim, legend = legend_on,
        title = plot_title, kwargs...)

    if do_save
        if isempty(file_name)
            file_name = plot_title * extension
        elseif isempty(findall(x -> x == extension[2:end], split(file_name, ".")))
            # Append the extension when the file name does not carry one yet.
            file_name *= extension
        end

        save_figure_with_params(
            plot_ref,
            results_path;
            extension = extension,
            prefix = split(file_name, ".")[1],
        )
    end
    return plot_ref
end
# TODO merge functions for getting betti curves
# Original function returns 2 different types of betti curves. If no default
# value parameters is given, it returns vector of matrices. If num of steps is
# given, then it return matrix maxdim x numsteps.
# """
# bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, mindim=1)
#
# Takes the `matr` and computes Betti curves up to `maxdim`. Return matrix only
# with betti curve values
#
#
# Function taken from: https://github.com/alexyarosh/hyperbolic
# """
#%%
@deprecate bettis_eirene(matr, maxdim; mintime = -Inf, maxtime = Inf, numofsteps = Inf, mindim = 1) get_bettis(results_eirene, max_dim; min_dim = 1)
#%%
function get_bettis_from_image(img_name,
    plot_params;
    file_path = "",
    plot_heatmaps = true,
    save_heatmaps = false,
    plot_betti_figrues = true)
    """
    get_bettis_from_image(img_name, plot_params; file_path="", plot_heatmaps=true, save_heatmaps=false, plot_betti_figrues=true)

    Computes Betti curves for the image file indicated by `img_name`. If the
    image is not symmetric, the elements below the diagonal are copied over
    the elements above the diagonal before processing.
    """
    file_n = split(img_name, ".")[1]
    img1_gray = Gray.(load(file_path * img_name))

    C_ij = Float64.(img1_gray)
    if !issymmetric(C_ij)
        img1_gray = symmetrize_image(img1_gray)
        C_ij = Float64.(img1_gray)
    end
    img_size = size(C_ij, 1)

    # ==========================================================================
    # =============================== Ordered matrix ===========================
    if size(C_ij, 1) > 80
        @warn "Running Eirene for big matrix: " img_size
        @warn "Eirene may have trobules with big matrices/images."
    end
    ordered_matrix = get_ordered_matrix(C_ij; assing_same_values = false)

    # ==========================================================================
    # ============================ Persistance homology ========================
    C = eirene(ordered_matrix, maxdim = 3, model = "vr")

    # ==========================================================================
    # ================================ Plot results ============================
    # TODO separate plotting from processing
    heat_map2 = nothing
    if plot_heatmaps
        full_ordered_matrix = get_ordered_matrix(C_ij; assing_same_values = false)
        heat_map2 = plot_square_heatmap(
            full_ordered_matrix,
            10,
            img_size;
            plt_title = "Order matrix of $(file_n)",
            plot_params = plot_params,
        )

        if save_heatmaps
            heatm_details = "_heatmap_$(file_n)"
            savefig(heat_map2, heatmaps_path * "ordering" * heatm_details)
        end
    end

    ref = nothing
    if plot_betti_figrues
        plot_title = "Betti curves of $(file_n), size=$(img_size) "
        figure_name = "betti_$(file_n)_n$(img_size)"
        # Fixed: stray ",;" in the original argument list.
        ref = plot_and_save_bettis(C,
            plot_title,
            figure_path;
            file_name = figure_name,
            plot_params = plot_params,
            do_save = false,
            extend_title = false,
            do_normalise = false,
            max_dim = 3,
            legend_on = true,
            min_dim = 1)
    end

    display(img1_gray)
    # Fixed: these displays previously ran unconditionally and raised
    # UndefVarError whenever the corresponding plotting flag was false.
    plot_heatmaps && display(heat_map2)
    plot_betti_figrues && display(ref)
end
# ===============================================
@deprecate get_bettis_from_image2(img_name;file_path = "",plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true)
@deprecate plot_and_save_bettis2(eirene_results, plot_title::String, results_path::String; file_name = "", extension = ".png", data_size::String = "", do_save = true, extend_title = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true) plot_and_save_bettis(bettis, plot_title::String, results_path::String; file_name = "", extension = ".png", do_save = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true, kwargs...)
#%%
function get_and_plot_bettis(eirene_results;
    max_dim = 3,
    min_dim = 1,
    plot_title = "",
    legend_on = false)
    # Compute, normalise and plot the Betti curves from an Eirene result.
    bettis = get_bettis(eirene_results, max_dim)
    norm_bettis = normalise_bettis(bettis)
    # Fixed: previously called the nonexistent `plot_bettis2` with a
    # positional title; route through `plot_bettis` and pass the title as a
    # plot keyword instead.
    plot_ref = plot_bettis(norm_bettis; min_dim = min_dim,
        legend = legend_on, title = plot_title)
    return plot_ref
end
#%%
function lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
    """
    lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)

    Quantise the values of the ordered matrix 'ordered_matrix' into
    'total_bins' coarse bins: every entry is replaced by the zero-based index
    of the highest bin threshold it reaches.
    """
    quantised = zeros(size(ordered_matrix))
    top = findmax(ordered_matrix)[1]
    bottom = findmin(ordered_matrix)[1]
    bin_width = top ÷ total_bins
    thresholds = bottom:bin_width:top
    # Later (higher) thresholds overwrite earlier assignments, so each entry
    # ends up labelled with the last threshold it clears.
    for bin = 1:total_bins
        @debug "First step threshold is $(thresholds[bin])"
        hits = findall(x -> (x >= thresholds[bin]), ordered_matrix)
        quantised[hits] .= bin - 1
    end
    @debug "Max_val in new matrix is " findmax(quantised)
    @debug "And should be " total_bins - 1
    return quantised
end
#%%
function average_bettis(bettis_matrix::Matrix; up_factor = 8)
    """
    average_bettis(bettis_matrix; up_factor=8)

    Average the Betti curves stored in 'bettis_matrix' across simulations.
    'bettis_matrix' indexes simulations with its first index and ranks
    (dimensions) with the third. Because the number of samples may vary across
    simulations, every curve is first upsampled by a factor of 'up_factor' and
    the per-dimension mean and standard deviation are computed on the
    resampled curves.

    Returns the tuple `(avg_bettis, std_bettis)`.
    """
    simulations = size(bettis_matrix, 1)
    dimensions = size(bettis_matrix[1], 1)

    # The longest curve determines the common resampling grid.
    max_samples = 0
    for k = 1:simulations
        current_len = length(bettis_matrix[k][1][:, 1])
        if max_samples < current_len
            max_samples = current_len
        end
    end

    total_upsamples = (max_samples - 1) * up_factor + 1

    avg_bettis = zeros(total_upsamples, dimensions)
    std_bettis = copy(avg_bettis)
    resampled_bettis = zeros(simulations, total_upsamples, dimensions)

    # Resample every curve onto the common grid.
    for simulation = 1:simulations, betti = 1:dimensions
        resampled_bettis[simulation, :, betti] =
            upsample_vector2(bettis_matrix[simulation][betti][:, 2], total_upsamples)
    end

    # Mean and standard deviation across simulations, per dimension.
    for dimension = 1:dimensions
        avg_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
        # Fixed: the standard deviation was previously computed with `mean` as
        # well, so avg_bettis and std_bettis were always identical.
        std_bettis[:, dimension] = std(resampled_bettis[:, :, dimension], dims = 1)
    end
    return avg_bettis, std_bettis
end
#%%
# Resample `input_vector` to exactly `total_upsamples` points by fitting a
# Dierckx `Spline1D` over a normalised [0, 1] grid and evaluating it on a
# denser grid of the requested length.
function upsample_vector2(input_vector, total_upsamples)
    n_intervals = size(input_vector, 1) - 1
    coarse_grid = range(0, 1, length = n_intervals + 1)
    spline = Spline1D(coarse_grid, input_vector)
    dense_grid = range(0, 1, length = total_upsamples)
    return spline(dense_grid)
end
#%%
"""
    upsample_vector(input_vector; upsample_factor::Int=8)

Return `input_vector` resampled so that each original sampling interval is
split into `upsample_factor` intervals. New samples are interpolated with a
`Spline1D` (Dierckx) over a normalised [0, 1] grid.
"""
function upsample_vector(input_vector; upsample_factor::Int = 8)
    n_intervals = size(input_vector, 1) - 1
    n_out = upsample_factor * n_intervals + 1
    coarse_grid = range(0, 1, length = n_intervals + 1)
    spline = Spline1D(coarse_grid, input_vector)
    return spline(range(0, 1, length = n_out))
end
# =========--=======-========-==========-=======-
# From bettis areas
# Area under Betti curve functions
#%%
"""
    get_area_under_betti_curve(betti_curves; do_normalised=false)

Sum each Betti curve column-wise, i.e. return the discrete area under every
curve (as a `1 x k` matrix). With `do_normalised=true` each area is divided
by the number of steps in the curve.
"""
function get_area_under_betti_curve(betti_curves::Union{Matrix{Float64}, Array{Array{Float64,2}}};do_normalised::Bool=false)
    # A collection of per-dimension curves is first flattened to one matrix;
    # a plain matrix with at least two columns is used as-is.
    bettis_vector =
        size(betti_curves, 2) < 2 ? vectorize_bettis(betti_curves) : betti_curves

    areas = sum(bettis_vector, dims = 1)
    if do_normalised
        areas = areas ./ size(bettis_vector, 1)
    end
    return areas
end
# function get_area_under_betti_curve(C, min_dim, max_dim)
# """
# get_area_under_betti_curve(C, min_dim, max_dim)
#
# Computes the Betti curves and returns their area under curve.
# """
# all_bettis = get_bettis(C,max_dim, min_dim=min_dim)
# bettis_vector = hcat([all_bettis[k][:,2] for k=min_dim:max_dim]...)
# # @info sum(bettis_vector, dims=1)
#
#
# total_steps = size(bettis_vector,1)
#
# bettis_area = sum(bettis_vector, dims=1) ./ total_steps
# # @info bettis_area
# return bettis_area
# end
#%%
"""
    get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)

Compute the topology (Eirene) of every matrix in `dataset`, extract Betti
curves for dimensions `min_dim:max_dim`, and return the areas under those
curves — stacked into one matrix when `return_matrix` is true, otherwise as
a vector with one entry per input matrix.
"""
function get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
    areas_vector = Array[]
    for data = dataset
        @info "Computing topology."
        C = eirene(data, maxdim=max_dim,)
        matrix_bettis = get_bettis(C, max_dim, min_dim=min_dim)
        push!(areas_vector, get_area_under_betti_curve(matrix_bettis))
    end
    if return_matrix
        # BUG FIX: this previously hard-coded `k = 1:10`, which errored for
        # datasets with fewer than 10 elements and dropped results for more.
        return vcat(areas_vector...)
    else
        return areas_vector
    end
end
# struct TopologyData
# min_dim::Integer
# max_dim::Integer
#
# do_normalise::Bool=true
#
# betti_curves
# normed_bettis
# betti_areas::Matrix{Int}
#
# # Constructor for input data
# function TopologyData(my_matrix::Matrix, max_dim::Int; min_dim::Int, do_normalise::Bool=true)
# min_dim = min_dim
# max_dim = max_dim
#
# @info "Computing topology for maxdim =" max_dim
# C = eirene(my_matrix, maxdim=max_dim)
# betti_curves = get_bettis(C, max_dim, min_dim=min_dim)
# normed_bettis = normalise_bettis(betti_curves)
# betti_areas = get_area_under_betti_curve(betti_curves; do_normalised=do_normalise)
# end
# end
#%%
# Build a `TopologyData` record for every matrix in `dataset` and collect the
# results. The `get_curves` / `get_areas` / `get_persistence_diagrams` flags
# are part of the interface but are not consumed here (matching the original).
function get_dataset_topology(dataset;
                                min_dim::Integer=1,
                                max_dim::Integer=3,
                                get_curves::Bool=true,
                                get_areas::Bool=true,
                                get_persistence_diagrams::Bool=true,
                                do_normalise::Bool=true)
    collected_topologies = TopologyData[]
    for some_matrix in dataset
        entry = TopologyData(some_matrix, max_dim, min_dim=min_dim, do_normalise=do_normalise)
        push!(collected_topologies, entry)
    end
    return collected_topologies
end
#%%
"""
    get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)

Build a boxplot of the areas under Betti curves: one box per dimension in
`min_dim:max_dim` (one column of `areas_matrix` each), coloured with the
shared Betti palette.
"""
function get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
    box_plot = StatsPlots.boxplot()
    palette = get_bettis_color_palete()
    for (column, dim) in enumerate(min_dim:max_dim)
        StatsPlots.boxplot!(box_plot, areas_matrix[:, column],
                            labels = "β$(dim)", color = palette[dim])
    end
    return box_plot
end
"""
    get_bettis_collection_from_matrices(ordered_matrices_collection; max_dim::Int=3, min_dim::Int=1)

Run Eirene on every matrix in `ordered_matrices_collection` (Vietoris-Rips
model) and collect the reshaped Betti curves for dimensions up to `max_dim`.
"""
function get_bettis_collection_from_matrices(ordered_matrices_collection; max_dim::Int=3, min_dim::Int=1)
    bettis_collection = Array[]
    for matrix = ordered_matrices_collection
        @debug "Computing Bettis..."
        # BUG FIX: the body referenced the undefined name `max_B_dim`
        # (a leftover from an older signature); the keyword is `max_dim`.
        eirene_geom = eirene(matrix, maxdim=max_dim, model="vr")
        # NOTE(review): `min_dim` is accepted but not forwarded to
        # `get_bettis`, mirroring the original call shape — confirm intent.
        bettis = reshape_bettis(get_bettis(eirene_geom, max_dim))
        push!(bettis_collection, bettis)
    end
    return bettis_collection
end
# =========--=======-========-==========-=======-
# Code from Points substitution:
# Compute series of betti curves
# function get_bettis_collection(ordered_matrices_collection; max_B_dim=3)
# bettis_collection = Array[]
#
# for matrix = ordered_matrices_collection
# @debug "Computing Bettis..."
# eirene_geom = eirene(matrix,maxdim=max_B_dim,model="vr")
#
# bettis = reshape_bettis(get_bettis(eirene_geom, max_B_dim))
# push!(bettis_collection, bettis)
# end
#
# return bettis_collection
# end
#
# # Plot series of betti curves with their heatmaps
# function reshape_bettis(bettis)
# bettis_count = size(bettis,1)
# output_betti = zeros(size(bettis[1],1), bettis_count)
#
# for betti = 1:bettis_count
# output_betti[:,betti] = bettis[betti][:,2]
# end
# return output_betti
# end
#
# function get_ord_mat_collection(matrix_collection)
# mat_size = size(matrix_collection[1],1)
# ordered_mat_coll = [zeros(Int, mat_size,mat_size) for k=1:length(matrix_collection)]
#
# size(matrix_collection)
# for matrix = 1:length(matrix_collection)
# ordered_mat_coll[matrix] = Int.(get_ordered_matrix(matrix_collection[matrix]))
# end
# return ordered_mat_coll
# end
#
#
#
#
#
# function print_hmap_with_bettis(ordered_matrices_collection, bettis_collection,
# plot_data::PlottingData)
# num_plots = size(ordered_matrices_collection,1)
# sources = 1:(plot_data.src_pts_number)
# targets = 1:(plot_data.trgt_pts_number)
# plot_set = Any[]
#
# max_betti = get_max_betti_from_collection(bettis_collection;dim=1)
#
# index = 1
# for src = 1:size(sources,1), trgt = 1:size(targets,1)
# # index = src * trgt
# ordered_geom_gr = ordered_matrices_collection[index]
# bettis = bettis_collection[index]
# title_hmap = "trgt:$(targets[trgt])_src:$(sources[src])_r:$(rank(ordered_geom_gr))"
# title_bettis = "gr_trg=$(targets[trgt])_src=$(sources[src])_steps=$(size(bettis,1))"
# push!(plot_set, make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti))
# index +=1
# end
#
# return plot_set
# end
#
# function make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti)
# # @debug "src" src
# # @debug "trgt" trgt
# hmap_plot = plot_square_heatmap(ordered_geom_gr, 10,size(ordered_geom_gr,1);plt_title = title_hmap)
# plot!(yflip = true,)
#
# bettis_plot_ref = plot(title=title_bettis);
# max_dim = size(bettis,2)
# for p = 1:max_dim
# x_vals = collect(1:size(bettis[:,1],1))./size(bettis[:,1])
#
# plot!(x_vals, bettis[:,p], label="\\beta_"*string(p));
# plot!(legend=true, )
# end
#
# plot!(ylim=(0,max_betti))
# plot!(xlim=(0,1))
# ylabel!("Number of cycles")
# xlabel!("Steps")
#
# final_plot = plot(hmap_plot, bettis_plot_ref, layout = 2,
# top_margin=2mm,
# left_margin=0mm,
# bottom_margin=2mm,
# size=(600,300))
# display(final_plot)
# return final_plot
# end
#
# # TODO BUG: substitution does not work- all the plots are the same
# function main_generation1()
# mat_size = 6
# dim = 80
# src_pts_number = 1
# trgt_pts_number = 2
# trgt_steps = 0
#
# src_points, trgt_points =
# get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
#
# matrix_collection =
# get_matrix_collection(mat_size, dim, src_points, trgt_points; trgt_step=trgt_steps)
#
# ordered_matrices_collection = get_ord_mat_collection(matrix_collection)
#
# bettis_collection = get_bettis_collection(ordered_matrices_collection)
#
#
# plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
# # plot_data = PlottingData2(mat_size , dim, )
#
# plotting_data = print_hmap_with_bettis(ordered_matrices_collection,
# bettis_collection, plot_data)
# end
#
#
# function get_geom_matrix(mat_size, dim)
# # TODO change the matrix collection shape to be a matrix, not a vector
# point_cloud = generate_random_point_cloud(mat_size, dim)
# matrix_collection = generate_geometric_matrix(point_cloud)
# # matrix_collection = get_ordered_matrix(matrix_collection; assing_same_values=true)
#
# return matrix_collection
# end
#
# function get_rand_matrix(mat_size, dim)
# matrix_collection = generate_random_matrix(mat_size)
# matrix_collection = get_ordered_matrix(matrix_collection; assing_same_values=true)
#
# return matrix_collection
# end
#
# # TODO Analyse zero point behaviour
# function get_dist_mat_collection(dist_matrix, src_points, trgt_points, trgt_steps; do_ordering=false)
# dist_matrix_backup = copy(dist_matrix)
# mat_size = size(dist_matrix,1)
# src_points_num = size(src_points,1)
# trgt_points_num = size(trgt_points,1)
# # ordered_mat_coll = [zeros(Int, mat_size,mat_size) for k=1:(src_points_num*trgt_points_num)]
# ordered_mat_coll = Array[]
#
# swapping_iterator = 0
#
# for srcs = 1:src_points_num
# # replacement_row = get_row(dist_matrix, src_points[srcs])
#
# for target = 1:trgt_points_num
# @debug "src:" src_points[srcs]
# @debug "trgt:" trgt_points[target, srcs]
# replacement_row = get_row(dist_matrix_backup, src_points[srcs])
# # dist_matrix_backup .=
# set_row!(dist_matrix_backup, trgt_points[target, srcs], replacement_row)
# # ordered_mat_coll[srcs * target] = copy(dist_matrix_backup)
# if do_ordering
# swap_rows!(dist_matrix_backup, trgt_points[target, srcs], mat_size-swapping_iterator)
# swapping_iterator +=1
# end
# push!(ordered_mat_coll, copy(dist_matrix_backup))
# end
# end
#
# return ordered_mat_coll
# end
#
# function get_ordered_set(distance_matrices_collection)
# result = copy(distance_matrices_collection)
#
# for matrix = 1:size(distance_matrices_collection,1)
# result[matrix] = get_ordered_matrix(distance_matrices_collection[matrix];assing_same_values=true )
# end
# return result
# end
#
# function matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
# mat_size = test_data.mat_size
# dim = test_data.dim
# src_pts_number = test_data.src_pts_number
# trgt_pts_number = test_data.trgt_pts_number
# trgt_steps = 0
#
# src_points, trgt_points = get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
# distance_matrix = generation_function(mat_size, dim)
#
# distance_matrices_collection = get_dist_mat_collection(distance_matrix, src_points, trgt_points, trgt_steps)
# ordered_matrices_collection = get_ordered_set(distance_matrices_collection)
# bettis_collection = get_bettis_collection(ordered_matrices_collection)
#
# plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
#
# plots_set = print_hmap_with_bettis(ordered_matrices_collection,
# bettis_collection, plot_data)
#
#
# return distance_matrices_collection, ordered_matrices_collection, bettis_collection, plot_data, plots_set
# end
#%%
# This does not belong here
function multiscale_matrix_testing(sample_space_dims = 3,
                                    maxsim = 5,
                                    min_B_dim = 1,
                                    max_B_dim = 3,
                                    size_start = 10,
                                    size_step = 5,
                                    size_stop = 50;
                                    do_random = true,
                                    control_saving = false,
                                    perform_eavl = false)
    """
    multiscale_matrix_testing(sample_space_dims = 3,
    maxsim=5,
    min_B_dim = 1,
    max_B_dim = 3,
    size_start = 10,
    size_step = 5,
    size_stop = 80; do_random=true)
    Function for testing the average number of cycles from geometric and random
    matrices.
    It is possible to save intermidiate results- for that, @control_saving must be
    set true.
    Performance of computation of Betti curves can be monitored, if the
    @perform_eavl is set too true. Bydefault, it is set to false.
    """
    # Number of Betti dimensions analysed (min_B_dim:max_B_dim inclusive).
    num_of_bettis = length(collect(min_B_dim:max_B_dim))

    # Random-matrix processing is only supported for a single sampling-space
    # dimension; disable it otherwise.
    if length(sample_space_dims) > 1
        @warn "Can not do random processing for multiple dimensions"
        do_random = false
    end

    # Accumulators for per-configuration statistics dictionaries.
    geom_mat_results = Any[]
    if do_random
        rand_mat_results = Any[]
        result_list = [geom_mat_results, rand_mat_results]
    else
        result_list = [geom_mat_results]
    end

    for sample_space_dim in sample_space_dims
        if !do_random
            @info "Sampling space size: " sample_space_dim
        end

        # Matrix sizes to test.
        repetitions = size_start:size_step:size_stop
        for space_samples in repetitions
            @info "Generating data for: " space_samples
            # ==========================================
            # ============= Generate data ==============
            # ===
            # Generate `maxsim` random symmetric matrices and their ordered
            # versions (only when random processing is enabled).
            if do_random
                symm_mat_rand = [generate_random_matrix(space_samples) for i = 1:maxsim]
                ordered_mat_rand = [
                    get_ordered_matrix(symm_mat_rand[i]; assing_same_values = false)
                    for i = 1:maxsim
                ]
            end

            # ===
            # Generate `maxsim` geometric matrices from random point clouds.
            # NOTE(review): the keyword is spelled `assing_same_values` above
            # and `assign_same_values` below — confirm which spelling
            # `get_ordered_matrix` actually accepts.
            pts_rand = [
                generate_random_point_cloud(sample_space_dim, space_samples)
                for i = 1:maxsim
            ]
            symm_mat_geom = [generate_geometric_matrix(pts_rand[i]') for i = 1:maxsim]
            ordered_mat_geom = [
                get_ordered_matrix(symm_mat_geom[i]; assign_same_values = false)
                for i = 1:maxsim
            ]

            # ======================================================================
            # ========================= Do the Betti analysis ======================
            if do_random
                set = [ordered_mat_geom, ordered_mat_rand]
            else
                set = [ordered_mat_geom]
            end
            for matrix_set in set
                @debug("Betti analysis!")
                # ===
                # Compute Betti curves for every simulated matrix, optionally
                # recording timing/allocation data via @timed.
                many_bettis = Array[]
                if perform_eavl
                    many_timings = Float64[]
                    many_bytes = Float64[]
                    many_gctime = Float64[]
                    many_memallocs = Base.GC_Diff[]
                end

                for i = 1:maxsim
                    if i % 10 == 0
                        @info "Computing Bettis for: " i
                    end

                    if perform_eavl
                        results, timing, bytes, gctime, memallocs = @timed bettis_eirene(
                            matrix_set[i],
                            max_B_dim,
                            mindim = min_B_dim,
                        )
                        push!(many_bettis, results)
                        push!(many_timings, timing)
                        push!(many_bytes, bytes)
                        push!(many_gctime, gctime)
                        push!(many_memallocs, memallocs)
                    else
                        push!(
                            many_bettis,
                            bettis_eirene(matrix_set[i], max_B_dim, mindim = min_B_dim),
                        )
                    end
                end

                # ===
                # Peak cycle count per simulation and Betti dimension.
                max_cycles = zeros(maxsim, max_B_dim)
                for i = 1:maxsim, betti_dim = 1:max_B_dim
                    @debug("\tFindmax in bettis")
                    max_cycles[i, betti_dim] = findmax(many_bettis[i][:, betti_dim])[1]
                end

                # ===
                # Mean and std of the peak cycle counts across simulations.
                avg_cycles = zeros(1, length(min_B_dim:max_B_dim))
                std_cycles = zeros(1, length(min_B_dim:max_B_dim))
                k = 1
                for betti_dim = min_B_dim:max_B_dim
                    avg_cycles[k] = mean(max_cycles[:, betti_dim])
                    std_cycles[k] = std(max_cycles[:, betti_dim])
                    k += 1
                end

                # ===
                # Bundle the statistics for this configuration.
                # NOTE(review): `result_list` is rebound here from the list of
                # accumulators to a single accumulator — intentional, but
                # shadowing the outer meaning; the key "simualtions" is
                # (mis)spelled like this wherever it is read.
                betti_statistics = Dict()
                if matrix_set == ordered_mat_geom
                    @debug("Saving ordered")
                    betti_statistics["matrix_type"] = "ordered"
                    betti_statistics["space_dim"] = sample_space_dim
                    result_list = geom_mat_results
                else
                    @debug("Saving radom")
                    betti_statistics["matrix_type"] = "random"
                    result_list = rand_mat_results
                end
                betti_statistics["space_samples"] = space_samples
                betti_statistics["simualtions"] = maxsim
                betti_statistics["min_betti_dim"] = min_B_dim
                betti_statistics["max_betti_dim"] = max_B_dim
                betti_statistics["avg_cycles"] = avg_cycles
                betti_statistics["std_cycles"] = std_cycles

                if perform_eavl
                    betti_statistics["many_timings"] = many_timings
                    betti_statistics["many_bytes"] = many_bytes
                    betti_statistics["many_gctime"] = many_gctime
                    betti_statistics["many_memallocs"] = many_memallocs
                end
                push!(result_list, betti_statistics)
            end # matrix type loop
            @debug("===============")

            # Optionally checkpoint the accumulated results to a JLD file
            # after every matrix size.
            if control_saving
                if do_random
                    save(
                        "multiscale_matrix_testing_$(space_samples)_$(sample_space_dim).jld",
                        "rand_mat_results",
                        rand_mat_results,
                        "geom_mat_results",
                        geom_mat_results,
                    )
                else
                    save(
                        "multiscale_matrix_testing_dimension_$(space_samples)_$(sample_space_dim).jld",
                        "geom_mat_results",
                        geom_mat_results,
                    )
                end
            end
        end # matrix_size_loop
    end # sampled space dimension

    if do_random
        return geom_mat_results, rand_mat_results
    else
        return geom_mat_results
    end
end
# function plot_betti_numbers(betti_numbers, edge_density, title="Geometric matrix"; stop=0.6)
# """
# Plots Betti curves. The betti numbers should be obtained with the clique-top
# library.
# """
# p1 = plot(edge_density, betti_numbers[:,1], label="beta_0", title=title, legend=:topleft) #, ylims = (0,maxy)
# plot!(edge_density, betti_numbers[:,2], label="beta_1")
# if size(betti_numbers,2)>2
# plot!(edge_density, betti_numbers[:,3], label="beta_2")
# end
#
# return p1
# end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 388 | """
check_existing_dir(dir_path::String)
Checks if the directory under `dir_path` exists. If not, throws IOError
"""
# Check that the directory `dir_path` exists; warn and throw an
# ErrorException when it does not. Returns `nothing` on success.
function check_existing_dir(dir_path::String)
    if !isdir(dir_path)
        # BUG FIX: the warning interpolated the undefined name `data_path`,
        # so a missing directory raised UndefVarError instead of the
        # intended error below.
        @warn "Folder $(dir_path) does not exists in current directory."
        @warn "Terminating execution."
        throw(ErrorException("Can not find folder \"" * dir_path * "\"."))
    end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 4819 | using LinearAlgebra
"""
    integrate_sinh(n; r=1.0, a=1.0)

Integral of sinh(a*x)^n for x from 0 to r, evaluated recursively via the
standard reduction formula for powers of sinh.
"""
function integrate_sinh(n; r = 1.0, a = 1.0)
    if n == 0
        return r
    end
    if n == 1
        return (cosh(a * r) - 1) / a
    end
    boundary_term = (sinh(a * r)^(n - 1)) * cosh(a * r) / (a * n)
    return boundary_term - (n - 1) / n * integrate_sinh(n - 2, r = r, a = a)
end
# Radial density at radius `r` in a `d`-dimensional hyperbolic ball of the
# given `radius`, normalised by the corresponding sinh integral.
# NOTE(review): the scale factor is hard-coded to k = 1.0 and the `curvature`
# keyword is currently ignored (original behaviour preserved).
function hyp_radial_density(r, d; curvature = -1.0, radius = 1.0)
    k = 1.0
    normalisation = integrate_sinh(d - 1, r = radius, a = radius / k)
    return (sinh(r / k))^(d - 1) / normalisation
end
# Radial density d * r^(d-1) / radius^d of the uniform measure on a
# d-dimensional Euclidean ball of the given radius.
euc_radial_density(r, d; radius = 1.0) = d * r^(d - 1) / radius^d
"""
    rejection_sampling(dens::Function, maxval, numofpts=1)

Draw `numofpts` samples from the density `dens` supported on [0, maxval] by
rejection sampling, using `dens(maxval)` as the (assumed) envelope height.
"""
function rejection_sampling(dens::Function, maxval, numofpts = 1)
    envelope = dens(maxval)
    samples = Array{Float64,1}(undef, numofpts)
    accepted = 0
    while accepted < numofpts
        candidate = rand() * maxval
        density_val = dens(candidate)
        threshold = rand()
        # Accept the candidate with probability density_val / envelope.
        if threshold * envelope < density_val
            accepted += 1
            samples[accepted] = candidate
        end
    end
    return samples
end
# Sample `numofpts` hyperbolic radii via rejection sampling against the
# hyperbolic radial density, then map each to a Poincare-ball radius with
# tanh(r / 2).
function sample_hyp_rad(d, numofpts = 1; curvature = -1.0, radius = 1.0)
    density(r) = hyp_radial_density(r, d, curvature = curvature, radius = radius)
    hyperbolic_radii = rejection_sampling(density, radius, numofpts)
    return [tanh(r / 2.0) for r in hyperbolic_radii]
end
# Sample `numofpts` radii from the uniform-ball radial density by rejection
# sampling on [0, radius].
function sample_euc_rad(d, numofpts = 1; radius = 1.0)
    density(r) = euc_radial_density(r, d, radius = radius)
    return rejection_sampling(density, radius, numofpts)
end
"""
    sample_sph(d, numofpts=1; curvature=1.0)

Sample `numofpts` points uniformly from the upper hemisphere of the unit
`d`-sphere in R^(d+1) (last coordinate positive), by normalising Gaussian
vectors and rejecting those in the lower hemisphere. `curvature` is accepted
for interface symmetry but not used.
"""
function sample_sph(d, numofpts = 1; curvature = 1.0)
    rands = []
    accepted = 0
    # BUG FIX: the original loop condition `i <= numofpts` with `i` starting
    # at 0 produced numofpts + 1 samples.
    while accepted < numofpts
        vec = randn(d + 1)
        if vec[d+1] > 0
            push!(rands, normalize(vec))
            accepted += 1
        end
    end
    return rands
end
# Draw `numofpts` points uniformly from the unit sphere in R^d by
# normalising the columns of a standard-Gaussian matrix.
function sample_sphere(d, numofpts = 1)
    gaussians = randn(d, numofpts)
    points = []
    for col = 1:numofpts
        push!(points, normalize(gaussians[:, col]))
    end
    return points
end
# Sample `numofpts` points in the Poincare ball: uniform directions on the
# sphere scaled by radii drawn from the hyperbolic radial distribution.
function sample_hyp(d, numofpts = 1; radius = 1.0, curvature = -1)
    directions = sample_sphere(d, numofpts)
    radii = sample_hyp_rad(d, numofpts, radius = radius, curvature = curvature)
    return [radii[k] * directions[k] for k = 1:numofpts]
end
# Sample `numofpts` points uniformly from a d-dimensional Euclidean ball:
# uniform directions scaled by radii from the Euclidean radial distribution.
function sample_euc(d, numofpts = 1; radius = 1.0)
    directions = sample_sphere(d, numofpts)
    radii = sample_euc_rad(d, numofpts, radius = radius)
    return [radii[k] * directions[k] for k = 1:numofpts]
end
# Sample `numofpts` points from a d-dimensional ball whose geometry is chosen
# by the sign of `curvature`: negative -> hyperbolic, zero -> Euclidean,
# positive -> spherical (hemisphere points from sample_sph).
function sample_ball(d, numofpts = 1; radius = 1.0, curvature = 0.0)
    # Directions are drawn up front in every branch (the spherical branch
    # also consumes them from the RNG stream, matching the original order).
    directions = sample_sphere(d, numofpts)
    if curvature < 0
        radii = sample_hyp_rad(d, numofpts, radius = radius, curvature = curvature)
        [radii[k] * directions[k] for k = 1:numofpts]
    elseif curvature == 0.0
        radii = sample_euc_rad(d, numofpts, radius = radius)
        [radii[k] * directions[k] for k = 1:numofpts]
    elseif curvature > 0
        sample_sph(d, numofpts, curvature = curvature)
    end
end
# Pairwise hyperbolic distances between Poincare-ball points `pts` (a vector
# of coordinate vectors), via the acosh distance formula for the ball model.
function hyp_distance(pts; curvature = -1.0)
    npts = length(pts)
    distances = zeros(npts, npts)
    for a = 1:npts, b = 1:a-1
        denom_a = 1 - norm(pts[a])^2
        denom_b = 1 - norm(pts[b])^2
        delta = 2 * norm(pts[a] - pts[b])^2 / (denom_a * denom_b)
        dist = acosh(1 + delta)
        distances[a, b] = dist
        distances[b, a] = dist
    end
    return distances
end
# Pairwise Euclidean distances between the points in `pts` (a vector of
# coordinate vectors); returns a symmetric matrix with zero diagonal.
function euc_distance(pts)
    npts = length(pts)
    distances = zeros(npts, npts)
    for a = 1:npts, b = 1:a-1
        dist = norm(pts[a] - pts[b])
        distances[a, b] = dist
        distances[b, a] = dist
    end
    return distances
end
# Pairwise great-circle distances acos(<p, q>) between the unit vectors in
# `pts`; returns a symmetric matrix with zero diagonal.
function sph_distance(pts; curvature = 1.0)
    npts = length(pts)
    distances = zeros(npts, npts)
    for a = 1:npts, b = 1:a-1
        dist = acos(dot(pts[a], pts[b]))
        distances[a, b] = dist
        distances[b, a] = dist
    end
    return distances
end
# Select the distance computation by the sign of `curvature`:
# negative -> hyperbolic, zero -> Euclidean, positive -> spherical.
function distance_matrix(pts; curvature = 0.0)
    if curvature < 0
        hyp_distance(pts, curvature = curvature)
    elseif curvature == 0
        euc_distance(pts)
    elseif curvature > 0
        sph_distance(pts, curvature = curvature)
    end
end
# Convert a symmetric weight matrix into an edge-density matrix: each
# off-diagonal entry becomes the rank of its value among the distinct
# nonzero entries, divided by binomial(n, 2).
function to_density(matr)
    n = size(matr, 1)
    dens_matr = zeros(size(matr, 1), size(matr, 2))
    distinct_vals = sort(setdiff(unique(matr), 0.0))
    total_pairs = binomial(n, 2)
    for row = 1:n, col = row+1:n
        value_rank = findfirst(isequal(matr[row, col]), distinct_vals)
        dens_matr[row, col] = value_rank / total_pairs
        dens_matr[col, row] = dens_matr[row, col]
    end
    return dens_matr
end
# In-place variant of `to_density`: overwrite `matr` with its density matrix
# and return it.
function to_density!(matr)
    # BUG FIX: the original rebound the local (`matr = to_density(matr)`),
    # which never touched the caller's array despite the `!` in the name.
    # Broadcast assignment mutates the argument as the convention implies.
    matr .= to_density(matr)
    return matr
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 20266 | using Statistics
using Combinatorics
# using ImageFiltering
"""
rotate_img_around_center(img, angle = 5pi/6)
Function rotates a single image (or a frame) around the center of the image by
@angle radians.
"""
function rotate_img_around_center(img, angle = 5pi/6)
θ = angle
rot = recenter(RotMatrix(θ), [size(img)...] .÷ 2) # a rotation around the center
x_translation = 0
y_translation = 0
tform = rot ∘ Translation(y_translation, x_translation)
img2 = warp(img, rot, axes(img))
return img2
end
"""
get_local_gradients(video_array, centers, sub_img_size)
Computes the gradients in the subimage, takes the mean of sum of absolute
values of both hotizontal and vertical gradients as a representative of a
subimage.
"""
function get_local_gradients(video_array, centers, sub_img_size)
@debug "Entering get_local_gradients"
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size
h, w, len = get_video_dimension(video_array)
extracted_pixels = zeros(sub_img_size, sub_img_size, len)
@debug "starting frame procesing"
for frame = 1:len
img = video_array[frame]
img_grad = imgradients(img, KernelFactors.ando3, "replicate")
img_grad_abs = map(abs, img_grad[1]) + map(abs, img_grad[2])
for index_x = 1:size(centers,2)
c_x = centers[2, index_x]
for index_y = 1:size(centers,2)
c_y = centers[1, index_y]
sub_img = img_grad_abs[(c_x-half_size):(c_x+half_size),
(c_y-half_size):(c_y+half_size)]
extracted_pixels[index_x, index_y, frame] =mean(sub_img)
end
end
# @debug "Next frame" frame
end
return extracted_pixels
end
"""
get_img_gradient(img)
Computes the gradients in the `img`.
"""
function get_img_gradient(img)
@debug "Entering get_local_gradients"
img_grad = imgradients(img, KernelFactors.ando3, "replicate")
grad_1 = img_grad[1] .+ abs(findmin(img_grad[1])[1])
grad_1 ./= findmax(grad_1)[1]
grad_2 = img_grad[2] .+ abs(findmin(img_grad[2])[1])
grad_2 ./= findmax(grad_2)[1]
grad_sum = grad_1 + grad_2
grad_sum .+= abs(findmin(grad_sum)[1])
grad_sum ./= findmax(grad_sum)[1]
return grad_sum
end
"""
get_local_img_correlations(img, centers, sub_img_size, shift;
with_gradient=false)
Computes the correlation between the subimages and subimages shifted by values
from range -`shift`:`shift` and returns array of size
length(`centers`) x length(`centers`).
Each of the subimage is center around values stored in @centers
"""
function get_local_img_correlations(img, centers, sub_img_size::Int;
with_gradient=false)
# TODO BUG- function is not workig for even numbers
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size#
h, w = size(img)
extracted_pixels = zeros(sub_img_size, sub_img_size)
local_correlation = zeros(size(centers,1))
if with_gradient
img = get_img_gradient(img)
end
position = 1;
for index = centers
c_x = index[1]
c_y = index[2]
c_x_range = (c_x-half_range):(c_x+half_range)
c_y_range = (c_y-half_range):(c_y+half_range)
subimage = img[c_x_range,c_y_range]
center = img[c_x_range, c_y_range]
corelation = center .* subimage
corelation = sum(corelation)
local_correlation[position] += corelation
local_correlation[position] /= 256*(sub_img_size^2)^2
position += 1;
end
return local_correlation
end
"""
get_local_img_correlations(img,centers, masks)
Takes `img` and computes crosscorrelation with set of `masks` around the
`centers`. Crosscorrelation is computed as convolution of the mask and the area
around coordinates stored in `centres`.
"""
function get_local_img_correlations(img, centers, masks::Vector; with_gradient=false)
masks_num = length(masks)
sub_img_size = size(masks[1],1)
# half_size = ceil(Int,(sub_img_size-1)/2)
half_size = (sub_img_size)÷2
half_range = half_size
h, w = size(img)
local_correlation = zeros(masks_num, size(centers,1) )
index = centers[1]
masks_num = length(masks)
if with_gradient
img = get_img_gradient(img)
end
# position = 1;
# for index = centers
for pos = 1:length(centers)
# global position
index = centers[pos]
c_x = index[1]
c_y = index[2]
c_x_range = (c_x-half_range):(c_x+half_range)
c_y_range = (c_y-half_range):(c_y+half_range)
center = img[c_x_range, c_y_range]
# mask_pos = 1
# for mask in masks
for mask_pos = 1:length(masks)
mask = masks[mask_pos]
corelation = center .* mask
corelation = sum(corelation)
local_correlation[mask_pos, pos] += corelation
local_correlation[mask_pos, pos] /= (sub_img_size^2)
# local_correlation[position, mask_pos ] = sum(imfilter(center, mask))/(sub_img_size^2)
# mask_pos +=1
end
# position += 1;
end
return local_correlation
end
"""
extract_pixels_from_img(img, indicies_set, video_dim_tuple)
Takes every frame from @video_array and extracts pixels which indicies are in
@indicies_set, thus creating video with only chosen indicies.
"""
function extract_pixels_from_img(img, indicies_set, video_dim_tuple)
rows = size(indicies_set,2)
columns = size(indicies_set,2)
video_length = video_dim_tuple[3]
extracted_pixels = zeros(rows, columns, video_length)
extracted_pixels[:,:,] =
img[indicies_set[1,:],indicies_set[2,:]]
return extracted_pixels
end
"""
Returns evenly distributed centers of size `image_size`
"""
function get_local_img_centers(points_per_dim, img_size, shift=0, sub_img_size=0 )
# TODO Applied teproray solution here, so it works only for local gradients
# start = 0
# (points_per_dim>shift) ? start_ind = ceil(Int, points_per_dim/2)+ shift :
# start=shift
start_ind = ceil(Int, sub_img_size/2)
min_va, = findmin(img_size)
last_ind = min_va - start_ind
set = broadcast(floor, Int, range(start_ind, step=sub_img_size, stop=last_ind))
num_indexes = size(set,1)
centers = Any[]
for row = 1:num_indexes
for column = 1:num_indexes
push!(centers, CartesianIndex(set[row], set[column]))
end
end
return centers
end
"""
get_img_local_centers(img_size, sub_img_size=10)
Tiles the image of size @img_size into square subimages of size @sub_img_size
and returns vector with CartesianIndex coordinates of the subimages centre in
original image.
By default, takes smaller value from @img_size and then divides it by
@sub_img_size. Resulting value will be the number of returned subimages per
dimension. If @use_square is set to false, then evry dimension is treated
separately, resulting in rectangular grid.
It is possible to set overlap of the tiles with @overlap parameter. By default
it is set to zero, but can be any pixel value smaller that @sub_img_size. If
@overlap is set to value in range (0,1], a fraction of @sub_img_size is used.
"""
function get_img_local_centers(img_size, sub_img_size=10; use_square=true,
overlap=0)
@assert sub_img_size <= findmin(img_size)[1] "@sub_img_size is bigger than image!"
@assert sub_img_size > 0 "sub_img_size must be positive number"
@assert overlap<=sub_img_size "The overlap is biger than subimage size!"
@assert overlap >= 0 "overlap must be positive"
centers = CartesianIndex[]
start_ind = ceil(Int, sub_img_size/2)
if 2*start_ind == sub_img_size
start_ind +=1
end
if overlap>0 && overlap<1
overlap = floor(Int, sub_img_size*overlap)
end
if use_square
size_v = findmin(img_size)[1]
size_h = findmin(img_size)[1]
else
size_v = img_size[1]
size_h = img_size[2]
end
last_ind_v = size_v - start_ind # TODO check if it is starting at 1st row, not second
last_ind_h = size_h - start_ind
val_range_v = floor.(Int, range(start_ind, step=sub_img_size-overlap, stop=last_ind_v))
val_range_h = floor.(Int, range(start_ind, step=sub_img_size-overlap, stop=last_ind_h))
if isempty(val_range_v) && size_v <= sub_img_size
val_range_v = [start_ind]
end
if isempty(val_range_h) && size_h <= sub_img_size
val_range_h = [start_ind]
end
num_indexes_v = size(val_range_v,1)
num_indexes_h = size(val_range_h,1)
for row = 1:num_indexes_v, column = 1:num_indexes_h
push!(centers, CartesianIndex(val_range_v[row], val_range_h[column]))
end
return centers
end
"""
vectorize_img(video)
Rearrenges the video so that set of n frames (2D matrix varying in
time) the set of vectors is returned, in which each row is a pixel, and each
column is the value of the pixel in n-th frame.
"""
function vectorize_img(img)
rows, columns = size(img)
num_of_elements = rows*columns
vectorized_img = zeros(num_of_elements)
index = 1;
for row=1:rows
for column=1:columns
vectorized_img[index] = img[row, column];
index = index+1;
end
end
return vectorized_img
end
"""
    get_video_mask(points_per_dim, video_dimensions;
                   distribution="uniform", sorted=true, patch_params)

Build a `2 x points_per_dim` matrix of frame indices (row 1: vertical,
row 2: horizontal) selected according to `distribution`:

- `"uniform"`: indices are evenly spaced within each dimension; the vertical
  spacing may differ from the horizontal one (it depends on the frame size).
- `"random"`: indices are drawn uniformly at random from the frame ranges;
  they are sorted in ascending order when `sorted=true`.
- `"patch"`: indices form a regular patch anchored at `patch_params["x"]` /
  `patch_params["y"]` with step `patch_params["spread"]`.

`video_dimensions` is `(height, width, ...)`. Anchor coordinates that fall
too close to the frame border are clamped (with a warning).
"""
function get_video_mask(points_per_dim, video_dimensions;
                            distribution="uniform", sorted=true, patch_params)
    video_height, video_width, = video_dimensions
    x = patch_params["x"]
    y = patch_params["y"]
    spread = patch_params["spread"]

    half_points = ceil(Int, points_per_dim/2)

    # Clamp the x anchor away from the frame border.
    if x == 1
        x = floor(Int, video_width/2)
        @warn "Given x is to close to the border. Seeting the value to " x
    elseif x < half_points
        x = half_points
        @warn "Given x is to close to the border. Seeting the value to " x
    elseif x > video_width - half_points
        x = video_width - half_points
        @warn "Given x is to close to the border. Seeting the value to " x
    end

    # Clamp the y anchor away from the frame border.
    if y == 1
        y = floor(Int, video_height/2)
        @warn "Given y is to close to the border. Seeting the value to " y
    elseif y < half_points
        y = half_points
        @warn "Given y is to close to the border. Seeting the value to " y
    elseif y > video_height - half_points
        y = video_height - half_points
        @warn "Given y is to close to the border. Seeting the value to " y
    end

    if spread*points_per_dim+x > video_width || spread*points_per_dim+y > video_height
        @warn "Given patch parameters might result in indicies exceeding frame size."
    end

    if distribution == "uniform"
        # Enlarge the step by one when plain floor division would not yield
        # exactly `points_per_dim` indices per dimension.
        row_step = floor(Int, video_height/points_per_dim)
        column_step = floor(Int, video_width/points_per_dim)
        (video_height/row_step != points_per_dim) && (row_step += 1)
        (video_width/column_step != points_per_dim) && (column_step += 1)
        vertical_indicies = reshape(collect(1:row_step:video_height), (1, points_per_dim))
        horizontal_indicies = reshape(collect(1:column_step:video_width), (1, points_per_dim))
        indicies_set = [vertical_indicies; horizontal_indicies]
    elseif distribution == "random"
        vertical_indicies = rand(1:video_height, 1, points_per_dim)
        horizontal_indicies = rand(1:video_width, 1, points_per_dim)
        if sorted
            vertical_indicies = reshape(sort(vertical_indicies[1, :]), (1, points_per_dim))
            horizontal_indicies = reshape(sort(horizontal_indicies[1, :]), (1, points_per_dim))
        end
        indicies_set = [vertical_indicies; horizontal_indicies]
    elseif distribution == "patch"
        offsets = collect(1:spread:(spread*points_per_dim))
        indicies_set = [offsets .+ x offsets .+ y]'
    end
    return indicies_set
end
"""
    get_gabor_mask_set(;filt_size=25, σ=[2], theta_rad=[0], λ=[15], γ=[0.2],
                        psi_rad=[0], re_part=true, im_part=false,
                        do_norm=true, do_negative=true)

Return the set of Gabor filters generated for every combination of the given
parameter vectors, using `Kernel.gabor` from ImageFiltering.

# Arguments
- `filt_size=25`: size of the patch in which each filter is created (not the
  wavelet itself)
- `σ=[2]`: widths of the waves (controls cycles per unit)
- `theta_rad=[0]`: rotations in radians
- `λ=[15]`: wavelengths; higher values produce fewer waves in the window
- `γ=[0.2]`: aspect ratios; small values give long filters
- `psi_rad=[0]`: phases in radians
- `re_part::Bool`: return the real part of each filter
- `im_part::Bool`: return the imaginary part; when both `re_part` and
  `im_part` are set, the absolute value of `re + im*im` is returned instead
- `do_norm::Bool`: normalise each filter to the range [0, 1]
- `do_negative::Bool`: after normalisation, shift the real/imaginary part by
  -0.5 so it lies in [-0.5, 0.5] (not applied to the absolute-value variant)
"""
function get_gabor_mask_set(;filt_size=25, σ=[2], theta_rad=[0], λ=[15], γ=[0.2],
                                psi_rad=[0], re_part=true, im_part=false,
                                do_norm=true, do_negative=true)
    kernels = Any[]
    # One filter per combination of (σ, θ, λ, γ, ψ); same nesting order as
    # the original explicit loops.
    for sigma in σ, θ in theta_rad, lambda in λ, gamma in γ, ψ in psi_rad
        # kernel[1] is the real part, kernel[2] the imaginary part.
        kernel = Kernel.gabor(filt_size, filt_size,
                              sigma,
                              θ,
                              lambda,
                              gamma,
                              ψ)
        if re_part && !im_part
            if do_norm
                # Shift to non-negative values, then scale to [0, 1].
                kernel[1] .+= abs(findmin(kernel[1])[1])
                kernel[1] ./= findmax(kernel[1])[1]
                if do_negative
                    kernel[1] .-= 0.5
                end
            end
            push!(kernels, Gray.((kernel[1])))
        elseif im_part && !re_part
            if do_norm
                kernel[2] .+= abs(findmin(kernel[2])[1])
                kernel[2] ./= findmax(kernel[2])[1]
                if do_negative
                    kernel[2] .-= 0.5;
                end
            end
            push!(kernels, Gray.((kernel[2])))
        else
            @debug "Using abs(re(A)+im(A))"
            result = abs.(kernel[1] + kernel[2]im);
            if do_norm
                result .+= abs(findmin(result)[1])
                result ./= findmax(result)[1]
            end
            # BUG FIX: previously `Gray.()` was pushed here, discarding the
            # computed `result` (and erroring at runtime).
            push!(kernels, Gray.(result))
        end
    end
    return kernels
end
"""
    rearrange_filters_arr(im_filter; showing_number=-1, columns=-1)

Tile the square filters stored in `im_filter` (a vector of `Matrix{Gray}`)
into one large image. At most `showing_number` filters are placed (`-1`
means all of them); `columns` fixes the number of tiles per row (`-1`
chooses a roughly square layout).
"""
function rearrange_filters_arr(im_filter; showing_number=-1, columns=-1)
    mask_size = size(im_filter[1], 1)
    total = length(im_filter)
    # How many filters to actually place on the canvas.
    limit = (showing_number == -1 || showing_number > total) ? total : showing_number
    if columns == -1
        columns = Int(ceil(sqrt(total)))
    end
    rows = Int(ceil(total / columns))
    canvas = zeros(Gray, rows * mask_size, columns * mask_size)

    next = 1
    for r in 1:rows
        r0 = (r - 1) * mask_size + 1
        row_range = r0:(r0 + mask_size - 1)
        for c in 1:columns
            next > limit && break
            c0 = (c - 1) * mask_size + 1
            canvas[row_range, c0:(c0 + mask_size - 1)] = im_filter[next]
            next += 1
        end
        next > limit && break
    end
    return canvas
end
# TODO remove img size from arguments
"""
    get_local_correlations(method, img, img_size, sub_img_size; kwargs...)

Dispatch to one of the local-feature extraction methods — `"correlation"`,
`"gradient_gabor"`, `"gabor"`, `"gradient"`; any other value falls back to
raw pixel extraction on a uniform index mask — and return the resulting
local correlations.

`patch_params` is only used by the fallback branch and is forwarded to
`get_video_mask`.
"""
function get_local_correlations(method::String, img, img_size, sub_img_size;
                                    masks = 0,
                                    points_per_dim=1,
                                    shift=0,
                                    with_grad = true,
                                    overlap = 0,
                                    use_square=true,
                                    patch_params=Dict("x"=>1, "y"=>1, "spread"=>1))
    if method == "correlation"
        @debug "local correlation"
        centers = get_local_img_centers(points_per_dim, img_size, shift,
                                        sub_img_size)
        # BUG FIX: the result was previously stored in an unrelated local
        # (`extracted_pixels_matrix`), so the `return` below raised
        # `UndefVarError` for this branch.
        local_correlations = get_local_img_correlations(img, centers,
                                                        sub_img_size, shift)
    elseif method == "gradient_gabor"
        @info "local gradient gabor comparison"
        centers = get_img_local_centers(img_size, sub_img_size)
        local_correlations = get_local_img_correlations(img, centers, masks;
                                                        with_gradient = with_grad)
    elseif method == "gabor"
        @debug "local gabor comparison"
        centers = get_img_local_centers(img_size, sub_img_size; overlap = overlap, use_square=use_square)
        local_correlations = get_local_img_correlations(img, centers, masks )
    elseif method == "gradient"
        @debug "local gradient analysis"
        centers = get_local_img_centers(points_per_dim, img_size, shift,
                                        sub_img_size)
        local_correlations = get_local_img_correlations(img, centers, sub_img_size;
                                                        with_gradient=with_grad)
    else
        # Fallback: plain pixel extraction on a uniform index mask.
        # BUG FIX: `patch_params` was previously an undefined name here; it
        # is now a keyword argument with a neutral default.
        indicies_set = get_video_mask(points_per_dim, img_size,
                                      distribution="uniform", patch_params=patch_params)
        local_correlations = extract_pixels_from_img(img, indicies_set,
                                                     img_size)
    end
    return local_correlations
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 3382 |
# ============================================================================
# exported from MatrixProcessing on 10.09.2020
"""
    get_pairwise_correlation_matrix(vectorized_video, tau_max=25)

Computes pairwise correlation of the input signals accordingly to the formula
presented in paper "Clique topology reveals intrinsic geometric structure
in neural correlations" by Chad Giusti et al.

The Computations are done only for upper half of the matrix, the lower half is
a copy of upper half. Computation-wise the difference is at level of 1e-16, but
this causes that inverse is not the same as non-inverse matrix.
"""
function get_pairwise_correlation_matrix(vectorized_video, tau_max=25)
    # Each row of `vectorized_video` is one signal; columns are time samples.
    number_of_signals = size(vectorized_video,1)
    T = size(vectorized_video,2)
    C_ij = zeros(number_of_signals,number_of_signals);
    # log_C_ij = zeros(number_of_signals,number_of_signals);
    # this is given in frames
    lags = -tau_max:1:tau_max
    # Only the upper triangle (column >= row) is computed; the lower triangle
    # is mirrored from it inside the loop.
    for row=1:number_of_signals
        for column=row:number_of_signals
            signal_ij = vectorized_video[row,:];
            signal_ji = vectorized_video[column,:];
            # cross_corelation at every lag in `lags`, scaled by the number
            # of samples (crosscov — presumably from StatsBase).
            ccg_ij = crosscov(signal_ij, signal_ji, lags);
            ccg_ij = ccg_ij ./ T;
            # A: sum over non-negative lags; B: sum over non-positive lags.
            # Both include the zero-lag term at index tau_max+1.
            A = sum(ccg_ij[tau_max+1:end]);
            B = sum(ccg_ij[1:tau_max+1]);
            r_i_r_j = 1;
            C_ij[row, column] = max(A, B)/(tau_max*r_i_r_j);
            C_ij[column, row] = C_ij[row, column]
            # log_C_i_j[row, column] = log10(abs(C_ij[row, column]));
        end
    end
    return C_ij
end
"""
    get_subimg_correlations(video_array, centers, sub_img_size, shift)

Computes the correlation between the subimages and subimages shifted by values
from range -@shift:@shift and returns array with frames of size
length(@centers) x length(@centers) with the number of frames equal to the
number of rames in @video_array.

Each of the subimage is center around values stored in @centers
"""
# TODO Check if this is the same as some of the functions from the ImageProcessing
function get_subimg_correlations(video_array, centers, sub_img_size, shift)
    # Half-width of a (sub_img_size x sub_img_size) patch, and the wider
    # half-range that additionally accommodates the ±shift displacement.
    half_size = ceil(Int,(sub_img_size-1)/2)
    half_range = half_size + shift
    h, w, len = get_video_dimension(video_array)
    # NOTE(review): output is allocated as sub_img_size x sub_img_size but is
    # indexed below by the number of centers — confirm the two sizes match.
    extracted_pixels = zeros(sub_img_size, sub_img_size, len)
    for frame = 1:len
        img = video_array[frame]
        for index_x = 1:size(centers,2)
            c_x = centers[2, index_x]
            for index_y = 1:size(centers,2)
                c_y = centers[1, index_y]
                # Wider patch from which the shifted windows are sliced …
                subimage = img[(c_x-half_range):(c_x+half_range),
                               (c_y-half_range):(c_y+half_range)]
                # … and the fixed central window it is correlated against.
                center = img[(c_x-half_size):(c_x+half_size), (c_y-half_size):(c_y+half_size)]
                for left_boundary = 1:(2*shift+1)
                    for lower_boundary = 1:(2*shift+1)
                        # Element-wise product of the central window with one
                        # shifted window, accumulated over all shifts.
                        corelation = center .* subimage[left_boundary:left_boundary+sub_img_size-1, lower_boundary:lower_boundary+sub_img_size-1]
                        corelation = sum(corelation)
                        extracted_pixels[index_x, index_y, frame] += corelation
                    end
                end
                # Normalise by pixel depth (256), window area and shift count.
                extracted_pixels[index_x, index_y, frame] /= 256*(sub_img_size^2)*(shift*2)^2
            end
        end
    end
    return extracted_pixels
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 959 | """
Save figures.
"""
"""
    save_figure(plot_ref, results_path, plot_title; extension=".png")

Save the plot `plot_ref` to `results_path * plot_title * extension` and log
the destination path.
"""
function save_figure(plot_ref, results_path, plot_title; extension=".png" )
    full_path = string(results_path, plot_title, extension)
    savefig(plot_ref, full_path)
    @info "File saved under: " full_path
end
"""
    save_betti(plot_ref, results_path, plot_title)

Save a Betti-curve plot via `save_figure`, prefixing the title with
"betti_curves_".
"""
function save_betti(plot_ref, results_path, plot_title)
    save_figure(plot_ref, results_path, "betti_curves_" * plot_title)
end
"""
    save_figure_with_params(plot_reference, results_path; extension=".png", prefix="", kwargs...)

Compose a file name from every keyword parameter (as `key_value_` segments,
in declaration order) and log the resulting path. The actual `savefig` call
is currently disabled.
"""
function save_figure_with_params(plot_reference, results_path; extension=".png", prefix="", kwargs... )
    segments = ["$(k)_$(v)_" for (k, v) in pairs(kwargs)]
    plot_title = join(segments)
    full_path = results_path*prefix*plot_title*extension
    # savefig(plot_ref, full_path)
    @info "File saved as: " full_path
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 6669 | using Eirene
using Plots
include("clique_top_Julia/CliqueTop.jl")
include("VideoProcessing.jl")
include("MatrixToolbox.jl")
include("Settings.jl")
"""
    testing_pariwise_corr()

Run the pairwise-correlation pipeline over the videos configured in the
global `test_params` dictionary: extract pixel signals according to the
chosen index distribution, compute the pairwise correlation matrix, run
persistent homology (CliqueTop and/or Eirene) and optionally plot and save
the results.

NOTE(review): relies on the globals `test_params` and `videos_names` being
defined before the call — confirm they are set by the including script.
"""
function testing_pariwise_corr()
    # --- unpack configuration from the global `test_params` dictionary ---
    do_clique_top = test_params["do_clique_top"]
    do_eirene = test_params["do_eirene"]
    save_figures = test_params["save_figures"]
    plot_betti_figrues = test_params["plot_betti_figrues"]
    plot_vectorized_video = test_params["plot_vectorized_video"]
    size_limiter = test_params["size_limiter"]
    ind_distrib = test_params["ind_distrib"]
    videos_set = test_params["videos_set"]
    tau_max_set = test_params["tau_max_set"]
    points_per_dim_set = test_params["points_per_dim_set"]
    shifts_set = test_params["shifts_set"]
    patch_params = test_params["patch_params"]
    video_path = test_params["video_path"]
    results_path = test_params["results_path"]
    videos = test_params["videos_names"]

    # --- select the pixel-index distribution method ---
    do_local_corr = false
    do_local_grad = false
    if ind_distrib == "local_corr"
        shift_set = test_params["shift_set"]
        sub_img_size_set = [9]
        do_local_corr = true
        do_local_grad = false
        @debug "Doing local correlation" do_local_corr
    elseif ind_distrib == "local_grad"
        shift_set = [1]
        sub_img_size_set = test_params["sub_img_size_set"]
        do_local_corr = false
        do_local_grad = true
        @debug "Doing local gradient" do_local_grad
    else
        shift_set = [1]
        sub_img_size_set = [9]
        do_local_corr = false
        do_local_grad = false
    end
    @info "Using following distribution: " test_params["ind_distrib"]
    @debug "All videos are: " videos_names
    @debug "Video set is : " videos_set

    for video in videos_set
        # --- load the selected video into memory as an array of frames ---
        choice = videos_names[video]
        @info "Selected video: " choice
        @debug "Path and choice is:" video_path*choice
        video_array = get_video_array_from_file(video_path*choice)
        @info "Array extracted."
        video_dimensions = get_video_dimension(video_array)
        for points_per_dim in points_per_dim_set
            for shift in shift_set, sub_img_size in sub_img_size_set
                # --- extract pixel signals with the selected method ---
                if do_local_corr
                    centers = get_local_centers(points_per_dim, video_dimensions, shift, sub_img_size)
                    extracted_pixels_matrix = get_subimg_correlations(video_array, centers, sub_img_size, shift)
                elseif do_local_grad
                    centers = get_local_centers(points_per_dim, video_dimensions, shift, sub_img_size)
                    extracted_pixels_matrix = get_local_gradients(video_array, centers, sub_img_size)
                else
                    # NOTE(review): `patch_params` is passed positionally after
                    # a keyword argument here, while `get_video_mask` declares
                    # only two positional arguments — confirm this call site.
                    indicies_set = get_video_mask(points_per_dim, video_dimensions, distribution=ind_distrib, patch_params)
                    extracted_pixels_matrix = extract_pixels_from_video(video_array, indicies_set, video_dimensions)
                end
                @info "Pixels extracted."
                vectorized_video = vectorize_video(extracted_pixels_matrix)
                @info "Video is vectorized, proceeding to Pairwise correlation."
                for tau in tau_max_set
                    ## Compute pairwise correlation
                    C_ij = get_pairwise_correlation_matrix(vectorized_video, tau)
                    # set the diagonal to zero
                    for diag_elem in 1:size(C_ij,1)
                        C_ij[diag_elem,diag_elem] = 0
                    end
                    @info "Pairwise correlation finished, proceeding to persistance homology."
                    # Compute persistance homology with CliqueTopJulia
                    size_limiter = test_params["size_limiter"]
                    @debug "using size limiter = " size_limiter
                    if size_limiter > size(C_ij,1)
                        @warn "Used size limiter is larger than matrix dimension: " size_limiter size(C_ij,1)
                        @warn "Using maximal size instead"
                        size_limiter = size(C_ij,1)
                    end
                    @debug "do_clique_top: " do_clique_top
                    @debug "test_params['do_clique_top']: " test_params["do_clique_top"]
                    if do_clique_top
                        @debug pwd()
                        @time c_ij_betti_num, edge_density, persistence_intervals, unbounded_intervals = compute_clique_topology(C_ij[1:size_limiter, 1:size_limiter], edgeDensity = 0.6)
                    end
                    @debug "do_eirene: " do_eirene
                    if do_eirene
                        C = eirene(C_ij[1:size_limiter, 1:size_limiter],maxdim=3,model="vr")
                    end
                    # ---------------------------------------------------------
                    # Plot results
                    @debug "Proceeding to plotting."
                    if plot_vectorized_video
                        vector_plot_ref = heatmap(vectorized_video, color=:grays)
                        if save_figures
                            name = split(choice, ".")[1]
                            name = "vec_" * name * "_sz$(size_limiter)_p$(points_per_dim)_tau$(tau).png"
                            savefig(vector_plot_ref, name)
                        end #save vec
                    end #plot vec
                    if plot_betti_figrues && do_clique_top
                        betti_numbers = c_ij_betti_num
                        title = "Betti curves for pairwise corr. matrix"
                        p1 = plot_betti_numbers(c_ij_betti_num, edge_density, title);
                        heat_map1 = heatmap(C_ij, color=:lightrainbow, title="Pariwise Correlation matrix, number of points: $(points_per_dim)");
                        betti_plot_clq_ref = plot(p1, heat_map1, layout = (2,1))
                        if save_figures
                            saving_figures(betti_plot_clq_ref, results_cliq_path, choice, points_per_dim, tau, size_limiter)
                        end#save fig
                    end #plot cliq
                    if plot_betti_figrues && do_eirene
                        p1, heat_map1 = plot_eirene_betti_curves(C, C_ij)
                        betti_plot_ei_ref = plot(p1, heat_map1, layout = (2,1))
                        if save_figures
                            saving_figures(betti_plot_ei_ref, results_cliq_path, choice, points_per_dim, tau, size_limiter)
                        end#save fig
                    end #plot eirene
                end #for tau
            end #for shift
        end #for points_per_dim
    end #for video set
    @info "Finished testing"
end #func
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 9266 | #=
Creted: 2020-05-04
Author: Emil Dmitruk
Structures for storing matrices used at different stages of preprocessing for
topological analysis with Eirene library.
=#
# TODO draw a diagram of all structures
# ===
"""
    MethodsParams(; kwargs...)

Collection of boolean switches controlling which processing, plotting and
saving steps are executed. All flags default to `false` except
`plot_heatmaps`, `plot_betti_figrues` and `legend_on`.
"""
struct MethodsParams
    plot_filters::Bool
    plot_heatmaps::Bool
    plot_betti_figrues::Bool
    lower_dist_mat_resolution::Bool
    lower_ord_mat_resolution::Bool
    legend_on::Bool
    do_dsiplay::Bool
    save_gabor_params::Bool
    save_subelements::Bool
    save_figure::Bool

    function MethodsParams(; plot_filters=false,
                             plot_heatmaps=true,
                             plot_betti_figrues=true,
                             lower_dist_mat_resolution=false,
                             lower_ord_mat_resolution=false,
                             legend_on=true,
                             do_dsiplay=false,
                             save_gabor_params=false,
                             save_subelements=false,
                             save_figure=false)
        return new(plot_filters, plot_heatmaps, plot_betti_figrues,
                   lower_dist_mat_resolution, lower_ord_mat_resolution,
                   legend_on, do_dsiplay, save_gabor_params, save_subelements,
                   save_figure)
    end
end
"""
    ImageTopologyParams(; kwargs...)

Parameters driving the image-to-topology pipeline: distance binning, Betti
dimension range, input file location, Gabor filter set and pooling
configuration.
"""
struct ImageTopologyParams
    total_bins::Int
    max_size_limiter::Int
    min_B_dim::Int
    max_B_dim::Int
    file_name::String
    img_path::String
    gruping::Bool
    sub_img_size::Int
    sub_sample_size::Int
    pooling_method::String
    gabor_set::Int
    overlap::Number
    gaussian_blurr::Number

    function ImageTopologyParams(; total_bins=5, max_size_limiter=200,
                                   min_B_dim=1, max_B_dim=4,
                                   file_name="", img_path="img/",
                                   gruping=true, sub_img_size=33, sub_sample_size=2,
                                   pooling_method="avg_pooling", gabor_set=4, overlap=0.0,
                                   gaussian_blurr=0.25)
        return new(total_bins, max_size_limiter, min_B_dim, max_B_dim,
                   file_name, img_path, gruping, sub_img_size, sub_sample_size,
                   pooling_method, gabor_set, overlap, gaussian_blurr)
    end
end

"""
    get_params_description(par::ImageTopologyParams)

Build a compact string summarising `par`, suitable for use in file names.
"""
function get_params_description(par::ImageTopologyParams)
    # Gaussian pooling additionally encodes the blur strength (percent).
    met = par.pooling_method == "gauss_pooling" ?
          "_$(par.pooling_method)$(ceil(Int, par.gaussian_blurr*100))" :
          "_$(par.pooling_method)"
    return "file_$(split(par.file_name,'.')[1])"*
           "_subimgsize$(par.sub_img_size)"*
           "_maxB_$(par.max_B_dim)"*
           "_minB_$(par.min_B_dim)"*
           "_gaborset_$(par.gabor_set)"*
           "_poolingsize_$(par.sub_sample_size)"*
           "$(met)_"*
           "_overlap_$(Int(par.overlap*10))_"
end
"""
    get_ord_mat_from_img(par::ImageTopologyParams, met_par::MethodsParams; get_distances=false)

Compute the ordered matrix (or, when `get_distances=true`, the raw distance
matrix) for the image configured in `par`: build the Gabor filter set, load
the image, compute local correlations, take pairwise Euclidean distances and
finally order — and optionally coarsen — the resulting matrix.
"""
function get_ord_mat_from_img(par::ImageTopologyParams, met_par::MethodsParams; get_distances=false)
    @info "Current img_size" par.sub_img_size
    @info "Using file: " par.file_name
    @debug "Used params: " par.total_bins, par.gabor_set
    size_limiter = par.max_size_limiter
    # =============================== Get image masks ======================
    masks = get_filter_set(par.gabor_set, par.sub_img_size; plot_filters = false,
                           save_gabor_params = met_par.save_gabor_params)
    # =============================== Get image ================================
    file_n = split(par.file_name, ".")[1]
    loaded_img = load(par.img_path*par.file_name)
    img1_gray = Gray.(loaded_img)
    img_size = size(img1_gray)
    # ================================ Process Image =======================
    # get the correlation matrix accordingly to choosen method
    local_correlations = get_local_correlations("gabor", img1_gray,img_size,
                                                par.sub_img_size; masks = masks,
                                                overlap=par.overlap)
    # ======================== Compute pairwise correlation ================
    # Euclidean distance between every pair of local-correlation columns.
    dist_mat = pairwise(Euclidean(), local_correlations, dims=2)
    # =============================== Ordered matrix =======================
    size_limiter = size(dist_mat,1)
    if size_limiter > par.max_size_limiter
        @warn "Restricting matrix size, because matrix is too big"
        size_limiter = par.max_size_limiter
    end
    # ===
    # Matrix gupping — optionally quantise distances into `total_bins` bins.
    met_par.lower_dist_mat_resolution && group_distances!(dist_mat, par.total_bins)
    # ===
    # Matrix ordering
    ordered_matrix = get_ordered_matrix(dist_mat[1:size_limiter,1:size_limiter];
                                        assign_same_values=par.gruping)
    if met_par.lower_ord_mat_resolution
        ordered_matrix = lower_ordmat_resolution(ordered_matrix, par.total_bins)
    end
    if get_distances
        # NOTE(review): the full (unrestricted) distance matrix is returned
        # here, not the size-limited slice — confirm that is intended.
        return dist_mat
    else
        return ordered_matrix
    end
end
# ===
"""
    TopologyMatrixSet(input_matrix::Array, params::ImageTopologyParams; description_vector)

Bundle of every matrix stage derived from `input_matrix`: the reordered
variant, the pooled variants, the renumbered pooled variants, the collection
of all stages, the per-stage matrix ranks and the parameters used to produce
them.
"""
struct TopologyMatrixSet
    file_name::String
    # 2 below are not necessary, if params are includede in this structure
    sub_sample_size::Int
    pooling_method::String
    # Matrices
    ordered_matrix::Array
    reordered_matrix
    # reordered_map_ref
    pooled_matrix
    pooled_reord_matrix
    renum_pooled_orig_matrix
    renum_pooled_reord_matrix
    reordered_renum_pooled_orig_matrix
    matrix_collection
    description_vector
    ranks_collection
    params::ImageTopologyParams

    function TopologyMatrixSet(input_matrix::Array, params::ImageTopologyParams
                               ; description_vector)
        # TODO add parameter which describes which methods should be used
        # @warn "Using constant in structure definition- TODO: change to variable"
        # file_name = images_set[6]
        # sub_sample_size = 2
        # pooling_method = "avg_pooling"
        # ===
        file_name = params.file_name
        # Reorder so that the largest values sit near the diagonal.
        reordered_matrix, reordered_map_ref =
            order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots = false);
        # Pool both the original and the reordered matrix.
        pooled_matrix = reorganize_matrix(input_matrix; subsamp_size=params.sub_sample_size, method=params.pooling_method, gauss_sigma=params.gaussian_blurr)
        pooled_reord_matrix = reorganize_matrix(reordered_matrix; subsamp_size=params.sub_sample_size, method=params.pooling_method, gauss_sigma=params.gaussian_blurr)
        # =
        # gaussian_blurr = g_blurr
        # used_kernel = Kernel.gaussian(gaussian_blurr)
        # pooled_matrix = ceil.(Int,imfilter(input_matrix, used_kernel))
        # pooled_reord_matrix = ceil.(Int,imfilter(reordered_matrix, used_kernel))
        # =
        # Renumber the pooled matrices into consecutive order values.
        renum_pooled_orig_matrix = get_ordered_matrix(pooled_matrix; assign_same_values=true)
        renum_pooled_reord_matrix = get_ordered_matrix(pooled_reord_matrix; assign_same_values=true)
        reordered_renum_pooled_orig_matrix, reordered_renum_pooled_orig_matrix_ref =
            order_max_vals_near_diagonal2(renum_pooled_orig_matrix; do_final_plot=false, do_all_plots = false);
        # Collect every stage; the order matches `description_vector`.
        matrix_collection = Array[]
        push!(matrix_collection, input_matrix)
        push!(matrix_collection, reordered_matrix)
        push!(matrix_collection, pooled_matrix)
        push!(matrix_collection, pooled_reord_matrix)
        push!(matrix_collection, renum_pooled_orig_matrix)
        push!(matrix_collection, renum_pooled_reord_matrix)
        push!(matrix_collection, reordered_renum_pooled_orig_matrix)
        # Rank of every collected matrix stage.
        ranks_collection = zeros(Int,size(matrix_collection)[1])
        for mat = 1: size(matrix_collection)[1]
            ranks_collection[mat] = rank(matrix_collection[mat])
        end
        new(file_name, params.sub_sample_size, params.pooling_method, input_matrix,
            reordered_matrix,
            # reordered_map_ref,
            pooled_matrix, pooled_reord_matrix,
            renum_pooled_orig_matrix, renum_pooled_reord_matrix,
            reordered_renum_pooled_orig_matrix,
            matrix_collection, description_vector, ranks_collection, params)
    end
end
# ===
"""
    TopologyMatrixBettisSet(top_mat_set::TopologyMatrixSet; min_B_dim=1, max_B_dim=3)

Betti curves computed with Eirene for every matrix stored in
`top_mat_set.matrix_collection`.
"""
struct TopologyMatrixBettisSet
    min_B_dim::Int
    max_B_dim::Int
    bettis_collection

    function TopologyMatrixBettisSet(top_mat_set::TopologyMatrixSet;min_B_dim=1, max_B_dim=3)
        bettis_collection = Any[]
        for matrix = top_mat_set.matrix_collection
            # ===
            # Persistent homology
            # NOTE(review): the Betti dimension range below is taken from
            # `top_mat_set.params`, not from this constructor's keyword
            # arguments — confirm that is intended.
            eirene_geom = eirene(matrix,maxdim=top_mat_set.params.max_B_dim,model="vr")
            bett_geom = get_bettis(eirene_geom, top_mat_set.params.max_B_dim, min_dim = top_mat_set.params.min_B_dim)
            push!(bettis_collection,bett_geom)
        end
        new(min_B_dim, max_B_dim, bettis_collection)
    end
end
# ===
"""
    MatrixHeatmap(in_array, description)

Square heatmap plot of `in_array` annotated with the matrix property it
shows (`description` becomes the plot title).
"""
struct MatrixHeatmap
    heat_map
    matrix_property::String

    function MatrixHeatmap(in_array, description)
        side = size(in_array, 1)
        plot_ref = plot_square_heatmap(in_array, 5, side; plt_title=description,)
        return new(plot_ref, description)
    end
end
# ===
"""
    TopologyMatrixHeatmapsSet(topology_matrix::TopologyMatrixSet)

Heatmaps (and their raw plot handles) for every matrix stage stored in a
`TopologyMatrixSet`.
"""
struct TopologyMatrixHeatmapsSet
    heatmaps::Array{MatrixHeatmap}
    heatmap_plots_set

    function TopologyMatrixHeatmapsSet(topology_matrix::TopologyMatrixSet)
        heatmaps = [MatrixHeatmap(topology_matrix.ordered_matrix,"original"),
                    MatrixHeatmap(topology_matrix.reordered_matrix,"reordered"),
                    MatrixHeatmap(topology_matrix.pooled_matrix,"pooled_origi"),
                    MatrixHeatmap(topology_matrix.pooled_reord_matrix,"pooled_reordered"),
                    MatrixHeatmap(topology_matrix.renum_pooled_orig_matrix,"renum_pooled_orig"),
                    MatrixHeatmap(topology_matrix.renum_pooled_reord_matrix,"renum_pooled_reord"),
                    MatrixHeatmap(topology_matrix.reordered_renum_pooled_orig_matrix,"reordered_renum_pooled_original"),
                    ]
        plots = Any[hmap.heat_map for hmap in heatmaps]
        return new(heatmaps, plots)
    end
end
# ===
"""
    BettiPlot(in_array; min_B_dim=1)

Betti-curve plot of `in_array` with the x axis labelled "Steps".
"""
struct BettiPlot
    betti_plot

    function BettiPlot(in_array; min_B_dim=1)
        plot_ref = plot_bettis2(in_array, "", legend_on=false, min_dim=min_B_dim)
        xlabel!("Steps")
        return new(plot_ref)
    end
end
# ===
"""
    TopologyMatrixBettisPlots(bettis_collection::TopologyMatrixBettisSet)

Betti-curve plots for every matrix in a `TopologyMatrixBettisSet`.
"""
struct TopologyMatrixBettisPlots
    betti_plots_set

    function TopologyMatrixBettisPlots(bettis_collection::TopologyMatrixBettisSet)
        plots = Any[BettiPlot(bettis).betti_plot
                    for bettis in bettis_collection.bettis_collection]
        return new(plots)
    end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 18524 | # import Makie
import VideoIO
using StatsBase
using Images
using ImageFeatures
# using TestImages
# using ImageDraw
using CoordinateTransformations
# using Makie
# using Logging
export get_video_array_from_file,
get_video_dimension,
get_video_mask,
extract_pixels_from_video,
vectorize_video,
get_pairwise_correlation_matrix,
get_average_from_tiles,
rotate_img_around_center,
rotate_vid_around_center,
export_images_to_vid,
rotate_and_save_video,
get_local_correlations,
get_local_centers,
get_local_gradients,
normalize_to_01,
shift_to_non_negative,
plotimg;
"""
    get_video_array_from_file(video_name)

Open `video_name` with VideoIO and return a vector holding every frame,
converted to 8-bit grayscale.
"""
function get_video_array_from_file(video_name)
    stream = VideoIO.open(video_name) # candle
    frames = Vector{Array{UInt8}}(undef, 0)
    vid = VideoIO.openvideo(stream, target_format=VideoIO.AV_PIX_FMT_GRAY8)
    while !eof(vid)
        push!(frames, reinterpret(UInt8, read(vid)))
    end
    close(vid)
    return frames
end
"""
    get_video_dimension(video_array)

Return a named tuple `(video_height, video_width, video_length)` describing
the frames stored in `video_array`.
"""
function get_video_dimension(video_array)
    return (video_height=size(video_array[1], 1),
            video_width=size(video_array[1], 2),
            video_length=size(video_array, 1))
end
"""
    get_video_mask(points_per_dim, video_dimensions;
                   distribution="uniform", sorted=true, patch_params)

Returns a matrix of size 2 x `points_per_dim` holding indices of a video
frame (row 1: vertical, row 2: horizontal), chosen according to
`distribution`:

- `"uniform"`: distance between the points in a given dimension is even, but
  the vertical distance may differ from the horizontal one (depends on the
  frame size).
- `"random"`: points are chosen randomly in `1:height` / `1:width`; they are
  sorted in ascending order when `sorted=true`.
- `"patch"`: a regular patch anchored at `patch_params["x"]` /
  `patch_params["y"]` with step `patch_params["spread"]`.

Anchor coordinates too close to the frame border are clamped with a warning.
"""
function get_video_mask(points_per_dim, video_dimensions;
                            distribution="uniform", sorted=true, patch_params)
    video_height, video_width, = video_dimensions
    # BUG FIX: `x` was never read from `patch_params` here, so the border
    # checks below raised `UndefVarError`.
    x = patch_params["x"]
    y = patch_params["y"]
    spread = patch_params["spread"]

    half_points = Int64(ceil(points_per_dim/2))

    # Clamp the x anchor away from the frame border.
    if x == 1
        x = Int64(floor(video_width/2))
        @warn "Given x is too close to the border. Setting the value to " x
    elseif x < half_points
        x = half_points
        @warn "Given x is too close to the border. Setting the value to " x
    elseif x > video_width - half_points
        x = video_width - half_points
        @warn "Given x is too close to the border. Setting the value to " x
    end

    # Clamp the y anchor away from the frame border.
    if y == 1
        y = Int64(floor(video_height/2))
        @warn "Given y is too close to the border. Setting the value to " y
    elseif y < half_points
        y = half_points
        @warn "Given y is too close to the border. Setting the value to " y
    elseif y > video_height - half_points
        y = video_height - half_points
        @warn "Given y is too close to the border. Setting the value to " y
    end

    if spread*points_per_dim+x > video_width || spread*points_per_dim+y > video_height
        @warn "Given patch parameters might result in indicies exceeding frame size."
    end

    if distribution == "uniform"
        # Enlarge the step by one when plain floor division would not yield
        # exactly `points_per_dim` indices per dimension.
        row_step = Int64(floor(video_height/points_per_dim))
        column_step = Int64(floor(video_width/points_per_dim))
        (video_height/row_step != points_per_dim) && (row_step += 1)
        (video_width/column_step != points_per_dim) && (column_step += 1)
        vertical_indicies = reshape(collect(1:row_step:video_height), (1, points_per_dim))
        horizontal_indicies = reshape(collect(1:column_step:video_width), (1, points_per_dim))
        indicies_set = [vertical_indicies; horizontal_indicies]
    elseif distribution == "random"
        vertical_indicies = rand(1:video_height, 1, points_per_dim)
        horizontal_indicies = rand(1:video_width, 1, points_per_dim)
        if sorted
            vertical_indicies = reshape(sort(vertical_indicies[1, :]), (1, points_per_dim))
            horizontal_indicies = reshape(sort(horizontal_indicies[1, :]), (1, points_per_dim))
        end
        indicies_set = [vertical_indicies; horizontal_indicies]
    elseif distribution == "patch"
        offsets = collect(1:spread:(spread*points_per_dim))
        indicies_set = [offsets .+ x offsets .+ y]'
    end
    return indicies_set
end
"""
    extract_pixels_from_video(video_array, indicies_set, video_dim_tuple)

Take every frame from `video_array` and keep only the pixels whose indices
are listed in `indicies_set` (row 1: vertical, row 2: horizontal), producing
a 3-D array of size `n x n x frames` where `n = size(indicies_set, 2)`.
"""
function extract_pixels_from_video(video_array, indicies_set, video_dim_tuple)
    n = size(indicies_set, 2)
    video_length = video_dim_tuple[3]
    extracted_pixels = zeros(n, n, video_length)
    for frame_number in 1:video_length
        frame_img = video_array[frame_number]
        extracted_pixels[:, :, frame_number] =
            frame_img[indicies_set[1, :], indicies_set[2, :]]
    end
    return extracted_pixels
end
"""
    vectorize_video(video)

Rearrange a `rows x columns x frames` video into a matrix in which each row
holds the time series of one pixel (pixels taken row by row) and each
column corresponds to one frame.
"""
function vectorize_video(video)
    rows, columns, video_length = size(video)
    vectorized_video = zeros(rows * columns, video_length)
    pixel = 1
    # Row-major pixel order: all columns of row 1, then row 2, and so on.
    for r in 1:rows, c in 1:columns
        vectorized_video[pixel, :] = video[r, c, :]
        pixel += 1
    end
    return vectorized_video
end
"""
    get_pairwise_correlation_matrix(vectorized_video, tau_max=25)

Computes pairwise correlation of the input signals accordingly to the formula
presented in the paper "Clique topology reveals intrinsic geometric structure
in neural correlations" by Chad Giusti et al.

Only the upper triangle is computed; the lower triangle is mirrored from it,
so the returned matrix is symmetric.
"""
function get_pairwise_correlation_matrix(vectorized_video, tau_max=25)
    n_signals = size(vectorized_video, 1)
    n_samples = size(vectorized_video, 2)
    C_ij = zeros(n_signals, n_signals)
    # Lags are given in frames.
    lags = -tau_max:1:tau_max
    for i in 1:n_signals
        for j in i:n_signals
            # Cross-covariance of the two signals at every lag, scaled by
            # the number of samples.
            ccg = crosscov(vectorized_video[i, :], vectorized_video[j, :], lags) ./ n_samples
            # Sums over the non-negative and non-positive lag halves; both
            # include the zero-lag term at index tau_max+1.
            forward = sum(ccg[tau_max+1:end])
            backward = sum(ccg[1:tau_max+1])
            r_i_r_j = 1
            C_ij[i, j] = max(forward, backward) / (tau_max * r_i_r_j)
            C_ij[j, i] = C_ij[i, j]
        end
    end
    return C_ij
end
"""
    get_average_from_tiles(extracted_pixels_matrix, N)

Split every frame of the 3-D array `extracted_pixels_matrix` into
non-overlapping tiles of size `N` x `N` and return an `N` x `N` x frames
array holding the mean value of each tile. Only the top-left `N^2` x `N^2`
region of each frame is used, so frames must be at least that large.
"""
function get_average_from_tiles(extracted_pixels_matrix, N)
    num_frames = size(extracted_pixels_matrix, 3)
    mask_matrix = ones(N, N)
    result_matrix = zeros(N, N, num_frames)

    for frame = 1:num_frames
        for col = 1:N:N^2
            for row = 1:N:N^2
                # BUG FIX: the tile index was previously `mod(col, N)` /
                # `mod(row, N)`, which equals 1 for every tile start
                # (col, row = 1, N+1, 2N+1, ...), so every average was
                # written into result_matrix[1, 1, frame]. Map each tile
                # start to its tile number instead.
                tile_col = (col - 1) ÷ N + 1
                tile_row = (row - 1) ÷ N + 1
                # dot with an all-ones mask sums the tile; dividing by N^2
                # yields the tile mean.
                result_matrix[tile_col, tile_row, frame] =
                    dot(extracted_pixels_matrix[col:(col+N-1),
                                                row:(row+N-1), frame], mask_matrix) ./ N^2
            end
        end
    end
    return result_matrix
end
"""
    rotate_img_around_center(img, angle = 5pi/6)

Rotate a single image (or frame) by `angle` radians around its center and
return the rotated image (sampled on the same axes as the input).
"""
function rotate_img_around_center(img, angle = 5pi/6)
    # Rotation recentered so it acts around the middle of the image.
    rot = recenter(RotMatrix(angle), [size(img)...] .÷ 2)
    # NOTE(review): a composed transform (`rot ∘ Translation(0, 0)`) was
    # previously built here but never used — `warp` was (and still is)
    # called with the plain rotation, so the dead code was removed.
    return warp(img, rot, axes(img))
end
"""
    rotate_vid_around_center(src_vid_path, src_vid_name; rotation = 5pi/6)

Open the video file `src_vid_name` located at `src_vid_path`, rotate every
frame by `rotation` radians around the frame center, and return the rotated
frames collected in a vector.
"""
function rotate_vid_around_center(src_vid_path,src_vid_name; rotation = 5pi/6)
    # NOTE(review): untyped `[]` yields a Vector{Any}; works, but a typed
    # container would be cheaper.
    video_array = []
    video_src_strm = VideoIO.open(src_vid_path*src_vid_name)
    # Decode frames as 8-bit grayscale.
    video_src = VideoIO.openvideo(video_src_strm,
                                  target_format=VideoIO.AV_PIX_FMT_GRAY8)

    while !eof(video_src)
        img = read(video_src)
        img = rotate_img_around_center(img, rotation)
        push!(video_array,img)
    end
    close(video_src)

    return video_array
end
"""
    export_images_to_vid(video_array, dest_file)

Export the set of images stored in `video_array` to the video file
`dest_file` by piping raw RGB frames into an `ffmpeg` subprocess.
"""
function export_images_to_vid(video_array, dest_file)
    @debug "Exporting set of images to file"
    fname = dest_file

    # Frame geometry and count come from a project helper.
    video_dimensions = get_video_dimension(video_array)
    h = video_dimensions.video_height
    w = video_dimensions.video_width
    nframes = video_dimensions.video_length
    overwrite=true
    fps=30
    options = ``
    ow = overwrite ? `-y` : `-n`

    # NOTE(review): the frame size is passed as $(h)x$(w) while ffmpeg expects
    # width x height; together with the "transpose=0" filter below this assumes
    # frames arrive transposed — confirm against the producer of video_array.
    open(`ffmpeg
            -loglevel warning
            $ow
            -f rawvideo
            -pix_fmt rgb24
            -s:v $(h)x$(w)
            -r $fps
            -i pipe:0
            $options
            -vf "transpose=0"
            -pix_fmt yuv420p
            $fname`, "w") do out
        # Stream each frame as clamped 8-bit RGB raw bytes.
        for i = 1:nframes
            write(out, convert.(RGB{N0f8}, clamp01.(video_array[i])))
        end
    end
    @debug "Video was saved"
end
"""
    rotate_and_save_video(src_vid_path, src_vid_name, dest_vid_name, rotation=5pi/6)

Open the video `src_vid_name` at `src_vid_path`, rotate every frame by
`rotation` radians around the frame center, and save the result as
`dest_vid_name` in the same directory. Returns early with a warning if the
source file does not exist or the destination file already exists.

Tested with the `.mov` extension. Writing to a video file follows:
https://discourse.julialang.org/t/creating-a-video-from-a-stack-of-images/646/7
"""
function rotate_and_save_video(src_vid_path, src_vid_name, dest_vid_name, rotation=5pi/6)
    @debug src_vid_path src_vid_name dest_vid_name

    if !isfile(src_vid_path*src_vid_name)
        @warn "Source file at given path does not exist. Please give another name."
        return
    elseif isfile(src_vid_path*dest_vid_name)
        @warn "File with destination video name at src_video_path already exists. Please give another name."
        return
    end

    # Fixed: the original called the undefined `rotate_vid_around_ceter` and
    # silently ignored the `rotation` argument.
    video_array = rotate_vid_around_center(src_vid_path, src_vid_name; rotation=rotation)
    @debug "Video was rotated"

    # NOTE(review): `export_images_to_exist_vid` is not defined in this file;
    # if it does not exist elsewhere, `export_images_to_vid` is the intended call.
    export_images_to_exist_vid(video_array, src_vid_path*dest_vid_name)
    # Fixed: `fname` was undefined here; report the actual destination path.
    @info "The file was created:\n $(src_vid_path*dest_vid_name)"
end
"""
    get_local_correlations(video_array, centers, sub_img_size, shift)

Computes the correlation between the subimages and subimages shifted by values
from the range -`shift`:`shift` and returns an array with frames of size
length(`centers`) x length(`centers`), with the number of frames equal to the
number of frames in `video_array`.

Each of the subimages is centered around the values stored in `centers`.
"""
function get_local_correlations(video_array, centers, sub_img_size, shift)
    half_size = ceil(Int,(sub_img_size-1)/2)
    half_range = half_size + shift
    h, w, len = get_video_dimension(video_array)
    # NOTE(review): the accumulator is allocated as
    # sub_img_size x sub_img_size but indexed by the number of centers
    # (size(centers, 2)); this only works when the two coincide — confirm.
    extracted_pixels = zeros(sub_img_size, sub_img_size, len)

    for frame = 1:len
        img = video_array[frame]
        for index_x = 1:size(centers,2)
            c_x = centers[2, index_x]
            for index_y = 1:size(centers,2)
                c_y = centers[1, index_y]
                # Window large enough to contain every shifted sub-image.
                subimage = img[(c_x-half_range):(c_x+half_range),
                               (c_y-half_range):(c_y+half_range)]
                center = img[(c_x-half_size):(c_x+half_size), (c_y-half_size):(c_y+half_size)]

                # Accumulate the (unnormalised) correlation of the centre patch
                # with every shifted patch inside the window.
                for left_boundary = 1:(2*shift+1)
                    for lower_boundary = 1:(2*shift+1)
                        corelation = center .* subimage[left_boundary:left_boundary+sub_img_size-1, lower_boundary:lower_boundary+sub_img_size-1]
                        corelation = sum(corelation)
                        extracted_pixels[index_x, index_y, frame] += corelation
                    end
                end
                # Normalise by pixel depth (256), patch area and shift count.
                extracted_pixels[index_x, index_y, frame] /= 256*(sub_img_size^2)*(shift*2)^2
            end
        end
    end
    return extracted_pixels
end
"""
    get_local_centers(points_per_dim, video_dimensions, shift=0, sub_img_size=0)

Return a 2 x `points_per_dim` matrix of center coordinates spread evenly so
that a sub-image of size `sub_img_size` around each center stays inside a
frame of size `video_dimensions` (the smaller dimension bounds the spread).
Both rows are identical: the same coordinates are used along both axes.
"""
function get_local_centers(points_per_dim, video_dimensions, shift=0, sub_img_size=0)
    # Fixed: a stray `/` before the TODO comment below made this function a
    # syntax error in the original source.
    # TODO Applied temporary solution here, so it works only for local gradients
    # start = 0
    # (points_per_dim>shift) ? start_ind = ceil(Int, points_per_dim/2)+ shift :
    # start=shift

    start_ind = ceil(Int, sub_img_size/2)
    min_va, = findmin(video_dimensions)
    last_ind = min_va - start_ind

    set = broadcast(floor, Int, range(start_ind, stop=last_ind, length=points_per_dim))
    # Same coordinates along both axes: rows 1 and 2 are identical.
    centers = [set set]'
    return centers
end
"""
    get_local_gradients(video_array, centers, sub_img_size)

Computes the gradients in the subimage, takes the mean of the sum of absolute
values of both horizontal and vertical gradients as a representative of a
subimage.
"""
function get_local_gradients(video_array, centers, sub_img_size)
    @debug "Entering get_local_gradients"
    half_size = ceil(Int,(sub_img_size-1)/2)
    # NOTE(review): `half_range` duplicates `half_size` and is never used below.
    half_range = half_size
    h, w, len = get_video_dimension(video_array)
    extracted_pixels = zeros(sub_img_size, sub_img_size, len)

    @debug "starting frame procesing"
    for frame = 1:len
        img = video_array[frame]
        # Sum of absolute horizontal and vertical image gradients (Ando 3x3,
        # replicated border).
        img_grad = imgradients(img, KernelFactors.ando3, "replicate")
        img_grad_abs = map(abs, img_grad[1]) + map(abs, img_grad[2])
        for index_x = 1:size(centers,2)
            c_x = centers[2, index_x]
            for index_y = 1:size(centers,2)
                c_y = centers[1, index_y]
                sub_img = img_grad_abs[(c_x-half_size):(c_x+half_size),
                                       (c_y-half_size):(c_y+half_size)]
                # One scalar per (center_x, center_y, frame): mean |gradient|.
                extracted_pixels[index_x, index_y, frame] = mean(sub_img)
            end
        end
        # @debug "Next frame" frame
    end
    return extracted_pixels
end
"""
    normalize_to_01(matrix, norm_factor=256)

Return a copy of `matrix` with values scaled towards the range [0, 1].
Negative inputs are first shifted so that all values are non-negative. If the
(pre-shift) maximum exceeds `norm_factor`, the matrix is divided by that
maximum instead of by `norm_factor`, and a warning is emitted.
"""
function normalize_to_01(matrix, norm_factor=256)
    normalized_matrix = copy(matrix)
    min_val = findmin(normalized_matrix)[1]
    max_val = findmax(normalized_matrix)[1]

    if min_val < 0
        normalized_matrix .-= min_val
    end

    if max_val > norm_factor
        # Fixed typo in the warning message ("notmalization").
        @warn "Values normalized to maximal value, not normalization factor."
        normalized_matrix = normalized_matrix./max_val
    else
        normalized_matrix = normalized_matrix./norm_factor
    end
    return normalized_matrix
end
"""
    shift_to_non_negative(matrix)

Return a matrix with non-negative values: if `matrix` contains negative
entries, the absolute value of the minimum is added to every element;
otherwise `matrix` is returned unchanged.
"""
function shift_to_non_negative(matrix)
    min_val = findmin(matrix)[1]
    if min_val < 0
        # Fixed: `matrix .-= min_val` mutated the caller's array even though
        # the function name carries no `!`; return a shifted copy instead.
        return matrix .- min_val
    else
        return matrix
    end
end
"""
    plotimg(matrix_to_plot, cut_off=false)

Display a matrix as a grayscale image. The values are adjusted towards the
range [0, 1] before plotting.

If `cut_off` is true, values above 256 are clipped to 256 and the result is
normalized by 256. If `cut_off` is false, values above 256 cause normalization
by the maximal value instead.
"""
function plotimg(matrix_to_plot, cut_off=false)
    # Element type is captured before any shifting reassigns the local.
    matrix_type = typeof(matrix_to_plot)
    min_val = findmin(matrix_to_plot)[1]
    int_types_arr = [Matrix{UInt8}; Matrix{UInt16}; Matrix{UInt32};
                     Matrix{UInt64}; Matrix{UInt128}; Matrix{Int8};
                     Matrix{Int16}; Matrix{Int32}; Matrix{Int64};
                     Matrix{Int128}]
    # Consistent with int_types_arr: build a Vector, not a 1x3 row matrix.
    float_types_arr = [Matrix{Float16}; Matrix{Float32}; Matrix{Float64}]

    if min_val<0
        matrix_to_plot = shift_to_non_negative(matrix_to_plot)
    end

    max_val = findmax(matrix_to_plot)[1]
    if max_val > 256 && cut_off
        # Fixed: assigning a scalar through a vector of indices requires
        # broadcasting (`.=`); plain `=` throws in Julia >= 1.0.
        matrix_to_plot[findall(x -> x>256, matrix_to_plot)] .= 256
    end

    if in(matrix_type, int_types_arr)
        matrix_to_plot = normalize_to_01(matrix_to_plot)
    elseif in(matrix_type, float_types_arr)
        matrix_to_plot = normalize_to_01(matrix_to_plot, max_val)
    end

    return colorview(Gray, matrix_to_plot)
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 12072 | using TopologyPreprocessing
using Test
using Eirene
#%%
# Unit tests for BettiCurves.jl: get_bettis, normalise_bettis and
# get_vectorized_bettis, computed with Eirene on two fixture matrices.
@testset "BettiCurves.jl" begin
    # Symmetric "distance" matrix fixtures (8 and 10 points).
    sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
                               1 0 2 26 6 10 14 18;
                               25 2 0 3 7 11 15 19;
                               4 26 3 0 8 12 16 20;
                               5 6 7 8 0 21 27 24;
                               9 10 11 12 21 0 22 28;
                               13 14 15 16 27 22 0 23;
                               17 18 19 20 24 28 23 0 ]

    sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
                               1 1 2 42 6 10 14 18 26 34;
                               41 2 1 3 7 11 15 19 27 35;
                               4 42 3 1 8 12 16 20 28 36;
                               5 6 7 8 1 21 43 24 29 37;
                               9 10 11 12 21 1 22 44 30 38;
                               13 14 15 16 43 22 1 23 31 39;
                               17 18 19 20 24 44 23 1 32 40;
                               25 26 27 28 29 30 31 32 1 45;
                               33 34 35 36 37 38 39 40 45 1;]

    # get_bettis: one Array{Float64,2} per dimension in the requested range.
    for matrix = [sample_distance_matrix1, sample_distance_matrix2]
        for max_B_dim = 1:4
            eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
            all_bettis = get_bettis(eirene_results, max_B_dim)

            @test length(all_bettis) == max_B_dim
            @test all_bettis isa Vector{Array{Float64,2}}
        end

        for max_B_dim = 1:4, min_B_dim = 1:3
            if min_B_dim > max_B_dim
                @debug "Continue at " min_B_dim, max_B_dim
                continue
            end
            eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
            all_bettis = get_bettis(eirene_results, max_B_dim, min_dim=min_B_dim)

            @test length(all_bettis) == max_B_dim - (min_B_dim-1)
            @test all_bettis isa Vector{Array{Float64,2}}
        end
    end

    # normalise_bettis
    # as betticurve results
    for matrix = [sample_distance_matrix1, sample_distance_matrix2]
        for max_B_dim = 1:4, min_B_dim = 1:3
            if min_B_dim > max_B_dim
                @debug "Continue at " min_B_dim, max_B_dim
                continue
            end
            eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
            betti_result = betticurve(eirene_results, dim=max_B_dim)
            normed_all_bettis = normalise_bettis(betti_result)

            @test typeof(normed_all_bettis) == typeof(betti_result)
            @test length(normed_all_bettis) != max_B_dim
            @test size(normed_all_bettis) == size(betti_result)
            @test normed_all_bettis isa Array{Float64,2}

            # Betti values are unchanged:
            @test normed_all_bettis[:,2] == betti_result[:,2]

            # Max val is 1
            @test findmax(normed_all_bettis[:,1])[1] == 1.
        end
    end

    # as get_bettis results
    for matrix = [sample_distance_matrix1, sample_distance_matrix2]
        for max_B_dim = 1:4, min_B_dim = 1:3
            if min_B_dim > max_B_dim
                @debug "Continue at " min_B_dim, max_B_dim
                continue
            end
            eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
            all_bettis = get_bettis(eirene_results, max_B_dim)
            normed_all_bettis = normalise_bettis(all_bettis)

            @test typeof(normed_all_bettis) == typeof(all_bettis)
            @test length(normed_all_bettis) == max_B_dim
            @test normed_all_bettis isa Vector{Array{Float64,2}}

            # Betti values are unchanged:
            @test normed_all_bettis[max_B_dim][:,2] == all_bettis[max_B_dim][:,2]

            # Max val is 1
            @test findmax(normed_all_bettis[max_B_dim][:,1])[1] == 1.
        end
    end

    # get_vectorized_bettis: columns match the per-dimension Betti curves.
    let max_B_dim = 5,
        min_B_dim = 1,
        eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)

        let eirene_bettis = get_bettis(eirene_results, max_B_dim, min_dim=min_B_dim),
            vectorized_bettis = get_vectorized_bettis(eirene_results, max_B_dim, min_dim=min_B_dim)

            @test size(vectorized_bettis)[2] == max_B_dim - (min_B_dim-1)

            for d in min_B_dim:max_B_dim
                @test vectorized_bettis[:,d] == eirene_bettis[d][:,2]
            end
        end
    end
end
# Unit tests for plot_bettis: series content, labels, line widths and titles.
@testset "BettiCurves.jl -> plot bettis" begin
    # TODO remove tests which test Plots.plot function and not plot_bettis functionality
    sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
                               1 0 2 26 6 10 14 18;
                               25 2 0 3 7 11 15 19;
                               4 26 3 0 8 12 16 20;
                               5 6 7 8 0 21 27 24;
                               9 10 11 12 21 0 22 28;
                               13 14 15 16 27 22 0 23;
                               17 18 19 20 24 28 23 0 ]
    sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
                               1 1 2 42 6 10 14 18 26 34;
                               41 2 1 3 7 11 15 19 27 35;
                               4 42 3 1 8 12 16 20 28 36;
                               5 6 7 8 1 21 43 24 29 37;
                               9 10 11 12 21 1 22 44 30 38;
                               13 14 15 16 43 22 1 23 31 39;
                               17 18 19 20 24 44 23 1 32 40;
                               25 26 27 28 29 30 31 32 1 45;
                               33 34 35 36 37 38 39 40 45 1;]

    # plot_bettis tests for get_bettis:
    let max_B_dim = 5,
        min_B_dim = 1,
        eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)

        all_bettis = get_bettis(eirene_results, max_B_dim)
        p = plot_bettis(all_bettis);

        @test length(p.series_list) == max_B_dim-(min_B_dim-1)
        @test p.attr[:plot_title] == ""
        @test_throws DomainError plot_bettis(all_bettis, min_dim=max_B_dim+1)

        # One series per dimension, labelled "βd", with x/y from the curve.
        for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
            @test p.series_list[dim_index][:label] == "β$(dim)"
            if !isnan(all_bettis[dim_index][:,1][1])
                @test p.series_list[dim_index][:x] == all_bettis[dim_index][:,1]
                @test p.series_list[dim_index][:y] == all_bettis[dim_index][:,2]
            end
        end

        # for dim = min_B_dim:max_B_dim
        #     p = plot_bettis(all_bettis, min_dim = dim);
        #     @test length(p.series_list) == max_B_dim-(dim-1)
        # end

        let p1 = plot_bettis(all_bettis, betti_labels=false)
            for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
                @test p1.series_list[dim][:label] == "y$(dim)"
                if !isnan(all_bettis[dim_index][:,1][1])
                    @test p1.series_list[dim][:x] == all_bettis[dim][:,1]
                    @test p1.series_list[dim][:y] == all_bettis[dim][:,2]
                end
            end
        end

        let lw=4,
            p1 = plot_bettis(all_bettis, betti_labels=true, lw=lw)
            for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
                @test p1.series_list[dim_index][:label] == "β$(dim)"
                @test p1.series_list[dim_index][:linewidth] == lw
            end
        end

        let plt_title = "test_title",
            p1 = plot_bettis(all_bettis, title=plt_title, lw=9, xlabel="2")
            @test_skip p1.attr[:plot_title] == plt_title # why plot-title is not returning the title?
            for dim = min_B_dim:max_B_dim
                @test p1.series_list[dim][:label] == "β$(dim)"
            end
        end

        let plt_title = "test_title",
            lw = 9,
            p1 = plot_bettis(all_bettis, title=plt_title, lw=lw, xlabel="2", default_labels=false)
            @test_skip p1.attr[:plot_title] == plt_title # why plot-title is not returning the title?
            for dim = min_B_dim:max_B_dim
                @test p1.series_list[dim][:label] == "β$(dim)"
                @test p1.series_list[dim][:linewidth] == lw
                # @test for xlabel
                # @test for no label
            end
        end
    end

    # plot_bettis tests for get_vectorized_bettis:
    let max_B_dim = 5,
        min_B_dim = 1,
        eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)

        all_bettis = get_vectorized_bettis(eirene_results, max_B_dim)
    end
end
# Tests for vectorize_bettis: shape preservation and column equality with the
# per-dimension curves returned by get_bettis / get_vectorized_bettis.
@testset "BettiCurves.jl -> area under betti curves" begin
    let sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
                                   1 0 2 26 6 10 14 18;
                                   25 2 0 3 7 11 15 19;
                                   4 26 3 0 8 12 16 20;
                                   5 6 7 8 0 21 27 24;
                                   9 10 11 12 21 0 22 28;
                                   13 14 15 16 27 22 0 23;
                                   17 18 19 20 24 28 23 0 ],
        sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
                                   1 1 2 42 6 10 14 18 26 34;
                                   41 2 1 3 7 11 15 19 27 35;
                                   4 42 3 1 8 12 16 20 28 36;
                                   5 6 7 8 1 21 43 24 29 37;
                                   9 10 11 12 21 1 22 44 30 38;
                                   13 14 15 16 43 22 1 23 31 39;
                                   17 18 19 20 24 44 23 1 32 40;
                                   25 26 27 28 29 30 31 32 1 45;
                                   33 34 35 36 37 38 39 40 45 1;],
        max_B_dim = 5,
        min_B_dim = 1

        #==
        checks if the size is anyhow changed during proces;
        checks is the values Array{Matrix} and reshaped matrix are the same
        ==#
        for min_B_dim in [1, 2, 3, 4, 5]
            eirene_results1 =
                eirene(sample_distance_matrix1, model = "vr", maxdim = max_B_dim)
            eirene_results2 =
                eirene(sample_distance_matrix2, model = "vr", maxdim = max_B_dim)

            bettis_collection = [
                get_bettis(eirene_results1, max_B_dim),
                get_bettis(eirene_results2, max_B_dim),
            ]

            for bettis_col in bettis_collection
                total_vecs = length(bettis_col)
                vec_len, vec_width = size(bettis_col[1])

                reshaped_betti = TopologyPreprocessing.vectorize_bettis(bettis_col)

                @test vec_len .== size(reshaped_betti, 1)
                @test total_vecs .== size(reshaped_betti, 2)

                for k = 1:total_vecs
                    @test reshaped_betti[:, k] == bettis_col[k][:, 2]
                end
            end
        end

        #==
        checks if get vectorized bettis has same values as get_bettis
        ==#
        for min_B_dim in [1, 2, 3, 4, 5]
            eirene_results1 =
                eirene(sample_distance_matrix1, model = "vr", maxdim = max_B_dim)
            eirene_results2 =
                eirene(sample_distance_matrix2, model = "vr", maxdim = max_B_dim)

            bettis_collection = [
                get_bettis(eirene_results1, max_B_dim),
                get_bettis(eirene_results2, max_B_dim),
            ]
            vec_bett_collection = [
                get_vectorized_bettis(eirene_results1, max_B_dim),
                get_vectorized_bettis(eirene_results2, max_B_dim),
            ]

            for index = 1:length(bettis_collection)
                bettis_col = bettis_collection[index]
                vec_bettis_col = vec_bett_collection[index]

                total_vecs = length(bettis_col)
                for k = 1:total_vecs
                    @test vec_bettis_col[:, k] == bettis_col[k][:, 2]
                end
            end
        end
    end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 9341 | using TopologyPreprocessing
using Test
using Random
Random.seed!(1234)
# using MatrixOrganization
# Tests for matrix_poling and subsample_matrix (max-pooling based
# coarse-graining of symmetric matrices).
@testset "MatrixOrganization.jl -> matrix pooling" begin
    in_vector = [1 2 3 4 3 2 1]
    in_matrix = [1 2 3; 5 6 7; 8 9 0]
    in_matrix2 = [1 2 3; 5 6 7; 8 9 0]

    sqr_matrix0 = [1 2 3; 2 6 7; 3 7 0]
    resulting_matrix0_2 = [1 2 3; 2 6 7; 3 7 0]

    sqr_matrix1 = [ 0 1 13 4 5 9;
                    1 0 2 14 6 10;
                    13 2 0 3 7 11;
                    4 14 3 0 8 12;
                    5 6 7 8 0 15;
                    9 10 11 12 15 0]
    resulting_matrix1_2 = [0 1 14 14 10 10;
                           1 0 14 14 10 10;
                           14 14 0 3 12 12;
                           14 14 3 0 12 12;
                           10 10 12 12 0 15;
                           10 10 12 12 15 0]

    sqr_matrix2 = [ 0 1 113 4 5 9 13 17 81 82 83 84;
                    1 0 2 114 6 10 14 18 85 86 87 88;
                    113 2 0 3 7 11 15 19 89 90 91 92;
                    4 114 3 0 8 12 16 20 93 94 95 96;
                    5 6 7 8 0 21 115 24 29 30 31 32;
                    9 10 11 12 21 0 22 116 33 34 35 36;
                    13 14 15 16 115 22 0 23 37 38 39 40;
                    17 18 19 20 24 116 23 0 41 42 43 44;
                    81 85 89 93 29 33 37 41 0 25 117 28;
                    82 86 90 94 30 34 38 42 25 0 26 118;
                    83 87 91 95 31 35 39 43 117 26 0 27;
                    84 88 92 96 32 36 40 44 28 118 27 0]
    result_matrix2_2 = [ 0 1 114 114 10 10 18 18 86 86 88 88;
                         1 0 114 114 10 10 18 18 86 86 88 88;
                         114 114 0 3 12 12 20 20 94 94 96 96;
                         114 114 3 0 12 12 20 20 94 94 96 96;
                         10 10 12 12 0 21 116 116 34 34 36 36;
                         10 10 12 12 21 0 116 116 34 34 36 36;
                         18 18 20 20 116 116 0 23 42 42 44 44;
                         18 18 20 20 116 116 23 0 42 42 44 44;
                         86 86 94 94 34 34 42 42 0 25 118 118;
                         86 86 94 94 34 34 42 42 25 0 118 118;
                         88 88 96 96 36 36 44 44 118 118 0 27;
                         88 88 96 96 36 36 44 44 118 118 27 0]
    result_matrix2_3 = [ 0 1 113 114 114 114 89 89 89 92 92 92;
                         1 0 2 114 114 114 89 89 89 92 92 92;
                         113 2 0 114 114 114 89 89 89 92 92 92;
                         114 114 114 0 8 12 116 116 116 96 96 96;
                         114 114 114 8 0 21 116 116 116 96 96 96;
                         114 114 114 12 21 0 116 116 116 96 96 96;
                         89 89 89 116 116 116 0 23 37 117 117 117;
                         89 89 89 116 116 116 23 0 41 117 117 117;
                         89 89 89 116 116 116 37 41 0 117 117 117;
                         92 92 92 96 96 96 117 117 117 0 26 118;
                         92 92 92 96 96 96 117 117 117 26 0 27;
                         92 92 92 96 96 96 117 117 117 118 27 0]
    result_matrix2_4 = [ 0 1 114 114 20 20 20 20 96 96 96 96;
                         1 0 114 114 20 20 20 20 96 96 96 96;
                         114 114 0 3 20 20 20 20 96 96 96 96;
                         114 114 3 0 20 20 20 20 96 96 96 96;
                         20 20 20 20 0 21 116 116 44 44 44 44;
                         20 20 20 20 21 0 116 116 44 44 44 44;
                         20 20 20 20 116 116 0 23 44 44 44 44;
                         20 20 20 20 116 116 23 0 44 44 44 44;
                         96 96 96 96 44 44 44 44 0 25 118 118;
                         96 96 96 96 44 44 44 44 25 0 118 118;
                         96 96 96 96 44 44 44 44 118 118 0 27;
                         96 96 96 96 44 44 44 44 118 118 27 0]

    # matrix_poling returns a new array (no aliasing) with max-pooled values.
    @test matrix_poling(in_vector) !== in_vector
    @test matrix_poling(in_vector, method="max_pooling") == [4 4 4 4 4 4 4]
    # @test matrix_poling!(in_vector) === in_vector
    # @test matrix_poling!(in_vector) == [4 4 4 4 4 4 4]

    @test matrix_poling(in_matrix) !== in_matrix
    @test matrix_poling(in_matrix, method="max_pooling") == 9 .* ones(size(in_matrix))
    # @test matrix_poling!(in_matrix) === in_matrix
    # @test matrix_poling!(in_matrix) == 9 .* ones(size(in_matrix))

    @test matrix_poling(in_matrix2[1:2,1:2], method="max_pooling") == 6 .* ones(size(in_matrix2[1:2,1:2]))
    @test matrix_poling(in_matrix2[1:2,1:2], method="max_pooling") != in_matrix2[1:2,1:2]

    # ====
    # Subsampling matrix

    # Function is supposed to work only on upper half, and here the upper half is too small, so there are no operations
    @test subsample_matrix(sqr_matrix0, subsamp_size=2, method="max_pooling") == resulting_matrix0_2

    @test subsample_matrix(sqr_matrix1, subsamp_size=2, method="max_pooling") == resulting_matrix1_2

    @test subsample_matrix(sqr_matrix2, subsamp_size=2, method="max_pooling") == result_matrix2_2

    @test subsample_matrix(sqr_matrix2, subsamp_size=3, method="max_pooling") == result_matrix2_3

    @test subsample_matrix(sqr_matrix2, subsamp_size=4, method="max_pooling") == result_matrix2_4
end
# Tests for add_random_patch: patched-index bookkeeping, untouched entries,
# explicit patch locations, and error handling for oversized patches.
@testset "MatrixOrganization.jl -> add_random_patch" begin
    # TODO set seed for add_random_path
    # TODO Seed has to be set for this test
    in_vector = [1, 2, 3, 4, 3, 2, 1]

    sqr_matrix0 = [ 1 2 3;
                    2 6 7;
                    3 7 0]
    sqr_matrix1 = [1 2 3 4 5;
                   2 1 6 7 8;
                   3 6 1 9 10;
                   4 7 9 1 11;
                   5 8 10 11 1]
    sqr_matrix2 = [ 0 1 13 4 5 9;
                    1 0 2 14 6 10;
                    13 2 0 3 7 11;
                    4 14 3 0 8 12;
                    5 6 7 8 0 15;
                    9 10 11 12 15 0]
    sqr_matrix3 = [ 0 1 113 4 5 9 13 17 81 82 83 84;
                    1 0 2 114 6 10 14 18 85 86 87 88;
                    113 2 0 3 7 11 15 19 89 90 91 92;
                    4 114 3 0 8 12 16 20 93 94 95 96;
                    5 6 7 8 0 21 115 24 29 30 31 32;
                    9 10 11 12 21 0 22 116 33 34 35 36;
                    13 14 15 16 115 22 0 23 37 38 39 40;
                    17 18 19 20 24 116 23 0 41 42 43 44;
                    81 85 89 93 29 33 37 41 0 25 117 28;
                    82 86 90 94 30 34 38 42 25 0 26 118;
                    83 87 91 95 31 35 39 43 117 26 0 27;
                    84 88 92 96 32 36 40 44 28 118 27 0]

    # Helper: all indices of `input_matrix` except those listed in `ind`.
    function get_unchanged_indices(input_matrix,ind)
        indices = CartesianIndices(size(input_matrix))
        indices = findall(x->x!=ind,indices)
        for i = ind
            indices = indices[findall(x->x!=i,indices)]
        end
        return indices
    end

    out_m, ind = add_random_patch(sqr_matrix0)
    indices = get_unchanged_indices(sqr_matrix0,ind)

    @test size(ind) == (1,2)
    @test sqr_matrix0[indices] == out_m[indices]

    # Large values make an accidental collision with the random patch unlikely.
    big_sqr_matrix0 = sqr_matrix0 .*100
    out_m, ind = add_random_patch(big_sqr_matrix0, patch_size=1,total_patches=2)
    indices = get_unchanged_indices(big_sqr_matrix0,ind)

    @test size(ind) == (2,2)
    @test big_sqr_matrix0[indices] == out_m[indices]
    @test sum(big_sqr_matrix0[ind] .!= out_m[ind]) == 4
    @test sum(big_sqr_matrix0[ind] .== out_m[ind]) == 0

    out_m, ind = add_random_patch(sqr_matrix1, patch_size=1,total_patches=2)
    indices = get_unchanged_indices(sqr_matrix1,ind)

    @test size(ind) == (2,2)
    @test sqr_matrix1[indices] == out_m[indices]
    # TODO those 2 tests fails when random value is the equal to one that is replaced
    # @test sum(sqr_matrix1[ind] .!= out_m[ind]) == 4
    # @test sum(sqr_matrix1[ind] .== out_m[ind]) == 0

    # ===
    input_matrix = sqr_matrix1

    # Helper: run add_random_patch and verify the patched-index count and that
    # everything outside the returned indices is untouched.
    function test_adding_rand_patch(input_matrix, t_patches,p_size)
        out_m, ind = add_random_patch(input_matrix, patch_size=p_size, total_patches=t_patches)
        indices = get_unchanged_indices(input_matrix,ind)

        @test size(ind) == (t_patches*p_size^2,2)
        @test input_matrix[indices] == out_m[indices]
        # For values from range, tests below does not make sense:
        # @test sum(input_matrix[ind] .!= out_m[ind]) == length(ind)
        # @test sum(input_matrix[ind] .== out_m[ind]) == 0
    end

    t_patches = 1
    p_size = 2
    test_adding_rand_patch(sqr_matrix0, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix1, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix3, t_patches,p_size)

    t_patches = 1
    p_size = 3
    test_adding_rand_patch(sqr_matrix0, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix1, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix3, t_patches,p_size)

    # A patch bigger than the matrix must raise; 3 encodes "unknown error".
    t_patches = 1
    p_size = 4
    correct_error = 3
    # TODO change this into test_throws
    try
        add_random_patch(sqr_matrix0, patch_size=p_size, total_patches=t_patches)
    catch err
        my_error = 0
        if isa(err, DomainError)
            println("DomainError")
            my_error = 1
        elseif isa(err, DimensionMismatch)
            println("DimensionMismatch")
            my_error = 2
        else
            println("Unknow error")
            my_error = 3
        end
        @test my_error == correct_error
    end

    test_adding_rand_patch(sqr_matrix1, t_patches, p_size)
    test_adding_rand_patch(sqr_matrix2, t_patches, p_size)
    test_adding_rand_patch(sqr_matrix3, t_patches, p_size)

    t_patches = 3
    p_size = 5
    test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
    test_adding_rand_patch(sqr_matrix3, t_patches,p_size)

    # ===
    # locations
    locations1 = [CartesianIndex(1,2), CartesianIndex(2,3)]
    locations2 = [CartesianIndex(1,2), CartesianIndex(99,3)]
    t_patches = 1
    p_size = 1

    out_m, ind = add_random_patch(sqr_matrix1, patch_size=p_size, total_patches=t_patches,locations=locations1)
    indices = get_unchanged_indices(sqr_matrix1,ind)

    @test ind[:,1] == locations1
    @test size(ind) == (size(locations1)[1]*p_size^2,2)
    @test sqr_matrix1[indices] == out_m[indices]
    # The number of
    @test sum(sqr_matrix1[locations1] .!= out_m[locations1]) == length(locations1)
    @test sum(sqr_matrix1[locations1] .== out_m[locations1]) == 0

    # An out-of-bounds location must raise something other than DomainError.
    correct_error = 0
    try
        out_m, ind = add_random_patch(sqr_matrix1, patch_size=p_size, total_patches=t_patches,locations=locations2)
    catch err
        # global correct_error
        if isa(err, DomainError)
            correct_error = 1
        else
            correct_error = 2
        end
    finally
        # global correct_error
        @test correct_error == 2
    end
    # TODO test for index below diagonal
    # TODO too many indices
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 28975 | using TopologyPreprocessing
using Test
using LinearAlgebra
# using MatrixOrganization
# Tests for shift_to_non_negative, normalize_to_01 and diagonal_symmetrize.
@testset "MatrixProcessing.jl -> basics" begin
    # Ints
    positive_matrix = [1 2 3;
                       4 5 6]
    negative_matrix = [1 2 -3;
                       -4 5 6]

    @test shift_to_non_negative(positive_matrix) == positive_matrix
    @test isempty(findall(x->x<0, shift_to_non_negative(negative_matrix)))

    # Floats
    positive_matrix2 = [1. 2 3; 4 5 6]
    negative_matrix2 = [1 2 -3; -4 5 6]

    @test shift_to_non_negative(positive_matrix2) == positive_matrix2
    @test isempty(findall(x->x<0, shift_to_non_negative(negative_matrix2)))

    positive_3d_matrix = rand(2,3,4)
    negative_3d_matrix = rand(2,3,4).*2 .-1

    @test shift_to_non_negative(positive_3d_matrix) == positive_3d_matrix
    @test isempty(findall(x->x<0, shift_to_non_negative(negative_3d_matrix)))

    # =====================
    @test findmin(normalize_to_01(negative_matrix))[1] == 0
    @test findmax(normalize_to_01(negative_matrix))[1] == 1

    powers_of_2 = [0 ; [2^k for k in 0:2:8]]
    powers_of_3 = [0 ; [3^k for k in 0:2:8]]

    @test findmin(normalize_to_01(powers_of_2, use_factor=true))[1] == 0
    @test findmax(normalize_to_01(powers_of_2, use_factor=true))[1] == 1

    # With the default factor (256 < 3^8), the maximum stays above 1.
    @test findmin(normalize_to_01(powers_of_3, use_factor=true))[1] == 0
    @test findmax(normalize_to_01(powers_of_3, use_factor=true))[1] > 1

    @test findmin(normalize_to_01(powers_of_3, use_factor=true, norm_factor=3^8))[1] == 0
    @test findmax(normalize_to_01(powers_of_3, use_factor=true, norm_factor=3^8))[1] == 1

    # =====================
    square_matrix = [1 2 3;
                     4 5 6;
                     7 8 9]

    @test issymmetric(diagonal_symmetrize(square_matrix))
    @test LinearAlgebra.checksquare(diagonal_symmetrize(square_matrix)) == 3

    @test issymmetric(diagonal_symmetrize(positive_matrix))
    @test LinearAlgebra.checksquare(diagonal_symmetrize(positive_matrix)) == 2

    # Default: upper triangle is mirrored onto the lower one.
    @test diagonal_symmetrize(square_matrix)[end,1] == square_matrix[1,end]
    @test diagonal_symmetrize(square_matrix)[2,1] == square_matrix[1,2]

    # below_over_upper=true mirrors the lower triangle instead.
    @test diagonal_symmetrize(square_matrix, below_over_upper=true)[1,end] == square_matrix[end,1]
    @test diagonal_symmetrize(square_matrix, below_over_upper=true)[1,2] == square_matrix[2,1]
end
# Tests for group_distances (bucketing matrix values into k groups) and
# generate_indices (upper-triangle index generation with/without diagonal).
@testset "MatrixProcessing.jl -> distances and indices" begin
    # Ints
    positive_matrix = [1 2 3;4 5 6]
    positive_matrix2 = [1. 2 3; 4 5 6]
    positive_3d_matrix = rand(2,3,4)

    negative_matrix = [1 2 -3;-4 5 6]
    negative_matrix2 = [1 2 -3; -4 5 6]
    negative_3d_matrix = rand(2,3,4).*2 .-1
    negative_6d_matrix = rand(2,3,4,5,6,7).*2 .-1

    powers_of_2 = [0 ; [2^k for k in 0:2:8]]
    powers_of_3 = [0 ; [3^k for k in 0:2:8]]

    square_matrix = [1 2 3;
                     4 5 6;
                     7 8 9]

    # ========================================================
    # group_distances(m, k) buckets values into exactly k distinct groups;
    # asking for more groups than distinct values throws DomainError.
    @test length(unique(group_distances(square_matrix,1))) == 1
    @test length(unique(group_distances(square_matrix,2))) == 2
    @test length(unique(group_distances(square_matrix,9))) == 9
    @test_throws DomainError group_distances(square_matrix,20)

    @test length(unique(group_distances(positive_matrix,1))) == 1
    @test length(unique(group_distances(positive_matrix,2))) == 2
    @test length(unique(group_distances(positive_matrix,6))) == 6
    @test_throws DomainError group_distances(positive_matrix,20)

    @test length(unique(group_distances(positive_3d_matrix,2))) == 2
    @test length(unique(group_distances(negative_3d_matrix,2))) == 2
    @test length(unique(group_distances(negative_6d_matrix,2))) == 2

    group_distances(square_matrix,2)

    # ========================================================
    # Full index set has n^2 entries; symmetry_order keeps the upper triangle.
    @test length(generate_indices(3))==3^2
    @test length(generate_indices(223))==223^2

    n=3
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) ÷ 2)
    n=9
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) ÷ 2)

    index_matrix1 = generate_indices(n, symmetry_order=true, include_diagonal=false)
    @test length(findall(x-> x == CartesianIndex(n,n),index_matrix1)) == 0
    @test length(findall(x-> x == CartesianIndex(n-2,n-1),index_matrix1)) == 1
    @test length(findall(x-> x == CartesianIndex(n-1,n-2),index_matrix1)) == 0

    n=223
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) ÷ 2)
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == ((n^2 - n) ÷ 2 + n)

    n=3
    index_matrix2 = generate_indices(n, symmetry_order=true, include_diagonal=true)
    @test length(index_matrix2) == (((n-1)*n)÷2 +n)
    @test findmax(index_matrix2)[1] == CartesianIndex(n,n)
    @test length(findall(x-> x == CartesianIndex(1,n),index_matrix2)) == 1
    @test length(findall(x-> x == CartesianIndex(1,1),index_matrix2)) == 1
    @test length(findall(x-> x == CartesianIndex(n,n),index_matrix2)) == 1

    n=9
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == (((n-1)*n)÷2 +n)
    n=223
    @test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == (((n-1)*n)÷2 +n)

    # Tuple form of the size argument behaves like the scalar form.
    n=9
    @test length(generate_indices((n,n), symmetry_order=true, include_diagonal=true)) == (((n-1)*n)÷2 +n)
    n=223
    @test length(generate_indices((n,n), symmetry_order=true, include_diagonal=true)) == (((n-1)*n)÷2 +n)
end
# @testset "MatrixProcessing.jl -> arrays of arrays" begin
# # Need to be taken from Bettis
# # arr_of_arrs
#
# # reduce_arrs_to_min_len(arr_of_arrs)
#
# end
@testset "MatrixProcessing.jl -> matrix ordering helping functions" begin
let testing_matrix0 = Array{Float64,2}(undef, 2, 3),
testing_matrix1 = [1 2 3; 4 5 6; 7 8 9],
testing_matrix2 = ones((2,3,4))
testing_matrix3 = [1 2 3; 4 5 6]
testing_matrix4 = [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test arr_to_vec(testing_matrix0) isa Vector
@test length(testing_matrix0) == length(arr_to_vec(testing_matrix0))
@test arr_to_vec(testing_matrix1) isa Vector
@test length(testing_matrix1) == length(arr_to_vec(testing_matrix1))
@test arr_to_vec(testing_matrix1) == [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test arr_to_vec(testing_matrix2) isa Vector
@test length(testing_matrix2) == length(arr_to_vec(testing_matrix2))
@test arr_to_vec(testing_matrix3) isa Vector
@test length(testing_matrix3) == length(arr_to_vec(testing_matrix3))
@test arr_to_vec(testing_matrix3) == [1, 4, 2, 5, 3, 6]
@test arr_to_vec(testing_matrix4) isa Vector
@test length(testing_matrix4) == length(arr_to_vec(testing_matrix4))
@test arr_to_vec(testing_matrix4) == [1, 4, 7, 2, 5, 8, 3, 6, 9]
end
let testing_matrix1 = CartesianIndices((2,2)),
testing_matrix2 = CartesianIndices((9,1))
@test cartesianInd_to_vec(testing_matrix1) isa Vector
@test length(cartesianInd_to_vec(testing_matrix1)) == length(testing_matrix1)
@test cartesianInd_to_vec(testing_matrix2) isa Vector
@test length(cartesianInd_to_vec(testing_matrix2)) == length(testing_matrix2)
end
let vals_matrix1a = [1 2 3; 4 5 6],
vals_matrix1b = [6 5 4; 3 2 1],
vals_matrix1c = [4 5 6; 1 2 3]
let index_matrix1a = [CartesianIndex(1,1), CartesianIndex(1,2), CartesianIndex(1,3), CartesianIndex(2,1), CartesianIndex(2,2), CartesianIndex(2,3)]
@test length(sort_indices_by_values(vals_matrix1a, index_matrix1a)) == length(vals_matrix1a)
@test sort_indices_by_values(vals_matrix1a, index_matrix1a) == collect(1:6)
@test length(sort_indices_by_values(vals_matrix1b, index_matrix1a)) == length(vals_matrix1b)
@test sort_indices_by_values(vals_matrix1b, index_matrix1a) == collect(6:-1:1)
@test length(sort_indices_by_values(vals_matrix1c, index_matrix1a)) == length(vals_matrix1c)
@test sort_indices_by_values(vals_matrix1c, index_matrix1a) == [4, 5, 6, 1, 2, 3]
end
let index_matrix1b = CartesianIndices((2,3))
@test_throws TypeError sort_indices_by_values(vals_matrix1a, index_matrix1b)
end
end
let vals_matrix2 = [1 2 3; 4 5 6; 7 8 9],
index_matrix2a = CartesianIndices((3,3)),
index_matrix2b = [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(1,3);
CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3);
CartesianIndex(3,1) CartesianIndex(3,2) CartesianIndex(3,3)],
index_matrix2c = [CartesianIndex(1,1), CartesianIndex(1,2), CartesianIndex(1,3),
CartesianIndex(2,1), CartesianIndex(2,2), CartesianIndex(2,3),
CartesianIndex(3,1), CartesianIndex(3,2), CartesianIndex(3,3)]
@test_throws TypeError sort_indices_by_values(vals_matrix2, index_matrix2a)
@test_throws TypeError sort_indices_by_values(vals_matrix2, index_matrix2b)
@test sort_indices_by_values(vals_matrix2, index_matrix2c) isa Vector
@test sort_indices_by_values(vals_matrix2, index_matrix2c) == 1:9
@test length(sort_indices_by_values(vals_matrix2, index_matrix2c)) == length(vals_matrix2)
end
let vals_matrix3 = [1, 4, 7, 2, 5, 8, 3, 6, 9],
index_matrix3a = CartesianIndices((9,1)),
index_matrix3b = CartesianIndices((9,)),
index_matrix3c = [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test_throws TypeError sort_indices_by_values(vals_matrix3, index_matrix3a)
@test_throws TypeError sort_indices_by_values(vals_matrix3, index_matrix3b)
@test sort_indices_by_values(vals_matrix3, index_matrix3c) isa Vector
@test sort_indices_by_values(vals_matrix3, index_matrix3c) == 1:9
@test length(sort_indices_by_values(vals_matrix3, index_matrix3c)) == length(vals_matrix3)
end
let target_coords1 = CartesianIndex(2,3),
target_value = -20
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
set_values!(some_matrix, target_coords1, target_value; do_symmetry=false)
@test some_matrix[target_coords1] == target_value
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
another_matrix = set_values!(some_matrix, target_coords1, target_value; do_symmetry=false)
@test some_matrix[target_coords1] == target_value
@test another_matrix[target_coords1] == target_value
@test another_matrix === some_matrix
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
another_matrix = set_values!(some_matrix, target_coords1, target_value; do_symmetry=true)
@test some_matrix[target_coords1] == target_value
@test some_matrix[target_coords1[1],target_coords1[2]] == target_value
@test some_matrix[target_coords1[2],target_coords1[1]] == target_value
@test another_matrix === some_matrix
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9],
some_matrix2 = [1 2 3; 4 5 6; 7 8 9],
target_coords2 = CartesianIndex(8,9)
@test_throws BoundsError set_values!(some_matrix, target_coords2, target_value; do_symmetry=false)
@test some_matrix == some_matrix2
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9],
target_coords3 = CartesianIndices((2,2))
@test_throws MethodError set_values!(some_matrix, target_coords3, target_value; do_symmetry=false)
end
end
end
@testset "MatrixProcessing.jl -> matrix ordering" begin
"""
check_for_min_val_position(input_matrix::Matrix; force_symmetry=false, assign_same_values=false)
Takes an 'input_matrix' and checks if its ordered form has the minimum value
in the same position as the minimum value of 'input_matrix'.
"""
# Helper for the ordering tests below: orders `input_matrix` via
# `get_ordered_matrix` and verifies that the positions holding the minimum
# value of the input are exactly the positions holding rank 1 in the ordered
# matrix. For symmetric input (or when symmetry is forced) the diagonal is
# excluded from the comparison.
function check_for_min_val_position(input_matrix::Matrix; force_symmetry=false, assign_same_values=false)
    # check if min val in uniquely value matrix is in the same position
    ordered_matrix = get_ordered_matrix(input_matrix, force_symmetry=force_symmetry, assign_same_values=assign_same_values)

    # symmetric case: diagonal is not part of the ordering
    if issymmetric(input_matrix) || force_symmetry
        off_diag_ind = generate_indices(size(input_matrix),include_diagonal=false)
    else
        off_diag_ind = generate_indices(size(input_matrix),include_diagonal=true)
    end

    # all positions of the input minimum must map to all positions of rank 1
    min_val = findmin(input_matrix[off_diag_ind])[1]
    # min_orig = findmin(input_matrix[off_diag_ind])[2]
    all_min_input = findall(x->x==min_val,input_matrix[off_diag_ind])
    all_min_ordered = findall(x->x==1,ordered_matrix[off_diag_ind])
    return off_diag_ind[all_min_input] == off_diag_ind[all_min_ordered]
end
# ==
let power_matrix = zeros(Int, (2,3,4))
for k = 1:4
power_matrix[:,:,k] = reshape([(k+1)^n for n =1:6], (2,3))
end
ordered_power_matrix = copy(power_matrix)
ordered_power_matrix[:, :, 1] =[1 6 12;
3 8 13]
ordered_power_matrix[:, :, 2] =[2 11 17;
7 15 20]
ordered_power_matrix[:, :, 3] = [4 14 21;
9 18 23]
ordered_power_matrix[:, :, 4] =[5 16 22;
10 19 24]
@test !isempty(get_ordered_matrix(power_matrix) == ordered_power_matrix)
@test get_ordered_matrix(power_matrix) == ordered_power_matrix
# @test_throws MethodError get_ordered_matrix(power_matrix)
end
# ==
let square_matrix1 = [1 2 3; 4 5 6; 7 8 9],
square_matrix2 = [1 4 7; 2 5 8; 3 6 9]
@test get_ordered_matrix(10 .*square_matrix1) == (square_matrix1 )
@test get_ordered_matrix(10 .*square_matrix2) == (square_matrix2 )
@test sum(get_ordered_matrix(square_matrix1) .== square_matrix1 ) == 9
@test sum(get_ordered_matrix(square_matrix2) .== square_matrix2 ) == 9
@test get_ordered_matrix(10 .*square_matrix1, force_symmetry=true) == get_ordered_matrix(square_matrix1, force_symmetry=true)
@test get_ordered_matrix(10 .*square_matrix2, force_symmetry=true) == get_ordered_matrix(square_matrix2, force_symmetry=true)
# check if min val in uniquely value matrix is in the same position
let input_matrix = 10square_matrix1
@test check_for_min_val_position(input_matrix)
end
end
# ==
let square_matrix_same_vals1 = [1 2 3; 3 4 5; 6 7 8],
square_matrix_same_vals2 = [1 3 6; 2 4 7; 3 5 8]
@test get_ordered_matrix(10 .*square_matrix_same_vals1, assign_same_values=true) == (square_matrix_same_vals1 )
@test get_ordered_matrix(10 .*square_matrix_same_vals2, assign_same_values=true) == (square_matrix_same_vals2 )
# forcing symmetry test
some_ord_mat = get_ordered_matrix(10 .*square_matrix_same_vals1, force_symmetry=true, assign_same_values=false)
# remove 1, because 0 is not off diagonal
@test length(unique(some_ord_mat))-1 == (size(square_matrix_same_vals1,1)*(size(square_matrix_same_vals1,1)-1))/2
end
# ==
let square_matrix_same_vals3 = [1 2 3; 3 4 3; 5 6 7],
square_matrix_same_vals4 = [1 3 3; 2 4 6; 3 3 7]
@test get_ordered_matrix(10 .*square_matrix_same_vals3, force_symmetry=true, assign_same_values=true) ==
get_ordered_matrix(square_matrix_same_vals3, force_symmetry=true, assign_same_values=true)
@test get_ordered_matrix(10 .*square_matrix_same_vals4, force_symmetry=true, assign_same_values=true) ==
get_ordered_matrix(square_matrix_same_vals4, force_symmetry=true, assign_same_values=true)
end
# ==================
# Tests on symmetric matrices
let test_mat_b1 = [ 1 1 1 4 5 9 ;
1 1 2 3 6 10;
1 2 1 3 7 11;
4 3 3 1 8 12;
5 6 7 8 1 13;
9 10 11 12 13 1 ;]
# no same values
let test_mat_b1_ord1 = [ 0 1 2 6 7 11;
1 0 3 4 8 12;
2 3 0 5 9 13;
6 4 5 0 10 14;
7 8 9 10 0 15;
11 12 13 14 15 0],
test_mat_b1_indices = generate_indices(size(test_mat_b1))
ord_mat_b1_1 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=false, assign_same_values=false)
@test issymmetric(ord_mat_b1_1)
@test ord_mat_b1_1[test_mat_b1_indices] == test_mat_b1_ord1[test_mat_b1_indices]
ord_mat_b1_2 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b1_2)
@test ord_mat_b1_2[test_mat_b1_indices] == test_mat_b1_ord1[test_mat_b1_indices]
end
# assign same values
let test_mat_b1_ord2 = [ 0 1 1 4 5 9 ;
1 0 2 3 6 10;
1 2 0 3 7 11;
4 3 3 0 8 12;
5 6 7 8 0 13;
9 10 11 12 13 0 ;]
let ord_mat_b1_3 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=false, assign_same_values=true)
@test issymmetric(ord_mat_b1_3)
# Removed, because ordered diagonal is all 1: @test ord_mat_b1_3 == (test_mat_b1 )
@test ord_mat_b1_3 == test_mat_b1_ord2
end
let ord_mat_b1_4 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b1_4)
# Removed, because ordered diagonal is all 1: @test ord_mat_b1_4 == (test_mat_b1 )
@test ord_mat_b1_4 == test_mat_b1_ord2
end
end
let input_matrix = 10 .*test_mat_b1
@test check_for_min_val_position(input_matrix; force_symmetry=false, assign_same_values=true)
end
end
# ==
# Non-symmetric matrix test
let test_mat_b2 = [ 1 1 3 4 5 9 ;
1 1 2 3 6 10;
14 2 1 3 7 11;
4 15 3 1 8 12;
5 5 7 8 1 13;
9 10 11 12 13 1 ;]
let ord_mat_b2_1 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=false, assign_same_values=false)
@test !issymmetric(ord_mat_b2_1)
@test findall(x->x<=8,ord_mat_b2_1) == findall(x->x==1,test_mat_b2) # all values with one are first 8 values used for ordering
@test length(unique(ord_mat_b2_1)) == length(test_mat_b2) #check if all values are unique
end
let test_mat_b2_ord1 = [0 1 6 4 7 11;
1 0 2 5 8 12;
6 2 0 3 9 13;
4 5 3 0 10 14;
7 8 9 10 0 15;
11 12 13 14 15 0 ;]
test_mat_b2_indices = generate_indices(size(test_mat_b2))
filter!(x->x!=CartesianIndex(1,3), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b2_indices)
ord_mat_b2_2 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b2_2)
@test ord_mat_b2_2[test_mat_b2_indices] == test_mat_b2_ord1[test_mat_b2_indices]
# forcing symmetry test:
@test length(unique(ord_mat_b2_2))-1 == (size(test_mat_b2,1)*(size(test_mat_b2,1)-1))/2
end
# TODO to fix tests, add 1 to all values off diagonal for the input matrix
let test_mat_b2_ord2 = [0 0 2 3 4 8;
0 0 1 2 5 9;
13 1 0 2 6 10;
3 14 2 0 7 11;
4 4 6 7 0 12;
8 9 10 11 12 0 ;]
ord_mat_b2_3 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=false, assign_same_values=true)
@test_skip !issymmetric(ord_mat_b2_3)
@test_skip ord_mat_b2_3 == test_mat_b2_ord2
end
# TODO to fix tests, add 1 to all values off diagonal for the input matrix
let test_mat_b2_ord3 = [0 0 2 3 4 8
0 0 1 2 5 9
2 1 0 2 6 10
3 2 2 0 7 11
4 5 6 7 0 12
8 9 10 11 12 0 ;]
ord_mat_b2_4 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=true, assign_same_values=true)
@test_skip issymmetric(ord_mat_b2_4)
@test_skip ord_mat_b2_4 == test_mat_b2_ord3
end
end
# ==
let test_mat_b3 = [ 1 1 3 4 5 9 ;
1 1 2 3 6 10;
3 2 1 3 7 11;
4 3 3 1 8 12;
5 6 7 8 1 13;
9 10 11 12 13 1 ;]
let test_mat_b3_ord1 = [ 0 0 5 3 6 10;
0 0 1 4 7 11;
5 1 0 2 8 12;
3 4 2 0 9 13;
6 7 8 9 0 14;
10 11 12 13 14 0 ;],
test_mat_b3_indices = generate_indices(size(test_mat_b3))
filter!(x->x!=CartesianIndex(1,3), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b3_indices)
ord_mat_b3_1 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=false, assign_same_values=false)
@test issymmetric(ord_mat_b3_1)
@test_skip ord_mat_b3_1[test_mat_b3_indices] == test_mat_b3_ord1[test_mat_b3_indices]
ord_mat_b3_2 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b3_2)
@test_skip ord_mat_b3_2[test_mat_b3_indices] == test_mat_b3_ord1[test_mat_b3_indices]
end
# TODO remove tests that do not add anything new for testing and are just another similar case
let test_mat_b3_ord2 = [ 0 0 2 3 4 8 ;
0 0 1 2 5 9 ;
2 1 0 2 6 10;
3 2 2 0 7 11;
4 5 6 7 0 12;
8 9 10 11 12 0 ;]
ord_mat_b3_3 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=false, assign_same_values=true)
@test issymmetric(ord_mat_b3_3)
@test_skip ord_mat_b3_3 == (test_mat_b3 .-1)
@test_skip ord_mat_b3_3 == test_mat_b3_ord2
ord_mat_b3_4 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b3_4)
@test_skip ord_mat_b3_4 == (test_mat_b3 .-1)
@test_skip ord_mat_b3_4 == test_mat_b3_ord2
end
let input_matrix = 10 .*test_mat_b3
@test check_for_min_val_position(input_matrix; force_symmetry=false, assign_same_values=true)
end
end
# ==
let test_mat_b4 = [ 1 1 41 4 5 9 13 17 25 33;
1 1 2 42 6 10 14 18 26 34;
41 2 1 3 7 11 15 19 27 35;
4 42 3 1 8 12 16 20 28 36;
5 6 7 8 1 21 43 24 29 37;
9 10 11 12 21 1 22 44 30 38;
13 14 15 16 43 22 1 23 31 39;
17 18 19 20 24 44 23 1 32 40;
25 26 27 28 29 30 31 32 1 45;
33 34 35 36 37 38 39 40 45 1;]
@test_skip get_ordered_matrix(10 .*test_mat_b4, force_symmetry=false, assign_same_values=false) == (test_mat_b4 .-1)
@test_skip get_ordered_matrix(10 .*test_mat_b4, force_symmetry=false, assign_same_values=true) == (test_mat_b4 .-1)
let ord_mat = get_ordered_matrix(10 .*test_mat_b4, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat)
@test_skip ord_mat == (test_mat_b4 .-1)
end
let ord_mat = get_ordered_matrix(10 .*test_mat_b4, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat)
@test_skip ord_mat == (test_mat_b4 .-1)
end
let input_matrix = 10test_mat_b4
@test check_for_min_val_position(input_matrix)
end
end
# ==
let test_mat_b5 = -[1 1 3 4 5 9 ;
1 1 2 3 6 10;
14 2 1 3 7 11;
4 15 3 1 8 12;
5 5 7 8 1 13;
9 10 11 12 13 1 ;]
let ord_mat_b5_1 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=false, assign_same_values=false)
@test !issymmetric(ord_mat_b5_1)
@test_skip findall(x->x>=28,ord_mat_b5_1) == findall(x->x==-1,test_mat_b5) # all values with one are first 8 values used for ordering
@test length(unique(ord_mat_b5_1)) == length(test_mat_b5) #check if all values are unique
end
let test_mat_b5_ord1 = [ 0 14 9 11 8 4
14 0 13 10 7 3
9 13 0 12 6 2
11 10 12 0 5 1
8 7 6 5 0 0
4 3 2 1 0 0 ;]
test_mat_b5_indices = generate_indices(size(test_mat_b5))
filter!(x->x!=CartesianIndex(1,3), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b5_indices)
ord_mat_b5_2 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b5_2)
@test_skip ord_mat_b5_2[test_mat_b5_indices] == test_mat_b5_ord1[test_mat_b5_indices]
# forcing symmetry test:
@test length(unique(ord_mat_b5_2))-1 == (size(test_mat_b5,1)*(size(test_mat_b5,1)-1))/2
end
let test_mat_b5_ord2 = [14 14 12 11 10 6;
14 14 13 12 9 5;
1 13 14 12 8 4;
11 0 12 14 7 3;
10 10 8 7 14 2;
6 5 4 3 2 14],
ord_mat_b5_3 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=false, assign_same_values=true)
@test !issymmetric(ord_mat_b5_3)
@test_skip ord_mat_b5_3 == test_mat_b5_ord2
end
let test_mat_b5_ord3 = [0 12 10 9 8 4;
12 0 11 10 7 3;
10 11 0 10 6 2;
9 10 10 0 5 1;
8 7 6 5 0 0;
4 3 2 1 0 0]
ord_mat_b5_4 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b5_4)
@test_skip ord_mat_b5_4 == test_mat_b5_ord3
end
end
# ==================
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | code | 464 | using SafeTestsets
# TODO add description to the tests
## ===-===-===-===-===-===-===-===-
# Each suite runs in an isolated module via SafeTestsets, so state cannot
# leak between the included test files.
@safetestset "MatrixProcessing tests" begin
    include("matrixProcessing_tests.jl")
end
## ===-===-===-===-===-===-===-===-
@safetestset "MatrixOrganization tests" begin
    include("matrixOrganisation_tests.jl")
end
## ===-===-===-===-===-===-===-===-
@safetestset "BettiCurves tests" begin
    include("bettiCurves_tests.jl")
end
## ===-===-===-===-===-===-===-===-
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | docs | 352 | # TopologyPreprocessing

## Installation
This package registeration is being processed now. After being registered, to install it, run the following.
```julia
julia> using Pkg
julia> Pkg.add("TopologyPreprocessing")
```
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | docs | 114 | # Example.jl Documentation
```@contents
```
## Functions
```@docs
get_barcodes(x)
```
## Index
```@index
```
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
|
[
"MIT"
] | 0.1.6 | ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e | code | 9780 | module OpenIDConnect
using HTTP
using JSON
using MbedTLS
using Base64
using Random
using JWTs
const DEFAULT_SCOPES = ["openid", "profile", "email"]
const DEFAULT_STATE_TIMEOUT_SECS = 60
const DEFAULT_SKEW_SECS = 2*60
const STATE_PURGE_TRIGGER = 1024
const DEFAULT_KEY_REFRESH_SECS = 60*60
export OIDCCtx, flow_request_authorization_code, flow_get_authorization_code, flow_get_token, flow_validate_id_token, flow_refresh_token
"""
Holds an OpenID Connect context that can be used in subsequent OpenID request flows.
The context holds request states, and configuration options.
"""
struct OIDCCtx
    states::Dict{String,Float64}    # outstanding request states => issue time (epoch seconds)
    state_timeout_secs::Int         # how long an issued state remains acceptable
    allowed_skew_secs::Int          # clock skew tolerated while validating token timestamps
    openid_config::Dict{String,Any} # provider configuration fetched from the discovery endpoint
    http_tls_opts::Dict{Symbol,Any} # TLS-related keyword args passed to every HTTP request
    validator::JWKSet               # the provider's JWT signing keys
    key_refresh_secs::Int           # interval after which signing keys should be re-fetched
    last_key_refresh::Float64       # NOTE(review): initialized to 0.0 below and never written
                                    # (this struct is immutable) — confirm whether key-refresh
                                    # throttling in flow_validate_id_token can ever engage
    client_id::String
    client_secret::String
    scopes::Vector{String}          # scopes requested during authorization
    redirect_uri::String            # URI the provider redirects back to after authorization
    random_device::RandomDevice     # entropy source for generating request states

    # The constructor performs a live HTTP fetch of the provider's discovery
    # document (".well-known/openid-configuration"), so constructing a context
    # requires network access to the issuer.
    function OIDCCtx(issuer::String, redirect_uri::String, client_id::String, client_secret::String, scopes::Vector{String}=DEFAULT_SCOPES;
            verify::Union{Nothing,Bool}=nothing, cacrt::Union{Nothing,String,MbedTLS.CRT}=nothing,
            state_timeout_secs::Int=DEFAULT_STATE_TIMEOUT_SECS, allowed_skew_secs::Int=DEFAULT_SKEW_SECS, key_refresh_secs::Int=DEFAULT_KEY_REFRESH_SECS,
            random_device::RandomDevice=RandomDevice())
        endswith(issuer, "/") || (issuer = issuer * "/")
        openid_config_url = issuer * ".well-known/openid-configuration"

        # assemble TLS options honoring the optional `verify` flag and custom CA
        http_tls_opts = Dict{Symbol,Any}()
        http_tls_opts[:socket_type_tls] = MbedTLS.SSLContext

        if verify !== nothing
            http_tls_opts[:require_ssl_verification] = verify
        end

        if cacrt !== nothing
            # `cacrt` may be a path to a PEM file, PEM text, or a parsed CRT
            if isa(cacrt, String)
                cacrt = isfile(cacrt) ? MbedTLS.crt_parse_file(cacrt) : MbedTLS.crt_parse(cacrt)
            end
            conf = MbedTLS.SSLConfig(verify === nothing || verify)
            MbedTLS.ca_chain!(conf, cacrt)
            http_tls_opts[:sslconfig] = conf
        end

        # fetch and store the openid config, along with the additional args for SSL
        openid_config = JSON.parse(String(HTTP.request("GET", openid_config_url; status_exception=true, http_tls_opts...).body))
        validator = JWKSet(openid_config["jwks_uri"])

        new(Dict{String,Float64}(), state_timeout_secs, allowed_skew_secs, openid_config, http_tls_opts, validator, key_refresh_secs, 0.0, client_id, client_secret, scopes, redirect_uri, random_device)
    end
end
# URL of the provider's authorization endpoint, from the discovery document
authorization_endpoint(ctx::OIDCCtx) = ctx.openid_config["authorization_endpoint"]
# URL of the provider's token endpoint, from the discovery document
token_endpoint(ctx::OIDCCtx) = ctx.openid_config["token_endpoint"]
# Record a newly issued request `state` with its issue timestamp, so that the
# redirect response carrying it can later be matched and validated.
function remember_state(ctx::OIDCCtx, state::String)
    setindex!(ctx.states, time(), state)
    return nothing
end
# Check whether `state` was issued by us and is still fresh. A successful
# lookup consumes the state (it is deleted from the store), so each state can
# be validated at most once. Returns `true` only for a known, unexpired state.
function validate_state(ctx::OIDCCtx, state::String)
    store = ctx.states
    issued_at = get(store, state, nothing)
    if issued_at !== nothing
        delete!(store, state)
        # accept only if it has not outlived the configured timeout
        ((time() - issued_at) <= ctx.state_timeout_secs) && return true
    end

    @info("encountered an unknown or expired state")
    # opportunistically clean up abandoned states once the store grows large
    (length(store) > STATE_PURGE_TRIGGER) && purge_states!(ctx)
    return false
end
"""
Drop expired entries from the state store.

Called opportunistically from `validate_state` once the store grows beyond
`STATE_PURGE_TRIGGER` entries, so that states abandoned mid-flow do not
accumulate without bound.
"""
function purge_states!(ctx::OIDCCtx)
    tnow = time()
    tmout = ctx.state_timeout_secs
    # keep only the entries that are still within the timeout window
    # (fixes two defects: the function was named `purge_states` while the call
    # site in `validate_state` invokes `purge_states!`, and the previous
    # predicate kept expired entries while deleting fresh ones)
    filter!(nv->(tnow-nv[2])<=tmout, ctx.states)
    nothing
end

# backward-compatible alias for the previous (un-bang) name
purge_states(ctx::OIDCCtx) = purge_states!(ctx)
"""
API calling error detected by this library
"""
struct APIError
error::String
end
"""
Error returned from OpenID server
See section 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html
"""
struct AuthServerError
error::String
error_description::Union{Nothing,String}
error_uri::Union{Nothing,String}
end
"""
Authentication request. Uses the authorization code flow.
Acceptable optional args as listed in section 3.1.2.1 of specifications (https://openid.net/specs/openid-connect-core-1_0.html)
Returns a String with the redirect URL.
Caller must perform the redirection.
"""
function flow_request_authorization_code(ctx::OIDCCtx; nonce=nothing, display=nothing, prompt=nothing, max_age=nothing, ui_locales=nothing, id_token_hint=nothing, login_hint=nothing, acr_values=nothing)
@debug("oidc negotiation: initiating...")
scopes = join(ctx.scopes, ' ')
state = randstring(ctx.random_device, 10)
remember_state(ctx, state)
query = Dict("response_type"=>"code", "client_id"=>ctx.client_id, "redirect_uri"=>ctx.redirect_uri, "scope"=>scopes, "state"=>state)
(nonce === nothing) || (query["nonce"] = String(nonce))
(display === nothing) || (query["display"] = String(display))
(prompt === nothing) || (query["prompt"] = String(prompt))
(max_age === nothing) || (query["max_age"] = String(max_age))
(ui_locales === nothing) || (query["ui_locales"] = String(ui_locales))
(id_token_hint === nothing) || (query["id_token_hint"] = String(id_token_hint))
(login_hint === nothing) || (query["login_hint"] = String(login_hint))
(acr_values === nothing) || (query["acr_values"] = String(acr_values))
uri = HTTP.URIs.URI(HTTP.URIs.URI(authorization_endpoint(ctx)); query=query)
return string(uri)
end
"""
Given the params from the redirected response from the authentication request, extract the authorization code.
See sections 3.1.2.5 and 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns the authorization code on success.
Returns one of APIError or AuthServerError on failure.
"""
function flow_get_authorization_code(ctx::OIDCCtx, @nospecialize(query))
state = get(query, "state", get(query, :state, nothing))
if state === nothing
return APIError("invalid request, no state found")
end
if validate_state(ctx, String(state)) === nothing
return APIError("invalid or expired state")
end
code = get(query, "code", get(query, :code, nothing))
if code !== nothing
return String(code)
end
errcode = get(query, "error", nothing)
if errcode !== nothing
return AuthServerError(errcode, get(query, "error_description", nothing), get(query, "error_uri", nothing))
end
return APIError("invalid request, no code or error found")
end
"""
Parse the response from the token endpoint.

Returns the parsed JSON body (a `Dict`) on HTTP 200. For any other status,
attempts to parse a standard OAuth error body and returns an `AuthServerError`;
returns an `APIError` when the body is not parseable or carries no error code.
(Previously the "success" message was logged before checking the status, and a
parseable non-200 body without an "error" field made the function fall through
and return `nothing`.)
"""
function parse_token_response(tok_res)
    resp_str = String(tok_res.body)

    if tok_res.status == 200
        @info("oidc: success response from token endpoint")
        return JSON.parse(resp_str)
    end

    try
        err_resp = JSON.parse(resp_str)
        errcode = get(err_resp, "error", nothing)
        if errcode !== nothing
            return AuthServerError(errcode, get(err_resp, "error_description", nothing), get(err_resp, "error_uri", nothing))
        end
    catch
        # body was not JSON; fall through to the generic error below
    end
    return APIError("unknown response from server: " * resp_str)
end
"""
Token Request. Given the authorization code obtained, invoke the token end point and obtain an id_token, access_token, refresh_token.
See section 3.1.3.1 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns a JSON object containing tokens on success.
Returns a AuthServerError or APIError object on failure.
"""
function flow_get_token(ctx::OIDCCtx, code)
data = Dict("grant_type"=>"authorization_code",
"code"=>String(code),
"redirect_uri"=>ctx.redirect_uri,
"client_id"=>ctx.client_id,
"client_secret"=>ctx.client_secret)
headers = Dict("Content-Type"=>"application/x-www-form-urlencoded")
tok_res = HTTP.request("POST", token_endpoint(ctx), headers, HTTP.URIs.escapeuri(data); status_exception=false, ctx.http_tls_opts...)
return parse_token_response(tok_res)
end
"""
Token Refresh. Given the refresh code obtained, invoke the token end point and obtain new tokens.
See section 12 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns a JSON object containing tokens on success.
Returns a AuthServerError or APIError object on failure.
"""
function flow_refresh_token(ctx::OIDCCtx, refresh_token)
data = Dict("grant_type"=>"refresh_token",
"refresh_token"=>String(refresh_token),
"client_id"=>ctx.client_id,
"client_secret"=>ctx.client_secret)
headers = Dict("Content-Type"=>"application/x-www-form-urlencoded")
tok_res = HTTP.request("POST", token_endpoint(ctx), headers, HTTP.URIs.escapeuri(data); status_exception=false, ctx.http_tls_opts...)
return parse_token_response(tok_res)
end
"""
Validate an OIDC token.
Validates both the structure and signature.
See section 3.1.3.7 of https://openid.net/specs/openid-connect-core-1_0.html
"""
flow_validate_id_token(ctx::OIDCCtx, id_token) = flow_validate_id_token(ctx, JWT(;jwt=String(id_token)))
function flow_validate_id_token(ctx::OIDCCtx, jwt::JWT)
isvalid = false
if issigned(jwt)
try
tokclaims = claims(jwt)
issue_time = tokclaims["iat"] - ctx.allowed_skew_secs
expiry_time = tokclaims["exp"] + ctx.allowed_skew_secs
isvalid = issue_time <= round(Int, time()) <= expiry_time
catch ex
@info("invalid token format ($ex)")
end
if isvalid
validator = ctx.validator
if (time() - ctx.last_key_refresh) >= ctx.key_refresh_secs
jstr = String(HTTP.get(ctx.validator.url; ctx.http_tls_opts...).body)
keys = JSON.parse(jstr)["keys"]
keysetdict = Dict{String,JWK}()
refresh!(keys, keysetdict)
validator.keys = keysetdict
end
isvalid = validate!(jwt, validator)
end
end
return isvalid
end
end # module
| OpenIDConnect | https://github.com/tanmaykm/OpenIDConnect.jl.git |
|
[
"MIT"
] | 0.1.6 | ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e | code | 3577 | using OpenIDConnect
using Test
using Random
using HTTP
# Exercises the request-state store: remembering, consuming on validation,
# and rejection after expiry. Requires network access, since constructing an
# OIDCCtx fetches the provider's discovery document.
function test_state_store()
    @testset "State store" begin
        # short timeout so expiry behavior can be observed quickly
        ctx = OIDCCtx("https://accounts.google.com", "http://127.0.0.1:8888/auth/login", "test_client_id", "test_client_secret"; state_timeout_secs=5)
        state = randstring(10)
        OpenIDConnect.remember_state(ctx, state)
        @test length(ctx.states) == 1
        # a freshly remembered state validates; validation removes it from the store
        @test OpenIDConnect.validate_state(ctx, state)
        # past the timeout the (already consumed) state must no longer validate
        sleep(10)
        @info("expecting an invalid state")
        @test !OpenIDConnect.validate_state(ctx, state)
        @test length(ctx.states) == 0
        nothing
    end
end
# Exercises the authorization-code flow helpers: URL construction (with and
# without the optional parameters) and authorization-code extraction from
# redirect params. Requires network access to fetch the discovery document.
function test_oidc_flow()
    @testset "OIDC flow" begin
        ctx = OIDCCtx("https://accounts.google.com", "http://127.0.0.1:8888/auth/login", "test_client_id", "test_client_secret"; state_timeout_secs=5)
        @test OpenIDConnect.authorization_endpoint(ctx) == "https://accounts.google.com/o/oauth2/v2/auth"
        @test OpenIDConnect.token_endpoint(ctx) == "https://oauth2.googleapis.com/token"

        # flow request authorization code
        uri_string = flow_request_authorization_code(ctx)
        uri = HTTP.URIs.URI(uri_string)
        @test uri.host == "accounts.google.com"
        query = HTTP.URIs.queryparams(uri)
        @test get(query, "client_id", "") == "test_client_id"
        @test get(query, "redirect_uri", "") == "http://127.0.0.1:8888/auth/login"
        @test get(query, "scope", "") == "openid profile email"
        @test get(query, "response_type", "") == "code"
        @test !isempty(get(query, "state", ""))

        # same, with all optional parameters supplied
        uri_string = flow_request_authorization_code(ctx; nonce="test_nonce", display="test_display", prompt="test_prompt", max_age="12345", ui_locales="en", id_token_hint="test_id_tok_hint", login_hint="test_login_hint", acr_values="test_acr")
        uri = HTTP.URIs.URI(uri_string)
        @test uri.host == "accounts.google.com"
        query = HTTP.URIs.queryparams(uri)
        @test get(query, "client_id", "") == "test_client_id"
        @test get(query, "redirect_uri", "") == "http://127.0.0.1:8888/auth/login"
        @test get(query, "scope", "") == "openid profile email"
        @test get(query, "response_type", "") == "code"
        @test !isempty(get(query, "state", ""))
        @test get(query, "nonce", "") == "test_nonce"
        @test get(query, "display", "") == "test_display"
        @test get(query, "prompt", "") == "test_prompt"
        @test get(query, "max_age", "") == "12345"
        @test get(query, "ui_locales", "") == "en"
        @test get(query, "id_token_hint", "") == "test_id_tok_hint"
        @test get(query, "login_hint", "") == "test_login_hint"
        @test get(query, "acr_values", "") == "test_acr"

        # flow get authorization code
        @test isa(flow_get_authorization_code(ctx, Dict()), OpenIDConnect.APIError)
        @info("expecting an invalid state")
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate")), OpenIDConnect.APIError)
        OpenIDConnect.remember_state(ctx, "teststate")
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate")), OpenIDConnect.APIError)
        # NOTE(review): the two assertions below pass even though "teststate"
        # was consumed by the previous call — flow_get_authorization_code
        # compares the Bool result of validate_state against `nothing` and so
        # never rejects unknown states; confirm whether that is intended
        @info("expecting an invalid state")
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate", "error"=>"testerror")), OpenIDConnect.AuthServerError)
        @info("expecting an invalid state")
        @test "testcode" == flow_get_authorization_code(ctx, Dict("state"=>"teststate", "code"=>"testcode"))
    end
end
@testset "OpenIDConnect" begin
test_state_store()
test_oidc_flow()
end
| OpenIDConnect | https://github.com/tanmaykm/OpenIDConnect.jl.git |
|
[
"MIT"
] | 0.1.6 | ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e | code | 3736 | using Mux
using HTTP
using JSON
using OpenIDConnect
using JWTs
# extract the request headers from a Mux request dict
headers(req) = req[:headers]
# parse the query string of a Mux request into a Dict
query(req) = parse_query(req[:query])
"""
    parse_query(qstr)

Parse a URL query string `qstr` ("k1=v1&k2=v2") into a `Dict{String,String}`.
A key without a value ("flag") maps to the empty string. The value is split
from the key at the first '=' only, so values may themselves contain '='
(e.g. padding in base64-encoded tokens), which the previous unlimited split
truncated.

NOTE(review): values are not URL-decoded here; percent-encoded characters are
left as-is.
"""
function parse_query(qstr)
    res = Dict{String,String}()
    for qsub in split(qstr, '&')
        nv = split(qsub, '='; limit=2)  # split at the first '=' only
        res[nv[1]] = length(nv) > 1 ? nv[2] : ""
    end
    return res
end
# Render `j` as indented (4-space) JSON text.
pretty(j) = sprint(io -> JSON.print(io, j, 4))
"""
Render a minimal HTML page that configures the browser-side `oidc-client`
library with this server's OpenID settings and immediately starts a signin
redirect to the identity provider.
"""
function login(oidcctx::OIDCCtx)
    openid_config = oidcctx.openid_config
    issuer = openid_config["issuer"]
    openid_config_url = issuer * ".well-known/openid-configuration"

    # the returned string (with interpolated server settings) is the HTTP
    # response body; the embedded script performs the actual redirect
    """
    <html><head>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/oidc-client/1.5.1/oidc-client.js"></script>
    <script>
    var settings = {
    issuer: '$issuer',
    authority: '$openid_config_url',
    metadata: {
    issuer: '$issuer',
    authorization_endpoint: '$(openid_config["authorization_endpoint"])',
    userinfo_endpoint: '$(openid_config["token_endpoint"])',
    jwks_uri: '$(openid_config["jwks_uri"])',
    },
    client_id: '$(oidcctx.client_id)',
    redirect_uri: 'http://127.0.0.1:8888/auth/login',
    response_type: 'code',
    scope: 'openid email profile offline_access'
    };
    var mgr = new Oidc.UserManager(settings);
    var user = mgr.signinRedirect();
    </script>
    </head><body></body></html>
    """
end
"""
Render an HTML page showing the token endpoint response, the decoded JWT
claims, the `Authorization` bearer header to use, and whether the id_token
validated; offers a refresh link when a refresh_token was issued.
"""
function show_token(oidcctx::OIDCCtx, authresp, authenticated)
    id_token = authresp["id_token"]
    jwt = JWT(;jwt=id_token)
    isvalid = flow_validate_id_token(oidcctx, string(jwt))
    token_claims = claims(jwt)

    jbox_auth = Dict(
        "Authorization" => ("Bearer " * id_token)
    )

    # signal the main loop that a login round-trip completed
    authenticated[] = true

    # a refresh link can be offered only when the server issued a refresh_token
    can_refresh = "refresh_token" in keys(authresp)
    refresh_link = can_refresh ? """<hr/><a href="/auth/refresh?refresh_token=$(authresp["refresh_token"])">Refresh</a>""" : ""

    """<html><body>
    OpenID Authentication:
    <pre>$(pretty(authresp))</pre><hr/>
    JWT Token:
    <pre>$(pretty(token_claims))</pre><hr/>
    Authentication Bearer Token:
    <pre>$(pretty(jbox_auth))</pre><hr/>
    Validation success: $isvalid
    $(refresh_link)
    </body></html>"""
end
# Handler for the OpenID redirect: exchange the authorization code carried in
# the query string for tokens and render them.
function token(oidcctx::OIDCCtx, req, authenticated)
    params = query(req)
    authresp = flow_get_token(oidcctx, params["code"])
    return show_token(oidcctx, authresp, authenticated)
end
# Handler for the refresh link: use the refresh_token from the query string to
# obtain fresh tokens and render them.
function refresh(oidcctx::OIDCCtx, req, authenticated)
    params = query(req)
    authresp = flow_refresh_token(oidcctx, params["refresh_token"])
    return show_token(oidcctx, authresp, authenticated)
end
"""
Entry point for the standalone OIDC example server.

Reads a JSON configuration file (path given as the only command-line argument)
with keys `issuer`, `client_id`, `client_secret` and `do_refresh`, then serves
the login/token/refresh pages on port 8888 until a login round-trip completes
(or indefinitely while `do_refresh` is set).
"""
function main()
    if length(ARGS) != 1
        println("Usage: julia oidc_standalone.jl <configuration_file>")
        exit(1)
    end

    config = open(ARGS[1]) do f
        JSON.parse(f)
    end
    oidcctx = OIDCCtx(String(config["issuer"]), "http://127.0.0.1:8888/auth/login", String(config["client_id"]), String(config["client_secret"]), ["openid", "email", "profile", "offline_access"])

    # flipped to true by `show_token` once a login round-trip succeeds
    authenticated = Ref(false)

    @app test = (
        Mux.defaults,
        page("/", req->login(oidcctx)),
        page("/auth/login", req->token(oidcctx, req, authenticated)),
        page("/auth/refresh", req->refresh(oidcctx, req, authenticated)),
        Mux.notfound())

    @info("Standalone OIDC test server starting on port 8888")
    serve(test, 8888)

    # keep the process alive while the server runs; with do_refresh set the
    # server runs until killed
    while config["do_refresh"] || !(authenticated[])
        sleep(10)
    end
    # grace period so the final response can be delivered before exit
    sleep(10)
end
main()
| OpenIDConnect | https://github.com/tanmaykm/OpenIDConnect.jl.git |
|
[
"MIT"
] | 0.1.6 | ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e | docs | 5233 | # OpenIDConnect
[](https://github.com/tanmaykm/OpenIDConnect.jl/actions?query=workflow%3ACI+branch%3Amaster)
[](http://codecov.io/github/tanmaykm/OpenIDConnect.jl?branch=master)
[OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html) is a simple identity layer on top of the OAuth 2.0 protocol. It enables Clients to verify the identity of the End-User based on the authentication performed by an Authorization Server, as well as to obtain basic profile information about the End-User in an interoperable and REST-like manner.
This is an implementation of OpenID Connect in Julia, with methods implementing the authorization code flow.
# OpenID Connect Context (OIDCCtx)
The OpenID Connect context holds all states for a single OpenID Connect client configuration.
```julia
function OIDCCtx(
issuer::String,
redirect_uri::String,
client_id::String,
client_secret::String,
scopes::Vector{String}=DEFAULT_SCOPES;
verify::Union{Nothing,Bool}=nothing,
cacrt::Union{Nothing,String,MbedTLS.CRT}=nothing,
state_timeout_secs::Int=DEFAULT_STATE_TIMEOUT_SECS,
allowed_skew_secs::Int=DEFAULT_SKEW_SECS,
key_refresh_secs::Int=DEFAULT_KEY_REFRESH_SECS,
random_device::RandomDevice=RandomDevice()
)
```
Parameters:
- `issuer`: Issuer URL, pointing to the OpenID server
- `redirect_uri`: The app URI to which OpenID server must redirect after authorization
- `client_id`, and `client_secret`: Client ID and secret that this context represents
- `scopes`: The scopes to request during authorization (default: openid, profile, email)
Keyword Parameters:
- `verify`: whether to validate the server certificate
- `cacrt`: the CA certificate to use to check the server certificate
- `state_timeout_secs`: seconds for which to keep the state associated with an authorization request (default: 60 seconds), server responses beyond this are rejected as stale
- `allowed_skew_secs`: while validating tokens, seconds to allow to account for time skew between machines (default: 120 seconds)
- `key_refresh_secs`: time interval in which to refresh the JWT signing keys (default: 1hr)
# Error Structures
- `OpenIDConnect.APIError`: Error detected at the client side. Members:
- `error`: error code or message (String)
- `OpenIDConnect.AuthServerError`: Error returned from the OpenID server (see section 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html)
- `error`: error code (String)
- `error_description`: optional error description (String)
- `error_uri`: optional error URI (String)
# Authorization Code Flow
## Authentication request.
### `flow_request_authorization_code`
Returns a String with the redirect URL. Caller must perform the redirection.
Acceptable optional args as listed in section 3.1.2.1 of specifications (https://openid.net/specs/openid-connect-core-1_0.html)
```julia
function flow_request_authorization_code(
ctx::OIDCCtx;
nonce=nothing,
display=nothing,
prompt=nothing,
max_age=nothing,
ui_locales=nothing,
id_token_hint=nothing,
login_hint=nothing,
acr_values=nothing
)
```
### `flow_get_authorization_code`
Given the params from the redirected response from the authentication request, extract the authorization code.
See sections 3.1.2.5 and 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns the authorization code on success.
Returns one of APIError or AuthServerError on failure.
```julia
function flow_get_authorization_code(
ctx::OIDCCtx,
query # name-value pair Dict with query parameters are received from the OpenID server redirect
)
```
## Token Requests
### `flow_get_token`
Token Request. Given the authorization code obtained, invoke the token end point and obtain an id_token, access_token, refresh_token.
See section 3.1.3.1 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns a JSON object containing tokens on success.
Returns an AuthServerError or APIError object on failure.
```julia
function flow_get_token(
ctx::OIDCCtx,
code
)
```
### `flow_refresh_token`
Token Refresh. Given the refresh code obtained, invoke the token end point and obtain new tokens.
See section 12 of https://openid.net/specs/openid-connect-core-1_0.html.
Returns a JSON object containing tokens on success.
Returns an AuthServerError or APIError object on failure.
```julia
function flow_refresh_token(
ctx::OIDCCtx,
refresh_token
)
```
## Token Validation
### `flow_validate_id_token`
Validate an OIDC token.
Validates both the structure and signature.
See section 3.1.3.7 of https://openid.net/specs/openid-connect-core-1_0.html
```julia
function flow_validate_id_token(
ctx::OIDCCtx,
id_token::Union{JWTs.JWT, String}
)
```
# Examples
An example application built using OpenIDClient with Mux and HTTP is available as a [tool](tools/oidc_standalone.jl). Populate a configuration file following this [template](tools/settings.template) and start the standalone application. Point your browser to it to experience the complete flow.
| OpenIDConnect | https://github.com/tanmaykm/OpenIDConnect.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 8979 | using BERT
using Knet
import Base: length, iterate
using Random
using CSV
using PyCall
using Dates
# Script-level configuration for SST fine-tuning.
VOCABFILE = "bert-base-uncased-vocab.txt"
NUM_CLASSES = 2
LEARNING_RATE = 2e-5
NUM_OF_EPOCHS = 30
TRAIN = true          # true: fine-tune and save; false: load and evaluate
# Build the wordpiece vocabulary: one token per line in VOCABFILE, mapped to
# its 1-based line number. Note: `f` receives the do-block's return value
# (the loop returns nothing) and is never used afterwards.
token2int = Dict()
f = open(VOCABFILE) do file
    lines = readlines(file)
    for (i,line) in enumerate(lines)
        token2int[line] = i
    end
end
# Reverse lookup: id -> token.
int2token = Dict(value => key for (key, value) in token2int)
VOCABSIZE = length(token2int)
# include("preprocess.jl")
# include("optimizer.jl")
"""
    convert_to_int_array(text, dict; lower_case=true)

Tokenize `text` with `bert_tokenize` and map each wordpiece token to its
integer id in `dict`. Tokens missing from the vocabulary map to the id of
the special "[UNK]" token.
"""
function convert_to_int_array(text, dict; lower_case=true)
    tokens = bert_tokenize(text, dict, lower_case=lower_case)
    unk = dict["[UNK]"]  # fallback id, looked up once instead of per token
    # `get` replaces the manual `token in keys(dict)` membership test.
    return Int[get(dict, token, unk) for token in tokens]
end
# Read a tab-separated file with `sentence` and `label` columns and return
# `(x, y)`: `x` is a vector of token-id vectors, `y` the Int8 labels shifted
# to 1-based indexing.
function read_and_process(filename, dict; lower_case=true)
    data = CSV.File(filename, delim="\t")
    x = Array{Int,1}[]
    y = Int8[]
    for i in data
        push!(x, convert_to_int_array(i.sentence, dict, lower_case=lower_case))
        push!(y, Int8(i.label + 1)) # negative 1, positive 2
    end
    # Padding to maximum
    # max_seq = findmax(length.(x))[1]
    # for i in 1:length(x)
    #     append!(x[i], fill(1, max_seq - length(x[i]))) # 1 is for "[PAD]"
    # end
    return (x, y)
end
# Minibatched dataset for sentence classification.
mutable struct ClassificationData
    input_ids    # vector of token-id sequences, each exactly seq_len long
    input_mask   # vector of 1/0 masks (1 = real token, 0 = padding)
    segment_ids  # vector of segment-id sequences (all ones here)
    labels       # Int8 class labels, 1-based
    batchsize
    ninstances
    shuffled     # whether iteration visits instances in random order
end

# Read `input_file` (TSV with sentence/label columns), tokenize, then
# truncate or right-pad every sequence to exactly `seq_len` tokens.
function ClassificationData(input_file, token2int; batchsize=8, shuffled=true, seq_len=64)
    input_ids = []
    input_mask = []
    segment_ids = []
    labels = []
    (x, labels) = read_and_process(input_file, token2int)
    for i in 1:length(x)
        if length(x[i]) >= seq_len
            # Truncate long sequences; mask is all ones.
            x[i] = x[i][1:seq_len]
            mask = Array{Int64}(ones(seq_len))
        else
            mask = Array{Int64}(ones(length(x[i])))
            append!(x[i], fill(1, seq_len - length(x[i]))) # 1 is for "[PAD]"
            append!(mask, fill(0, seq_len - length(mask))) # 0's vanish with masking operation
        end
        push!(input_ids, x[i])
        push!(input_mask, mask)
        push!(segment_ids, Array{Int64}(ones(seq_len)))
    end
    ninstances = length(input_ids)
    return ClassificationData(input_ids, input_mask, segment_ids, labels, batchsize, ninstances, shuffled)
end
"""
    length(d::ClassificationData)

Number of minibatches in the dataset: `ninstances / batchsize` rounded up,
so a final partial batch is counted.
"""
function length(d::ClassificationData)
    # `cld` is ceiling division; it replaces the divrem + remainder check
    # and avoids shadowing the argument `d` with the quotient.
    return cld(d.ninstances, d.batchsize)
end
# Iterate minibatches. The iteration state is the collection of not-yet-
# consumed instance indices (shuffled once up front when `d.shuffled`);
# each step hcat's up to `batchsize` instances into SxB matrices.
function iterate(d::ClassificationData, state=ifelse(d.shuffled, randperm(d.ninstances), 1:d.ninstances))
    state === nothing && return nothing
    if length(state) > d.batchsize
        new_state = state[d.batchsize+1:end]
        input_ids = hcat(d.input_ids[state[1:d.batchsize]]...)
        input_mask = hcat(d.input_mask[state[1:d.batchsize]]...)
        segment_ids = hcat(d.segment_ids[state[1:d.batchsize]]...)
        labels = hcat(d.labels[state[1:d.batchsize]]...)
    else
        # Last (possibly partial) batch; `nothing` terminates iteration.
        new_state = nothing
        input_ids = hcat(d.input_ids[state]...)
        input_mask = hcat(d.input_mask[state]...)
        segment_ids = hcat(d.segment_ids[state]...)
        labels = hcat(d.labels[state]...)
    end
    return ((input_ids, input_mask, segment_ids, labels), new_state)
end
# mutable struct ClassificationData2
# input_ids
# input_mask
# segment_ids
# labels
# batchsize
# ninstances
# shuffled
# end
# function ClassificationData2(input_file; batchsize=8, shuffled=true, seq_len=64)
# input_ids = []
# input_mask = []
# segment_ids = []
# labels = []
# f = open(input_file)
# tmp = split.(readlines(f), "\t")
# for i in 1:length(tmp)
# instance = eval.(Meta.parse.(tmp[i]))
# push!(input_ids, (instance[1] .+ 1)[1:seq_len])
# push!(input_mask, instance[2][1:seq_len])
# push!(segment_ids, (instance[3] .+ 1)[1:seq_len])
# push!(labels, (instance[4] + 1))
# end
# ninstances = length(input_ids)
# return ClassificationData2(input_ids, input_mask, segment_ids, labels, batchsize, ninstances, shuffled)
# end
# function length(d::ClassificationData2)
# d, r = divrem(d.ninstances, d.batchsize)
# return r == 0 ? d : d+1
# end
# function iterate(d::ClassificationData2, state=ifelse(d.shuffled, randperm(d.ninstances), 1:d.ninstances))
# state === nothing && return nothing
# if length(state) > d.batchsize
# new_state = state[d.batchsize+1:end]
# input_ids = hcat(d.input_ids[state[1:d.batchsize]]...)
# input_mask = hcat(d.input_mask[state[1:d.batchsize]]...)
# segment_ids = hcat(d.segment_ids[state[1:d.batchsize]]...)
# labels = hcat(d.labels[state[1:d.batchsize]]...)
# else
# new_state = nothing
# input_ids = hcat(d.input_ids[state]...)
# input_mask = hcat(d.input_mask[state]...)
# segment_ids = hcat(d.segment_ids[state]...)
# labels = hcat(d.labels[state]...)
# end
# return ((input_ids, input_mask, segment_ids, labels), new_state)
# end
# include("model.jl")
# Embedding Size, Vocab Size, Intermediate Hidden Size, Max Sequence Length, Sequence Length, Num of Segments, Num of Heads in Attention, Num of Encoders in Stack, Batch Size, Matrix Type, General Dropout Rate, Attention Dropout Rate, Activation Function
config = BertConfig(768, 30522, 3072, 512, 64, 2, 12, 12, 8, KnetArray{Float32}, 0.1, 0.1, "gelu")
# Load train/dev splits when fine-tuning, otherwise just the test split.
if TRAIN
    dtrn = ClassificationData("../project/mytrain.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
    ddev = ClassificationData("../project/dev.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
else
    dtst = ClassificationData("../project/mytest.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
end
if TRAIN
    # Initialize from the pretrained PyTorch BERT-base checkpoint.
    model = BertClassification(config, NUM_CLASSES)
    @pyimport torch
    torch_model = torch.load("../project/pytorch_model.bin")
    model = load_from_torch_base(model, config.num_encoder, config.atype, torch_model)
end
# Classification accuracy of `model` over the batched dataset `dtst`.
# Each batch is (input_ids, attention_mask, segment_ids, gold_labels);
# predictions are the argmax over class scores along dims=1.
function accuracy2(model, dtst)
    correct = 0
    total = 0
    for (x, attention_mask, segment_ids, y) in dtst
        scores = Array{Float32}(model(x, segment_ids, attention_mask=attention_mask))
        predictions = getindex.(argmax(scores, dims=1), 1)
        correct += sum(y .== predictions)
        total += length(y)
    end
    return correct / total
end
# Attach a BertAdam optimizer to every parameter of `model`.
# Parameters whose value is 1-D additionally get w_decay_rate=0.01.
function initopt!(model, t_total; lr=0.001, warmup=0.1)
    for par in params(model)
        is_vector = ndims(value(par)) == 1
        par.opt = is_vector ?
            BertAdam(lr=lr, warmup=warmup, t_total=t_total, w_decay_rate=0.01) :
            BertAdam(lr=lr, warmup=warmup, t_total=t_total)
    end
end
# One training epoch: backprop through each batch, update parameters with
# the per-parameter optimizers, and every 500 iterations report the running
# loss, evaluate on `ddev`, and checkpoint when dev accuracy improves.
# Returns (best_acc, mean epoch loss, dev accuracies recorded this epoch).
function mytrain!(model, dtrn, ddev, best_acc)
    losses = []
    accs = []
    for (k, (x, attention_mask, segment_ids, labels)) in enumerate(dtrn)
        J = @diff model(x, segment_ids, labels, attention_mask=attention_mask)
        for par in params(model)
            g = grad(J, par)
            update!(value(par), g, par.opt)
        end
        push!(losses, value(J))
        if k % 500 == 0
            print(Dates.format(now(), "HH:MM:SS"), " -> ")
            println("Training loss up to $k iteration is : ", Knet.mean(losses))
            flush(stdout)
            acc = accuracy2(model, ddev)
            push!(accs, acc)
            print(Dates.format(now(), "HH:MM:SS"), " -> ")
            println("Accuracy at $k iteration : ", acc)
            flush(stdout)
            if acc > best_acc
                best_acc = acc
                print(Dates.format(now(), "HH:MM:SS"), " -> ")
                println("Saving...")
                Knet.save("model_bert.jld2", "model", model)
                flush(stdout)
            end
        end
    end
    return (best_acc, Knet.mean(losses), accs)
end
# Top-level driver: fine-tune for NUM_OF_EPOCHS (checkpointing on dev
# accuracy inside mytrain!) or, when TRAIN is false, restore the saved
# model and report test accuracy.
if TRAIN
    t_total = length(dtrn) * NUM_OF_EPOCHS
    initopt!(model, t_total, lr=LEARNING_RATE)
    dev_accs = [0.0]
    best_acc = 0.0
    for epoch in 1:NUM_OF_EPOCHS
        global best_acc
        print(Dates.format(now(), "HH:MM:SS"), " -> ")
        println("Epoch : ", epoch)
        flush(stdout)
        (best_acc, lss, acc) = mytrain!(model, dtrn, ddev, best_acc)
        append!(dev_accs, acc)
        print(Dates.format(now(), "HH:MM:SS"), " -> ")
        println("Training loss for $epoch epoch is : $lss")
        println(dev_accs)
        flush(stdout)
        #=
        acc = accuracy2(model, ddev)
        println("Accuracy : ", acc)
        if acc > best_acc
            best_acc = acc
            println("Saving...")
            Knet.save("model_bert.jld2", "model", model)
        end
        =#
    end
    Knet.save("accuracies.jld2", "dev_accs", dev_accs)
else
    model = Knet.load("model_bert.jld2", "model")
    result = accuracy2(model, dtst)
    print(Dates.format(now(), "HH:MM:SS"), " -> ")
    println("Test accuracy is : $result")
end
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 3218 | using BERT
using Knet
import Base: length, iterate
using Random
using CSV
using PyCall
# Script-level configuration for evaluation.
VOCABFILE = "bert-base-uncased-vocab.txt"
NUM_CLASSES = 2
# Build the wordpiece vocabulary: token -> 1-based line number. `f` holds
# the do-block's return value (nothing) and is unused afterwards.
token2int = Dict()
f = open(VOCABFILE) do file
    lines = readlines(file)
    for (i,line) in enumerate(lines)
        token2int[line] = i
    end
end
int2token = Dict(value => key for (key, value) in token2int)
VOCABSIZE = length(token2int)
# Minibatched dataset loaded from a pre-tokenized dump (see the constructor
# below) rather than raw text.
mutable struct ClassificationData2
    input_ids    # vector of token-id sequences (1-based ids)
    input_mask   # vector of 1/0 attention masks
    segment_ids  # vector of segment-id sequences (1-based)
    labels       # class labels, 1-based
    batchsize
    ninstances
    shuffled     # whether iteration visits instances in random order
end
"""
    ClassificationData2(input_file; batchsize=8, shuffled=true, seq_len=64)

Load a preprocessed dataset from `input_file`. Each line holds four
tab-separated Julia literals: 0-based input ids, an attention mask,
0-based segment ids and a 0-based label. Ids and labels are shifted to
1-based indexing and sequences are truncated to `seq_len`.
"""
function ClassificationData2(input_file; batchsize=8, shuffled=true, seq_len=64)
    input_ids = []
    input_mask = []
    segment_ids = []
    labels = []
    # readlines(path) opens and closes the file itself; the original
    # `open(input_file)` handle was never closed (resource leak).
    for fields in split.(readlines(input_file), "\t")
        # NOTE(review): eval of Meta.parse executes arbitrary code from the
        # input file -- only use with trusted data.
        instance = eval.(Meta.parse.(fields))
        push!(input_ids, (instance[1] .+ 1)[1:seq_len])
        push!(input_mask, instance[2][1:seq_len])
        push!(segment_ids, (instance[3] .+ 1)[1:seq_len])
        push!(labels, (instance[4] + 1))
    end
    ninstances = length(input_ids)
    return ClassificationData2(input_ids, input_mask, segment_ids, labels, batchsize, ninstances, shuffled)
end
"""
    length(d::ClassificationData2)

Number of minibatches: `ninstances / batchsize` rounded up, counting a
final partial batch.
"""
function length(d::ClassificationData2)
    # `cld` is ceiling division; replaces divrem and avoids shadowing `d`.
    return cld(d.ninstances, d.batchsize)
end
# Iterate minibatches; state is the remaining instance indices (shuffled
# once up front when `d.shuffled`). Each step hcat's up to `batchsize`
# instances into SxB matrices.
function iterate(d::ClassificationData2, state=ifelse(d.shuffled, randperm(d.ninstances), 1:d.ninstances))
    state === nothing && return nothing
    if length(state) > d.batchsize
        new_state = state[d.batchsize+1:end]
        input_ids = hcat(d.input_ids[state[1:d.batchsize]]...)
        input_mask = hcat(d.input_mask[state[1:d.batchsize]]...)
        segment_ids = hcat(d.segment_ids[state[1:d.batchsize]]...)
        labels = hcat(d.labels[state[1:d.batchsize]]...)
    else
        # Last (possibly partial) batch; `nothing` terminates iteration.
        new_state = nothing
        input_ids = hcat(d.input_ids[state]...)
        input_mask = hcat(d.input_mask[state]...)
        segment_ids = hcat(d.segment_ids[state]...)
        labels = hcat(d.labels[state]...)
    end
    return ((input_ids, input_mask, segment_ids, labels), new_state)
end
# Embedding Size, Vocab Size, Intermediate Hidden Size, Max Sequence Length, Sequence Length, Num of Segments, Num of Heads in Attention, Num of Encoders in Stack, Batch Size, Matrix Type, General Dropout Rate, Attention Dropout Rate, Activation Function
config = BertConfig(768, 30522, 3072, 512, 64, 2, 12, 12, 8, KnetArray{Float32}, 0.1, 0.1, "gelu")
dtst = ClassificationData2("../project/sst-test.tsv", batchsize=config.batchsize, seq_len=config.seq_len)
# Restore a fine-tuned classification checkpoint saved from PyTorch.
model = BertClassification(config, NUM_CLASSES)
@pyimport torch
torch_model = torch.load("../project/model-64-32.pt")
model = load_from_torch_classification(model, config.num_encoder, config.atype, torch_model)
# Classification accuracy of `model` over the batched dataset `dtst`.
# Predictions are the argmax over class scores along dims=1; `y` holds
# 1-based gold labels.
function accuracy2(model, dtst)
    correct = 0
    total = 0
    for (x, attention_mask, segment_ids, y) in dtst
        scores = Array{Float32}(model(x, segment_ids, attention_mask=attention_mask))
        predictions = getindex.(argmax(scores, dims=1), 1)
        correct += sum(y .== predictions)
        total += length(y)
    end
    return correct / total
end
# Evaluate the restored model on the test split and report.
result = accuracy2(model, dtst)
println("Test accuracy is : $result")
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 398 | using BERT
# Smoke test: build a tiny BertPreTraining model and compute the combined
# MLM + NSP loss on a hand-made 4x3 (seq x batch) input.
config = BertConfig(128, 30022, 256, 512, 4, 2, 8, 2, 3, Array{Float32}, 0.1, 0.1, "relu")
model = BertPreTraining(config)
x = [213 234 7789; 712 9182 8912; 7812 12 432; 12389 1823 8483] # 4x3
segment_ids = [1 1 1;1 2 1;1 2 1;1 1 1]
# -1 marks positions that do not contribute to the masked-LM loss.
mlm_labels = [-1 234 -1; -1 -1 8912; -1 -1 -1; 12389 -1 -1]
nsp_labels = [1, 2, 1]
loss = model(x, segment_ids, mlm_labels, nsp_labels)
println(loss)
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 337 | module BERT
export
BertPreTraining,
BertClassification,
BertConfig,
load_from_torch_base,
load_from_torch_pretraining,
load_from_torch_classification,
BertAdam,
bert_tokenize
using Knet, SpecialFunctions, LinearAlgebra
include("model.jl")
include("optimizer.jl")
include("preprocess.jl")
end # module
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 19559 | # import Base: *
# import Knet: getindex, setindex!
# Matmuls 2d and 3d arrays
# function *(a::AbstractArray{T,2}, b::AbstractArray{T,3}) where T<:Real
# b_sizes = size(b)
# a = a * reshape(b, b_sizes[1], :)
# return reshape(a, :, b_sizes[2:end]...)
# end
# Matmuls 2d and 3d arrays for KnetArrays
# function *(a::KnetArray{T,2}, b::KnetArray{T,3}) where T<:Real
# b_sizes = size(b)
# a = a * reshape(b, b_sizes[1], :)
# return reshape(a, :, b_sizes[2:end]...)
# end
# TODO :
# Since backprop doesn't work with this new import, we define it as a complex function consisting primitives that Autograd can take derivatives of. A primitive derivative of this function would speed things up.
# function matmul23(a::KnetArray{T,2}, b::KnetArray{T,3}) where T<:Real
# b_sizes = size(b)
# a = a * reshape(b, b_sizes[1], :)
# return reshape(a, :, b_sizes[2:end]...)
# end
# Gaussian Error Linear Unit, elementwise: gelu(x) = x/2 * (1 + erf(x/√2)).
gelu(x) = @. 0.5 * x * (1.0 + erf(x / sqrt(2.0)))
# Multiply a 2-D matrix `a` (O x I) with an N-D tensor `b` (I x rest...) by
# flattening the trailing dimensions of `b`, then restoring them, so the
# result is O x rest... .
function matmul23(a, b)
    bdims = size(b)
    flat = a * reshape(b, bdims[1], :)
    return reshape(flat, :, bdims[2:end]...)
end
# Wrote these first, then realized we don't need them. Might come in handy later.
# function matmul23(a::AbstractArray{T,2}, b::AbstractArray{T,3}) where T<:Real
# b_sizes = size(b)
# a = a * reshape(b, b_sizes[1], :)
# return reshape(a, :, b_sizes[2:end]...)
# end
# function matmul23(a::Param{KnetArray{T,2}}, b::KnetArray{T,3}) where T<:Real
# matmul23(value(a), b)
# end
# function matmul23(a::Param{AbstractArray{T,2}}, b::AbstractArray{T,3}) where T<:Real
# matmul23(value(a), b)
# end
# function matmul23(a::Param{KnetArray{T,2}}, b::AutoGrad.Result{KnetArray{T,3}}) where T<:Real
# matmul23(value(a), value(b))
# end
# function matmul23(a::Param{AbstractArray{T,2}}, b::AutoGrad.Result{AbstractArray{T,3}}) where T<:Real
# matmul23(value(a), value(b))
# end
# @primitive *(x1::KnetArray{T,2},x2::KnetArray{T,3}),dy
# Not using this anymore
# function getindex(A::KnetArray{Float32,3}, ::Colon, I::Real, ::Colon)
# sizes = size(A)
# A = reshape(A, :, sizes[3])
# return A[(I-1)*sizes[1]+1:I*sizes[1],:]
# # reshape(A, :, size(A,3))[(I-1)*size(A,1)+1:I*size(A,1),:]
# end
# Does not work
# function setindex!(A::KnetArray{Float32,3}, v, ::Colon, I::Real, ::Colon)
# A = reshape(A, :, size(A,3))
# # setindex!(A, v, (I-1)*size(A,1)+1:I*size(A,1), ::Colon)
# A[(I-1)*size(A,1)+1:I*size(A,1),:] = v
# end
# Uncorrected standard deviation along dims=1, with `ϵ` added inside the
# sqrt for numerical stability. The original note says "std doesn't work!"
# -- presumably Statistics.std is not differentiable under Knet's AutoGrad
# here; TODO confirm.
std2(a, μ, ϵ) = sqrt.(Knet.mean(abs2.(a .- μ), dims=1) .+ ϵ)
# Legend
# V -> Vocab size, E -> Embedding size, S -> Sequence length, B -> Batch size
# H -> head_size, N -> num_heads
abstract type Layer end
# MAYBE TODO : sin-cos positionwise embeddings. This will reduce model size by max_seq_len * E
# Lookup-table embedding: `w` is an E x V matrix; indexing with an integer
# array of ids returns the matching embedding columns.
mutable struct Embedding <: Layer
    w
end

Embedding(vocabsize::Int,embed::Int; atype=Array{Float32}) = Embedding(param(embed,vocabsize, atype=atype))

function (e::Embedding)(x)
    e.w[:,x]
end
# If we need 0's as pads
#=
struct SegmentEmbedding <: Layer
w
atype
end
SegmentEmbedding(vocabsize::Int,embed::Int; atype=Array{Float32}) = SegmentEmbedding(param(embed,vocabsize, atype=atype), atype)
function (e::SegmentEmbedding)(x)
x != 0 ? e.w[:,x] : e.atype(zeros(size(e.w,1)))
end
=#
# Affine transform y = W*x .+ b for 2-D inputs (features x batch).
mutable struct Linear <: Layer
    w
    b
end

Linear(input_size::Int, output_size::Int; atype=Array{Float32}) = Linear(param(output_size, input_size, atype=atype), param0(output_size, atype=atype))

function (l::Linear)(x)
    return l.w * x .+ l.b
end

# Affine transform for 3-D inputs (features x seq x batch); matmul23
# flattens and restores the trailing dimensions.
mutable struct Linear3D <: Layer
    w
    b
end

Linear3D(input_size::Int, output_size::Int; atype=Array{Float32}) = Linear3D(param(output_size, input_size, atype=atype), param0(output_size, atype=atype))

function (l::Linear3D)(x)
    return matmul23(l.w, x) .+ l.b
end
# Fully connected layer: a Linear (or Linear3D) transform followed by
# dropout and an elementwise activation. Identical to Linear except for the
# dropout and activation.
mutable struct Dense <: Layer
    linear
    pdrop
    func
end

# `threeD` selects Linear3D for E x S x B inputs, plain Linear otherwise.
function Dense(input_size::Int, output_size::Int; pdrop=0.0, func=identity, atype=Array{Float32}, threeD=false)
    inner = threeD ? Linear3D(input_size, output_size, atype=atype) :
                     Linear(input_size, output_size, atype=atype)
    return Dense(inner, pdrop, func)
end

(a::Dense)(x) = a.func.(dropout(a.linear(x), a.pdrop))
# Layer normalization over the feature dimension (dims=1) with learnable
# scale γ and shift β.
mutable struct LayerNormalization <: Layer
    γ
    β
    ϵ  # numerical-stability epsilon passed to std2
end

LayerNormalization(hidden_size::Int; epsilon=1e-12, atype=Array{Float32}) = LayerNormalization(Param(atype(ones(hidden_size))), param0(hidden_size, atype=atype), epsilon)

function (n::LayerNormalization)(x)
    μ = Knet.mean(x, dims=1)
    x = (x .- μ) ./ std2(x, μ, n.ϵ) # corrected=false for n
    return n.γ .* x .+ n.β
end
# Sum of wordpiece, positional and segment embeddings, followed by layer
# normalization and dropout.
mutable struct EmbedLayer <: Layer
    wordpiece::Embedding
    positional::Embedding
    # segment::SegmentEmbedding
    segment::Embedding
    layer_norm::LayerNormalization
    seq_len::Int
    pdrop
end

function EmbedLayer(config)
    wordpiece = Embedding(config.vocab_size, config.embed_size, atype=config.atype)
    positional = Embedding(config.max_seq_len, config.embed_size, atype=config.atype)
    #segment = SegmentEmbedding(config.num_segment, config.embed_size, atype=config.atype)
    segment = Embedding(config.num_segment, config.embed_size, atype=config.atype)
    layer_norm = LayerNormalization(config.embed_size, atype=config.atype)
    return EmbedLayer(wordpiece, positional, segment, layer_norm, config.seq_len, config.pdrop)
end

function (e::EmbedLayer)(x, segment_ids) # segment_ids are SxB, containing 1 or 2, or 0 in case of pads.
    x = e.wordpiece(x)
    # Position ids 1..seq_len, replicated across the batch (SxB matrix).
    positions = zeros(Int64, e.seq_len, size(x,3)) .+ collect(1:e.seq_len) # size(x,3) is batchsize. Resulting matrix is SxB
    x = x .+ e.positional(positions)
    #x .+= reshape(hcat(e.segment.(segment_ids)...), (:, size(segment_ids,1),size(segment_ids,2)))
    x = x .+ e.segment(segment_ids)
    x = e.layer_norm(x)
    return dropout(x, e.pdrop)
end
# Split the embedding axis of `x` (E x S x B, E = num_heads*head_size) into
# per-head slices and fold the head axis into the batch axis, producing an
# H x S x (N*B) tensor that bmm can process in one shot.
function divide_to_heads(x, num_heads, head_size, seq_len)
    split_heads = reshape(x, (head_size, num_heads, seq_len, :))
    head_major = permutedims(split_heads, (1, 3, 2, 4))
    return reshape(head_major, (head_size, seq_len, :))
end
# Multi-head scaled dot-product self-attention.
mutable struct SelfAttention <: Layer
    query::Linear3D # N*H x E
    key::Linear3D
    value::Linear3D
    linear::Linear3D        # output projection after head concatenation
    num_heads::Int
    seq_len::Int
    embed_size::Int
    head_size::Int
    head_size_sqrt::Int     # sqrt(head_size), cached for attention scaling
    attention_pdrop         # dropout on attention probabilities
    pdrop                   # dropout on the output projection
end
"""
    SelfAttention(config)

Build a multi-head self-attention layer. The embedding size must be
divisible by the number of heads and the per-head size must be a perfect
square (its square root is cached for attention scaling).

Throws `ArgumentError` when either constraint is violated.
"""
function SelfAttention(config)
    config.embed_size % config.num_heads == 0 ||
        throw(ArgumentError("Embed size should be divisible by number of heads!"))
    head_size = Int(config.embed_size / config.num_heads)
    # isqrt never throws, so the perfect-square check below is actually
    # reachable; the old Int(sqrt(...)) raised InexactError first.
    head_size_sqrt = isqrt(head_size)
    head_size_sqrt * head_size_sqrt == head_size ||
        throw(ArgumentError("Square root of head size should be an integer!"))
    # H*N always equals E, so every projection maps E -> E.
    query = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)
    key = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)
    value = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)
    linear = Linear3D(config.embed_size, config.embed_size, atype=config.atype)
    return SelfAttention(query, key, value, linear, config.num_heads, config.seq_len,
                         config.embed_size, head_size, head_size_sqrt,
                         config.attention_pdrop, config.pdrop)
end
# Apply multi-head attention to `x` (E x S x B). `attention_mask` is the
# precomputed additive mask (0 for real tokens, -10000 for padding).
function (s::SelfAttention)(x, attention_mask)
    # We make all the batchsize ones colon, in case of batches smaller than batchsize.
    # x is ExSxB
    query = divide_to_heads(s.query(x), s.num_heads, s.head_size, s.seq_len) # H x S x N*B
    key = divide_to_heads(s.key(x), s.num_heads, s.head_size, s.seq_len)
    value = divide_to_heads(s.value(x), s.num_heads, s.head_size, s.seq_len)
    # Scaled Dot Product Attention
    query = bmm(permutedims(key, (2,1,3)), query)
    query = query ./ s.head_size_sqrt # Scale down. I init this value to avoid taking sqrt every forward operation.
    # Masking. First reshape to 4d, then add mask, then reshape back to 3d.
    query = reshape(reshape(query, (s.seq_len, s.seq_len, s.num_heads, :)) .+ attention_mask, (s.seq_len, s.seq_len, :))
    # Softmax over keys, then dropout on the attention probabilities.
    query = Knet.softmax(query, dims=1)
    query = dropout(query, s.attention_pdrop)
    query = bmm(value, query)
    # Undo divide_to_heads: bring heads back next to the feature axis.
    query = permutedims(reshape(query, (s.head_size, s.seq_len, s.num_heads, :)), (1,3,2,4))
    query = reshape(query, (s.embed_size, s.seq_len, :)) # Concat
    return dropout(s.linear(query), s.pdrop) # Linear transformation at the end
    # In pytorch version dropout is after layer_norm!
end
# Position-wise feed-forward network: E -> ff_hidden (with activation) -> E,
# with dropout on the output.
mutable struct FeedForward <: Layer
    dense::Dense
    linear::Linear3D
    pdrop
end

function FeedForward(config)
    # The activation name is stored as a String in the config (e.g. "gelu").
    dense = Dense(config.embed_size, config.ff_hidden_size, func=eval(Meta.parse(config.func)), atype=config.atype, threeD=true)
    linear = Linear3D(config.ff_hidden_size, config.embed_size, atype=config.atype)
    return FeedForward(dense, linear, config.pdrop)
end

function (f::FeedForward)(x)
    x = f.dense(x)
    return dropout(f.linear(x), f.pdrop)
end
# One transformer encoder block: self-attention and feed-forward sublayers,
# each wrapped in a residual connection followed by layer normalization.
mutable struct Encoder <: Layer
    self_attention::SelfAttention
    layer_norm1::LayerNormalization
    feed_forward::FeedForward
    layer_norm2::LayerNormalization
end

function Encoder(config)
    return Encoder(SelfAttention(config), LayerNormalization(config.embed_size, atype=config.atype), FeedForward(config), LayerNormalization(config.embed_size, atype=config.atype))
end

function (e::Encoder)(x, attention_mask)
    x = e.layer_norm1(x .+ e.self_attention(x, attention_mask))
    return e.layer_norm2(x .+ e.feed_forward(x))
end
# The BERT encoder: embedding layer followed by a stack of encoder blocks.
mutable struct Bert <: Layer
    embed_layer::EmbedLayer
    encoder_stack           # Vector{Encoder}
    atype                   # array type used for the attention mask
end

function Bert(config)
    embed_layer = EmbedLayer(config)
    encoder_stack = Encoder[]
    for _ in 1:config.num_encoder
        push!(encoder_stack, Encoder(config))
    end
    return Bert(embed_layer, encoder_stack, config.atype)
end
# Run the full BERT encoder. `x` and `segment_ids` are SxB integer
# matrices; `attention_mask` is SxB with 1 for real tokens and 0 for
# padding. When no mask is given, every position is attended.
function (b::Bert)(x, segment_ids; attention_mask=nothing)
    # `isnothing` instead of `== nothing`: `==` can broadcast or return
    # missing for some argument types, while identity comparison is safe.
    attention_mask = isnothing(attention_mask) ? ones(size(x)) : attention_mask
    # Reshape to S x 1 x 1 x B so the mask broadcasts inside attention,
    # then map padded positions (0) to large negative logits.
    attention_mask = reshape(attention_mask, (size(attention_mask,1), 1, 1, size(attention_mask,2)))
    attention_mask = (1 .- attention_mask) .* -10000.0
    attention_mask = b.atype(attention_mask)
    x = b.embed_layer(x, segment_ids)
    for encoder in b.encoder_stack
        x = encoder(x, attention_mask)
    end
    return x
end
# Pools the encoder output (E x S x B) to one vector per sentence by
# passing the first ("[CLS]") position through a tanh-activated linear
# layer. Returns E x B.
mutable struct Pooler <: Layer
    linear::Linear
end

Pooler(embed_size::Int; atype=Array{Float32}) = Pooler(Linear(embed_size, embed_size, atype=atype))

function (p::Pooler)(x)
    # TODO :
    # Gave up on getindex function for 3D matrices because I could not figure out how to write setindex! for backprop
    # x = reshape(x, :, size(x,3))
    # return tanh.(p.linear(x[:,1,:])) # Use only CLS token. Returns ExB
    # Flattening to (E*S) x B and slicing the first E rows extracts the CLS
    # (first sequence position) embedding for every batch element.
    return tanh.(p.linear(reshape(x, :, size(x,3))[1:size(x,1),:]))
end
# Next-sentence-prediction head: a linear map from the pooled CLS vector to
# two logits (is-next / not-next).
mutable struct NSPHead <: Layer
    linear::Linear
end

NSPHead(embed_size::Int; atype=Array{Float32}) = NSPHead(Linear(embed_size, 2, atype=atype))

(n::NSPHead)(x) = n.linear(x)
# Masked-language-model head: dense transform + layer norm, then a linear
# projection to vocabulary logits (V x S x B).
mutable struct MLMHead <: Layer
    dense::Dense
    layer_norm::LayerNormalization
    linear::Linear3D
end

function MLMHead(config, embedding_matrix)
    dense = Dense(config.embed_size, config.embed_size, func=eval(Meta.parse(config.func)), pdrop=0.0, atype=config.atype, threeD=true)
    layer_norm = LayerNormalization(config.embed_size, atype=config.atype)
    linear = Linear3D(config.embed_size, config.vocab_size, atype=config.atype)
    # TODO : Do this a shared weight
    #linear.w = permutedims(embedding_matrix, (2,1))
    return MLMHead(dense, layer_norm, linear)
end

function (m::MLMHead)(x)
    x = m.dense(x)
    x = m.layer_norm(x)
    return m.linear(x)
end
# BERT with both pretraining heads (masked LM + next-sentence prediction).
mutable struct BertPreTraining <: Layer
    bert::Bert
    pooler::Pooler
    nsp::NSPHead
    mlm::MLMHead
end

function BertPreTraining(config)
    bert = Bert(config)
    pooler = Pooler(config.embed_size, atype=config.atype)
    nsp = NSPHead(config.embed_size, atype=config.atype)
    mlm = MLMHead(config, bert.embed_layer.wordpiece.w) # TODO : Dont forget about embedding matrix
    return BertPreTraining(bert, pooler, nsp, mlm)
end

# We do not need a predictor, since this is only for pretraining
# Combined loss: NLL of NSP logits plus NLL of MLM logits restricted to
# positions whose label is not -1.
function (b::BertPreTraining)(x, segment_ids, mlm_labels, nsp_labels; attention_mask=nothing) # mlm_labels are SxB, so we just flatten them.
    x = b.bert(x, segment_ids, attention_mask=attention_mask)
    nsp_preds = b.nsp(b.pooler(x)) # 2xB
    mlm_preds = b.mlm(x) # VxSxB
    mlm_preds = reshape(mlm_preds, size(mlm_preds, 1), :) # VxS*B
    nsp_loss = nll(nsp_preds, nsp_labels)
    mlm_labels = reshape(mlm_labels, :) # S*B
    mlm_loss = nll(mlm_preds[:,mlm_labels.!=-1], mlm_labels[mlm_labels.!=-1])
    return mlm_loss + nsp_loss
end

# Mean pretraining loss over an entire dataset iterator.
function (b::BertPreTraining)(dtrn)
    lvals = []
    for (x, attention_mask, segment_ids, mlm_labels, nsp_labels) in dtrn
        push!(lvals, b(x, segment_ids, mlm_labels, nsp_labels, attention_mask=attention_mask))
    end
    return Knet.mean(lvals)
end
# BERT with a classification head: pooled CLS vector -> dropout -> linear
# projection to `num_of_classes` logits.
mutable struct BertClassification <: Layer
    bert::Bert
    pooler::Pooler
    linear::Linear
    pdrop
end

function BertClassification(config, num_of_classes)
    bert = Bert(config)
    pooler = Pooler(config.embed_size, atype=config.atype)
    linear = Linear(config.embed_size, num_of_classes, atype=config.atype)
    return BertClassification(bert, pooler, linear, config.pdrop)
end

# Forward pass: class logits (num_of_classes x B).
function (b::BertClassification)(x, segment_ids; attention_mask=nothing)
    x = b.bert(x, segment_ids, attention_mask=attention_mask)
    x = dropout(b.pooler(x), b.pdrop) # 2xB
    return b.linear(x)
end

# Loss variant: negative log-likelihood against gold labels `y`.
function (b::BertClassification)(x, segment_ids, y; attention_mask=nothing)
    return nll(b(x, segment_ids, attention_mask=attention_mask), y)
end

# Mean loss over an entire dataset iterator.
function (b::BertClassification)(dtrn)
    lvals = []
    for (x, attention_mask, segment_ids, y) in dtrn
        push!(lvals, b(x, segment_ids, y, attention_mask=attention_mask))
    end
    return Knet.mean(lvals)
end
# Hyper-parameters for one BERT model; constructed positionally (see the
# calls in the scripts for the argument order).
mutable struct BertConfig
    embed_size::Int      # E: hidden/embedding size
    vocab_size::Int      # V: wordpiece vocabulary size
    ff_hidden_size::Int  # intermediate feed-forward size
    max_seq_len::Int     # positions available in the positional embedding
    seq_len::Int         # sequence length actually used (S)
    num_segment::Int     # number of token-type (segment) embeddings
    num_heads::Int       # attention heads per layer (N)
    num_encoder::Int     # stacked encoder layers
    batchsize::Int       # B
    atype                # array type, e.g. KnetArray{Float32}
    pdrop                # general dropout rate
    attention_pdrop      # dropout rate on attention probabilities
    func                 # activation name as a String, e.g. "gelu"
end
# Copy the shared encoder weights (embeddings, encoder stack, pooler) from
# a HuggingFace PyTorch state dict into `model`. PyTorch stores Linear
# weights transposed relative to this implementation, hence the
# permutedims on embedding matrices only.
# NOTE(review): the obj[:attr] indexing is the old PyCall property syntax;
# newer PyCall prefers obj.attr -- verify against the installed version.
function load_from_torch_base(model, num_encoder, atype, torch_model)
    # Embed Layer
    model.bert.embed_layer.wordpiece.w = Param(atype(permutedims(torch_model["bert.embeddings.word_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.positional.w = Param(atype(permutedims(torch_model["bert.embeddings.position_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.segment.w = Param(atype(permutedims(torch_model["bert.embeddings.token_type_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.layer_norm.γ = Param(atype(torch_model["bert.embeddings.LayerNorm.gamma"][:cpu]()[:numpy]()))
    model.bert.embed_layer.layer_norm.β = Param(atype(torch_model["bert.embeddings.LayerNorm.beta"][:cpu]()[:numpy]()))
    # Encoder Stack (PyTorch layers are 0-based, hence $(i-1))
    for i in 1:num_encoder
        model.bert.encoder_stack[i].self_attention.query.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.query.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.query.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.query.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.key.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.key.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.key.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.key.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.value.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.value.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.value.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.value.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm1.γ = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.LayerNorm.gamma"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm1.β = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.LayerNorm.beta"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.dense.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).intermediate.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.dense.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).intermediate.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm2.γ = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.LayerNorm.gamma"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm2.β = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.LayerNorm.beta"][:cpu]()[:numpy]()))
    end
    # Pooler
    model.pooler.linear.w = Param(atype(torch_model["bert.pooler.dense.weight"][:cpu]()[:numpy]()))
    model.pooler.linear.b = Param(atype(torch_model["bert.pooler.dense.bias"][:cpu]()[:numpy]()))
    return model
end
"""
    load_from_torch_pretraining(model, num_encoder, atype, torch_model)

Copy pretraining-head weights from a PyTorch BERT checkpoint (`torch_model`,
a PyCall handle to a loaded state dict) into `model`, after first loading the
shared base weights. `atype` converts the numpy arrays to the target array
type (e.g. `KnetArray{Float32}`). Returns the mutated `model`.
"""
function load_from_torch_pretraining(model, num_encoder, atype, torch_model)
    # Base encoder/embedding weights are shared between all heads.
    model = load_from_torch_base(model, num_encoder, atype, torch_model)
    # NSP (next-sentence prediction) head
    model.nsp.linear.w = Param(atype(torch_model["cls.seq_relationship.weight"][:cpu]()[:numpy]()))
    model.nsp.linear.b = Param(atype(torch_model["cls.seq_relationship.bias"][:cpu]()[:numpy]()))
    # MLM (masked language model) head: dense transform -> layer norm -> decoder.
    model.mlm.dense.linear.w = Param(atype(torch_model["cls.predictions.transform.dense.weight"][:cpu]()[:numpy]()))
    model.mlm.dense.linear.b = Param(atype(torch_model["cls.predictions.transform.dense.bias"][:cpu]()[:numpy]()))
    model.mlm.layer_norm.γ = Param(atype(torch_model["cls.predictions.transform.LayerNorm.gamma"][:cpu]()[:numpy]()))
    model.mlm.layer_norm.β = Param(atype(torch_model["cls.predictions.transform.LayerNorm.beta"][:cpu]()[:numpy]()))
    # NOTE(review): decoder weight and bias come from different checkpoint keys
    # ("cls.predictions.decoder.weight" vs "cls.predictions.bias") — this matches
    # the HuggingFace layout, where the decoder bias is stored separately.
    model.mlm.linear.w = Param(atype(torch_model["cls.predictions.decoder.weight"][:cpu]()[:numpy]()))
    model.mlm.linear.b = Param(atype(torch_model["cls.predictions.bias"][:cpu]()[:numpy]()))
    return model
end
"""
    load_from_torch_classification(model, num_encoder, atype, torch_model)

Copy sequence-classification head weights from a PyTorch BERT checkpoint into
`model`, after loading the shared base weights. Returns the mutated `model`.
"""
function load_from_torch_classification(model, num_encoder, atype, torch_model)
    model = load_from_torch_base(model, num_encoder, atype, torch_model)
    # Single linear classifier on top of the pooled output.
    model.linear.w = Param(atype(torch_model["classifier.weight"][:cpu]()[:numpy]()))
    model.linear.b = Param(atype(torch_model["classifier.bias"][:cpu]()[:numpy]()))
    return model
end
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | code | 1751 | import Knet: update!
# Learning-rate schedules used by BertAdam. Each maps training progress
# `x ∈ [0, 1]` to a multiplier on the base learning rate: a linear ramp
# from 0 to 1 during the first `warmup` fraction, then the named decay.
function warmup_cosine(x, warmup=0.002)
    x < warmup && return x / warmup
    return 0.5 * (1.0 + cos(π * x))       # cosine decay from 1 to 0
end

function warmup_constant(x, warmup=0.002)
    x < warmup && return x / warmup
    return 1.0                            # flat after warmup
end

function warmup_linear(x, warmup=0.002)
    x < warmup && return x / warmup
    return 1.0 - x                        # linear decay to 0
end
"""
    BertAdam(; lr=0.001, gclip=1.0, beta1=0.9, beta2=0.999, eps=1e-6,
               w_decay_rate=0.0, schedule="warmup_linear", warmup=-1, t_total=-1)

Adam-variant optimizer state as used by BERT: Adam moments without bias
correction, optional decoupled weight decay, gradient clipping, and a
warmup learning-rate schedule selected by name. `warmup=-1`/`t_total=-1`
disable scheduling (constant `lr`).
"""
mutable struct BertAdam
    lr::AbstractFloat           # base learning rate
    beta1::AbstractFloat        # exponential decay for the first moment
    beta2::AbstractFloat        # exponential decay for the second moment
    eps::AbstractFloat          # denominator fuzz term
    t::Int                      # number of update steps taken so far
    gclip::AbstractFloat        # gradient-norm clipping threshold
    fstm                        # first-moment buffer (lazily allocated on first update)
    scndm                       # second-moment buffer (lazily allocated on first update)
    w_decay_rate::AbstractFloat # decoupled weight-decay coefficient (0 disables)
    schedule                    # schedule name, e.g. "warmup_linear"
    warmup                      # warmup fraction of total steps
    t_total                     # total number of scheduled steps (-1 = no schedule)
end
# Keyword convenience constructor; moment buffers start as `nothing` and t at 0.
BertAdam(; lr=0.001, gclip=1.0, beta1=0.9, beta2=0.999, eps=1e-6, w_decay_rate=0.0, schedule="warmup_linear", warmup=-1, t_total=-1)=BertAdam(lr, beta1, beta2, eps, 0, gclip, nothing, nothing, w_decay_rate, schedule, warmup, t_total)
# Define `update!` for all supported CPU/GPU array types.
for T in (Array{Float32},Array{Float64},KnetArray{Float32},KnetArray{Float64}); @eval begin

# Apply one BertAdam step to weights `w` given gradient `g`, mutating `w`,
# the moment buffers and the step counter in `p`.
function update!(w::$T, g::$T, p::BertAdam)
    Knet.gclip!(g, p.gclip)
    # Lazily allocate moment buffers on the first call.
    if p.fstm === nothing; p.fstm = zero(w); p.scndm = zero(w); end
    # m ← β1·m + (1-β1)·g ; v ← β2·v + (1-β2)·g²
    lmul!(p.beta1, p.fstm)
    axpy!(1-p.beta1, g, p.fstm)
    lmul!(p.beta2, p.scndm)
    axpy!(1-p.beta2, g .* g, p.scndm)
    # BERT's Adam variant deliberately omits bias correction:
    #fstm_corrected = p.fstm / (1 - p.beta1 ^ p.t)
    #scndm_corrected = p.scndm / (1 - p.beta2 ^ p.t)
    if p.t_total !== -1
        # Resolve the schedule function by name. `getfield` on the module is
        # equivalent to the former `eval(Meta.parse(p.schedule))` for simple
        # names, but avoids parsing/eval'ing a string on every update step.
        schedule_func = getfield(@__MODULE__, Symbol(p.schedule))
        # NOTE(review): p.t is incremented *after* being used here, so the
        # first step sees t=0 — confirm this matches the reference schedule.
        lr_scheduled = p.lr * schedule_func(p.t/p.t_total, p.warmup)
    else
        lr_scheduled = p.lr
    end
    if p.w_decay_rate > 0.0
        # Decoupled weight decay folded into the update direction.
        axpy!(-lr_scheduled, (p.fstm ./ (sqrt.(p.scndm) .+ p.eps)) .+ (p.w_decay_rate * w), w)
    else
        axpy!(-lr_scheduled, (p.fstm ./ (sqrt.(p.scndm) .+ p.eps)), w)
    end
    p.t += 1
end

end;end
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
"""
    wordpiece_tokenize(token, dict)

Split `token` into WordPiece sub-tokens using greedy longest-match-first
lookup in the vocabulary `dict`. Non-initial pieces are prefixed with "##".
Returns `["[UNK]"]` if no prefix of the remaining text matches at all.
"""
function wordpiece_tokenize(token, dict)
    out_tokens = String[]
    # Work on valid character boundaries so multi-byte UTF-8 characters are
    # handled correctly (the previous version indexed by byte position and
    # swallowed the resulting StringIndexError with a try/catch).
    idxs = collect(eachindex(token))
    nchars = length(idxs)
    start = 1
    while start <= nchars
        finish = nchars
        final_token = ""
        # Try the longest remaining substring first, shrinking until a match.
        for i in finish:-1:start
            piece = token[idxs[start]:idxs[i]]
            tkn = start == 1 ? piece : string("##", piece)
            if haskey(dict, tkn)
                final_token = tkn
                finish = i
                break
            end
        end
        # No prefix matched at all: the whole token is unknown.
        final_token == "" && return ["[UNK]"]
        push!(out_tokens, final_token)
        start = finish + 1
    end
    return out_tokens
end
"""
    process_punc(tokens)

Split every token at punctuation characters, emitting each punctuation
character as its own token (e.g. `"a,b"` becomes `["a", ",", "b"]`).
"""
function process_punc(tokens)
    out_tokens = []
    for token in tokens
        pieces = []
        buf = ""
        # Iterate characters directly; the old `enumerate` index was unused.
        for char in token
            if ispunct(char)
                # Flush any accumulated run of non-punctuation characters.
                buf != "" && push!(pieces, buf)
                buf = ""
                push!(pieces, string(char))
            else
                buf = string(buf, char)
            end
        end
        # Trailing run after the last punctuation mark.
        buf != "" && push!(pieces, buf)
        append!(out_tokens, pieces)
    end
    return out_tokens
end
"""
    bert_tokenize(text, dict; lower_case=true)

Full BERT tokenization pipeline: strip and optionally lowercase `text`,
split on whitespace, split off punctuation, then WordPiece-tokenize each
piece against the vocabulary `dict`. Returns a vector of sub-tokens
(empty for blank input).
"""
function bert_tokenize(text, dict; lower_case=true)
    text = strip(text)
    text == "" && return []
    if lower_case
        text = lowercase(text)
    end
    # Whitespace split, then isolate punctuation characters as tokens.
    tokens = split(text)
    tokens = process_punc(tokens)
    out_tokens = []
    for token in tokens
        append!(out_tokens, wordpiece_tokenize(token, dict))
    end
    return out_tokens
end
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | docs | 197 | # BERT-in-Knet
This repo contains the final project for the Comp541 Deep Learning class at Koç University.
This is a replication attempt of the original https://github.com/google-research/bert in Knet.
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1034 | # Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using DensityInterface
# Doctest setup
DocMeta.setdocmeta!(
DensityInterface,
:DocTestSetup,
quote
using DensityInterface
object = logfuncdensity(x -> -x^2)
log_f = logdensityof(object)
f = densityof(object)
x = 4
end;
recursive=true,
)
makedocs(
sitename = "DensityInterface",
modules = [DensityInterface],
format = Documenter.HTML(
prettyurls = !("local" in ARGS),
canonical = "https://JuliaMath.github.io/DensityInterface.jl/stable/"
),
pages = [
"Home" => "index.md",
"API" => "api.md",
"LICENSE" => "LICENSE.md",
],
doctest = ("fixdoctests" in ARGS) ? :fix : true,
linkcheck = !("nonstrict" in ARGS),
strict = !("nonstrict" in ARGS),
)
deploydocs(
repo = "github.com/JuliaMath/DensityInterface.jl.git",
forcepush = true,
push_preview = true,
)
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 342 | # This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
DensityInterface
Trait-based interface for mathematical/statistical densities and objects
associated with a density.
"""
module DensityInterface
using InverseFunctions
using Test
include("interface.jl")
include("interface_test.jl")
end # module
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 10053 | # This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
abstract type DensityKind end
DensityKind(object)
Subtypes of `DensityKind` indicate if an `object` *is* a density or if it *has*
a density, in the sense of the `DensityInterface` API, or if is *not*
associated with a density (not compatible with `DensityInterface`).
`DensityKind(object)` returns either `IsDensity()`, `HasDensity()` or
`NoDensity()`.
In addition to the subtypes [`IsDensity`](@ref), [`HasDensity`](@ref) or
[`NoDensity`](@ref), a union `IsOrHasDensity = Union{IsDensity, HasDensity}`
is defined for convenience.
`DensityKind(object) isa IsOrHasDensity` implies that `object` is either a
density itself or can be said to have an associated density. It also implies
that the value of that density at given points can be calculated via
[`logdensityof`](@ref) and [`densityof`](@ref).
`DensityKind(object)` defaults to `NoDensity()` (object is not and does not
have a density). For a type that *is* (directly represents) a density, like a
probability density, define
```julia
@inline DensityKind(::MyDensityType) = IsDensity()
```
For a type that *has* (is associated with) a density in some way, like
a probability distribution has a probability density, define
```julia
@inline DensityKind(::MyDensityType) = HasDensity()
```
"""
abstract type DensityKind end
export DensityKind
@inline DensityKind(object) = NoDensity()
"""
struct IsDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *is* (represents) a density, like a probability density
object.
See [`DensityKind`](@ref) for details.
"""
struct IsDensity <: DensityKind end
export IsDensity
"""
struct HasDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *has* a density, like a probability distribution has
a probability density.
See [`DensityKind`](@ref) for details.
"""
struct HasDensity <: DensityKind end
export HasDensity
"""
struct NoDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *is not* and *does not have* a density, as understood by
`DensityInterface`.
See [`DensityKind`](@ref) for details.
"""
struct NoDensity <: DensityKind end
export NoDensity
"""
IsOrHasDensity = Union{IsDensity, HasDensity}
As a return value of [`DensityKind(object)`](@ref), indicates that `object`
either *is* or *has* a density, as understood by `DensityInterface`.
See [`DensityKind`](@ref) for details.
"""
const IsOrHasDensity = Union{IsDensity, HasDensity}
export IsOrHasDensity
# Internal guard shared by the unary `logdensityof`/`densityof` methods:
# error out unless `object` is or has a density.
function _check_is_or_has_density(object)
    if DensityKind(object) isa IsOrHasDensity
        return true
    end
    throw(ArgumentError("Object of type $(typeof(object)) neither is nor has a density"))
end
"""
logdensityof(object, x)::Real
Compute the logarithmic value of the density `object` (resp. its associated density)
at a given point `x`.
```jldoctest a
julia> DensityKind(object)
IsDensity()
julia> logy = logdensityof(object, x); logy isa Real
true
```
See also [`DensityKind`](@ref) and [`densityof`](@ref).
"""
function logdensityof end
export logdensityof
"""
logdensityof(object)
Return a function that computes the logarithmic value of the density `object`
(resp. its associated density) at a given point.
```jldoctest a
julia> log_f = logdensityof(object); log_f isa Function
true
julia> log_f(x) == logdensityof(object, x)
true
```
`logdensityof(object)` defaults to `Base.Fix1(logdensityof, object)`, but may be
specialized. If so, [`logfuncdensity`](@ref) will typically have to be
specialized for the return type of `logdensityof` as well.
[`logfuncdensity`](@ref) is the inverse of `logdensityof`, so
`logfuncdensity(log_f)` must be equivalent to `object`.
"""
function logdensityof(object)
_check_is_or_has_density(object)
Base.Fix1(logdensityof, object)
end
"""
densityof(object, x)::Real
Compute the value of the density `object` (resp. its associated density)
at a given point `x`.
```jldoctest a
julia> DensityKind(object)
IsDensity()
julia> densityof(object, x) == exp(logdensityof(object, x))
true
```
`densityof(object, x)` defaults to `exp(logdensityof(object, x))`, but
may be specialized.
See also [`DensityKind`](@ref) and [`densityof`](@ref).
"""
densityof(object, x) = exp(logdensityof(object, x))
export densityof
"""
densityof(object)
Return a function that computes the value of the density `object`
(resp. its associated density) at a given point.
```jldoctest a
julia> f = densityof(object);
julia> f(x) == densityof(object, x)
true
```
`densityof(object)` defaults to `Base.Fix1(densityof, object)`, but may be specialized.
"""
function densityof(object)
_check_is_or_has_density(object)
Base.Fix1(densityof, object)
end
"""
logfuncdensity(log_f)
Return a `DensityInterface`-compatible density that is defined by a given
log-density function `log_f`:
```jldoctest
julia> object = logfuncdensity(log_f);
julia> DensityKind(object)
IsDensity()
julia> logdensityof(object, x) == log_f(x)
true
```
`logfuncdensity(log_f)` returns an instance of [`DensityInterface.LogFuncDensity`](@ref)
by default, but may be specialized to return something else depending on the
type of `log_f`). If so, [`logdensityof`](@ref) will typically have to be
specialized for the return type of `logfuncdensity` as well.
`logfuncdensity` is the inverse of `logdensityof`, so the following must
hold true:
* `d = logfuncdensity(logdensityof(object))` is equivalent to `object` in
respect to `logdensityof` and `densityof`. However, `d` may not be equal to
`object`, especially if `DensityKind(object) == HasDensity()`: `logfuncdensity` always
creates something that *is* density, never something that just *has*
a density in some way (like a distribution or a measure in general).
* `logdensityof(logfuncdensity(log_f))` is equivalent (typically equal or even
identical to) to `log_f`.
See also [`DensityKind`](@ref).
"""
function logfuncdensity end
export logfuncdensity

# Generic fallback: wrap any log-density function.
@inline logfuncdensity(log_f) = LogFuncDensity(log_f)

# For functions stemming from objects that *have* a density, create a new density:
@inline _logfuncdensity_impl(::HasDensity, log_f::Base.Fix1{typeof(logdensityof)}) = LogFuncDensity(log_f)
# For functions stemming from objects that *are* a density, recover original object:
@inline _logfuncdensity_impl(::IsDensity, log_f::Base.Fix1{typeof(logdensityof)}) = log_f.x

# A `Base.Fix1(logdensityof, obj)` came from `logdensityof(obj)`; dispatch on
# obj's DensityKind to decide between round-tripping and wrapping.
@inline logfuncdensity(log_f::Base.Fix1{typeof(logdensityof)}) = _logfuncdensity_impl(DensityKind(log_f.x), log_f)

# Register the pair as mutual inverses with InverseFunctions.
InverseFunctions.inverse(::typeof(logfuncdensity)) = logdensityof
InverseFunctions.inverse(::typeof(logdensityof)) = logfuncdensity
"""
struct DensityInterface.LogFuncDensity{F}
Wraps a log-density function `log_f` to make it compatible with `DensityInterface`
interface. Typically, `LogFuncDensity(log_f)` should not be called
directly, [`logfuncdensity`](@ref) should be used instead.
"""
struct LogFuncDensity{F}
    _log_f::F  # the wrapped log-density function
end
LogFuncDensity

# A LogFuncDensity *is* a density by construction.
@inline DensityKind(::LogFuncDensity) = IsDensity()

# Evaluate / extract the wrapped log-density function.
@inline logdensityof(object::LogFuncDensity, x) = object._log_f(x)
@inline logdensityof(object::LogFuncDensity) = object._log_f

# Non-log density derived by exponentiation.
@inline densityof(object::LogFuncDensity, x) = exp(object._log_f(x))
@inline densityof(object::LogFuncDensity) = exp ∘ object._log_f

# Compact display: LogFuncDensity(<wrapped function>)
function Base.show(io::IO, object::LogFuncDensity)
    print(io, nameof(typeof(object)), "(")
    show(io, object._log_f)
    print(io, ")")
end
"""
funcdensity(f)
Return a `DensityInterface`-compatible density that is defined by a given
non-log density function `f`:
```jldoctest
julia> object = funcdensity(f);
julia> DensityKind(object)
IsDensity()
julia> densityof(object, x) == f(x)
true
```
`funcdensity(f)` returns an instance of [`DensityInterface.FuncDensity`](@ref)
by default, but may be specialized to return something else depending on the
type of `f`). If so, [`densityof`](@ref) will typically have to be
specialized for the return type of `funcdensity` as well.
`funcdensity` is the inverse of `densityof`, so the following must
hold true:
* `d = funcdensity(densityof(object))` is equivalent to `object` in
respect to `logdensityof` and `densityof`. However, `d` may not be equal to
`object`, especially if `DensityKind(object) == HasDensity()`: `funcdensity` always
creates something that *is* density, never something that just *has*
a density in some way (like a distribution or a measure in general).
* `densityof(funcdensity(f))` is equivalent (typically equal or even
identical to) to `f`.
See also [`DensityKind`](@ref).
"""
function funcdensity end
export funcdensity

# Generic fallback: wrap any non-log density function.
@inline funcdensity(f) = FuncDensity(f)

# For functions stemming from objects that *have* a density, create a new density:
@inline _funcdensity_impl(::HasDensity, f::Base.Fix1{typeof(densityof)}) = FuncDensity(f)
# For functions stemming from objects that *are* a density, recover original object:
@inline _funcdensity_impl(::IsDensity, f::Base.Fix1{typeof(densityof)}) = f.x

# A `Base.Fix1(densityof, obj)` came from `densityof(obj)`; dispatch on obj's
# DensityKind to decide between round-tripping and wrapping.
@inline funcdensity(f::Base.Fix1{typeof(densityof)}) = _funcdensity_impl(DensityKind(f.x), f)

# Register the pair as mutual inverses with InverseFunctions.
InverseFunctions.inverse(::typeof(funcdensity)) = densityof
InverseFunctions.inverse(::typeof(densityof)) = funcdensity
"""
struct DensityInterface.FuncDensity{F}
Wraps a non-log density function `f` to make it compatible with
`DensityInterface` interface. Typically, `FuncDensity(f)` should not be
called directly, [`funcdensity`](@ref) should be used instead.
"""
struct FuncDensity{F}
    _f::F  # the wrapped (non-log) density function
end
FuncDensity

# A FuncDensity *is* a density by construction.
@inline DensityKind(::FuncDensity) = IsDensity()

# Log density derived by taking the logarithm.
@inline logdensityof(object::FuncDensity, x) = log(object._f(x))
@inline logdensityof(object::FuncDensity) = log ∘ object._f

# Evaluate / extract the wrapped density function.
@inline densityof(object::FuncDensity, x) = object._f(x)
@inline densityof(object::FuncDensity) = object._f

# Compact display: FuncDensity(<wrapped function>)
function Base.show(io::IO, object::FuncDensity)
    print(io, nameof(typeof(object)), "(")
    show(io, object._f)
    print(io, ")")
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1901 | # This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
DensityInterface.test_density_interface(object, x, ref_logd_at_x; kwargs...)
Test that `object` is compatible with `DensityInterface`.
Tests that either `DensityKind(object) isa IsOrHasDensity`.
Also tests that [`logdensityof(object, x)`](@ref) equals `ref_logd_at_x` and
that the behavior of [`logdensityof(object)`](@ref),
[`densityof(object, x)`](@ref) and [`densityof(object)`](@ref) is consistent.
The results of `logdensityof(object, x)` and `densityof(object, x)` are compared to
`ref_logd_at_x` and `exp(ref_logd_at_x)` using `isapprox`. `kwargs...` are
forwarded to `isapprox`.
Also tests that `d = logfuncdensity(logdensityof(object))` returns a density
(`DensityKind(d) == IsDensity()`) that is equivalent to `object` in respect to
`logdensityof` and `densityof`, and that `funcdensity(densityof(object))`
behaves the same way.
"""
function test_density_interface(object, x, ref_logd_at_x; kwargs...)
    @testset "test_density_interface: $object with input $x" begin
        ref_d_at_x = exp(ref_logd_at_x)

        # The object must participate in the density interface at all.
        @test DensityKind(object) isa IsOrHasDensity

        # Binary and unary (partially applied) forms must agree with the reference.
        @test isapprox(logdensityof(object, x), ref_logd_at_x; kwargs...)
        log_f = logdensityof(object)
        @test isapprox(log_f(x), ref_logd_at_x; kwargs...)

        @test isapprox(densityof(object,x), ref_d_at_x; kwargs...)
        f = densityof(object)
        @test isapprox(f(x), ref_d_at_x; kwargs...)

        # Round-tripping through (log)funcdensity must yield an equivalent density.
        for d in (logfuncdensity(log_f), funcdensity(f))
            @test DensityKind(d) == IsDensity()
            @test isapprox(logdensityof(d, x), ref_logd_at_x; kwargs...)
            @test isapprox(logdensityof(d)(x), ref_logd_at_x; kwargs...)
            @test isapprox(densityof(d,x), ref_d_at_x; kwargs...)
            @test isapprox(densityof(d)(x), ref_d_at_x; kwargs...)
        end
    end
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 613 | # This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
import Test
import DensityInterface
import Documenter
Test.@testset "Package DensityInterface" begin
include("test_interface.jl")
# doctests
Documenter.DocMeta.setdocmeta!(
DensityInterface,
:DocTestSetup,
quote
using DensityInterface
object = logfuncdensity(x -> x^2)
log_f = logdensityof(object)
f = densityof(object)
x = 4.2
end;
recursive=true,
)
Documenter.doctest(DensityInterface)
end # testset
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1335 | # This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
using DensityInterface
using Test
using LinearAlgebra, InverseFunctions
struct MyDensity end
@inline DensityInterface.DensityKind(::MyDensity) = IsDensity()
DensityInterface.logdensityof(::MyDensity, x::Any) = -norm(x)^2
struct MyMeasure end
@inline DensityInterface.DensityKind(::MyMeasure) = HasDensity()
DensityInterface.logdensityof(::MyMeasure, x::Any) = -norm(x)^2
@testset "interface" begin
@test inverse(logdensityof) == logfuncdensity
@test inverse(logfuncdensity) == logdensityof
@test inverse(densityof) == funcdensity
@test inverse(funcdensity) == densityof
@test @inferred(DensityKind("foo")) === NoDensity()
@test_throws ArgumentError logdensityof("foo")
@test_throws ArgumentError densityof("foo")
for object1 in (MyDensity(), MyMeasure())
x = [1, 2, 3]
DensityInterface.test_density_interface(object1, x, -norm(x)^2)
object2 = logfuncdensity(x -> -norm(x)^2)
@test DensityKind(object2) === IsDensity()
DensityInterface.test_density_interface(object2, x, -norm(x)^2)
object3 = funcdensity(x -> exp(-norm(x)^2))
@test DensityKind(object3) === IsDensity()
DensityInterface.test_density_interface(object3, x, -norm(x)^2)
end
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 1102 | # DensityInterface.jl
[](https://JuliaMath.github.io/DensityInterface.jl/stable)
[](https://JuliaMath.github.io/DensityInterface.jl/dev)
[](LICENSE.md)
[](https://github.com/JuliaMath/DensityInterface.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/JuliaMath/DensityInterface.jl)
This package defines an interface for mathematical/statistical densities and objects associated with a density in Julia. See the documentation for details.
## Documentation
* [Documentation for stable version](https://JuliaMath.github.io/DensityInterface.jl/stable)
* [Documentation for development version](https://JuliaMath.github.io/DensityInterface.jl/dev)
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 335 | # API
## Interface
```@docs
logdensityof
logdensityof(::Any)
logfuncdensity
funcdensity
densityof
densityof(::Any)
```
## Types
```@docs
IsDensity
HasDensity
IsOrHasDensity
NoDensity
DensityKind
DensityInterface.LogFuncDensity
DensityInterface.FuncDensity
```
## Test utility
```@docs
DensityInterface.test_density_interface
```
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 2311 | # DensityInterface.jl
```@meta
DocTestSetup = quote
struct SomeDensity end
log_of_d_at(x) = x^2
x = 4
end
```
```@docs
DensityInterface
```
This package defines an interface for mathematical/statistical densities and objects associated with a density in Julia. The interface comprises the type [`DensityKind`](@ref) and the functions [`logdensityof`](@ref)/[`densityof`](@ref)[^1] and [`logfuncdensity`](@ref)/[`funcdensity`](@ref).
The following methods must be provided to make a type (e.g. `SomeDensity`) compatible with the interface:
```jldoctest a
import DensityInterface
@inline DensityInterface.DensityKind(::SomeDensity) = IsDensity()
DensityInterface.logdensityof(object::SomeDensity, x) = log_of_d_at(x)
object = SomeDensity()
DensityInterface.logdensityof(object, x) isa Real
# output
true
```
`object` may be/represent a density itself (`DensityKind(object) === IsDensity()`) or it may be something that can be said to have a density (`DensityKind(object) === HasDensity()`)[^2].
In statistical inference applications, for example, `object` might be a likelihood, prior or posterior.
DensityInterface automatically provides `logdensityof(object)`, equivalent to `x -> logdensityof(object, x)`. This constitutes a convenient way of passing a (log-)density function to algorithms like optimizers, samplers, etc.:
```jldoctest a
using DensityInterface
object = SomeDensity()
log_f = logdensityof(object)
log_f(x) == logdensityof(object, x)
# output
true
```
```julia
SomeOptimizerPackage.maximize(logdensityof(object), x_init)
```
Reversely, a given log-density function `log_f` can be converted to a DensityInterface-compatible density object using [`logfuncdensity`](@ref):
```julia
object = logfuncdensity(log_f)
DensityKind(object) === IsDensity() && logdensityof(object, x) == log_f(x)
# output
true
```
[^1]: The function names `logdensityof` and `densityof` were chosen to convey that the target object may either *be* a density or something that can be said to *have* a density. They also have less naming conflict potential than `logdensity` and esp. `density` (the latter already being exported by Plots.jl).
[^2]: The package [`Distributions`](https://github.com/JuliaStats/Distributions.jl) supports `DensityInterface` for `Distributions.Distribution`.
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
|
[
"MIT"
] | 1.0.0 | 0fd73bf40485c791e6c33672c643bf1303045e9a | code | 3037 | module BatchIterators
using Statistics
export BatchIterator
export choose_batchsize
export centered_batch_iterator
"""
BatchIterator(X; batchsize = nothing, limit=size(X,2))
Wrapper allowing to iterate over batches of `batchsize` columns of `X`. `X` can be of any type supporting `size` and 2d indexing. When `limit` is provided, iteration is restricted to the columns of `X[:, 1:limit]`.
"""
struct BatchIterator{T}
X::T
length::Int # Number of batches
bsz::Int # Batch size
limit::Int
function BatchIterator(X; batchsize=nothing, limit=size(X,2))
@assert limit > 0 && limit ≤ size(X,2)
bsz = (batchsize == nothing) ? choose_batchsize(size(X,1), limit) : batchsize
nb = ceil(Int, limit/bsz)
new{typeof(X)}(X, nb, bsz, limit)
end
end
# Whether batches can be returned as non-copying views. Conservative default
# is `false`; plain `Array`s (and iterators wrapping them) opt in.
view_compatible(::Any) = false
view_compatible(::Array) = true
view_compatible(bi::BatchIterator) = view_compatible(bi.X)
#######################################################################
# Iteration #
#######################################################################
function Base.getindex(it::BatchIterator, i)
d = i - it.length # > 0 means overflow, == 0 means last batch
cbsz = (d == 0) ? mod(it.limit - 1, it.bsz) + 1 : it.bsz # Size of current batch
if (i<1 || d > 0)
@error "Out of bounds."
else
# TODO using views here might impact type stability.
view_compatible(it) ? (@view it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]) : it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]
end
end
# Number of batches (not the number of columns).
Base.length(it::BatchIterator) = it.length

# Iteration protocol: the state is the index of the last batch produced.
function Base.iterate(it::BatchIterator{T}, st = 0) where T
    st = st + 1 # new state
    d = st - it.length # > 0 means overflow, == 0 means last batch
    (d > 0) ? nothing : (it[st], st)
end
"""
centered_batch_iterator(X; kwargs...)
Similar to BatchIterator, but performs first one pass over the data to compute the mean, and centers the batches.
"""
function centered_batch_iterator(X; kwargs...)
bi = BatchIterator(X; kwargs...)
μ = vec(mean(mean(b, dims=2) for b in BatchIterator(X)))
(b .- μ for b in bi)
end
#######################################################################
# Utilities #
#######################################################################
"""
choose_batchsize(d, n; maxmemGB = 1.0, maxbatchsize = 2^14, sizeoneB = d*sizeof(Float64))
Computes the size (nb. of columns) of a batch, so that each column of the batch can be converted to a vector of size `sizeoneB` (in bytes) with a total memory constrained by `maxmemGB` (gigabytes).
"""
function choose_batchsize(d, n;
maxmemGB = 1.0,
maxbatchsize = 2^14,
sizeoneB = d*sizeof(Float64),
forcepow2 = true)
fullsizeGB = n * sizeoneB/1024^3 # Size of the sketches of all samples
batchsize = (fullsizeGB > maxmemGB) ? ceil(Int, n/ceil(Int, fullsizeGB/maxmemGB)) : n
batchsize = min(batchsize, maxbatchsize)
(forcepow2 && batchsize != n) ? prevpow(2, batchsize) : batchsize
end
end # module
| BatchIterators | https://github.com/Djoop/BatchIterators.jl.git |
|
[
"MIT"
] | 1.0.0 | 0fd73bf40485c791e6c33672c643bf1303045e9a | docs | 497 | # Summary
Licence: MIT.
A very small package providing the constructor `BatchIterator(X; batchsize=…, limit=…)` and the function `centered_batch_iterator(X; kwargs…)`, which allow iteration over blocks of columns of `X`, for any object `X` supporting 2d indexing and for which the function `size` is defined.
The function `choose_batchsize` helps finding a good batch size while controlling memory usage.
The package was originally designed to iterate over samples of an out-of-core dataset.
| BatchIterators | https://github.com/Djoop/BatchIterators.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 188 | module MatrixMarket
using SparseArrays
using LinearAlgebra
using CodecZlib
export mmread, mmwrite, mminfo
include("mminfo.jl")
include("mmread.jl")
include("mmwrite.jl")
end # module
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 1810 | """
mminfo(file)
Read header information on the size and structure from file. The actual data matrix is not
parsed.
# Arguments
- `file`: The filename or io stream.
"""
function mminfo(filename::String)
    stream = open(filename, "r")
    if endswith(filename, ".gz")
        # Transparently decompress gzipped files; closing the codec stream
        # also closes the underlying file.
        stream = GzipDecompressorStream(stream)
    end
    # Ensure the stream is closed even if header parsing throws.
    try
        return mminfo(stream)
    finally
        close(stream)
    end
end
# Parse the Matrix Market header from `stream`, leaving the stream positioned
# at the first data entry. Returns (rows, cols, entries, rep, field, symm).
function mminfo(stream::IO)
    # Banner line: "%%MatrixMarket matrix <rep> <field> <symmetry>"
    firstline = chomp(readline(stream))
    if !startswith(firstline, "%%MatrixMarket")
        throw(FileFormatException("Expected start of header `%%MatrixMarket`"))
    end
    tokens = split(firstline)
    if length(tokens) != 5
        throw(FileFormatException("Not enough words on first line, got $(length(tokens)) words"))
    end
    # Keywords are case-insensitive per the format; normalize to lowercase.
    (head1, rep, field, symm) = map(lowercase, tokens[2:5])
    if head1 != "matrix"
        throw(FileFormatException("Unknown MatrixMarket data type: $head1 (only `matrix` is supported)"))
    end
    dimline = readline(stream)
    # Skip all comments and empty lines
    while length(chomp(dimline)) == 0 || (length(dimline) > 0 && dimline[1] == '%')
        dimline = readline(stream)
    end
    rows, cols, entries = parse_dimension(dimline, rep)
    return rows, cols, entries, rep, field, symm
end
# Error type raised for malformed Matrix Market input.
struct FileFormatException <: Exception
    msg::String
end

# Display just the stored message when the exception is shown.
function Base.showerror(io::IO, err::FileFormatException)
    print(io, err.msg)
end
# Parse a Matrix Market size line. For "coordinate" storage the line is
# "rows cols nnz"; for "array" storage it is "rows cols" and the entry
# count is rows*cols.
function parse_dimension(line::String, rep::String)
    dims = [parse(Int, tok) for tok in split(line)]
    needed = rep == "coordinate" ? 3 : 2
    if length(dims) < needed
        throw(FileFormatException(string("Could not read in matrix dimensions from line: ", line)))
    end
    nentries = rep == "coordinate" ? dims[3] : dims[1] * dims[2]
    return dims[1], dims[2], nentries
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 3851 | """
mmread(filename, infoonly=false, retcoord=false)
Read the contents of the Matrix Market file `filename` into a matrix, which will be either
sparse or dense, depending on the Matrix Market format indicated by `coordinate` (coordinate
sparse storage), or `array` (dense array storage).
# Arguments
- `filename::String`: The file to read.
- `infoonly::Bool=false`: Only information on the size and structure is returned from
reading the header. The actual data for the matrix elements are not parsed.
- `retcoord::Bool`: If it is `true`, the rows, column and value vectors are returned along
with the header information.
"""
function mmread(filename::String, infoonly::Bool=false, retcoord::Bool=false)
    stream = open(filename, "r")
    if endswith(filename, ".gz")
        stream = GzipDecompressorStream(stream)
    end
    # Bug fix: the previous call `mmread(stream, retcoord)` bound `retcoord`
    # to the IO method's *infoonly* positional, so `retcoord=true` returned
    # header info instead of coordinate vectors. Pass the flags explicitly.
    # Also close the stream even if parsing throws.
    try
        return infoonly ? mminfo(stream) : mmread(stream, false, retcoord)
    finally
        close(stream)
    end
end
# Read a Matrix Market body from `stream`. Returns either the assembled
# (sparse or dense) matrix, the raw header info (infoonly=true), or the raw
# coordinate vectors plus header (retcoord=true, coordinate files only).
function mmread(stream::IO, infoonly::Bool=false, retcoord::Bool=false)
    rows, cols, entries, rep, field, symm = mminfo(stream)
    infoonly && return rows, cols, entries, rep, field, symm

    T = parse_eltype(field)          # element type from the "field" keyword
    symfunc = parse_symmetric(symm)  # post-processing for symmetric storage
    if rep == "coordinate"
        rn = Vector{Int}(undef, entries)
        cn = Vector{Int}(undef, entries)
        vals = Vector{T}(undef, entries)
        # One entry per line: "row col [value [imag]]" (pattern files omit values).
        for i in 1:entries
            line = readline(stream)
            splits = find_splits(line, num_splits(T))
            rn[i] = parse_row(line, splits)
            cn[i] = parse_col(line, splits, T)
            vals[i] = parse_val(line, splits, T)
        end
        result = retcoord ? (rn, cn, vals, rows, cols, entries, rep, field, symm) :
            symfunc(sparse(rn, cn, vals, rows, cols))
    else
        # Dense "array" storage: one value per line, column-major order.
        # NOTE(review): values are always parsed as Float64 here, regardless
        # of `field` — integer/complex dense files are not round-tripped in
        # their declared element type. Confirm whether this is intentional.
        vals = [parse(Float64, readline(stream)) for _ in 1:entries]
        A = reshape(vals, rows, cols)
        result = symfunc(A)
    end
    return result
end
# Map a Matrix Market "field" keyword to the Julia element type used to
# store the values ("pattern" entries carry no value and are stored as Bool).
function parse_eltype(field::String)
    field == "real"    && return Float64
    field == "complex" && return ComplexF64
    field == "integer" && return Int64
    field == "pattern" && return Bool
    throw(FileFormatException("Unsupported field $field."))
end
# Map a Matrix Market "symmetry" keyword to the post-processing function that
# reconstructs the full matrix from its stored lower-triangular part.
function parse_symmetric(symm::String)
    symm == "general" && return identity
    (symm == "symmetric" || symm == "hermitian") && return hermitianize!
    symm == "skew-symmetric" && return skewsymmetrize!
    throw(FileFormatException("Unknown matrix symmetry: $symm."))
end
function hermitianize!(M::AbstractMatrix)
M .+= tril(M, -1)'
return M
end
# Reconstruct a skew-symmetric matrix from its stored lower triangle:
# the upper triangle is the negated (adjoint) mirror of the lower part.
function skewsymmetrize!(M::AbstractMatrix)
    strict_lower = tril(M, -1)
    M .-= strict_lower'
    return M
end
# --- coordinate-line field parsers -------------------------------------------
# `splits` holds the whitespace positions located by `find_splits`; each
# parser slices its field out of `line` (`parse` tolerates the surrounding
# whitespace in the slice).

# Row index: everything up to (and including) the first whitespace.
parse_row(line, splits) = parse(Int, line[1:splits[1]])

# Column index. Pattern (`Bool`) files carry no value field, so the column
# runs to the end of the line; otherwise it ends at the second split.
parse_col(line, splits, ::Type{Bool}) = parse(Int, line[splits[1]:lastindex(line)])
parse_col(line, splits, eltype) = parse(Int, line[splits[1]:splits[2]])

# Value field. Complex entries are stored as two floats (re, im); pattern
# entries have no stored value and are implicitly `true`.
function parse_val(line, splits, ::Type{ComplexF64})
    re_part = parse(Float64, line[splits[2]:splits[3]])
    im_part = parse(Float64, line[splits[3]:lastindex(line)])
    return ComplexF64(re_part, im_part)
end
parse_val(line, splits, ::Type{Bool}) = true
parse_val(line, splits, ::Type{T}) where {T} = parse(T, line[splits[2]:lastindex(line)])

# Number of whitespace separators expected on a data line per element type.
num_splits(::Type{ComplexF64}) = 3
num_splits(::Type{Bool}) = 1
num_splits(elty) = 2
# Record the index of the first whitespace character after each of the
# first `num` tokens of `s` (token -> whitespace transitions). Leading
# whitespace is skipped via the initial `prev_was_space` state.
function find_splits(s::String, num)
    splits = Vector{Int}(undef, num)
    found = 0
    prev_was_space = s[1] == '\t' || s[1] == ' '
    @inbounds for i in 1:length(s)
        c = s[i]
        if c == '\t' || c == ' '
            if !prev_was_space
                prev_was_space = true
                found += 1
                splits[found] = i
                found == num && break
            end
        else
            prev_was_space = false
        end
    end
    return splits
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 1924 | """
mmwrite(filename, matrix)
Write a sparse matrix to .mtx file format.
# Arguments
- `filename::String`: The file to write.
- `matrix::SparseMatrixCSC`: The sparse matrix to write.
"""
function mmwrite(filename::String, matrix::SparseMatrixCSC)
    # Open the destination, wrapping it in a gzip compressor for ".gz" paths.
    raw = open(filename, "w")
    stream = endswith(filename, ".gz") ? GzipCompressorStream(raw) : raw
    mmwrite(stream, matrix)
    # Closing the (possibly wrapping) stream flushes and finishes the output.
    close(stream)
end
# Stream-based writer: emits the banner line, the size/nnz line and one
# data line per structural nonzero in column-major (CSC) order.
function mmwrite(stream::IO, matrix::SparseMatrixCSC)
    nl = "\n"
    elem = generate_eltype(eltype(matrix))
    sym = generate_symmetric(matrix)
    # write header
    write(stream, "%%MatrixMarket matrix coordinate $elem $sym$nl")
    # only use lower triangular part of symmetric and Hermitian matrices
    if issymmetric(matrix) || ishermitian(matrix)
        matrix = tril(matrix)
    end
    # write matrix size and number of nonzeros
    # (computed after the tril above, so nnz matches the entries written)
    write(stream, "$(size(matrix, 1)) $(size(matrix, 2)) $(nnz(matrix))$nl")
    rows = rowvals(matrix)
    vals = nonzeros(matrix)
    for i in 1:size(matrix, 2)
        for j in nzrange(matrix, i)
            # `i` is the column index; `j` indexes into rows/vals.
            entity = generate_entity(i, j, rows, vals, elem)
            write(stream, entity)
        end
    end
end
# Map a Julia element type to the MatrixMarket field name. Dispatch order
# matters: `Bool <: Integer`, so the dedicated `Bool` method must exist to
# win over the `Integer` one ("pattern" files store no values).
function generate_eltype(::Type{<:Bool})
    return "pattern"
end
function generate_eltype(::Type{<:Integer})
    return "integer"
end
function generate_eltype(::Type{<:AbstractFloat})
    return "real"
end
function generate_eltype(::Type{<:Complex})
    return "complex"
end
function generate_eltype(elty)
    return error("Invalid matrix type")
end
# Classify the matrix's symmetry for the MatrixMarket banner. Symmetry is
# checked first, so real symmetric matrices (which are also Hermitian)
# report "symmetric".
function generate_symmetric(m::AbstractMatrix)
    issymmetric(m) && return "symmetric"
    ishermitian(m) && return "hermitian"
    return "general"
end
# Format one data line for column `i` and nonzero slot `j`:
#   pattern -> "row col\n", complex -> "row col re im\n",
#   everything else -> "row col value\n".
function generate_entity(i, j, rows, vals, kind::String)
    row = rows[j]
    if kind == "pattern"
        return string(row, ' ', i, '\n')
    elseif kind == "complex"
        v = vals[j]
        return string(row, ' ', i, ' ', real(v), ' ', imag(v), '\n')
    else
        return string(row, ' ', i, ' ', vals[j], '\n')
    end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 3167 | @testset "mtx" begin
    # Reference matrix matching test/data/test.mtx: 5 structural entries
    # in an 11x12 integer matrix.
    mtx_filename = joinpath(TEST_PATH, "data", "test.mtx")
    res = sparse(
        [5, 4, 1, 2, 6],
        [1, 5, 1, 4, 7],
        [1, 1, 1, 1, 1],
        11, 12
    )
    # Side effect: downloads the NIST sample matrices into the current
    # directory (files already present are not re-downloaded).
    testmatrices = download_unzip_nist_files()
    @testset "read/write mtx" begin
        rows, cols, entries, rep, field, symm = mminfo(mtx_filename)
        @test rows == 11
        @test cols == 12
        @test entries == 5
        @test rep == "coordinate"
        @test field == "integer"
        @test symm == "general"
        A = mmread(mtx_filename)
        @test A isa SparseMatrixCSC
        @test A == res
        # Round trip: writing `res` back out must reproduce the reference
        # file byte-for-byte (compared via SHA-256).
        newfilename = replace(mtx_filename, "test.mtx" => "test_write.mtx")
        mmwrite(newfilename, res)
        f = open(mtx_filename)
        sha_test = bytes2hex(sha256(read(f, String)))
        close(f)
        f = open(newfilename)
        sha_new = bytes2hex(sha256(read(f, String)))
        close(f)
        @test sha_test == sha_new
        rm(newfilename)
    end
    @testset "read/write mtx.gz" begin
        gz_filename = mtx_filename * ".gz"
        rows, cols, entries, rep, field, symm = mminfo(gz_filename)
        @test rows == 11
        @test cols == 12
        @test entries == 5
        @test rep == "coordinate"
        @test field == "integer"
        @test symm == "general"
        A = mmread(gz_filename)
        @test A isa SparseMatrixCSC
        @test A == res
        newfilename = replace(gz_filename, "test.mtx.gz" => "test_write.mtx.gz")
        mmwrite(newfilename, res)
        # Compare the *decompressed* payloads, since gzip headers can differ.
        stream = GzipDecompressorStream(open(gz_filename))
        sha_test = bytes2hex(sha256(read(stream, String)))
        close(stream)
        stream = GzipDecompressorStream(open(newfilename))
        sha_new = bytes2hex(sha256(read(stream, String)))
        close(stream)
        @test sha_test == sha_new
        rm(newfilename)
    end
    @testset "read/write NIST mtx files" begin
        # verify mmread(mmwrite(A)) == A
        for filename in filter(t -> endswith(t, ".mtx"), readdir())
            new_filename = replace(filename, ".mtx" => "_.mtx")
            A = MatrixMarket.mmread(filename)
            MatrixMarket.mmwrite(new_filename, A)
            new_A = MatrixMarket.mmread(new_filename)
            @test new_A == A
            rm(new_filename)
        end
    end
    @testset "read/write NIST mtx.gz files" begin
        for gz_filename in filter(t -> endswith(t, ".mtx.gz"), readdir())
            mtx_filename = replace(gz_filename, ".mtx.gz" => ".mtx")
            # reading from .mtx and .mtx.gz must be identical
            A_gz = MatrixMarket.mmread(gz_filename)
            A = MatrixMarket.mmread(mtx_filename)
            @test A_gz == A
            # writing to .mtx and .mtx.gz must be identical
            new_filename = replace(gz_filename, ".mtx.gz" => "_.mtx.gz")
            mmwrite(new_filename, A)
            new_A = MatrixMarket.mmread(new_filename)
            @test new_A == A
            rm(new_filename)
        end
    end
    # clean up
    # NOTE(review): assumes every downloaded .mtx has a matching .mtx.gz;
    # `rm` would throw if a .gz download had failed — TODO confirm.
    for filename in filter(t -> endswith(t, ".mtx"), readdir())
        rm(filename)
        rm(filename * ".gz")
    end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 319 | using MatrixMarket
using CodecZlib
using Downloads
using GZip
using SparseArrays
using SHA
using Test

include("test_utils.jl")

const TEST_PATH = @__DIR__
# Scraped once up front; downloading the actual matrices happens lazily
# inside the testsets (see `download_unzip_nist_files`).
const NIST_FILELIST = download_nist_filelist()

# Test files to include; each wraps its checks in its own @testset.
tests = [
    "mtx",
]

@testset "MatrixMarket.jl" begin
    for t in tests
        include("$(t).jl")
    end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
# Decompress `fname` (which must end in ".gz") next to itself and return
# the path of the decompressed file.
function gunzip(fname)
    destname, ext = splitext(fname)
    ext == ".gz" || error("gunzip: $fname: unknown suffix -- ignored")
    open(destname, "w") do out
        GZip.open(fname) do gz
            write(out, read(gz, String))
        end
    end
    return destname
end
# Scrape the NIST MatrixMarket index page and return a vector of
# (collectionname, setname, matrixname) triples.
# NOTE(review): hits the network (math.nist.gov) unless "matrices.html"
# already exists, and deletes the cached file afterwards.
function download_nist_filelist()
    isfile("matrices.html") ||
        Downloads.download("math.nist.gov/MatrixMarket/matrices.html", "matrices.html")
    matrixmarketdata = Any[]
    open("matrices.html") do f
        for line in readlines(f)
            # Each matrix link looks like <A HREF="/MatrixMarket/data/...">;
            # path segments 4:6 are collection/set/matrix.
            if occursin("""<A HREF="/MatrixMarket/data/""", line)
                collectionname, setname, matrixname = split(split(line, '"')[2], '/')[4:6]
                matrixname = split(matrixname, '.')[1]
                push!(matrixmarketdata, (collectionname, setname, matrixname))
            end
        end
    end
    rm("matrices.html")
    return matrixmarketdata
end
# Download a fixed set of NIST matrices plus one chosen at random from
# NIST_FILELIST, decompressing each into "<collection>_<set>_<name>.mtx".
# NOTE(review): a failed download is skipped silently, yet its entry is
# still part of the returned `testmatrices` — callers relying on the files
# existing should re-check with `isfile`.
function download_unzip_nist_files()
    # Download one matrix at random plus some specifically chosen ones.
    n = rand(1:length(NIST_FILELIST))
    testmatrices = [
        ("NEP", "mhd", "mhd1280b"),
        ("Harwell-Boeing", "acoust", "young4c"),
        ("Harwell-Boeing", "platz", "plsk1919"),
        NIST_FILELIST[n]
    ]
    for (collectionname, setname, matrixname) in testmatrices
        fn = string(collectionname, '_', setname, '_', matrixname)
        mtxfname = string(fn, ".mtx")
        if !isfile(mtxfname)
            url = "https://math.nist.gov/pub/MatrixMarket2/$collectionname/$setname/$matrixname.mtx.gz"
            gzfname = string(fn, ".mtx.gz")
            try
                Downloads.download(url, gzfname)
            catch
                continue
            end
            gunzip(gzfname)
        end
    end
    return testmatrices
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | docs | 1344 | # MatrixMarket
[](https://travis-ci.org/JuliaSparse/MatrixMarket.jl)
Package to read/write matrices from/to files in the [Matrix Market native exchange
format](http://math.nist.gov/MatrixMarket/formats.html#MMformat).
The [Matrix Market](http://math.nist.gov/MatrixMarket/) is a NIST repository of
"test data for use in comparative studies of algorithms for numerical linear
algebra, featuring nearly 500 sparse matrices from a variety of applications,
as well as matrix generation tools and services." Over time, the [Matrix Market's
native exchange format](http://math.nist.gov/MatrixMarket/formats.html#MMformat)
has become one of the _de facto_ standard file formats for exchanging matrix
data.
## Usage
### Read
using MatrixMarket
M = MatrixMarket.mmread("myfile.mtx")
`M` will be a sparse or dense matrix depending on whether the file contains a matrix
in coordinate format or array format. The specific type of `M` may be `Symmetric` or
`Hermitian` depending on the symmetry information contained in the file header.
MatrixMarket.mmread("myfile.mtx", true)
Returns raw data from the file header. Does not read in the actual matrix elements.
### Write
MatrixMarket.mmwrite("myfile.mtx", M)
`M` has to be a sparse matrix.
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
|
[
"MIT"
] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | code | 5014 | module WhereIsMyDocstring
using Documenter
export @docmatch
# In-memory summary of one matched docstring, flattened out of
# `Base.Docs.DocStr` for easy display.
mutable struct DocStr
    binding    # Base.Docs.Binding the docstring is attached to
    mod        # module the docstring was defined in
    source     # "path:linenumber" of the definition
    signature  # type signature (the `:typesig` metadata), e.g. Tuple{Number}
    text       # first stored text fragment of the docstring body
    function DocStr(D::Base.Docs.DocStr)
        d = new()
        if length(D.text) > 0
            d.text = D.text[1]
        else
            # Some docstrings carry no raw text; fall back to the rendered
            # object when present, else an empty teaser.
            if isdefined(D, :object)
                d.text = string(D.object)
            else
                d.text = ""
            end
        end
        # Strip leading backticks/whitespace so the teaser line looks clean.
        d.text = lstrip(d.text, '`')
        d.text = lstrip(d.text)
        d.binding = D.data[:binding]
        d.mod = D.data[:module]
        d.source = D.data[:path] * ":" * string(D.data[:linenumber])
        d.signature = D.data[:typesig]
        return d
    end
end
# Some type printing gymnastics for the signatures
# Strip type-variable bounds from a printed type: "S<:T<:Integer" -> "T",
# "T<:Real" -> "T", plain names pass through unchanged.
function _name(x)
    str = string(x)
    bounded_both = r"([a-zA-Z1-9]*)<:(.+?)<:([a-zA-Z1-9]*)"
    bounded_upper = r"([a-zA-Z1-9]*)<:([a-zA-Z1-9]*)"
    # Collapse `Lo<:T<:Hi` down to the middle name first...
    while match(bounded_both, str) !== nothing
        str = replace(str, bounded_both => s"\2")
    end
    # ...then `T<:Hi` down to `T`.
    while match(bounded_upper, str) !== nothing
        str = replace(str, bounded_upper => s"\1")
    end
    return str
end
# Render a callable `@docs` entry for a UnionAll signature, e.g.
# `(::Vector{S}, ::Matrix{T}) where {S, T<:S}` — a workaround for the
# garbled signatures of https://github.com/JuliaLang/julia/issues/29437.
function _print_type_hint(x::Type)
    @assert x isa UnionAll
    # Peel off the `where` variables...
    vars = []
    while x isa UnionAll
        push!(vars, x.var)
        x = x.body
    end
    # ...and, for a union of tuple types, keep the last member
    # (NOTE(review): presumably the full-arity method tuple — confirm).
    while x isa Union
        x = x.b
    end
    @assert x <: Tuple
    res = "(" * join(["::$(_name(T))" for T in x.parameters], ", ") * ")"
    # Clean up stray `<:` fragments left over by `_name`.
    while occursin("::<:", res)
        res = replace(res, "::<:" => "::")
    end
    while occursin("<:<:", res)
        res = replace(res, "<:<:" => "<:")
    end
    return res * " where {" * join(vars, ", ") * "}"
end
# Render the `@docs`-ready invocation strings for a signature type.
# `Union{}` (no signature restriction) renders as a single empty entry;
# UnionAll signatures additionally get a concrete suggestion.
function _print_type(x::Type)
    x isa Core.TypeofBottom && return [""]
    if x isa UnionAll
        hint = _print_type_hint(x)
        return ["(::$x)\n try the following:", "$hint"]
    end
    return _print_type_real(x)
end
# Render non-UnionAll signatures: unions are flattened into one list,
# tuples become "(::T1, ::T2, ...)", everything else "(::T)".
function _print_type_real(x)
    if x isa Union
        return append!(_print_type_real(x.a), _print_type_real(x.b))
    end
    if x <: Tuple
        args = ["::$(T)" for T in x.parameters]
        return ["(" * join(args, ", ") * ")"]
    end
    return ["(::$x)"]
end
# Pretty-print one matched docstring: binding, first line of the content,
# signature type, the `@docs`-ready invocation(s) and the source location.
function Base.show(io::IO, d::DocStr)
    printstyled(io, d.binding, bold = true)
    # Only the first line of the docstring body is shown as a teaser.
    text = join(split(d.text, "\n")[1:1], "\n")
    printstyled(io, "\n Content:", color = :light_green)
    printstyled(io, "\n ", text, " [...]", italic = true)
    printstyled(io, "\n Signature type:", color = :light_green)
    printstyled(io, "\n ", d.signature)
    printstyled(io, "\n Include in ```@docs``` block one of the following:", color = :light_green)
    for s in _print_type(d.signature)
        print(io, "\n ")
        print(io, "$(d.binding)")
        # Highlight the "try the following:" hint emitted by `_print_type`
        # for UnionAll signatures.
        # BUGFIX: this branch previously looked for "might need adjustment:",
        # a marker `_print_type` never produces, so it was dead code.
        if occursin("try the following:", s)
            ss = split(s, "try the following:")
            print(io, ss[1])
            printstyled(io, "try the following:"; color = :light_yellow)
            print(io, ss[2])
        else
            print(io, s)
        end
    end
    printstyled(io, "\n Source:", color = :light_green)
    printstyled(io, "\n ", d.source, color = :light_grey)
    # BUGFIX: use the width of `io` (which falls back to the terminal size
    # for plain streams) instead of hard-coding `stdout`.
    print(io, "\n", "="^displaysize(io)[2])
end
# Resolve `ex` (a function name or call signature, as an Expr/Symbol) in
# `mod`, then look the docstrings up through Documenter's doc system.
function _list_documenter_docstring(mod, ex)
    bind = Documenter.DocSystem.binding(mod, ex)
    # `Base.Docs.signature` builds an expression for the call's type
    # signature, which must be evaluated inside `mod`.
    typesig = Core.eval(mod, Base.Docs.signature(ex))
    return list_documenter_docstring(mod, bind; sig = typesig)
end
# Look up the docstrings of `fun` in `mod`, optionally restricted to the
# signature type `sig`.
function list_documenter_docstring(mod, fun; sig = Union{})
    # BUGFIX: the body previously referenced an undefined variable `ex`
    # instead of the `fun` argument, so this method always threw an
    # UndefVarError when called.
    bind = Documenter.DocSystem.binding(mod, fun)
    return list_documenter_docstring(mod, bind; sig = sig)
end
# Fetch docstrings for `bind` restricted to `mod` (or unrestricted in the
# binding-only method below) and wrap each hit in the lightweight `DocStr`.
function list_documenter_docstring(mod, bind::Base.Docs.Binding; sig = Union{})
    res = Documenter.DocSystem.getdocs(bind, sig, modules = [mod])
    return [DocStr(r) for r in res]
end
function list_documenter_docstring(bind::Base.Docs.Binding; sig = Union{})
    res = Documenter.DocSystem.getdocs(bind, sig)
    return [DocStr(r) for r in res]
end
"""
@docmatch f
@docmatch f(sig)
@docmatch f module
@docmatch f(sig) module
Retrieves all docstrings that would be included in the block
````
```@docs
f
```
````
or
````
```@docs
f(sig)
```
````
The optional argument `module` controls in which module to look for `f`.
#### Example
```
julia> @docmatch sin
2-element Vector{WhereIsMyDocstring.DocStr}:
Base.sin
Content:
sin(x) [...]
Signature type:
Tuple{Number}
Include in ```@docs``` block:
Base.sin(::Number)
Source:
math.jl:490
================================================================================
Base.sin
Content:
sin(A::AbstractMatrix) [...]
Signature type:
Tuple{AbstractMatrix{<:Real}}
Include in ```@docs``` block:
Base.sin(::AbstractMatrix{<:Real})
Source:
/usr/share/julia/stdlib/v1.10/LinearAlgebra/src/dense.jl:956
```
"""
# Bodyless stub so the docstring above attaches to `@docmatch` itself;
# the behavior lives in the one- and two-argument methods below.
macro docmatch
end
# Single-argument form: resolve the binding and signature against `Main`.
# Note the lookup happens at macro-expansion time; the resulting vector is
# spliced into the expansion as a literal value.
macro docmatch(ex)
    bind = Documenter.DocSystem.binding(Main, ex)
    typesig = Core.eval(Main, Base.Docs.signature(ex))
    return list_documenter_docstring(bind, sig = typesig)
end
# Two-argument form: the module is supplied by the caller.
macro docmatch(ex, mod)
    # (awkward)
    # mod is evaluated directly to get the module (I don't want to eval this)
    # but the expression for the function (+ signature)
    # needs to be passed to the Documenter function as an expression,
    # which is later eval'ed
    return quote
        _list_documenter_docstring($(esc(mod)), $(QuoteNode(ex)))
    end
end
end # module WhereIsMyDocstring
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
|
[
"MIT"
] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | code | 1252 | using Test, WhereIsMyDocstring
# Fixture module: a mix of real methods, signature-only docstrings and a
# docstring transplanted via `@doc` onto an undocumented function.
module TestDocstrings
    "foo(::Number)"
    foo(::Number) = nothing
    "foo(::Float64)"
    foo(::Float64) = nothing
    # Docstrings attached to bare call signatures (no method body defined):
    "baz(::Number)"
    baz(::Number)
    "baz(::Float64)"
    baz(::Float64)
    "bla"
    function baz(::T, ::S) where {S <: Integer, T <: S}
    end
    # Reuse baz(::Float64)'s docstring for foobar (two-line `@doc` form).
    @doc (@doc baz(::Float64))
    foobar(::Number) = nothing
    "blub"
    function fookw(x::Number, z::Number = 1; y::Number = 2)
    end
    "blub"
    function foopa(x::Vector{S}, z::Matrix{T} = 1; y::Number = 2) where {S, T <: S}
    end
end

# `foo` is not documented in Main, so lookups without a module find nothing.
D = @docmatch foo
@test sprint(show, D) isa String
@test length(D) == 0
D = @docmatch foo(::Number)
@test sprint(show, D) isa String
@test length(D) == 0
D = @docmatch foo TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 2
D = @docmatch foo(::Number) TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 1
D = @docmatch baz TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 3
D = @docmatch foobar TestDocstrings
@test sprint(show, D) isa String
# Smoke tests: printing must not error for Base bindings, keyword args,
# optional positional args and UnionAll signatures.
D = @docmatch length
@test sprint(show, D) isa String
D = @docmatch fookw TestDocstrings
@test sprint(show, D) isa String
D = @docmatch foopa TestDocstrings
@test sprint(show, D) isa String
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
|
[
"MIT"
] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | docs | 3403 | # WhereIsMyDocstring.jl
---
*Dude, where is my docstring?*
---
- Have you ever wondered, which docstring is included in a ```` ```@docs``` ```` block when writing the documentation?
- Are you tired of finding the magic syntax to include the *right* docstring of a method?
Enter: WhereIsMyDocstring.jl
## Status
[](https://github.com/thofma/WhereIsMyDocstring.jl/actions/workflows/CI.yml?query=branch%3Amaster)
[](https://codecov.io/gh/thofma/WhereIsMyDocstring.jl)
[](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
## Installation
Since WhereIsMyDocstring.jl is a registered package, it can be simply installed as follows:
```
julia> using Pkg; Pkg.install("WhereIsMyDocstring")
```
## Usage
The package provides the `@docmatch` macro, which allows one to simulate the behaviour of ```` ```@docs``` ```` blocks interactively. This is helpful in case a function has many different methods and docstrings, and one wants to include a specific one. In particular in the presence of type parameters, this can be a frustrating experience due to https://github.com/JuliaLang/julia/issues/29437. Here is a simple example:
```
julia> using WhereIsMyDocstring
julia> @docmatch sin
2-element Vector{WhereIsMyDocstring.DocStr}:
Base.sin
Content:
sin(x) [...]
Signature type:
Tuple{Number}
Include in ```@docs``` block:
Base.sin(::Number)
Source:
math.jl:490
====================================================================================
Base.sin
Content:
sin(A::AbstractMatrix) [...]
Signature type:
Tuple{AbstractMatrix{<:Real}}
Include in ```@docs``` block:
Base.sin(::AbstractMatrix{<:Real})
Source:
/usr/share/julia/stdlib/v1.10/LinearAlgebra/src/dense.jl:956
====================================================================================
```
The macro returns the docstrings (including metadata). In view of ```@docs ``` blocks, the most important information is the "Include in ..." field. This provides the right invocation to include the specific docstring. For example, if we want to include the second docstring, in our documentation markdown source we would write:
````
```@docs
Base.sin(::AbstractMatrix{<:Real})
```
````
A more complicated example is:
````julia-repl
julia> "blub"
function foo(x::Vector{S}, z::Matrix{T} = 1; y::Number = 2) where {S, T <: S}
end
julia> @docmatch foo
1-element Vector{WhereIsMyDocstring.DocStr}:
foo
Content:
blub [...]
Signature type:
Union{Tuple{Vector{S}}, Tuple{T}, Tuple{S}, Tuple{Vector{S}, Matrix{T}}} where {S, T<:S}
Include in ```@docs``` block:
foo(::Union{Tuple{Vector{S}}, Tuple{T}, Tuple{S}, Tuple{Vector{S}, Matrix{T}}} where {S, T<:S})
try the following:
foo(::Array{S, 1}, ::Array{T, 2}) where {S, T<:S}
Source:
REPL[2]:1
````
Note that the type of the signature is garbled due to https://github.com/JuliaLang/julia/issues/29437. This also messes up the lookup. Here we are warned about this and a suggested way to fix it is provided via `foo(::Array{S, 1}, ::Array{T, 2}) where {S, T<:S}`.
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1709 | using NiLangCore, BenchmarkTools
# Benchmark suite comparing NiLang's reversible stack instructions against
# plain Julia push!/pop! and FastStack, with and without invertibility
# checks and bounds checks.
bg = BenchmarkGroup()

# pop!/push!
bg["NiLang"] = @benchmarkable begin
    @instr PUSH!(x)
    @instr POP!(x)
end seconds=1 setup=(x=3.0)

# @invcheckoff pop!/push!
bg["NiLang-@invcheckoff"] = @benchmarkable begin
    @instr @invcheckoff PUSH!(x)
    @instr @invcheckoff POP!(x)
end seconds=1 setup=(x=3.0)

# @invcheckoff @inbounds pop!/push!
bg["NiLang-@invcheckoff-@inbounds"] = @benchmarkable begin
    @instr @invcheckoff @inbounds PUSH!(x)
    @instr @invcheckoff @inbounds POP!(x)
end seconds=1 setup=(x=3.0)

# Julia pop!/push! (typed Vector{Float64} stack)
bg["Julia"] = @benchmarkable begin
    push!(stack, x)
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Float64[])

# FastStack-inbounds-Any (untyped FastStack)
bg["FastStack-inbounds-Any"] = @benchmarkable begin
    @inbounds push!(stack, x)
    @inbounds pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack(10))

# Julia pop!/push! (untyped Vector{Any} stack)
bg["Julia-Any"] = @benchmarkable begin
    push!(stack, x)
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Any[])
# setindex: store/load through a plain array slot.
bg["setindex"] = @benchmarkable begin
    stack[2] = x
    x = 0.0
    x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])

# setindex-inbounds
# BUGFIX: this case was previously a byte-for-byte copy of "setindex"
# without any @inbounds, so the two benchmarks measured identical code.
bg["setindex-inbounds"] = @benchmarkable begin
    @inbounds stack[2] = x
    x = 0.0
    @inbounds x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])
# FastStack (typed)
bg["FastStack"] = @benchmarkable begin
    push!(stack, x)
    x = 0.0
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))

# FastStack-inbounds (typed, bounds checks elided)
bg["FastStack-inbounds"] = @benchmarkable begin
    @inbounds push!(stack, x)
    x = 0.0
    @inbounds x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))

tune!(bg)
run(bg) | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 2297 | using Zygote
# -----------------------------------------------------------------------------
# Scratch/experiment file: explores reverse-mode gradients through invertible
# functions and benchmarks several "value + gradient slot" struct layouts.
# NOTE(review): this script does not run top-to-bottom as-is (see notes
# below); it reads as a REPL transcript kept for reference.
# -----------------------------------------------------------------------------
f(x, y) = (x+exp(y), y)
invf(x, y) = (x-exp(y), y)
# ∂L/∂x2 = ∂L/∂x1*∂x1/∂x2 + ∂L/∂y1*∂y1/∂y2 = ∂L/∂x1*invf'(x2) + ∂L/∂y1*invf'(y2)
x1, y1 = 1.4, 4.4
# NOTE(review): `x` and `y` are undefined here — presumably `x1, y1` were meant.
x2, y2 = f(x,y)
function gf(x, y, gx, gy)
    x2, y2 = f(x, y)
    # NOTE(review): invJ1/invJ2 are computed but unused; the gradients are
    # returned unchanged.
    invJ1 = gradient((x2, y2)->invf(x2, y2)[1], x2, y2)
    invJ2 = gradient((x2, y2)->invf(x2, y2)[2], x2, y2)
    return (x2, y2, gx, gy)
end
gradient((x, y)->invf(x, y)[1], x, y)
# Mutable wrapper around a scalar value.
mutable struct A{T}
    x::T
end
Base.:*(x1::A, x2::A) = A(x1.x*x2.x)
Base.:+(x1::A, x2::A) = A(x1.x+x2.x)
Base.zero(::A{T}) where T = A(T(0))
# Immutable counterpart of A.
struct A2{T}
    x::T
end
Base.:*(x1::A2, x2::A2) = A2(x1.x*x2.x)
Base.:+(x1::A2, x2::A2) = A2(x1.x+x2.x)
Base.zero(::A2{T}) where T = A2(T(0))
# NOTE(review): `B{T}` is undefined, and `BG` is immediately redefined below
# with a different field type — evaluating both definitions errors.
struct BG{T}
    x::T
    g::B{T}
    BG(x::T) where T = new{T}(x)
end
struct BG{T}
    x::T
    g::BG{T}
    BG(x::T) where T = new{T}(x)
end
# Mutable, self-referential gradient slot (left undefined until needed).
mutable struct AG{T}
    x::T
    g::AG{T}
    AG(x::T) where T = new{T}(x)
    AG(x::T, g::TG) where {T,TG} = new{T}(x, T(g))
end
Base.:*(x1::AG, x2::AG) = AG(x1.x*x2.x)
Base.:+(x1::AG, x2::AG) = AG(x1.x+x2.x)
Base.zero(::AG{T}) where T = AG(T(0))
init(ag::AG{T}) where T = (ag.g = AG(T(0)))
using BenchmarkTools
ma = fill(A(1.0), 100,100)
ma2 = fill(A2(1.0), 100,100)
# Naive triple-loop matrix multiply used to compare the wrapper layouts.
# NOTE(review): shadows the 2-argument `f` defined above.
function f(ma, mb)
    M, N, K = size(ma, 1), size(mb, 2), size(ma, 2)
    res = fill(zero(ma[1]), M, N)
    for i=1:M
        for j=1:N
            for k=1:K
                @inbounds res[i,j] += ma[i,k]*mb[k,j]
            end
        end
    end
    return res
end
@benchmark f(ma, ma)
@benchmark f(ma2, ma2)
ma = fill(AG(1.0), 100,100)
@benchmark ma*ma
a = A(0.4)
ag = AG(0.4)
using NiLangCore
@benchmark isdefined($ag, :g)
@benchmark $ag + $ag
ag.g = AG(0.0)
@benchmark $a + $a
# Immutable struct with a Ref-based gradient slot.
struct SG{T}
    x::T
    g::Ref{T}
    SG(x::T) where T = new{T}(x)
end
Base.:*(x1::SG, x2::SG) = SG(x1.x*x2.x)
Base.:+(x1::SG, x2::SG) = SG(x1.x+x2.x)
Base.zero(::SG{T}) where T = SG(T(0))
# NOTE(review): duplicate of the `init` definition above.
init(ag::AG{T}) where T = (ag.g = AG(T(0)))
using BenchmarkTools
ma = fill(SG(1.0), 100,100)
@benchmark ma*ma
a = A(0.4)
ag = AG(0.4)
using NiLangCore
@benchmark isdefined($ag, :g)
@benchmark $ag + $ag
ag.g = AG(0.0)
@benchmark $a + $a
# Benchmark a reversible loop with inversion checks enabled.
using NiLang, NiLang.AD
@i function test(x, one, N::Int)
    for i = 1:N
        x += one
    end
end
invcheckon(true)
@benchmark test'(Loss(0.0), 1.0, 1000000)
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 382 | using Documenter, NiLangCore
makedocs(;
    modules=[NiLangCore],
    format=Documenter.HTML(),
    pages=[
        "Home" => "index.md",
    ],
    repo="https://github.com/GiggleLiu/NiLangCore.jl/blob/{commit}{path}#L{line}",
    sitename="NiLangCore.jl",
    authors="JinGuo Liu, thautwarm",
    # NOTE(review): `assets` as a `makedocs` keyword is deprecated in newer
    # Documenter releases (it moved into `Documenter.HTML`) — confirm the
    # pinned Documenter version before changing.
    assets=String[],
)
# Pushes the built docs to the gh-pages branch (runs on CI).
deploydocs(;
    repo="github.com/GiggleLiu/NiLangCore.jl",
)
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 6174 | ############# function properties #############
export isreversible, isreflexive, isprimitive
export protectf
"""
isreversible(f, ARGT)
Return `true` if a function is reversible.
"""
isreversible(f, ::Type{ARGT}) where ARGT = hasmethod(~f, ARGT)
"""
isreflexive(f)
Return `true` if a function is self-inverse.
"""
isreflexive(f) = (~f) === f
"""
isprimitive(f)
Return `true` if `f` is an `instruction` that can not be decomposed anymore.
"""
isprimitive(f) = false
############# ancillas ################
export InvertibilityError, @invcheck
"""
deanc(a, b)
Deallocate varialbe `a` with value `b`. It will throw an error if
* `a` and `b` are objects with different types,
* `a` is not equal to `b` (for floating point numbers, an error within `NiLangCore.GLOBAL_ATOL[]` is allowed),
"""
function deanc end
function deanc(a::T, b::T) where T <: AbstractFloat
if a !== b && abs(b - a) > GLOBAL_ATOL[]
throw(InvertibilityError("deallocate fail (floating point numbers): $a ≂̸ $b"))
end
end
deanc(x::T, val::T) where T<:Tuple = deanc.(x, val)
deanc(x::T, val::T) where T<:AbstractArray = x === val || deanc.(x, val)
deanc(a::T, b::T) where T<:AbstractString = a === b || throw(InvertibilityError("deallocate fail (string): $a ≂̸ $b"))
function deanc(x::T, val::T) where T<:Dict
if x !== val
if length(x) != length(val)
throw(InvertibilityError("deallocate fail (dict): length of dict not the same, got $(length(x)) and $(length(val))!"))
else
for (k, v) in x
if haskey(val, k)
deanc(x[k], val[k])
else
throw(InvertibilityError("deallocate fail (dict): key $k of dict does not exist!"))
end
end
end
end
end
deanc(a, b) = throw(InvertibilityError("deallocate fail (type mismatch): `$(typeof(a))` and `$(typeof(b))`"))
# Generic fallback for two values of the same composite type: primitive
# types are compared by identity, composites recurse into each field.
# (Removed an unused `nf = fieldcount(a)` local from the generator body.)
@generated function deanc(a::T, b::T) where T
    if isprimitivetype(T)
        :(a === b || throw(InvertibilityError("deallocate fail (primitive): $a ≂̸ $b")))
    else
        Expr(:block, [:($deanc(a.$NAME, b.$NAME)) for NAME in fieldnames(T)]...)
    end
end
"""
InvertibilityError <: Exception
InvertibilityError(ex)
The error for irreversible statements.
"""
struct InvertibilityError <: Exception
ex
end
"""
@invcheck x val
The macro version `NiLangCore.deanc`, with more informative error.
"""
macro invcheck(x, val)
esc(_invcheck(x, val))
end
# the expression for reversibility checking
function _invcheck(x, val)
Expr(:try, Expr(:block, :($deanc($x, $val))), :e, Expr(:block,
:(println("deallocate fail `$($(QuoteNode(x))) → $($(QuoteNode(val)))`")),
:(throw(e)))
)
end
_invcheck(docheck::Bool, arg, res) = docheck ? _invcheck(arg, res) : nothing
"""
chfield(x, field, val)
Change a `field` of an object `x`.
The `field` can be a `Val` type
```jldoctest; setup=:(using NiLangCore)
julia> chfield(1+2im, Val(:im), 5)
1 + 5im
```
or a function
```jldoctest; setup=:(using NiLangCore)
julia> using NiLangCore
julia> struct GVar{T, GT}
x::T
g::GT
end
julia> @fieldview xx(x::GVar) = x.x
julia> chfield(GVar(1.0, 0.0), xx, 2.0)
GVar{Float64, Float64}(2.0, 0.0)
```
"""
# Bodyless stub; concrete `chfield` methods are added per field/accessor
# elsewhere (see the docstring above for the `Val`/function conventions).
function chfield end
########### Inv ##########
export Inv, invtype
"""
Inv{FT} <: Function
Inv(f)
The inverse of a function.
"""
struct Inv{FT} <: Function
    f::FT
end
# Double inversion unwraps instead of nesting, so ~~f === f.
Inv(f::Inv) = f.f
@static if VERSION >= v"1.6"
    # The inverse of a composition applies the component inverses in
    # reverse order.
    Base.:~(f::Base.ComposedFunction) = (~(f.inner)) ∘ (~(f.outer))
end
Base.:~(f::Function) = Inv(f)
# For types, `~` maps a constructor to its "destructor" and back.
Base.:~(::Type{Inv{T}}) where T = T
Base.:~(::Type{T}) where T = Inv{T}
Base.show(io::IO, b::Inv) = print(io, "~$(b.f)")
# NOTE(review): overriding `Base.display` like this bypasses the display
# stack and prints to stdout — confirm intended.
Base.display(bf::Inv) = print(bf)

"""
    protectf(f)

Protect a function from being inverted, useful when using a callable object.
"""
protectf(x) = x
protectf(x::Inv) = x.f
# The "inverse type" of `T`: an `Inv` wrapping some subtype of `T`.
invtype(::Type{T}) where T = Inv{<:T}
######### Infer
export PlusEq, MinusEq, XorEq, MulEq, DivEq
"""
PlusEq{FT} <: Function
PlusEq(f)
Called when executing `out += f(args...)` instruction. The following two statements are same
```jldoctest; setup=:(using NiLangCore)
julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)
julia> x, y, z = PlusEq(*)(x, y, z)
(6.0, 2.0, 3.0)
julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)
julia> @instr x += y*z
julia> x, y, z
(6.0, 2.0, 3.0)
```
"""
struct PlusEq{FT} <: Function
f::FT
end
"""
MinusEq{FT} <: Function
MinusEq(f)
Called when executing `out -= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MinusEq{FT} <: Function
f::FT
end
"""
MulEq{FT} <: Function
MulEq(f)
Called when executing `out *= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MulEq{FT} <: Function
f::FT
end
"""
DivEq{FT} <: Function
DivEq(f)
Called when executing `out /= f(args...)` instruction. See `PlusEq` for detail.
"""
struct DivEq{FT} <: Function
f::FT
end
"""
XorEq{FT} <: Function
XorEq(f)
Called when executing `out ⊻= f(args...)` instruction. See `PlusEq` for detail.
"""
struct XorEq{FT} <: Function
f::FT
end
const OPMX{FT} = Union{PlusEq{FT}, MinusEq{FT}, XorEq{FT}, MulEq{FT}, DivEq{FT}}

# Make the wrappers callable:
#   (out!, args...) -> (op(out!, f(args...)), args...).
# Only +=, -= and ⊻= get call methods here; *= and /= presumably get
# theirs elsewhere — TODO confirm.
for (TP, OP) in [(:PlusEq, :+), (:MinusEq, :-), (:XorEq, :⊻)]
    @eval (inf::$TP)(out!, args...; kwargs...) = $OP(out!, inf.f(args...; kwargs...)), args...
    @eval (inf::$TP)(out!::Tuple, args...; kwargs...) = $OP.(out!, inf.f(args...; kwargs...)), args... # e.g. allow `(x, y) += sincos(a)`
end
# Inverses pair up: += ↔ -=, *= ↔ /=, and ⊻= is self-inverse.
Base.:~(op::PlusEq) = MinusEq(op.f)
Base.:~(om::MinusEq) = PlusEq(om.f)
Base.:~(op::MulEq) = DivEq(op.f)
Base.:~(om::DivEq) = MulEq(om.f)
Base.:~(om::XorEq) = om
for (T, S) in [(:PlusEq, "+="), (:MinusEq, "-="), (:MulEq, "*="), (:DivEq, "/="), (:XorEq, "⊻=")]
    @eval Base.display(o::$T) = print($S, "(", o.f, ")")
    @eval Base.display(o::Type{$T}) = print($S)
    @eval Base.show_function(io::IO, o::$T, compact::Bool) = print(io, "$($S)($(o.f))")
    # NOTE(review): the MIME string below is "plain/text"; the standard MIME
    # type is "text/plain", so this method likely never participates in
    # dispatch — confirm upstream before changing.
    @eval Base.show_function(io::IO, ::MIME"plain/text", o::$T, compact::Bool) = Base.show(io, o)
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 333 | module NiLangCore
using MLStyle
using TupleTools

# NOTE(review): include order appears significant — later files presumably
# use macros/utilities defined in earlier ones; confirm before reordering.
include("lens.jl")
include("utils.jl")
include("symboltable.jl")
include("stack.jl")
include("Core.jl")
include("vars.jl")
include("instr.jl")
include("dualcode.jl")
include("preprocess.jl")
include("variable_analysis.jl")
include("compiler.jl")
include("checks.jl")

end # module
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
Subsets and Splits